Commit c5aad5f2 authored by hablich, committed by Commit bot

Revert "Reland "ARM64: Add NEON support""

This reverts commit 8faf3d6f.

Reason: blocks the V8 roll into Chromium: https://codereview.chromium.org/2820753003/

TBR=martyn.capewell@arm.com,jarin@chromium.org,bmeurer@chromium.org,machenbach@chromium.org

NOTRY=true

Review-Url: https://codereview.chromium.org/2819093002
Cr-Commit-Position: refs/heads/master@{#44660}
parent a9e04c5f
@@ -46,7 +46,6 @@
 /src/inspector/build/closure-compiler
 /src/inspector/build/closure-compiler.tar.gz
 /test/benchmarks/data
-/test/cctest/traces-arm64
 /test/fuzzer/wasm
 /test/fuzzer/wasm.tar.gz
 /test/fuzzer/wasm_asmjs
......
@@ -2132,7 +2132,6 @@ v8_source_set("v8_base") {
     "src/arm64/macro-assembler-arm64.h",
     "src/arm64/simulator-arm64.cc",
     "src/arm64/simulator-arm64.h",
-    "src/arm64/simulator-logic-arm64.cc",
     "src/arm64/utils-arm64.cc",
     "src/arm64/utils-arm64.h",
     "src/compiler/arm64/code-generator-arm64.cc",
......
@@ -37,8 +37,6 @@ deps = {
     Var("chromium_url") + "/external/github.com/tc39/test262.git" + "@" + "230f9fc5688ce76bfaa99aba5f680a159eaac9e2",
   "v8/test/test262/harness":
     Var("chromium_url") + "/external/github.com/test262-utils/test262-harness-py.git" + "@" + "0f2acdd882c84cff43b9d60df7574a1901e2cdcd",
-  "v8/test/cctest/traces-arm64":
-    Var("chromium_url") + "/external/git.linaro.org/arm/vixl-simulator-traces.git" + "@" + "6168e7e1eec52c9cb0a62f87f94df0582dc48aa8",
   "v8/tools/clang":
     Var("chromium_url") + "/chromium/src/tools/clang.git" + "@" + "49df471350a60efaec6951f321dd65475496ba17",
   "v8/test/wasm-js":
......
@@ -57,15 +57,6 @@ inline int CPURegister::SizeInBytes() const {
   return reg_size / 8;
 }
 
-inline bool CPURegister::Is8Bits() const {
-  DCHECK(IsValid());
-  return reg_size == 8;
-}
-
-inline bool CPURegister::Is16Bits() const {
-  DCHECK(IsValid());
-  return reg_size == 16;
-}
-
 inline bool CPURegister::Is32Bits() const {
   DCHECK(IsValid());
@@ -78,13 +69,9 @@ inline bool CPURegister::Is64Bits() const {
   return reg_size == 64;
 }
 
-inline bool CPURegister::Is128Bits() const {
-  DCHECK(IsValid());
-  return reg_size == 128;
-}
-
 inline bool CPURegister::IsValid() const {
-  if (IsValidRegister() || IsValidVRegister()) {
+  if (IsValidRegister() || IsValidFPRegister()) {
     DCHECK(!IsNone());
     return true;
   } else {
@@ -100,14 +87,14 @@ inline bool CPURegister::IsValidRegister() const {
          ((reg_code < kNumberOfRegisters) || (reg_code == kSPRegInternalCode));
 }
 
-inline bool CPURegister::IsValidVRegister() const {
-  return IsVRegister() &&
-         ((reg_size == kBRegSizeInBits) || (reg_size == kHRegSizeInBits) ||
-          (reg_size == kSRegSizeInBits) || (reg_size == kDRegSizeInBits) ||
-          (reg_size == kQRegSizeInBits)) &&
-         (reg_code < kNumberOfVRegisters);
+inline bool CPURegister::IsValidFPRegister() const {
+  return IsFPRegister() &&
+         ((reg_size == kSRegSizeInBits) || (reg_size == kDRegSizeInBits)) &&
+         (reg_code < kNumberOfFPRegisters);
 }
 
 inline bool CPURegister::IsNone() const {
   // kNoRegister types should always have size 0 and code 0.
   DCHECK((reg_type != kNoRegister) || (reg_code == 0));
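
The two hunks above are the heart of the rename: the NEON patch's VRegister covers byte (B), half (H), single (S), double (D) and quad (Q) views of the SIMD register file, while the reverted FPRegister accepts only the scalar S and D sizes. For illustration only (not part of the diff), the difference as two standalone predicates, with the sizes written as plain integers instead of the real kXRegSizeInBits constants:

    // Before the revert: a NEON VRegister may be 8, 16, 32, 64 or 128 bits.
    bool IsValidVRegisterSize(int bits) {
      return bits == 8 || bits == 16 || bits == 32 || bits == 64 || bits == 128;
    }

    // After the revert: an FPRegister is only a 32-bit (S) or 64-bit (D) float.
    bool IsValidFPRegisterSize(int bits) {
      return bits == 32 || bits == 64;
    }
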
@@ -133,7 +120,11 @@ inline bool CPURegister::IsRegister() const {
   return reg_type == kRegister;
 }
 
-inline bool CPURegister::IsVRegister() const { return reg_type == kVRegister; }
+inline bool CPURegister::IsFPRegister() const {
+  return reg_type == kFPRegister;
+}
 
 inline bool CPURegister::IsSameSizeAndType(const CPURegister& other) const {
   return (reg_size == other.reg_size) && (reg_type == other.reg_type);
@@ -209,7 +200,7 @@ inline Register Register::XRegFromCode(unsigned code) {
   if (code == kSPRegInternalCode) {
     return csp;
   } else {
-    DCHECK_LT(code, static_cast<unsigned>(kNumberOfRegisters));
+    DCHECK(code < kNumberOfRegisters);
     return Register::Create(code, kXRegSizeInBits);
   }
 }
@@ -219,40 +210,23 @@ inline Register Register::WRegFromCode(unsigned code) {
   if (code == kSPRegInternalCode) {
     return wcsp;
   } else {
-    DCHECK_LT(code, static_cast<unsigned>(kNumberOfRegisters));
+    DCHECK(code < kNumberOfRegisters);
     return Register::Create(code, kWRegSizeInBits);
   }
 }
 
-inline VRegister VRegister::BRegFromCode(unsigned code) {
-  DCHECK_LT(code, static_cast<unsigned>(kNumberOfVRegisters));
-  return VRegister::Create(code, kBRegSizeInBits);
-}
-
-inline VRegister VRegister::HRegFromCode(unsigned code) {
-  DCHECK_LT(code, static_cast<unsigned>(kNumberOfVRegisters));
-  return VRegister::Create(code, kHRegSizeInBits);
-}
-
-inline VRegister VRegister::SRegFromCode(unsigned code) {
-  DCHECK_LT(code, static_cast<unsigned>(kNumberOfVRegisters));
-  return VRegister::Create(code, kSRegSizeInBits);
-}
-
-inline VRegister VRegister::DRegFromCode(unsigned code) {
-  DCHECK_LT(code, static_cast<unsigned>(kNumberOfVRegisters));
-  return VRegister::Create(code, kDRegSizeInBits);
-}
-
-inline VRegister VRegister::QRegFromCode(unsigned code) {
-  DCHECK_LT(code, static_cast<unsigned>(kNumberOfVRegisters));
-  return VRegister::Create(code, kQRegSizeInBits);
-}
-
-inline VRegister VRegister::VRegFromCode(unsigned code) {
-  DCHECK_LT(code, static_cast<unsigned>(kNumberOfVRegisters));
-  return VRegister::Create(code, kVRegSizeInBits);
-}
+inline FPRegister FPRegister::SRegFromCode(unsigned code) {
+  DCHECK(code < kNumberOfFPRegisters);
+  return FPRegister::Create(code, kSRegSizeInBits);
+}
+
+inline FPRegister FPRegister::DRegFromCode(unsigned code) {
+  DCHECK(code < kNumberOfFPRegisters);
+  return FPRegister::Create(code, kDRegSizeInBits);
+}
 
 inline Register CPURegister::W() const {
   DCHECK(IsValidRegister());
@@ -265,34 +239,16 @@ inline Register CPURegister::X() const {
   return Register::XRegFromCode(reg_code);
 }
 
-inline VRegister CPURegister::V() const {
-  DCHECK(IsValidVRegister());
-  return VRegister::VRegFromCode(reg_code);
-}
-
-inline VRegister CPURegister::B() const {
-  DCHECK(IsValidVRegister());
-  return VRegister::BRegFromCode(reg_code);
-}
-
-inline VRegister CPURegister::H() const {
-  DCHECK(IsValidVRegister());
-  return VRegister::HRegFromCode(reg_code);
-}
-
-inline VRegister CPURegister::S() const {
-  DCHECK(IsValidVRegister());
-  return VRegister::SRegFromCode(reg_code);
-}
-
-inline VRegister CPURegister::D() const {
-  DCHECK(IsValidVRegister());
-  return VRegister::DRegFromCode(reg_code);
-}
-
-inline VRegister CPURegister::Q() const {
-  DCHECK(IsValidVRegister());
-  return VRegister::QRegFromCode(reg_code);
-}
+inline FPRegister CPURegister::S() const {
+  DCHECK(IsValidFPRegister());
+  return FPRegister::SRegFromCode(reg_code);
+}
+
+inline FPRegister CPURegister::D() const {
+  DCHECK(IsValidFPRegister());
+  return FPRegister::DRegFromCode(reg_code);
+}
@@ -535,7 +491,7 @@ MemOperand::MemOperand(Register base, const Operand& offset, AddrMode addrmode)
     regoffset_ = NoReg;
   } else if (offset.IsShiftedRegister()) {
-    DCHECK((addrmode == Offset) || (addrmode == PostIndex));
+    DCHECK(addrmode == Offset);
 
     regoffset_ = offset.reg();
     shift_ = offset.shift();
@@ -921,20 +877,21 @@ LoadStoreOp Assembler::LoadOpFor(const CPURegister& rt) {
   if (rt.IsRegister()) {
     return rt.Is64Bits() ? LDR_x : LDR_w;
   } else {
-    DCHECK(rt.IsVRegister());
-    switch (rt.SizeInBits()) {
-      case kBRegSizeInBits:
-        return LDR_b;
-      case kHRegSizeInBits:
-        return LDR_h;
-      case kSRegSizeInBits:
-        return LDR_s;
-      case kDRegSizeInBits:
-        return LDR_d;
-      default:
-        DCHECK(rt.IsQ());
-        return LDR_q;
-    }
+    DCHECK(rt.IsFPRegister());
+    return rt.Is64Bits() ? LDR_d : LDR_s;
+  }
+}
+
+LoadStorePairOp Assembler::LoadPairOpFor(const CPURegister& rt,
+                                         const CPURegister& rt2) {
+  DCHECK(AreSameSizeAndType(rt, rt2));
+  USE(rt2);
+  if (rt.IsRegister()) {
+    return rt.Is64Bits() ? LDP_x : LDP_w;
+  } else {
+    DCHECK(rt.IsFPRegister());
+    return rt.Is64Bits() ? LDP_d : LDP_s;
   }
 }
@@ -944,29 +901,11 @@ LoadStoreOp Assembler::StoreOpFor(const CPURegister& rt) {
   if (rt.IsRegister()) {
     return rt.Is64Bits() ? STR_x : STR_w;
   } else {
-    DCHECK(rt.IsVRegister());
-    switch (rt.SizeInBits()) {
-      case kBRegSizeInBits:
-        return STR_b;
-      case kHRegSizeInBits:
-        return STR_h;
-      case kSRegSizeInBits:
-        return STR_s;
-      case kDRegSizeInBits:
-        return STR_d;
-      default:
-        DCHECK(rt.IsQ());
-        return STR_q;
-    }
+    DCHECK(rt.IsFPRegister());
+    return rt.Is64Bits() ? STR_d : STR_s;
   }
 }
 
-LoadStorePairOp Assembler::LoadPairOpFor(const CPURegister& rt,
-                                         const CPURegister& rt2) {
-  DCHECK_EQ(STP_w | LoadStorePairLBit, LDP_w);
-  return static_cast<LoadStorePairOp>(StorePairOpFor(rt, rt2) |
-                                      LoadStorePairLBit);
-}
-
 LoadStorePairOp Assembler::StorePairOpFor(const CPURegister& rt,
                                           const CPURegister& rt2) {
@@ -975,16 +914,8 @@ LoadStorePairOp Assembler::StorePairOpFor(const CPURegister& rt,
   if (rt.IsRegister()) {
     return rt.Is64Bits() ? STP_x : STP_w;
   } else {
-    DCHECK(rt.IsVRegister());
-    switch (rt.SizeInBits()) {
-      case kSRegSizeInBits:
-        return STP_s;
-      case kDRegSizeInBits:
-        return STP_d;
-      default:
-        DCHECK(rt.IsQ());
-        return STP_q;
-    }
+    DCHECK(rt.IsFPRegister());
+    return rt.Is64Bits() ? STP_d : STP_s;
   }
 }
@@ -993,7 +924,7 @@ LoadLiteralOp Assembler::LoadLiteralOpFor(const CPURegister& rt) {
   if (rt.IsRegister()) {
     return rt.Is64Bits() ? LDR_x_lit : LDR_w_lit;
   } else {
-    DCHECK(rt.IsVRegister());
+    DCHECK(rt.IsFPRegister());
     return rt.Is64Bits() ? LDR_d_lit : LDR_s_lit;
   }
 }
@@ -1177,8 +1108,9 @@ Instr Assembler::ImmLS(int imm9) {
   return truncate_to_int9(imm9) << ImmLS_offset;
 }
 
-Instr Assembler::ImmLSPair(int imm7, unsigned size) {
-  DCHECK_EQ((imm7 >> size) << size, imm7);
+Instr Assembler::ImmLSPair(int imm7, LSDataSize size) {
+  DCHECK(((imm7 >> size) << size) == imm7);
   int scaled_imm7 = imm7 >> size;
   DCHECK(is_int7(scaled_imm7));
   return truncate_to_int7(scaled_imm7) << ImmLSPair_offset;
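
The ImmLSPair change swaps the size parameter's type but keeps the encoding logic: the 7-bit immediate of a load/store-pair instruction stores the byte offset divided by the access size, and the DCHECK rejects unaligned offsets. For illustration only, a self-contained version of that scaling (the field position is a placeholder, not the real ImmLSPair_offset value):

    #include <cassert>
    #include <cstdint>

    constexpr int kImmLSPairOffset = 15;  // placeholder field position

    // 'size' is log2 of the access size in bytes, so a 64-bit register pair
    // (size == 3) scales the offset by 8 before it is encoded.
    uint32_t EncodeImmLSPair(int imm7, unsigned size) {
      assert(((imm7 >> size) << size) == imm7);  // offset must be size-aligned
      int scaled = imm7 >> size;
      assert(scaled >= -64 && scaled <= 63);  // must fit a signed 7-bit field
      return (static_cast<uint32_t>(scaled) & 0x7F) << kImmLSPairOffset;
    }
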
@@ -1220,17 +1152,10 @@ Instr Assembler::ImmBarrierType(int imm2) {
   return imm2 << ImmBarrierType_offset;
 }
 
-unsigned Assembler::CalcLSDataSize(LoadStoreOp op) {
-  DCHECK((LSSize_offset + LSSize_width) == (kInstructionSize * 8));
-  unsigned size = static_cast<Instr>(op >> LSSize_offset);
-  if ((op & LSVector_mask) != 0) {
-    // Vector register memory operations encode the access size in the "size"
-    // and "opc" fields.
-    if ((size == 0) && ((op & LSOpc_mask) >> LSOpc_offset) >= 2) {
-      size = kQRegSizeLog2;
-    }
-  }
-  return size;
+LSDataSize Assembler::CalcLSDataSize(LoadStoreOp op) {
+  DCHECK((SizeLS_offset + SizeLS_width) == (kInstructionSize * 8));
+  return static_cast<LSDataSize>(op >> SizeLS_offset);
 }
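
The removed branch documents why the NEON version needed more than a plain field extraction: 128-bit (Q) vector accesses reuse size encoding 0 and are distinguished by the opc field instead. A sketch of that rule with placeholder bit positions (the real LSSize_offset and LSOpc_mask live in constants-arm64.h):

    #include <cstdint>

    constexpr unsigned kSizeOffset = 30;   // placeholder bit positions
    constexpr unsigned kOpcOffset = 22;
    constexpr unsigned kQRegSizeLog2 = 4;  // log2(16 bytes)

    unsigned CalcAccessSizeLog2(uint32_t op, bool is_vector_op) {
      unsigned size = op >> kSizeOffset;  // top two bits of the opcode
      // Q-register accesses encode size 0 plus opc >= 2.
      if (is_vector_op && size == 0 && ((op >> kOpcOffset) & 0x3) >= 2) {
        size = kQRegSizeLog2;
      }
      return size;
    }
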
@@ -1245,7 +1170,11 @@ Instr Assembler::ShiftMoveWide(int shift) {
   return shift << ShiftMoveWide_offset;
 }
 
-Instr Assembler::FPType(VRegister fd) { return fd.Is64Bits() ? FP64 : FP32; }
+Instr Assembler::FPType(FPRegister fd) {
+  return fd.Is64Bits() ? FP64 : FP32;
+}
 
 Instr Assembler::FPScale(unsigned scale) {
   DCHECK(is_uint6(scale));
......
@@ -147,8 +147,8 @@ void DoubleToIStub::Generate(MacroAssembler* masm) {
 // See call site for description.
 static void EmitIdenticalObjectComparison(MacroAssembler* masm, Register left,
                                           Register right, Register scratch,
-                                          VRegister double_scratch, Label* slow,
-                                          Condition cond) {
+                                          FPRegister double_scratch,
+                                          Label* slow, Condition cond) {
   DCHECK(!AreAliased(left, right, scratch));
   Label not_identical, return_equal, heap_number;
   Register result = x0;
@@ -292,9 +292,12 @@ static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
 // See call site for description.
-static void EmitSmiNonsmiComparison(MacroAssembler* masm, Register left,
-                                    Register right, VRegister left_d,
-                                    VRegister right_d, Label* slow,
+static void EmitSmiNonsmiComparison(MacroAssembler* masm,
+                                    Register left,
+                                    Register right,
+                                    FPRegister left_d,
+                                    FPRegister right_d,
+                                    Label* slow,
                                     bool strict) {
   DCHECK(!AreAliased(left_d, right_d));
   DCHECK((left.is(x0) && right.is(x1)) ||
@@ -473,8 +476,8 @@ void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
   // In case 3, we have found out that we were dealing with a number-number
   // comparison. The double values of the numbers have been loaded, right into
   // rhs_d, left into lhs_d.
-  VRegister rhs_d = d0;
-  VRegister lhs_d = d1;
+  FPRegister rhs_d = d0;
+  FPRegister lhs_d = d1;
   EmitSmiNonsmiComparison(masm, lhs, rhs, lhs_d, rhs_d, &slow, strict());
 
   __ Bind(&both_loaded_as_doubles);
@@ -610,7 +613,7 @@ void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
 void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
   CPURegList saved_regs = kCallerSaved;
-  CPURegList saved_fp_regs = kCallerSavedV;
+  CPURegList saved_fp_regs = kCallerSavedFP;
 
   // We don't allow a GC during a store buffer overflow so there is no need to
   // store the registers in any particular way, but we do have to store and
@@ -683,12 +686,12 @@ void MathPowStub::Generate(MacroAssembler* masm) {
   Register exponent_integer = MathPowIntegerDescriptor::exponent();
   DCHECK(exponent_integer.is(x12));
   Register saved_lr = x19;
-  VRegister result_double = d0;
-  VRegister base_double = d0;
-  VRegister exponent_double = d1;
-  VRegister base_double_copy = d2;
-  VRegister scratch1_double = d6;
-  VRegister scratch0_double = d7;
+  FPRegister result_double = d0;
+  FPRegister base_double = d0;
+  FPRegister exponent_double = d1;
+  FPRegister base_double_copy = d2;
+  FPRegister scratch1_double = d6;
+  FPRegister scratch0_double = d7;
 
   // A fast-path for integer exponents.
   Label exponent_is_smi, exponent_is_integer;
@@ -1646,8 +1649,8 @@ void CompareICStub::GenerateNumbers(MacroAssembler* masm) {
   Register result = x0;
   Register rhs = x0;
   Register lhs = x1;
-  VRegister rhs_d = d0;
-  VRegister lhs_d = d1;
+  FPRegister rhs_d = d0;
+  FPRegister lhs_d = d1;
 
   if (left() == CompareICState::SMI) {
     __ JumpIfNotSmi(lhs, &miss);
@@ -2106,7 +2109,7 @@ RecordWriteStub::RegisterAllocation::RegisterAllocation(Register object,
       address_(address),
       scratch0_(scratch),
       saved_regs_(kCallerSaved),
-      saved_fp_regs_(kCallerSavedV) {
+      saved_fp_regs_(kCallerSavedFP) {
   DCHECK(!AreAliased(scratch, object, address));
 
   // The SaveCallerSaveRegisters method needs to save caller-saved
......
@@ -213,11 +213,6 @@ void Decoder<V>::DecodeLoadStore(Instruction* instr) {
           (instr->Bits(27, 24) == 0xC) ||
           (instr->Bits(27, 24) == 0xD) );
 
-  if ((instr->Bit(28) == 0) && (instr->Bit(29) == 0) && (instr->Bit(26) == 1)) {
-    DecodeNEONLoadStore(instr);
-    return;
-  }
-
   if (instr->Bit(24) == 0) {
     if (instr->Bit(28) == 0) {
       if (instr->Bit(29) == 0) {
@@ -231,6 +226,8 @@ void Decoder<V>::DecodeLoadStore(Instruction* instr) {
       } else {
         V::VisitLoadStoreAcquireRelease(instr);
       }
+    } else {
+      DecodeAdvSIMDLoadStore(instr);
     }
   } else {
     if ((instr->Bits(31, 30) == 0x3) ||
@@ -516,14 +513,16 @@ void Decoder<V>::DecodeFP(Instruction* instr) {
          (instr->Bits(27, 24) == 0xF) );
 
   if (instr->Bit(28) == 0) {
-    DecodeNEONVectorDataProcessing(instr);
+    DecodeAdvSIMDDataProcessing(instr);
   } else {
-    if (instr->Bits(31, 30) == 0x3) {
+    if (instr->Bit(29) == 1) {
       V::VisitUnallocated(instr);
-    } else if (instr->Bits(31, 30) == 0x1) {
-      DecodeNEONScalarDataProcessing(instr);
     } else {
-      if (instr->Bit(29) == 0) {
+      if (instr->Bits(31, 30) == 0x3) {
+        V::VisitUnallocated(instr);
+      } else if (instr->Bits(31, 30) == 0x1) {
+        DecodeAdvSIMDDataProcessing(instr);
+      } else {
         if (instr->Bit(24) == 0) {
           if (instr->Bit(21) == 0) {
             if ((instr->Bit(23) == 1) ||
@@ -630,190 +629,25 @@ void Decoder<V>::DecodeFP(Instruction* instr) {
             V::VisitFPDataProcessing3Source(instr);
           }
         }
-      } else {
-        V::VisitUnallocated(instr);
       }
     }
   }
 }
 
-template <typename V>
-void Decoder<V>::DecodeNEONLoadStore(Instruction* instr) {
-  DCHECK(instr->Bits(29, 25) == 0x6);
-  if (instr->Bit(31) == 0) {
-    if ((instr->Bit(24) == 0) && (instr->Bit(21) == 1)) {
-      V::VisitUnallocated(instr);
-      return;
-    }
-
-    if (instr->Bit(23) == 0) {
-      if (instr->Bits(20, 16) == 0) {
-        if (instr->Bit(24) == 0) {
-          V::VisitNEONLoadStoreMultiStruct(instr);
-        } else {
-          V::VisitNEONLoadStoreSingleStruct(instr);
-        }
-      } else {
-        V::VisitUnallocated(instr);
-      }
-    } else {
-      if (instr->Bit(24) == 0) {
-        V::VisitNEONLoadStoreMultiStructPostIndex(instr);
-      } else {
-        V::VisitNEONLoadStoreSingleStructPostIndex(instr);
-      }
-    }
-  } else {
-    V::VisitUnallocated(instr);
-  }
-}
+template<typename V>
+void Decoder<V>::DecodeAdvSIMDLoadStore(Instruction* instr) {
+  // TODO(all): Implement Advanced SIMD load/store instruction decode.
+  DCHECK(instr->Bits(29, 25) == 0x6);
+  V::VisitUnimplemented(instr);
+}
 
-template <typename V>
-void Decoder<V>::DecodeNEONVectorDataProcessing(Instruction* instr) {
-  DCHECK(instr->Bits(28, 25) == 0x7);
-  if (instr->Bit(31) == 0) {
-    if (instr->Bit(24) == 0) {
-      if (instr->Bit(21) == 0) {
-        if (instr->Bit(15) == 0) {
-          if (instr->Bit(10) == 0) {
-            if (instr->Bit(29) == 0) {
-              if (instr->Bit(11) == 0) {
-                V::VisitNEONTable(instr);
-              } else {
-                V::VisitNEONPerm(instr);
-              }
-            } else {
-              V::VisitNEONExtract(instr);
-            }
-          } else {
-            if (instr->Bits(23, 22) == 0) {
-              V::VisitNEONCopy(instr);
-            } else {
-              V::VisitUnallocated(instr);
-            }
-          }
-        } else {
-          V::VisitUnallocated(instr);
-        }
-      } else {
-        if (instr->Bit(10) == 0) {
-          if (instr->Bit(11) == 0) {
-            V::VisitNEON3Different(instr);
-          } else {
-            if (instr->Bits(18, 17) == 0) {
-              if (instr->Bit(20) == 0) {
-                if (instr->Bit(19) == 0) {
-                  V::VisitNEON2RegMisc(instr);
-                } else {
-                  if (instr->Bits(30, 29) == 0x2) {
-                    V::VisitUnallocated(instr);
-                  } else {
-                    V::VisitUnallocated(instr);
-                  }
-                }
-              } else {
-                if (instr->Bit(19) == 0) {
-                  V::VisitNEONAcrossLanes(instr);
-                } else {
-                  V::VisitUnallocated(instr);
-                }
-              }
-            } else {
-              V::VisitUnallocated(instr);
-            }
-          }
-        } else {
-          V::VisitNEON3Same(instr);
-        }
-      }
-    } else {
-      if (instr->Bit(10) == 0) {
-        V::VisitNEONByIndexedElement(instr);
-      } else {
-        if (instr->Bit(23) == 0) {
-          if (instr->Bits(22, 19) == 0) {
-            V::VisitNEONModifiedImmediate(instr);
-          } else {
-            V::VisitNEONShiftImmediate(instr);
-          }
-        } else {
-          V::VisitUnallocated(instr);
-        }
-      }
-    }
-  } else {
-    V::VisitUnallocated(instr);
-  }
-}
-
-template <typename V>
-void Decoder<V>::DecodeNEONScalarDataProcessing(Instruction* instr) {
-  DCHECK(instr->Bits(28, 25) == 0xF);
-  if (instr->Bit(24) == 0) {
-    if (instr->Bit(21) == 0) {
-      if (instr->Bit(15) == 0) {
-        if (instr->Bit(10) == 0) {
-          if (instr->Bit(29) == 0) {
-            if (instr->Bit(11) == 0) {
-              V::VisitUnallocated(instr);
-            } else {
-              V::VisitUnallocated(instr);
-            }
-          } else {
-            V::VisitUnallocated(instr);
-          }
-        } else {
-          if (instr->Bits(23, 22) == 0) {
-            V::VisitNEONScalarCopy(instr);
-          } else {
-            V::VisitUnallocated(instr);
-          }
-        }
-      } else {
-        V::VisitUnallocated(instr);
-      }
-    } else {
-      if (instr->Bit(10) == 0) {
-        if (instr->Bit(11) == 0) {
-          V::VisitNEONScalar3Diff(instr);
-        } else {
-          if (instr->Bits(18, 17) == 0) {
-            if (instr->Bit(20) == 0) {
-              if (instr->Bit(19) == 0) {
-                V::VisitNEONScalar2RegMisc(instr);
-              } else {
-                if (instr->Bit(29) == 0) {
-                  V::VisitUnallocated(instr);
-                } else {
-                  V::VisitUnallocated(instr);
-                }
-              }
-            } else {
-              if (instr->Bit(19) == 0) {
-                V::VisitNEONScalarPairwise(instr);
-              } else {
-                V::VisitUnallocated(instr);
-              }
-            }
-          } else {
-            V::VisitUnallocated(instr);
-          }
-        }
-      } else {
-        V::VisitNEONScalar3Same(instr);
-      }
-    }
-  } else {
-    if (instr->Bit(10) == 0) {
-      V::VisitNEONScalarByIndexedElement(instr);
-    } else {
-      if (instr->Bit(23) == 0) {
-        V::VisitNEONScalarShiftImmediate(instr);
-      } else {
-        V::VisitUnallocated(instr);
-      }
-    }
-  }
-}
+template<typename V>
+void Decoder<V>::DecodeAdvSIMDDataProcessing(Instruction* instr) {
+  // TODO(all): Implement Advanced SIMD data processing instruction decode.
+  DCHECK(instr->Bits(27, 25) == 0x7);
+  V::VisitUnimplemented(instr);
+}
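
Both the removed decode tree and its replacement dispatch the same way: fixed bit positions of the 32-bit instruction word select a branch, and each leaf calls a visitor. For illustration only, minimal stand-ins for the Bit/Bits helpers the tree relies on (the real ones are declared in instructions-arm64.h):

    #include <cstdint>

    // Extract bits [hi:lo] of an instruction word, inclusive.
    // Assumes hi - lo < 31 so the shift below does not overflow.
    inline uint32_t Bits(uint32_t instr, int hi, int lo) {
      return (instr >> lo) & ((1u << (hi - lo + 1)) - 1);
    }

    // Extract a single bit.
    inline uint32_t Bit(uint32_t instr, int pos) { return (instr >> pos) & 1u; }

    // e.g. Bits(instr, 29, 25) == 0x6 is the NEON load/store space
    // tested by the DCHECK in DecodeAdvSIMDLoadStore above.
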
......
@@ -16,72 +16,50 @@ namespace internal {
 // List macro containing all visitors needed by the decoder class.
 #define VISITOR_LIST(V)                 \
   V(PCRelAddressing)                    \
   V(AddSubImmediate)                    \
   V(LogicalImmediate)                   \
   V(MoveWideImmediate)                  \
   V(Bitfield)                           \
   V(Extract)                            \
   V(UnconditionalBranch)                \
   V(UnconditionalBranchToRegister)      \
   V(CompareBranch)                      \
   V(TestBranch)                         \
   V(ConditionalBranch)                  \
   V(System)                             \
   V(Exception)                          \
   V(LoadStorePairPostIndex)             \
   V(LoadStorePairOffset)                \
   V(LoadStorePairPreIndex)              \
   V(LoadLiteral)                        \
   V(LoadStoreUnscaledOffset)            \
   V(LoadStorePostIndex)                 \
   V(LoadStorePreIndex)                  \
   V(LoadStoreRegisterOffset)            \
   V(LoadStoreUnsignedOffset)            \
   V(LoadStoreAcquireRelease)            \
   V(LogicalShifted)                     \
   V(AddSubShifted)                      \
   V(AddSubExtended)                     \
   V(AddSubWithCarry)                    \
   V(ConditionalCompareRegister)         \
   V(ConditionalCompareImmediate)        \
   V(ConditionalSelect)                  \
   V(DataProcessing1Source)              \
   V(DataProcessing2Source)              \
   V(DataProcessing3Source)              \
   V(FPCompare)                          \
   V(FPConditionalCompare)               \
   V(FPConditionalSelect)                \
   V(FPImmediate)                        \
   V(FPDataProcessing1Source)            \
   V(FPDataProcessing2Source)            \
   V(FPDataProcessing3Source)            \
   V(FPIntegerConvert)                   \
   V(FPFixedPointConvert)                \
-  V(NEON2RegMisc)                       \
-  V(NEON3Different)                     \
-  V(NEON3Same)                          \
-  V(NEONAcrossLanes)                    \
-  V(NEONByIndexedElement)               \
-  V(NEONCopy)                           \
-  V(NEONExtract)                        \
-  V(NEONLoadStoreMultiStruct)           \
-  V(NEONLoadStoreMultiStructPostIndex)  \
-  V(NEONLoadStoreSingleStruct)          \
-  V(NEONLoadStoreSingleStructPostIndex) \
-  V(NEONModifiedImmediate)              \
-  V(NEONScalar2RegMisc)                 \
-  V(NEONScalar3Diff)                    \
-  V(NEONScalar3Same)                    \
-  V(NEONScalarByIndexedElement)         \
-  V(NEONScalarCopy)                     \
-  V(NEONScalarPairwise)                 \
-  V(NEONScalarShiftImmediate)           \
-  V(NEONShiftImmediate)                 \
-  V(NEONTable)                          \
-  V(NEONPerm)                           \
   V(Unallocated)                        \
   V(Unimplemented)
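
VISITOR_LIST is an X-macro: each consumer defines a one-argument macro, applies the list, then undefines it, so trimming the NEON rows above removes the corresponding Visit methods from every consumer at once. A reduced illustration of the pattern:

    #define EXAMPLE_LIST(V) \
      V(PCRelAddressing)    \
      V(AddSubImmediate)

    struct Instruction;

    class ExampleVisitor {
     public:
      // Expands to: void VisitPCRelAddressing(Instruction* instr);
      //             void VisitAddSubImmediate(Instruction* instr);
    #define DECLARE(A) void Visit##A(Instruction* instr);
      EXAMPLE_LIST(DECLARE)
    #undef DECLARE
    };
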
// The Visitor interface. Disassembler and simulator (and other tools)
@@ -131,8 +109,6 @@ class DispatchingDecoderVisitor : public DecoderVisitor {
   // stored by the decoder.
   void RemoveVisitor(DecoderVisitor* visitor);
 
-  void VisitNEONShiftImmediate(const Instruction* instr);
-
   #define DECLARE(A) void Visit##A(Instruction* instr);
   VISITOR_LIST(DECLARE)
   #undef DECLARE
@@ -197,17 +173,12 @@ class Decoder : public V {
   // Decode the Advanced SIMD (NEON) load/store part of the instruction tree,
   // and call the corresponding visitors.
   // On entry, instruction bits 29:25 = 0x6.
-  void DecodeNEONLoadStore(Instruction* instr);
+  void DecodeAdvSIMDLoadStore(Instruction* instr);
 
   // Decode the Advanced SIMD (NEON) data processing part of the instruction
   // tree, and call the corresponding visitors.
   // On entry, instruction bits 27:25 = 0x7.
-  void DecodeNEONVectorDataProcessing(Instruction* instr);
-
-  // Decode the Advanced SIMD (NEON) scalar data processing part of the
-  // instruction tree, and call the corresponding visitors.
-  // On entry, instruction bits 28:25 = 0xF.
-  void DecodeNEONScalarDataProcessing(Instruction* instr);
+  void DecodeAdvSIMDDataProcessing(Instruction* instr);
 };
......
@@ -99,13 +99,13 @@ void Deoptimizer::TableEntryGenerator::Generate() {
   // Save all allocatable double registers.
   CPURegList saved_double_registers(
-      CPURegister::kVRegister, kDRegSizeInBits,
+      CPURegister::kFPRegister, kDRegSizeInBits,
       RegisterConfiguration::Crankshaft()->allocatable_double_codes_mask());
   __ PushCPURegList(saved_double_registers);
 
   // Save all allocatable float registers.
   CPURegList saved_float_registers(
-      CPURegister::kVRegister, kSRegSizeInBits,
+      CPURegister::kFPRegister, kSRegSizeInBits,
       RegisterConfiguration::Crankshaft()->allocatable_float_codes_mask());
   __ PushCPURegList(saved_float_registers);
@@ -5,7 +5,6 @@
 #ifndef V8_ARM64_DISASM_ARM64_H
 #define V8_ARM64_DISASM_ARM64_H
 
-#include "src/arm64/assembler-arm64.h"
 #include "src/arm64/decoder-arm64.h"
 #include "src/arm64/instructions-arm64.h"
 #include "src/globals.h"
@@ -30,13 +29,6 @@ class DisassemblingDecoder : public DecoderVisitor {
  protected:
   virtual void ProcessOutput(Instruction* instr);
 
-  // Default output functions. The functions below implement a default way of
-  // printing elements in the disassembly. A sub-class can override these to
-  // customize the disassembly output.
-
-  // Prints the name of a register.
-  virtual void AppendRegisterNameToOutput(const CPURegister& reg);
-
   void Format(Instruction* instr, const char* mnemonic, const char* format);
   void Substitute(Instruction* instr, const char* string);
   int SubstituteField(Instruction* instr, const char* format);
......
@@ -377,7 +377,7 @@ void Instrument::InstrumentLoadStore(Instruction* instr) {
   static Counter* load_fp_counter = GetCounter("Load FP");
   static Counter* store_fp_counter = GetCounter("Store FP");
 
-  switch (instr->Mask(LoadStoreMask)) {
+  switch (instr->Mask(LoadStoreOpMask)) {
     case STRB_w:  // Fall through.
     case STRH_w:  // Fall through.
     case STR_w:   // Fall through.
@@ -595,159 +595,6 @@ void Instrument::VisitFPFixedPointConvert(Instruction* instr) {
   counter->Increment();
 }
 
-void Instrument::VisitNEON2RegMisc(Instruction* instr) {
-  USE(instr);
-  Update();
-  static Counter* counter = GetCounter("NEON");
-  counter->Increment();
-}
-
-void Instrument::VisitNEON3Different(Instruction* instr) {
-  USE(instr);
-  Update();
-  static Counter* counter = GetCounter("NEON");
-  counter->Increment();
-}
-
-void Instrument::VisitNEON3Same(Instruction* instr) {
-  USE(instr);
-  Update();
-  static Counter* counter = GetCounter("NEON");
-  counter->Increment();
-}
-
-void Instrument::VisitNEONAcrossLanes(Instruction* instr) {
-  USE(instr);
-  Update();
-  static Counter* counter = GetCounter("NEON");
-  counter->Increment();
-}
-
-void Instrument::VisitNEONByIndexedElement(Instruction* instr) {
-  USE(instr);
-  Update();
-  static Counter* counter = GetCounter("NEON");
-  counter->Increment();
-}
-
-void Instrument::VisitNEONCopy(Instruction* instr) {
-  USE(instr);
-  Update();
-  static Counter* counter = GetCounter("NEON");
-  counter->Increment();
-}
-
-void Instrument::VisitNEONExtract(Instruction* instr) {
-  USE(instr);
-  Update();
-  static Counter* counter = GetCounter("NEON");
-  counter->Increment();
-}
-
-void Instrument::VisitNEONLoadStoreMultiStruct(Instruction* instr) {
-  USE(instr);
-  Update();
-  static Counter* counter = GetCounter("NEON");
-  counter->Increment();
-}
-
-void Instrument::VisitNEONLoadStoreMultiStructPostIndex(Instruction* instr) {
-  USE(instr);
-  Update();
-  static Counter* counter = GetCounter("NEON");
-  counter->Increment();
-}
-
-void Instrument::VisitNEONLoadStoreSingleStruct(Instruction* instr) {
-  USE(instr);
-  Update();
-  static Counter* counter = GetCounter("NEON");
-  counter->Increment();
-}
-
-void Instrument::VisitNEONLoadStoreSingleStructPostIndex(Instruction* instr) {
-  USE(instr);
-  Update();
-  static Counter* counter = GetCounter("NEON");
-  counter->Increment();
-}
-
-void Instrument::VisitNEONModifiedImmediate(Instruction* instr) {
-  USE(instr);
-  Update();
-  static Counter* counter = GetCounter("NEON");
-  counter->Increment();
-}
-
-void Instrument::VisitNEONPerm(Instruction* instr) {
-  USE(instr);
-  Update();
-  static Counter* counter = GetCounter("NEON");
-  counter->Increment();
-}
-
-void Instrument::VisitNEONScalar2RegMisc(Instruction* instr) {
-  USE(instr);
-  Update();
-  static Counter* counter = GetCounter("NEON");
-  counter->Increment();
-}
-
-void Instrument::VisitNEONScalar3Diff(Instruction* instr) {
-  USE(instr);
-  Update();
-  static Counter* counter = GetCounter("NEON");
-  counter->Increment();
-}
-
-void Instrument::VisitNEONScalar3Same(Instruction* instr) {
-  USE(instr);
-  Update();
-  static Counter* counter = GetCounter("NEON");
-  counter->Increment();
-}
-
-void Instrument::VisitNEONScalarByIndexedElement(Instruction* instr) {
-  USE(instr);
-  Update();
-  static Counter* counter = GetCounter("NEON");
-  counter->Increment();
-}
-
-void Instrument::VisitNEONScalarCopy(Instruction* instr) {
-  USE(instr);
-  Update();
-  static Counter* counter = GetCounter("NEON");
-  counter->Increment();
-}
-
-void Instrument::VisitNEONScalarPairwise(Instruction* instr) {
-  USE(instr);
-  Update();
-  static Counter* counter = GetCounter("NEON");
-  counter->Increment();
-}
-
-void Instrument::VisitNEONScalarShiftImmediate(Instruction* instr) {
-  USE(instr);
-  Update();
-  static Counter* counter = GetCounter("NEON");
-  counter->Increment();
-}
-
-void Instrument::VisitNEONShiftImmediate(Instruction* instr) {
-  USE(instr);
-  Update();
-  static Counter* counter = GetCounter("NEON");
-  counter->Increment();
-}
-
-void Instrument::VisitNEONTable(Instruction* instr) {
-  USE(instr);
-  Update();
-  static Counter* counter = GetCounter("NEON");
-  counter->Increment();
-}
-
 void Instrument::VisitUnallocated(Instruction* instr) {
   Update();
......
@@ -8,7 +8,6 @@
 #include <cmath>
 
 #include "src/arm64/constants-arm64.h"
-#include "src/utils.h"
 
 namespace v8 {
 namespace internal {
@@ -17,26 +16,40 @@ namespace internal {
 STATIC_ASSERT((static_cast<int32_t>(-1) >> 1) == -1);
 STATIC_ASSERT((static_cast<uint32_t>(-1) >> 1) == 0x7FFFFFFF);
 
-uint32_t float_sign(float val);
-uint32_t float_exp(float val);
-uint32_t float_mantissa(float val);
-uint32_t double_sign(double val);
-uint32_t double_exp(double val);
-uint64_t double_mantissa(double val);
-
-float float_pack(uint32_t sign, uint32_t exp, uint32_t mantissa);
-double double_pack(uint64_t sign, uint64_t exp, uint64_t mantissa);
-
-// An fpclassify() function for 16-bit half-precision floats.
-int float16classify(float16 value);
+// Floating point representation.
+static inline uint32_t float_to_rawbits(float value) {
+  uint32_t bits = 0;
+  memcpy(&bits, &value, 4);
+  return bits;
+}
+
+static inline uint64_t double_to_rawbits(double value) {
+  uint64_t bits = 0;
+  memcpy(&bits, &value, 8);
+  return bits;
+}
+
+static inline float rawbits_to_float(uint32_t bits) {
+  float value = 0.0;
+  memcpy(&value, &bits, 4);
+  return value;
+}
+
+static inline double rawbits_to_double(uint64_t bits) {
+  double value = 0.0;
+  memcpy(&value, &bits, 8);
+  return value;
+}
 
 // Bit counting.
 int CountLeadingZeros(uint64_t value, int width);
 int CountLeadingSignBits(int64_t value, int width);
 int CountTrailingZeros(uint64_t value, int width);
 int CountSetBits(uint64_t value, int width);
-int LowestSetBitPosition(uint64_t value);
-int HighestSetBitPosition(uint64_t value);
 uint64_t LargestPowerOf2Divisor(uint64_t value);
 int MaskToBit(uint64_t mask);
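
The revert restores reinterpretation helpers built on memcpy, the portable way to type-pun in C++ without strict-aliasing trouble (the NEON patch had replaced them with bit_cast plus sign/exponent/mantissa accessors). A small usage sketch of the restored helper, for illustration only:

    #include <cstdint>
    #include <cstring>

    static inline uint32_t float_to_rawbits(float value) {
      uint32_t bits = 0;
      memcpy(&bits, &value, 4);
      return bits;
    }

    int main() {
      // 1.0f is 0x3F800000: sign 0, biased exponent 127, zero mantissa.
      uint32_t bits = float_to_rawbits(1.0f);
      uint32_t sign = bits >> 31;               // 0
      uint32_t exponent = (bits >> 23) & 0xFF;  // 127
      uint32_t mantissa = bits & 0x7FFFFF;      // 0
      return (sign == 0 && exponent == 127 && mantissa == 0) ? 0 : 1;
    }
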
@@ -73,7 +86,7 @@ T ReverseBytes(T value, int block_bytes_log2) {
 
 // NaN tests.
 inline bool IsSignallingNaN(double num) {
-  uint64_t raw = bit_cast<uint64_t>(num);
+  uint64_t raw = double_to_rawbits(num);
   if (std::isnan(num) && ((raw & kDQuietNanMask) == 0)) {
     return true;
   }
@@ -82,17 +95,13 @@ inline bool IsSignallingNaN(double num) {
 
 inline bool IsSignallingNaN(float num) {
-  uint32_t raw = bit_cast<uint32_t>(num);
+  uint32_t raw = float_to_rawbits(num);
   if (std::isnan(num) && ((raw & kSQuietNanMask) == 0)) {
     return true;
   }
   return false;
 }
 
-inline bool IsSignallingNaN(float16 num) {
-  const uint16_t kFP16QuietNaNMask = 0x0200;
-  return (float16classify(num) == FP_NAN) && ((num & kFP16QuietNaNMask) == 0);
-}
-
 template <typename T>
 inline bool IsQuietNaN(T num) {
@@ -103,14 +112,13 @@ inline bool IsQuietNaN(T num) {
 
 // Convert the NaN in 'num' to a quiet NaN.
 inline double ToQuietNaN(double num) {
   DCHECK(std::isnan(num));
-  return bit_cast<double>(bit_cast<uint64_t>(num) | kDQuietNanMask);
+  return rawbits_to_double(double_to_rawbits(num) | kDQuietNanMask);
 }
 
 inline float ToQuietNaN(float num) {
   DCHECK(std::isnan(num));
-  return bit_cast<float>(bit_cast<uint32_t>(num) |
-                         static_cast<uint32_t>(kSQuietNanMask));
+  return rawbits_to_float(float_to_rawbits(num) | kSQuietNanMask);
 }
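
For IEEE-754 floats the signalling/quiet distinction is a single mantissa bit, which is what the masks above test and set: a NaN whose top mantissa bit is clear is signalling, and ORing the mask in quiets it. A self-contained sketch for binary32, assuming the quiet bit is mantissa bit 22 (which is what kSQuietNanMask is expected to select):

    #include <cstdint>
    #include <cstring>

    constexpr uint32_t kQuietBit = 1u << 22;  // top mantissa bit of a binary32

    float MakeQuietNaN(float num) {
      uint32_t bits;
      memcpy(&bits, &num, 4);
      bits |= kQuietBit;  // still a NaN: exponent all-ones, mantissa non-zero
      memcpy(&num, &bits, 4);
      return num;
    }
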
......
@@ -44,12 +44,14 @@ void DelayedMasm::Mov(const Register& rd,
   __ Mov(rd, operand, discard_mode);
 }
 
-void DelayedMasm::Fmov(VRegister fd, VRegister fn) {
+void DelayedMasm::Fmov(FPRegister fd, FPRegister fn) {
   EmitPending();
   __ Fmov(fd, fn);
 }
 
-void DelayedMasm::Fmov(VRegister fd, double imm) {
+void DelayedMasm::Fmov(FPRegister fd, double imm) {
   EmitPending();
   __ Fmov(fd, imm);
 }
......
@@ -1468,7 +1468,6 @@
             'arm64/macro-assembler-arm64-inl.h',
             'arm64/simulator-arm64.cc',
             'arm64/simulator-arm64.h',
-            'arm64/simulator-logic-arm64.cc',
             'arm64/utils-arm64.cc',
             'arm64/utils-arm64.h',
             'arm64/eh-frame-arm64.cc',
......