Commit 8faf3d6f authored by martyn.capewell, committed by Commit bot

Reland "ARM64: Add NEON support"

This reverts commit cc047635.
The CL was reverted due to a missing DEPS mirror.

Original issue's description:
> ARM64: Add NEON support
>
> Add assembler, disassembler and simulator support for NEON in the ARM64 backend.
>
> BUG=
>
> Review-Url: https://codereview.chromium.org/2622643005
> Cr-Commit-Position: refs/heads/master@{#44306}

BUG=

Review-Url: https://codereview.chromium.org/2812573003
Cr-Commit-Position: refs/heads/master@{#44652}
parent 6e0f52e1
@@ -46,6 +46,7 @@
/src/inspector/build/closure-compiler
/src/inspector/build/closure-compiler.tar.gz
/test/benchmarks/data
/test/cctest/traces-arm64
/test/fuzzer/wasm
/test/fuzzer/wasm.tar.gz
/test/fuzzer/wasm_asmjs
@@ -2133,6 +2133,7 @@ v8_source_set("v8_base") {
"src/arm64/macro-assembler-arm64.h",
"src/arm64/simulator-arm64.cc",
"src/arm64/simulator-arm64.h",
"src/arm64/simulator-logic-arm64.cc",
"src/arm64/utils-arm64.cc",
"src/arm64/utils-arm64.h",
"src/compiler/arm64/code-generator-arm64.cc",
@@ -37,6 +37,8 @@ deps = {
Var("chromium_url") + "/external/github.com/tc39/test262.git" + "@" + "230f9fc5688ce76bfaa99aba5f680a159eaac9e2",
"v8/test/test262/harness":
Var("chromium_url") + "/external/github.com/test262-utils/test262-harness-py.git" + "@" + "0f2acdd882c84cff43b9d60df7574a1901e2cdcd",
"v8/test/cctest/traces-arm64":
Var("chromium_url") + "/external/git.linaro.org/arm/vixl-simulator-traces.git" + "@" + "6168e7e1eec52c9cb0a62f87f94df0582dc48aa8",
"v8/tools/clang":
Var("chromium_url") + "/chromium/src/tools/clang.git" + "@" + "49df471350a60efaec6951f321dd65475496ba17",
"v8/test/wasm-js":
@@ -57,6 +57,15 @@ inline int CPURegister::SizeInBytes() const {
return reg_size / 8;
}
inline bool CPURegister::Is8Bits() const {
DCHECK(IsValid());
return reg_size == 8;
}
inline bool CPURegister::Is16Bits() const {
DCHECK(IsValid());
return reg_size == 16;
}
inline bool CPURegister::Is32Bits() const {
DCHECK(IsValid());
@@ -69,9 +78,13 @@ inline bool CPURegister::Is64Bits() const {
return reg_size == 64;
}
inline bool CPURegister::Is128Bits() const {
DCHECK(IsValid());
return reg_size == 128;
}
inline bool CPURegister::IsValid() const {
if (IsValidRegister() || IsValidFPRegister()) {
if (IsValidRegister() || IsValidVRegister()) {
DCHECK(!IsNone());
return true;
} else {
@@ -87,14 +100,14 @@ inline bool CPURegister::IsValidRegister() const {
((reg_code < kNumberOfRegisters) || (reg_code == kSPRegInternalCode));
}
inline bool CPURegister::IsValidFPRegister() const {
return IsFPRegister() &&
((reg_size == kSRegSizeInBits) || (reg_size == kDRegSizeInBits)) &&
(reg_code < kNumberOfFPRegisters);
inline bool CPURegister::IsValidVRegister() const {
return IsVRegister() &&
((reg_size == kBRegSizeInBits) || (reg_size == kHRegSizeInBits) ||
(reg_size == kSRegSizeInBits) || (reg_size == kDRegSizeInBits) ||
(reg_size == kQRegSizeInBits)) &&
(reg_code < kNumberOfVRegisters);
}
inline bool CPURegister::IsNone() const {
// kNoRegister types should always have size 0 and code 0.
DCHECK((reg_type != kNoRegister) || (reg_code == 0));
@@ -120,11 +133,7 @@ inline bool CPURegister::IsRegister() const {
return reg_type == kRegister;
}
inline bool CPURegister::IsFPRegister() const {
return reg_type == kFPRegister;
}
inline bool CPURegister::IsVRegister() const { return reg_type == kVRegister; }
inline bool CPURegister::IsSameSizeAndType(const CPURegister& other) const {
return (reg_size == other.reg_size) && (reg_type == other.reg_type);
@@ -200,7 +209,7 @@ inline Register Register::XRegFromCode(unsigned code) {
if (code == kSPRegInternalCode) {
return csp;
} else {
DCHECK(code < kNumberOfRegisters);
DCHECK_LT(code, static_cast<unsigned>(kNumberOfRegisters));
return Register::Create(code, kXRegSizeInBits);
}
}
@@ -210,23 +219,40 @@ inline Register Register::WRegFromCode(unsigned code) {
if (code == kSPRegInternalCode) {
return wcsp;
} else {
DCHECK(code < kNumberOfRegisters);
DCHECK_LT(code, static_cast<unsigned>(kNumberOfRegisters));
return Register::Create(code, kWRegSizeInBits);
}
}
inline VRegister VRegister::BRegFromCode(unsigned code) {
DCHECK_LT(code, static_cast<unsigned>(kNumberOfVRegisters));
return VRegister::Create(code, kBRegSizeInBits);
}
inline VRegister VRegister::HRegFromCode(unsigned code) {
DCHECK_LT(code, static_cast<unsigned>(kNumberOfVRegisters));
return VRegister::Create(code, kHRegSizeInBits);
}
inline FPRegister FPRegister::SRegFromCode(unsigned code) {
DCHECK(code < kNumberOfFPRegisters);
return FPRegister::Create(code, kSRegSizeInBits);
inline VRegister VRegister::SRegFromCode(unsigned code) {
DCHECK_LT(code, static_cast<unsigned>(kNumberOfVRegisters));
return VRegister::Create(code, kSRegSizeInBits);
}
inline VRegister VRegister::DRegFromCode(unsigned code) {
DCHECK_LT(code, static_cast<unsigned>(kNumberOfVRegisters));
return VRegister::Create(code, kDRegSizeInBits);
}
inline FPRegister FPRegister::DRegFromCode(unsigned code) {
DCHECK(code < kNumberOfFPRegisters);
return FPRegister::Create(code, kDRegSizeInBits);
inline VRegister VRegister::QRegFromCode(unsigned code) {
DCHECK_LT(code, static_cast<unsigned>(kNumberOfVRegisters));
return VRegister::Create(code, kQRegSizeInBits);
}
inline VRegister VRegister::VRegFromCode(unsigned code) {
DCHECK_LT(code, static_cast<unsigned>(kNumberOfVRegisters));
return VRegister::Create(code, kVRegSizeInBits);
}
inline Register CPURegister::W() const {
DCHECK(IsValidRegister());
@@ -239,16 +265,34 @@ inline Register CPURegister::X() const {
return Register::XRegFromCode(reg_code);
}
inline VRegister CPURegister::V() const {
DCHECK(IsValidVRegister());
return VRegister::VRegFromCode(reg_code);
}
inline VRegister CPURegister::B() const {
DCHECK(IsValidVRegister());
return VRegister::BRegFromCode(reg_code);
}
inline VRegister CPURegister::H() const {
DCHECK(IsValidVRegister());
return VRegister::HRegFromCode(reg_code);
}
inline FPRegister CPURegister::S() const {
DCHECK(IsValidFPRegister());
return FPRegister::SRegFromCode(reg_code);
inline VRegister CPURegister::S() const {
DCHECK(IsValidVRegister());
return VRegister::SRegFromCode(reg_code);
}
inline VRegister CPURegister::D() const {
DCHECK(IsValidVRegister());
return VRegister::DRegFromCode(reg_code);
}
inline FPRegister CPURegister::D() const {
DCHECK(IsValidFPRegister());
return FPRegister::DRegFromCode(reg_code);
inline VRegister CPURegister::Q() const {
DCHECK(IsValidVRegister());
return VRegister::QRegFromCode(reg_code);
}
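The register hunks above replace the old FPRegister S/D accessors with a full set of VRegister views (B, H, S, D, Q and the generic V) that all share one register code. The stand-alone sketch below illustrates that idea with a simplified stand-in type; VReg, its size values and the main() driver are invented for this example and are not V8's real classes.

#include <cassert>
#include <cstdio>

// Simplified stand-in for V8's VRegister: a register code plus an access
// size in bits. Just enough to show the size views, not the real class.
struct VReg {
  unsigned code;
  unsigned size_in_bits;

  static VReg Create(unsigned code, unsigned size) { return {code, size}; }

  // Re-wrap the same register code at a different access width, mirroring
  // CPURegister::B()/H()/S()/D()/Q() in the diff above.
  VReg B() const { return Create(code, 8); }
  VReg H() const { return Create(code, 16); }
  VReg S() const { return Create(code, 32); }
  VReg D() const { return Create(code, 64); }
  VReg Q() const { return Create(code, 128); }
};

int main() {
  VReg q0 = VReg::Create(0, 128);  // full 128-bit NEON register q0
  VReg d0 = q0.D();                // 64-bit view of the same register
  VReg s0 = q0.S();                // 32-bit view
  assert(d0.code == q0.code && d0.size_in_bits == 64);
  std::printf("q%u viewed as d%u (%u bits) and s%u (%u bits)\n",
              q0.code, d0.code, d0.size_in_bits, s0.code, s0.size_in_bits);
  return 0;
}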
@@ -491,7 +535,7 @@ MemOperand::MemOperand(Register base, const Operand& offset, AddrMode addrmode)
regoffset_ = NoReg;
} else if (offset.IsShiftedRegister()) {
DCHECK(addrmode == Offset);
DCHECK((addrmode == Offset) || (addrmode == PostIndex));
regoffset_ = offset.reg();
shift_ = offset.shift();
@@ -877,21 +921,20 @@ LoadStoreOp Assembler::LoadOpFor(const CPURegister& rt) {
if (rt.IsRegister()) {
return rt.Is64Bits() ? LDR_x : LDR_w;
} else {
DCHECK(rt.IsFPRegister());
return rt.Is64Bits() ? LDR_d : LDR_s;
}
}
LoadStorePairOp Assembler::LoadPairOpFor(const CPURegister& rt,
const CPURegister& rt2) {
DCHECK(AreSameSizeAndType(rt, rt2));
USE(rt2);
if (rt.IsRegister()) {
return rt.Is64Bits() ? LDP_x : LDP_w;
} else {
DCHECK(rt.IsFPRegister());
return rt.Is64Bits() ? LDP_d : LDP_s;
DCHECK(rt.IsVRegister());
switch (rt.SizeInBits()) {
case kBRegSizeInBits:
return LDR_b;
case kHRegSizeInBits:
return LDR_h;
case kSRegSizeInBits:
return LDR_s;
case kDRegSizeInBits:
return LDR_d;
default:
DCHECK(rt.IsQ());
return LDR_q;
}
}
}
@@ -901,11 +944,29 @@ LoadStoreOp Assembler::StoreOpFor(const CPURegister& rt) {
if (rt.IsRegister()) {
return rt.Is64Bits() ? STR_x : STR_w;
} else {
DCHECK(rt.IsFPRegister());
return rt.Is64Bits() ? STR_d : STR_s;
DCHECK(rt.IsVRegister());
switch (rt.SizeInBits()) {
case kBRegSizeInBits:
return STR_b;
case kHRegSizeInBits:
return STR_h;
case kSRegSizeInBits:
return STR_s;
case kDRegSizeInBits:
return STR_d;
default:
DCHECK(rt.IsQ());
return STR_q;
}
}
}
LoadStorePairOp Assembler::LoadPairOpFor(const CPURegister& rt,
const CPURegister& rt2) {
DCHECK_EQ(STP_w | LoadStorePairLBit, LDP_w);
return static_cast<LoadStorePairOp>(StorePairOpFor(rt, rt2) |
LoadStorePairLBit);
}
LoadStorePairOp Assembler::StorePairOpFor(const CPURegister& rt,
const CPURegister& rt2) {
@@ -914,8 +975,16 @@ LoadStorePairOp Assembler::StorePairOpFor(const CPURegister& rt,
if (rt.IsRegister()) {
return rt.Is64Bits() ? STP_x : STP_w;
} else {
DCHECK(rt.IsFPRegister());
return rt.Is64Bits() ? STP_d : STP_s;
DCHECK(rt.IsVRegister());
switch (rt.SizeInBits()) {
case kSRegSizeInBits:
return STP_s;
case kDRegSizeInBits:
return STP_d;
default:
DCHECK(rt.IsQ());
return STP_q;
}
}
}
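LoadPairOpFor above no longer repeats the size dispatch: it derives the load opcode from StorePairOpFor by setting the load/store pair L bit, and the DCHECK documents the encoding property it relies on (an LDP opcode is the corresponding STP opcode with that bit set). A minimal sketch of the relationship, using invented demo opcode values rather than V8's real LoadStorePairOp constants:

#include <cassert>
#include <cstdint>

// Demo values only; V8 defines the real constants in constants-arm64.h.
// Bit 22 is assumed here to be the L bit of the A64 load/store pair
// encodings, which is the relationship the DCHECK in the diff checks.
constexpr uint32_t kLoadStorePairLBit = 1u << 22;
constexpr uint32_t STP_w_demo = 0x29000000;
constexpr uint32_t LDP_w_demo = STP_w_demo | kLoadStorePairLBit;

uint32_t LoadPairOpForDemo(uint32_t store_pair_op) {
  // Mirrors the shape of Assembler::LoadPairOpFor: take the store form
  // and set the load bit.
  return store_pair_op | kLoadStorePairLBit;
}

int main() {
  assert(LoadPairOpForDemo(STP_w_demo) == LDP_w_demo);
  return 0;
}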
@@ -924,7 +993,7 @@ LoadLiteralOp Assembler::LoadLiteralOpFor(const CPURegister& rt) {
if (rt.IsRegister()) {
return rt.Is64Bits() ? LDR_x_lit : LDR_w_lit;
} else {
DCHECK(rt.IsFPRegister());
DCHECK(rt.IsVRegister());
return rt.Is64Bits() ? LDR_d_lit : LDR_s_lit;
}
}
@@ -1108,9 +1177,8 @@ Instr Assembler::ImmLS(int imm9) {
return truncate_to_int9(imm9) << ImmLS_offset;
}
Instr Assembler::ImmLSPair(int imm7, LSDataSize size) {
DCHECK(((imm7 >> size) << size) == imm7);
Instr Assembler::ImmLSPair(int imm7, unsigned size) {
DCHECK_EQ((imm7 >> size) << size, imm7);
int scaled_imm7 = imm7 >> size;
DCHECK(is_int7(scaled_imm7));
return truncate_to_int7(scaled_imm7) << ImmLSPair_offset;
@@ -1152,10 +1220,17 @@ Instr Assembler::ImmBarrierType(int imm2) {
return imm2 << ImmBarrierType_offset;
}
LSDataSize Assembler::CalcLSDataSize(LoadStoreOp op) {
DCHECK((SizeLS_offset + SizeLS_width) == (kInstructionSize * 8));
return static_cast<LSDataSize>(op >> SizeLS_offset);
unsigned Assembler::CalcLSDataSize(LoadStoreOp op) {
DCHECK((LSSize_offset + LSSize_width) == (kInstructionSize * 8));
unsigned size = static_cast<Instr>(op >> LSSize_offset);
if ((op & LSVector_mask) != 0) {
// Vector register memory operations encode the access size in the "size"
// and "opc" fields.
if ((size == 0) && ((op & LSOpc_mask) >> LSOpc_offset) >= 2) {
size = kQRegSizeLog2;
}
}
return size;
}
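CalcLSDataSize now returns a plain unsigned log2 byte size instead of the removed LSDataSize enum, and special-cases Q-register accesses, which encode size 00 with opc >= 2. A sketch of that computation under assumed field positions (size at bits 31:30, the vector bit at 26, opc at bits 23:22, matching what LSSize_offset, LSVector_mask and LSOpc_mask name in the tree); the constants are written out here for illustration rather than taken from the source:

#include <cassert>
#include <cstdint>

constexpr unsigned kSizeOffset = 30;        // assumed "size" field position
constexpr uint32_t kVectorMask = 1u << 26;  // assumed vector (V) bit
constexpr uint32_t kOpcMask = 3u << 22;     // assumed "opc" field mask
constexpr unsigned kOpcOffset = 22;
constexpr unsigned kQRegSizeLog2 = 4;       // log2(16 bytes)

// Returns log2 of the access size in bytes, with the Q-register special
// case: vector accesses with size == 0 and opc >= 2 are 128-bit.
unsigned CalcAccessSizeLog2(uint32_t op) {
  unsigned size = op >> kSizeOffset;
  if ((op & kVectorMask) != 0) {
    if ((size == 0) && (((op & kOpcMask) >> kOpcOffset) >= 2)) {
      size = kQRegSizeLog2;
    }
  }
  return size;
}

int main() {
  uint32_t ldr_q_like = kVectorMask | (3u << kOpcOffset);  // size=0, V=1, opc=3
  assert(CalcAccessSizeLog2(ldr_q_like) == kQRegSizeLog2);
  uint32_t ldr_x_like = 3u << kSizeOffset;  // 64-bit integer access
  assert(CalcAccessSizeLog2(ldr_x_like) == 3);
  return 0;
}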
@@ -1170,11 +1245,7 @@ Instr Assembler::ShiftMoveWide(int shift) {
return shift << ShiftMoveWide_offset;
}
Instr Assembler::FPType(FPRegister fd) {
return fd.Is64Bits() ? FP64 : FP32;
}
Instr Assembler::FPType(VRegister fd) { return fd.Is64Bits() ? FP64 : FP32; }
Instr Assembler::FPScale(unsigned scale) {
DCHECK(is_uint6(scale));
@@ -147,8 +147,8 @@ void DoubleToIStub::Generate(MacroAssembler* masm) {
// See call site for description.
static void EmitIdenticalObjectComparison(MacroAssembler* masm, Register left,
Register right, Register scratch,
FPRegister double_scratch,
Label* slow, Condition cond) {
VRegister double_scratch, Label* slow,
Condition cond) {
DCHECK(!AreAliased(left, right, scratch));
Label not_identical, return_equal, heap_number;
Register result = x0;
@@ -292,12 +292,9 @@ static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
// See call site for description.
static void EmitSmiNonsmiComparison(MacroAssembler* masm,
Register left,
Register right,
FPRegister left_d,
FPRegister right_d,
Label* slow,
static void EmitSmiNonsmiComparison(MacroAssembler* masm, Register left,
Register right, VRegister left_d,
VRegister right_d, Label* slow,
bool strict) {
DCHECK(!AreAliased(left_d, right_d));
DCHECK((left.is(x0) && right.is(x1)) ||
@@ -476,8 +473,8 @@ void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
// In case 3, we have found out that we were dealing with a number-number
// comparison. The double values of the numbers have been loaded, right into
// rhs_d, left into lhs_d.
FPRegister rhs_d = d0;
FPRegister lhs_d = d1;
VRegister rhs_d = d0;
VRegister lhs_d = d1;
EmitSmiNonsmiComparison(masm, lhs, rhs, lhs_d, rhs_d, &slow, strict());
__ Bind(&both_loaded_as_doubles);
@@ -613,7 +610,7 @@ void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
CPURegList saved_regs = kCallerSaved;
CPURegList saved_fp_regs = kCallerSavedFP;
CPURegList saved_fp_regs = kCallerSavedV;
// We don't allow a GC during a store buffer overflow so there is no need to
// store the registers in any particular way, but we do have to store and
@@ -686,12 +683,12 @@ void MathPowStub::Generate(MacroAssembler* masm) {
Register exponent_integer = MathPowIntegerDescriptor::exponent();
DCHECK(exponent_integer.is(x12));
Register saved_lr = x19;
FPRegister result_double = d0;
FPRegister base_double = d0;
FPRegister exponent_double = d1;
FPRegister base_double_copy = d2;
FPRegister scratch1_double = d6;
FPRegister scratch0_double = d7;
VRegister result_double = d0;
VRegister base_double = d0;
VRegister exponent_double = d1;
VRegister base_double_copy = d2;
VRegister scratch1_double = d6;
VRegister scratch0_double = d7;
// A fast-path for integer exponents.
Label exponent_is_smi, exponent_is_integer;
@@ -1649,8 +1646,8 @@ void CompareICStub::GenerateNumbers(MacroAssembler* masm) {
Register result = x0;
Register rhs = x0;
Register lhs = x1;
FPRegister rhs_d = d0;
FPRegister lhs_d = d1;
VRegister rhs_d = d0;
VRegister lhs_d = d1;
if (left() == CompareICState::SMI) {
__ JumpIfNotSmi(lhs, &miss);
@@ -2109,7 +2106,7 @@ RecordWriteStub::RegisterAllocation::RegisterAllocation(Register object,
address_(address),
scratch0_(scratch),
saved_regs_(kCallerSaved),
saved_fp_regs_(kCallerSavedFP) {
saved_fp_regs_(kCallerSavedV) {
DCHECK(!AreAliased(scratch, object, address));
// The SaveCallerSaveRegisters method needs to save caller-saved
@@ -213,6 +213,11 @@ void Decoder<V>::DecodeLoadStore(Instruction* instr) {
(instr->Bits(27, 24) == 0xC) ||
(instr->Bits(27, 24) == 0xD) );
if ((instr->Bit(28) == 0) && (instr->Bit(29) == 0) && (instr->Bit(26) == 1)) {
DecodeNEONLoadStore(instr);
return;
}
if (instr->Bit(24) == 0) {
if (instr->Bit(28) == 0) {
if (instr->Bit(29) == 0) {
@@ -226,8 +231,6 @@ void Decoder<V>::DecodeLoadStore(Instruction* instr) {
} else {
V::VisitLoadStoreAcquireRelease(instr);
}
} else {
DecodeAdvSIMDLoadStore(instr);
}
} else {
if ((instr->Bits(31, 30) == 0x3) ||
@@ -513,16 +516,14 @@ void Decoder<V>::DecodeFP(Instruction* instr) {
(instr->Bits(27, 24) == 0xF) );
if (instr->Bit(28) == 0) {
DecodeAdvSIMDDataProcessing(instr);
DecodeNEONVectorDataProcessing(instr);
} else {
if (instr->Bit(29) == 1) {
if (instr->Bits(31, 30) == 0x3) {
V::VisitUnallocated(instr);
} else if (instr->Bits(31, 30) == 0x1) {
DecodeNEONScalarDataProcessing(instr);
} else {
if (instr->Bits(31, 30) == 0x3) {
V::VisitUnallocated(instr);
} else if (instr->Bits(31, 30) == 0x1) {
DecodeAdvSIMDDataProcessing(instr);
} else {
if (instr->Bit(29) == 0) {
if (instr->Bit(24) == 0) {
if (instr->Bit(21) == 0) {
if ((instr->Bit(23) == 1) ||
@@ -629,25 +630,190 @@ void Decoder<V>::DecodeFP(Instruction* instr) {
V::VisitFPDataProcessing3Source(instr);
}
}
} else {
V::VisitUnallocated(instr);
}
}
}
}
template<typename V>
void Decoder<V>::DecodeAdvSIMDLoadStore(Instruction* instr) {
// TODO(all): Implement Advanced SIMD load/store instruction decode.
template <typename V>
void Decoder<V>::DecodeNEONLoadStore(Instruction* instr) {
DCHECK(instr->Bits(29, 25) == 0x6);
V::VisitUnimplemented(instr);
if (instr->Bit(31) == 0) {
if ((instr->Bit(24) == 0) && (instr->Bit(21) == 1)) {
V::VisitUnallocated(instr);
return;
}
if (instr->Bit(23) == 0) {
if (instr->Bits(20, 16) == 0) {
if (instr->Bit(24) == 0) {
V::VisitNEONLoadStoreMultiStruct(instr);
} else {
V::VisitNEONLoadStoreSingleStruct(instr);
}
} else {
V::VisitUnallocated(instr);
}
} else {
if (instr->Bit(24) == 0) {
V::VisitNEONLoadStoreMultiStructPostIndex(instr);
} else {
V::VisitNEONLoadStoreSingleStructPostIndex(instr);
}
}
} else {
V::VisitUnallocated(instr);
}
}
template <typename V>
void Decoder<V>::DecodeNEONVectorDataProcessing(Instruction* instr) {
DCHECK(instr->Bits(28, 25) == 0x7);
if (instr->Bit(31) == 0) {
if (instr->Bit(24) == 0) {
if (instr->Bit(21) == 0) {
if (instr->Bit(15) == 0) {
if (instr->Bit(10) == 0) {
if (instr->Bit(29) == 0) {
if (instr->Bit(11) == 0) {
V::VisitNEONTable(instr);
} else {
V::VisitNEONPerm(instr);
}
} else {
V::VisitNEONExtract(instr);
}
} else {
if (instr->Bits(23, 22) == 0) {
V::VisitNEONCopy(instr);
} else {
V::VisitUnallocated(instr);
}
}
} else {
V::VisitUnallocated(instr);
}
} else {
if (instr->Bit(10) == 0) {
if (instr->Bit(11) == 0) {
V::VisitNEON3Different(instr);
} else {
if (instr->Bits(18, 17) == 0) {
if (instr->Bit(20) == 0) {
if (instr->Bit(19) == 0) {
V::VisitNEON2RegMisc(instr);
} else {
if (instr->Bits(30, 29) == 0x2) {
V::VisitUnallocated(instr);
} else {
V::VisitUnallocated(instr);
}
}
} else {
if (instr->Bit(19) == 0) {
V::VisitNEONAcrossLanes(instr);
} else {
V::VisitUnallocated(instr);
}
}
} else {
V::VisitUnallocated(instr);
}
}
} else {
V::VisitNEON3Same(instr);
}
}
} else {
if (instr->Bit(10) == 0) {
V::VisitNEONByIndexedElement(instr);
} else {
if (instr->Bit(23) == 0) {
if (instr->Bits(22, 19) == 0) {
V::VisitNEONModifiedImmediate(instr);
} else {
V::VisitNEONShiftImmediate(instr);
}
} else {
V::VisitUnallocated(instr);
}
}
}
} else {
V::VisitUnallocated(instr);
}
}
template<typename V>
void Decoder<V>::DecodeAdvSIMDDataProcessing(Instruction* instr) {
// TODO(all): Implement Advanced SIMD data processing instruction decode.
DCHECK(instr->Bits(27, 25) == 0x7);
V::VisitUnimplemented(instr);
template <typename V>
void Decoder<V>::DecodeNEONScalarDataProcessing(Instruction* instr) {
DCHECK(instr->Bits(28, 25) == 0xF);
if (instr->Bit(24) == 0) {
if (instr->Bit(21) == 0) {
if (instr->Bit(15) == 0) {
if (instr->Bit(10) == 0) {
if (instr->Bit(29) == 0) {
if (instr->Bit(11) == 0) {
V::VisitUnallocated(instr);
} else {
V::VisitUnallocated(instr);
}
} else {
V::VisitUnallocated(instr);
}
} else {
if (instr->Bits(23, 22) == 0) {
V::VisitNEONScalarCopy(instr);
} else {
V::VisitUnallocated(instr);
}
}
} else {
V::VisitUnallocated(instr);
}
} else {
if (instr->Bit(10) == 0) {
if (instr->Bit(11) == 0) {
V::VisitNEONScalar3Diff(instr);
} else {
if (instr->Bits(18, 17) == 0) {
if (instr->Bit(20) == 0) {
if (instr->Bit(19) == 0) {
V::VisitNEONScalar2RegMisc(instr);
} else {
if (instr->Bit(29) == 0) {
V::VisitUnallocated(instr);
} else {
V::VisitUnallocated(instr);
}
}
} else {
if (instr->Bit(19) == 0) {
V::VisitNEONScalarPairwise(instr);
} else {
V::VisitUnallocated(instr);
}
}
} else {
V::VisitUnallocated(instr);
}
}
} else {
V::VisitNEONScalar3Same(instr);
}
}
} else {
if (instr->Bit(10) == 0) {
V::VisitNEONScalarByIndexedElement(instr);
} else {
if (instr->Bit(23) == 0) {
V::VisitNEONScalarShiftImmediate(instr);
} else {
V::VisitUnallocated(instr);
}
}
}
}
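The new DecodeNEON* routines follow the same pattern as the rest of the decoder: a templated Decoder<V> walks individual instruction bits and forwards to visitor hooks on V. The toy below shows only that dispatch shape; the bit positions, class names and routing here are invented for the example and are far simpler than the real NEON decode tree above.

#include <cstdint>
#include <cstdio>

// Toy visitor with static hooks, standing in for the V parameter.
struct PrintingVisitor {
  static void VisitVector(uint32_t instr) { std::printf("vector  %08x\n", instr); }
  static void VisitScalar(uint32_t instr) { std::printf("scalar  %08x\n", instr); }
  static void VisitUnallocated(uint32_t instr) { std::printf("unalloc %08x\n", instr); }
};

template <typename V>
struct ToyDecoder {
  static int Bit(uint32_t instr, int pos) { return (instr >> pos) & 1; }

  // Invented routing: bit 31 set is unallocated, bit 28 selects scalar
  // versus vector. The real tree tests many more bits before dispatching.
  static void Decode(uint32_t instr) {
    if (Bit(instr, 31) != 0) {
      V::VisitUnallocated(instr);
    } else if (Bit(instr, 28) == 1) {
      V::VisitScalar(instr);
    } else {
      V::VisitVector(instr);
    }
  }
};

int main() {
  ToyDecoder<PrintingVisitor>::Decode(0x0E201C00);  // routed to VisitVector
  ToyDecoder<PrintingVisitor>::Decode(0x5E201C00);  // routed to VisitScalar
  return 0;
}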
@@ -16,50 +16,72 @@ namespace internal {
// List macro containing all visitors needed by the decoder class.
#define VISITOR_LIST(V) \
V(PCRelAddressing) \
V(AddSubImmediate) \
V(LogicalImmediate) \
V(MoveWideImmediate) \
V(Bitfield) \
V(Extract) \
V(UnconditionalBranch) \
V(UnconditionalBranchToRegister) \
V(CompareBranch) \
V(TestBranch) \
V(ConditionalBranch) \
V(System) \
V(Exception) \
V(LoadStorePairPostIndex) \
V(LoadStorePairOffset) \
V(LoadStorePairPreIndex) \
V(LoadLiteral) \
V(LoadStoreUnscaledOffset) \
V(LoadStorePostIndex) \
V(LoadStorePreIndex) \
V(LoadStoreRegisterOffset) \
V(LoadStoreUnsignedOffset) \
V(LoadStoreAcquireRelease) \
V(LogicalShifted) \
V(AddSubShifted) \
V(AddSubExtended) \
V(AddSubWithCarry) \
V(ConditionalCompareRegister) \
V(ConditionalCompareImmediate) \
V(ConditionalSelect) \
V(DataProcessing1Source) \
V(DataProcessing2Source) \
V(DataProcessing3Source) \
V(FPCompare) \
V(FPConditionalCompare) \
V(FPConditionalSelect) \
V(FPImmediate) \
V(FPDataProcessing1Source) \
V(FPDataProcessing2Source) \
V(FPDataProcessing3Source) \
V(FPIntegerConvert) \
V(FPFixedPointConvert) \
V(Unallocated) \
#define VISITOR_LIST(V) \
V(PCRelAddressing) \
V(AddSubImmediate) \
V(LogicalImmediate) \
V(MoveWideImmediate) \
V(Bitfield) \
V(Extract) \
V(UnconditionalBranch) \
V(UnconditionalBranchToRegister) \
V(CompareBranch) \
V(TestBranch) \
V(ConditionalBranch) \
V(System) \
V(Exception) \
V(LoadStorePairPostIndex) \
V(LoadStorePairOffset) \
V(LoadStorePairPreIndex) \
V(LoadLiteral) \
V(LoadStoreUnscaledOffset) \
V(LoadStorePostIndex) \
V(LoadStorePreIndex) \
V(LoadStoreRegisterOffset) \
V(LoadStoreUnsignedOffset) \
V(LoadStoreAcquireRelease) \
V(LogicalShifted) \
V(AddSubShifted) \
V(AddSubExtended) \
V(AddSubWithCarry) \
V(ConditionalCompareRegister) \
V(ConditionalCompareImmediate) \
V(ConditionalSelect) \
V(DataProcessing1Source) \
V(DataProcessing2Source) \
V(DataProcessing3Source) \
V(FPCompare) \
V(FPConditionalCompare) \
V(FPConditionalSelect) \
V(FPImmediate) \
V(FPDataProcessing1Source) \
V(FPDataProcessing2Source) \
V(FPDataProcessing3Source) \
V(FPIntegerConvert) \
V(FPFixedPointConvert) \
V(NEON2RegMisc) \
V(NEON3Different) \
V(NEON3Same) \
V(NEONAcrossLanes) \
V(NEONByIndexedElement) \
V(NEONCopy) \
V(NEONExtract) \
V(NEONLoadStoreMultiStruct) \
V(NEONLoadStoreMultiStructPostIndex) \
V(NEONLoadStoreSingleStruct) \
V(NEONLoadStoreSingleStructPostIndex) \
V(NEONModifiedImmediate) \
V(NEONScalar2RegMisc) \
V(NEONScalar3Diff) \
V(NEONScalar3Same) \
V(NEONScalarByIndexedElement) \
V(NEONScalarCopy) \
V(NEONScalarPairwise) \
V(NEONScalarShiftImmediate) \
V(NEONShiftImmediate) \
V(NEONTable) \
V(NEONPerm) \
V(Unallocated) \
V(Unimplemented)
// The Visitor interface. Disassembler and simulator (and other tools)
@@ -109,6 +131,8 @@ class DispatchingDecoderVisitor : public DecoderVisitor {
// stored by the decoder.
void RemoveVisitor(DecoderVisitor* visitor);
void VisitNEONShiftImmediate(const Instruction* instr);
#define DECLARE(A) void Visit##A(Instruction* instr);
VISITOR_LIST(DECLARE)
#undef DECLARE
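VISITOR_LIST is an X-macro: the DECLARE definition above, applied through VISITOR_LIST(DECLARE), expands into one Visit<Name> declaration per list entry, so adding the NEON visitors to the list automatically extends every class that instantiates it. A stand-alone illustration of the pattern with an invented three-entry list (TOY_VISITOR_LIST, ToyVisitor and Instruction here are placeholders, not V8's types):

#include <cstdio>

struct Instruction {};

#define TOY_VISITOR_LIST(V) \
  V(NEON3Same)              \
  V(NEON2RegMisc)           \
  V(Unallocated)

class ToyVisitor {
 public:
// Expands to: void VisitNEON3Same(Instruction*); void VisitNEON2RegMisc(...); ...
#define DECLARE(A) void Visit##A(Instruction* instr);
  TOY_VISITOR_LIST(DECLARE)
#undef DECLARE
};

// The same list generates the definitions, so list and code never drift apart.
#define DEFINE(A)                                 \
  void ToyVisitor::Visit##A(Instruction* instr) { \
    (void)instr;                                  \
    std::printf("visit %s\n", #A);                \
  }
TOY_VISITOR_LIST(DEFINE)
#undef DEFINE

int main() {
  Instruction instr;
  ToyVisitor v;
  v.VisitNEON3Same(&instr);
  v.VisitUnallocated(&instr);
  return 0;
}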
@@ -173,12 +197,17 @@ class Decoder : public V {
// Decode the Advanced SIMD (NEON) load/store part of the instruction tree,
// and call the corresponding visitors.
// On entry, instruction bits 29:25 = 0x6.
void DecodeAdvSIMDLoadStore(Instruction* instr);
void DecodeNEONLoadStore(Instruction* instr);
// Decode the Advanced SIMD (NEON) data processing part of the instruction
// tree, and call the corresponding visitors.
// On entry, instruction bits 27:25 = 0x7.
void DecodeAdvSIMDDataProcessing(Instruction* instr);
void DecodeNEONVectorDataProcessing(Instruction* instr);
// Decode the Advanced SIMD (NEON) scalar data processing part of the
// instruction tree, and call the corresponding visitors.
// On entry, instruction bits 28:25 = 0xF.
void DecodeNEONScalarDataProcessing(Instruction* instr);
};
@@ -99,13 +99,13 @@ void Deoptimizer::TableEntryGenerator::Generate() {
// Save all allocatable double registers.
CPURegList saved_double_registers(
CPURegister::kFPRegister, kDRegSizeInBits,
CPURegister::kVRegister, kDRegSizeInBits,
RegisterConfiguration::Crankshaft()->allocatable_double_codes_mask());
__ PushCPURegList(saved_double_registers);
// Save all allocatable float registers.
CPURegList saved_float_registers(
CPURegister::kFPRegister, kSRegSizeInBits,
CPURegister::kVRegister, kSRegSizeInBits,
RegisterConfiguration::Crankshaft()->allocatable_float_codes_mask());
__ PushCPURegList(saved_float_registers);
@@ -5,6 +5,7 @@
#ifndef V8_ARM64_DISASM_ARM64_H
#define V8_ARM64_DISASM_ARM64_H
#include "src/arm64/assembler-arm64.h"
#include "src/arm64/decoder-arm64.h"
#include "src/arm64/instructions-arm64.h"
#include "src/globals.h"
@@ -29,6 +30,13 @@ class DisassemblingDecoder : public DecoderVisitor {
protected:
virtual void ProcessOutput(Instruction* instr);
// Default output functions. The functions below implement a default way of
// printing elements in the disassembly. A sub-class can override these to
// customize the disassembly output.
// Prints the name of a register.
virtual void AppendRegisterNameToOutput(const CPURegister& reg);
void Format(Instruction* instr, const char* mnemonic, const char* format);
void Substitute(Instruction* instr, const char* string);
int SubstituteField(Instruction* instr, const char* format);
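The new comment and AppendRegisterNameToOutput hook make the disassembler's register printing overridable. A sketch of how a sub-class might use such a hook; ToyDisassembler, ToyReg and the alias logic are invented for the example, and only the virtual-override shape mirrors the real DisassemblingDecoder:

#include <cstdio>

struct ToyReg {
  unsigned code;
  bool is_vector;
};

class ToyDisassembler {
 public:
  virtual ~ToyDisassembler() = default;
  // Default: plain numbered register names.
  virtual void AppendRegisterNameToOutput(const ToyReg& reg) {
    std::printf("%c%u", reg.is_vector ? 'v' : 'x', reg.code);
  }
};

// Override to print architectural aliases instead of numbered names.
class AliasingDisassembler : public ToyDisassembler {
 public:
  void AppendRegisterNameToOutput(const ToyReg& reg) override {
    if (!reg.is_vector && reg.code == 29) {
      std::printf("fp");  // frame pointer alias for x29
    } else {
      ToyDisassembler::AppendRegisterNameToOutput(reg);
    }
  }
};

int main() {
  AliasingDisassembler disasm;
  disasm.AppendRegisterNameToOutput({29, false});  // prints "fp"
  std::printf("\n");
  disasm.AppendRegisterNameToOutput({0, true});    // prints "v0"
  std::printf("\n");
  return 0;
}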
@@ -103,13 +103,13 @@ class Arm64OperandGenerator final : public OperandGenerator {
case kArithmeticImm:
return Assembler::IsImmAddSub(value);
case kLoadStoreImm8:
return IsLoadStoreImmediate(value, LSByte);
return IsLoadStoreImmediate(value, 0);
case kLoadStoreImm16:
return IsLoadStoreImmediate(value, LSHalfword);
return IsLoadStoreImmediate(value, 1);
case kLoadStoreImm32:
return IsLoadStoreImmediate(value, LSWord);
return IsLoadStoreImmediate(value, 2);
case kLoadStoreImm64:
return IsLoadStoreImmediate(value, LSDoubleWord);
return IsLoadStoreImmediate(value, 3);
case kNoImmediate:
return false;
case kShift32Imm: // Fall through.
@@ -130,7 +130,7 @@ class Arm64OperandGenerator final : public OperandGenerator {
}
private:
bool IsLoadStoreImmediate(int64_t value, LSDataSize size) {
bool IsLoadStoreImmediate(int64_t value, unsigned size) {
return Assembler::IsImmLSScaled(value, size) ||
Assembler::IsImmLSUnscaled(value);
}
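With LSDataSize gone, IsLoadStoreImmediate takes the access size as a plain log2 byte count (0 = byte ... 4 = quadword). The sketch below spells out what the scaled and unscaled checks mean for A64 load/store immediates; it is written for illustration and is not V8's IsImmLSScaled/IsImmLSUnscaled verbatim:

#include <cassert>
#include <cstdint>

// Scaled form (LDR/STR unsigned offset): the offset must be a multiple of
// the access size and, once divided by it, fit in an unsigned 12-bit field.
bool IsImmLSScaledDemo(int64_t offset, unsigned size_log2) {
  bool offset_is_size_multiple =
      ((offset >> size_log2) << size_log2) == offset;
  int64_t scaled = offset >> size_log2;
  return offset_is_size_multiple && scaled >= 0 && scaled < (1 << 12);
}

// Unscaled form (LDUR/STUR): any offset in the signed 9-bit range.
bool IsImmLSUnscaledDemo(int64_t offset) {
  return offset >= -256 && offset <= 255;
}

int main() {
  assert(IsImmLSScaledDemo(4096, 3));   // 4096 = 512 * 8, encodable scaled
  assert(!IsImmLSScaledDemo(4097, 3));  // not a multiple of 8
  assert(IsImmLSUnscaledDemo(-16));     // small negative offsets use unscaled
  assert(!IsImmLSUnscaledDemo(4096));
  return 0;
}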
@@ -44,14 +44,12 @@ void DelayedMasm::Mov(const Register& rd,
__ Mov(rd, operand, discard_mode);
}
void DelayedMasm::Fmov(FPRegister fd, FPRegister fn) {
void DelayedMasm::Fmov(VRegister fd, VRegister fn) {
EmitPending();
__ Fmov(fd, fn);
}
void DelayedMasm::Fmov(FPRegister fd, double imm) {
void DelayedMasm::Fmov(VRegister fd, double imm) {
EmitPending();
__ Fmov(fd, imm);
}
@@ -61,8 +61,8 @@ class DelayedMasm BASE_EMBEDDED {
inline void Mov(const Register& rd,
const Operand& operand,
DiscardMoveMode discard_mode = kDontDiscardForSameWReg);
inline void Fmov(FPRegister fd, FPRegister fn);
inline void Fmov(FPRegister fd, double imm);
inline void Fmov(VRegister fd, VRegister fn);
inline void Fmov(VRegister fd, double imm);
inline void LoadObject(Register result, Handle<Object> object);
// Instructions which try to merge with the pending instructions.
void StackSlotMove(LOperand* src, LOperand* dst);