Commit d208cdd7 authored by binji's avatar binji Committed by Commit bot

Add {lda,stl}x?r{,b,h} instructions to ARM64 assembler/disassembler

They are not currently implemented by the ARM64 simulator.

R=jarin@chromium.org, bmeurer@chromium.org

Review-Url: https://codereview.chromium.org/1990073002
Cr-Commit-Position: refs/heads/master@{#36385}
parent 31ac67ee
...@@ -1716,6 +1716,83 @@ void Assembler::ldr(const CPURegister& rt, const Immediate& imm) { ...@@ -1716,6 +1716,83 @@ void Assembler::ldr(const CPURegister& rt, const Immediate& imm) {
ldr_pcrel(rt, 0); ldr_pcrel(rt, 0);
} }
void Assembler::ldar(const Register& rt, const Register& rn) {
DCHECK(rn.Is64Bits());
LoadStoreAcquireReleaseOp op = rt.Is32Bits() ? LDAR_w : LDAR_x;
Emit(op | Rs(x31) | Rt2(x31) | Rn(rn) | Rt(rt));
}
void Assembler::ldaxr(const Register& rt, const Register& rn) {
DCHECK(rn.Is64Bits());
LoadStoreAcquireReleaseOp op = rt.Is32Bits() ? LDAXR_w : LDAXR_x;
Emit(op | Rs(x31) | Rt2(x31) | Rn(rn) | Rt(rt));
}
void Assembler::stlr(const Register& rt, const Register& rn) {
DCHECK(rn.Is64Bits());
LoadStoreAcquireReleaseOp op = rt.Is32Bits() ? STLR_w : STLR_x;
Emit(op | Rs(x31) | Rt2(x31) | Rn(rn) | Rt(rt));
}
void Assembler::stlxr(const Register& rs, const Register& rt,
const Register& rn) {
DCHECK(rs.Is32Bits());
DCHECK(rn.Is64Bits());
LoadStoreAcquireReleaseOp op = rt.Is32Bits() ? STLXR_w : STLXR_x;
Emit(op | Rs(rs) | Rt2(x31) | Rn(rn) | Rt(rt));
}
// Load-acquire byte into the 32-bit register rt from [rn].
void Assembler::ldarb(const Register& rt, const Register& rn) {
  DCHECK(rt.Is32Bits());
  DCHECK(rn.Is64Bits());
  // Rs and Rt2 are unused by this encoding; set them to x31.
  Emit(LDAR_b | Rs(x31) | Rt2(x31) | Rn(rn) | Rt(rt));
}
// Load-acquire exclusive byte into the 32-bit register rt from [rn].
void Assembler::ldaxrb(const Register& rt, const Register& rn) {
  DCHECK(rt.Is32Bits());
  DCHECK(rn.Is64Bits());
  // Rs and Rt2 are unused by this encoding; set them to x31.
  Emit(LDAXR_b | Rs(x31) | Rt2(x31) | Rn(rn) | Rt(rt));
}
// Store-release byte from the 32-bit register rt to [rn].
void Assembler::stlrb(const Register& rt, const Register& rn) {
  DCHECK(rt.Is32Bits());
  DCHECK(rn.Is64Bits());
  // Rs and Rt2 are unused by this encoding; set them to x31.
  Emit(STLR_b | Rs(x31) | Rt2(x31) | Rn(rn) | Rt(rt));
}
// Store-release exclusive byte from rt to [rn]; rs receives the
// exclusive-store status. All of rs and rt must be 32-bit W registers.
void Assembler::stlxrb(const Register& rs, const Register& rt,
                       const Register& rn) {
  DCHECK(rs.Is32Bits());
  DCHECK(rt.Is32Bits());
  DCHECK(rn.Is64Bits());
  // Rt2 is unused by this encoding; set it to x31.
  Emit(STLXR_b | Rs(rs) | Rt2(x31) | Rn(rn) | Rt(rt));
}
// Load-acquire half-word into the 32-bit register rt from [rn].
void Assembler::ldarh(const Register& rt, const Register& rn) {
  DCHECK(rt.Is32Bits());
  DCHECK(rn.Is64Bits());
  // Rs and Rt2 are unused by this encoding; set them to x31.
  Emit(LDAR_h | Rs(x31) | Rt2(x31) | Rn(rn) | Rt(rt));
}
// Load-acquire exclusive half-word into the 32-bit register rt from [rn].
void Assembler::ldaxrh(const Register& rt, const Register& rn) {
  DCHECK(rt.Is32Bits());
  DCHECK(rn.Is64Bits());
  // Rs and Rt2 are unused by this encoding; set them to x31.
  Emit(LDAXR_h | Rs(x31) | Rt2(x31) | Rn(rn) | Rt(rt));
}
// Store-release half-word from the 32-bit register rt to [rn].
void Assembler::stlrh(const Register& rt, const Register& rn) {
  DCHECK(rt.Is32Bits());
  DCHECK(rn.Is64Bits());
  // Rs and Rt2 are unused by this encoding; set them to x31.
  Emit(STLR_h | Rs(x31) | Rt2(x31) | Rn(rn) | Rt(rt));
}
// Store-release exclusive half-word from rt to [rn]; rs receives the
// exclusive-store status. Both rs and rt must be 32-bit W registers.
void Assembler::stlxrh(const Register& rs, const Register& rt,
                       const Register& rn) {
  DCHECK(rs.Is32Bits());
  DCHECK(rt.Is32Bits());
  DCHECK(rn.Is64Bits());
  // Rt2 is unused by this encoding; set it to x31.
  Emit(STLXR_h | Rs(rs) | Rt2(x31) | Rn(rn) | Rt(rt));
}
void Assembler::mov(const Register& rd, const Register& rm) { void Assembler::mov(const Register& rd, const Register& rm) {
// Moves involving the stack pointer are encoded as add immediate with // Moves involving the stack pointer are encoded as add immediate with
......
...@@ -1401,6 +1401,42 @@ class Assembler : public AssemblerBase { ...@@ -1401,6 +1401,42 @@ class Assembler : public AssemblerBase {
// Load literal to register. // Load literal to register.
void ldr(const CPURegister& rt, const Immediate& imm); void ldr(const CPURegister& rt, const Immediate& imm);
// Load-acquire word: ldar rt, [rn].
void ldar(const Register& rt, const Register& rn);
// Load-acquire exclusive word: ldaxr rt, [rn].
void ldaxr(const Register& rt, const Register& rn);
// Store-release word: stlr rt, [rn].
void stlr(const Register& rt, const Register& rn);
// Store-release exclusive word: stlxr rs, rt, [rn]. rs (a W register)
// receives the exclusive-store status.
void stlxr(const Register& rs, const Register& rt, const Register& rn);
// Load-acquire byte: ldarb rt, [rn]. rt must be a W register.
void ldarb(const Register& rt, const Register& rn);
// Load-acquire exclusive byte: ldaxrb rt, [rn]. rt must be a W register.
void ldaxrb(const Register& rt, const Register& rn);
// Store-release byte: stlrb rt, [rn]. rt must be a W register.
void stlrb(const Register& rt, const Register& rn);
// Store-release exclusive byte: stlxrb rs, rt, [rn]. rs and rt must be
// W registers; rs receives the exclusive-store status.
void stlxrb(const Register& rs, const Register& rt, const Register& rn);
// Load-acquire half-word: ldarh rt, [rn]. rt must be a W register.
void ldarh(const Register& rt, const Register& rn);
// Load-acquire exclusive half-word: ldaxrh rt, [rn]. rt must be a W register.
void ldaxrh(const Register& rt, const Register& rn);
// Store-release half-word: stlrh rt, [rn]. rt must be a W register.
void stlrh(const Register& rt, const Register& rn);
// Store-release exclusive half-word: stlxrh rs, rt, [rn]. rs and rt must be
// W registers; rs receives the exclusive-store status.
void stlxrh(const Register& rs, const Register& rt, const Register& rn);
// Move instructions. The default shift of -1 indicates that the move // Move instructions. The default shift of -1 indicates that the move
// instruction will calculate an appropriate 16-bit immediate and left shift // instruction will calculate an appropriate 16-bit immediate and left shift
// that is equal to the 64-bit immediate argument. If an explicit left shift // that is equal to the 64-bit immediate argument. If an explicit left shift
...@@ -1695,6 +1731,11 @@ class Assembler : public AssemblerBase { ...@@ -1695,6 +1731,11 @@ class Assembler : public AssemblerBase {
return rt2.code() << Rt2_offset; return rt2.code() << Rt2_offset;
} }
// Encode a register into the Rs field (used as the status register of
// store-exclusive instructions). Like the other register encoders in this
// group, it disallows the stack pointer encoding.
static Instr Rs(CPURegister rs) {
  DCHECK(rs.code() != kSPRegInternalCode);
  return rs.code() << Rs_offset;
}
// These encoding functions allow the stack pointer to be encoded, and // These encoding functions allow the stack pointer to be encoded, and
// disallow the zero register. // disallow the zero register.
static Instr RdSP(Register rd) { static Instr RdSP(Register rd) {
......
This diff is collapsed.
...@@ -217,8 +217,15 @@ void Decoder<V>::DecodeLoadStore(Instruction* instr) { ...@@ -217,8 +217,15 @@ void Decoder<V>::DecodeLoadStore(Instruction* instr) {
if (instr->Bit(28) == 0) { if (instr->Bit(28) == 0) {
if (instr->Bit(29) == 0) { if (instr->Bit(29) == 0) {
if (instr->Bit(26) == 0) { if (instr->Bit(26) == 0) {
// TODO(all): VisitLoadStoreExclusive. if (instr->Mask(0xA08000) == 0x800000 ||
V::VisitUnimplemented(instr); instr->Mask(0xA00000) == 0xA00000) {
V::VisitUnallocated(instr);
} else if (instr->Mask(0x808000) == 0) {
// Load/Store exclusive without acquire/release are unimplemented.
V::VisitUnimplemented(instr);
} else {
V::VisitLoadStoreAcquireRelease(instr);
}
} else { } else {
DecodeAdvSIMDLoadStore(instr); DecodeAdvSIMDLoadStore(instr);
} }
......
...@@ -16,49 +16,50 @@ namespace internal { ...@@ -16,49 +16,50 @@ namespace internal {
// List macro containing all visitors needed by the decoder class. // List macro containing all visitors needed by the decoder class.
#define VISITOR_LIST(V) \ #define VISITOR_LIST(V) \
V(PCRelAddressing) \ V(PCRelAddressing) \
V(AddSubImmediate) \ V(AddSubImmediate) \
V(LogicalImmediate) \ V(LogicalImmediate) \
V(MoveWideImmediate) \ V(MoveWideImmediate) \
V(Bitfield) \ V(Bitfield) \
V(Extract) \ V(Extract) \
V(UnconditionalBranch) \ V(UnconditionalBranch) \
V(UnconditionalBranchToRegister) \ V(UnconditionalBranchToRegister) \
V(CompareBranch) \ V(CompareBranch) \
V(TestBranch) \ V(TestBranch) \
V(ConditionalBranch) \ V(ConditionalBranch) \
V(System) \ V(System) \
V(Exception) \ V(Exception) \
V(LoadStorePairPostIndex) \ V(LoadStorePairPostIndex) \
V(LoadStorePairOffset) \ V(LoadStorePairOffset) \
V(LoadStorePairPreIndex) \ V(LoadStorePairPreIndex) \
V(LoadLiteral) \ V(LoadLiteral) \
V(LoadStoreUnscaledOffset) \ V(LoadStoreUnscaledOffset) \
V(LoadStorePostIndex) \ V(LoadStorePostIndex) \
V(LoadStorePreIndex) \ V(LoadStorePreIndex) \
V(LoadStoreRegisterOffset) \ V(LoadStoreRegisterOffset) \
V(LoadStoreUnsignedOffset) \ V(LoadStoreUnsignedOffset) \
V(LogicalShifted) \ V(LoadStoreAcquireRelease) \
V(AddSubShifted) \ V(LogicalShifted) \
V(AddSubExtended) \ V(AddSubShifted) \
V(AddSubWithCarry) \ V(AddSubExtended) \
V(ConditionalCompareRegister) \ V(AddSubWithCarry) \
V(ConditionalCompareImmediate) \ V(ConditionalCompareRegister) \
V(ConditionalSelect) \ V(ConditionalCompareImmediate) \
V(DataProcessing1Source) \ V(ConditionalSelect) \
V(DataProcessing2Source) \ V(DataProcessing1Source) \
V(DataProcessing3Source) \ V(DataProcessing2Source) \
V(FPCompare) \ V(DataProcessing3Source) \
V(FPConditionalCompare) \ V(FPCompare) \
V(FPConditionalSelect) \ V(FPConditionalCompare) \
V(FPImmediate) \ V(FPConditionalSelect) \
V(FPDataProcessing1Source) \ V(FPImmediate) \
V(FPDataProcessing2Source) \ V(FPDataProcessing1Source) \
V(FPDataProcessing3Source) \ V(FPDataProcessing2Source) \
V(FPIntegerConvert) \ V(FPDataProcessing3Source) \
V(FPFixedPointConvert) \ V(FPIntegerConvert) \
V(Unallocated) \ V(FPFixedPointConvert) \
V(Unallocated) \
V(Unimplemented) V(Unimplemented)
// The Visitor interface. Disassembler and simulator (and other tools) // The Visitor interface. Disassembler and simulator (and other tools)
......
...@@ -914,6 +914,34 @@ void DisassemblingDecoder::VisitLoadStorePairOffset(Instruction* instr) { ...@@ -914,6 +914,34 @@ void DisassemblingDecoder::VisitLoadStorePairOffset(Instruction* instr) {
Format(instr, mnemonic, form); Format(instr, mnemonic, form);
} }
// Disassemble load-acquire/store-release instructions. Loads and plain
// store-releases print as "rt, [rn]"; store-release exclusives additionally
// print the 32-bit status register rs first.
// Note: pointer placed with the type ("Instruction* instr") for consistency
// with the other visitors in this file.
void DisassemblingDecoder::VisitLoadStoreAcquireRelease(Instruction* instr) {
  const char *mnemonic = "unimplemented";
  // Default form covers the 32-bit loads/stores and all byte/half-word
  // variants (rt is always a W register for those).
  const char *form = "'Wt, ['Xn]";
  const char *form_x = "'Xt, ['Xn]";
  const char *form_stlx = "'Ws, 'Wt, ['Xn]";
  const char *form_stlx_x = "'Ws, 'Xt, ['Xn]";
  switch (instr->Mask(LoadStoreAcquireReleaseMask)) {
    case LDAXR_b: mnemonic = "ldaxrb"; break;
    case STLR_b: mnemonic = "stlrb"; break;
    case LDAR_b: mnemonic = "ldarb"; break;
    case LDAXR_h: mnemonic = "ldaxrh"; break;
    case STLR_h: mnemonic = "stlrh"; break;
    case LDAR_h: mnemonic = "ldarh"; break;
    case LDAXR_w: mnemonic = "ldaxr"; break;
    case STLR_w: mnemonic = "stlr"; break;
    case LDAR_w: mnemonic = "ldar"; break;
    case LDAXR_x: mnemonic = "ldaxr"; form = form_x; break;
    case STLR_x: mnemonic = "stlr"; form = form_x; break;
    case LDAR_x: mnemonic = "ldar"; form = form_x; break;
    case STLXR_h: mnemonic = "stlxrh"; form = form_stlx; break;
    case STLXR_b: mnemonic = "stlxrb"; form = form_stlx; break;
    case STLXR_w: mnemonic = "stlxr"; form = form_stlx; break;
    case STLXR_x: mnemonic = "stlxr"; form = form_stlx_x; break;
    // Unrecognized encodings print the mask name, matching the other
    // visitors' fallback convention.
    default: form = "(LoadStoreAcquireReleaseMask)";
  }
  Format(instr, mnemonic, form);
}
void DisassemblingDecoder::VisitFPCompare(Instruction* instr) { void DisassemblingDecoder::VisitFPCompare(Instruction* instr) {
const char *mnemonic = "unimplemented"; const char *mnemonic = "unimplemented";
...@@ -1295,6 +1323,9 @@ int DisassemblingDecoder::SubstituteRegisterField(Instruction* instr, ...@@ -1295,6 +1323,9 @@ int DisassemblingDecoder::SubstituteRegisterField(Instruction* instr,
} }
break; break;
} }
case 's':
reg_num = instr->Rs();
break;
default: UNREACHABLE(); default: UNREACHABLE();
} }
......
...@@ -429,6 +429,31 @@ void Instrument::VisitLoadStoreUnsignedOffset(Instruction* instr) { ...@@ -429,6 +429,31 @@ void Instrument::VisitLoadStoreUnsignedOffset(Instruction* instr) {
InstrumentLoadStore(instr); InstrumentLoadStore(instr);
} }
// Count load-acquire/store-release instructions: all LDAR/LDAXR variants
// increment the "Load Acquire" counter, all STLR/STLXR variants increment
// the "Store Release" counter.
void Instrument::VisitLoadStoreAcquireRelease(Instruction* instr) {
  Update();
  // Counters are looked up once and cached for subsequent visits.
  static Counter* load_counter = GetCounter("Load Acquire");
  static Counter* store_counter = GetCounter("Store Release");
  switch (instr->Mask(LoadStoreAcquireReleaseMask)) {
    case LDAR_b: // Fall-through.
    case LDAR_h: // Fall-through.
    case LDAR_w: // Fall-through.
    case LDAR_x: // Fall-through.
    case LDAXR_b: // Fall-through.
    case LDAXR_h: // Fall-through.
    case LDAXR_w: // Fall-through.
    case LDAXR_x: load_counter->Increment(); break;
    case STLR_b: // Fall-through.
    case STLR_h: // Fall-through.
    case STLR_w: // Fall-through.
    case STLR_x: // Fall-through.
    case STLXR_b: // Fall-through.
    case STLXR_h: // Fall-through.
    case STLXR_w: // Fall-through.
    case STLXR_x: store_counter->Increment(); break;
    // The decoder only dispatches acquire/release encodings here, so any
    // other value is a decoder bug.
    default: UNREACHABLE();
  }
}
void Instrument::VisitLogicalShifted(Instruction* instr) { void Instrument::VisitLogicalShifted(Instruction* instr) {
Update(); Update();
......
...@@ -309,6 +309,22 @@ LS_MACRO_LIST(DEFINE_FUNCTION) ...@@ -309,6 +309,22 @@ LS_MACRO_LIST(DEFINE_FUNCTION)
LSPAIR_MACRO_LIST(DEFINE_FUNCTION) LSPAIR_MACRO_LIST(DEFINE_FUNCTION)
#undef DEFINE_FUNCTION #undef DEFINE_FUNCTION
// Define the two-operand load-acquire/store-release macro-assembler wrappers
// (e.g. Ldar, Stlrb): each checks that macro instructions are allowed, then
// emits the underlying assembler instruction.
// Named DEFINE_FUNCTION (not DECLARE_FUNCTION) for consistency with the
// LS_MACRO_LIST/LSPAIR_MACRO_LIST expansions above: these macros emit
// function definitions, not declarations.
#define DEFINE_FUNCTION(FN, OP)                                     \
  void MacroAssembler::FN(const Register& rt, const Register& rn) { \
    DCHECK(allow_macro_instructions_);                              \
    OP(rt, rn);                                                     \
  }
LDA_STL_MACRO_LIST(DEFINE_FUNCTION)
#undef DEFINE_FUNCTION
// Define the three-operand store-release exclusive wrappers (Stlxrb, Stlxrh,
// Stlxr). Named DEFINE_FUNCTION (not DECLARE_FUNCTION) because these macros
// emit function definitions, matching the file's convention for the other
// list-macro definitions.
#define DEFINE_FUNCTION(FN, OP)                              \
  void MacroAssembler::FN(const Register& rs,                \
                          const Register& rt,                \
                          const Register& rn) {              \
    DCHECK(allow_macro_instructions_);                       \
    OP(rs, rt, rn);                                          \
  }
STLX_MACRO_LIST(DEFINE_FUNCTION)
#undef DEFINE_FUNCTION
void MacroAssembler::Asr(const Register& rd, void MacroAssembler::Asr(const Register& rd,
const Register& rn, const Register& rn,
......
...@@ -68,6 +68,21 @@ namespace internal { ...@@ -68,6 +68,21 @@ namespace internal {
V(Stp, CPURegister&, rt, rt2, StorePairOpFor(rt, rt2)) \ V(Stp, CPURegister&, rt, rt2, StorePairOpFor(rt, rt2)) \
V(Ldpsw, CPURegister&, rt, rt2, LDPSW_x) V(Ldpsw, CPURegister&, rt, rt2, LDPSW_x)
// List of the two-operand load-acquire/store-release instructions, as
// (MacroAssembler name, assembler mnemonic) pairs.
#define LDA_STL_MACRO_LIST(V) \
  V(Ldarb, ldarb)             \
  V(Ldarh, ldarh)             \
  V(Ldar, ldar)               \
  V(Ldaxrb, ldaxrb)           \
  V(Ldaxrh, ldaxrh)           \
  V(Ldaxr, ldaxr)             \
  V(Stlrb, stlrb)             \
  V(Stlrh, stlrh)             \
  V(Stlr, stlr)

// List of the three-operand store-release exclusive instructions, which take
// an additional status register rs.
#define STLX_MACRO_LIST(V) \
  V(Stlxrb, stlxrb)        \
  V(Stlxrh, stlxrh)        \
  V(Stlxr, stlxr)
// ---------------------------------------------------------------------------- // ----------------------------------------------------------------------------
// Static helper functions // Static helper functions
...@@ -295,6 +310,17 @@ class MacroAssembler : public Assembler { ...@@ -295,6 +310,17 @@ class MacroAssembler : public Assembler {
void LoadStorePairMacro(const CPURegister& rt, const CPURegister& rt2, void LoadStorePairMacro(const CPURegister& rt, const CPURegister& rt2,
const MemOperand& addr, LoadStorePairOp op); const MemOperand& addr, LoadStorePairOp op);
// Load-acquire/store-release macros.
// Two-operand forms: FN(rt, rn), e.g. Ldar(w0, x1).
#define DECLARE_FUNCTION(FN, OP) \
  inline void FN(const Register& rt, const Register& rn);
LDA_STL_MACRO_LIST(DECLARE_FUNCTION)
#undef DECLARE_FUNCTION

// Three-operand store-release exclusive forms: FN(rs, rt, rn), where rs
// receives the exclusive-store status.
#define DECLARE_FUNCTION(FN, OP) \
  inline void FN(const Register& rs, const Register& rt, const Register& rn);
STLX_MACRO_LIST(DECLARE_FUNCTION)
#undef DECLARE_FUNCTION
// V8-specific load/store helpers. // V8-specific load/store helpers.
void Load(const Register& rt, const MemOperand& addr, Representation r); void Load(const Register& rt, const MemOperand& addr, Representation r);
void Store(const Register& rt, const MemOperand& addr, Representation r); void Store(const Register& rt, const MemOperand& addr, Representation r);
......
...@@ -1900,6 +1900,9 @@ void Simulator::LoadStoreWriteBack(unsigned addr_reg, ...@@ -1900,6 +1900,9 @@ void Simulator::LoadStoreWriteBack(unsigned addr_reg,
} }
} }
void Simulator::VisitLoadStoreAcquireRelease(Instruction* instr) {
  // TODO(binji): Implement load-acquire/store-release in the simulator.
  // Until then these instructions are silently treated as no-ops: no load or
  // store is performed, so simulated code that executes them will observe
  // stale register/memory values.
  // NOTE(review): consider failing loudly here (e.g. UNIMPLEMENTED()) rather
  // than silently misexecuting — confirm no simulator tests rely on the nop.
}
void Simulator::CheckMemoryAccess(uintptr_t address, uintptr_t stack) { void Simulator::CheckMemoryAccess(uintptr_t address, uintptr_t stack) {
if ((address >= stack_limit_) && (address < stack)) { if ((address >= stack_limit_) && (address < stack)) {
......
...@@ -1259,6 +1259,24 @@ TEST_(load_store_pair) { ...@@ -1259,6 +1259,24 @@ TEST_(load_store_pair) {
CLEANUP(); CLEANUP();
} }
// Round-trip disassembly test: each COMPARE assembles the instruction and
// checks it disassembles to the expected text, covering the word, byte and
// half-word acquire/release variants.
TEST_(load_store_acquire_release) {
  SET_UP_MASM();

  COMPARE(ldar(w0, x1), "ldar w0, [x1]");
  COMPARE(ldarb(w2, x3), "ldarb w2, [x3]");
  COMPARE(ldarh(w4, x5), "ldarh w4, [x5]");
  COMPARE(ldaxr(w6, x7), "ldaxr w6, [x7]");
  COMPARE(ldaxrb(w8, x9), "ldaxrb w8, [x9]");
  COMPARE(ldaxrh(w10, x11), "ldaxrh w10, [x11]");
  COMPARE(stlr(w12, x13), "stlr w12, [x13]");
  COMPARE(stlrb(w14, x15), "stlrb w14, [x15]");
  COMPARE(stlrh(w16, x17), "stlrh w16, [x17]");
  COMPARE(stlxr(w18, w19, x20), "stlxr w18, w19, [x20]");
  COMPARE(stlxrb(w21, w22, x23), "stlxrb w21, w22, [x23]");
  COMPARE(stlxrh(w24, w25, x26), "stlxrh w24, w25, [x26]");

  CLEANUP();
}
#if 0 // TODO(all): enable. #if 0 // TODO(all): enable.
TEST_(load_literal) { TEST_(load_literal) {
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment