Commit a904b569 authored by jfb's avatar jfb Committed by Commit bot

Security: disable nontemporals.

The operations were available on ARM64 and x86-32 but were unused.

It has been conjectured that nontemporals can be used for rowhammer-like bitflips more easily than regular load/store operations. It is therefore desirable to avoid generating these instructions in the future.

R= titzer, jochen, jln, Mark Seaborn, ruiq

Review URL: https://codereview.chromium.org/1276113002

Cr-Commit-Position: refs/heads/master@{#30139}
parent 60268cee
......@@ -962,32 +962,6 @@ LoadStorePairOp Assembler::StorePairOpFor(const CPURegister& rt,
}
// Returns the LDNP opcode variant matching the width and bank (integer vs.
// FP) of the register pair. Both registers must agree in size and type;
// only rt is consulted for the encoding.
LoadStorePairNonTemporalOp Assembler::LoadPairNonTemporalOpFor(
    const CPURegister& rt, const CPURegister& rt2) {
  DCHECK(AreSameSizeAndType(rt, rt2));
  USE(rt2);
  const bool is_64 = rt.Is64Bits();
  if (!rt.IsRegister()) {
    DCHECK(rt.IsFPRegister());
    return is_64 ? LDNP_d : LDNP_s;
  }
  return is_64 ? LDNP_x : LDNP_w;
}
// Returns the STNP opcode variant matching the width and bank (integer vs.
// FP) of the register pair. Both registers must agree in size and type;
// only rt is consulted for the encoding.
LoadStorePairNonTemporalOp Assembler::StorePairNonTemporalOpFor(
    const CPURegister& rt, const CPURegister& rt2) {
  DCHECK(AreSameSizeAndType(rt, rt2));
  USE(rt2);
  const bool is_64 = rt.Is64Bits();
  if (!rt.IsRegister()) {
    DCHECK(rt.IsFPRegister());
    return is_64 ? STNP_d : STNP_s;
  }
  return is_64 ? STNP_x : STNP_w;
}
LoadLiteralOp Assembler::LoadLiteralOpFor(const CPURegister& rt) {
if (rt.IsRegister()) {
return rt.Is64Bits() ? LDR_x_lit : LDR_w_lit;
......
......@@ -1628,37 +1628,6 @@ void Assembler::LoadStorePair(const CPURegister& rt,
}
// Load a register pair with a non-temporal (streaming) hint; the opcode is
// selected from the size/type of the registers.
void Assembler::ldnp(const CPURegister& rt,
                     const CPURegister& rt2,
                     const MemOperand& src) {
  LoadStorePairNonTemporal(rt, rt2, src,
                           LoadPairNonTemporalOpFor(rt, rt2));
}
// Store a register pair with a non-temporal (streaming) hint; the opcode is
// selected from the size/type of the registers.
void Assembler::stnp(const CPURegister& rt,
                     const CPURegister& rt2,
                     const MemOperand& dst) {
  LoadStorePairNonTemporal(rt, rt2, dst,
                           StorePairNonTemporalOpFor(rt, rt2));
}
// Emits a non-temporal load/store pair instruction (LDNP/STNP). The address
// must be a plain immediate offset (the DCHECK rules out pre/post-index
// forms), the registers must be distinct and of matching size/type, and the
// offset must be encodable as a scaled pair immediate.
void Assembler::LoadStorePairNonTemporal(const CPURegister& rt,
                                         const CPURegister& rt2,
                                         const MemOperand& addr,
                                         LoadStorePairNonTemporalOp op) {
  DCHECK(!rt.Is(rt2));
  DCHECK(AreSameSizeAndType(rt, rt2));
  DCHECK(addr.IsImmediateOffset());
  // The size bits of the non-temporal opcode line up with the ordinary pair
  // opcodes, so the pair data-size helper can be reused after masking.
  LSDataSize size = CalcLSPairDataSize(
      static_cast<LoadStorePairOp>(op & LoadStorePairMask));
  DCHECK(IsImmLSPair(addr.offset(), size));
  int offset = static_cast<int>(addr.offset());
  Emit(op | Rt(rt) | Rt2(rt2) | RnSP(addr.base()) | ImmLSPair(offset, size));
}
// Memory instructions.
void Assembler::ldrb(const Register& rt, const MemOperand& src) {
LoadStore(rt, src, LDRB_w);
......
......@@ -1492,14 +1492,6 @@ class Assembler : public AssemblerBase {
// Load word pair with sign extension.
void ldpsw(const Register& rt, const Register& rt2, const MemOperand& src);
// Load integer or FP register pair, non-temporal.
void ldnp(const CPURegister& rt, const CPURegister& rt2,
const MemOperand& src);
// Store integer or FP register pair, non-temporal.
void stnp(const CPURegister& rt, const CPURegister& rt2,
const MemOperand& dst);
// Load literal to register from a pc relative address.
void ldr_pcrel(const CPURegister& rt, int imm19);
......@@ -2007,10 +1999,6 @@ class Assembler : public AssemblerBase {
static inline LoadStoreOp StoreOpFor(const CPURegister& rt);
static inline LoadStorePairOp StorePairOpFor(const CPURegister& rt,
const CPURegister& rt2);
static inline LoadStorePairNonTemporalOp LoadPairNonTemporalOpFor(
const CPURegister& rt, const CPURegister& rt2);
static inline LoadStorePairNonTemporalOp StorePairNonTemporalOpFor(
const CPURegister& rt, const CPURegister& rt2);
static inline LoadLiteralOp LoadLiteralOpFor(const CPURegister& rt);
// Remove the specified branch from the unbound label link chain.
......@@ -2036,10 +2024,6 @@ class Assembler : public AssemblerBase {
const Operand& operand,
FlagsUpdate S,
Instr op);
void LoadStorePairNonTemporal(const CPURegister& rt,
const CPURegister& rt2,
const MemOperand& addr,
LoadStorePairNonTemporalOp op);
void ConditionalSelect(const Register& rd,
const Register& rn,
const Register& rm,
......
......@@ -764,20 +764,6 @@ enum LoadStorePairOffsetOp {
#undef LOAD_STORE_PAIR_OFFSET
};
// Load/store pair non-temporal (LDNP/STNP) opcodes.
enum LoadStorePairNonTemporalOp {
  LoadStorePairNonTemporalFixed = 0x28000000,
  // Fixed-bit mask identifying this instruction class.
  LoadStorePairNonTemporalFMask = 0x3B800000,
  // Full-opcode mask used to distinguish the variants below.
  LoadStorePairNonTemporalMask = 0xFFC00000,
  // Each variant combines the non-temporal fixed bits with the size/type
  // field of the corresponding ordinary pair opcode (STP_w, LDP_w, ...).
  STNP_w = LoadStorePairNonTemporalFixed | STP_w,
  LDNP_w = LoadStorePairNonTemporalFixed | LDP_w,
  STNP_x = LoadStorePairNonTemporalFixed | STP_x,
  LDNP_x = LoadStorePairNonTemporalFixed | LDP_x,
  STNP_s = LoadStorePairNonTemporalFixed | STP_s,
  LDNP_s = LoadStorePairNonTemporalFixed | LDP_s,
  STNP_d = LoadStorePairNonTemporalFixed | STP_d,
  LDNP_d = LoadStorePairNonTemporalFixed | LDP_d
};
// Load literal.
enum LoadLiteralOp {
LoadLiteralFixed = 0x18000000,
......
......@@ -231,7 +231,8 @@ void Decoder<V>::DecodeLoadStore(Instruction* instr) {
if (instr->Mask(0xC4400000) == 0xC0400000) {
V::VisitUnallocated(instr);
} else {
V::VisitLoadStorePairNonTemporal(instr);
// Nontemporals are unimplemented.
V::VisitUnimplemented(instr);
}
} else {
V::VisitLoadStorePairPostIndex(instr);
......
......@@ -33,7 +33,6 @@ namespace internal {
V(LoadStorePairPostIndex) \
V(LoadStorePairOffset) \
V(LoadStorePairPreIndex) \
V(LoadStorePairNonTemporal) \
V(LoadLiteral) \
V(LoadStoreUnscaledOffset) \
V(LoadStorePostIndex) \
......
......@@ -917,25 +917,6 @@ void Disassembler::VisitLoadStorePairOffset(Instruction* instr) {
}
// Disassembles an LDNP/STNP (non-temporal load/store pair) instruction.
// Unrecognized encodings print the placeholder form with the
// "unimplemented" mnemonic.
void Disassembler::VisitLoadStorePairNonTemporal(Instruction* instr) {
  const char* mnemonic = "unimplemented";
  const char* form = "(LoadStorePairNonTemporal)";
  switch (instr->Mask(LoadStorePairNonTemporalMask)) {
    case LDNP_w:
      mnemonic = "ldnp";
      form = "'Wt, 'Wt2, ['Xns'ILP4]";
      break;
    case STNP_w:
      mnemonic = "stnp";
      form = "'Wt, 'Wt2, ['Xns'ILP4]";
      break;
    case LDNP_x:
      mnemonic = "ldnp";
      form = "'Xt, 'Xt2, ['Xns'ILP8]";
      break;
    case STNP_x:
      mnemonic = "stnp";
      form = "'Xt, 'Xt2, ['Xns'ILP8]";
      break;
    case LDNP_s:
      mnemonic = "ldnp";
      form = "'St, 'St2, ['Xns'ILP4]";
      break;
    case STNP_s:
      mnemonic = "stnp";
      form = "'St, 'St2, ['Xns'ILP4]";
      break;
    case LDNP_d:
      mnemonic = "ldnp";
      form = "'Dt, 'Dt2, ['Xns'ILP8]";
      break;
    case STNP_d:
      mnemonic = "stnp";
      form = "'Dt, 'Dt2, ['Xns'ILP8]";
      break;
  }
  Format(instr, mnemonic, form);
}
void Disassembler::VisitFPCompare(Instruction* instr) {
const char *mnemonic = "unimplemented";
const char *form = "'Fn, 'Fm";
......
......@@ -364,12 +364,6 @@ void Instrument::VisitLoadStorePairPreIndex(Instruction* instr) {
}
// Counts a non-temporal pair access under the same counters as ordinary
// load/store pair instructions.
void Instrument::VisitLoadStorePairNonTemporal(Instruction* instr) {
  Update();
  InstrumentLoadStorePair(instr);
}
void Instrument::VisitLoadLiteral(Instruction* instr) {
Update();
static Counter* counter = GetCounter("Load Literal");
......
......@@ -869,15 +869,6 @@ void MacroAssembler::Isb() {
}
// Macro-assembler wrapper around ldnp. The two destination registers must
// not alias each other.
void MacroAssembler::Ldnp(const CPURegister& rt,
                          const CPURegister& rt2,
                          const MemOperand& src) {
  DCHECK(allow_macro_instructions_);
  DCHECK(!AreAliased(rt, rt2));
  ldnp(rt, rt2, src);
}
void MacroAssembler::Ldr(const CPURegister& rt, const Immediate& imm) {
DCHECK(allow_macro_instructions_);
ldr(rt, imm);
......@@ -1134,14 +1125,6 @@ void MacroAssembler::Umull(const Register& rd, const Register& rn,
}
// Macro-assembler wrapper around stnp. Unlike Ldnp, no aliasing check is
// made here: storing the same register twice is harmless.
void MacroAssembler::Stnp(const CPURegister& rt,
                          const CPURegister& rt2,
                          const MemOperand& dst) {
  DCHECK(allow_macro_instructions_);
  stnp(rt, rt2, dst);
}
void MacroAssembler::Sxtb(const Register& rd, const Register& rn) {
DCHECK(allow_macro_instructions_);
DCHECK(!rd.IsZero());
......
......@@ -1676,11 +1676,6 @@ void Simulator::VisitLoadStorePairPostIndex(Instruction* instr) {
}
// The simulator ignores the non-temporal hint and executes LDNP/STNP
// exactly like an offset-addressed load/store pair.
void Simulator::VisitLoadStorePairNonTemporal(Instruction* instr) {
  LoadStorePairHelper(instr, Offset);
}
void Simulator::LoadStorePairHelper(Instruction* instr,
AddrMode addrmode) {
unsigned rt = instr->Rt();
......
......@@ -2325,26 +2325,6 @@ void Assembler::movdqu(XMMRegister dst, const Operand& src) {
}
// Emits movntdqa xmm, m128 (encoding 66 0F 38 2A /r): non-temporal load
// from memory into an XMM register. Requires SSE4.1.
void Assembler::movntdqa(XMMRegister dst, const Operand& src) {
  DCHECK(IsEnabled(SSE4_1));
  EnsureSpace ensure_space(this);
  EMIT(0x66);
  EMIT(0x0F);
  EMIT(0x38);
  EMIT(0x2A);
  emit_sse_operand(dst, src);
}
// Emits movntdq m128, xmm (encoding 66 0F E7 /r): non-temporal store of an
// XMM register to memory.
void Assembler::movntdq(const Operand& dst, XMMRegister src) {
  EnsureSpace ensure_space(this);
  EMIT(0x66);
  EMIT(0x0F);
  EMIT(0xE7);
  emit_sse_operand(src, dst);
}
void Assembler::prefetch(const Operand& src, int level) {
DCHECK(is_uint2(level));
EnsureSpace ensure_space(this);
......
......@@ -1077,10 +1077,6 @@ class Assembler : public AssemblerBase {
}
void pinsrd(XMMRegister dst, const Operand& src, int8_t offset);
// Parallel XMM operations.
void movntdqa(XMMRegister dst, const Operand& src);
void movntdq(const Operand& dst, XMMRegister src);
// AVX instructions
void vfmadd132sd(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
vfmadd132sd(dst, src1, Operand(src2));
......
......@@ -1618,11 +1618,7 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
data++;
} else if (*data == 0x2A) {
// movntdqa
data++;
int mod, regop, rm;
get_modrm(*data, &mod, &regop, &rm);
AppendToBuffer("movntdqa %s,", NameOfXMMRegister(regop));
data += PrintRightOperand(data);
UnimplementedInstruction();
} else {
UnimplementedInstruction();
}
......@@ -1827,9 +1823,8 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
int mod, regop, rm;
get_modrm(*data, &mod, &regop, &rm);
if (mod == 3) {
AppendToBuffer("movntdq ");
data += PrintRightOperand(data);
AppendToBuffer(",%s", NameOfXMMRegister(regop));
// movntdq
UnimplementedInstruction();
} else {
UnimplementedInstruction();
}
......
......@@ -1284,11 +1284,7 @@ int DisassemblerX87::InstructionDecode(v8::internal::Vector<char> out_buffer,
data++;
} else if (*data == 0x2A) {
// movntdqa
data++;
int mod, regop, rm;
get_modrm(*data, &mod, &regop, &rm);
AppendToBuffer("movntdqa %s,", NameOfXMMRegister(regop));
data += PrintRightOperand(data);
UnimplementedInstruction();
} else {
UnimplementedInstruction();
}
......@@ -1470,9 +1466,8 @@ int DisassemblerX87::InstructionDecode(v8::internal::Vector<char> out_buffer,
int mod, regop, rm;
get_modrm(*data, &mod, &regop, &rm);
if (mod == 3) {
AppendToBuffer("movntdq ");
data += PrintRightOperand(data);
AppendToBuffer(",%s", NameOfXMMRegister(regop));
// movntdq
UnimplementedInstruction();
} else {
UnimplementedInstruction();
}
......
......@@ -2970,61 +2970,6 @@ TEST(ldp_stp_offset_wide) {
}
// Round-trips data through Ldnp/Stnp for 32-bit (w) and 64-bit (x) integer
// registers at zero, positive, and negative immediate offsets, then checks
// both the loaded register values and the stored memory contents.
TEST(ldnp_stnp_offset) {
  INIT_V8();
  SETUP();

  uint64_t src[3] = {0x0011223344556677UL, 0x8899aabbccddeeffUL,
                     0xffeeddccbbaa9988UL};
  uint64_t dst[7] = {0, 0, 0, 0, 0, 0, 0};
  uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
  uintptr_t dst_base = reinterpret_cast<uintptr_t>(dst);

  START();
  // x16/x17 point at the start of the buffers; x18/x19 past the end so the
  // negative-offset forms are exercised as well.
  __ Mov(x16, src_base);
  __ Mov(x17, dst_base);
  __ Mov(x18, src_base + 24);
  __ Mov(x19, dst_base + 56);
  __ Ldnp(w0, w1, MemOperand(x16));
  __ Ldnp(w2, w3, MemOperand(x16, 4));
  __ Ldnp(x4, x5, MemOperand(x16, 8));
  __ Ldnp(w6, w7, MemOperand(x18, -12));
  __ Ldnp(x8, x9, MemOperand(x18, -16));
  __ Stnp(w0, w1, MemOperand(x17));
  __ Stnp(w2, w3, MemOperand(x17, 8));
  __ Stnp(x4, x5, MemOperand(x17, 16));
  __ Stnp(w6, w7, MemOperand(x19, -24));
  __ Stnp(x8, x9, MemOperand(x19, -16));
  END();

  RUN();

  // Loaded values and the corresponding stored dst[] words.
  CHECK_EQUAL_64(0x44556677, x0);
  CHECK_EQUAL_64(0x00112233, x1);
  CHECK_EQUAL_64(0x0011223344556677UL, dst[0]);
  CHECK_EQUAL_64(0x00112233, x2);
  CHECK_EQUAL_64(0xccddeeff, x3);
  CHECK_EQUAL_64(0xccddeeff00112233UL, dst[1]);
  CHECK_EQUAL_64(0x8899aabbccddeeffUL, x4);
  CHECK_EQUAL_64(0x8899aabbccddeeffUL, dst[2]);
  CHECK_EQUAL_64(0xffeeddccbbaa9988UL, x5);
  CHECK_EQUAL_64(0xffeeddccbbaa9988UL, dst[3]);
  CHECK_EQUAL_64(0x8899aabb, x6);
  CHECK_EQUAL_64(0xbbaa9988, x7);
  CHECK_EQUAL_64(0xbbaa99888899aabbUL, dst[4]);
  CHECK_EQUAL_64(0x8899aabbccddeeffUL, x8);
  CHECK_EQUAL_64(0x8899aabbccddeeffUL, dst[5]);
  CHECK_EQUAL_64(0xffeeddccbbaa9988UL, x9);
  CHECK_EQUAL_64(0xffeeddccbbaa9988UL, dst[6]);
  // Base registers are offset-addressed, so they must be unchanged.
  CHECK_EQUAL_64(src_base, x16);
  CHECK_EQUAL_64(dst_base, x17);
  CHECK_EQUAL_64(src_base + 24, x18);
  CHECK_EQUAL_64(dst_base + 56, x19);

  TEARDOWN();
}
TEST(ldp_stp_preindex) {
INIT_V8();
SETUP();
......
......@@ -1249,25 +1249,6 @@ TEST_(load_store_pair) {
}
// Checks the disassembly of ldnp/stnp for integer (w/x) and FP (s/d)
// register pairs at zero offset and at the extreme encodable immediates
// for each access size.
TEST_(load_store_pair_nontemp) {
  SET_UP();

  COMPARE(ldnp(w0, w1, MemOperand(x2)), "ldnp w0, w1, [x2]");
  COMPARE(stnp(w3, w4, MemOperand(x5, 252)), "stnp w3, w4, [x5, #252]");
  COMPARE(ldnp(w6, w7, MemOperand(x8, -256)), "ldnp w6, w7, [x8, #-256]");
  COMPARE(stnp(x9, x10, MemOperand(x11)), "stnp x9, x10, [x11]");
  COMPARE(ldnp(x12, x13, MemOperand(x14, 504)), "ldnp x12, x13, [x14, #504]");
  COMPARE(stnp(x15, x16, MemOperand(x17, -512)), "stnp x15, x16, [x17, #-512]");
  COMPARE(ldnp(s18, s19, MemOperand(x20)), "ldnp s18, s19, [x20]");
  COMPARE(stnp(s21, s22, MemOperand(x23, 252)), "stnp s21, s22, [x23, #252]");
  COMPARE(ldnp(s24, s25, MemOperand(x26, -256)), "ldnp s24, s25, [x26, #-256]");
  COMPARE(stnp(d27, d28, MemOperand(fp)), "stnp d27, d28, [fp]");
  COMPARE(ldnp(d30, d31, MemOperand(x0, 504)), "ldnp d30, d31, [x0, #504]");
  COMPARE(stnp(d1, d2, MemOperand(x3, -512)), "stnp d1, d2, [x3, #-512]");

  CLEANUP();
}
#if 0 // TODO(all): enable.
TEST_(load_literal) {
SET_UP();
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment