Commit 800daded authored by Mike Stanton, committed by Commit Bot

[turbofan] Masking/poisoning in codegen (optimized code, arm64)

This introduces masking of loads with the speculation bit during code generation.
At the moment, this is done only under the
--branch-load-poisoning flag, and this CL enlarges the set of supported
platforms from {x64, arm} to {x64, arm, arm64}.

Overview of changes:
- a new register configuration with one register reserved for the
  speculation poison/mask (kSpeculationPoisonRegister).
- in codegen, we update the poison register at the start of every
  successor of branches (and deoptimization checks) that are marked
  as safety branches.
- in memory optimizer, we lower all field and element loads to PoisonedLoads.
- poisoned loads are then masked in codegen with the poison register
  (see the sketch after this list).
  * only integer loads are masked at the moment.
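
As a rough illustration of the mechanism (a minimal C++ sketch of the
concept, not V8 code; all names below are made up), the poison value is
an all-ones mask that gets cleared on a mis-speculated path, so masking
a poisoned load squashes any data read under mis-speculation:

#include <cstdint>

// Hypothetical stand-ins for kSpeculationPoisonRegister and a poisoned load.
uint64_t poison = ~uint64_t{0};  // all ones on the architectural path

// Branch-free poison update at the start of a safety-branch successor:
// keep all ones when the guarding condition really held, clear to zero
// when this block was only reached speculatively.
void UpdatePoison(bool condition_held) {
  poison &= -static_cast<uint64_t>(condition_held);
}

// A poisoned load masks its result, so a value read under mis-speculation
// becomes zero and cannot feed a later cache side channel.
uint64_t PoisonedLoad(const uint64_t* slot) {
  return *slot & poison;
}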

Bug: chromium:798964
Change-Id: Ie6bc9c3bdac9998b0ef81f050a9c844399ca3ae4
Reviewed-on: https://chromium-review.googlesource.com/928724
Commit-Queue: Michael Stanton <mvstanton@chromium.org>
Reviewed-by: Martyn Capewell <martyn.capewell@arm.com>
Reviewed-by: Ross McIlroy <rmcilroy@chromium.org>
Reviewed-by: Jaroslav Sevcik <jarin@chromium.org>
Cr-Commit-Position: refs/heads/master@{#51576}
parent 3e92808a
......@@ -462,6 +462,12 @@ void CEntryStub::Generate(MacroAssembler* masm) {
__ Str(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
__ Bind(&not_js_frame);
// Reset the masking register. This is done independent of the underlying
// feature flag {FLAG_branch_load_poisoning} to make the snapshot work with
// both configurations. It is safe to always do this, because the underlying
// register is caller-saved and can be arbitrarily clobbered.
__ ResetSpeculationPoisonRegister();
// Compute the handler entry address and jump to it.
__ Mov(x10, Operand(pending_handler_entrypoint_address));
__ Ldr(x10, MemOperand(x10));
......
......@@ -404,8 +404,7 @@ void MacroAssembler::CzeroX(const Register& rd,
// Conditionally move a value into the destination register. Only X registers
// are supported due to the truncation side-effect when used on W registers.
void MacroAssembler::CmovX(const Register& rd,
const Register& rn,
void TurboAssembler::CmovX(const Register& rd, const Register& rn,
Condition cond) {
DCHECK(allow_macro_instructions());
DCHECK(!rd.IsSP());
......
......@@ -3266,7 +3266,9 @@ void TurboAssembler::ComputeCodeStartAddress(const Register& rd) {
adr(rd, -pc_offset());
}
void TurboAssembler::ResetSpeculationPoisonRegister() { UNREACHABLE(); }
void TurboAssembler::ResetSpeculationPoisonRegister() {
Mov(kSpeculationPoisonRegister, -1);
}
#undef __
......
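
The reset value of -1 is what makes the scheme cheap on the hot path:
with every bit set, the And() applied to poisoned loads is an identity,
and only after a mis-predicted safety branch clears the register does it
zero out loaded values. A tiny self-contained check of that arithmetic
(plain C++, not V8 code):

#include <cassert>
#include <cstdint>

int main() {
  uint64_t poison = uint64_t{0} - 1;    // the -1 written by the reset: all bits set
  uint64_t loaded = 0x123456789abcull;
  assert((loaded & poison) == loaded);  // masking is a no-op when not poisoned
  poison = 0;                           // value after a mis-speculated safety branch
  assert((loaded & poison) == 0);       // speculatively loaded data is squashed
  return 0;
}
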
......@@ -1012,6 +1012,7 @@ class TurboAssembler : public Assembler {
void CanonicalizeNaN(const VRegister& dst, const VRegister& src);
void CanonicalizeNaN(const VRegister& reg) { CanonicalizeNaN(reg, reg); }
inline void CmovX(const Register& rd, const Register& rn, Condition cond);
inline void Cset(const Register& rd, Condition cond);
inline void Csetm(const Register& rd, Condition cond);
inline void Fccmp(const VRegister& fn, const VRegister& fm, StatusFlags nzcv,
......@@ -1314,7 +1315,6 @@ class MacroAssembler : public TurboAssembler {
inline void Cinc(const Register& rd, const Register& rn, Condition cond);
inline void Cinv(const Register& rd, const Register& rn, Condition cond);
inline void CzeroX(const Register& rd, Condition cond);
inline void CmovX(const Register& rd, const Register& rn, Condition cond);
inline void Csinv(const Register& rd, const Register& rn, const Register& rm,
Condition cond);
inline void Csneg(const Register& rd, const Register& rn, const Register& rm,
......
......@@ -2725,6 +2725,7 @@ void CodeGenerator::AssembleBranchPoisoning(FlagsCondition condition,
__ eor(kSpeculationPoisonRegister, kSpeculationPoisonRegister,
Operand(kSpeculationPoisonRegister), SBit::LeaveCC,
FlagsConditionToCondition(condition));
__ csdb();
}
void CodeGenerator::AssembleArchDeoptBranch(Instruction* instr,
......
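
For context on the arm hunk above: eor of a register with itself yields
zero, and the predicated form only takes effect when the supplied
condition holds, so the poison register is conditionally cleared on the
path that should not have been taken; the csdb() (Arm's Consumption of
Speculative Data Barrier) then keeps later instructions from consuming a
speculated value of that register. A branch-free sketch of the same
conditional clear (an illustration, not V8 code):

#include <cstdint>

// Mirrors the predicated "eor poison, poison, poison": x ^ x == 0.
uint64_t ConditionallyClearPoison(uint64_t poison, bool mispredicted) {
  uint64_t mask = -static_cast<uint64_t>(mispredicted);  // all ones or zero
  return poison ^ (poison & mask);                       // unchanged, or zero
}
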
......@@ -372,6 +372,19 @@ Condition FlagsConditionToCondition(FlagsCondition condition) {
UNREACHABLE();
}
void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen,
InstructionCode opcode, Instruction* instr,
Arm64OperandConverter& i) {
const MemoryAccessMode access_mode =
static_cast<MemoryAccessMode>(MiscField::decode(opcode));
if (access_mode == kMemoryAccessPoisoned) {
Register value = i.OutputRegister();
Register poison = value.Is64Bits() ? kSpeculationPoisonRegister
: kSpeculationPoisonRegister.W();
codegen->tasm()->And(value, value, Operand(poison));
}
}
} // namespace
#define ASSEMBLE_SHIFT(asm_instr, width) \
......@@ -1216,7 +1229,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
case kArm64CompareAndBranch32:
case kArm64CompareAndBranch:
// Pseudo instruction turned into cbz/cbnz in AssembleArchBranch.
// Pseudo instruction handled in AssembleArchBranch.
break;
case kArm64Claim: {
int count = i.InputInt32(0);
......@@ -1519,33 +1532,40 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
case kArm64Ldrb:
__ Ldrb(i.OutputRegister(), i.MemoryOperand());
EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kArm64Ldrsb:
__ Ldrsb(i.OutputRegister(), i.MemoryOperand());
EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kArm64Strb:
__ Strb(i.InputOrZeroRegister64(0), i.MemoryOperand(1));
break;
case kArm64Ldrh:
__ Ldrh(i.OutputRegister(), i.MemoryOperand());
EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kArm64Ldrsh:
__ Ldrsh(i.OutputRegister(), i.MemoryOperand());
EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kArm64Strh:
__ Strh(i.InputOrZeroRegister64(0), i.MemoryOperand(1));
break;
case kArm64Ldrsw:
__ Ldrsw(i.OutputRegister(), i.MemoryOperand());
EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kArm64LdrW:
__ Ldr(i.OutputRegister32(), i.MemoryOperand());
EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kArm64StrW:
__ Str(i.InputOrZeroRegister32(0), i.MemoryOperand(1));
break;
case kArm64Ldr:
__ Ldr(i.OutputRegister(), i.MemoryOperand());
EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kArm64Str:
__ Str(i.InputOrZeroRegister64(0), i.MemoryOperand(1));
......@@ -2124,9 +2144,11 @@ void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
Label* tlabel = branch->true_label;
Label* flabel = branch->false_label;
FlagsCondition condition = branch->condition;
FlagsMode mode = FlagsModeField::decode(instr->opcode());
ArchOpcode opcode = instr->arch_opcode();
if (opcode == kArm64CompareAndBranch32) {
DCHECK(mode != kFlags_branch_and_poison);
switch (condition) {
case kEqual:
__ Cbz(i.InputRegister32(0), tlabel);
......@@ -2138,6 +2160,7 @@ void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
UNREACHABLE();
}
} else if (opcode == kArm64CompareAndBranch) {
DCHECK(mode != kFlags_branch_and_poison);
switch (condition) {
case kEqual:
__ Cbz(i.InputRegister64(0), tlabel);
......@@ -2149,6 +2172,7 @@ void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
UNREACHABLE();
}
} else if (opcode == kArm64TestAndBranch32) {
DCHECK(mode != kFlags_branch_and_poison);
switch (condition) {
case kEqual:
__ Tbz(i.InputRegister32(0), i.InputInt5(1), tlabel);
......@@ -2160,6 +2184,7 @@ void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
UNREACHABLE();
}
} else if (opcode == kArm64TestAndBranch) {
DCHECK(mode != kFlags_branch_and_poison);
switch (condition) {
case kEqual:
__ Tbz(i.InputRegister64(0), i.InputInt6(1), tlabel);
......@@ -2179,7 +2204,15 @@ void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
void CodeGenerator::AssembleBranchPoisoning(FlagsCondition condition,
Instruction* instr) {
UNREACHABLE();
// TODO(jarin) Handle float comparisons (kUnordered[Not]Equal).
if (condition == kUnorderedEqual || condition == kUnorderedNotEqual) {
return;
}
condition = NegateFlagsCondition(condition);
__ CmovX(kSpeculationPoisonRegister, xzr,
FlagsConditionToCondition(condition));
__ Csdb();
}
void CodeGenerator::AssembleArchDeoptBranch(Instruction* instr,
......@@ -2363,6 +2396,7 @@ void CodeGenerator::AssembleConstructFrame() {
if (FLAG_code_comments) __ RecordComment("-- OSR entrypoint --");
osr_pc_offset_ = __ pc_offset();
shrink_slots -= osr_helper()->UnoptimizedFrameSlots();
InitializePoisonForLoadsIfNeeded();
}
if (info()->IsWasm() && shrink_slots > 128) {
......
......@@ -1952,6 +1952,9 @@ void EmitBranchOrDeoptimize(InstructionSelector* selector,
// against {value}, depending on the condition.
bool TryEmitCbzOrTbz(InstructionSelector* selector, Node* node, uint32_t value,
Node* user, FlagsCondition cond, FlagsContinuation* cont) {
// Branch poisoning requires flags to be set, so when it's enabled for
// a particular branch, we shouldn't be applying the cbz/tbz optimization.
DCHECK(!cont->IsPoisoned());
// Only handle branches and deoptimisations.
if (!cont->IsBranch() && !cont->IsDeoptimize()) return false;
......@@ -2024,16 +2027,18 @@ void VisitWord32Compare(InstructionSelector* selector, Node* node,
FlagsContinuation* cont) {
Int32BinopMatcher m(node);
FlagsCondition cond = cont->condition();
if (m.right().HasValue()) {
if (TryEmitCbzOrTbz(selector, m.left().node(), m.right().Value(), node,
cond, cont)) {
return;
}
} else if (m.left().HasValue()) {
FlagsCondition commuted_cond = CommuteFlagsCondition(cond);
if (TryEmitCbzOrTbz(selector, m.right().node(), m.left().Value(), node,
commuted_cond, cont)) {
return;
if (!cont->IsPoisoned()) {
if (m.right().HasValue()) {
if (TryEmitCbzOrTbz(selector, m.left().node(), m.right().Value(), node,
cond, cont)) {
return;
}
} else if (m.left().HasValue()) {
FlagsCondition commuted_cond = CommuteFlagsCondition(cond);
if (TryEmitCbzOrTbz(selector, m.right().node(), m.left().Value(), node,
commuted_cond, cont)) {
return;
}
}
}
ArchOpcode opcode = kArm64Cmp32;
......@@ -2106,7 +2111,7 @@ bool TryEmitTestAndBranch(InstructionSelector* selector, Node* node,
FlagsContinuation* cont) {
Arm64OperandGenerator g(selector);
Matcher m(node);
if (cont->IsBranch() && m.right().HasValue() &&
if (cont->IsBranch() && !cont->IsPoisoned() && m.right().HasValue() &&
base::bits::IsPowerOfTwo(m.right().Value())) {
// If the mask has only one bit set, we can use tbz/tbnz.
DCHECK((cont->condition() == kEqual) || (cont->condition() == kNotEqual));
......@@ -2204,7 +2209,8 @@ void InstructionSelector::VisitWordCompareZero(Node* user, Node* value,
kLogical64Imm);
}
// Merge the Word64Equal(x, 0) comparison into a cbz instruction.
if (cont->IsBranch() || cont->IsDeoptimize()) {
if ((cont->IsBranch() || cont->IsDeoptimize()) &&
!cont->IsPoisoned()) {
EmitBranchOrDeoptimize(this, cont->Encode(kArm64CompareAndBranch),
g.UseRegister(left), cont);
return;
......@@ -2315,9 +2321,16 @@ void InstructionSelector::VisitWordCompareZero(Node* user, Node* value,
// Branch could not be combined with a compare, compare against 0 and branch.
if (cont->IsBranch()) {
Emit(cont->Encode(kArm64CompareAndBranch32), g.NoOutput(),
g.UseRegister(value), g.Label(cont->true_block()),
g.Label(cont->false_block()));
if (cont->IsPoisoned()) {
// We need an instruction that sets flags for poisoning to work.
Emit(cont->Encode(kArm64Tst32), g.NoOutput(), g.UseRegister(value),
g.UseRegister(value), g.Label(cont->true_block()),
g.Label(cont->false_block()));
} else {
Emit(cont->Encode(kArm64CompareAndBranch32), g.NoOutput(),
g.UseRegister(value), g.Label(cont->true_block()),
g.Label(cont->false_block()));
}
} else if (cont->IsDeoptimize()) {
EmitDeoptimize(cont->Encode(kArm64Tst32), g.NoOutput(),
g.UseRegister(value), g.UseRegister(value), cont->kind(),
......@@ -3160,7 +3173,7 @@ InstructionSelector::AlignmentRequirements() {
}
// static
bool InstructionSelector::SupportsSpeculationPoisoning() { return false; }
bool InstructionSelector::SupportsSpeculationPoisoning() { return true; }
} // namespace compiler
} // namespace internal
......
......@@ -411,6 +411,10 @@ class FlagsContinuation final {
bool IsDeoptimize() const {
return mode_ == kFlags_deoptimize || mode_ == kFlags_deoptimize_and_poison;
}
bool IsPoisoned() const {
return mode_ == kFlags_branch_and_poison ||
mode_ == kFlags_deoptimize_and_poison;
}
bool IsSet() const { return mode_ == kFlags_set; }
bool IsTrap() const { return mode_ == kFlags_trap; }
FlagsCondition condition() const {
......
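
One consequence visible throughout the instruction selector changes:
cbz/cbnz, tbz/tbnz and the compare-and-branch pseudo instructions do not
set the NZCV flags, so whenever a continuation reports IsPoisoned() the
selector falls back to a flag-setting instruction (e.g. kArm64Tst32),
otherwise AssembleBranchPoisoning would have no condition to predicate
the poison update on. A hypothetical helper (not part of V8) capturing
that rule:

// Sketch only; assumes a FlagsContinuation-like interface as in the hunk above.
bool CanUseFlagFreeCompareAndBranch(const FlagsContinuation* cont) {
  // cbz/cbnz and tbz/tbnz leave NZCV untouched, so a poisoned continuation
  // must keep a flag-setting compare for the later conditional poison update.
  return (cont->IsBranch() || cont->IsDeoptimize()) && !cont->IsPoisoned();
}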