Commit 1177f703 authored by Mike Stanton, committed by Commit Bot

[turbofan] Masking/poisoning in codegen (optimized code, mips & mips64)

This introduces masking of loads with the speculation bit during code
generation. At the moment, this is done only under the
--branch-load-poisoning flag, and this CL enlarges the set of supported
platforms from {x64, arm, arm64} to {x64, arm, arm64, mips, mips64}.

Overview of changes:
- a new register configuration with one register reserved for the
  speculation poison/mask (kSpeculationPoisonRegister).
- in codegen, we introduce an update of the poison register at the start
  of every successor of a branch (or deopt) that is marked as a safety
  branch.
- in memory optimizer, we lower all field and element loads to PoisonedLoads.
- poisoned loads are then masked in codegen with the poison register; a
  conceptual C++ sketch follows below.
  (In this CL, the branch-dependent update of the poison register in
   AssembleBranchPoisoning is still a TODO, so you can run with the flag,
   but the mitigation is not yet effective.)
  * only integer loads are masked at the moment.

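For intuition, here is a minimal, hypothetical C++ sketch of the scheme described above. It is not V8 code: the names (poison, UpdatePoisonOnBranch, PoisonedLoad) are invented for illustration, and an ordinary C++ program cannot reproduce real misspeculation; the sketch only models how the mask is maintained at safety branches and how it is applied to loaded values.

// Conceptual sketch only; names are illustrative, not V8 APIs.
#include <cstdint>
#include <cstdio>

// The speculation poison: all ones on the architecturally correct path,
// all zeros once a safety branch has gone the "wrong" way.
static uintptr_t poison = ~uintptr_t{0};

// Conceptually emitted at the start of each successor of a safety branch:
// clear the poison if the condition leading here does not actually hold.
// (Real codegen computes this data-dependently, without a branch, so the
// update also takes effect under speculative execution.)
void UpdatePoisonOnBranch(bool condition_actually_holds) {
  if (!condition_actually_holds) poison = 0;
}

// Conceptually what EmitWordLoadPoisoningIfNeeded does after an integer
// load: AND the loaded value with the poison, so a value obtained on a
// mispredicted path is forced to zero before it can feed further loads.
uint32_t PoisonedLoad(const uint32_t* address) {
  uint32_t value = *address;
  return static_cast<uint32_t>(value & poison);
}

int main() {
  uint32_t secret = 42;
  std::printf("correct path:      %u\n", static_cast<unsigned>(PoisonedLoad(&secret)));  // 42
  UpdatePoisonOnBranch(false);  // pretend the safety branch was mispredicted
  std::printf("mispredicted path: %u\n", static_cast<unsigned>(PoisonedLoad(&secret)));  // 0
  return 0;
}

In the diff below, the AND corresponds to the And(value, value, kSpeculationPoisonRegister) calls in EmitWordLoadPoisoningIfNeeded, while the branch-dependent update is the part still marked TODO in AssembleBranchPoisoning.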
TBR=mstarzinger@chromium.org

Change-Id: Ie6eb8719bf85d49c03b4a28e2f054480195a1471
Reviewed-on: https://chromium-review.googlesource.com/973616
Commit-Queue: Michael Stanton <mvstanton@chromium.org>
Reviewed-by: Michael Starzinger <mstarzinger@chromium.org>
Reviewed-by: Michael Stanton <mvstanton@chromium.org>
Cr-Commit-Position: refs/heads/master@{#52123}
parent 9b44ee4b
@@ -349,6 +349,17 @@ FPUCondition FlagsConditionToConditionCmpFPU(bool& predicate,
UNREACHABLE();
}
void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen,
InstructionCode opcode, Instruction* instr,
MipsOperandConverter& i) {
const MemoryAccessMode access_mode =
static_cast<MemoryAccessMode>(MiscField::decode(opcode));
if (access_mode == kMemoryAccessPoisoned) {
Register value = i.OutputRegister();
codegen->tasm()->And(value, value, kSpeculationPoisonRegister);
}
}
} // namespace
#define ASSEMBLE_ROUND_DOUBLE_TO_DOUBLE(mode) \
@@ -1505,24 +1516,30 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
case kMipsLbu:
__ lbu(i.OutputRegister(), i.MemoryOperand());
EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kMipsLb:
__ lb(i.OutputRegister(), i.MemoryOperand());
EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kMipsSb:
__ sb(i.InputOrZeroRegister(2), i.MemoryOperand());
break;
case kMipsLhu:
__ lhu(i.OutputRegister(), i.MemoryOperand());
EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kMipsUlhu:
__ Ulhu(i.OutputRegister(), i.MemoryOperand());
EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kMipsLh:
__ lh(i.OutputRegister(), i.MemoryOperand());
EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kMipsUlh:
__ Ulh(i.OutputRegister(), i.MemoryOperand());
EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kMipsSh:
__ sh(i.InputOrZeroRegister(2), i.MemoryOperand());
@@ -1532,9 +1549,11 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
case kMipsLw:
__ lw(i.OutputRegister(), i.MemoryOperand());
EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kMipsUlw:
__ Ulw(i.OutputRegister(), i.MemoryOperand());
EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kMipsSw:
__ sw(i.InputOrZeroRegister(2), i.MemoryOperand());
@@ -2937,7 +2956,14 @@ void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
void CodeGenerator::AssembleBranchPoisoning(FlagsCondition condition,
Instruction* instr) {
UNREACHABLE();
// TODO(jarin) Handle float comparisons (kUnordered[Not]Equal).
if (condition == kUnorderedEqual || condition == kUnorderedNotEqual) {
return;
}
// TODO(mips): insert instructions here that place 0 into the
// kSpeculationPoisonRegister if the negation of the condition is
// true.
}
void CodeGenerator::AssembleArchDeoptBranch(Instruction* instr,
@@ -3264,6 +3290,7 @@ void CodeGenerator::AssembleConstructFrame() {
if (FLAG_code_comments) __ RecordComment("-- OSR entrypoint --");
osr_pc_offset_ = __ pc_offset();
shrink_slots -= osr_helper()->UnoptimizedFrameSlots();
InitializePoisonForLoadsIfNeeded();
}
const RegList saves = call_descriptor->CalleeSavedRegisters();
@@ -2234,7 +2234,7 @@ InstructionSelector::AlignmentRequirements() {
}
// static
bool InstructionSelector::SupportsSpeculationPoisoning() { return false; }
bool InstructionSelector::SupportsSpeculationPoisoning() { return true; }
#undef SIMD_BINOP_LIST
#undef SIMD_SHIFT_OP_LIST
@@ -362,6 +362,17 @@ FPUCondition FlagsConditionToConditionCmpFPU(bool& predicate,
UNREACHABLE();
}
void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen,
InstructionCode opcode, Instruction* instr,
MipsOperandConverter& i) {
const MemoryAccessMode access_mode =
static_cast<MemoryAccessMode>(MiscField::decode(opcode));
if (access_mode == kMemoryAccessPoisoned) {
Register value = i.OutputRegister();
codegen->tasm()->And(value, value, kSpeculationPoisonRegister);
}
}
} // namespace
#define ASSEMBLE_ROUND_DOUBLE_TO_DOUBLE(mode) \
@@ -1709,24 +1720,30 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
case kMips64Lbu:
__ Lbu(i.OutputRegister(), i.MemoryOperand());
EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kMips64Lb:
__ Lb(i.OutputRegister(), i.MemoryOperand());
EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kMips64Sb:
__ Sb(i.InputOrZeroRegister(2), i.MemoryOperand());
break;
case kMips64Lhu:
__ Lhu(i.OutputRegister(), i.MemoryOperand());
EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kMips64Ulhu:
__ Ulhu(i.OutputRegister(), i.MemoryOperand());
EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kMips64Lh:
__ Lh(i.OutputRegister(), i.MemoryOperand());
EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kMips64Ulh:
__ Ulh(i.OutputRegister(), i.MemoryOperand());
EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kMips64Sh:
__ Sh(i.InputOrZeroRegister(2), i.MemoryOperand());
@@ -1736,21 +1753,27 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
case kMips64Lw:
__ Lw(i.OutputRegister(), i.MemoryOperand());
EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kMips64Ulw:
__ Ulw(i.OutputRegister(), i.MemoryOperand());
EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kMips64Lwu:
__ Lwu(i.OutputRegister(), i.MemoryOperand());
EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kMips64Ulwu:
__ Ulwu(i.OutputRegister(), i.MemoryOperand());
EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kMips64Ld:
__ Ld(i.OutputRegister(), i.MemoryOperand());
EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kMips64Uld:
__ Uld(i.OutputRegister(), i.MemoryOperand());
EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kMips64Sw:
__ Sw(i.InputOrZeroRegister(2), i.MemoryOperand());
@@ -3157,7 +3180,14 @@ void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
void CodeGenerator::AssembleBranchPoisoning(FlagsCondition condition,
Instruction* instr) {
UNREACHABLE();
// TODO(jarin) Handle float comparisons (kUnordered[Not]Equal).
if (condition == kUnorderedEqual || condition == kUnorderedNotEqual) {
return;
}
// TODO(mips64): insert instructions here that place 0 into the
// kSpeculationPoisonRegister if the negation of the condition is
// true.
}
void CodeGenerator::AssembleArchDeoptBranch(Instruction* instr,
@@ -3490,6 +3520,7 @@ void CodeGenerator::AssembleConstructFrame() {
if (FLAG_code_comments) __ RecordComment("-- OSR entrypoint --");
osr_pc_offset_ = __ pc_offset();
shrink_slots -= osr_helper()->UnoptimizedFrameSlots();
InitializePoisonForLoadsIfNeeded();
}
const RegList saves = call_descriptor->CalleeSavedRegisters();
@@ -2916,7 +2916,7 @@ InstructionSelector::AlignmentRequirements() {
}
// static
bool InstructionSelector::SupportsSpeculationPoisoning() { return false; }
bool InstructionSelector::SupportsSpeculationPoisoning() { return true; }
#undef SIMD_BINOP_LIST
#undef SIMD_SHIFT_OP_LIST
@@ -466,6 +466,12 @@ void CEntryStub::Generate(MacroAssembler* masm) {
__ sw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
__ bind(&zero);
// Reset the masking register. This is done independent of the underlying
// feature flag {FLAG_branch_load_poisoning} to make the snapshot work with
// both configurations. It is safe to always do this, because the underlying
// register is caller-saved and can be arbitrarily clobbered.
__ ResetSpeculationPoisonRegister();
// Compute the handler entry address and jump to it.
__ li(t9, Operand(pending_handler_entrypoint_address));
__ lw(t9, MemOperand(t9));
@@ -5248,7 +5248,9 @@ void TurboAssembler::ComputeCodeStartAddress(Register dst) {
pop(ra); // Restore ra
}
void TurboAssembler::ResetSpeculationPoisonRegister() { UNREACHABLE(); }
void TurboAssembler::ResetSpeculationPoisonRegister() {
li(kSpeculationPoisonRegister, -1);
}
} // namespace internal
} // namespace v8
@@ -465,6 +465,12 @@ void CEntryStub::Generate(MacroAssembler* masm) {
__ Sd(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
__ bind(&zero);
// Reset the masking register. This is done independent of the underlying
// feature flag {FLAG_branch_load_poisoning} to make the snapshot work with
// both configurations. It is safe to always do this, because the underlying
// register is caller-saved and can be arbitrarily clobbered.
__ ResetSpeculationPoisonRegister();
// Compute the handler entry address and jump to it.
__ li(t9, Operand(pending_handler_entrypoint_address));
__ Ld(t9, MemOperand(t9));
@@ -5583,7 +5583,9 @@ void TurboAssembler::ComputeCodeStartAddress(Register dst) {
pop(ra); // Restore ra
}
void TurboAssembler::ResetSpeculationPoisonRegister() { UNREACHABLE(); }
void TurboAssembler::ResetSpeculationPoisonRegister() {
li(kSpeculationPoisonRegister, -1);
}
} // namespace internal
} // namespace v8