Commit 17f47d74 authored by Michael Achenbach, committed by Commit Bot

Revert "[ia32] Remove poisoning logic on ia32"

This reverts commit a31a6230.

Reason for revert:
https://ci.chromium.org/p/v8/builders/luci.v8.ci/V8%20Linux%20-%20nosnap%20-%20debug/21332

Original change's description:
> [ia32] Remove poisoning logic on ia32
> 
> Poisoning has been disabled by default on ia32 a while ago. This CL
> removes its logic from ia32 code generation, which will let us move
> towards fuller (and unconditional) root register support.
> 
> Bug: chromium:860429, v8:8254
> Change-Id: I8f672cf48a6ffc7bf21e7794c1b7463d7f8b9594
> Reviewed-on: https://chromium-review.googlesource.com/c/1296131
> Commit-Queue: Jakob Gruber <jgruber@chromium.org>
> Reviewed-by: Michael Starzinger <mstarzinger@chromium.org>
> Reviewed-by: Jaroslav Sevcik <jarin@chromium.org>
> Cr-Commit-Position: refs/heads/master@{#56978}

TBR=mstarzinger@chromium.org,jarin@chromium.org,jgruber@chromium.org

Change-Id: I305e9e1719fb4b3f8ef267c232723db9b52966e9
No-Presubmit: true
No-Tree-Checks: true
No-Try: true
Bug: chromium:860429, v8:8254
Reviewed-on: https://chromium-review.googlesource.com/c/1299015
Reviewed-by: Michael Achenbach <machenbach@chromium.org>
Commit-Queue: Michael Achenbach <machenbach@chromium.org>
Cr-Commit-Position: refs/heads/master@{#56980}
parent 9cde8808
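
For context, the mitigation this revert restores works as follows: code generation keeps a "speculation poison" mask in ebx that is all-ones while execution follows the architectural path, and all-zeros once the CPU has speculated down a wrong call or branch target; values loaded on a poisoned path are AND-ed with the mask before anything can depend on them. A minimal sketch of the idea in plain C++ (illustrative names only, not V8 code):

#include <cstdint>

// All bits set on the architectural path; all bits cleared when the CPU is
// speculatively executing code it was not actually dispatched to.
uint32_t ComputePoisonMask(uintptr_t actual_code_start,
                           uintptr_t expected_code_start) {
  return actual_code_start == expected_code_start ? ~uint32_t{0} : 0u;
}

// Applied to every load marked as poisoned: a value read on a mis-speculated
// path degenerates to zero before it can be used to leak data.
uint32_t PoisonLoad(uint32_t loaded_value, uint32_t mask) {
  return loaded_value & mask;
}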
@@ -1334,8 +1334,13 @@ namespace {
 void Generate_ContinueToBuiltinHelper(MacroAssembler* masm,
                                       bool java_script_builtin,
                                       bool with_result) {
+#ifdef V8_EMBEDDED_BUILTINS
+  // TODO(v8:6666): Fold into Default config once root is fully supported.
   const RegisterConfiguration* config(
       RegisterConfiguration::PreserveRootIA32());
+#else
+  const RegisterConfiguration* config(RegisterConfiguration::Default());
+#endif
   int allocatable_register_count = config->num_allocatable_general_registers();
   if (with_result) {
     // Overwrite the hole inserted by the deoptimizer with the return value from
@@ -1365,20 +1370,32 @@ void Generate_ContinueToBuiltinHelper(MacroAssembler* masm,
 }  // namespace

 void Builtins::Generate_ContinueToCodeStubBuiltin(MacroAssembler* masm) {
+#ifdef V8_EMBEDDED_BUILTINS
+  // TODO(v8:6666): Remove the ifdef once root is preserved by default.
+#endif
   Generate_ContinueToBuiltinHelper(masm, false, false);
 }

 void Builtins::Generate_ContinueToCodeStubBuiltinWithResult(
     MacroAssembler* masm) {
+#ifdef V8_EMBEDDED_BUILTINS
+  // TODO(v8:6666): Remove the ifdef once root is preserved by default.
+#endif
   Generate_ContinueToBuiltinHelper(masm, false, true);
 }

 void Builtins::Generate_ContinueToJavaScriptBuiltin(MacroAssembler* masm) {
+#ifdef V8_EMBEDDED_BUILTINS
+  // TODO(v8:6666): Remove the ifdef once root is preserved by default.
+#endif
   Generate_ContinueToBuiltinHelper(masm, true, false);
 }

 void Builtins::Generate_ContinueToJavaScriptBuiltinWithResult(
     MacroAssembler* masm) {
+#ifdef V8_EMBEDDED_BUILTINS
+  // TODO(v8:6666): Remove the ifdef once root is preserved by default.
+#endif
   Generate_ContinueToBuiltinHelper(masm, true, true);
 }
@@ -2490,6 +2507,10 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
   // If argv_mode == kArgvInRegister:
   // ecx: pointer to the first argument

+#ifdef V8_EMBEDDED_BUILTINS
+  // TODO(v8:6666): Remove the ifdef once branch load poisoning is removed.
+#endif
+
   STATIC_ASSERT(eax == kRuntimeCallArgCountRegister);
   STATIC_ASSERT(ecx == kRuntimeCallArgvRegister);
   STATIC_ASSERT(edx == kRuntimeCallFunctionRegister);
@@ -2611,6 +2632,17 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
   __ mov(Operand(ebp, StandardFrameConstants::kContextOffset), esi);
   __ bind(&skip);

+#ifdef V8_EMBEDDED_BUILTINS
+  STATIC_ASSERT(kRootRegister == kSpeculationPoisonRegister);
+  CHECK(!FLAG_untrusted_code_mitigations);
+#else
+  // Reset the masking register. This is done independent of the underlying
+  // feature flag {FLAG_untrusted_code_mitigations} to make the snapshot work
+  // with both configurations. It is safe to always do this, because the
+  // underlying register is caller-saved and can be arbitrarily clobbered.
+  __ ResetSpeculationPoisonRegister();
+#endif
+
   // Compute the handler entry address and jump to it.
   __ mov(edi, __ ExternalReferenceAsOperand(pending_handler_entrypoint_address,
                                             edi));
...
@@ -309,6 +309,30 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
   Zone* zone_;
 };

+void MoveOperandIfAliasedWithPoisonRegister(Instruction* call_instruction,
+                                            CodeGenerator* gen) {
+  IA32OperandConverter i(gen, call_instruction);
+  int const poison_index = i.InputInt32(1);
+  if (poison_index == -1) {
+    // No aliasing -> nothing to move.
+    return;
+  }
+
+  i.MoveInstructionOperandToRegister(kSpeculationPoisonRegister,
+                                     call_instruction->InputAt(poison_index));
+}
+
+void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen,
+                                   InstructionCode opcode, Instruction* instr,
+                                   IA32OperandConverter& i) {
+  const MemoryAccessMode access_mode =
+      static_cast<MemoryAccessMode>(MiscField::decode(opcode));
+  if (access_mode == kMemoryAccessPoisoned) {
+    Register value = i.OutputRegister();
+    codegen->tasm()->and_(value, kSpeculationPoisonRegister);
+  }
+}
+
 }  // namespace

 #define ASSEMBLE_COMPARE(asm_instr) \
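
The two helpers restored above are the glue of the scheme: MoveOperandIfAliasedWithPoisonRegister re-materializes a call operand that had to be kept out of the poison register, and EmitWordLoadPoisoningIfNeeded masks any load whose opcode carries kMemoryAccessPoisoned in its MiscField bits. A rough sketch of how the load hook behaves (hypothetical types, not the V8 API):

#include <cstdint>

enum MemoryAccessMode { kMemoryAccessDirect, kMemoryAccessPoisoned };

// Mirrors the shape of EmitWordLoadPoisoningIfNeeded: decode the access mode
// carried by the instruction, then AND the freshly loaded value with the mask.
uint32_t FinishLoad(uint32_t loaded_value, MemoryAccessMode mode,
                    uint32_t poison_mask) {
  if (mode == kMemoryAccessPoisoned) {
    loaded_value &= poison_mask;  // the and_ the hook emits
  }
  return loaded_value;
}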
@@ -605,13 +629,23 @@ void CodeGenerator::BailoutIfDeoptimized() {
 }

 void CodeGenerator::GenerateSpeculationPoisonFromCodeStartRegister() {
-  // TODO(860429): Remove remaining poisoning infrastructure on ia32.
-  UNREACHABLE();
+  __ push(eax);  // Push eax so we can use it as a scratch register.
+
+  // Set a mask which has all bits set in the normal case, but has all
+  // bits cleared if we are speculatively executing the wrong PC.
+  __ ComputeCodeStartAddress(eax);
+  __ mov(kSpeculationPoisonRegister, Immediate(0));
+  __ cmp(kJavaScriptCallCodeStartRegister, eax);
+  __ mov(eax, Immediate(-1));
+  __ cmov(equal, kSpeculationPoisonRegister, eax);
+
+  __ pop(eax);  // Restore eax.
 }

 void CodeGenerator::AssembleRegisterArgumentPoisoning() {
-  // TODO(860429): Remove remaining poisoning infrastructure on ia32.
-  UNREACHABLE();
+  __ and_(kJSFunctionRegister, kSpeculationPoisonRegister);
+  __ and_(kContextRegister, kSpeculationPoisonRegister);
+  __ and_(esp, kSpeculationPoisonRegister);
 }

 // Assembles an instruction after register allocation, producing machine code.
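
Note how GenerateSpeculationPoisonFromCodeStartRegister selects the mask with cmov rather than a conditional jump: a jump here could itself be predicted, reopening the very speculation window the mask is meant to close, while cmov turns the check into a pure data dependency. eax is pushed and popped because ia32 has no spare scratch register once ebx is reserved. AssembleRegisterArgumentPoisoning then applies the mask at JS function entry; a sketch of its effect (plain C++, illustrative only):

#include <cstdint>

struct EntryState {
  uintptr_t js_function;  // kJSFunctionRegister (edi)
  uintptr_t context;      // kContextRegister (esi)
  uintptr_t stack_ptr;    // esp
};

// With mask == 0 (a mis-speculated call), the function, the context, and even
// the stack pointer collapse to zero, so no attacker-chosen address can be
// dereferenced speculatively from here on.
EntryState PoisonEntryState(EntryState s, uintptr_t mask) {
  s.js_function &= mask;
  s.context &= mask;
  s.stack_ptr &= mask;
  return s;
}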
@@ -622,6 +656,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
   ArchOpcode arch_opcode = ArchOpcodeField::decode(opcode);
   switch (arch_opcode) {
     case kArchCallCodeObject: {
+      MoveOperandIfAliasedWithPoisonRegister(instr, this);
       InstructionOperand* op = instr->InputAt(0);
       if (op->IsImmediate()) {
         Handle<Code> code = i.InputCode(0);
@@ -660,6 +695,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       break;
     }
     case kArchCallWasmFunction: {
+      MoveOperandIfAliasedWithPoisonRegister(instr, this);
       if (HasImmediateInput(instr, 0)) {
         Constant constant = i.ToConstant(instr->InputAt(0));
         Address wasm_code = static_cast<Address>(constant.ToInt32());
@@ -686,6 +722,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
     }
     case kArchTailCallCodeObjectFromJSFunction:
     case kArchTailCallCodeObject: {
+      MoveOperandIfAliasedWithPoisonRegister(instr, this);
       if (arch_opcode == kArchTailCallCodeObjectFromJSFunction) {
         AssemblePopArgumentsAdaptorFrame(kJavaScriptCallArgCountRegister,
                                          no_reg, no_reg, no_reg);
@@ -710,6 +747,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       break;
     }
     case kArchTailCallWasm: {
+      MoveOperandIfAliasedWithPoisonRegister(instr, this);
       if (HasImmediateInput(instr, 0)) {
         Constant constant = i.ToConstant(instr->InputAt(0));
         Address wasm_code = static_cast<Address>(constant.ToInt32());
@@ -727,6 +765,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       break;
     }
     case kArchTailCallAddress: {
+      MoveOperandIfAliasedWithPoisonRegister(instr, this);
       CHECK(!HasImmediateInput(instr, 0));
       Register reg = i.InputRegister(0);
       DCHECK_IMPLIES(
@@ -742,6 +781,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       break;
     }
     case kArchCallJSFunction: {
+      MoveOperandIfAliasedWithPoisonRegister(instr, this);
       Register func = i.InputRegister(0);
       if (FLAG_debug_code) {
         // Check the function's context matches the context argument.
@@ -792,6 +832,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       AssemblePrepareTailCall();
       break;
     case kArchCallCFunction: {
+      MoveOperandIfAliasedWithPoisonRegister(instr, this);
       int const num_parameters = MiscField::decode(instr->opcode());
       if (HasImmediateInput(instr, 0)) {
         ExternalReference ref = i.InputExternalReference(0);
@@ -1176,8 +1217,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       __ bswap(i.OutputRegister());
       break;
     case kArchWordPoisonOnSpeculation:
-      // TODO(860429): Remove remaining poisoning infrastructure on ia32.
-      UNREACHABLE();
+      DCHECK_EQ(i.OutputRegister(), i.InputRegister(0));
+      __ and_(i.InputRegister(0), kSpeculationPoisonRegister);
       break;
     case kLFence:
       __ lfence();
@@ -1552,9 +1593,11 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       break;
     case kIA32Movsxbl:
       ASSEMBLE_MOVX(movsx_b);
+      EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
       break;
     case kIA32Movzxbl:
       ASSEMBLE_MOVX(movzx_b);
+      EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
       break;
     case kIA32Movb: {
       size_t index = 0;
@@ -1564,13 +1607,16 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       } else {
         __ mov_b(operand, i.InputRegister(index));
       }
+      EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
       break;
     }
     case kIA32Movsxwl:
       ASSEMBLE_MOVX(movsx_w);
+      EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
       break;
     case kIA32Movzxwl:
       ASSEMBLE_MOVX(movzx_w);
+      EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
       break;
     case kIA32Movw: {
       size_t index = 0;
@@ -1580,11 +1626,13 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       } else {
         __ mov_w(operand, i.InputRegister(index));
       }
+      EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
       break;
     }
     case kIA32Movl:
       if (instr->HasOutput()) {
         __ mov(i.OutputRegister(), i.MemoryOperand());
+        EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
       } else {
         size_t index = 0;
         Operand operand = i.MemoryOperand(&index);
@@ -3871,8 +3919,15 @@ void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {

 void CodeGenerator::AssembleBranchPoisoning(FlagsCondition condition,
                                             Instruction* instr) {
-  // TODO(860429): Remove remaining poisoning infrastructure on ia32.
-  UNREACHABLE();
+  // TODO(jarin) Handle float comparisons (kUnordered[Not]Equal).
+  if (condition == kUnorderedEqual || condition == kUnorderedNotEqual) {
+    return;
+  }
+
+  condition = NegateFlagsCondition(condition);
+  __ setcc(FlagsConditionToCondition(condition), kSpeculationPoisonRegister);
+  __ add(kSpeculationPoisonRegister, Immediate(255));
+  __ sar(kSpeculationPoisonRegister, 31u);
 }

 void CodeGenerator::AssembleArchDeoptBranch(Instruction* instr,
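
The restored AssembleBranchPoisoning folds the just-resolved branch condition into the mask without branching: setcc writes a 0 or 1 into the low byte of the poison register (which is all-ones or all-zeros at that point), adding 255 carries out of the low byte exactly when that bit was 1, and the arithmetic shift by 31 smears the resulting sign bit across the register. The mask stays all-ones only if it was all-ones and the flags confirm the direction taken; otherwise it becomes zero. Worked through in C++ (a sketch of the bit trick only, not V8 code):

#include <cstdint>

uint32_t UpdatePoison(uint32_t poison, uint32_t wrong_way /* setcc: 0 or 1 */) {
  uint32_t x = (poison & ~0xFFu) | wrong_way;  // setcc writes only the low byte
  x += 255;                   // the low byte carries out iff wrong_way == 1
  return static_cast<uint32_t>(static_cast<int32_t>(x) >> 31);  // sar 31
}
// UpdatePoison(0xFFFFFFFF, 0) == 0xFFFFFFFF  (still on the architectural path)
// UpdatePoison(0xFFFFFFFF, 1) == 0x00000000  (the branch went the wrong way)
// UpdatePoison(0x00000000, 0) == 0x00000000  (once poisoned, stays poisoned)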
@@ -4200,6 +4255,7 @@ void CodeGenerator::AssembleConstructFrame() {
     if (FLAG_code_comments) __ RecordComment("-- OSR entrypoint --");
     osr_pc_offset_ = __ pc_offset();
     shrink_slots -= osr_helper()->UnoptimizedFrameSlots();
+    ResetSpeculationPoison();
   }

   const RegList saves = call_descriptor->CalleeSavedRegisters();
...
@@ -1353,6 +1353,7 @@ void VisitPairAtomicBinOp(InstructionSelector* selector, Node* node,
   // For Word64 operations, the value input is split into the a high node,
   // and a low node in the int64-lowering phase.
   Node* value_high = node->InputAt(3);
+  bool block_root_register = !FLAG_embedded_builtins;

   // Wasm lives in 32-bit address space, so we do not need to worry about
   // base/index lowering. This will need to be fixed for Wasm64.
@@ -1365,19 +1366,22 @@ void VisitPairAtomicBinOp(InstructionSelector* selector, Node* node,
   Node* projection0 = NodeProperties::FindProjection(node, 0);
   Node* projection1 = NodeProperties::FindProjection(node, 1);
   if (projection1) {
+    InstructionOperand temps[] = {g.TempRegister(ebx)};
     InstructionOperand outputs[] = {g.DefineAsFixed(projection0, eax),
                                     g.DefineAsFixed(projection1, edx)};
+    const int num_temps = arraysize(temps) - (block_root_register ? 0 : 1);
     selector->Emit(code, arraysize(outputs), outputs, arraysize(inputs), inputs,
-                   0, {});
+                   num_temps, temps);
   } else if (projection0) {
     InstructionOperand outputs[] = {g.DefineAsFixed(projection0, eax)};
-    InstructionOperand temps[] = {g.TempRegister(edx)};
-    const int num_temps = arraysize(temps);
+    InstructionOperand temps[] = {g.TempRegister(edx), g.TempRegister(ebx)};
+    const int num_temps = arraysize(temps) - (block_root_register ? 0 : 1);
     selector->Emit(code, arraysize(outputs), outputs, arraysize(inputs), inputs,
                    num_temps, temps);
   } else {
-    InstructionOperand temps[] = {g.TempRegister(eax), g.TempRegister(edx)};
-    const int num_temps = arraysize(temps);
+    InstructionOperand temps[] = {g.TempRegister(eax), g.TempRegister(edx),
+                                  g.TempRegister(ebx)};
+    const int num_temps = arraysize(temps) - (block_root_register ? 0 : 1);
     selector->Emit(code, 0, nullptr, arraysize(inputs), inputs, num_temps,
                    temps);
   }
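
The ebx temps return here because ia32's 64-bit atomics are built on cmpxchg8b, which hard-wires edx:eax as the compare pair and ecx:ebx as the replacement pair, so ebx must be kept away from the allocator whenever it could otherwise be handed out. With embedded builtins, ebx is the root register and is never allocatable in the first place, so the trailing g.TempRegister(ebx) entry is dropped again. The num_temps arithmetic, as a compile-time check (a sketch, not V8 code):

#include <cstddef>

constexpr size_t NumTemps(size_t temps_size, bool block_root_register) {
  // The ebx entry is always last, so dropping one element removes exactly it.
  return temps_size - (block_root_register ? 0 : 1);
}

static_assert(NumTemps(3, true) == 3,
              "no embedded builtins: ebx must be blocked via an explicit temp");
static_assert(NumTemps(3, false) == 2,
              "embedded builtins: ebx is the root register, already blocked");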
@@ -1799,6 +1803,7 @@ void InstructionSelector::VisitWord32AtomicPairStore(Node* node) {
   Node* index = node->InputAt(1);
   Node* value = node->InputAt(2);
   Node* value_high = node->InputAt(3);
+  bool block_root_register = !FLAG_embedded_builtins;

   AddressingMode addressing_mode;
   InstructionOperand inputs[] = {
@@ -1808,8 +1813,9 @@ void InstructionSelector::VisitWord32AtomicPairStore(Node* node) {
   // Allocating temp registers here as stores are performed using an atomic
   // exchange, the output of which is stored in edx:eax, which should be saved
   // and restored at the end of the instruction.
-  InstructionOperand temps[] = {g.TempRegister(eax), g.TempRegister(edx)};
-  const int num_temps = arraysize(temps);
+  InstructionOperand temps[] = {g.TempRegister(eax), g.TempRegister(edx),
+                                g.TempRegister(ebx)};
+  const int num_temps = arraysize(temps) - (block_root_register ? 0 : 1);
   InstructionCode code =
       kIA32Word32AtomicPairStore | AddressingModeField::encode(addressing_mode);
   Emit(code, 0, nullptr, arraysize(inputs), inputs, num_temps, temps);
@@ -1843,6 +1849,7 @@ void InstructionSelector::VisitWord32AtomicPairCompareExchange(Node* node) {
   IA32OperandGenerator g(this);
   Node* index = node->InputAt(1);
   AddressingMode addressing_mode;
+  bool block_root_register = !FLAG_embedded_builtins;

   InstructionOperand inputs[] = {
       // High, Low values of old value
@@ -1859,18 +1866,22 @@ void InstructionSelector::VisitWord32AtomicPairCompareExchange(Node* node) {
       AddressingModeField::encode(addressing_mode);

   if (projection1) {
+    InstructionOperand temps[] = {g.TempRegister(ebx)};
     InstructionOperand outputs[] = {g.DefineAsFixed(projection0, eax),
                                     g.DefineAsFixed(projection1, edx)};
-    Emit(code, arraysize(outputs), outputs, arraysize(inputs), inputs, 0, {});
+    const int num_temps = arraysize(temps) - (block_root_register ? 0 : 1);
+    Emit(code, arraysize(outputs), outputs, arraysize(inputs), inputs,
+         num_temps, temps);
   } else if (projection0) {
     InstructionOperand outputs[] = {g.DefineAsFixed(projection0, eax)};
-    InstructionOperand temps[] = {g.TempRegister(edx)};
-    const int num_temps = arraysize(temps);
+    InstructionOperand temps[] = {g.TempRegister(edx), g.TempRegister(ebx)};
+    const int num_temps = arraysize(temps) - (block_root_register ? 0 : 1);
     Emit(code, arraysize(outputs), outputs, arraysize(inputs), inputs,
          num_temps, temps);
   } else {
-    InstructionOperand temps[] = {g.TempRegister(eax), g.TempRegister(edx)};
-    const int num_temps = arraysize(temps);
+    InstructionOperand temps[] = {g.TempRegister(eax), g.TempRegister(edx),
+                                  g.TempRegister(ebx)};
+    const int num_temps = arraysize(temps) - (block_root_register ? 0 : 1);
     Emit(code, 0, nullptr, arraysize(inputs), inputs, num_temps, temps);
   }
 }
...
@@ -1015,7 +1015,7 @@ void InstructionSelector::InitializeCallBuffer(Node* call, CallBuffer* buffer,
       if (poisoning_level_ != PoisoningMitigationLevel::kDontPoison &&
           unallocated.HasFixedRegisterPolicy()) {
         int reg = unallocated.fixed_register_index();
-        if (Register::from_code(reg) == kSpeculationPoisonRegister) {
+        if (reg == kSpeculationPoisonRegister.code()) {
           buffer->instruction_args[poison_alias_index] = g.TempImmediate(
               static_cast<int32_t>(buffer->instruction_args.size()));
           op = g.UseRegisterOrSlotOrConstant(*iter);
@@ -2590,6 +2590,7 @@ void InstructionSelector::VisitCall(Node* node, BasicBlock* handler) {
   if (flags & CallDescriptor::kAllowCallThroughSlot) {
     // TODO(v8:6666): Remove kAllowCallThroughSlot and use a pc-relative call
     // instead once builtins are embedded in every build configuration.
+    DCHECK(FLAG_embedded_builtins);
     call_buffer_flags |= kAllowCallThroughSlot;
 #ifndef V8_TARGET_ARCH_32_BIT
     // kAllowCallThroughSlot is only supported on ia32.
...
@@ -2458,24 +2458,28 @@ bool PipelineImpl::SelectInstructions(Linkage* linkage) {
     std::unique_ptr<const RegisterConfiguration> config;
     config.reset(RegisterConfiguration::RestrictGeneralRegisters(registers));
     AllocateRegisters(config.get(), call_descriptor, run_verifier);
-#ifdef V8_TARGET_ARCH_IA32
+  } else if (data->info()->GetPoisoningMitigationLevel() !=
+             PoisoningMitigationLevel::kDontPoison) {
+#if defined(V8_TARGET_ARCH_IA32)
+    DCHECK(!FLAG_embedded_builtins);
+#endif
+    AllocateRegisters(RegisterConfiguration::Poisoning(), call_descriptor,
+                      run_verifier);
   } else {
+#if defined(V8_TARGET_ARCH_IA32) && defined(V8_EMBEDDED_BUILTINS)
     // TODO(v8:6666): Ensure that that this configuration cooperates with
     // restricted allocatable registers above, i.e. that we guarantee a
     // restricted configuration cannot allocate kRootRegister on ia32.
+    static_assert(kRootRegister == kSpeculationPoisonRegister,
+                  "The following checks assume root equals poison register");
+    CHECK(!FLAG_untrusted_code_mitigations);
     AllocateRegisters(RegisterConfiguration::PreserveRootIA32(),
                       call_descriptor, run_verifier);
-  }
 #else
-  } else if (data->info()->GetPoisoningMitigationLevel() !=
-             PoisoningMitigationLevel::kDontPoison) {
-    AllocateRegisters(RegisterConfiguration::Poisoning(), call_descriptor,
-                      run_verifier);
-  } else {
     AllocateRegisters(RegisterConfiguration::Default(), call_descriptor,
                       run_verifier);
+#endif  // defined(V8_TARGET_ARCH_IA32) && defined(V8_EMBEDDED_BUILTINS)
   }
-#endif  // V8_TARGET_ARCH_IA32

   // Verify the instruction sequence has the same hash in two stages.
   VerifyGeneratedCodeIsIdempotent();
...
@@ -4630,12 +4630,12 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
         args[pos++] = undefined_node;
       }

+      CallDescriptor::Flags flags = CallDescriptor::kNoFlags;
 #ifdef V8_TARGET_ARCH_IA32
       // TODO(v8:6666): Remove kAllowCallThroughSlot and use a pc-relative
       // call instead once builtins are embedded in every build configuration.
-      CallDescriptor::Flags flags = CallDescriptor::kAllowCallThroughSlot;
-#else
-      CallDescriptor::Flags flags = CallDescriptor::kNoFlags;
+      flags = FLAG_embedded_builtins ? CallDescriptor::kAllowCallThroughSlot
+                                     : CallDescriptor::kNoFlags;
 #endif
       auto call_descriptor = Linkage::GetStubCallDescriptor(
           mcgraph()->zone(), ArgumentsAdaptorDescriptor{}, 1 + wasm_count,
...
@@ -809,9 +809,16 @@ void Deoptimizer::DoComputeOutputFrames() {
     }
   }

-  FrameDescription* topmost = output_[count - 1];
-  topmost->GetRegisterValues()->SetRegister(kRootRegister.code(),
-                                            isolate()->isolate_root());
+#if defined(V8_TARGET_ARCH_IA32)
+  constexpr bool kShouldInitializeRootRegister = FLAG_embedded_builtins;
+#else
+  constexpr bool kShouldInitializeRootRegister = true;
+#endif
+  if (kShouldInitializeRootRegister) {
+    FrameDescription* topmost = output_[count - 1];
+    topmost->GetRegisterValues()->SetRegister(kRootRegister.code(),
+                                              isolate()->isolate_root());
+  }

   // Print some helpful diagnostic information.
   if (trace_scope_ != nullptr) {
@@ -1477,7 +1484,7 @@ void Deoptimizer::DoComputeBuiltinContinuation(
   const bool must_handle_result =
       !is_topmost || deopt_kind_ == DeoptimizeKind::kLazy;

-#ifdef V8_TARGET_ARCH_IA32
+#if defined(V8_TARGET_ARCH_IA32) && defined(V8_EMBEDDED_BUILTINS)
   // TODO(v8:6666): Fold into Default config once root is fully supported.
   const RegisterConfiguration* config(
       RegisterConfiguration::PreserveRootIA32());
...
@@ -46,6 +46,10 @@ MacroAssembler::MacroAssembler(Isolate* isolate,
 }

 void TurboAssembler::InitializeRootRegister() {
+  // TODO(v8:6666): Initialize unconditionally once poisoning support has been
+  // removed.
+  if (!FLAG_embedded_builtins) return;
+
   ExternalReference isolate_root = ExternalReference::isolate_root(isolate());
   Move(kRootRegister, Immediate(isolate_root));
 }
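
InitializeRootRegister becomes conditional because, without embedded builtins, ebx reverts to being the poison register; both roles share ebx, which is exactly what the STATIC_ASSERT(kRootRegister == kSpeculationPoisonRegister) restored in Generate_CEntry pins down. When the root register is live, roots and external references become single ebx-relative loads instead of pointer immediates baked into the code, which is what makes isolate-independent embedded builtins possible. A sketch of the addressing win (hypothetical layout, not V8's real one):

#include <cstddef>

struct IsolateRoot {  // illustrative stand-in for the real isolate root layout
  void* roots[512];
};

// Encodes as a single `mov reg, [ebx + disp]` on ia32, with no pointer
// constant embedded in the instruction stream.
inline void* LoadRoot(const IsolateRoot* root_reg /* ebx */, size_t index) {
  return root_reg->roots[index];
}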
@@ -1995,6 +1999,10 @@ void TurboAssembler::ComputeCodeStartAddress(Register dst) {
   }
 }

+void TurboAssembler::ResetSpeculationPoisonRegister() {
+  mov(kSpeculationPoisonRegister, Immediate(-1));
+}
+
 }  // namespace internal
 }  // namespace v8
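
ResetSpeculationPoisonRegister writes -1, the all-ones identity for the and_ operations above, so a reset can never corrupt a live value; per the comment restored in Generate_CEntry, the register may also be clobbered freely at that point. A one-line sanity check (sketch):

#include <cstdint>

static_assert((0xDEADBEEFu & ~uint32_t{0}) == 0xDEADBEEFu,
              "an all-ones poison mask is the identity for AND");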
...
@@ -21,6 +21,7 @@ constexpr Register kReturnRegister2 = edi;
 constexpr Register kJSFunctionRegister = edi;
 constexpr Register kContextRegister = esi;
 constexpr Register kAllocateSizeRegister = edx;
+constexpr Register kSpeculationPoisonRegister = ebx;
 constexpr Register kInterpreterAccumulatorRegister = eax;
 constexpr Register kInterpreterBytecodeOffsetRegister = edx;
 constexpr Register kInterpreterBytecodeArrayRegister = edi;
@@ -48,9 +49,6 @@ constexpr Register kWasmCompileLazyFuncIndexRegister = edi;
 constexpr Register kRootRegister = ebx;

-// TODO(860429): Remove remaining poisoning infrastructure on ia32.
-constexpr Register kSpeculationPoisonRegister = no_reg;
-
 // Convenience for platform-independent signatures. We do not normally
 // distinguish memory operands from other operands on ia32.
 typedef Operand MemOperand;
@@ -456,8 +454,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
   // This is an alternative to embedding the {CodeObject} handle as a reference.
   void ComputeCodeStartAddress(Register dst);

-  // TODO(860429): Remove remaining poisoning infrastructure on ia32.
-  void ResetSpeculationPoisonRegister() { UNREACHABLE(); }
+  void ResetSpeculationPoisonRegister();
 };

 // MacroAssembler implements a collection of frequently used macros.
...
@@ -166,7 +166,7 @@ static base::LazyInstance<ArchDefaultPoisoningRegisterConfiguration,
                           PoisoningRegisterConfigurationInitializer>::type
     kDefaultPoisoningRegisterConfiguration = LAZY_INSTANCE_INITIALIZER;

-#ifdef V8_TARGET_ARCH_IA32
+#if defined(V8_TARGET_ARCH_IA32) && defined(V8_EMBEDDED_BUILTINS)
 // Allocatable registers with the root register removed.
 // TODO(v8:6666): Once all builtins have been migrated, we could remove this
 // configuration and remove kRootRegister from ALLOCATABLE_GENERAL_REGISTERS
@@ -213,7 +213,7 @@ struct PreserveRootIA32RegisterConfigurationInitializer {
 static base::LazyInstance<ArchPreserveRootIA32RegisterConfiguration,
                           PreserveRootIA32RegisterConfigurationInitializer>::
     type kPreserveRootIA32RegisterConfiguration = LAZY_INSTANCE_INITIALIZER;
-#endif  // V8_TARGET_ARCH_IA32
+#endif  // defined(V8_TARGET_ARCH_IA32) && defined(V8_EMBEDDED_BUILTINS)

 // RestrictedRegisterConfiguration uses the subset of allocatable general
 // registers the architecture support, which results into generating assembly
@@ -267,11 +267,11 @@ const RegisterConfiguration* RegisterConfiguration::Poisoning() {
   return &kDefaultPoisoningRegisterConfiguration.Get();
 }

-#ifdef V8_TARGET_ARCH_IA32
+#if defined(V8_TARGET_ARCH_IA32) && defined(V8_EMBEDDED_BUILTINS)
 const RegisterConfiguration* RegisterConfiguration::PreserveRootIA32() {
   return &kPreserveRootIA32RegisterConfiguration.Get();
 }
-#endif  // V8_TARGET_ARCH_IA32
+#endif  // defined(V8_TARGET_ARCH_IA32) && defined(V8_EMBEDDED_BUILTINS)

 const RegisterConfiguration* RegisterConfiguration::RestrictGeneralRegisters(
     RegList registers) {
...