Commit 17f47d74 authored by Michael Achenbach, committed by Commit Bot

Revert "[ia32] Remove poisoning logic on ia32"

This reverts commit a31a6230.

Reason for revert:
https://ci.chromium.org/p/v8/builders/luci.v8.ci/V8%20Linux%20-%20nosnap%20-%20debug/21332

Original change's description:
> [ia32] Remove poisoning logic on ia32
> 
> Poisoning was disabled by default on ia32 a while ago. This CL
> removes its logic from ia32 code generation, which will let us move
> towards fuller (and unconditional) root register support.
> 
> Bug: chromium:860429, v8:8254
> Change-Id: I8f672cf48a6ffc7bf21e7794c1b7463d7f8b9594
> Reviewed-on: https://chromium-review.googlesource.com/c/1296131
> Commit-Queue: Jakob Gruber <jgruber@chromium.org>
> Reviewed-by: Michael Starzinger <mstarzinger@chromium.org>
> Reviewed-by: Jaroslav Sevcik <jarin@chromium.org>
> Cr-Commit-Position: refs/heads/master@{#56978}

TBR=mstarzinger@chromium.org,jarin@chromium.org,jgruber@chromium.org

Change-Id: I305e9e1719fb4b3f8ef267c232723db9b52966e9
No-Presubmit: true
No-Tree-Checks: true
No-Try: true
Bug: chromium:860429, v8:8254
Reviewed-on: https://chromium-review.googlesource.com/c/1299015
Reviewed-by: Michael Achenbach <machenbach@chromium.org>
Commit-Queue: Michael Achenbach <machenbach@chromium.org>
Cr-Commit-Position: refs/heads/master@{#56980}
parent 9cde8808
@@ -1334,8 +1334,13 @@ namespace {
void Generate_ContinueToBuiltinHelper(MacroAssembler* masm,
bool java_script_builtin,
bool with_result) {
#ifdef V8_EMBEDDED_BUILTINS
// TODO(v8:6666): Fold into Default config once root is fully supported.
const RegisterConfiguration* config(
RegisterConfiguration::PreserveRootIA32());
#else
const RegisterConfiguration* config(RegisterConfiguration::Default());
#endif
int allocatable_register_count = config->num_allocatable_general_registers();
if (with_result) {
// Overwrite the hole inserted by the deoptimizer with the return value from
@@ -1365,20 +1370,32 @@ void Generate_ContinueToBuiltinHelper(MacroAssembler* masm,
} // namespace
void Builtins::Generate_ContinueToCodeStubBuiltin(MacroAssembler* masm) {
#ifdef V8_EMBEDDED_BUILTINS
// TODO(v8:6666): Remove the ifdef once root is preserved by default.
#endif
Generate_ContinueToBuiltinHelper(masm, false, false);
}
void Builtins::Generate_ContinueToCodeStubBuiltinWithResult(
MacroAssembler* masm) {
#ifdef V8_EMBEDDED_BUILTINS
// TODO(v8:6666): Remove the ifdef once root is preserved by default.
#endif
Generate_ContinueToBuiltinHelper(masm, false, true);
}
void Builtins::Generate_ContinueToJavaScriptBuiltin(MacroAssembler* masm) {
#ifdef V8_EMBEDDED_BUILTINS
// TODO(v8:6666): Remove the ifdef once root is preserved by default.
#endif
Generate_ContinueToBuiltinHelper(masm, true, false);
}
void Builtins::Generate_ContinueToJavaScriptBuiltinWithResult(
MacroAssembler* masm) {
#ifdef V8_EMBEDDED_BUILTINS
// TODO(v8:6666): Remove the ifdef once root is preserved by default.
#endif
Generate_ContinueToBuiltinHelper(masm, true, true);
}
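The ifdefs above matter because Generate_ContinueToBuiltinHelper sizes the builtin continuation frame from config->num_allocatable_general_registers(): the deoptimizer materializes one stack slot per allocatable general register, so builtin generation has to pick the same RegisterConfiguration the register allocator and deoptimizer use. A minimal sanity check of that invariant, assuming the standard ia32 allocatable set (eax, ecx, edx, ebx, esi, edi):

#include <cassert>

int main() {
  // Sketch: PreserveRootIA32 removes ebx (the root register) from the
  // default ia32 allocatable set, so the two configurations disagree by
  // exactly one continuation-frame slot.
  const int kDefaultAllocatable = 6;       // eax, ecx, edx, ebx, esi, edi
  const int kPreserveRootAllocatable = 5;  // ebx carved out for kRootRegister
  assert(kDefaultAllocatable - kPreserveRootAllocatable == 1);
}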
@@ -2490,6 +2507,10 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
// If argv_mode == kArgvInRegister:
// ecx: pointer to the first argument
#ifdef V8_EMBEDDED_BUILTINS
// TODO(v8:6666): Remove the ifdef once branch load poisoning is removed.
#endif
STATIC_ASSERT(eax == kRuntimeCallArgCountRegister);
STATIC_ASSERT(ecx == kRuntimeCallArgvRegister);
STATIC_ASSERT(edx == kRuntimeCallFunctionRegister);
@@ -2611,6 +2632,17 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
__ mov(Operand(ebp, StandardFrameConstants::kContextOffset), esi);
__ bind(&skip);
#ifdef V8_EMBEDDED_BUILTINS
STATIC_ASSERT(kRootRegister == kSpeculationPoisonRegister);
CHECK(!FLAG_untrusted_code_mitigations);
#else
// Reset the masking register. This is done independent of the underlying
// feature flag {FLAG_untrusted_code_mitigations} to make the snapshot work
// with both configurations. It is safe to always do this, because the
// underlying register is caller-saved and can be arbitrarily clobbered.
__ ResetSpeculationPoisonRegister();
#endif
// Compute the handler entry address and jump to it.
__ mov(edi, __ ExternalReferenceAsOperand(pending_handler_entrypoint_address,
edi));
......
@@ -309,6 +309,30 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
Zone* zone_;
};
void MoveOperandIfAliasedWithPoisonRegister(Instruction* call_instruction,
CodeGenerator* gen) {
IA32OperandConverter i(gen, call_instruction);
int const poison_index = i.InputInt32(1);
if (poison_index == -1) {
// No aliasing -> nothing to move.
return;
}
i.MoveInstructionOperandToRegister(kSpeculationPoisonRegister,
call_instruction->InputAt(poison_index));
}
void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen,
InstructionCode opcode, Instruction* instr,
IA32OperandConverter& i) {
const MemoryAccessMode access_mode =
static_cast<MemoryAccessMode>(MiscField::decode(opcode));
if (access_mode == kMemoryAccessPoisoned) {
Register value = i.OutputRegister();
codegen->tasm()->and_(value, kSpeculationPoisonRegister);
}
}
} // namespace
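These two helpers carry the restored mitigation. EmitWordLoadPoisoningIfNeeded masks every load marked kMemoryAccessPoisoned with the poison register, and MoveOperandIfAliasedWithPoisonRegister handles the ia32-specific wrinkle that a call argument may be pinned to ebx, the register the mask itself lives in: the instruction selector records the operand's index instead of fixing it to ebx, and the code generator moves it in only at the call site, once the mask is no longer needed. The masking idea itself, as a sketch rather than V8's actual API:

#include <cstdint>

// A load performed under a possibly-mispredicted branch is ANDed with the
// poison mask: it yields the real value on the architectural path
// (mask == ~0) and 0 on a misspeculated one (mask == 0), denying Spectre v1
// gadgets any secret-dependent data.
uint32_t PoisonedLoad(const uint32_t* address, uint32_t poison_mask) {
  return *address & poison_mask;  // cf. and_(value, kSpeculationPoisonRegister)
}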
#define ASSEMBLE_COMPARE(asm_instr) \
@@ -605,13 +629,23 @@ void CodeGenerator::BailoutIfDeoptimized() {
}
void CodeGenerator::GenerateSpeculationPoisonFromCodeStartRegister() {
// TODO(860429): Remove remaining poisoning infrastructure on ia32.
UNREACHABLE();
__ push(eax); // Push eax so we can use it as a scratch register.
// Set a mask which has all bits set in the normal case, but has all
// bits cleared if we are speculatively executing the wrong PC.
__ ComputeCodeStartAddress(eax);
__ mov(kSpeculationPoisonRegister, Immediate(0));
__ cmp(kJavaScriptCallCodeStartRegister, eax);
__ mov(eax, Immediate(-1));
__ cmov(equal, kSpeculationPoisonRegister, eax);
__ pop(eax); // Restore eax.
}
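In C terms the restored sequence computes the following (a sketch of the semantics, not the emitted bytes; the cmov keeps it branchless so branch prediction cannot skip the comparison):

#include <cstdint>

uint32_t SpeculationPoisonFromCodeStart(uint32_t code_start_register,
                                        uint32_t computed_code_start) {
  uint32_t mask = 0;  // assume we arrived here with the wrong PC
  if (code_start_register == computed_code_start) {  // models cmov(equal, ...)
    mask = ~0u;  // all bits set: no poisoning
  }
  return mask;
}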
void CodeGenerator::AssembleRegisterArgumentPoisoning() {
// TODO(860429): Remove remaining poisoning infrastructure on ia32.
UNREACHABLE();
__ and_(kJSFunctionRegister, kSpeculationPoisonRegister);
__ and_(kContextRegister, kSpeculationPoisonRegister);
__ and_(esp, kSpeculationPoisonRegister);
}
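Note what masking with a zeroed register achieves here: on a misspeculated call the function, the context, and even esp all collapse to 0, so any speculative stack access faults instead of leaking data, while an all-ones mask leaves every register untouched. A quick check of both cases (sketch):

#include <cassert>
#include <cstdint>

int main() {
  const uint32_t kPoisoned = 0u, kClean = ~0u;
  const uint32_t esp = 0xFF00F000u;  // hypothetical stack pointer value
  assert((esp & kClean) == esp);     // architectural path: no-op
  assert((esp & kPoisoned) == 0u);   // misspeculated: esp becomes null
}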
// Assembles an instruction after register allocation, producing machine code.
@@ -622,6 +656,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
ArchOpcode arch_opcode = ArchOpcodeField::decode(opcode);
switch (arch_opcode) {
case kArchCallCodeObject: {
MoveOperandIfAliasedWithPoisonRegister(instr, this);
InstructionOperand* op = instr->InputAt(0);
if (op->IsImmediate()) {
Handle<Code> code = i.InputCode(0);
@@ -660,6 +695,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kArchCallWasmFunction: {
MoveOperandIfAliasedWithPoisonRegister(instr, this);
if (HasImmediateInput(instr, 0)) {
Constant constant = i.ToConstant(instr->InputAt(0));
Address wasm_code = static_cast<Address>(constant.ToInt32());
@@ -686,6 +722,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kArchTailCallCodeObjectFromJSFunction:
case kArchTailCallCodeObject: {
MoveOperandIfAliasedWithPoisonRegister(instr, this);
if (arch_opcode == kArchTailCallCodeObjectFromJSFunction) {
AssemblePopArgumentsAdaptorFrame(kJavaScriptCallArgCountRegister,
no_reg, no_reg, no_reg);
@@ -710,6 +747,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kArchTailCallWasm: {
MoveOperandIfAliasedWithPoisonRegister(instr, this);
if (HasImmediateInput(instr, 0)) {
Constant constant = i.ToConstant(instr->InputAt(0));
Address wasm_code = static_cast<Address>(constant.ToInt32());
@@ -727,6 +765,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kArchTailCallAddress: {
MoveOperandIfAliasedWithPoisonRegister(instr, this);
CHECK(!HasImmediateInput(instr, 0));
Register reg = i.InputRegister(0);
DCHECK_IMPLIES(
@@ -742,6 +781,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kArchCallJSFunction: {
MoveOperandIfAliasedWithPoisonRegister(instr, this);
Register func = i.InputRegister(0);
if (FLAG_debug_code) {
// Check the function's context matches the context argument.
@@ -792,6 +832,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
AssemblePrepareTailCall();
break;
case kArchCallCFunction: {
MoveOperandIfAliasedWithPoisonRegister(instr, this);
int const num_parameters = MiscField::decode(instr->opcode());
if (HasImmediateInput(instr, 0)) {
ExternalReference ref = i.InputExternalReference(0);
@@ -1176,8 +1217,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ bswap(i.OutputRegister());
break;
case kArchWordPoisonOnSpeculation:
// TODO(860429): Remove remaining poisoning infrastructure on ia32.
UNREACHABLE();
DCHECK_EQ(i.OutputRegister(), i.InputRegister(0));
__ and_(i.InputRegister(0), kSpeculationPoisonRegister);
break;
case kLFence:
__ lfence();
@@ -1552,9 +1593,11 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
case kIA32Movsxbl:
ASSEMBLE_MOVX(movsx_b);
EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kIA32Movzxbl:
ASSEMBLE_MOVX(movzx_b);
EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kIA32Movb: {
size_t index = 0;
@@ -1564,13 +1607,16 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
} else {
__ mov_b(operand, i.InputRegister(index));
}
EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
}
case kIA32Movsxwl:
ASSEMBLE_MOVX(movsx_w);
EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kIA32Movzxwl:
ASSEMBLE_MOVX(movzx_w);
EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kIA32Movw: {
size_t index = 0;
@@ -1580,11 +1626,13 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
} else {
__ mov_w(operand, i.InputRegister(index));
}
EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
}
case kIA32Movl:
if (instr->HasOutput()) {
__ mov(i.OutputRegister(), i.MemoryOperand());
EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
} else {
size_t index = 0;
Operand operand = i.MemoryOperand(&index);
@@ -3871,8 +3919,15 @@ void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
void CodeGenerator::AssembleBranchPoisoning(FlagsCondition condition,
Instruction* instr) {
// TODO(860429): Remove remaining poisoning infrastructure on ia32.
UNREACHABLE();
// TODO(jarin) Handle float comparisons (kUnordered[Not]Equal).
if (condition == kUnorderedEqual || condition == kUnorderedNotEqual) {
return;
}
condition = NegateFlagsCondition(condition);
__ setcc(FlagsConditionToCondition(condition), kSpeculationPoisonRegister);
__ add(kSpeculationPoisonRegister, Immediate(255));
__ sar(kSpeculationPoisonRegister, 31u);
}
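The restored three-instruction tail is denser than it looks: setcc writes only the low byte of the poison register, so bits 8..31 of the previous mask survive into the add, and the add/sar pair then produces all-ones exactly when the old mask was all-ones and the negated condition reported no misspeculation. A self-checking model of that case analysis (a sketch; assumes the mask is only ever 0 or ~0):

#include <cassert>
#include <cstdint>

// Models: setcc(negated_cond, reg); add(reg, 255); sar(reg, 31).
uint32_t BranchPoison(uint32_t old_mask, bool misspeculated) {
  uint32_t reg = (old_mask & ~0xFFu) | (misspeculated ? 1u : 0u);  // setcc bl
  reg += 255u;                                                     // add 255
  // sar 31 broadcasts the sign bit (arithmetic shift, as on x86).
  return static_cast<uint32_t>(static_cast<int32_t>(reg) >> 31);
}

int main() {
  const uint32_t kOn = ~0u, kOff = 0u;
  assert(BranchPoison(kOn, false) == kOn);    // real path: stay unpoisoned
  assert(BranchPoison(kOn, true) == kOff);    // misspeculation zeroes the mask
  assert(BranchPoison(kOff, false) == kOff);  // once poisoned, stays poisoned
  assert(BranchPoison(kOff, true) == kOff);
}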
void CodeGenerator::AssembleArchDeoptBranch(Instruction* instr,
@@ -4200,6 +4255,7 @@ void CodeGenerator::AssembleConstructFrame() {
if (FLAG_code_comments) __ RecordComment("-- OSR entrypoint --");
osr_pc_offset_ = __ pc_offset();
shrink_slots -= osr_helper()->UnoptimizedFrameSlots();
ResetSpeculationPoison();
}
const RegList saves = call_descriptor->CalleeSavedRegisters();
......
@@ -1353,6 +1353,7 @@ void VisitPairAtomicBinOp(InstructionSelector* selector, Node* node,
// For Word64 operations, the value input is split into a high node and
// a low node in the int64-lowering phase.
Node* value_high = node->InputAt(3);
bool block_root_register = !FLAG_embedded_builtins;
// Wasm lives in 32-bit address space, so we do not need to worry about
// base/index lowering. This will need to be fixed for Wasm64.
@@ -1365,19 +1366,22 @@ void VisitPairAtomicBinOp(InstructionSelector* selector, Node* node,
Node* projection0 = NodeProperties::FindProjection(node, 0);
Node* projection1 = NodeProperties::FindProjection(node, 1);
if (projection1) {
InstructionOperand temps[] = {g.TempRegister(ebx)};
InstructionOperand outputs[] = {g.DefineAsFixed(projection0, eax),
g.DefineAsFixed(projection1, edx)};
const int num_temps = arraysize(temps) - (block_root_register ? 0 : 1);
selector->Emit(code, arraysize(outputs), outputs, arraysize(inputs), inputs,
0, {});
num_temps, temps);
} else if (projection0) {
InstructionOperand outputs[] = {g.DefineAsFixed(projection0, eax)};
InstructionOperand temps[] = {g.TempRegister(edx)};
const int num_temps = arraysize(temps);
InstructionOperand temps[] = {g.TempRegister(edx), g.TempRegister(ebx)};
const int num_temps = arraysize(temps) - (block_root_register ? 0 : 1);
selector->Emit(code, arraysize(outputs), outputs, arraysize(inputs), inputs,
num_temps, temps);
} else {
InstructionOperand temps[] = {g.TempRegister(eax), g.TempRegister(edx)};
const int num_temps = arraysize(temps);
InstructionOperand temps[] = {g.TempRegister(eax), g.TempRegister(edx),
g.TempRegister(ebx)};
const int num_temps = arraysize(temps) - (block_root_register ? 0 : 1);
selector->Emit(code, 0, nullptr, arraysize(inputs), inputs, num_temps,
temps);
}
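All three branches share a counting idiom: ebx sits last in each temps array, so the expression arraysize(temps) - (block_root_register ? 0 : 1) either reports it (without embedded builtins, where ebx is allocatable and must be blocked so cmpxchg8b can clobber it) or silently drops it (with embedded builtins, where ebx is the reserved root register and never allocatable anyway). A quick check of the arithmetic (sketch):

#include <cassert>
#include <cstddef>

int main() {
  const char* temps[] = {"eax", "edx", "ebx"};  // ebx intentionally last
  for (bool block_root_register : {true, false}) {
    // block_root_register corresponds to !FLAG_embedded_builtins above.
    const size_t num_temps =
        sizeof(temps) / sizeof(temps[0]) - (block_root_register ? 0 : 1);
    assert(num_temps == (block_root_register ? 3u : 2u));
  }
}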
@@ -1799,6 +1803,7 @@ void InstructionSelector::VisitWord32AtomicPairStore(Node* node) {
Node* index = node->InputAt(1);
Node* value = node->InputAt(2);
Node* value_high = node->InputAt(3);
bool block_root_register = !FLAG_embedded_builtins;
AddressingMode addressing_mode;
InstructionOperand inputs[] = {
@@ -1808,8 +1813,9 @@ void InstructionSelector::VisitWord32AtomicPairStore(Node* node) {
// Allocating temp registers here as stores are performed using an atomic
// exchange, the output of which is stored in edx:eax, which should be saved
// and restored at the end of the instruction.
InstructionOperand temps[] = {g.TempRegister(eax), g.TempRegister(edx)};
const int num_temps = arraysize(temps);
InstructionOperand temps[] = {g.TempRegister(eax), g.TempRegister(edx),
g.TempRegister(ebx)};
const int num_temps = arraysize(temps) - (block_root_register ? 0 : 1);
InstructionCode code =
kIA32Word32AtomicPairStore | AddressingModeField::encode(addressing_mode);
Emit(code, 0, nullptr, arraysize(inputs), inputs, num_temps, temps);
@@ -1843,6 +1849,7 @@ void InstructionSelector::VisitWord32AtomicPairCompareExchange(Node* node) {
IA32OperandGenerator g(this);
Node* index = node->InputAt(1);
AddressingMode addressing_mode;
bool block_root_register = !FLAG_embedded_builtins;
InstructionOperand inputs[] = {
// High, Low values of old value
@@ -1859,18 +1866,22 @@ void InstructionSelector::VisitWord32AtomicPairCompareExchange(Node* node) {
AddressingModeField::encode(addressing_mode);
if (projection1) {
InstructionOperand temps[] = {g.TempRegister(ebx)};
InstructionOperand outputs[] = {g.DefineAsFixed(projection0, eax),
g.DefineAsFixed(projection1, edx)};
Emit(code, arraysize(outputs), outputs, arraysize(inputs), inputs, 0, {});
const int num_temps = arraysize(temps) - (block_root_register ? 0 : 1);
Emit(code, arraysize(outputs), outputs, arraysize(inputs), inputs,
num_temps, temps);
} else if (projection0) {
InstructionOperand outputs[] = {g.DefineAsFixed(projection0, eax)};
InstructionOperand temps[] = {g.TempRegister(edx)};
const int num_temps = arraysize(temps);
InstructionOperand temps[] = {g.TempRegister(edx), g.TempRegister(ebx)};
const int num_temps = arraysize(temps) - (block_root_register ? 0 : 1);
Emit(code, arraysize(outputs), outputs, arraysize(inputs), inputs,
num_temps, temps);
} else {
InstructionOperand temps[] = {g.TempRegister(eax), g.TempRegister(edx)};
const int num_temps = arraysize(temps);
InstructionOperand temps[] = {g.TempRegister(eax), g.TempRegister(edx),
g.TempRegister(ebx)};
const int num_temps = arraysize(temps) - (block_root_register ? 0 : 1);
Emit(code, 0, nullptr, arraysize(inputs), inputs, num_temps, temps);
}
}
......
@@ -1015,7 +1015,7 @@ void InstructionSelector::InitializeCallBuffer(Node* call, CallBuffer* buffer,
if (poisoning_level_ != PoisoningMitigationLevel::kDontPoison &&
unallocated.HasFixedRegisterPolicy()) {
int reg = unallocated.fixed_register_index();
if (Register::from_code(reg) == kSpeculationPoisonRegister) {
if (reg == kSpeculationPoisonRegister.code()) {
buffer->instruction_args[poison_alias_index] = g.TempImmediate(
static_cast<int32_t>(buffer->instruction_args.size()));
op = g.UseRegisterOrSlotOrConstant(*iter);
@@ -2590,6 +2590,7 @@ void InstructionSelector::VisitCall(Node* node, BasicBlock* handler) {
if (flags & CallDescriptor::kAllowCallThroughSlot) {
// TODO(v8:6666): Remove kAllowCallThroughSlot and use a pc-relative call
// instead once builtins are embedded in every build configuration.
DCHECK(FLAG_embedded_builtins);
call_buffer_flags |= kAllowCallThroughSlot;
#ifndef V8_TARGET_ARCH_32_BIT
// kAllowCallThroughSlot is only supported on ia32.
......
@@ -2458,24 +2458,28 @@ bool PipelineImpl::SelectInstructions(Linkage* linkage) {
std::unique_ptr<const RegisterConfiguration> config;
config.reset(RegisterConfiguration::RestrictGeneralRegisters(registers));
AllocateRegisters(config.get(), call_descriptor, run_verifier);
#ifdef V8_TARGET_ARCH_IA32
} else if (data->info()->GetPoisoningMitigationLevel() !=
PoisoningMitigationLevel::kDontPoison) {
#if defined(V8_TARGET_ARCH_IA32)
DCHECK(!FLAG_embedded_builtins);
#endif
AllocateRegisters(RegisterConfiguration::Poisoning(), call_descriptor,
run_verifier);
} else {
#if defined(V8_TARGET_ARCH_IA32) && defined(V8_EMBEDDED_BUILTINS)
// TODO(v8:6666): Ensure that this configuration cooperates with
// restricted allocatable registers above, i.e. that we guarantee a
// restricted configuration cannot allocate kRootRegister on ia32.
static_assert(kRootRegister == kSpeculationPoisonRegister,
"The following checks assume root equals poison register");
CHECK(!FLAG_untrusted_code_mitigations);
AllocateRegisters(RegisterConfiguration::PreserveRootIA32(),
call_descriptor, run_verifier);
}
#else
} else if (data->info()->GetPoisoningMitigationLevel() !=
PoisoningMitigationLevel::kDontPoison) {
AllocateRegisters(RegisterConfiguration::Poisoning(), call_descriptor,
run_verifier);
} else {
AllocateRegisters(RegisterConfiguration::Default(), call_descriptor,
run_verifier);
#endif // defined(V8_TARGET_ARCH_IA32) && defined(V8_EMBEDDED_BUILTINS)
}
#endif // V8_TARGET_ARCH_IA32
// Verify the instruction sequence has the same hash in two stages.
VerifyGeneratedCodeIsIdempotent();
......
@@ -4630,12 +4630,12 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
args[pos++] = undefined_node;
}
CallDescriptor::Flags flags = CallDescriptor::kNoFlags;
#ifdef V8_TARGET_ARCH_IA32
// TODO(v8:6666): Remove kAllowCallThroughSlot and use a pc-relative
// call instead once builtins are embedded in every build configuration.
CallDescriptor::Flags flags = CallDescriptor::kAllowCallThroughSlot;
#else
CallDescriptor::Flags flags = CallDescriptor::kNoFlags;
flags = FLAG_embedded_builtins ? CallDescriptor::kAllowCallThroughSlot
: CallDescriptor::kNoFlags;
#endif
auto call_descriptor = Linkage::GetStubCallDescriptor(
mcgraph()->zone(), ArgumentsAdaptorDescriptor{}, 1 + wasm_count,
......
@@ -809,9 +809,16 @@ void Deoptimizer::DoComputeOutputFrames() {
}
}
FrameDescription* topmost = output_[count - 1];
topmost->GetRegisterValues()->SetRegister(kRootRegister.code(),
isolate()->isolate_root());
#if defined(V8_TARGET_ARCH_IA32)
constexpr bool kShouldInitializeRootRegister = FLAG_embedded_builtins;
#else
constexpr bool kShouldInitializeRootRegister = true;
#endif
if (kShouldInitializeRootRegister) {
FrameDescription* topmost = output_[count - 1];
topmost->GetRegisterValues()->SetRegister(kRootRegister.code(),
isolate()->isolate_root());
}
// Print some helpful diagnostic information.
if (trace_scope_ != nullptr) {
@@ -1477,7 +1484,7 @@ void Deoptimizer::DoComputeBuiltinContinuation(
const bool must_handle_result =
!is_topmost || deopt_kind_ == DeoptimizeKind::kLazy;
#ifdef V8_TARGET_ARCH_IA32
#if defined(V8_TARGET_ARCH_IA32) && defined(V8_EMBEDDED_BUILTINS)
// TODO(v8:6666): Fold into Default config once root is fully supported.
const RegisterConfiguration* config(
RegisterConfiguration::PreserveRootIA32());
......
@@ -46,6 +46,10 @@ MacroAssembler::MacroAssembler(Isolate* isolate,
}
void TurboAssembler::InitializeRootRegister() {
// TODO(v8:6666): Initialize unconditionally once poisoning support has been
// removed.
if (!FLAG_embedded_builtins) return;
ExternalReference isolate_root = ExternalReference::isolate_root(isolate());
Move(kRootRegister, Immediate(isolate_root));
}
@@ -1995,6 +1999,10 @@ void TurboAssembler::ComputeCodeStartAddress(Register dst) {
}
}
void TurboAssembler::ResetSpeculationPoisonRegister() {
mov(kSpeculationPoisonRegister, Immediate(-1));
}
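Resetting to Immediate(-1) makes the mask all-ones, the identity for the and_ operations used throughout, so a freshly entered frame starts out unpoisoned. Trivial but worth stating (sketch):

#include <cassert>
#include <cstdint>

int main() {
  const uint32_t kResetMask = static_cast<uint32_t>(-1);  // mov reg, -1
  for (uint32_t v : {0u, 1u, 0xDEADBEEFu}) {
    assert((v & kResetMask) == v);  // all-ones mask leaves values untouched
  }
}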
} // namespace internal
} // namespace v8
......
@@ -21,6 +21,7 @@ constexpr Register kReturnRegister2 = edi;
constexpr Register kJSFunctionRegister = edi;
constexpr Register kContextRegister = esi;
constexpr Register kAllocateSizeRegister = edx;
constexpr Register kSpeculationPoisonRegister = ebx;
constexpr Register kInterpreterAccumulatorRegister = eax;
constexpr Register kInterpreterBytecodeOffsetRegister = edx;
constexpr Register kInterpreterBytecodeArrayRegister = edi;
@@ -48,9 +49,6 @@ constexpr Register kWasmCompileLazyFuncIndexRegister = edi;
constexpr Register kRootRegister = ebx;
// TODO(860429): Remove remaining poisoning infrastructure on ia32.
constexpr Register kSpeculationPoisonRegister = no_reg;
// Convenience for platform-independent signatures. We do not normally
// distinguish memory operands from other operands on ia32.
typedef Operand MemOperand;
@@ -456,8 +454,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
// This is an alternative to embedding the {CodeObject} handle as a reference.
void ComputeCodeStartAddress(Register dst);
// TODO(860429): Remove remaining poisoning infrastructure on ia32.
void ResetSpeculationPoisonRegister() { UNREACHABLE(); }
void ResetSpeculationPoisonRegister();
};
// MacroAssembler implements a collection of frequently used macros.
......
@@ -166,7 +166,7 @@ static base::LazyInstance<ArchDefaultPoisoningRegisterConfiguration,
PoisoningRegisterConfigurationInitializer>::type
kDefaultPoisoningRegisterConfiguration = LAZY_INSTANCE_INITIALIZER;
#ifdef V8_TARGET_ARCH_IA32
#if defined(V8_TARGET_ARCH_IA32) && defined(V8_EMBEDDED_BUILTINS)
// Allocatable registers with the root register removed.
// TODO(v8:6666): Once all builtins have been migrated, we could remove this
// configuration and remove kRootRegister from ALLOCATABLE_GENERAL_REGISTERS
@@ -213,7 +213,7 @@ struct PreserveRootIA32RegisterConfigurationInitializer {
static base::LazyInstance<ArchPreserveRootIA32RegisterConfiguration,
PreserveRootIA32RegisterConfigurationInitializer>::
type kPreserveRootIA32RegisterConfiguration = LAZY_INSTANCE_INITIALIZER;
#endif // V8_TARGET_ARCH_IA32
#endif // defined(V8_TARGET_ARCH_IA32) && defined(V8_EMBEDDED_BUILTINS)
// RestrictedRegisterConfiguration uses the subset of allocatable general
// registers the architecture supports, which results in generating assembly
@@ -267,11 +267,11 @@ const RegisterConfiguration* RegisterConfiguration::Poisoning() {
return &kDefaultPoisoningRegisterConfiguration.Get();
}
#ifdef V8_TARGET_ARCH_IA32
#if defined(V8_TARGET_ARCH_IA32) && defined(V8_EMBEDDED_BUILTINS)
const RegisterConfiguration* RegisterConfiguration::PreserveRootIA32() {
return &kPreserveRootIA32RegisterConfiguration.Get();
}
#endif // V8_TARGET_ARCH_IA32
#endif // defined(V8_TARGET_ARCH_IA32) && defined(V8_EMBEDDED_BUILTINS)
const RegisterConfiguration* RegisterConfiguration::RestrictGeneralRegisters(
RegList registers) {
......