Commit adfe25c0 authored by Jakob Gruber, committed by Commit Bot

[ia32] Remove embedded builtin porting helpers

This removes a number of porting helpers: the scopes that mark the
addressability of ebx, the printing of embedded builtin candidates, and
the call/jump mechanism that goes through a virtual target register.

This also disables root register verification by default on ia32. The
verification code itself can be removed completely in a follow-up.
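For context, a minimal self-contained sketch of the ebx-addressability helper being deleted, simplified from the removed code in src/ia32/assembler-ia32.h further down in this diff (in V8 the class lives on the real Assembler and the masm is threaded through each builtin generator):

#include <cassert>

// Simplified sketch of the deleted helper. On ia32, ebx doubles as
// kRootRegister, so a debug-only flag tracked whether the current code
// region was still allowed to address ebx directly.
class Assembler {
 public:
  // RAII scope: flips is_ebx_addressable_ on entry, restores it on exit.
  template <bool new_value>
  class SetRootRegisterSupportScope final {
   public:
    explicit SetRootRegisterSupportScope(Assembler* assembler)
        : assembler_(assembler), old_value_(assembler->is_ebx_addressable_) {
      assembler_->is_ebx_addressable_ = new_value;
    }
    ~SetRootRegisterSupportScope() {
      assembler_->is_ebx_addressable_ = old_value_;
    }

   private:
    Assembler* assembler_;
    const bool old_value_;
  };
  // Marks code that never clobbers ebx (the root register stays live).
  typedef SetRootRegisterSupportScope<false> SupportsRootRegisterScope;
  // Marks regions that may clobber ebx, e.g. around an explicit spill.
  typedef SetRootRegisterSupportScope<true> AllowExplicitEbxAccessScope;

  // Debug assertion that instruction emitters called before encoding ebx.
  void AssertIsAddressable() const { assert(is_ebx_addressable_); }

 private:
  bool is_ebx_addressable_ = true;
};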

Bug: v8:6666
Change-Id: I4705d61991ddc57c30981c311a1c8c5e2f8ddf4d
Reviewed-on: https://chromium-review.googlesource.com/c/1288271
Reviewed-by: Sigurd Schneider <sigurds@chromium.org>
Commit-Queue: Jakob Gruber <jgruber@chromium.org>
Cr-Commit-Position: refs/heads/master@{#56864}
parent c949f19b
......@@ -17,75 +17,45 @@ namespace internal {
void Builtins::Generate_CallFunction_ReceiverIsNullOrUndefined(
MacroAssembler* masm) {
#ifdef V8_TARGET_ARCH_IA32
Assembler::SupportsRootRegisterScope supports_root_register(masm);
#endif
Generate_CallFunction(masm, ConvertReceiverMode::kNullOrUndefined);
}
void Builtins::Generate_CallFunction_ReceiverIsNotNullOrUndefined(
MacroAssembler* masm) {
#ifdef V8_TARGET_ARCH_IA32
Assembler::SupportsRootRegisterScope supports_root_register(masm);
#endif
Generate_CallFunction(masm, ConvertReceiverMode::kNotNullOrUndefined);
}
void Builtins::Generate_CallFunction_ReceiverIsAny(MacroAssembler* masm) {
#ifdef V8_TARGET_ARCH_IA32
Assembler::SupportsRootRegisterScope supports_root_register(masm);
#endif
Generate_CallFunction(masm, ConvertReceiverMode::kAny);
}
void Builtins::Generate_CallBoundFunction(MacroAssembler* masm) {
#ifdef V8_TARGET_ARCH_IA32
Assembler::SupportsRootRegisterScope supports_root_register(masm);
#endif
Generate_CallBoundFunctionImpl(masm);
}
void Builtins::Generate_Call_ReceiverIsNullOrUndefined(MacroAssembler* masm) {
#ifdef V8_TARGET_ARCH_IA32
Assembler::SupportsRootRegisterScope supports_root_register(masm);
#endif
Generate_Call(masm, ConvertReceiverMode::kNullOrUndefined);
}
void Builtins::Generate_Call_ReceiverIsNotNullOrUndefined(
MacroAssembler* masm) {
#ifdef V8_TARGET_ARCH_IA32
Assembler::SupportsRootRegisterScope supports_root_register(masm);
#endif
Generate_Call(masm, ConvertReceiverMode::kNotNullOrUndefined);
}
void Builtins::Generate_Call_ReceiverIsAny(MacroAssembler* masm) {
#ifdef V8_TARGET_ARCH_IA32
Assembler::SupportsRootRegisterScope supports_root_register(masm);
#endif
Generate_Call(masm, ConvertReceiverMode::kAny);
}
void Builtins::Generate_CallVarargs(MacroAssembler* masm) {
#ifdef V8_TARGET_ARCH_IA32
Assembler::SupportsRootRegisterScope supports_root_register(masm);
#endif
Generate_CallOrConstructVarargs(masm, masm->isolate()->builtins()->Call());
}
void Builtins::Generate_CallForwardVarargs(MacroAssembler* masm) {
#ifdef V8_TARGET_ARCH_IA32
Assembler::SupportsRootRegisterScope supports_root_register(masm);
#endif
Generate_CallOrConstructForwardVarargs(masm, CallOrConstructMode::kCall,
masm->isolate()->builtins()->Call());
}
void Builtins::Generate_CallFunctionForwardVarargs(MacroAssembler* masm) {
#ifdef V8_TARGET_ARCH_IA32
Assembler::SupportsRootRegisterScope supports_root_register(masm);
#endif
Generate_CallOrConstructForwardVarargs(
masm, CallOrConstructMode::kCall,
masm->isolate()->builtins()->CallFunction());
......
......@@ -19,26 +19,17 @@ namespace v8 {
namespace internal {
void Builtins::Generate_ConstructVarargs(MacroAssembler* masm) {
#ifdef V8_TARGET_ARCH_IA32
Assembler::SupportsRootRegisterScope supports_root_register(masm);
#endif
Generate_CallOrConstructVarargs(masm,
BUILTIN_CODE(masm->isolate(), Construct));
}
void Builtins::Generate_ConstructForwardVarargs(MacroAssembler* masm) {
#ifdef V8_TARGET_ARCH_IA32
Assembler::SupportsRootRegisterScope supports_root_register(masm);
#endif
Generate_CallOrConstructForwardVarargs(
masm, CallOrConstructMode::kConstruct,
BUILTIN_CODE(masm->isolate(), Construct));
}
void Builtins::Generate_ConstructFunctionForwardVarargs(MacroAssembler* masm) {
#ifdef V8_TARGET_ARCH_IA32
Assembler::SupportsRootRegisterScope supports_root_register(masm);
#endif
Generate_CallOrConstructForwardVarargs(
masm, CallOrConstructMode::kConstruct,
BUILTIN_CODE(masm->isolate(), ConstructFunction));
......
......@@ -24,16 +24,10 @@ using TNode = compiler::TNode<T>;
// Interrupt and stack checks.
void Builtins::Generate_InterruptCheck(MacroAssembler* masm) {
#ifdef V8_TARGET_ARCH_IA32
Assembler::SupportsRootRegisterScope supports_root_register(masm);
#endif
masm->TailCallRuntime(Runtime::kInterrupt);
}
void Builtins::Generate_StackCheck(MacroAssembler* masm) {
#ifdef V8_TARGET_ARCH_IA32
Assembler::SupportsRootRegisterScope supports_root_register(masm);
#endif
masm->TailCallRuntime(Runtime::kStackGuard);
}
......@@ -1211,9 +1205,6 @@ void Builtins::Generate_CEntry_Return2_SaveFPRegs_ArgvOnStack_BuiltinExit(
}
void Builtins::Generate_CallApiGetter(MacroAssembler* masm) {
#ifdef V8_TARGET_ARCH_IA32
Assembler::SupportsRootRegisterScope supports_root_register(masm);
#endif
// CallApiGetterStub only exists as a stub to avoid duplicating code between
// here and code-stubs-<arch>.cc. For example, see CallApiFunctionAndReturn.
// Here we abuse the instantiated stub to generate code.
......@@ -1222,9 +1213,6 @@ void Builtins::Generate_CallApiGetter(MacroAssembler* masm) {
}
void Builtins::Generate_CallApiCallback_Argc0(MacroAssembler* masm) {
#ifdef V8_TARGET_ARCH_IA32
Assembler::SupportsRootRegisterScope supports_root_register(masm);
#endif
// The common variants of CallApiCallbackStub (i.e. all that are embedded into
// the snapshot) are generated as builtins. The rest remain available as code
// stubs. Here we abuse the instantiated stub to generate code and avoid
......@@ -1235,9 +1223,6 @@ void Builtins::Generate_CallApiCallback_Argc0(MacroAssembler* masm) {
}
void Builtins::Generate_CallApiCallback_Argc1(MacroAssembler* masm) {
#ifdef V8_TARGET_ARCH_IA32
Assembler::SupportsRootRegisterScope supports_root_register(masm);
#endif
// The common variants of CallApiCallbackStub (i.e. all that are embedded into
// the snapshot) are generated as builtins. The rest remain available as code
// stubs. Here we abuse the instantiated stub to generate code and avoid
......
......@@ -10,18 +10,12 @@ namespace v8 {
namespace internal {
void Builtins::Generate_InterpreterPushArgsThenCall(MacroAssembler* masm) {
#ifdef V8_TARGET_ARCH_IA32
Assembler::SupportsRootRegisterScope supports_root_register(masm);
#endif
return Generate_InterpreterPushArgsThenCallImpl(
masm, ConvertReceiverMode::kAny, InterpreterPushArgsMode::kOther);
}
void Builtins::Generate_InterpreterPushUndefinedAndArgsThenCall(
MacroAssembler* masm) {
#ifdef V8_TARGET_ARCH_IA32
Assembler::SupportsRootRegisterScope supports_root_register(masm);
#endif
return Generate_InterpreterPushArgsThenCallImpl(
masm, ConvertReceiverMode::kNullOrUndefined,
InterpreterPushArgsMode::kOther);
......@@ -29,36 +23,24 @@ void Builtins::Generate_InterpreterPushUndefinedAndArgsThenCall(
void Builtins::Generate_InterpreterPushArgsThenCallWithFinalSpread(
MacroAssembler* masm) {
#ifdef V8_TARGET_ARCH_IA32
Assembler::SupportsRootRegisterScope supports_root_register(masm);
#endif
return Generate_InterpreterPushArgsThenCallImpl(
masm, ConvertReceiverMode::kAny,
InterpreterPushArgsMode::kWithFinalSpread);
}
void Builtins::Generate_InterpreterPushArgsThenConstruct(MacroAssembler* masm) {
#ifdef V8_TARGET_ARCH_IA32
Assembler::SupportsRootRegisterScope supports_root_register(masm);
#endif
return Generate_InterpreterPushArgsThenConstructImpl(
masm, InterpreterPushArgsMode::kOther);
}
void Builtins::Generate_InterpreterPushArgsThenConstructWithFinalSpread(
MacroAssembler* masm) {
#ifdef V8_TARGET_ARCH_IA32
Assembler::SupportsRootRegisterScope supports_root_register(masm);
#endif
return Generate_InterpreterPushArgsThenConstructImpl(
masm, InterpreterPushArgsMode::kWithFinalSpread);
}
void Builtins::Generate_InterpreterPushArgsThenConstructArrayFunction(
MacroAssembler* masm) {
#ifdef V8_TARGET_ARCH_IA32
Assembler::SupportsRootRegisterScope supports_root_register(masm);
#endif
return Generate_InterpreterPushArgsThenConstructImpl(
masm, InterpreterPushArgsMode::kArrayFunction);
}
......
......@@ -440,7 +440,6 @@ void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen,
do { \
Label binop; \
__ bind(&binop); \
TurboAssembler::AllowExplicitEbxAccessScope spill_register(tasm()); \
__ mov(eax, i.MemoryOperand(2)); \
__ mov(edx, i.NextMemoryOperand(2)); \
__ push(ebx); \
......@@ -700,7 +699,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
// TODO(v8:6666): Remove once only embedded builtins are supported.
__ push(eax);
frame_access_state()->IncreaseSPDelta(1);
TurboAssembler::AllowExplicitEbxAccessScope read_only_access(tasm());
Operand virtual_call_target_register(
kRootRegister,
IsolateData::kVirtualCallTargetRegisterOffset - kRootRegisterBias);
......@@ -3700,7 +3698,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kIA32Word32AtomicPairStore: {
Label store;
__ bind(&store);
TurboAssembler::AllowExplicitEbxAccessScope spill_register(tasm());
__ mov(i.TempRegister(0), i.MemoryOperand(2));
__ mov(i.TempRegister(1), i.NextMemoryOperand(2));
__ push(ebx);
......@@ -3741,7 +3738,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
DCHECK(VerifyOutputOfAtomicPairInstr(&i, instr));
Label exchange;
__ bind(&exchange);
TurboAssembler::AllowExplicitEbxAccessScope spill_ebx(tasm());
__ mov(eax, i.MemoryOperand(2));
__ mov(edx, i.NextMemoryOperand(2));
__ push(ebx);
......@@ -3784,7 +3780,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kIA32Word32AtomicPairCompareExchange: {
TurboAssembler::AllowExplicitEbxAccessScope spill_ebx(tasm());
__ push(ebx);
frame_access_state()->IncreaseSPDelta(1);
i.MoveInstructionOperandToRegister(ebx, instr->InputAt(2));
......@@ -3840,7 +3835,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
DCHECK(VerifyOutputOfAtomicPairInstr(&i, instr));
Label binop;
__ bind(&binop);
TurboAssembler::AllowExplicitEbxAccessScope spill_register(tasm());
// Move memory operand into edx:eax
__ mov(eax, i.MemoryOperand(2));
__ mov(edx, i.NextMemoryOperand(2));
......@@ -4329,7 +4323,6 @@ void CodeGenerator::AssembleConstructFrame() {
if (saves != 0) { // Save callee-saved registers.
DCHECK(!info()->is_osr());
TurboAssembler::AllowExplicitEbxAccessScope spill_register(tasm());
for (int i = Register::kNumRegisters - 1; i >= 0; i--) {
if (((1 << i) & saves)) __ push(Register::from_code(i));
}
......@@ -4352,7 +4345,6 @@ void CodeGenerator::AssembleReturn(InstructionOperand* pop) {
__ add(esp, Immediate(returns * kPointerSize));
}
for (int i = 0; i < Register::kNumRegisters; i++) {
TurboAssembler::AllowExplicitEbxAccessScope reload_register(tasm());
if (!((1 << i) & saves)) continue;
__ pop(Register::from_code(i));
}
......
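Every scope deleted in this file guarded the same idiom: ebx (kRootRegister) is explicitly spilled around a sequence that needs it, typically because cmpxchg8b takes the new value in ecx:ebx. A sketch assembled from the kIA32Word32AtomicPairCompareExchange hunk above; the lock/cmpxchg8b lines are not visible in this diff and are assumed:

// Spill idiom from the atomic-pair cases above. ebx doubles as
// kRootRegister, so it is saved and restored around its explicit use.
__ push(ebx);                              // spill the root register
frame_access_state()->IncreaseSPDelta(1);
i.MoveInstructionOperandToRegister(ebx, instr->InputAt(2));
// ... assumed: lock(); cmpxchg8b(...) consuming edx:eax and ecx:ebx ...
__ pop(ebx);                               // restore the root register
frame_access_state()->IncreaseSPDelta(-1);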
......@@ -2550,10 +2550,6 @@ void PipelineImpl::AssembleCode(Linkage* linkage) {
data->BeginPhaseKind("code generation");
data->InitializeCodeGenerator(linkage);
#if defined(V8_TARGET_ARCH_IA32) && defined(V8_EMBEDDED_BUILTINS)
code_generator()->tasm()->set_ebx_addressable(false);
#endif
Run<AssembleCodePhase>();
if (data->info()->trace_turbo_json_enabled()) {
TurboJsonFile json_of(data->info(), std::ios_base::app);
......
......@@ -16,8 +16,6 @@ namespace internal {
#define __ ACCESS_MASM(masm)
void DebugCodegen::GenerateHandleDebuggerStatement(MacroAssembler* masm) {
Assembler::SupportsRootRegisterScope supports_root_register(masm);
{
FrameScope scope(masm, StackFrame::INTERNAL);
__ CallRuntime(Runtime::kHandleDebuggerStatement, 0);
......@@ -29,8 +27,6 @@ void DebugCodegen::GenerateHandleDebuggerStatement(MacroAssembler* masm) {
}
void DebugCodegen::GenerateFrameDropperTrampoline(MacroAssembler* masm) {
Assembler::SupportsRootRegisterScope supports_root_register(masm);
// Frame is being dropped:
// - Drop to the target frame specified by eax.
// - Look up current function on the frame.
......
......@@ -1080,11 +1080,8 @@ DEFINE_BOOL_READONLY(embedded_builtins, V8_EMBEDDED_BUILTINS_BOOL,
"Embed builtin code into the binary.")
// TODO(jgruber,v8:6666): Remove once ia32 has full embedded builtin support.
DEFINE_BOOL_READONLY(
ia32_verify_root_register, V8_EMBEDDED_BUILTINS_BOOL,
ia32_verify_root_register, false,
"Check that the value of the root register was not clobbered.")
// TODO(jgruber,v8:6666): Remove once ia32 has full embedded builtin support.
DEFINE_BOOL(print_embedded_builtin_candidates, false,
"Prints builtins that are not yet embedded but could be.")
DEFINE_BOOL(lazy_deserialization, true,
"Deserialize code lazily from the snapshot.")
DEFINE_BOOL(trace_lazy_deserialization, false, "Trace lazy deserialization.")
......
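For reference, the check this flag gates compares a sentinel slot reachable from kRootRegister against the value written at isolate setup; a sketch condensed from TurboAssembler::VerifyRootRegister further down in this diff (the failure path is truncated there, so the trap instruction below is an assumption):

// Condensed from TurboAssembler::VerifyRootRegister. If the word next to
// the roots array no longer holds the sentinel, ebx was clobbered.
Label root_register_ok;
cmp(Operand(kRootRegister,
            IsolateData::kMagicNumberOffset - kRootRegisterBias),
    Immediate(kRootRegisterSentinel));
j(equal, &root_register_ok);
int3();  // assumed failure path: trap when kRootRegister was clobbered
bind(&root_register_ok);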
......@@ -323,10 +323,6 @@ void Assembler::deserialization_set_target_internal_reference_at(
void Operand::set_sib(ScaleFactor scale, Register index, Register base) {
#ifdef DEBUG
AddUsedRegister(index);
AddUsedRegister(base);
#endif
DCHECK_EQ(len_, 1);
DCHECK_EQ(scale & -4, 0);
// Use SIB with no index register only for base esp.
......
......@@ -501,7 +501,6 @@ void Assembler::pushad() {
void Assembler::popad() {
EnsureSpace ensure_space(this);
AssertIsAddressable(ebx);
EMIT(0x61);
}
......@@ -538,7 +537,6 @@ void Assembler::push_imm32(int32_t imm32) {
void Assembler::push(Register src) {
AssertIsAddressable(src);
EnsureSpace ensure_space(this);
EMIT(0x50 | src.code());
}
......@@ -551,7 +549,6 @@ void Assembler::push(Operand src) {
void Assembler::pop(Register dst) {
AssertIsAddressable(dst);
DCHECK_NOT_NULL(reloc_info_writer.last_pc());
EnsureSpace ensure_space(this);
EMIT(0x58 | dst.code());
......@@ -623,7 +620,6 @@ void Assembler::mov_w(Operand dst, const Immediate& src) {
void Assembler::mov(Register dst, int32_t imm32) {
AssertIsAddressable(dst);
EnsureSpace ensure_space(this);
EMIT(0xB8 | dst.code());
emit(imm32);
......@@ -631,14 +627,12 @@ void Assembler::mov(Register dst, int32_t imm32) {
void Assembler::mov(Register dst, const Immediate& x) {
AssertIsAddressable(dst);
EnsureSpace ensure_space(this);
EMIT(0xB8 | dst.code());
emit(x);
}
void Assembler::mov(Register dst, Handle<HeapObject> handle) {
AssertIsAddressable(dst);
EnsureSpace ensure_space(this);
EMIT(0xB8 | dst.code());
emit(handle);
......@@ -652,8 +646,6 @@ void Assembler::mov(Register dst, Operand src) {
void Assembler::mov(Register dst, Register src) {
AssertIsAddressable(src);
AssertIsAddressable(dst);
EnsureSpace ensure_space(this);
EMIT(0x89);
EMIT(0xC0 | src.code() << 3 | dst.code());
......@@ -760,8 +752,6 @@ void Assembler::stos() {
void Assembler::xchg(Register dst, Register src) {
AssertIsAddressable(src);
AssertIsAddressable(dst);
EnsureSpace ensure_space(this);
if (src == eax || dst == eax) { // Single-byte encoding.
EMIT(0x90 | (src == eax ? dst.code() : src.code()));
......@@ -992,7 +982,6 @@ void Assembler::cmpw_ax(Operand op) {
void Assembler::dec_b(Register dst) {
AssertIsAddressable(dst);
CHECK(dst.is_byte_register());
EnsureSpace ensure_space(this);
EMIT(0xFE);
......@@ -1007,7 +996,6 @@ void Assembler::dec_b(Operand dst) {
void Assembler::dec(Register dst) {
AssertIsAddressable(dst);
EnsureSpace ensure_space(this);
EMIT(0x48 | dst.code());
}
......@@ -1038,7 +1026,6 @@ void Assembler::div(Operand src) {
void Assembler::imul(Register reg) {
AssertIsAddressable(reg);
EnsureSpace ensure_space(this);
EMIT(0xF7);
EMIT(0xE8 | reg.code());
......@@ -1071,7 +1058,6 @@ void Assembler::imul(Register dst, Operand src, int32_t imm32) {
void Assembler::inc(Register dst) {
AssertIsAddressable(dst);
EnsureSpace ensure_space(this);
EMIT(0x40 | dst.code());
}
......@@ -1090,7 +1076,6 @@ void Assembler::lea(Register dst, Operand src) {
void Assembler::mul(Register src) {
AssertIsAddressable(src);
EnsureSpace ensure_space(this);
EMIT(0xF7);
EMIT(0xE0 | src.code());
......@@ -1098,14 +1083,12 @@ void Assembler::mul(Register src) {
void Assembler::neg(Register dst) {
AssertIsAddressable(dst);
EnsureSpace ensure_space(this);
EMIT(0xF7);
EMIT(0xD8 | dst.code());
}
void Assembler::neg(Operand dst) {
AllowExplicitEbxAccessScope register_used_for_regcode(this);
EnsureSpace ensure_space(this);
EMIT(0xF7);
emit_operand(ebx, dst);
......@@ -1113,7 +1096,6 @@ void Assembler::neg(Operand dst) {
void Assembler::not_(Register dst) {
AssertIsAddressable(dst);
EnsureSpace ensure_space(this);
EMIT(0xF7);
EMIT(0xD0 | dst.code());
......@@ -1150,7 +1132,6 @@ void Assembler::or_(Operand dst, Register src) {
void Assembler::rcl(Register dst, uint8_t imm8) {
AssertIsAddressable(dst);
EnsureSpace ensure_space(this);
DCHECK(is_uint5(imm8)); // illegal shift count
if (imm8 == 1) {
......@@ -1165,7 +1146,6 @@ void Assembler::rcl(Register dst, uint8_t imm8) {
void Assembler::rcr(Register dst, uint8_t imm8) {
AssertIsAddressable(dst);
EnsureSpace ensure_space(this);
DCHECK(is_uint5(imm8)); // illegal shift count
if (imm8 == 1) {
......@@ -1323,7 +1303,6 @@ void Assembler::test(Register reg, const Immediate& imm) {
return;
}
AssertIsAddressable(reg);
EnsureSpace ensure_space(this);
// This is not using emit_arith because test doesn't support
// sign-extension of 8-bit operands.
......@@ -1364,7 +1343,6 @@ void Assembler::test(Operand op, const Immediate& imm) {
}
void Assembler::test_b(Register reg, Immediate imm8) {
AssertIsAddressable(reg);
DCHECK(imm8.is_uint8());
EnsureSpace ensure_space(this);
// Only use test against byte for registers that have a byte
......@@ -1394,7 +1372,6 @@ void Assembler::test_b(Operand op, Immediate imm8) {
}
void Assembler::test_w(Register reg, Immediate imm16) {
AssertIsAddressable(reg);
DCHECK(imm16.is_int16() || imm16.is_uint16());
EnsureSpace ensure_space(this);
if (reg == eax) {
......@@ -1451,7 +1428,6 @@ void Assembler::xor_(Operand dst, const Immediate& x) {
}
void Assembler::bswap(Register dst) {
AssertIsAddressable(dst);
EnsureSpace ensure_space(this);
EMIT(0x0F);
EMIT(0xC8 + dst.code());
......@@ -1880,7 +1856,6 @@ void Assembler::fld_d(Operand adr) {
}
void Assembler::fstp_s(Operand adr) {
AllowExplicitEbxAccessScope register_used_for_regcode(this);
EnsureSpace ensure_space(this);
EMIT(0xD9);
emit_operand(ebx, adr);
......@@ -1893,7 +1868,6 @@ void Assembler::fst_s(Operand adr) {
}
void Assembler::fstp_d(Operand adr) {
AllowExplicitEbxAccessScope register_used_for_regcode(this);
EnsureSpace ensure_space(this);
EMIT(0xDD);
emit_operand(ebx, adr);
......@@ -1918,7 +1892,6 @@ void Assembler::fild_d(Operand adr) {
}
void Assembler::fistp_s(Operand adr) {
AllowExplicitEbxAccessScope register_used_for_regcode(this);
EnsureSpace ensure_space(this);
EMIT(0xDB);
emit_operand(ebx, adr);
......@@ -2206,7 +2179,6 @@ void Assembler::sahf() {
void Assembler::setcc(Condition cc, Register reg) {
AssertIsAddressable(reg);
DCHECK(reg.is_byte_register());
EnsureSpace ensure_space(this);
EMIT(0x0F);
......@@ -2218,8 +2190,6 @@ void Assembler::cvttss2si(Register dst, Operand src) {
EnsureSpace ensure_space(this);
// The [src] might contain ebx's register code, but in
// this case, it refers to xmm3, so it is OK to emit.
AllowExplicitEbxAccessScope accessing_xmm_register(this);
DCHECK(is_ebx_addressable_ || dst != ebx);
EMIT(0xF3);
EMIT(0x0F);
EMIT(0x2C);
......@@ -2230,8 +2200,6 @@ void Assembler::cvttsd2si(Register dst, Operand src) {
EnsureSpace ensure_space(this);
// The [src] might contain ebx's register code, but in
// this case, it refers to xmm3, so it is OK to emit.
AllowExplicitEbxAccessScope accessing_xmm_register(this);
DCHECK(is_ebx_addressable_ || dst != ebx);
EMIT(0xF2);
EMIT(0x0F);
EMIT(0x2C);
......@@ -3195,7 +3163,6 @@ void Assembler::vinstr(byte op, XMMRegister dst, XMMRegister src1, Operand src2,
}
void Assembler::emit_sse_operand(XMMRegister reg, Operand adr) {
AllowExplicitEbxAccessScope accessing_xmm_register(this);
Register ireg = Register::from_code(reg.code());
emit_operand(ireg, adr);
}
......@@ -3207,13 +3174,11 @@ void Assembler::emit_sse_operand(XMMRegister dst, XMMRegister src) {
void Assembler::emit_sse_operand(Register dst, XMMRegister src) {
AssertIsAddressable(dst);
EMIT(0xC0 | dst.code() << 3 | src.code());
}
void Assembler::emit_sse_operand(XMMRegister dst, Register src) {
AssertIsAddressable(src);
EMIT(0xC0 | (dst.code() << 3) | src.code());
}
......@@ -3299,7 +3264,6 @@ void Assembler::GrowBuffer() {
void Assembler::emit_arith_b(int op1, int op2, Register dst, int imm8) {
AssertIsAddressable(dst);
DCHECK(is_uint8(op1) && is_uint8(op2)); // wrong opcode
DCHECK(is_uint8(imm8));
DCHECK_EQ(op1 & 0x01, 0); // should be 8bit operation
......@@ -3310,7 +3274,6 @@ void Assembler::emit_arith_b(int op1, int op2, Register dst, int imm8) {
void Assembler::emit_arith(int sel, Operand dst, const Immediate& x) {
AssertIsAddressable(dst);
DCHECK((0 <= sel) && (sel <= 7));
Register ireg = Register::from_code(sel);
if (x.is_int8()) {
......@@ -3337,16 +3300,13 @@ void Assembler::emit_operand(XMMRegister reg, Operand adr) {
}
void Assembler::emit_operand(int code, Operand adr) {
AssertIsAddressable(adr);
AssertIsAddressable(Register::from_code(code));
// Isolate-independent code may not embed relocatable addresses.
DCHECK(!options().isolate_independent_code ||
adr.rmode_ != RelocInfo::CODE_TARGET);
DCHECK(!options().isolate_independent_code ||
adr.rmode_ != RelocInfo::EMBEDDED_OBJECT);
// TODO(jgruber,v8:6666): Enable once kRootRegister exists.
// DCHECK(!options().isolate_independent_code ||
// adr.rmode_ != RelocInfo::EXTERNAL_REFERENCE);
DCHECK(!options().isolate_independent_code ||
adr.rmode_ != RelocInfo::EXTERNAL_REFERENCE);
const unsigned length = adr.len_;
DCHECK_GT(length, 0);
......@@ -3420,16 +3380,6 @@ void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
reloc_info_writer.Write(&rinfo);
}
#ifdef DEBUG
void Assembler::AssertIsAddressable(const Operand& operand) {
DCHECK(is_ebx_addressable_ || !operand.UsesEbx());
}
void Assembler::AssertIsAddressable(const Register& reg) {
DCHECK(is_ebx_addressable_ || reg != ebx);
}
#endif // DEBUG
} // namespace internal
} // namespace v8
......
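One subtlety visible in the hunks above: several AllowExplicitEbxAccessScope uses existed only because an instruction's opcode extension reuses ebx's register code (3) in the ModRM reg field, not because ebx itself is read or written. Condensed from Assembler::neg(Operand) above:

// NEG r/m32 is encoded as F7 /3; passing ebx (register code 3) to
// emit_operand supplies the /3 extension. The ebx register itself is never
// touched, which is why only the debug assertion had to be silenced.
void Assembler::neg(Operand dst) {
  EnsureSpace ensure_space(this);
  EMIT(0xF7);
  emit_operand(ebx, dst);  // "ebx" here means opcode extension /3
}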
......@@ -370,17 +370,10 @@ class V8_EXPORT_PRIVATE Operand {
// register.
Register reg() const;
#ifdef DEBUG
bool UsesEbx() const { return uses_ebx_; }
#endif // DEBUG
private:
// Set the ModRM byte without an encoded 'reg' register. The
// register is encoded later as part of the emit_operand operation.
inline void set_modrm(int mod, Register rm) {
#ifdef DEBUG
AddUsedRegister(rm);
#endif
DCHECK_EQ(mod & -4, 0);
buf_[0] = mod << 6 | rm.code();
len_ = 1;
......@@ -407,23 +400,12 @@ class V8_EXPORT_PRIVATE Operand {
// Only valid if len_ > 4.
RelocInfo::Mode rmode_ = RelocInfo::NONE;
#ifdef DEBUG
// TODO(v8:6666): Remove once kRootRegister support is complete.
bool uses_ebx_ = false;
void AddUsedRegister(Register reg) {
if (reg == ebx) uses_ebx_ = true;
}
#endif // DEBUG
// TODO(clemensh): Get rid of this friendship, or make Operand immutable.
friend class Assembler;
};
ASSERT_TRIVIALLY_COPYABLE(Operand);
// TODO(v8:6666): Re-enable globally once kRootRegister support is complete.
#ifndef DEBUG
static_assert(sizeof(Operand) <= 2 * kPointerSize,
"Operand must be small enough to pass it by value");
#endif
// -----------------------------------------------------------------------------
// A Displacement describes the 32bit immediate field of an instruction which
......@@ -1780,34 +1762,6 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
byte byte_at(int pos) { return buffer_[pos]; }
void set_byte_at(int pos, byte value) { buffer_[pos] = value; }
// Temporary helper data structures while adding kRootRegister support to ia32
// builtins. The SupportsRootRegisterScope is intended to mark each builtin
// and helper that fully supports the root register, i.e. that does not
// clobber ebx. The AllowExplicitEbxAccessScope marks regions that are allowed
// to clobber ebx, e.g. when ebx is spilled and restored.
// TODO(v8:6666): Remove once kRootRegister is fully supported.
template <bool new_value>
class SetRootRegisterSupportScope final {
public:
explicit SetRootRegisterSupportScope(Assembler* assembler)
: assembler_(assembler), old_value_(assembler->is_ebx_addressable_) {
assembler_->is_ebx_addressable_ = new_value;
}
~SetRootRegisterSupportScope() {
assembler_->is_ebx_addressable_ = old_value_;
}
private:
Assembler* assembler_;
const bool old_value_;
};
typedef SetRootRegisterSupportScope<false> SupportsRootRegisterScope;
typedef SetRootRegisterSupportScope<true> AllowExplicitEbxAccessScope;
void set_ebx_addressable(bool is_addressable) {
is_ebx_addressable_ = is_addressable;
}
protected:
void emit_sse_operand(XMMRegister reg, Operand adr);
void emit_sse_operand(XMMRegister dst, XMMRegister src);
......@@ -1816,17 +1770,6 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
byte* addr_at(int pos) { return buffer_ + pos; }
#ifdef DEBUG
// TODO(v8:6666): Remove once kRootRegister is fully supported.
void AssertIsAddressable(const Register& reg);
void AssertIsAddressable(const Operand& operand);
#else
// An empty inline definition to avoid slowing down release builds.
void AssertIsAddressable(const Register&) {}
void AssertIsAddressable(const Operand&) {}
#endif // DEBUG
bool is_ebx_addressable_ = true;
private:
uint32_t long_at(int pos) {
return *reinterpret_cast<uint32_t*>(addr_at(pos));
......
......@@ -33,7 +33,6 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
{ // NOLINT. Scope block confuses linter.
NoRootArrayScope uninitialized_root_register(masm);
Assembler::AllowExplicitEbxAccessScope spill_register(masm);
// Set up frame.
__ push(ebp);
......@@ -53,7 +52,6 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
__ InitializeRootRegister();
}
Assembler::SupportsRootRegisterScope supports_root_register(masm);
// Save copies of the top frame descriptor on the stack.
ExternalReference c_entry_fp =
......@@ -111,7 +109,6 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
// Restore the top frame descriptor from the stack.
__ pop(__ ExternalReferenceAsOperand(c_entry_fp, edi));
Assembler::AllowExplicitEbxAccessScope exiting_js(masm);
// Restore callee-saved registers (C calling conventions).
__ pop(ebx);
__ pop(esi);
......@@ -139,8 +136,6 @@ void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
void ProfileEntryHookStub::Generate(MacroAssembler* masm) {
Assembler::SupportsRootRegisterScope supports_root_register(masm);
// Save volatile registers.
const int kNumSavedRegisters = 3;
__ push(eax);
......@@ -200,7 +195,6 @@ static void CallApiFunctionAndReturn(MacroAssembler* masm,
Operand thunk_last_arg, int stack_space,
Operand* stack_space_operand,
Operand return_value_operand) {
Assembler::SupportsRootRegisterScope supports_root_register(masm);
Isolate* isolate = masm->isolate();
ExternalReference next_address =
......@@ -349,8 +343,6 @@ static void CallApiFunctionAndReturn(MacroAssembler* masm,
}
void CallApiCallbackStub::Generate(MacroAssembler* masm) {
Assembler::SupportsRootRegisterScope supports_root_register(masm);
// ----------- S t a t e -------------
// -- eax : call_data
// -- ecx : holder
......@@ -441,8 +433,6 @@ void CallApiCallbackStub::Generate(MacroAssembler* masm) {
void CallApiGetterStub::Generate(MacroAssembler* masm) {
Assembler::SupportsRootRegisterScope supports_root_register(masm);
// Build v8::PropertyCallbackInfo::args_ array on the stack and push property
// name below the exit frame to make GC aware of them.
STATIC_ASSERT(PropertyCallbackArguments::kShouldThrowOnErrorIndex == 0);
......
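The deletions in this file bracket the one place where the root register is legitimately set up and torn down. Condensed from the JSEntryStub hunks above (the pushes elided by the diff are assumed):

// ebx is callee-saved in the C calling convention, so the entry stub saves
// it together with the other callee-saved registers, points it at the roots
// array, and restores it on the way back out to C++.
{
  NoRootArrayScope uninitialized_root_register(masm);
  __ push(ebp);  // set up frame
  // ... assumed: push remaining callee-saved registers, including ebx ...
  __ InitializeRootRegister();  // mov ebx, roots_array_start
}
// ... stub body runs with kRootRegister (ebx) valid ...
__ pop(ebx);  // restore callee-saved registers (C calling conventions)
__ pop(esi);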
......@@ -18,8 +18,6 @@ const int Deoptimizer::table_entry_size_ = 10;
#define __ masm()->
void Deoptimizer::TableEntryGenerator::Generate() {
Assembler::SupportsRootRegisterScope supports_root_register(masm());
GeneratePrologue();
// Save all general purpose registers before messing with them.
......@@ -197,7 +195,6 @@ void Deoptimizer::TableEntryGenerator::Generate() {
}
// Restore the registers from the stack.
Assembler::AllowExplicitEbxAccessScope restoring_spilled_value(masm());
__ popad();
__ InitializeRootRegister();
......
......@@ -50,7 +50,6 @@ void TurboAssembler::InitializeRootRegister() {
// removed.
if (!FLAG_embedded_builtins) return;
Assembler::AllowExplicitEbxAccessScope setup(this);
ExternalReference roots_array_start =
ExternalReference::roots_array_start(isolate());
Move(kRootRegister, Immediate(roots_array_start));
......@@ -62,7 +61,6 @@ void TurboAssembler::VerifyRootRegister() {
DCHECK(FLAG_embedded_builtins);
Assembler::AllowExplicitEbxAccessScope read_only_access(this);
Label root_register_ok;
cmp(Operand(kRootRegister,
IsolateData::kMagicNumberOffset - kRootRegisterBias),
......@@ -75,7 +73,6 @@ void TurboAssembler::VerifyRootRegister() {
void TurboAssembler::LoadRoot(Register destination, RootIndex index) {
#ifdef V8_EMBEDDED_BUILTINS
if (root_array_available()) {
Assembler::AllowExplicitEbxAccessScope read_only_access(this);
mov(destination,
Operand(kRootRegister, RootRegisterOffsetForRootIndex(index)));
return;
......@@ -118,7 +115,6 @@ void TurboAssembler::CompareRoot(Register with, Register scratch,
void TurboAssembler::CompareRoot(Register with, RootIndex index) {
#ifdef V8_EMBEDDED_BUILTINS
if (root_array_available()) {
Assembler::AllowExplicitEbxAccessScope read_only_access(this);
cmp(with, Operand(kRootRegister, RootRegisterOffsetForRootIndex(index)));
return;
}
......@@ -161,7 +157,6 @@ void MacroAssembler::PushRoot(RootIndex index) {
#ifdef V8_EMBEDDED_BUILTINS
if (root_array_available()) {
DCHECK(RootsTable::IsImmortalImmovable(index));
Assembler::AllowExplicitEbxAccessScope read_only_access(this);
push(Operand(kRootRegister, RootRegisterOffsetForRootIndex(index)));
return;
}
......@@ -179,7 +174,6 @@ void MacroAssembler::PushRoot(RootIndex index) {
Operand TurboAssembler::ExternalReferenceAsOperand(ExternalReference reference,
Register scratch) {
// TODO(jgruber): Add support for enable_root_array_delta_access.
Assembler::AllowExplicitEbxAccessScope read_only_access(this);
if (root_array_available_ && options().isolate_independent_code) {
if (IsAddressableThroughRootRegister(isolate(), reference)) {
// Some external references can be efficiently loaded as an offset from
......@@ -206,7 +200,6 @@ Operand TurboAssembler::ExternalReferenceAddressAsOperand(
DCHECK(FLAG_embedded_builtins);
DCHECK(root_array_available());
DCHECK(options().isolate_independent_code);
Assembler::AllowExplicitEbxAccessScope read_only_access(this);
return Operand(
kRootRegister,
RootRegisterOffsetForExternalReferenceTableEntry(isolate(), reference));
......@@ -217,7 +210,6 @@ Operand TurboAssembler::ExternalReferenceAddressAsOperand(
Operand TurboAssembler::HeapObjectAsOperand(Handle<HeapObject> object) {
DCHECK(FLAG_embedded_builtins);
DCHECK(root_array_available());
Assembler::AllowExplicitEbxAccessScope read_only_access(this);
int builtin_index;
RootIndex root_index;
......@@ -239,7 +231,6 @@ Operand TurboAssembler::HeapObjectAsOperand(Handle<HeapObject> object) {
void TurboAssembler::LoadFromConstantsTable(Register destination,
int constant_index) {
DCHECK(!is_ebx_addressable_);
DCHECK(RootsTable::IsImmortalImmovable(RootIndex::kBuiltinsConstantsTable));
LoadRoot(destination, RootIndex::kBuiltinsConstantsTable);
mov(destination,
......@@ -249,10 +240,8 @@ void TurboAssembler::LoadFromConstantsTable(Register destination,
void TurboAssembler::LoadRootRegisterOffset(Register destination,
intptr_t offset) {
DCHECK(!is_ebx_addressable_);
DCHECK(is_int32(offset));
DCHECK(root_array_available());
Assembler::AllowExplicitEbxAccessScope read_only_access(this);
if (offset == 0) {
mov(destination, kRootRegister);
} else {
......@@ -261,9 +250,7 @@ void TurboAssembler::LoadRootRegisterOffset(Register destination,
}
void TurboAssembler::LoadRootRelative(Register destination, int32_t offset) {
DCHECK(!is_ebx_addressable_);
DCHECK(root_array_available());
Assembler::AllowExplicitEbxAccessScope read_only_access(this);
mov(destination, Operand(kRootRegister, offset));
}
......@@ -279,11 +266,6 @@ void TurboAssembler::LoadAddress(Register destination,
mov(destination, Immediate(source));
}
Operand TurboAssembler::StaticVariable(const ExternalReference& ext) {
// TODO(jgruber,v8:6666): Root-relative operand once kRootRegister exists.
return Operand(ext.address(), RelocInfo::EXTERNAL_REFERENCE);
}
static constexpr Register saved_regs[] = {eax, ecx, edx};
static constexpr int kNumberOfSavedRegs = sizeof(saved_regs) / sizeof(Register);
......@@ -1372,7 +1354,6 @@ void TurboAssembler::Ret(int bytes_dropped, Register scratch) {
void TurboAssembler::Push(Immediate value) {
#ifdef V8_EMBEDDED_BUILTINS
if (root_array_available_ && options().isolate_independent_code) {
Assembler::AllowExplicitEbxAccessScope read_only_access(this);
if (value.is_embedded_object()) {
Push(HeapObjectAsOperand(value.embedded_object()));
return;
......@@ -1414,7 +1395,6 @@ void TurboAssembler::Move(Operand dst, const Immediate& src) {
if (root_array_available_ && options().isolate_independent_code) {
if (src.is_embedded_object() || src.is_external_reference() ||
src.is_heap_object_request()) {
Assembler::AllowExplicitEbxAccessScope read_only_access(this);
Push(src);
pop(dst);
return;
......@@ -1890,21 +1870,12 @@ void TurboAssembler::CallCFunction(Register function, int num_arguments) {
void TurboAssembler::Call(Handle<Code> code_object, RelocInfo::Mode rmode) {
if (FLAG_embedded_builtins) {
// TODO(jgruber): Pc-relative builtin-to-builtin calls.
if (root_array_available_ && options().isolate_independent_code &&
!Builtins::IsIsolateIndependentBuiltin(*code_object)) {
// Since we don't have a scratch register available we call through a
// so-called virtual register.
// TODO(v8:6666): Remove once pc-relative jumps are supported on ia32.
Assembler::AllowExplicitEbxAccessScope read_only_access(this);
Operand virtual_call_target_register(
kRootRegister,
IsolateData::kVirtualCallTargetRegisterOffset - kRootRegisterBias);
Move(virtual_call_target_register, Immediate(code_object));
add(virtual_call_target_register,
Immediate(Code::kHeaderSize - kHeapObjectTag));
call(virtual_call_target_register);
return;
// All call targets are expected to be isolate-independent builtins.
// If this assumption is ever violated, we could add back support for
// calls through a virtual target register.
UNREACHABLE();
} else if (options().inline_offheap_trampolines) {
int builtin_index = Builtins::kNoBuiltinId;
if (isolate()->builtins()->IsBuiltinHandle(code_object, &builtin_index) &&
......@@ -1925,21 +1896,12 @@ void TurboAssembler::Call(Handle<Code> code_object, RelocInfo::Mode rmode) {
void TurboAssembler::Jump(Handle<Code> code_object, RelocInfo::Mode rmode) {
if (FLAG_embedded_builtins) {
// TODO(jgruber): Pc-relative builtin-to-builtin calls.
if (root_array_available_ && options().isolate_independent_code &&
!Builtins::IsIsolateIndependentBuiltin(*code_object)) {
// Since we don't have a scratch register available we call through a
// so-called virtual register.
// TODO(v8:6666): Remove once pc-relative jumps are supported on ia32.
Assembler::AllowExplicitEbxAccessScope read_only_access(this);
Operand virtual_call_target_register(
kRootRegister,
IsolateData::kVirtualCallTargetRegisterOffset - kRootRegisterBias);
Move(virtual_call_target_register, Immediate(code_object));
add(virtual_call_target_register,
Immediate(Code::kHeaderSize - kHeapObjectTag));
jmp(virtual_call_target_register);
return;
// All call targets are expected to be isolate-independent builtins.
// If this assumption is ever violated, we could add back support for
// calls through a virtual target register.
UNREACHABLE();
} else if (options().inline_offheap_trampolines) {
int builtin_index = Builtins::kNoBuiltinId;
if (isolate()->builtins()->IsBuiltinHandle(code_object, &builtin_index) &&
......
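The fallback deleted above routed builtin-to-builtin calls through a "virtual register": with no scratch GPR free at ia32 call sites, the target address was staged in an isolate-owned memory slot reachable from kRootRegister and then called indirectly through memory. Condensed from the removed lines in TurboAssembler::Call/Jump:

// Condensed from the deleted fallback above.
Operand virtual_call_target_register(
    kRootRegister,
    IsolateData::kVirtualCallTargetRegisterOffset - kRootRegisterBias);
Move(virtual_call_target_register, Immediate(code_object));
add(virtual_call_target_register,
    Immediate(Code::kHeaderSize - kHeapObjectTag));  // skip the Code header
call(virtual_call_target_register);  // jmp(...) in the Jump variant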
......@@ -47,7 +47,6 @@ constexpr Register kRuntimeCallArgvRegister = ecx;
constexpr Register kWasmInstanceRegister = esi;
constexpr Register kWasmCompileLazyFuncIndexRegister = edi;
// TODO(v8:6666): Implement full support.
constexpr Register kRootRegister = ebx;
// Convenience for platform-independent signatures. We do not normally
......@@ -250,26 +249,11 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void LoadAddress(Register destination, ExternalReference source);
void PushRootRegister() {
// Check that a NoRootArrayScope exists.
CHECK(!root_array_available());
push(kRootRegister);
}
void PopRootRegister() {
// Check that a NoRootArrayScope exists.
CHECK(!root_array_available());
pop(kRootRegister);
}
void CompareStackLimit(Register with);
void CompareRealStackLimit(Register with);
void CompareRoot(Register with, RootIndex index);
void CompareRoot(Register with, Register scratch, RootIndex index);
// Wrapper functions to ensure external reference operands produce
// isolate-independent code if needed.
Operand StaticVariable(const ExternalReference& ext);
// Return and drop arguments from stack, where the number of arguments
// may be bigger than 2^16 - 1. Requires a scratch register.
void Ret(int bytes_dropped, Register scratch);
......
......@@ -81,7 +81,8 @@ class IsolateData final {
const intptr_t magic_number_ = kRootRegisterSentinel;
// For isolate-independent calls on ia32.
// TODO(v8:6666): Remove once pc-relative jumps are supported on ia32.
// TODO(v8:6666): Remove once wasm supports pc-relative jumps to builtins on
// ia32 (otherwise the arguments adaptor call runs out of registers).
void* virtual_call_target_register_ = nullptr;
V8_INLINE static void AssertPredictableLayout();
......
......@@ -3054,19 +3054,6 @@ void CreateOffHeapTrampolines(Isolate* isolate) {
}
}
}
void PrintEmbeddedBuiltinCandidates(Isolate* isolate) {
CHECK(FLAG_print_embedded_builtin_candidates);
bool found_a_candidate = false;
for (int i = 0; i < Builtins::builtin_count; i++) {
if (Builtins::IsIsolateIndependent(i)) continue;
Code* builtin = isolate->heap()->builtin(i);
if (!builtin->IsIsolateIndependent(isolate)) continue;
if (!found_a_candidate) PrintF("Found embedded builtin candidates:\n");
found_a_candidate = true;
PrintF(" %s\n", Builtins::name(i));
}
}
} // namespace
void Isolate::PrepareEmbeddedBlobForSerialization() {
......@@ -3232,9 +3219,6 @@ bool Isolate::Init(StartupDeserializer* des) {
setup_delegate_ = nullptr;
if (FLAG_print_builtin_size) PrintBuiltinSizes(this);
if (FLAG_print_embedded_builtin_candidates) {
PrintEmbeddedBuiltinCandidates(this);
}
// Finish initialization of ThreadLocal after deserialization is done.
clear_pending_exception();
......