Commit fb6d4ba1 authored by Victor Gomes, committed by Commit Bot

[ia32][sparkplug] Sparkplug IA32 port

Change-Id: Idece4925aa0ffa99bc34db39d20b24a41d59f84f
Bug: v8:11421
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2715064
Reviewed-by: Igor Sheludko <ishell@chromium.org>
Reviewed-by: Leszek Swirski <leszeks@chromium.org>
Commit-Queue: Victor Gomes <victorgomes@chromium.org>
Cr-Commit-Position: refs/heads/master@{#73265}
parent fe5f67e9
......@@ -7,7 +7,7 @@
// TODO(v8:11421): Remove #if once baseline compiler is ported to other
// architectures.
#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64
#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64
#include <type_traits>
#include <unordered_map>
......@@ -22,6 +22,8 @@
#include "src/baseline/x64/baseline-assembler-x64-inl.h"
#elif V8_TARGET_ARCH_ARM64
#include "src/baseline/arm64/baseline-assembler-arm64-inl.h"
#elif V8_TARGET_ARCH_IA32
#include "src/baseline/ia32/baseline-assembler-ia32-inl.h"
#else
#error Unsupported target architecture.
#endif
......
......@@ -8,9 +8,10 @@
// TODO(v8:11421): Remove #if once baseline compiler is ported to other
// architectures.
#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64
#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64
#include "src/codegen/macro-assembler.h"
#include "src/objects/tagged-index.h"
namespace v8 {
namespace internal {
......
......@@ -4,7 +4,7 @@
// TODO(v8:11421): Remove #if once baseline compiler is ported to other
// architectures.
#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64
#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64
#include "src/baseline/baseline-compiler.h"
......@@ -34,6 +34,8 @@
#include "src/baseline/x64/baseline-compiler-x64-inl.h"
#elif V8_TARGET_ARCH_ARM64
#include "src/baseline/arm64/baseline-compiler-arm64-inl.h"
#elif V8_TARGET_ARCH_IA32
#include "src/baseline/ia32/baseline-compiler-ia32-inl.h"
#else
#error Unsupported target architecture.
#endif
......
......@@ -7,7 +7,7 @@
// TODO(v8:11421): Remove #if once baseline compiler is ported to other
// architectures.
#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64
#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64
#include <unordered_map>
......
......@@ -6,7 +6,7 @@
// TODO(v8:11421): Remove #if once baseline compiler is ported to other
// architectures.
#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64
#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64
#include "src/baseline/baseline-assembler-inl.h"
#include "src/baseline/baseline-compiler.h"
......
This diff is collapsed.
// Copyright 2021 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_BASELINE_IA32_BASELINE_COMPILER_IA32_INL_H_
#define V8_BASELINE_IA32_BASELINE_COMPILER_IA32_INL_H_

#include "src/base/macros.h"
#include "src/baseline/baseline-compiler.h"
#include "src/codegen/interface-descriptors.h"

namespace v8 {
namespace internal {
namespace baseline {

#define __ basm_.

void BaselineCompiler::Prologue() {
  // The bytecode array is passed in ecx, matching the ia32 register list of
  // BaselineOutOfLinePrologueDescriptor in interface-descriptors.cc.
  __ Move(ecx, bytecode_);
  DCHECK_EQ(kJSFunctionRegister, kJavaScriptCallTargetRegister);
  CallBuiltin(Builtins::kBaselineOutOfLinePrologue, kContextRegister,
              kJSFunctionRegister, kJavaScriptCallArgCountRegister, ecx,
              kJavaScriptCallNewTargetRegister);
  PrologueFillFrame();
}

void BaselineCompiler::PrologueFillFrame() {
  __ RecordComment("[ Fill frame");
  // Inlined register frame fill
  interpreter::Register new_target_or_generator_register =
      bytecode_->incoming_new_target_or_generator_register();
  __ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kUndefinedValue);
  int register_count = bytecode_->register_count();
  // Magic value
  const int kLoopUnrollSize = 8;
  const int new_target_index = new_target_or_generator_register.index();
  const bool has_new_target = new_target_index != kMaxInt;
  if (has_new_target) {
    DCHECK_LE(new_target_index, register_count);
    for (int i = 0; i < new_target_index; i++) {
      __ Push(kInterpreterAccumulatorRegister);
    }
    // Push new_target_or_generator.
    __ Push(kJavaScriptCallNewTargetRegister);
    register_count -= new_target_index + 1;
  }
  if (register_count < 2 * kLoopUnrollSize) {
    // If the frame is small enough, just unroll the frame fill completely.
    for (int i = 0; i < register_count; ++i) {
      __ Push(kInterpreterAccumulatorRegister);
    }
  } else {
    // Extract the first few registers to round to the unroll size.
    int first_registers = register_count % kLoopUnrollSize;
    for (int i = 0; i < first_registers; ++i) {
      __ Push(kInterpreterAccumulatorRegister);
    }
    BaselineAssembler::ScratchRegisterScope scope(&basm_);
    Register scratch = scope.AcquireScratch();
    __ Move(scratch, register_count / kLoopUnrollSize);
    // We enter the loop unconditionally, so make sure we need to loop at least
    // once.
    DCHECK_GT(register_count / kLoopUnrollSize, 0);
    Label loop;
    __ Bind(&loop);
    for (int i = 0; i < kLoopUnrollSize; ++i) {
      __ Push(kInterpreterAccumulatorRegister);
    }
    __ masm()->dec(scratch);
    __ JumpIf(Condition::kGreaterThan, &loop);
  }
  __ RecordComment("]");
}

void BaselineCompiler::VerifyFrameSize() {
  // Stash eax in xmm0 so it can be used as a scratch register here without
  // spilling to the stack; it is restored below.
  __ masm()->movd(xmm0, eax);
  __ Move(eax, esp);
  __ masm()->add(eax,
                 Immediate(InterpreterFrameConstants::kFixedFrameSizeFromFp +
                           bytecode_->frame_size()));
  __ masm()->cmp(eax, ebp);
  __ masm()->Assert(equal, AbortReason::kUnexpectedStackPointer);
  __ masm()->movd(eax, xmm0);
}

#undef __

}  // namespace baseline
}  // namespace internal
}  // namespace v8

#endif  // V8_BASELINE_IA32_BASELINE_COMPILER_IA32_INL_H_
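For readers skimming the assembler above, the following standalone C++ sketch (not part of the CL; a std::vector stands in for the machine stack, -1 for the undefined filler, and the new_target handling is omitted) models the split PrologueFillFrame makes between the unrolled remainder and the eight-pushes-per-iteration loop:

#include <cassert>
#include <vector>

constexpr int kLoopUnrollSize = 8;  // same "magic value" as above

std::vector<int> FillFrame(int register_count) {
  std::vector<int> stack;
  if (register_count < 2 * kLoopUnrollSize) {
    // Small frame: fully unrolled, one push per interpreter register.
    for (int i = 0; i < register_count; ++i) stack.push_back(-1);
  } else {
    // Push the remainder first so the remaining count is a multiple of the
    // unroll size ...
    for (int i = 0; i < register_count % kLoopUnrollSize; ++i) {
      stack.push_back(-1);
    }
    // ... then loop, doing kLoopUnrollSize pushes per iteration. This is the
    // part the assembler drives with dec(scratch) + JumpIf(kGreaterThan).
    for (int n = register_count / kLoopUnrollSize; n > 0; --n) {
      for (int i = 0; i < kLoopUnrollSize; ++i) stack.push_back(-1);
    }
  }
  assert(static_cast<int>(stack.size()) == register_count);
  return stack;
}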
......@@ -22,7 +22,6 @@ static constexpr int kNumScratchRegisters = arraysize(kScratchRegisters);
} // namespace detail
// TODO(v8:11429): Move BaselineAssembler to baseline-assembler-<arch>-inl.h
class BaselineAssembler::ScratchRegisterScope {
public:
explicit ScratchRegisterScope(BaselineAssembler* assembler)
......
......@@ -930,7 +930,7 @@ void Builtins::Generate_MemMove(MacroAssembler* masm) {
// TODO(v8:11421): Remove #if once baseline compiler is ported to other
// architectures.
#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64
#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64
void Builtins::Generate_BaselineLeaveFrame(MacroAssembler* masm) {
EmitReturnBaseline(masm);
}
......
This diff is collapsed.
......@@ -235,6 +235,12 @@ class V8_EXPORT_PRIVATE Operand {
explicit Operand(Register base, int32_t disp,
RelocInfo::Mode rmode = RelocInfo::NONE);
// [rip + disp/r]
  explicit Operand(Label* label) {
    set_modrm(0, ebp);
    set_dispr(reinterpret_cast<intptr_t>(label), RelocInfo::INTERNAL_REFERENCE);
  }
// [base + index*scale + disp/r]
explicit Operand(Register base, Register index, ScaleFactor scale,
int32_t disp, RelocInfo::Mode rmode = RelocInfo::NONE);
......
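The new Operand(Label*) constructor resolves to the absolute address of a bound label through an INTERNAL_REFERENCE relocation. Where this CL actually uses it is not shown in this excerpt, so the snippet below is only an assumed illustration (hypothetical function, constant, and register choices) of what such an operand enables: referring to data emitted inline in the code stream.

#include "src/codegen/macro-assembler.h"

namespace v8 {
namespace internal {

// Hypothetical example, not from the CL: load a 32-bit constant that was
// emitted into the instruction stream at a bound label.
void LoadInlineConstant(MacroAssembler* masm) {
  Label constant, done;
  masm->jmp(&done);                    // skip over the inline data
  masm->bind(&constant);
  masm->dd(0xdeadbeef);                // emit a raw 32-bit word at the label
  masm->bind(&done);
  masm->mov(eax, Operand(&constant));  // reads the 0xdeadbeef word back
}

}  // namespace internal
}  // namespace v8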
......@@ -90,13 +90,9 @@ const Register GrowArrayElementsDescriptor::ObjectRegister() { return eax; }
const Register GrowArrayElementsDescriptor::KeyRegister() { return ecx; }
const Register BaselineLeaveFrameDescriptor::ParamsSizeRegister() {
// TODO(v8:11421): Implement on this platform.
UNREACHABLE();
}
const Register BaselineLeaveFrameDescriptor::WeightRegister() {
// TODO(v8:11421): Implement on this platform.
UNREACHABLE();
return esi;
}
const Register BaselineLeaveFrameDescriptor::WeightRegister() { return edi; }
// static
const Register TypeConversionDescriptor::ArgumentRegister() { return eax; }
......@@ -224,8 +220,8 @@ void CompareDescriptor::InitializePlatformSpecific(
void Compare_BaselineDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// TODO(v8:11421): Implement on this platform.
InitializePlatformUnimplemented(data, kParameterCount);
Register registers[] = {edx, eax, ecx};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void BinaryOpDescriptor::InitializePlatformSpecific(
......@@ -236,8 +232,8 @@ void BinaryOpDescriptor::InitializePlatformSpecific(
void BinaryOp_BaselineDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// TODO(v8:11421): Implement on this platform.
InitializePlatformUnimplemented(data, kParameterCount);
Register registers[] = {edx, eax, ecx};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void ApiCallbackDescriptor::InitializePlatformSpecific(
......
......@@ -1451,11 +1451,13 @@ void TurboAssembler::Prologue() {
void TurboAssembler::EnterFrame(StackFrame::Type type) {
push(ebp);
mov(ebp, esp);
push(Immediate(StackFrame::TypeToMarker(type)));
if (!StackFrame::IsJavaScript(type)) {
Push(Immediate(StackFrame::TypeToMarker(type)));
}
}
void TurboAssembler::LeaveFrame(StackFrame::Type type) {
if (emit_debug_code()) {
if (emit_debug_code() && !StackFrame::IsJavaScript(type)) {
cmp(Operand(ebp, CommonFrameConstants::kContextOrFrameTypeOffset),
Immediate(StackFrame::TypeToMarker(type)));
Check(equal, AbortReason::kStackFrameTypesMustMatch);
......@@ -2071,6 +2073,8 @@ void TurboAssembler::Move(Operand dst, const Immediate& src) {
}
}
void TurboAssembler::Move(Register dst, Operand src) { mov(dst, src); }
void TurboAssembler::Move(Register dst, Handle<HeapObject> src) {
if (root_array_available() && options().isolate_independent_code) {
IndirectLoadConstant(dst, src);
......@@ -2775,6 +2779,12 @@ void TurboAssembler::CallBuiltin(int builtin_index) {
call(entry, RelocInfo::OFF_HEAP_TARGET);
}
Operand TurboAssembler::EntryFromBuiltinIndexAsOperand(
    Builtins::Name builtin_index) {
  return Operand(kRootRegister,
                 IsolateData::builtin_entry_slot_offset(builtin_index));
}
void TurboAssembler::LoadCodeObjectEntry(Register destination,
Register code_object) {
// Code objects are called differently depending on whether we are generating
......
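A hedged illustration of how the new EntryFromBuiltinIndexAsOperand helper composes with the Call(Operand) overload declared in macro-assembler-ia32.h below; the wrapper function and builtin parameter are assumptions for illustration, not code from this CL.

#include "src/codegen/macro-assembler.h"

namespace v8 {
namespace internal {

// Hypothetical helper: call a builtin through the isolate's builtin entry
// table. The operand is addressed off kRootRegister, so no extra general
// register is needed to materialize the target address on ia32.
void CallBuiltinViaEntryTable(TurboAssembler* tasm, Builtins::Name builtin) {
  tasm->Call(tasm->EntryFromBuiltinIndexAsOperand(builtin));
}

}  // namespace internal
}  // namespace v8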
......@@ -125,6 +125,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void Move(Register dst, Smi src) { Move(dst, Immediate(src)); }
void Move(Register dst, Handle<HeapObject> src);
void Move(Register dst, Register src);
void Move(Register dst, Operand src);
void Move(Operand dst, const Immediate& src);
// Move an immediate into an XMM register.
......@@ -133,7 +134,10 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void Move(XMMRegister dst, float src) { Move(dst, bit_cast<uint32_t>(src)); }
void Move(XMMRegister dst, double src) { Move(dst, bit_cast<uint64_t>(src)); }
Operand EntryFromBuiltinIndexAsOperand(Builtins::Name builtin_index);
void Call(Register reg) { call(reg); }
void Call(Operand op) { call(op); }
void Call(Label* target) { call(target); }
void Call(Handle<Code> code_object, RelocInfo::Mode rmode);
......@@ -189,6 +193,10 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
}
void SmiUntag(Register reg) { sar(reg, kSmiTagSize); }
  void SmiUntag(Register output, Register value) {
    mov(output, value);
    SmiUntag(output);
  }
// Removes current frame and its arguments from the stack preserving the
// arguments and a return address pushed to the stack for the next call. Both
......@@ -243,6 +251,13 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void PushReturnAddressFrom(Register src) { push(src); }
void PopReturnAddressTo(Register dst) { pop(dst); }
  void PushReturnAddressFrom(XMMRegister src, Register scratch) {
    Push(src, scratch);
  }
  void PopReturnAddressTo(XMMRegister dst, Register scratch) {
    Pop(dst, scratch);
  }
void Ret();
// Root register utility functions.
......@@ -712,6 +727,17 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void Push(Immediate value);
void Push(Handle<HeapObject> handle) { push(Immediate(handle)); }
void Push(Smi smi) { Push(Immediate(smi)); }
  void Push(XMMRegister src, Register scratch) {
    movd(scratch, src);
    push(scratch);
  }

  void Pop(Register dst) { pop(dst); }
  void Pop(Operand dst) { pop(dst); }
  void Pop(XMMRegister dst, Register scratch) {
    pop(scratch);
    movd(dst, scratch);
  }
void SaveRegisters(RegList registers);
void RestoreRegisters(RegList registers);
......@@ -993,9 +1019,6 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
// from the stack, clobbering only the esp register.
void Drop(int element_count);
void Pop(Register dst) { pop(dst); }
void Pop(Operand dst) { pop(dst); }
// ---------------------------------------------------------------------------
// In-place weak references.
void LoadWeakValue(Register in_out, Label* target_if_cleared);
......
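The XMM-based Push/Pop and return-address helpers added above exist because ia32 has very few general-purpose registers; an XMM register can hold a value temporarily without touching the stack, the same trick VerifyFrameSize in the new baseline compiler file uses with eax and xmm0. Below is a sketch of the intended pattern; the function name and the xmm0/ecx choices are assumptions, not code from this CL.

#include "src/codegen/macro-assembler.h"

namespace v8 {
namespace internal {

// Hypothetical example: park the return address in xmm0 while rearranging the
// stack, then put it back on top.
void ShuffleArgsBelowReturnAddress(TurboAssembler* tasm) {
  tasm->PopReturnAddressTo(xmm0, ecx);     // pop into ecx, then movd ecx -> xmm0
  tasm->Push(Immediate(0));                // ... push or drop arguments here ...
  tasm->PushReturnAddressFrom(xmm0, ecx);  // movd xmm0 -> ecx, then push ecx
}

}  // namespace internal
}  // namespace v8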
......@@ -334,7 +334,14 @@ void StoreTransitionDescriptor::InitializePlatformSpecific(
void BaselineOutOfLinePrologueDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// TODO(v8:11421): Implement on other platforms.
#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64
#if V8_TARGET_ARCH_IA32
  // TODO(v8:11503): Use register names that can be defined in each
  // architecture independently of the interpreter registers.
  Register registers[] = {kContextRegister, kJSFunctionRegister,
                          kJavaScriptCallArgCountRegister, ecx,
                          kJavaScriptCallNewTargetRegister};
  data->InitializePlatformSpecific(kParameterCount, registers);
#elif V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64
Register registers[] = {
kContextRegister, kJSFunctionRegister, kJavaScriptCallArgCountRegister,
kInterpreterBytecodeArrayRegister, kJavaScriptCallNewTargetRegister};
......@@ -347,7 +354,7 @@ void BaselineOutOfLinePrologueDescriptor::InitializePlatformSpecific(
void BaselineLeaveFrameDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// TODO(v8:11421): Implement on other platforms.
#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64
#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64
Register registers[] = {ParamsSizeRegister(), WeightRegister()};
data->InitializePlatformSpecific(kParameterCount, registers);
#else
......
......@@ -163,7 +163,7 @@ struct MaybeBoolFlag {
#define ENABLE_CONTROL_FLOW_INTEGRITY_BOOL false
#endif
#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64
#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64
#define ENABLE_SPARKPLUG true
#else
// TODO(v8:11421): Enable Sparkplug for other architectures
......