Commit f5316032 authored by Igor Sheludko, committed by Commit Bot

[sparkplug][arm] Port Sparkplug to arm

Bug: v8:11421
Change-Id: Ia4d3a20b9fdb5bc262cf480ece6e189aedff388f
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2762143
Commit-Queue: Igor Sheludko <ishell@chromium.org>
Reviewed-by: Leszek Swirski <leszeks@chromium.org>
Cr-Commit-Position: refs/heads/master@{#73488}
parent 7d2ac7b4
......
// Copyright 2021 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_BASELINE_ARM_BASELINE_ASSEMBLER_ARM_INL_H_
#define V8_BASELINE_ARM_BASELINE_ASSEMBLER_ARM_INL_H_
#include "src/baseline/baseline-assembler.h"
#include "src/codegen/arm/assembler-arm-inl.h"
#include "src/codegen/interface-descriptors.h"
namespace v8 {
namespace internal {
namespace baseline {
class BaselineAssembler::ScratchRegisterScope {
public:
explicit ScratchRegisterScope(BaselineAssembler* assembler)
: assembler_(assembler),
prev_scope_(assembler->scratch_register_scope_),
wrapped_scope_(assembler->masm()) {
if (!assembler_->scratch_register_scope_) {
// If we haven't opened a scratch scope yet, add a couple of extra
// registers to this first one.
DCHECK(wrapped_scope_.CanAcquire());
wrapped_scope_.Include(r8, r9);
wrapped_scope_.Include(kInterpreterBytecodeOffsetRegister);
}
assembler_->scratch_register_scope_ = this;
}
~ScratchRegisterScope() { assembler_->scratch_register_scope_ = prev_scope_; }
Register AcquireScratch() { return wrapped_scope_.Acquire(); }
private:
BaselineAssembler* assembler_;
ScratchRegisterScope* prev_scope_;
UseScratchRegisterScope wrapped_scope_;
};
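For illustration, a hypothetical usage sketch (not part of this commit): scopes chain through prev_scope_, and only the outermost scope widens the underlying UseScratchRegisterScope, so nested scopes draw from the same enlarged pool.
// Illustrative only: nested scratch scopes share the widened pool.
void ExampleUse(BaselineAssembler* basm) {
  BaselineAssembler::ScratchRegisterScope outer(basm);  // Includes r8, r9.
  Register a = outer.AcquireScratch();
  {
    BaselineAssembler::ScratchRegisterScope inner(basm);  // No extra Include.
    Register b = inner.AcquireScratch();  // Drawn from the same register list.
  }  // Registers acquired by |inner| become available again here.
}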
// TODO(v8:11429,leszeks): Unify condition names in the MacroAssembler.
enum class Condition : uint32_t {
kEqual = static_cast<uint32_t>(eq),
kNotEqual = static_cast<uint32_t>(ne),
kLessThan = static_cast<uint32_t>(lt),
kGreaterThan = static_cast<uint32_t>(gt),
kLessThanEqual = static_cast<uint32_t>(le),
kGreaterThanEqual = static_cast<uint32_t>(ge),
kUnsignedLessThan = static_cast<uint32_t>(lo),
kUnsignedGreaterThan = static_cast<uint32_t>(hi),
kUnsignedLessThanEqual = static_cast<uint32_t>(ls),
kUnsignedGreaterThanEqual = static_cast<uint32_t>(hs),
kOverflow = static_cast<uint32_t>(vs),
kNoOverflow = static_cast<uint32_t>(vc),
kZero = static_cast<uint32_t>(eq),
kNotZero = static_cast<uint32_t>(ne),
};
inline internal::Condition AsMasmCondition(Condition cond) {
// This is important for arm, where each internal::Condition value
// represents an encoded bit field value.
STATIC_ASSERT(sizeof(internal::Condition) == sizeof(Condition));
return static_cast<internal::Condition>(cond);
}
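The cast above is value-preserving because on arm the internal::Condition enumerators carry the architectural 4-bit condition code pre-shifted into the top nibble of the instruction word, which is also why the cross-platform enum is widened from uint8_t to uint32_t in the hunks further down. A reference sketch of the encoding (architectural condition codes; the pre-shifted enumerator values are an assumption based on the sizeof check above):
//   eq = 0 << 28, ne = 1 << 28, hs = 2 << 28, lo = 3 << 28,
//   vs = 6 << 28, vc = 7 << 28, hi = 8 << 28, ls = 9 << 28,
//   ge = 10 << 28, lt = 11 << 28, gt = 12 << 28, le = 13 << 28
// e.g. Condition::kUnsignedGreaterThanEqual casts to hs, a value that no
// longer fits in a uint8_t.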
namespace detail {
#ifdef DEBUG
inline bool Clobbers(Register target, MemOperand op) {
return op.rn() == target || op.rm() == target;
}
#endif
} // namespace detail
#define __ masm_->
MemOperand BaselineAssembler::RegisterFrameOperand(
interpreter::Register interpreter_register) {
return MemOperand(fp, interpreter_register.ToOperand() * kSystemPointerSize);
}
MemOperand BaselineAssembler::FeedbackVectorOperand() {
return MemOperand(fp, BaselineFrameConstants::kFeedbackVectorFromFp);
}
void BaselineAssembler::Bind(Label* label) {
// All baseline compiler binds on arm are assumed to be for jump targets.
__ bind(label);
}
void BaselineAssembler::Jump(Label* target, Label::Distance distance) {
__ b(target);
}
void BaselineAssembler::JumpIf(Condition cc, Label* target, Label::Distance) {
__ b(AsMasmCondition(cc), target);
}
void BaselineAssembler::JumpIfRoot(Register value, RootIndex index,
Label* target, Label::Distance) {
__ JumpIfRoot(value, index, target);
}
void BaselineAssembler::JumpIfNotRoot(Register value, RootIndex index,
Label* target, Label::Distance) {
__ JumpIfNotRoot(value, index, target);
}
void BaselineAssembler::JumpIfSmi(Register value, Label* target,
Label::Distance) {
__ JumpIfSmi(value, target);
}
void BaselineAssembler::JumpIfNotSmi(Register value, Label* target,
Label::Distance) {
__ JumpIfNotSmi(value, target);
}
void BaselineAssembler::CallBuiltin(Builtins::Name builtin) {
// __ CallBuiltin(static_cast<int>(builtin));
__ RecordCommentForOffHeapTrampoline(builtin);
ScratchRegisterScope temps(this);
Register temp = temps.AcquireScratch();
__ LoadEntryFromBuiltinIndex(builtin, temp);
__ Call(temp);
if (FLAG_code_comments) __ RecordComment("]");
}
void BaselineAssembler::TailCallBuiltin(Builtins::Name builtin) {
__ RecordCommentForOffHeapTrampoline(builtin);
ScratchRegisterScope temps(this);
Register temp = temps.AcquireScratch();
__ LoadEntryFromBuiltinIndex(builtin, temp);
__ Jump(temp);
if (FLAG_code_comments) __ RecordComment("]");
}
void BaselineAssembler::Test(Register value, int mask) {
__ tst(value, Operand(mask));
}
void BaselineAssembler::CmpObjectType(Register object,
InstanceType instance_type,
Register map) {
ScratchRegisterScope temps(this);
Register type = temps.AcquireScratch();
__ CompareObjectType(object, map, type, instance_type);
}
void BaselineAssembler::CmpInstanceType(Register map,
InstanceType instance_type) {
ScratchRegisterScope temps(this);
Register type = temps.AcquireScratch();
if (emit_debug_code()) {
__ AssertNotSmi(map);
__ CompareObjectType(map, type, type, MAP_TYPE);
__ Assert(eq, AbortReason::kUnexpectedValue);
}
__ CompareInstanceType(map, type, instance_type);
}
void BaselineAssembler::Cmp(Register value, Smi smi) {
__ cmp(value, Operand(smi));
}
void BaselineAssembler::ComparePointer(Register value, MemOperand operand) {
ScratchRegisterScope temps(this);
Register tmp = temps.AcquireScratch();
__ ldr(tmp, operand);
__ cmp(value, tmp);
}
void BaselineAssembler::SmiCompare(Register lhs, Register rhs) {
__ AssertSmi(lhs);
__ AssertSmi(rhs);
__ cmp(lhs, rhs);
}
void BaselineAssembler::CompareTagged(Register value, MemOperand operand) {
ScratchRegisterScope temps(this);
Register tmp = temps.AcquireScratch();
__ ldr(tmp, operand);
__ cmp(value, tmp);
}
void BaselineAssembler::CompareTagged(MemOperand operand, Register value) {
ScratchRegisterScope temps(this);
Register tmp = temps.AcquireScratch();
__ ldr(tmp, operand);
__ cmp(tmp, value);
}
void BaselineAssembler::CompareByte(Register value, int32_t byte) {
__ cmp(value, Operand(byte));
}
void BaselineAssembler::Move(interpreter::Register output, Register source) {
Move(RegisterFrameOperand(output), source);
}
void BaselineAssembler::Move(Register output, TaggedIndex value) {
__ mov(output, Operand(value.ptr()));
}
void BaselineAssembler::Move(MemOperand output, Register source) {
__ str(source, output);
}
void BaselineAssembler::Move(Register output, ExternalReference reference) {
__ mov(output, Operand(reference));
}
void BaselineAssembler::Move(Register output, Handle<HeapObject> value) {
__ mov(output, Operand(value));
}
void BaselineAssembler::Move(Register output, int32_t value) {
__ mov(output, Operand(value));
}
void BaselineAssembler::MoveMaybeSmi(Register output, Register source) {
__ mov(output, source);
}
void BaselineAssembler::MoveSmi(Register output, Register source) {
__ mov(output, source);
}
namespace detail {
template <typename Arg>
inline Register ToRegister(BaselineAssembler* basm,
BaselineAssembler::ScratchRegisterScope* scope,
Arg arg) {
Register reg = scope->AcquireScratch();
basm->Move(reg, arg);
return reg;
}
inline Register ToRegister(BaselineAssembler* basm,
BaselineAssembler::ScratchRegisterScope* scope,
Register reg) {
return reg;
}
template <typename... Args>
struct PushAllHelper;
template <>
struct PushAllHelper<> {
static int Push(BaselineAssembler* basm) { return 0; }
static int PushReverse(BaselineAssembler* basm) { return 0; }
};
// TODO(ishell): try to pack a sequence of pushes into one instruction by
// looking at register codes. For example, Push(r1, r2, r5, r0, r3, r4)
// could be generated as two pushes: Push(r1, r2, r5) and Push(r0, r3, r4).
template <typename Arg>
struct PushAllHelper<Arg> {
static int Push(BaselineAssembler* basm, Arg arg) {
BaselineAssembler::ScratchRegisterScope scope(basm);
basm->masm()->Push(ToRegister(basm, &scope, arg));
return 1;
}
static int PushReverse(BaselineAssembler* basm, Arg arg) {
return Push(basm, arg);
}
};
// TODO(ishell): try to pack a sequence of pushes into one instruction by
// looking at register codes. For example, Push(r1, r2, r5, r0, r3, r4)
// could be generated as two pushes: Push(r1, r2, r5) and Push(r0, r3, r4).
template <typename Arg, typename... Args>
struct PushAllHelper<Arg, Args...> {
static int Push(BaselineAssembler* basm, Arg arg, Args... args) {
PushAllHelper<Arg>::Push(basm, arg);
return 1 + PushAllHelper<Args...>::Push(basm, args...);
}
static int PushReverse(BaselineAssembler* basm, Arg arg, Args... args) {
int nargs = PushAllHelper<Args...>::PushReverse(basm, args...);
PushAllHelper<Arg>::Push(basm, arg);
return nargs + 1;
}
};
template <>
struct PushAllHelper<interpreter::RegisterList> {
static int Push(BaselineAssembler* basm, interpreter::RegisterList list) {
for (int reg_index = 0; reg_index < list.register_count(); ++reg_index) {
PushAllHelper<interpreter::Register>::Push(basm, list[reg_index]);
}
return list.register_count();
}
static int PushReverse(BaselineAssembler* basm,
interpreter::RegisterList list) {
for (int reg_index = list.register_count() - 1; reg_index >= 0;
--reg_index) {
PushAllHelper<interpreter::Register>::Push(basm, list[reg_index]);
}
return list.register_count();
}
};
template <typename... T>
struct PopAllHelper;
template <>
struct PopAllHelper<> {
static void Pop(BaselineAssembler* basm) {}
};
// TODO(ishell): try to pack a sequence of pops into one instruction by
// looking at register codes. For example, Pop(r1, r2, r5, r0, r3, r4)
// could be generated as two pops: Pop(r1, r2, r5) and Pop(r0, r3, r4).
template <>
struct PopAllHelper<Register> {
static void Pop(BaselineAssembler* basm, Register reg) {
basm->masm()->Pop(reg);
}
};
template <typename... T>
struct PopAllHelper<Register, T...> {
static void Pop(BaselineAssembler* basm, Register reg, T... tail) {
PopAllHelper<Register>::Pop(basm, reg);
PopAllHelper<T...>::Pop(basm, tail...);
}
};
} // namespace detail
template <typename... T>
int BaselineAssembler::Push(T... vals) {
return detail::PushAllHelper<T...>::Push(this, vals...);
}
template <typename... T>
void BaselineAssembler::PushReverse(T... vals) {
detail::PushAllHelper<T...>::PushReverse(this, vals...);
}
template <typename... T>
void BaselineAssembler::Pop(T... registers) {
detail::PopAllHelper<T...>::Pop(this, registers...);
}
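As a hypothetical call site (not from this commit), the variadic helpers expand one argument at a time, with detail::ToRegister materializing non-Register arguments into a scratch register first:
// Illustrative only: expands to three single-register pushes.
basm.Push(kContextRegister,            // A Register is pushed as-is.
          TaggedIndex::FromIntptr(7),  // Moved into a scratch, then pushed.
          42);                         // An int32_t likewise goes via a scratch.
// Push returns the number of slots pushed (3 here); PushReverse pushes the
// same arguments right-to-left.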
void BaselineAssembler::LoadTaggedPointerField(Register output, Register source,
int offset) {
__ ldr(output, FieldMemOperand(source, offset));
}
void BaselineAssembler::LoadTaggedSignedField(Register output, Register source,
int offset) {
__ ldr(output, FieldMemOperand(source, offset));
}
void BaselineAssembler::LoadTaggedAnyField(Register output, Register source,
int offset) {
__ ldr(output, FieldMemOperand(source, offset));
}
void BaselineAssembler::LoadByteField(Register output, Register source,
int offset) {
__ ldrb(output, FieldMemOperand(source, offset));
}
void BaselineAssembler::StoreTaggedSignedField(Register target, int offset,
Smi value) {
ScratchRegisterScope temps(this);
Register tmp = temps.AcquireScratch();
__ mov(tmp, Operand(value));
__ str(tmp, FieldMemOperand(target, offset));
}
void BaselineAssembler::StoreTaggedFieldWithWriteBarrier(Register target,
int offset,
Register value) {
__ str(value, FieldMemOperand(target, offset));
__ RecordWriteField(target, offset, value, kLRHasNotBeenSaved,
kDontSaveFPRegs);
}
void BaselineAssembler::StoreTaggedFieldNoWriteBarrier(Register target,
int offset,
Register value) {
__ str(value, FieldMemOperand(target, offset));
}
void BaselineAssembler::AddToInterruptBudget(int32_t weight) {
ScratchRegisterScope scratch_scope(this);
Register feedback_cell = scratch_scope.AcquireScratch();
LoadFunction(feedback_cell);
LoadTaggedPointerField(feedback_cell, feedback_cell,
JSFunction::kFeedbackCellOffset);
Register interrupt_budget = scratch_scope.AcquireScratch();
__ ldr(interrupt_budget,
FieldMemOperand(feedback_cell, FeedbackCell::kInterruptBudgetOffset));
// Remember to set flags as part of the add!
__ add(interrupt_budget, interrupt_budget, Operand(weight), SetCC);
__ str(interrupt_budget,
FieldMemOperand(feedback_cell, FeedbackCell::kInterruptBudgetOffset));
}
void BaselineAssembler::AddToInterruptBudget(Register weight) {
ScratchRegisterScope scratch_scope(this);
Register feedback_cell = scratch_scope.AcquireScratch();
LoadFunction(feedback_cell);
LoadTaggedPointerField(feedback_cell, feedback_cell,
JSFunction::kFeedbackCellOffset);
Register interrupt_budget = scratch_scope.AcquireScratch();
__ ldr(interrupt_budget,
FieldMemOperand(feedback_cell, FeedbackCell::kInterruptBudgetOffset));
// Remember to set flags as part of the add!
__ add(interrupt_budget, interrupt_budget, weight, SetCC);
__ str(interrupt_budget,
FieldMemOperand(feedback_cell, FeedbackCell::kInterruptBudgetOffset));
}
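The SetCC on these adds is deliberate: EmitReturn below branches on the resulting flags immediately after AddToInterruptBudget, so the budget sign check costs no separate compare. Condensed from the code below, for reference:
// basm.AddToInterruptBudget(weight);                 // add ..., ..., SetCC
// basm.JumpIf(Condition::kGreaterThanEqual, &skip);  // Budget still >= 0.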
void BaselineAssembler::AddSmi(Register lhs, Smi rhs) {
__ add(lhs, lhs, Operand(rhs));
}
void BaselineAssembler::Switch(Register reg, int case_value_base,
Label** labels, int num_labels) {
Label fallthrough;
if (case_value_base > 0) {
__ sub(reg, reg, Operand(case_value_base));
}
// Mostly copied from code-generator-arm.cc
ScratchRegisterScope scope(this);
__ cmp(reg, Operand(num_labels));
JumpIf(Condition::kUnsignedGreaterThanEqual, &fallthrough);
// Make sure any pending constant pool is emitted first if necessary.
__ CheckConstPool(true, true);
__ BlockConstPoolFor(num_labels);
int entry_size_log2 = 2;
__ add(pc, pc, Operand(reg, LSL, entry_size_log2), LeaveCC, lo);
__ b(&fallthrough);
for (int i = 0; i < num_labels; ++i) {
__ b(labels[i]);
}
__ bind(&fallthrough);
}
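For reference, a walkthrough of the jump table above (an annotation, not new code): on arm, reading pc yields the address of the current instruction plus 8, i.e. two instructions ahead, which is exactly where the table of branches begins.
//   A+0:  add pc, pc, reg, lsl #2 (cond lo) ; pc reads as A+8
//   A+4:  b fallthrough                     ; reached only if the add is skipped
//   A+8:  b labels[0]                       ; reg == 0 lands here
//   A+12: b labels[1]                       ; reg == 1, and so on
// The lo predicate on the add (together with the bounds check above it) keeps
// out-of-range indices from indexing past the table, and BlockConstPoolFor
// ensures no constant pool is emitted in the middle of the table.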
#undef __
#define __ basm.
void BaselineAssembler::EmitReturn(MacroAssembler* masm) {
BaselineAssembler basm(masm);
Register weight = BaselineLeaveFrameDescriptor::WeightRegister();
Register params_size = BaselineLeaveFrameDescriptor::ParamsSizeRegister();
__ RecordComment("[ Update Interrupt Budget");
__ AddToInterruptBudget(weight);
// Use the flags set by the add inside AddToInterruptBudget.
Label skip_interrupt_label;
__ JumpIf(Condition::kGreaterThanEqual, &skip_interrupt_label);
{
__ masm()->SmiTag(params_size);
__ Push(params_size, kInterpreterAccumulatorRegister);
__ LoadContext(kContextRegister);
__ LoadFunction(kJSFunctionRegister);
__ Push(kJSFunctionRegister);
__ CallRuntime(Runtime::kBytecodeBudgetInterruptFromBytecode, 1);
__ Pop(kInterpreterAccumulatorRegister, params_size);
__ masm()->SmiUntag(params_size);
}
__ RecordComment("]");
__ Bind(&skip_interrupt_label);
BaselineAssembler::ScratchRegisterScope temps(&basm);
Register actual_params_size = temps.AcquireScratch();
// Compute the size of the actual parameters + receiver (in bytes).
__ Move(actual_params_size,
MemOperand(fp, StandardFrameConstants::kArgCOffset));
// If actual is bigger than formal, then we should use it to free up the stack
// arguments.
Label corrected_args_count;
__ masm()->cmp(params_size, actual_params_size);
__ JumpIf(Condition::kGreaterThanEqual, &corrected_args_count);
__ masm()->mov(params_size, actual_params_size);
__ Bind(&corrected_args_count);
// Leave the frame (also dropping the register file).
__ masm()->LeaveFrame(StackFrame::BASELINE);
// Drop receiver + arguments.
__ masm()->add(params_size, params_size,
Operand(1)); // Include the receiver.
__ masm()->Drop(params_size);
__ masm()->Ret();
}
#undef __
} // namespace baseline
} // namespace internal
} // namespace v8
#endif // V8_BASELINE_ARM_BASELINE_ASSEMBLER_ARM_INL_H_
// Copyright 2021 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_BASELINE_ARM_BASELINE_COMPILER_ARM_INL_H_
#define V8_BASELINE_ARM_BASELINE_COMPILER_ARM_INL_H_
#include "src/base/logging.h"
#include "src/baseline/baseline-compiler.h"
namespace v8 {
namespace internal {
namespace baseline {
#define __ basm_.
void BaselineCompiler::Prologue() {
__ masm()->mov(kInterpreterBytecodeArrayRegister, Operand(bytecode_));
DCHECK_EQ(kJSFunctionRegister, kJavaScriptCallTargetRegister);
// Enter the frame here, since CallBuiltin will override lr.
__ masm()->EnterFrame(StackFrame::BASELINE);
CallBuiltin(Builtins::kBaselineOutOfLinePrologue, kContextRegister,
kJSFunctionRegister, kJavaScriptCallArgCountRegister,
kInterpreterBytecodeArrayRegister,
kJavaScriptCallNewTargetRegister);
PrologueFillFrame();
}
void BaselineCompiler::PrologueFillFrame() {
__ RecordComment("[ Fill frame");
// Inlined register frame fill
interpreter::Register new_target_or_generator_register =
bytecode_->incoming_new_target_or_generator_register();
__ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kUndefinedValue);
int register_count = bytecode_->register_count();
// Magic unroll factor for the frame-fill loop below.
const int kLoopUnrollSize = 8;
const int new_target_index = new_target_or_generator_register.index();
const bool has_new_target = new_target_index != kMaxInt;
if (has_new_target) {
DCHECK_LE(new_target_index, register_count);
for (int i = 0; i < new_target_index; i++) {
__ Push(kInterpreterAccumulatorRegister);
}
// Push new_target_or_generator.
__ Push(kJavaScriptCallNewTargetRegister);
register_count -= new_target_index + 1;
}
if (register_count < 2 * kLoopUnrollSize) {
// If the frame is small enough, just unroll the frame fill completely.
for (int i = 0; i < register_count; ++i) {
__ Push(kInterpreterAccumulatorRegister);
}
} else {
// Extract the first few registers to round to the unroll size.
int first_registers = register_count % kLoopUnrollSize;
for (int i = 0; i < first_registers; ++i) {
__ Push(kInterpreterAccumulatorRegister);
}
BaselineAssembler::ScratchRegisterScope temps(&basm_);
Register scratch = temps.AcquireScratch();
__ Move(scratch, register_count / kLoopUnrollSize);
// We enter the loop unconditionally, so make sure we need to loop at least
// once.
DCHECK_GT(register_count / kLoopUnrollSize, 0);
Label loop;
__ Bind(&loop);
for (int i = 0; i < kLoopUnrollSize; ++i) {
__ Push(kInterpreterAccumulatorRegister);
}
__ masm()->sub(scratch, scratch, Operand(1), SetCC);
__ JumpIf(Condition::kGreaterThan, &loop);
}
__ RecordComment("]");
}
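A worked example of the fill strategy above, with illustrative numbers:
//   register_count = 19, kLoopUnrollSize = 8        (illustrative)
//   first_registers = 19 % 8 = 3  -> three unrolled pushes up front
//   scratch         = 19 / 8 = 2  -> the loop runs twice, 8 pushes per pass
//   total           = 3 + 2 * 8 = 19 frame slots filled with undefined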
void BaselineCompiler::VerifyFrameSize() {
BaselineAssembler::ScratchRegisterScope temps(&basm_);
Register scratch = temps.AcquireScratch();
__ masm()->add(scratch, sp,
Operand(InterpreterFrameConstants::kFixedFrameSizeFromFp +
bytecode_->frame_size()));
__ masm()->cmp(scratch, fp);
__ masm()->Assert(eq, AbortReason::kUnexpectedStackPointer);
}
} // namespace baseline
} // namespace internal
} // namespace v8
#endif // V8_BASELINE_ARM_BASELINE_COMPILER_ARM_INL_H_
......@@ -37,7 +37,7 @@ class BaselineAssembler::ScratchRegisterScope {
};
// TODO(v8:11461): Unify condition names in the MacroAssembler.
enum class Condition : uint8_t {
enum class Condition : uint32_t {
kEqual = eq,
kNotEqual = ne,
......
......@@ -7,7 +7,8 @@
// TODO(v8:11421): Remove #if once baseline compiler is ported to other
// architectures.
#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64
#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64 || \
V8_TARGET_ARCH_ARM
#include <type_traits>
#include <unordered_map>
......@@ -24,6 +25,8 @@
#include "src/baseline/arm64/baseline-assembler-arm64-inl.h"
#elif V8_TARGET_ARCH_IA32
#include "src/baseline/ia32/baseline-assembler-ia32-inl.h"
#elif V8_TARGET_ARCH_ARM
#include "src/baseline/arm/baseline-assembler-arm-inl.h"
#else
#error Unsupported target architecture.
#endif
......
// Copyright 2021 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
......@@ -8,7 +7,8 @@
// TODO(v8:11421): Remove #if once baseline compiler is ported to other
// architectures.
#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64
#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64 || \
V8_TARGET_ARCH_ARM
#include "src/codegen/macro-assembler.h"
#include "src/objects/tagged-index.h"
......@@ -17,7 +17,7 @@ namespace v8 {
namespace internal {
namespace baseline {
enum class Condition : uint8_t;
enum class Condition : uint32_t;
class BaselineAssembler {
public:
......
......@@ -4,7 +4,8 @@
// TODO(v8:11421): Remove #if once baseline compiler is ported to other
// architectures.
#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64
#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64 || \
V8_TARGET_ARCH_ARM
#include "src/baseline/baseline-compiler.h"
......@@ -36,6 +37,8 @@
#include "src/baseline/arm64/baseline-compiler-arm64-inl.h"
#elif V8_TARGET_ARCH_IA32
#include "src/baseline/ia32/baseline-compiler-ia32-inl.h"
#elif V8_TARGET_ARCH_ARM
#include "src/baseline/arm/baseline-compiler-arm-inl.h"
#else
#error Unsupported target architecture.
#endif
......
......@@ -7,7 +7,8 @@
// TODO(v8:11421): Remove #if once baseline compiler is ported to other
// architectures.
#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64
#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64 || \
V8_TARGET_ARCH_ARM
#include "src/base/logging.h"
#include "src/base/threaded-list.h"
......
......@@ -6,7 +6,8 @@
// TODO(v8:11421): Remove #if once baseline compiler is ported to other
// architectures.
#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64
#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64 || \
V8_TARGET_ARCH_ARM
#include "src/baseline/baseline-assembler-inl.h"
#include "src/baseline/baseline-compiler.h"
......
......@@ -43,7 +43,7 @@ class BaselineAssembler::ScratchRegisterScope {
};
// TODO(v8:11461): Unify condition names in the MacroAssembler.
enum class Condition : uint8_t {
enum class Condition : uint32_t {
kEqual = equal,
kNotEqual = not_equal,
......
......@@ -5,6 +5,7 @@
#ifndef V8_BASELINE_X64_BASELINE_ASSEMBLER_X64_INL_H_
#define V8_BASELINE_X64_BASELINE_ASSEMBLER_X64_INL_H_
#include "src/base/macros.h"
#include "src/baseline/baseline-assembler.h"
#include "src/codegen/interface-descriptors.h"
#include "src/codegen/x64/register-x64.h"
......@@ -45,7 +46,7 @@ class BaselineAssembler::ScratchRegisterScope {
};
// TODO(v8:11461): Unify condition names in the MacroAssembler.
enum class Condition : uint8_t {
enum class Condition : uint32_t {
kEqual = equal,
kNotEqual = not_equal,
......
......@@ -309,12 +309,15 @@ void Builtins::Generate_JSBuiltinsConstructStub(MacroAssembler* masm) {
Generate_JSBuiltinsConstructStubHelper(masm);
}
static void GetSharedFunctionInfoBytecode(MacroAssembler* masm,
Register sfi_data,
Register scratch1) {
static void GetSharedFunctionInfoBytecodeOrBaseline(MacroAssembler* masm,
Register sfi_data,
Register scratch1,
Label* is_baseline) {
Label done;
__ CompareObjectType(sfi_data, scratch1, scratch1, INTERPRETER_DATA_TYPE);
__ CompareObjectType(sfi_data, scratch1, scratch1, BASELINE_DATA_TYPE);
__ b(eq, is_baseline);
__ cmp(scratch1, Operand(INTERPRETER_DATA_TYPE));
__ b(ne, &done);
__ ldr(sfi_data,
FieldMemOperand(sfi_data, InterpreterData::kBytecodeArrayOffset));
......@@ -404,11 +407,13 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
// Underlying function needs to have bytecode available.
if (FLAG_debug_code) {
Label is_baseline;
__ ldr(r3, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
__ ldr(r3, FieldMemOperand(r3, SharedFunctionInfo::kFunctionDataOffset));
GetSharedFunctionInfoBytecode(masm, r3, r0);
GetSharedFunctionInfoBytecodeOrBaseline(masm, r3, r0, &is_baseline);
__ CompareObjectType(r3, r3, r3, BYTECODE_ARRAY_TYPE);
__ Assert(eq, AbortReason::kMissingBytecodeArray);
__ bind(&is_baseline);
}
// Resume (Ignition/TurboFan) generator object.
......@@ -992,6 +997,23 @@ static void AdvanceBytecodeOffsetOrReturn(MacroAssembler* masm,
__ bind(&end);
}
// Read off the optimization state in the feedback vector and check if there
// is optimized code or an optimization marker that needs to be processed.
static void LoadOptimizationStateAndJumpIfNeedsProcessing(
MacroAssembler* masm, Register optimization_state, Register feedback_vector,
Label* has_optimized_code_or_marker) {
__ RecordComment("[ Check optimization state");
__ ldr(optimization_state,
FieldMemOperand(feedback_vector, FeedbackVector::kFlagsOffset));
__ tst(
optimization_state,
Operand(FeedbackVector::kHasOptimizedCodeOrCompileOptimizedMarkerMask));
__ b(ne, has_optimized_code_or_marker);
__ RecordComment("]");
}
static void MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(
MacroAssembler* masm, Register optimization_state,
Register feedback_vector) {
......@@ -1014,6 +1036,158 @@ static void MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(
TailCallOptimizedCodeSlot(masm, optimized_code_entry, r6);
}
// static
void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
UseScratchRegisterScope temps(masm);
// Need a few extra registers
temps.Include(r8, r9);
auto descriptor = Builtins::CallInterfaceDescriptorFor(
Builtins::kBaselineOutOfLinePrologue);
Register closure = descriptor.GetRegisterParameter(
BaselineOutOfLinePrologueDescriptor::kClosure);
// Load the feedback vector from the closure.
Register feedback_vector = temps.Acquire();
__ ldr(feedback_vector,
FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
__ ldr(feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset));
if (__ emit_debug_code()) {
UseScratchRegisterScope temps(masm);
Register scratch = temps.Acquire();
__ CompareObjectType(feedback_vector, scratch, scratch,
FEEDBACK_VECTOR_TYPE);
__ Assert(eq, AbortReason::kExpectedFeedbackVector);
}
// Check for an optimization marker.
Label has_optimized_code_or_marker;
Register optimization_state = no_reg;
{
UseScratchRegisterScope temps(masm);
// optimization_state is used only on the |has_optimized_code_or_marker|
// path; outside of it the register can be reused.
optimization_state = temps.Acquire();
LoadOptimizationStateAndJumpIfNeedsProcessing(
masm, optimization_state, feedback_vector,
&has_optimized_code_or_marker);
}
// Increment invocation count for the function.
{
UseScratchRegisterScope temps(masm);
Register invocation_count = temps.Acquire();
__ ldr(invocation_count,
FieldMemOperand(feedback_vector,
FeedbackVector::kInvocationCountOffset));
__ add(invocation_count, invocation_count, Operand(1));
__ str(invocation_count,
FieldMemOperand(feedback_vector,
FeedbackVector::kInvocationCountOffset));
}
__ RecordComment("[ Frame Setup");
FrameScope frame_scope(masm, StackFrame::MANUAL);
// Normally the first thing we'd do here is Push(lr, fp), but we already
// entered the frame in BaselineCompiler::Prologue, as we had to use the
// value lr before the call to this BaselineOutOfLinePrologue builtin.
Register callee_context = descriptor.GetRegisterParameter(
BaselineOutOfLinePrologueDescriptor::kCalleeContext);
Register callee_js_function = descriptor.GetRegisterParameter(
BaselineOutOfLinePrologueDescriptor::kClosure);
__ Push(callee_context, callee_js_function);
DCHECK_EQ(callee_js_function, kJavaScriptCallTargetRegister);
DCHECK_EQ(callee_js_function, kJSFunctionRegister);
Register argc = descriptor.GetRegisterParameter(
BaselineOutOfLinePrologueDescriptor::kJavaScriptCallArgCount);
// We'll use the bytecode array both for resetting code age/OSR and for
// pushing onto the frame, so load it into a register.
Register bytecodeArray = descriptor.GetRegisterParameter(
BaselineOutOfLinePrologueDescriptor::kInterpreterBytecodeArray);
// Reset code age and the OSR arming. The OSR field and BytecodeAgeOffset
// are 8-bit fields next to each other, so we can clear both with a single
// 16-bit store. These static asserts guard that this assumption is valid.
STATIC_ASSERT(BytecodeArray::kBytecodeAgeOffset ==
BytecodeArray::kOsrNestingLevelOffset + kCharSize);
STATIC_ASSERT(BytecodeArray::kNoAgeBytecodeAge == 0);
{
UseScratchRegisterScope temps(masm);
Register scratch = temps.Acquire();
__ mov(scratch, Operand(0));
__ strh(scratch, FieldMemOperand(bytecodeArray,
BytecodeArray::kOsrNestingLevelOffset));
}
__ Push(argc, bytecodeArray);
// Baseline code frames store the feedback vector where the interpreter
// would store the bytecode offset.
if (__ emit_debug_code()) {
UseScratchRegisterScope temps(masm);
Register scratch = temps.Acquire();
__ CompareObjectType(feedback_vector, scratch, scratch,
FEEDBACK_VECTOR_TYPE);
__ Assert(eq, AbortReason::kExpectedFeedbackVector);
}
__ Push(feedback_vector);
__ RecordComment("]");
__ RecordComment("[ Stack/interrupt check");
Label call_stack_guard;
{
// Stack check. This folds the checks for both the interrupt stack limit
// check and the real stack limit into one by just checking for the
// interrupt limit. The interrupt limit is either equal to the real stack
// limit or tighter. By ensuring we have space until that limit after
// building the frame we can quickly precheck both at once.
UseScratchRegisterScope temps(masm);
Register frame_size = temps.Acquire();
__ ldr(frame_size,
FieldMemOperand(bytecodeArray, BytecodeArray::kFrameSizeOffset));
Register sp_minus_frame_size = frame_size;
__ sub(sp_minus_frame_size, sp, frame_size);
Register interrupt_limit = temps.Acquire();
__ LoadStackLimit(interrupt_limit, StackLimitKind::kInterruptStackLimit);
__ cmp(sp_minus_frame_size, interrupt_limit);
__ b(&call_stack_guard, lo);
__ RecordComment("]");
}
// Do "fast" return to the caller pc in lr.
__ Ret();
__ bind(&has_optimized_code_or_marker);
{
UseScratchRegisterScope temps(masm);
// Ensure the optimization_state is not allocated again.
temps.Exclude(optimization_state);
__ RecordComment("[ Optimized marker check");
// Drop the frame created by the baseline call.
__ ldm(ia_w, sp, fp.bit() | lr.bit());
MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(masm, optimization_state,
feedback_vector);
__ Trap();
__ RecordComment("]");
}
__ bind(&call_stack_guard);
{
FrameScope frame_scope(masm, StackFrame::INTERNAL);
__ RecordComment("[ Stack/interrupt call");
// Save incoming new target or generator
__ Push(kJavaScriptCallNewTargetRegister);
__ CallRuntime(Runtime::kStackGuard);
__ Pop(kJavaScriptCallNewTargetRegister);
__ RecordComment("]");
}
__ Ret();
}
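For orientation, the baseline frame as built by this prologue (sketched from the pushes above; slot names are descriptive, not V8 constants):
//   higher addresses
//     saved lr, saved fp    <- EnterFrame(BASELINE) in BaselineCompiler::Prologue
//     callee context        <- Push(callee_context, callee_js_function)
//     callee JSFunction
//     argument count        <- Push(argc, bytecodeArray)
//     bytecode array
//     feedback vector       <- Push(feedback_vector); lives where the
//   lower addresses            interpreter keeps its bytecode offset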
// Generate code for entering a JS function with the interpreter.
// On entry to the function the receiver and arguments have been pushed on the
// stack left to right.
......@@ -1038,7 +1212,10 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ ldr(r4, FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
__ ldr(kInterpreterBytecodeArrayRegister,
FieldMemOperand(r4, SharedFunctionInfo::kFunctionDataOffset));
GetSharedFunctionInfoBytecode(masm, kInterpreterBytecodeArrayRegister, r8);
Label is_baseline;
GetSharedFunctionInfoBytecodeOrBaseline(
masm, kInterpreterBytecodeArrayRegister, r8, &is_baseline);
// The bytecode array could have been flushed from the shared function info;
// if so, call into CompileLazy.
......@@ -1061,17 +1238,9 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ b(ne, &push_stack_frame);
Register optimization_state = r4;
// Read off the optimization state in the feedback vector.
__ ldr(optimization_state,
FieldMemOperand(feedback_vector, FeedbackVector::kFlagsOffset));
// Check if the optimized code slot is not empty or has an optimization marker.
Label has_optimized_code_or_marker;
__ tst(
optimization_state,
Operand(FeedbackVector::kHasOptimizedCodeOrCompileOptimizedMarkerMask));
__ b(ne, &has_optimized_code_or_marker);
LoadOptimizationStateAndJumpIfNeedsProcessing(
masm, optimization_state, feedback_vector, &has_optimized_code_or_marker);
Label not_optimized;
__ bind(&not_optimized);
......@@ -1220,6 +1389,38 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(masm, optimization_state,
feedback_vector);
__ bind(&is_baseline);
{
// Load the feedback vector from the closure.
__ ldr(feedback_vector,
FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
__ ldr(feedback_vector,
FieldMemOperand(feedback_vector, Cell::kValueOffset));
Label install_baseline_code;
// Check if feedback vector is valid. If not, call prepare for baseline to
// allocate it.
__ ldr(r8, FieldMemOperand(feedback_vector, HeapObject::kMapOffset));
__ ldrh(r8, FieldMemOperand(r8, Map::kInstanceTypeOffset));
__ cmp(r8, Operand(FEEDBACK_VECTOR_TYPE));
__ b(ne, &install_baseline_code);
// Check for an optimization marker.
LoadOptimizationStateAndJumpIfNeedsProcessing(
masm, optimization_state, feedback_vector,
&has_optimized_code_or_marker);
// Load the baseline code into the closure.
__ ldr(r2, FieldMemOperand(kInterpreterBytecodeArrayRegister,
BaselineData::kBaselineCodeOffset));
static_assert(kJavaScriptCallCodeStartRegister == r2, "ABI mismatch");
ReplaceClosureCodeWithOptimizedCode(masm, r2, closure);
__ JumpCodeObject(r2);
__ bind(&install_baseline_code);
GenerateTailCallToReturnedCode(masm, Runtime::kInstallBaselineCode);
}
__ bind(&compile_lazy);
GenerateTailCallToReturnedCode(masm, Runtime::kCompileLazy);
......@@ -1573,7 +1774,16 @@ void Builtins::Generate_NotifyDeoptimized(MacroAssembler* masm) {
__ Ret();
}
void Builtins::Generate_InterpreterOnStackReplacement(MacroAssembler* masm) {
void Builtins::Generate_TailCallOptimizedCodeSlot(MacroAssembler* masm) {
UseScratchRegisterScope temps(masm);
// Need a few extra registers
temps.Include(r8, r9);
Register optimized_code_entry = kJavaScriptCallCodeStartRegister;
TailCallOptimizedCodeSlot(masm, optimized_code_entry, temps.Acquire());
}
namespace {
void OnStackReplacement(MacroAssembler* masm, bool is_interpreter) {
{
FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
__ CallRuntime(Runtime::kCompileForOnStackReplacement);
......@@ -1587,9 +1797,11 @@ void Builtins::Generate_InterpreterOnStackReplacement(MacroAssembler* masm) {
__ bind(&skip);
// Drop the handler frame that is sitting on top of the actual
// JavaScript frame. This is the case when OSR is triggered from bytecode.
__ LeaveFrame(StackFrame::STUB);
if (is_interpreter) {
// Drop the handler frame that is sitting on top of the actual
// JavaScript frame. This is the case when OSR is triggered from bytecode.
__ LeaveFrame(StackFrame::STUB);
}
// Load deoptimization data from the code object.
// <deopt_data> = <code>[#deoptimization_data_offset]
......@@ -1611,6 +1823,15 @@ void Builtins::Generate_InterpreterOnStackReplacement(MacroAssembler* masm) {
__ Ret();
}
}
} // namespace
void Builtins::Generate_InterpreterOnStackReplacement(MacroAssembler* masm) {
return OnStackReplacement(masm, true);
}
void Builtins::Generate_BaselineOnStackReplacement(MacroAssembler* masm) {
return OnStackReplacement(masm, false);
}
// static
void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
......
......@@ -1258,7 +1258,7 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
FrameScope frame_scope(masm, StackFrame::MANUAL);
// Normally the first thing we'd do here is Push(lr, fp), but we already
// entered the frame in BaselineCompiler::Prologue, as we had to use the
// value lr had before the call to this BaselineOutOfLinePrologue builtin.
// value lr before the call to this BaselineOutOfLinePrologue builtin.
Register callee_context = descriptor.GetRegisterParameter(
BaselineOutOfLinePrologueDescriptor::kCalleeContext);
......
......@@ -930,7 +930,8 @@ void Builtins::Generate_MemMove(MacroAssembler* masm) {
// TODO(v8:11421): Remove #if once baseline compiler is ported to other
// architectures.
#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64
#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64 || \
V8_TARGET_ARCH_ARM
void Builtins::Generate_BaselineLeaveFrame(MacroAssembler* masm) {
EmitReturnBaseline(masm);
}
......
......@@ -1382,6 +1382,20 @@ class V8_EXPORT_PRIVATE V8_NODISCARD UseScratchRegisterScope {
bool CanAcquire() const { return *assembler_->GetScratchRegisterList() != 0; }
bool CanAcquireD() const { return CanAcquireVfp<DwVfpRegister>(); }
void Include(const Register& reg1, const Register& reg2 = no_reg) {
RegList* available = assembler_->GetScratchRegisterList();
DCHECK_NOT_NULL(available);
DCHECK_EQ((*available) & (reg1.bit() | reg2.bit()), 0);
*available |= reg1.bit() | reg2.bit();
}
void Exclude(const Register& reg1, const Register& reg2 = no_reg) {
RegList* available = assembler_->GetScratchRegisterList();
DCHECK_NOT_NULL(available);
DCHECK_EQ((*available) & (reg1.bit() | reg2.bit()),
reg1.bit() | reg2.bit());
*available &= ~(reg1.bit() | reg2.bit());
}
private:
friend class Assembler;
friend class TurboAssembler;
......
......@@ -86,14 +86,8 @@ const Register ApiGetterDescriptor::CallbackRegister() { return r3; }
const Register GrowArrayElementsDescriptor::ObjectRegister() { return r0; }
const Register GrowArrayElementsDescriptor::KeyRegister() { return r3; }
const Register BaselineLeaveFrameDescriptor::ParamsSizeRegister() {
// TODO(v8:11421): Implement on this platform.
UNREACHABLE();
}
const Register BaselineLeaveFrameDescriptor::WeightRegister() {
// TODO(v8:11421): Implement on this platform.
UNREACHABLE();
}
const Register BaselineLeaveFrameDescriptor::ParamsSizeRegister() { return r3; }
const Register BaselineLeaveFrameDescriptor::WeightRegister() { return r4; }
// static
const Register TypeConversionDescriptor::ArgumentRegister() { return r0; }
......@@ -220,8 +214,11 @@ void CompareDescriptor::InitializePlatformSpecific(
void Compare_BaselineDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// TODO(v8:11421): Implement on this platform.
InitializePlatformUnimplemented(data, kParameterCount);
// r1: left operand
// r0: right operand
// r2: feedback slot
Register registers[] = {r1, r0, r2};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void BinaryOpDescriptor::InitializePlatformSpecific(
......@@ -232,8 +229,11 @@ void BinaryOpDescriptor::InitializePlatformSpecific(
void BinaryOp_BaselineDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// TODO(v8:11421): Implement on this platform.
InitializePlatformUnimplemented(data, kParameterCount);
// r1: left operand
// r0: right operand
// r2: feedback slot
Register registers[] = {r1, r0, r2};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void ApiCallbackDescriptor::InitializePlatformSpecific(
......
......@@ -305,6 +305,18 @@ void TurboAssembler::CallBuiltinByIndex(Register builtin_index) {
Call(builtin_index);
}
void TurboAssembler::LoadEntryFromBuiltinIndex(Builtins::Name builtin_index,
Register destination) {
ldr(destination, EntryFromBuiltinIndexAsOperand(builtin_index));
}
MemOperand TurboAssembler::EntryFromBuiltinIndexAsOperand(
Builtins::Name builtin_index) {
DCHECK(root_array_available());
return MemOperand(kRootRegister,
IsolateData::builtin_entry_slot_offset(builtin_index));
}
void TurboAssembler::CallBuiltin(int builtin_index, Condition cond) {
DCHECK(Builtins::IsBuiltinId(builtin_index));
RecordCommentForOffHeapTrampoline(builtin_index);
......@@ -1347,8 +1359,11 @@ void TurboAssembler::EnterFrame(StackFrame::Type type,
bool load_constant_pool_pointer_reg) {
// r0-r3: preserved
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
mov(scratch, Operand(StackFrame::TypeToMarker(type)));
Register scratch = no_reg;
if (!StackFrame::IsJavaScript(type)) {
scratch = temps.Acquire();
mov(scratch, Operand(StackFrame::TypeToMarker(type)));
}
PushCommonFrame(scratch);
}
......
......@@ -12,7 +12,7 @@
#include "src/codegen/arm/assembler-arm.h"
#include "src/codegen/bailout-reason.h"
#include "src/common/globals.h"
#include "src/objects/contexts.h"
#include "src/objects/tagged-index.h"
namespace v8 {
namespace internal {
......@@ -313,6 +313,9 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
bool check_constant_pool = true);
void Call(Label* target);
MemOperand EntryFromBuiltinIndexAsOperand(Builtins::Name builtin_index);
void LoadEntryFromBuiltinIndex(Builtins::Name builtin_index,
Register destination);
// Load the builtin given by the Smi in |builtin_index| into the same
// register.
void LoadEntryFromBuiltinIndex(Register builtin_index);
......@@ -478,6 +481,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void Move(Register dst, Handle<HeapObject> value);
void Move(Register dst, ExternalReference reference);
void Move(Register dst, Register src, Condition cond = al);
void Move(Register dst, const MemOperand& src) { ldr(dst, src); }
void Move(Register dst, const Operand& src, SBit sbit = LeaveCC,
Condition cond = al) {
if (!src.IsRegister() || src.rm() != dst || sbit != LeaveCC) {
......
......@@ -341,7 +341,7 @@ void BaselineOutOfLinePrologueDescriptor::InitializePlatformSpecific(
kJavaScriptCallArgCountRegister, ecx,
kJavaScriptCallNewTargetRegister};
data->InitializePlatformSpecific(kParameterCount, registers);
#elif V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64
#elif V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_ARM
Register registers[] = {
kContextRegister, kJSFunctionRegister, kJavaScriptCallArgCountRegister,
kInterpreterBytecodeArrayRegister, kJavaScriptCallNewTargetRegister};
......@@ -354,7 +354,8 @@ void BaselineOutOfLinePrologueDescriptor::InitializePlatformSpecific(
void BaselineLeaveFrameDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// TODO(v8:11421): Implement on other platforms.
#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64
#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64 || \
V8_TARGET_ARCH_ARM
Register registers[] = {ParamsSizeRegister(), WeightRegister()};
data->InitializePlatformSpecific(kParameterCount, registers);
#else
......
......@@ -163,7 +163,8 @@ struct MaybeBoolFlag {
#define ENABLE_CONTROL_FLOW_INTEGRITY_BOOL false
#endif
#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64
#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64 || \
V8_TARGET_ARCH_ARM
#define ENABLE_SPARKPLUG true
#else
// TODO(v8:11421): Enable Sparkplug for other architectures
......