Commit 2cb031ec authored by Toon Verwaest, committed by Commit Bot

[sparkplug] Extract assembler to baseline-assembler*

Bug: v8:11429
Change-Id: I98b65613dc05f593644af45388b1f2c2a7df34a1
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2712567
Auto-Submit: Toon Verwaest <verwaest@chromium.org>
Commit-Queue: Toon Verwaest <verwaest@chromium.org>
Reviewed-by: Leszek Swirski <leszeks@chromium.org>
Cr-Commit-Position: refs/heads/master@{#72923}
parent 8d11b06f
BUILD.gn
@@ -2558,6 +2558,8 @@ v8_source_set("v8_base_without_compiler") {
"src/ast/source-range-ast-visitor.h",
"src/ast/variables.cc",
"src/ast/variables.h",
"src/baseline/baseline-assembler-inl.h",
"src/baseline/baseline-assembler.h",
"src/baseline/baseline-compiler.cc",
"src/baseline/baseline-compiler.h",
"src/baseline/baseline.cc",
@@ -3724,6 +3726,7 @@ v8_source_set("v8_base_without_compiler") {
if (v8_current_cpu == "x86") {
sources += [ ### gcmole(arch:ia32) ###
"src/baseline/ia32/baseline-assembler-ia32-inl.h",
"src/baseline/ia32/baseline-compiler-ia32-inl.h",
"src/codegen/ia32/assembler-ia32-inl.h",
"src/codegen/ia32/assembler-ia32.cc",
@@ -3751,6 +3754,7 @@ v8_source_set("v8_base_without_compiler") {
]
} else if (v8_current_cpu == "x64") {
sources += [ ### gcmole(arch:x64) ###
"src/baseline/x64/baseline-assembler-x64-inl.h",
"src/baseline/x64/baseline-compiler-x64-inl.h",
"src/codegen/x64/assembler-x64-inl.h",
"src/codegen/x64/assembler-x64.cc",
@@ -3802,6 +3806,7 @@ v8_source_set("v8_base_without_compiler") {
}
} else if (v8_current_cpu == "arm") {
sources += [ ### gcmole(arch:arm) ###
"src/baseline/arm/baseline-assembler-arm-inl.h",
"src/baseline/arm/baseline-compiler-arm-inl.h",
"src/codegen/arm/assembler-arm-inl.h",
"src/codegen/arm/assembler-arm.cc",
@@ -3834,6 +3839,7 @@ v8_source_set("v8_base_without_compiler") {
]
} else if (v8_current_cpu == "arm64") {
sources += [ ### gcmole(arch:arm64) ###
"src/baseline/arm64/baseline-assembler-arm64-inl.h",
"src/baseline/arm64/baseline-compiler-arm64-inl.h",
"src/codegen/arm64/assembler-arm64-inl.h",
"src/codegen/arm64/assembler-arm64.cc",
@@ -3894,6 +3900,7 @@ v8_source_set("v8_base_without_compiler") {
}
} else if (v8_current_cpu == "mips" || v8_current_cpu == "mipsel") {
sources += [ ### gcmole(arch:mipsel) ###
"src/baseline/mips/baseline-assembler-mips-inl.h",
"src/baseline/mips/baseline-compiler-mips-inl.h",
"src/codegen/mips/assembler-mips-inl.h",
"src/codegen/mips/assembler-mips.cc",
@@ -3923,6 +3930,7 @@ v8_source_set("v8_base_without_compiler") {
]
} else if (v8_current_cpu == "mips64" || v8_current_cpu == "mips64el") {
sources += [ ### gcmole(arch:mips64el) ###
"src/baseline/mips64/baseline-assembler-mips64-inl.h",
"src/baseline/mips64/baseline-compiler-mips64-inl.h",
"src/codegen/mips64/assembler-mips64-inl.h",
"src/codegen/mips64/assembler-mips64.cc",
@@ -3952,6 +3960,7 @@ v8_source_set("v8_base_without_compiler") {
]
} else if (v8_current_cpu == "ppc") {
sources += [ ### gcmole(arch:ppc) ###
"src/baseline/ppc/baseline-assembler-ppc-inl.h",
"src/baseline/ppc/baseline-compiler-ppc-inl.h",
"src/codegen/ppc/assembler-ppc-inl.h",
"src/codegen/ppc/assembler-ppc.cc",
@@ -3984,6 +3993,7 @@ v8_source_set("v8_base_without_compiler") {
]
} else if (v8_current_cpu == "ppc64") {
sources += [ ### gcmole(arch:ppc64) ###
"src/baseline/ppc/baseline-assembler-ppc-inl.h",
"src/baseline/ppc/baseline-compiler-ppc-inl.h",
"src/codegen/ppc/assembler-ppc-inl.h",
"src/codegen/ppc/assembler-ppc.cc",
@@ -4016,6 +4026,7 @@ v8_source_set("v8_base_without_compiler") {
]
} else if (v8_current_cpu == "s390" || v8_current_cpu == "s390x") {
sources += [ ### gcmole(arch:s390) ###
"src/baseline/s390/baseline-assembler-s390-inl.h",
"src/baseline/s390/baseline-compiler-s390-inl.h",
"src/codegen/s390/assembler-s390-inl.h",
"src/codegen/s390/assembler-s390.cc",
......
src/baseline/baseline-assembler-inl.h (new file)
// Copyright 2021 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_BASELINE_BASELINE_ASSEMBLER_INL_H_
#define V8_BASELINE_BASELINE_ASSEMBLER_INL_H_
// TODO(v8:11421): Remove #if once baseline compiler is ported to other
// architectures.
#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64
#include <type_traits>
#include <unordered_map>
#include "src/baseline/baseline-assembler.h"
#include "src/interpreter/bytecode-register.h"
#include "src/objects/feedback-cell.h"
#include "src/objects/js-function.h"
#include "src/objects/map.h"
#if V8_TARGET_ARCH_X64
#include "src/baseline/x64/baseline-assembler-x64-inl.h"
#elif V8_TARGET_ARCH_ARM64
#include "src/baseline/arm64/baseline-assembler-arm64-inl.h"
#else
#error Unsupported target architecture.
#endif
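// Each architecture header included above supplies the inline bodies of the
// architecture-dependent BaselineAssembler members declared in
// baseline-assembler.h. An illustrative sketch (close to, but not necessarily
// verbatim, the x64 version):
//
//   MemOperand BaselineAssembler::RegisterFrameOperand(
//       interpreter::Register interpreter_register) {
//     return MemOperand(rbp,
//                       interpreter_register.ToOperand() * kSystemPointerSize);
//   }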
namespace v8 {
namespace internal {
namespace baseline {
#define __ masm_->
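// As elsewhere in V8, `__` abbreviates the wrapped assembler, so
// `__ GetCode(isolate, desc)` below expands to `masm_->GetCode(isolate, desc)`.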
void BaselineAssembler::GetCode(Isolate* isolate, CodeDesc* desc) {
__ GetCode(isolate, desc);
}
int BaselineAssembler::pc_offset() const { return __ pc_offset(); }
bool BaselineAssembler::emit_debug_code() const { return __ emit_debug_code(); }
void BaselineAssembler::CodeEntry() const { __ CodeEntry(); }
void BaselineAssembler::ExceptionHandler() const { __ ExceptionHandler(); }
void BaselineAssembler::RecordComment(const char* string) {
__ RecordComment(string);
}
void BaselineAssembler::Trap() { __ Trap(); }
void BaselineAssembler::DebugBreak() { __ DebugBreak(); }
void BaselineAssembler::CallRuntime(Runtime::FunctionId function, int nargs) {
__ CallRuntime(function, nargs);
}
MemOperand BaselineAssembler::ContextOperand() {
return RegisterFrameOperand(interpreter::Register::current_context());
}
MemOperand BaselineAssembler::FunctionOperand() {
return RegisterFrameOperand(interpreter::Register::function_closure());
}
void BaselineAssembler::LoadMap(Register output, Register value) {
__ LoadMap(output, value);
}
void BaselineAssembler::LoadRoot(Register output, RootIndex index) {
__ LoadRoot(output, index);
}
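// Note the operand order below: the underlying MacroAssembler helper takes
// (index, destination), whereas the BaselineAssembler interface consistently
// puts the output register first.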
void BaselineAssembler::LoadNativeContextSlot(Register output, uint32_t index) {
__ LoadNativeContextSlot(index, output);
}
void BaselineAssembler::Move(Register output, interpreter::Register source) {
return __ Move(output, RegisterFrameOperand(source));
}
void BaselineAssembler::Move(Register output, RootIndex source) {
return __ LoadRoot(output, source);
}
void BaselineAssembler::Move(Register output, Register source) {
__ Move(output, source);
}
void BaselineAssembler::Move(Register output, MemOperand operand) {
__ Move(output, operand);
}
void BaselineAssembler::Move(Register output, Smi value) {
__ Move(output, value);
}
void BaselineAssembler::SmiUntag(Register reg) { __ SmiUntag(reg); }
void BaselineAssembler::SmiUntag(Register output, Register value) {
__ SmiUntag(output, value);
}
void BaselineAssembler::LoadFixedArrayElement(Register output, Register array,
int32_t index) {
LoadTaggedAnyField(output, array,
FixedArray::kHeaderSize + index * kTaggedSize);
}
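// The prototype is reached through the map (object -> map -> prototype);
// `prototype` doubles as the scratch register holding the intermediate map.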
void BaselineAssembler::LoadPrototype(Register prototype, Register object) {
__ LoadMap(prototype, object);
LoadTaggedPointerField(prototype, prototype, Map::kPrototypeOffset);
}
void BaselineAssembler::LoadContext(Register output) {
LoadRegister(output, interpreter::Register::current_context());
}
void BaselineAssembler::LoadFunction(Register output) {
LoadRegister(output, interpreter::Register::function_closure());
}
void BaselineAssembler::StoreContext(Register context) {
StoreRegister(interpreter::Register::current_context(), context);
}
void BaselineAssembler::LoadRegister(Register output,
interpreter::Register source) {
Move(output, source);
}
void BaselineAssembler::StoreRegister(interpreter::Register output,
Register value) {
Move(output, value);
}
SaveAccumulatorScope::SaveAccumulatorScope(BaselineAssembler* assembler)
: assembler_(assembler) {
assembler_->Push(kInterpreterAccumulatorRegister);
}
SaveAccumulatorScope::~SaveAccumulatorScope() {
assembler_->Pop(kInterpreterAccumulatorRegister);
}
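// Usage sketch (hypothetical call site; any runtime call that may clobber the
// accumulator works the same way):
//
//   {
//     SaveAccumulatorScope save_accumulator(&basm);
//     basm.CallRuntime(Runtime::kStackGuard, 0);
//   }  // The accumulator register is popped back here.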
#undef __
} // namespace baseline
} // namespace internal
} // namespace v8
#endif  // V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64
#endif // V8_BASELINE_BASELINE_ASSEMBLER_INL_H_
src/baseline/baseline-assembler.h (new file)
// Copyright 2021 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_BASELINE_BASELINE_ASSEMBLER_H_
#define V8_BASELINE_BASELINE_ASSEMBLER_H_
// TODO(v8:11421): Remove #if once baseline compiler is ported to other
// architectures.
#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64
#include "src/codegen/macro-assembler.h"
namespace v8 {
namespace internal {
namespace baseline {
enum class Condition : uint8_t;
class BaselineAssembler {
public:
class ScratchRegisterScope;
explicit BaselineAssembler(MacroAssembler* masm) : masm_(masm) {}
inline static MemOperand RegisterFrameOperand(
interpreter::Register interpreter_register);
inline MemOperand ContextOperand();
inline MemOperand FunctionOperand();
inline MemOperand FeedbackVectorOperand();
inline void GetCode(Isolate* isolate, CodeDesc* desc);
inline int pc_offset() const;
inline bool emit_debug_code() const;
inline void CodeEntry() const;
inline void ExceptionHandler() const;
inline void RecordComment(const char* string);
inline void Trap();
inline void DebugBreak();
inline void Bind(Label* label);
inline void JumpIf(Condition cc, Label* target,
Label::Distance distance = Label::kFar);
inline void Jump(Label* target, Label::Distance distance = Label::kFar);
inline void JumpIfRoot(Register value, RootIndex index, Label* target,
Label::Distance distance = Label::kFar);
inline void JumpIfNotRoot(Register value, RootIndex index, Label* target,
                            Label::Distance distance = Label::kFar);
inline void JumpIfSmi(Register value, Label* target,
Label::Distance distance = Label::kFar);
inline void JumpIfNotSmi(Register value, Label* target,
Label::Distance distance = Label::kFar);
inline void Test(Register value, int mask);
inline void CmpObjectType(Register object, InstanceType instance_type,
Register map);
inline void CmpInstanceType(Register value, InstanceType instance_type);
inline void Cmp(Register value, Smi smi);
inline void ComparePointer(Register value, MemOperand operand);
inline Condition CheckSmi(Register value);
inline void SmiCompare(Register lhs, Register rhs);
inline void CompareTagged(Register value, MemOperand operand);
inline void CompareTagged(MemOperand operand, Register value);
inline void CompareByte(Register value, int32_t byte);
inline void LoadMap(Register output, Register value);
inline void LoadRoot(Register output, RootIndex index);
inline void LoadNativeContextSlot(Register output, uint32_t index);
inline void Move(Register output, Register source);
inline void Move(Register output, MemOperand operand);
inline void Move(Register output, Smi value);
inline void Move(Register output, TaggedIndex value);
inline void Move(Register output, interpreter::Register source);
inline void Move(interpreter::Register output, Register source);
inline void Move(Register output, RootIndex source);
inline void Move(MemOperand output, Register source);
inline void Move(Register output, ExternalReference reference);
inline void Move(Register output, Handle<HeapObject> value);
inline void Move(Register output, int32_t immediate);
inline void MoveMaybeSmi(Register output, Register source);
inline void MoveSmi(Register output, Register source);
// Push the given values, in the given order. If the stack needs alignment
// (looking at you Arm64), the stack is padded from the front (i.e. before the
// first value is pushed).
//
// This supports pushing a RegisterList as the last value -- the list is
// iterated and each interpreter Register is pushed.
//
// The total number of values pushed is returned. Note that this might be
// different from sizeof...(T), specifically if there was a RegisterList.
template <typename... T>
inline int Push(T... vals);
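// Example (sketch; `receiver` and `arg_list` are hypothetical operands,
// `arg_list` being an interpreter::RegisterList): the trailing list is
// expanded element by element, so the returned count can exceed the number of
// C++ arguments:
//
//   int pushed = basm.Push(receiver, Smi::FromInt(0), arg_list);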
// Like Push(vals...), but pushes in reverse order, to support our
// reversed-order argument JS calling convention. Unlike Push, it doesn't
// return the number of arguments pushed.
//
// Note that padding is still inserted before the first pushed value (i.e. the
// last value).
template <typename... T>
inline void PushReverse(T... vals);
// Pop values off the stack into the given registers.
//
// Note that this inserts into registers in the given order, i.e. in reverse
// order if the registers were pushed. This means that to spill registers,
// push and pop have to be in reverse order, e.g.
//
// Push(r1, r2, ..., rN);
// ClobberRegisters();
// Pop(rN, ..., r2, r1);
//
// On stack-alignment architectures, any padding is popped off after the last
// register. This matches the behaviour of Push, which means that the above
// code still works even if the number of registers doesn't match stack
// alignment.
template <typename... T>
inline void Pop(T... registers);
inline void CallBuiltin(Builtins::Name builtin);
inline void TailCallBuiltin(Builtins::Name builtin);
inline void CallRuntime(Runtime::FunctionId function, int nargs);
inline void LoadTaggedPointerField(Register output, Register source,
int offset);
inline void LoadTaggedSignedField(Register output, Register source,
int offset);
inline void LoadTaggedAnyField(Register output, Register source, int offset);
inline void LoadByteField(Register output, Register source, int offset);
inline void StoreTaggedSignedField(Register target, int offset, Smi value);
inline void StoreTaggedFieldWithWriteBarrier(Register target, int offset,
Register value);
inline void StoreTaggedFieldNoWriteBarrier(Register target, int offset,
Register value);
inline void LoadFixedArrayElement(Register output, Register array,
int32_t index);
inline void LoadPrototype(Register prototype, Register object);
// Loads the feedback cell from the function, and sets flags on add so that
// we can compare afterward.
inline void AddToInterruptBudget(int32_t weight);
inline void AddToInterruptBudget(Register weight);
inline void AddSmi(Register lhs, Smi rhs);
inline void SmiUntag(Register value);
inline void SmiUntag(Register output, Register value);
inline void Switch(Register reg, int case_value_base, Label** labels,
int num_labels);
// Register operands.
inline void LoadRegister(Register output, interpreter::Register source);
inline void StoreRegister(interpreter::Register output, Register value);
// Frame values
inline void LoadFunction(Register output);
inline void LoadContext(Register output);
inline void StoreContext(Register context);
inline static void EmitReturn(MacroAssembler* masm);
MacroAssembler* masm() { return masm_; }
private:
MacroAssembler* masm_;
ScratchRegisterScope* scratch_register_scope_ = nullptr;
};
class SaveAccumulatorScope final {
public:
inline explicit SaveAccumulatorScope(BaselineAssembler* assembler);
inline ~SaveAccumulatorScope();
private:
BaselineAssembler* assembler_;
};
} // namespace baseline
} // namespace internal
} // namespace v8
#endif  // V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64
#endif // V8_BASELINE_BASELINE_ASSEMBLER_H_
src/baseline/baseline-compiler.cc
@@ -11,6 +11,7 @@
#include <type_traits>
#include <unordered_map>
#include "src/baseline/baseline-assembler-inl.h"
#include "src/builtins/builtins-constructor.h"
#include "src/builtins/builtins-descriptors.h"
#include "src/builtins/builtins.h"
@@ -24,7 +25,6 @@
#include "src/interpreter/bytecode-array-accessor.h"
#include "src/interpreter/bytecode-array-iterator.h"
#include "src/interpreter/bytecode-flags.h"
#include "src/interpreter/bytecode-register.h"
#include "src/objects/code.h"
#include "src/objects/heap-object.h"
#include "src/objects/instance-type.h"
@@ -220,91 +220,6 @@ void MoveArgumentsForDescriptor(BaselineAssembler* masm,
} // namespace detail
#define __ masm_->
void BaselineAssembler::GetCode(Isolate* isolate, CodeDesc* desc) {
__ GetCode(isolate, desc);
}
int BaselineAssembler::pc_offset() const { return __ pc_offset(); }
bool BaselineAssembler::emit_debug_code() const { return __ emit_debug_code(); }
void BaselineAssembler::CodeEntry() const { __ CodeEntry(); }
void BaselineAssembler::ExceptionHandler() const { __ ExceptionHandler(); }
void BaselineAssembler::RecordComment(const char* string) {
__ RecordComment(string);
}
void BaselineAssembler::Trap() { __ Trap(); }
void BaselineAssembler::DebugBreak() { __ DebugBreak(); }
void BaselineAssembler::CallRuntime(Runtime::FunctionId function, int nargs) {
__ CallRuntime(function, nargs);
}
MemOperand BaselineAssembler::ContextOperand() {
return RegisterFrameOperand(interpreter::Register::current_context());
}
MemOperand BaselineAssembler::FunctionOperand() {
return RegisterFrameOperand(interpreter::Register::function_closure());
}
void BaselineAssembler::LoadMap(Register output, Register value) {
__ LoadMap(output, value);
}
void BaselineAssembler::LoadRoot(Register output, RootIndex index) {
__ LoadRoot(output, index);
}
void BaselineAssembler::LoadNativeContextSlot(Register output, uint32_t index) {
__ LoadNativeContextSlot(index, output);
}
void BaselineAssembler::Move(Register output, interpreter::Register source) {
return __ Move(output, RegisterFrameOperand(source));
}
void BaselineAssembler::Move(Register output, RootIndex source) {
return __ LoadRoot(output, source);
}
void BaselineAssembler::Move(Register output, Register source) {
__ Move(output, source);
}
void BaselineAssembler::Move(Register output, MemOperand operand) {
__ Move(output, operand);
}
void BaselineAssembler::Move(Register output, Smi value) {
__ Move(output, value);
}
void BaselineAssembler::SmiUntag(Register reg) { __ SmiUntag(reg); }
void BaselineAssembler::SmiUntag(Register output, Register value) {
__ SmiUntag(output, value);
}
void BaselineAssembler::LoadFixedArrayElement(Register output, Register array,
int32_t index) {
LoadTaggedAnyField(output, array,
FixedArray::kHeaderSize + index * kTaggedSize);
}
void BaselineAssembler::LoadPrototype(Register prototype, Register object) {
__ LoadMap(prototype, object);
LoadTaggedPointerField(prototype, prototype, Map::kPrototypeOffset);
}
void BaselineAssembler::LoadContext(Register output) {
LoadRegister(output, interpreter::Register::current_context());
}
void BaselineAssembler::LoadFunction(Register output) {
LoadRegister(output, interpreter::Register::function_closure());
}
void BaselineAssembler::StoreContext(Register context) {
StoreRegister(interpreter::Register::current_context(), context);
}
void BaselineAssembler::LoadRegister(Register output,
interpreter::Register source) {
Move(output, source);
}
void BaselineAssembler::StoreRegister(interpreter::Register output,
Register value) {
Move(output, value);
}
#undef __
BaselineCompiler::BaselineCompiler(
Isolate* isolate, Handle<SharedFunctionInfo> shared_function_info,
......
src/baseline/baseline-compiler.h
@@ -13,7 +13,7 @@
#include "src/base/logging.h"
#include "src/base/threaded-list.h"
#include "src/codegen/macro-assembler.h"
#include "src/baseline/baseline-assembler.h"
#include "src/handles/handles.h"
#include "src/interpreter/bytecode-array-iterator.h"
#include "src/interpreter/bytecode-register.h"
@@ -30,8 +30,6 @@ class BytecodeArray;
namespace baseline {
enum class Condition : uint8_t;
class BytecodeOffsetTableBuilder {
public:
void AddPosition(size_t pc_offset, size_t bytecode_offset) {
@@ -61,165 +59,6 @@ class BytecodeOffsetTableBuilder {
std::vector<byte> bytes_;
};
class BaselineAssembler {
public:
class ScratchRegisterScope;
explicit BaselineAssembler(MacroAssembler* masm) : masm_(masm) {}
static MemOperand RegisterFrameOperand(
interpreter::Register interpreter_register);
MemOperand ContextOperand();
MemOperand FunctionOperand();
MemOperand FeedbackVectorOperand();
void GetCode(Isolate* isolate, CodeDesc* desc);
int pc_offset() const;
bool emit_debug_code() const;
void CodeEntry() const;
void ExceptionHandler() const;
void RecordComment(const char* string);
void Trap();
void DebugBreak();
void Bind(Label* label);
void JumpIf(Condition cc, Label* target,
Label::Distance distance = Label::kFar);
void Jump(Label* target, Label::Distance distance = Label::kFar);
void JumpIfRoot(Register value, RootIndex index, Label* target,
Label::Distance distance = Label::kFar);
void JumpIfNotRoot(Register value, RootIndex index, Label* target,
                     Label::Distance distance = Label::kFar);
void JumpIfSmi(Register value, Label* target,
Label::Distance distance = Label::kFar);
void JumpIfNotSmi(Register value, Label* target,
Label::Distance distance = Label::kFar);
void Test(Register value, int mask);
void CmpObjectType(Register object, InstanceType instance_type, Register map);
void CmpInstanceType(Register value, InstanceType instance_type);
void Cmp(Register value, Smi smi);
void ComparePointer(Register value, MemOperand operand);
Condition CheckSmi(Register value);
void SmiCompare(Register lhs, Register rhs);
void CompareTagged(Register value, MemOperand operand);
void CompareTagged(MemOperand operand, Register value);
void CompareByte(Register value, int32_t byte);
void LoadMap(Register output, Register value);
void LoadRoot(Register output, RootIndex index);
void LoadNativeContextSlot(Register output, uint32_t index);
void Move(Register output, Register source);
void Move(Register output, MemOperand operand);
void Move(Register output, Smi value);
void Move(Register output, TaggedIndex value);
void Move(Register output, interpreter::Register source);
void Move(interpreter::Register output, Register source);
void Move(Register output, RootIndex source);
void Move(MemOperand output, Register source);
void Move(Register output, ExternalReference reference);
void Move(Register output, Handle<HeapObject> value);
void Move(Register output, int32_t immediate);
void MoveMaybeSmi(Register output, Register source);
void MoveSmi(Register output, Register source);
// Push the given values, in the given order. If the stack needs alignment
// (looking at you Arm64), the stack is padded from the front (i.e. before the
// first value is pushed).
//
// This supports pushing a RegisterList as the last value -- the list is
// iterated and each interpreter Register is pushed.
//
// The total number of values pushed is returned. Note that this might be
// different from sizeof...(T), specifically if there was a RegisterList.
template <typename... T>
int Push(T... vals);
// Like Push(vals...), but pushes in reverse order, to support our
// reversed-order argument JS calling convention. Unlike Push, it doesn't
// return the number of arguments pushed.
//
// Note that padding is still inserted before the first pushed value (i.e. the
// last value).
template <typename... T>
void PushReverse(T... vals);
// Pop values off the stack into the given registers.
//
// Note that this inserts into registers in the given order, i.e. in reverse
// order if the registers were pushed. This means that to spill registers,
// push and pop have to be in reverse order, e.g.
//
// Push(r1, r2, ..., rN);
// ClobberRegisters();
// Pop(rN, ..., r2, r1);
//
// On stack-alignment architectures, any padding is popped off after the last
// register. This matches the behaviour of Push, which means that the above
// code still works even if the number of registers doesn't match stack
// alignment.
template <typename... T>
void Pop(T... registers);
void CallBuiltin(Builtins::Name builtin);
void TailCallBuiltin(Builtins::Name builtin);
void CallRuntime(Runtime::FunctionId function, int nargs);
void LoadTaggedPointerField(Register output, Register source, int offset);
void LoadTaggedSignedField(Register output, Register source, int offset);
void LoadTaggedAnyField(Register output, Register source, int offset);
void LoadByteField(Register output, Register source, int offset);
void StoreTaggedSignedField(Register target, int offset, Smi value);
void StoreTaggedFieldWithWriteBarrier(Register target, int offset,
Register value);
void StoreTaggedFieldNoWriteBarrier(Register target, int offset,
Register value);
void LoadFixedArrayElement(Register output, Register array, int32_t index);
void LoadPrototype(Register prototype, Register object);
// Loads the feedback cell from the function, and sets flags on add so that
// we can compare afterward.
void AddToInterruptBudget(int32_t weight);
void AddToInterruptBudget(Register weight);
void AddSmi(Register lhs, Smi rhs);
void SmiUntag(Register value);
void SmiUntag(Register output, Register value);
void Switch(Register reg, int case_value_base, Label** labels,
int num_labels);
// Register operands.
void LoadRegister(Register output, interpreter::Register source);
void StoreRegister(interpreter::Register output, Register value);
// Frame values
void LoadFunction(Register output);
void LoadContext(Register output);
void StoreContext(Register context);
static void EmitReturn(MacroAssembler* masm);
MacroAssembler* masm() { return masm_; }
private:
MacroAssembler* masm_;
ScratchRegisterScope* scratch_register_scope_ = nullptr;
};
class SaveAccumulatorScope final {
public:
explicit SaveAccumulatorScope(BaselineAssembler* assembler)
: assembler_(assembler) {
assembler_->Push(kInterpreterAccumulatorRegister);
}
~SaveAccumulatorScope() { assembler_->Pop(kInterpreterAccumulatorRegister); }
private:
BaselineAssembler* assembler_;
};
class BaselineCompiler {
public:
explicit BaselineCompiler(Isolate* isolate,
......
src/baseline/baseline.cc
@@ -8,6 +8,7 @@
// architectures.
#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64
#include "src/baseline/baseline-assembler-inl.h"
#include "src/baseline/baseline-compiler.h"
#include "src/heap/factory-inl.h"
#include "src/logging/counters.h"
......