Commit 266e803e authored by Clemens Hammacher, committed by Commit Bot

[wasm] Liftoff: a baseline compiler for WebAssembly

This CL adds a first implementation of Liftoff, the new wasm baseline
compiler, for x64 and ia32. It currently supports the most important
i32 instructions and control instructions. Whenever it encounters an
instruction it does not support yet, it aborts.
In a subsequent CL, Liftoff will be called from the
WasmCompilationUnit, falling back to Turbofan compilation if the
baseline compiler bails out.
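
As a rough sketch (hypothetical helper names, not part of this CL), the
planned hook could look like:

  // Hypothetical: try the fast baseline path first; fall back to the
  // optimizing compiler whenever Liftoff reports a bailout.
  if (!ExecuteLiftoffCompilation(unit)) {
    ExecuteTurbofanCompilation(unit);
  }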

R=titzer@chromium.org

Bug: v8:6600
Change-Id: Ifa78fb9d546dce72c241ff01a251dfa13cb31c1d
Reviewed-on: https://chromium-review.googlesource.com/716480
Commit-Queue: Clemens Hammacher <clemensh@chromium.org>
Reviewed-by: Ben Titzer <titzer@chromium.org>
Cr-Commit-Position: refs/heads/master@{#48832}
parent 5c2984ab
@@ -2056,6 +2056,9 @@ v8_source_set("v8_base") {
     "src/visitors.h",
     "src/vm-state-inl.h",
     "src/vm-state.h",
+    "src/wasm/baseline/liftoff-assembler.cc",
+    "src/wasm/baseline/liftoff-assembler.h",
+    "src/wasm/baseline/liftoff-compiler.cc",
     "src/wasm/compilation-manager.cc",
     "src/wasm/compilation-manager.h",
     "src/wasm/decoder.h",
@@ -2163,6 +2166,8 @@ v8_source_set("v8_base") {
       "src/ic/ia32/handler-compiler-ia32.cc",
       "src/regexp/ia32/regexp-macro-assembler-ia32.cc",
       "src/regexp/ia32/regexp-macro-assembler-ia32.h",
+      "src/wasm/baseline/ia32/liftoff-assembler-ia32-defs.h",
+      "src/wasm/baseline/ia32/liftoff-assembler-ia32.h",
     ]
   } else if (v8_current_cpu == "x64") {
     sources += [  ### gcmole(arch:x64) ###
@@ -2178,6 +2183,8 @@ v8_source_set("v8_base") {
       "src/regexp/x64/regexp-macro-assembler-x64.cc",
       "src/regexp/x64/regexp-macro-assembler-x64.h",
       "src/third_party/valgrind/valgrind.h",
+      "src/wasm/baseline/x64/liftoff-assembler-x64-defs.h",
+      "src/wasm/baseline/x64/liftoff-assembler-x64.h",
       "src/x64/assembler-x64-inl.h",
       "src/x64/assembler-x64.cc",
       "src/x64/assembler-x64.h",
@@ -2233,6 +2240,8 @@ v8_source_set("v8_base") {
       "src/ic/arm/handler-compiler-arm.cc",
       "src/regexp/arm/regexp-macro-assembler-arm.cc",
       "src/regexp/arm/regexp-macro-assembler-arm.h",
+      "src/wasm/baseline/arm/liftoff-assembler-arm-defs.h",
+      "src/wasm/baseline/arm/liftoff-assembler-arm.h",
     ]
   } else if (v8_current_cpu == "arm64") {
     sources += [  ### gcmole(arch:arm64) ###
@@ -2279,6 +2288,8 @@ v8_source_set("v8_base") {
       "src/ic/arm64/handler-compiler-arm64.cc",
       "src/regexp/arm64/regexp-macro-assembler-arm64.cc",
       "src/regexp/arm64/regexp-macro-assembler-arm64.h",
+      "src/wasm/baseline/arm64/liftoff-assembler-arm64-defs.h",
+      "src/wasm/baseline/arm64/liftoff-assembler-arm64.h",
     ]
     if (use_jumbo_build) {
       jumbo_excluded_sources += [
@@ -2316,6 +2327,8 @@ v8_source_set("v8_base") {
       "src/mips/simulator-mips.h",
       "src/regexp/mips/regexp-macro-assembler-mips.cc",
       "src/regexp/mips/regexp-macro-assembler-mips.h",
+      "src/wasm/baseline/mips/liftoff-assembler-mips-defs.h",
+      "src/wasm/baseline/mips/liftoff-assembler-mips.h",
     ]
   } else if (v8_current_cpu == "mips64" || v8_current_cpu == "mips64el") {
     sources += [  ### gcmole(arch:mips64el) ###
@@ -2346,6 +2359,8 @@ v8_source_set("v8_base") {
       "src/mips64/simulator-mips64.h",
       "src/regexp/mips64/regexp-macro-assembler-mips64.cc",
       "src/regexp/mips64/regexp-macro-assembler-mips64.h",
+      "src/wasm/baseline/mips64/liftoff-assembler-mips64-defs.h",
+      "src/wasm/baseline/mips64/liftoff-assembler-mips64.h",
     ]
   } else if (v8_current_cpu == "ppc" || v8_current_cpu == "ppc64") {
     sources += [  ### gcmole(arch:ppc) ###
@@ -2376,6 +2391,8 @@ v8_source_set("v8_base") {
       "src/ppc/simulator-ppc.h",
       "src/regexp/ppc/regexp-macro-assembler-ppc.cc",
       "src/regexp/ppc/regexp-macro-assembler-ppc.h",
+      "src/wasm/baseline/ppc/liftoff-assembler-ppc-defs.h",
+      "src/wasm/baseline/ppc/liftoff-assembler-ppc.h",
     ]
   } else if (v8_current_cpu == "s390" || v8_current_cpu == "s390x") {
     sources += [  ### gcmole(arch:s390) ###
...
@@ -280,6 +280,12 @@ class Register : public CPURegister {
     return Register::Create(code, kXRegSizeInBits);
   }
+  template <int code>
+  static Register from_code() {
+    // Always return an X register.
+    return Register::Create<code, kXRegSizeInBits>();
+  }
   // End of V8 compatibility section -----------------------
   //
  private:
...
@@ -505,6 +505,7 @@ DEFINE_BOOL(trace_wasm_streaming, false,
 DEFINE_INT(trace_wasm_ast_start, 0,
            "start function for wasm AST trace (inclusive)")
 DEFINE_INT(trace_wasm_ast_end, 0, "end function for wasm AST trace (exclusive)")
+DEFINE_BOOL(trace_liftoff, false, "trace liftoff, the wasm baseline compiler")
 DEFINE_UINT(skip_compiling_wasm_funcs, 0, "start compiling at function N")
 DEFINE_BOOL(wasm_break_on_decoder_error, false,
             "debug break when wasm decoder encounters an error")
...
@@ -15,7 +15,7 @@
 namespace v8 {
 namespace internal {
-enum class MachineRepresentation {
+enum class MachineRepresentation : uint8_t {
   kNone,
   kBit,
   kWord8,
@@ -41,7 +41,7 @@ static_assert(static_cast<int>(MachineRepresentation::kLastRepresentation) <
 const char* MachineReprToString(MachineRepresentation);
-enum class MachineSemantic {
+enum class MachineSemantic : uint8_t {
   kNone,
   kBool,
   kInt32,
...
@@ -1437,6 +1437,9 @@
         'visitors.h',
         'vm-state-inl.h',
         'vm-state.h',
+        'wasm/baseline/liftoff-assembler.cc',
+        'wasm/baseline/liftoff-assembler.h',
+        'wasm/baseline/liftoff-compiler.cc',
         'wasm/compilation-manager.cc',
         'wasm/compilation-manager.h',
         'wasm/decoder.h',
@@ -1532,13 +1535,15 @@
         'compiler/arm/instruction-codes-arm.h',
         'compiler/arm/instruction-scheduler-arm.cc',
         'compiler/arm/instruction-selector-arm.cc',
-        'compiler/arm/unwinding-info-writer-arm.h',
         'compiler/arm/unwinding-info-writer-arm.cc',
+        'compiler/arm/unwinding-info-writer-arm.h',
         'debug/arm/debug-arm.cc',
         'ic/arm/access-compiler-arm.cc',
         'ic/arm/handler-compiler-arm.cc',
         'regexp/arm/regexp-macro-assembler-arm.cc',
         'regexp/arm/regexp-macro-assembler-arm.h',
+        'wasm/baseline/arm/liftoff-assembler-arm-defs.h',
+        'wasm/baseline/arm/liftoff-assembler-arm.h',
       ],
     }],
     ['v8_target_arch=="arm64"', {
@@ -1579,13 +1584,15 @@
         'compiler/arm64/instruction-codes-arm64.h',
         'compiler/arm64/instruction-scheduler-arm64.cc',
         'compiler/arm64/instruction-selector-arm64.cc',
-        'compiler/arm64/unwinding-info-writer-arm64.h',
         'compiler/arm64/unwinding-info-writer-arm64.cc',
+        'compiler/arm64/unwinding-info-writer-arm64.h',
         'debug/arm64/debug-arm64.cc',
         'ic/arm64/access-compiler-arm64.cc',
         'ic/arm64/handler-compiler-arm64.cc',
         'regexp/arm64/regexp-macro-assembler-arm64.cc',
         'regexp/arm64/regexp-macro-assembler-arm64.h',
+        'wasm/baseline/arm64/liftoff-assembler-arm64-defs.h',
+        'wasm/baseline/arm64/liftoff-assembler-arm64.h',
       ],
     }],
    ['v8_target_arch=="ia32"', {
@@ -1616,6 +1623,8 @@
         'ic/ia32/handler-compiler-ia32.cc',
         'regexp/ia32/regexp-macro-assembler-ia32.cc',
         'regexp/ia32/regexp-macro-assembler-ia32.h',
+        'wasm/baseline/ia32/liftoff-assembler-ia32-defs.h',
+        'wasm/baseline/ia32/liftoff-assembler-ia32.h',
       ],
     }],
    ['v8_target_arch=="mips" or v8_target_arch=="mipsel"', {
@@ -1647,6 +1656,8 @@
         'ic/mips/handler-compiler-mips.cc',
         'regexp/mips/regexp-macro-assembler-mips.cc',
         'regexp/mips/regexp-macro-assembler-mips.h',
+        'wasm/baseline/mips/liftoff-assembler-mips-defs.h',
+        'wasm/baseline/mips/liftoff-assembler-mips.h',
       ],
     }],
    ['v8_target_arch=="mips64" or v8_target_arch=="mips64el"', {
@@ -1678,6 +1689,8 @@
         'ic/mips64/handler-compiler-mips64.cc',
         'regexp/mips64/regexp-macro-assembler-mips64.cc',
         'regexp/mips64/regexp-macro-assembler-mips64.h',
+        'wasm/baseline/mips64/liftoff-assembler-mips64-defs.h',
+        'wasm/baseline/mips64/liftoff-assembler-mips64.h',
       ],
     }],
    ['v8_target_arch=="x64"', {
@@ -1686,8 +1699,8 @@
         'compiler/x64/instruction-codes-x64.h',
         'compiler/x64/instruction-scheduler-x64.cc',
         'compiler/x64/instruction-selector-x64.cc',
-        'compiler/x64/unwinding-info-writer-x64.h',
         'compiler/x64/unwinding-info-writer-x64.cc',
+        'compiler/x64/unwinding-info-writer-x64.h',
         'x64/assembler-x64-inl.h',
         'x64/assembler-x64.cc',
         'x64/assembler-x64.h',
@@ -1712,6 +1725,8 @@
         'regexp/x64/regexp-macro-assembler-x64.cc',
         'regexp/x64/regexp-macro-assembler-x64.h',
         'third_party/valgrind/valgrind.h',
+        'wasm/baseline/x64/liftoff-assembler-x64-defs.h',
+        'wasm/baseline/x64/liftoff-assembler-x64.h',
       ],
     }],
    ['v8_target_arch=="x64" and OS=="linux"', {
@@ -1746,6 +1761,8 @@
         'ppc/simulator-ppc.h',
         'regexp/ppc/regexp-macro-assembler-ppc.cc',
         'regexp/ppc/regexp-macro-assembler-ppc.h',
+        'wasm/baseline/ppc/liftoff-assembler-ppc-defs.h',
+        'wasm/baseline/ppc/liftoff-assembler-ppc.h',
       ],
     }],
    ['v8_target_arch=="s390" or v8_target_arch=="s390x"', {
...
# Liftoff (the baseline compiler for WebAssembly) depends on some compiler
# internals, like the linkage location for parameters and returns.
include_rules = [
"+src/compiler/linkage.h",
]
// Copyright 2017 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_ARM_DEFS_H_
#define V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_ARM_DEFS_H_
#include "src/reglist.h"
namespace v8 {
namespace internal {
namespace wasm {
// TODO(clemensh): Implement the LiftoffAssembler on this platform.
static constexpr bool kLiftoffAssemblerImplementedOnThisPlatform = false;
static constexpr RegList kLiftoffAssemblerGpCacheRegs = 0xff;
} // namespace wasm
} // namespace internal
} // namespace v8
#endif // V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_ARM_DEFS_H_
// Copyright 2017 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_ARM_H_
#define V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_ARM_H_
#include "src/wasm/baseline/liftoff-assembler.h"
namespace v8 {
namespace internal {
namespace wasm {
void LiftoffAssembler::ReserveStackSpace(uint32_t space) { USE(stack_space_); }
void LiftoffAssembler::LoadConstant(Register reg, WasmValue value) {}
void LiftoffAssembler::Load(Register dst, Address addr,
RelocInfo::Mode reloc_mode) {}
void LiftoffAssembler::Store(Address addr, Register reg,
PinnedRegisterScope pinned_regs,
RelocInfo::Mode reloc_mode) {}
void LiftoffAssembler::LoadCallerFrameSlot(Register dst,
uint32_t caller_slot_idx) {}
void LiftoffAssembler::MoveStackValue(uint32_t dst_index, uint32_t src_index,
wasm::ValueType type) {}
void LiftoffAssembler::MoveToReturnRegister(Register reg) {}
void LiftoffAssembler::Spill(uint32_t index, Register reg) {}
void LiftoffAssembler::Spill(uint32_t index, WasmValue value) {}
void LiftoffAssembler::Fill(Register reg, uint32_t index) {}
void LiftoffAssembler::emit_i32_add(Register dst, Register lhs, Register rhs) {}
#define DEFAULT_I32_BINOP(name, internal_name) \
void LiftoffAssembler::emit_i32_##name(Register dst, Register lhs, \
Register rhs) {}
// clang-format off
DEFAULT_I32_BINOP(sub, sub)
DEFAULT_I32_BINOP(mul, imul)
DEFAULT_I32_BINOP(and, and)
DEFAULT_I32_BINOP(or, or)
DEFAULT_I32_BINOP(xor, xor)
// clang-format on
#undef DEFAULT_I32_BINOP
void LiftoffAssembler::JumpIfZero(Register reg, Label* label) {}
} // namespace wasm
} // namespace internal
} // namespace v8
#endif // V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_ARM_H_
// Copyright 2017 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_ARM64_DEFS_H_
#define V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_ARM64_DEFS_H_
#include "src/reglist.h"
namespace v8 {
namespace internal {
namespace wasm {
// TODO(clemensh): Implement the LiftoffAssembler on this platform.
static constexpr bool kLiftoffAssemblerImplementedOnThisPlatform = false;
static constexpr RegList kLiftoffAssemblerGpCacheRegs = 0xff;
} // namespace wasm
} // namespace internal
} // namespace v8
#endif // V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_ARM64_DEFS_H_
// Copyright 2017 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_ARM64_H_
#define V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_ARM64_H_
#include "src/wasm/baseline/liftoff-assembler.h"
namespace v8 {
namespace internal {
namespace wasm {
void LiftoffAssembler::ReserveStackSpace(uint32_t space) { USE(stack_space_); }
void LiftoffAssembler::LoadConstant(Register reg, WasmValue value) {}
void LiftoffAssembler::Load(Register dst, Address addr,
RelocInfo::Mode reloc_mode) {}
void LiftoffAssembler::Store(Address addr, Register reg,
PinnedRegisterScope pinned_regs,
RelocInfo::Mode reloc_mode) {}
void LiftoffAssembler::LoadCallerFrameSlot(Register dst,
uint32_t caller_slot_idx) {}
void LiftoffAssembler::MoveStackValue(uint32_t dst_index, uint32_t src_index,
wasm::ValueType type) {}
void LiftoffAssembler::MoveToReturnRegister(Register reg) {}
void LiftoffAssembler::Spill(uint32_t index, Register reg) {}
void LiftoffAssembler::Spill(uint32_t index, WasmValue value) {}
void LiftoffAssembler::Fill(Register reg, uint32_t index) {}
void LiftoffAssembler::emit_i32_add(Register dst, Register lhs, Register rhs) {}
#define DEFAULT_I32_BINOP(name, internal_name) \
void LiftoffAssembler::emit_i32_##name(Register dst, Register lhs, \
Register rhs) {}
// clang-format off
DEFAULT_I32_BINOP(sub, sub)
DEFAULT_I32_BINOP(mul, imul)
DEFAULT_I32_BINOP(and, and)
DEFAULT_I32_BINOP(or, or)
DEFAULT_I32_BINOP(xor, xor)
// clang-format on
#undef DEFAULT_I32_BINOP
void LiftoffAssembler::JumpIfZero(Register reg, Label* label) {}
} // namespace wasm
} // namespace internal
} // namespace v8
#endif // V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_ARM64_H_
// Copyright 2017 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_IA32_DEFS_H_
#define V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_IA32_DEFS_H_
#include "src/reglist.h"
namespace v8 {
namespace internal {
namespace wasm {
static constexpr bool kLiftoffAssemblerImplementedOnThisPlatform = true;
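// Note: bits 4 (esp) and 5 (ebp) are deliberately absent from the set below;
// the stack and frame pointers cannot be allocated as cache registers.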
static constexpr RegList kLiftoffAssemblerGpCacheRegs = 1 << 0 | // eax
1 << 1 | // ecx
1 << 2 | // edx
1 << 3 | // ebx
1 << 6 | // esi
1 << 7; // edi
} // namespace wasm
} // namespace internal
} // namespace v8
#endif // V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_IA32_DEFS_H_
// Copyright 2017 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_IA32_H_
#define V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_IA32_H_
#include "src/wasm/baseline/liftoff-assembler.h"
#include "src/assembler.h"
#include "src/wasm/wasm-opcodes.h"
namespace v8 {
namespace internal {
namespace wasm {
namespace liftoff {
inline Operand GetStackSlot(uint32_t index) {
// ebp-8 holds the stack marker, first stack slot is located at ebp-16.
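// Slot |index| thus lives at ebp - 16 - 8 * index: slot 0 at ebp-16,
// slot 1 at ebp-24, and so on.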
return Operand(ebp, -16 - 8 * index);
}
} // namespace liftoff
void LiftoffAssembler::ReserveStackSpace(uint32_t space) {
stack_space_ = space;
sub(esp, Immediate(space));
}
void LiftoffAssembler::LoadConstant(Register reg, WasmValue value) {
switch (value.type()) {
case kWasmI32:
if (value.to_i32() == 0) {
xor_(reg, reg);
} else {
mov(reg, Immediate(value.to_i32()));
}
break;
default:
UNIMPLEMENTED();
}
}
void LiftoffAssembler::MoveToReturnRegister(Register reg) {
if (reg != eax) mov(eax, reg);
}
void LiftoffAssembler::Load(Register dst, Address addr,
RelocInfo::Mode reloc_mode) {
mov(dst, Operand(reinterpret_cast<uint32_t>(addr), reloc_mode));
}
void LiftoffAssembler::Store(Address addr, Register reg,
PinnedRegisterScope pinned_regs,
RelocInfo::Mode reloc_mode) {
mov(Operand(reinterpret_cast<uint32_t>(addr), reloc_mode), reg);
}
void LiftoffAssembler::LoadCallerFrameSlot(Register dst,
uint32_t caller_slot_idx) {
mov(dst, Operand(ebp, 4 + 4 * caller_slot_idx));
}
void LiftoffAssembler::MoveStackValue(uint32_t dst_index, uint32_t src_index,
wasm::ValueType type) {
DCHECK_NE(dst_index, src_index);
DCHECK_EQ(kWasmI32, type);
if (cache_state_.has_unused_register()) {
Register reg = GetUnusedRegister(type);
Fill(reg, src_index);
Spill(dst_index, reg);
} else {
push(liftoff::GetStackSlot(src_index));
pop(liftoff::GetStackSlot(dst_index));
}
}
void LiftoffAssembler::Spill(uint32_t index, Register reg) {
// TODO(clemensh): Handle different types here.
mov(liftoff::GetStackSlot(index), reg);
}
void LiftoffAssembler::Spill(uint32_t index, WasmValue value) {
// TODO(clemensh): Handle different types here.
mov(liftoff::GetStackSlot(index), Immediate(value.to_i32()));
}
void LiftoffAssembler::Fill(Register reg, uint32_t index) {
// TODO(clemensh): Handle different types here.
mov(reg, liftoff::GetStackSlot(index));
}
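// i32.add: when the destination differs from lhs, lea computes lhs + rhs in
// a single instruction without clobbering either input register.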
void LiftoffAssembler::emit_i32_add(Register dst, Register lhs, Register rhs) {
if (lhs.code() != dst.code()) {
lea(dst, Operand(lhs, rhs, times_1, 0));
} else {
add(dst, rhs);
}
}
#define DEFAULT_I32_BINOP(name, internal_name) \
void LiftoffAssembler::emit_i32_##name(Register dst, Register lhs, \
Register rhs) { \
if (lhs.code() != dst.code()) { \
mov(dst, lhs); \
} \
internal_name(dst, rhs); \
}
// clang-format off
DEFAULT_I32_BINOP(sub, sub)
DEFAULT_I32_BINOP(mul, imul)
DEFAULT_I32_BINOP(and, and_)
DEFAULT_I32_BINOP(or, or_)
DEFAULT_I32_BINOP(xor, xor_)
// clang-format on
#undef DEFAULT_I32_BINOP
void LiftoffAssembler::JumpIfZero(Register reg, Label* label) {
test(reg, reg);
j(zero, label);
}
} // namespace wasm
} // namespace internal
} // namespace v8
#endif // V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_IA32_H_
// Copyright 2017 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/wasm/baseline/liftoff-assembler.h"
#include "src/assembler-inl.h"
#include "src/compiler/linkage.h"
#include "src/compiler/wasm-compiler.h"
#include "src/counters.h"
#include "src/macro-assembler-inl.h"
#include "src/wasm/function-body-decoder-impl.h"
#include "src/wasm/wasm-opcodes.h"
namespace v8 {
namespace internal {
namespace wasm {
constexpr auto kRegister = LiftoffAssembler::VarState::kRegister;
constexpr auto kConstant = LiftoffAssembler::VarState::kConstant;
constexpr auto kStack = LiftoffAssembler::VarState::kStack;
namespace {
#define __ asm_->
#define TRACE(...) \
do { \
if (FLAG_trace_liftoff) PrintF("[liftoff] " __VA_ARGS__); \
} while (false)
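// Records the register moves and loads needed to transform the current cache
// state into a requested target state, and executes them (at destruction
// time, at the latest).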
class StackTransferRecipe {
public:
explicit StackTransferRecipe(LiftoffAssembler* wasm_asm) : asm_(wasm_asm) {}
~StackTransferRecipe() { Execute(); }
void Execute() {
// TODO(clemensh): Find suitable schedule.
for (RegisterMove& rm : register_moves) {
asm_->Move(rm.dst, rm.src);
}
for (RegisterLoad& rl : register_loads) {
if (rl.is_constant_load) {
asm_->LoadConstant(rl.dst, rl.constant);
} else {
asm_->Fill(rl.dst, rl.stack_slot);
}
}
}
void TransferStackSlot(const LiftoffAssembler::CacheState& dst_state,
uint32_t dst_index, uint32_t src_index) {
const LiftoffAssembler::VarState& dst = dst_state.stack_state[dst_index];
const LiftoffAssembler::VarState& src =
__ cache_state()->stack_state[src_index];
switch (dst.loc) {
case kConstant:
DCHECK_EQ(dst, src);
break;
case kRegister:
switch (src.loc) {
case kConstant:
LoadConstant(dst.reg, WasmValue(src.i32_const));
break;
case kRegister:
if (dst.reg != src.reg) MoveRegister(dst.reg, src.reg);
break;
case kStack:
LoadStackSlot(dst.reg, src_index);
break;
}
break;
case kStack:
switch (src.loc) {
case kConstant:
// TODO(clemensh): Handle other types than i32.
asm_->Spill(dst_index, WasmValue(src.i32_const));
break;
case kRegister:
asm_->Spill(dst_index, src.reg);
break;
case kStack:
if (src_index == dst_index) break;
// TODO(clemensh): Implement other types than i32.
asm_->MoveStackValue(dst_index, src_index, wasm::kWasmI32);
break;
}
}
}
void MoveRegister(Register dst, Register src) {
register_moves.emplace_back(dst, src);
}
void LoadConstant(Register dst, WasmValue value) {
register_loads.emplace_back(dst, value);
}
void LoadStackSlot(Register dst, uint32_t stack_index) {
register_loads.emplace_back(dst, stack_index);
}
private:
struct RegisterMove {
Register dst;
Register src;
RegisterMove(Register dst, Register src) : dst(dst), src(src) {}
};
struct RegisterLoad {
Register dst;
bool is_constant_load; // otherwise load it from the stack.
union {
uint32_t stack_slot;
WasmValue constant;
};
RegisterLoad(Register dst, WasmValue constant)
: dst(dst), is_constant_load(true), constant(constant) {}
RegisterLoad(Register dst, uint32_t stack_slot)
: dst(dst), is_constant_load(false), stack_slot(stack_slot) {}
};
std::vector<RegisterMove> register_moves;
std::vector<RegisterLoad> register_loads;
LiftoffAssembler* asm_;
};
} // namespace
// TODO(clemensh): Don't copy the full parent state (this makes us N^2).
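// Initialize this state as the merge target for |source|: one slot per local,
// the inherited stack values up to stack_base, and |arity| merge values on
// top. Each slot gets a register where one is available (a unique one for
// locals and merge values); all other slots stay on the stack.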
void LiftoffAssembler::CacheState::InitMerge(const CacheState& source,
uint32_t num_locals,
uint32_t arity) {
DCHECK(stack_state.empty());
DCHECK_GE(source.stack_height(), stack_base);
stack_state.resize(stack_base + arity);
auto slot = stack_state.begin();
// Compute list of all registers holding local values.
PinnedRegisterScope locals_regs;
for (auto local_it = source.stack_state.begin(),
local_end = source.stack_state.begin() + num_locals;
local_it != local_end; ++local_it) {
if (local_it->is_reg()) locals_regs.pin(local_it->reg);
}
RegList used_regs = 0;
auto InitStackSlot = [&](const VarState& src, bool needs_unique_reg) {
Register reg = no_reg;
if (src.is_reg() &&
(!needs_unique_reg || (used_regs & src.reg.bit()) == 0)) {
reg = src.reg;
} else if (has_unused_register(locals_regs)) {
reg = unused_register(locals_regs);
} else {
// No register available; the slot keeps its initial kStack state, but
// the slot iterator still has to advance.
DCHECK(slot->is_stack());
++slot;
return;
}
*slot = VarState(reg);
++slot;
inc_used(reg);
used_regs |= reg.bit();
};
auto source_slot = source.stack_state.begin();
// Ensure that locals do not share the same register.
for (uint32_t i = 0; i < num_locals; ++i, ++source_slot) {
InitStackSlot(*source_slot, true);
}
for (uint32_t i = num_locals; i < stack_base; ++i, ++source_slot) {
InitStackSlot(*source_slot, false);
}
DCHECK_GE(source.stack_height(), stack_base + arity);
source_slot = source.stack_state.end() - arity;
for (uint32_t i = 0; i < arity; ++i, ++source_slot) {
InitStackSlot(*source_slot, true);
}
DCHECK_EQ(slot, stack_state.end());
last_spilled_reg = source.last_spilled_reg;
}
void LiftoffAssembler::CacheState::Steal(CacheState& source) {
stack_state.swap(source.stack_state);
used_registers = source.used_registers;
memcpy(register_use_count, source.register_use_count,
sizeof(register_use_count));
last_spilled_reg = source.last_spilled_reg;
}
void LiftoffAssembler::CacheState::Split(const CacheState& source) {
stack_state = source.stack_state;
used_registers = source.used_registers;
memcpy(register_use_count, source.register_use_count,
sizeof(register_use_count));
last_spilled_reg = source.last_spilled_reg;
}
LiftoffAssembler::LiftoffAssembler(Isolate* isolate)
: TurboAssembler(isolate, nullptr, 0, CodeObjectRequired::kYes) {}
LiftoffAssembler::~LiftoffAssembler() {
if (num_locals_ > kInlineLocalTypes) {
free(more_local_types_);
}
}
Register LiftoffAssembler::GetBinaryOpTargetRegister(
ValueType type, PinnedRegisterScope pinned_regs) {
auto& slot_lhs = *(cache_state_.stack_state.end() - 2);
if (slot_lhs.loc == kRegister && GetNumUses(slot_lhs.reg) == 1) {
return slot_lhs.reg;
}
auto& slot_rhs = *(cache_state_.stack_state.end() - 1);
if (slot_rhs.loc == kRegister && GetNumUses(slot_rhs.reg) == 1) {
return slot_rhs.reg;
}
return GetUnusedRegister(type, pinned_regs);
}
void LiftoffAssembler::MergeFullStackWith(CacheState& target) {
DCHECK_EQ(cache_state_.stack_height(), target.stack_height());
// TODO(clemensh): Reuse the same StackTransferRecipe object to save some
// allocations.
StackTransferRecipe transfers(this);
for (uint32_t i = 0, e = cache_state_.stack_height(); i < e; ++i) {
transfers.TransferStackSlot(target, i, i);
}
}
void LiftoffAssembler::MergeStackWith(CacheState& target, uint32_t arity) {
// Before: ----------------|------ pop_count -----|--- arity ---|
// ^target_stack_height ^stack_base ^stack_height
// After: ----|-- arity --|
// ^ ^target_stack_height
// ^target_stack_base
uint32_t stack_height = cache_state_.stack_height();
uint32_t target_stack_height = target.stack_height();
uint32_t stack_base = stack_height - arity;
uint32_t target_stack_base = target_stack_height - arity;
StackTransferRecipe transfers(this);
for (uint32_t i = 0; i < target_stack_base; ++i) {
transfers.TransferStackSlot(target, i, i);
}
for (uint32_t i = 0; i < arity; ++i) {
transfers.TransferStackSlot(target, target_stack_base + i, stack_base + i);
}
}
void LiftoffAssembler::Spill(uint32_t index) {
auto& slot = cache_state_.stack_state[index];
switch (slot.loc) {
case kRegister:
Spill(index, slot.reg);
cache_state_.dec_used(slot.reg);
break;
case kConstant:
Spill(index, WasmValue(slot.i32_const));
break;
case kStack:
return;
}
slot.loc = kStack;
}
void LiftoffAssembler::SpillLocals() {
for (uint32_t i = 0; i < num_locals_; ++i) {
Spill(i);
}
}
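// Pop the top value of the wasm value stack into a register: reuse its
// current register if it has one, otherwise materialize the constant or fill
// the value from its stack slot.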
Register LiftoffAssembler::PopToRegister(ValueType type,
PinnedRegisterScope pinned_regs) {
DCHECK(!cache_state_.stack_state.empty());
VarState slot = cache_state_.stack_state.back();
cache_state_.stack_state.pop_back();
switch (slot.loc) {
case kRegister:
cache_state_.dec_used(slot.reg);
return slot.reg;
case kConstant: {
Register reg = GetUnusedRegister(type, pinned_regs);
LoadConstant(reg, WasmValue(slot.i32_const));
return reg;
}
case kStack: {
Register reg = GetUnusedRegister(type, pinned_regs);
Fill(reg, cache_state_.stack_height());
return reg;
}
}
UNREACHABLE();
}
Register LiftoffAssembler::SpillRegister(ValueType type,
PinnedRegisterScope pinned_regs) {
DCHECK_EQ(kWasmI32, type);
// Spill one cached value to free a register.
Register spill_reg = cache_state_.GetNextSpillReg(pinned_regs);
int remaining_uses = cache_state_.register_use_count[spill_reg.code()];
DCHECK_LT(0, remaining_uses);
for (uint32_t idx = cache_state_.stack_height() - 1;; --idx) {
DCHECK_GT(cache_state_.stack_height(), idx);
auto& slot = cache_state_.stack_state[idx];
if (!slot.is_reg() || slot.reg != spill_reg) continue;
Spill(idx, spill_reg);
slot.loc = kStack;
if (--remaining_uses == 0) break;
}
cache_state_.register_use_count[spill_reg.code()] = 0;
cache_state_.used_registers &= ~spill_reg.bit();
return spill_reg;
}
void LiftoffAssembler::set_num_locals(uint32_t num_locals) {
DCHECK_EQ(0, num_locals_); // only call this once.
num_locals_ = num_locals;
if (num_locals > kInlineLocalTypes) {
more_local_types_ =
reinterpret_cast<ValueType*>(malloc(num_locals * sizeof(ValueType)));
DCHECK_NOT_NULL(more_local_types_);
}
}
#undef __
#undef TRACE
} // namespace wasm
} // namespace internal
} // namespace v8
// Copyright 2017 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_H_
#define V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_H_
#include <memory>
// Clients of this interface shouldn't depend on lots of compiler internals.
// Do not include anything from src/compiler here!
#include "src/frames.h"
#include "src/macro-assembler.h"
#include "src/wasm/function-body-decoder.h"
#include "src/wasm/wasm-module.h"
#include "src/wasm/wasm-opcodes.h"
#include "src/wasm/wasm-value.h"
// Include platform specific definitions.
#if V8_TARGET_ARCH_IA32
#include "src/wasm/baseline/ia32/liftoff-assembler-ia32-defs.h"
#elif V8_TARGET_ARCH_X64
#include "src/wasm/baseline/x64/liftoff-assembler-x64-defs.h"
#elif V8_TARGET_ARCH_ARM64
#include "src/wasm/baseline/arm64/liftoff-assembler-arm64-defs.h"
#elif V8_TARGET_ARCH_ARM
#include "src/wasm/baseline/arm/liftoff-assembler-arm-defs.h"
#elif V8_TARGET_ARCH_PPC
#include "src/wasm/baseline/ppc/liftoff-assembler-ppc-defs.h"
#elif V8_TARGET_ARCH_MIPS
#include "src/wasm/baseline/mips/liftoff-assembler-mips-defs.h"
#elif V8_TARGET_ARCH_MIPS64
#include "src/wasm/baseline/mips64/liftoff-assembler-mips64-defs.h"
#else
#error Unsupported architecture.
#endif
namespace v8 {
namespace internal {
namespace wasm {
// Forward declarations.
struct ModuleEnv;
class LiftoffAssembler : public TurboAssembler {
public:
class PinnedRegisterScope {
public:
PinnedRegisterScope() : pinned_regs_(0) {}
explicit PinnedRegisterScope(RegList regs) : pinned_regs_(regs) {}
Register pin(Register reg) {
pinned_regs_ |= reg.bit();
return reg;
}
RegList pinned_regs() const { return pinned_regs_; }
private:
RegList pinned_regs_ = 0;
};
static_assert(IS_TRIVIALLY_COPYABLE(PinnedRegisterScope),
"PinnedRegisterScope can be passed by value");
explicit LiftoffAssembler(Isolate* isolate);
~LiftoffAssembler();
Register GetBinaryOpTargetRegister(ValueType, PinnedRegisterScope = {});
struct VarState {
enum Location { kStack, kRegister, kConstant };
Location loc;
union {
Register reg;
uint32_t i32_const;
};
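// |loc| selects the active union member: |reg| for kRegister, |i32_const|
// for kConstant; neither is meaningful for kStack.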
VarState() : loc(kStack) {}
explicit VarState(Register r) : loc(kRegister), reg(r) {}
explicit VarState(uint32_t value) : loc(kConstant), i32_const(value) {}
bool operator==(const VarState& other) const {
if (loc != other.loc) return false;
switch (loc) {
case kRegister:
return reg == other.reg;
case kStack:
return true;
case kConstant:
return i32_const == other.i32_const;
}
UNREACHABLE();
}
bool is_stack() const { return loc == kStack; }
bool is_reg() const { return loc == kRegister; }
bool is_const() const { return loc == kConstant; }
};
static_assert(IS_TRIVIALLY_COPYABLE(VarState),
"VarState should be trivially copyable");
struct CacheState {
MOVE_ONLY_WITH_DEFAULT_CONSTRUCTORS(CacheState);
// TODO(clemensh): Improve memory management here; avoid std::vector.
std::vector<VarState> stack_state;
RegList used_registers = 0;
// TODO(clemensh): Replace this by CountLeadingZeros(kGpCacheRegs) once that
// method is constexpr.
static constexpr int kMaxRegisterCode = 7;
uint32_t register_use_count[kMaxRegisterCode + 1] = {0};
// TODO(clemensh): Remove stack_base; use ControlBase::stack_depth.
uint32_t stack_base = 0;
Register last_spilled_reg = Register::from_code<0>();
// TODO(clemensh): Don't copy the full parent state (this makes us N^2).
void InitMerge(const CacheState& source, uint32_t num_locals,
uint32_t arity);
void Steal(CacheState& source);
void Split(const CacheState& source);
bool has_unused_register(PinnedRegisterScope pinned_scope = {}) const {
RegList available_regs =
kGpCacheRegs & ~used_registers & ~pinned_scope.pinned_regs();
return available_regs != 0;
}
Register unused_register(PinnedRegisterScope pinned_scope = {}) const {
RegList available_regs =
kGpCacheRegs & ~used_registers & ~pinned_scope.pinned_regs();
Register reg =
Register::from_code(base::bits::CountTrailingZeros64(available_regs));
DCHECK_EQ(0, used_registers & reg.bit());
return reg;
}
void inc_used(Register reg) {
used_registers |= reg.bit();
DCHECK_GE(kMaxRegisterCode, reg.code());
++register_use_count[reg.code()];
}
// Returns whether this was the last use.
bool dec_used(Register reg) {
DCHECK(is_used(reg));
DCHECK_GE(kMaxRegisterCode, reg.code());
if (--register_use_count[reg.code()] == 0) {
used_registers &= ~reg.bit();
return true;
}
return false;
}
bool is_used(Register reg) const {
DCHECK_GE(kMaxRegisterCode, reg.code());
bool used = used_registers & reg.bit();
DCHECK_EQ(used, register_use_count[reg.code()] != 0);
return used;
}
bool is_free(Register reg) const { return !is_used(reg); }
uint32_t stack_height() const {
return static_cast<uint32_t>(stack_state.size());
}
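// Pick the next spill candidate in round-robin order: prefer register codes
// above |last_spilled_reg|, wrapping around to the lowest unpinned code once
// those are exhausted.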
Register GetNextSpillReg(PinnedRegisterScope scope = {}) {
uint32_t mask = (1u << (last_spilled_reg.code() + 1)) - 1;
RegList unpinned_regs = kGpCacheRegs & ~scope.pinned_regs();
DCHECK_NE(0, unpinned_regs);
RegList remaining_regs = unpinned_regs & ~mask;
if (!remaining_regs) remaining_regs = unpinned_regs;
last_spilled_reg =
Register::from_code(base::bits::CountTrailingZeros64(remaining_regs));
return last_spilled_reg;
}
};
Register PopToRegister(ValueType, PinnedRegisterScope = {});
void PushRegister(Register reg) {
cache_state_.inc_used(reg);
cache_state_.stack_state.emplace_back(reg);
}
uint32_t GetNumUses(Register reg) const {
DCHECK_GE(CacheState::kMaxRegisterCode, reg.code());
return cache_state_.register_use_count[reg.code()];
}
Register GetUnusedRegister(ValueType type,
PinnedRegisterScope pinned_regs = {}) {
DCHECK_EQ(kWasmI32, type);
if (cache_state_.has_unused_register(pinned_regs)) {
return cache_state_.unused_register(pinned_regs);
}
return SpillRegister(type, pinned_regs);
}
void DropStackSlot(VarState* slot) {
// The only loc we care about is register. Other types don't occupy
// anything.
if (slot->loc != VarState::kRegister) return;
// Free the register, then set the loc to "stack".
// No need to write back, the value should be dropped.
cache_state_.dec_used(slot->reg);
slot->loc = VarState::kStack;
}
void MergeFullStackWith(CacheState&);
void MergeStackWith(CacheState&, uint32_t arity);
void Spill(uint32_t index);
void SpillLocals();
////////////////////////////////////
// Platform-specific part. //
////////////////////////////////////
inline void ReserveStackSpace(uint32_t);
inline void LoadConstant(Register, WasmValue);
inline void Load(Register, Address, RelocInfo::Mode = RelocInfo::NONE32);
inline void Store(Address, Register, PinnedRegisterScope,
RelocInfo::Mode = RelocInfo::NONE32);
inline void LoadCallerFrameSlot(Register, uint32_t caller_slot_idx);
inline void MoveStackValue(uint32_t dst_index, uint32_t src_index, ValueType);
inline void MoveToReturnRegister(Register);
inline void Spill(uint32_t index, Register);
inline void Spill(uint32_t index, WasmValue);
inline void Fill(Register, uint32_t index);
inline void emit_i32_add(Register dst, Register lhs, Register rhs);
inline void emit_i32_sub(Register dst, Register lhs, Register rhs);
inline void emit_i32_mul(Register dst, Register lhs, Register rhs);
inline void emit_i32_and(Register dst, Register lhs, Register rhs);
inline void emit_i32_or(Register dst, Register lhs, Register rhs);
inline void emit_i32_xor(Register dst, Register lhs, Register rhs);
inline void JumpIfZero(Register, Label*);
// Platform-specific constant.
static constexpr RegList kGpCacheRegs = kLiftoffAssemblerGpCacheRegs;
////////////////////////////////////
// End of platform-specific part. //
////////////////////////////////////
uint32_t num_locals() const { return num_locals_; }
void set_num_locals(uint32_t num_locals);
ValueType local_type(uint32_t index) {
DCHECK_GT(num_locals_, index);
ValueType* locals =
num_locals_ <= kInlineLocalTypes ? local_types_ : more_local_types_;
return locals[index];
}
void set_local_type(uint32_t index, ValueType type) {
ValueType* locals =
num_locals_ <= kInlineLocalTypes ? local_types_ : more_local_types_;
locals[index] = type;
}
CacheState* cache_state() { return &cache_state_; }
private:
static_assert(
base::bits::CountPopulation(kGpCacheRegs) >= 2,
"We need at least two cache registers to execute binary operations");
uint32_t num_locals_ = 0;
uint32_t stack_space_ = 0;
static constexpr uint32_t kInlineLocalTypes = 8;
union {
ValueType local_types_[kInlineLocalTypes];
ValueType* more_local_types_;
};
static_assert(sizeof(ValueType) == 1,
"Reconsider this inlining if ValueType gets bigger");
CacheState cache_state_;
Register SpillRegister(ValueType, PinnedRegisterScope = {});
};
} // namespace wasm
} // namespace internal
} // namespace v8
// Include platform specific implementation.
#if V8_TARGET_ARCH_IA32
#include "src/wasm/baseline/ia32/liftoff-assembler-ia32.h"
#elif V8_TARGET_ARCH_X64
#include "src/wasm/baseline/x64/liftoff-assembler-x64.h"
#elif V8_TARGET_ARCH_ARM64
#include "src/wasm/baseline/arm64/liftoff-assembler-arm64.h"
#elif V8_TARGET_ARCH_ARM
#include "src/wasm/baseline/arm/liftoff-assembler-arm.h"
#elif V8_TARGET_ARCH_PPC
#include "src/wasm/baseline/ppc/liftoff-assembler-ppc.h"
#elif V8_TARGET_ARCH_MIPS
#include "src/wasm/baseline/mips/liftoff-assembler-mips.h"
#elif V8_TARGET_ARCH_MIPS64
#include "src/wasm/baseline/mips64/liftoff-assembler-mips64.h"
#else
#error Unsupported architecture.
#endif
#endif // V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_H_
// Copyright 2017 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/wasm/baseline/liftoff-assembler.h"
#include "src/assembler-inl.h"
#include "src/compiler/linkage.h"
#include "src/compiler/wasm-compiler.h"
#include "src/counters.h"
#include "src/macro-assembler-inl.h"
#include "src/wasm/function-body-decoder-impl.h"
#include "src/wasm/wasm-opcodes.h"
namespace v8 {
namespace internal {
namespace wasm {
constexpr auto kRegister = LiftoffAssembler::VarState::kRegister;
constexpr auto kConstant = LiftoffAssembler::VarState::kConstant;
constexpr auto kStack = LiftoffAssembler::VarState::kStack;
namespace {
#define __ asm_->
#define TRACE(...) \
do { \
if (FLAG_trace_liftoff) PrintF("[liftoff] " __VA_ARGS__); \
} while (false)
class LiftoffCompiler {
public:
MOVE_ONLY_NO_DEFAULT_CONSTRUCTOR(LiftoffCompiler);
// TODO(clemensh): Make this a template parameter.
static constexpr wasm::Decoder::ValidateFlag validate =
wasm::Decoder::kValidate;
using Value = ValueBase;
struct Control : public ControlWithNamedConstructors<Control, Value> {
MOVE_ONLY_WITH_DEFAULT_CONSTRUCTORS(Control);
LiftoffAssembler::CacheState label_state;
// TODO(clemensh): Labels cannot be moved on arm64, though they can on all
// other platforms. Find a better solution.
std::unique_ptr<Label> label = base::make_unique<Label>();
};
using Decoder = WasmFullDecoder<validate, LiftoffCompiler>;
LiftoffCompiler(LiftoffAssembler* liftoff_asm,
compiler::CallDescriptor* call_desc, compiler::ModuleEnv* env)
: asm_(liftoff_asm), call_desc_(call_desc), env_(env) {
// The ModuleEnv will be used once we implement calls.
USE(env_);
}
bool ok() const { return ok_; }
void unsupported(Decoder* decoder, const char* reason) {
ok_ = false;
TRACE("unsupported: %s\n", reason);
decoder->errorf(decoder->pc(), "unsupported liftoff operation: %s", reason);
BindUnboundLabels(decoder);
}
void BindUnboundLabels(Decoder* decoder) {
#ifndef DEBUG
return;
#endif
// Bind all labels now, otherwise their destructor will fire a DCHECK error
// if they were referenced before.
for (uint32_t i = 0, e = decoder->control_depth(); i < e; ++i) {
Label* label = decoder->control_at(i)->label.get();
if (!label->is_bound()) __ bind(label);
}
}
void CheckStackSizeLimit(Decoder* decoder) {
DCHECK_GE(__ cache_state()->stack_height(), __ num_locals());
int stack_height = __ cache_state()->stack_height() - __ num_locals();
if (stack_height > kMaxValueStackHeight) {
unsupported(decoder, "value stack grows too large");
}
}
void StartFunction(Decoder* decoder) {
if (!kLiftoffAssemblerImplementedOnThisPlatform) {
unsupported(decoder, "platform");
return;
}
int num_locals = decoder->NumLocals();
__ set_num_locals(num_locals);
for (int i = 0; i < num_locals; ++i) {
__ set_local_type(i, decoder->GetLocalType(i));
}
}
void StartFunctionBody(Decoder* decoder, Control* block) {
__ EnterFrame(StackFrame::WASM_COMPILED);
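// Reserve one stack slot per local plus the maximum value stack height;
// kMaxValueStackHeight bounds the operand stack for now, and compilation
// bails out if a function needs more.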
__ ReserveStackSpace(kPointerSize *
(__ num_locals() + kMaxValueStackHeight));
// Param #0 is the wasm context.
constexpr uint32_t kFirstActualParameterIndex = 1;
uint32_t num_params = static_cast<uint32_t>(call_desc_->ParameterCount()) -
kFirstActualParameterIndex;
for (uint32_t i = 0; i < __ num_locals(); ++i) {
// We can currently only handle i32 parameters and locals.
if (__ local_type(i) != kWasmI32) {
unsupported(decoder, "non-i32 param/local");
return;
}
}
uint32_t param_idx = 0;
for (; param_idx < num_params; ++param_idx) {
// First input is the call target.
constexpr int kParameterStartInInputs = kFirstActualParameterIndex + 1;
compiler::LinkageLocation param_loc =
call_desc_->GetInputLocation(param_idx + kParameterStartInInputs);
if (param_loc.IsRegister()) {
DCHECK(!param_loc.IsAnyRegister());
Register param_reg = Register::from_code(param_loc.AsRegister());
if (param_reg.bit() & __ kGpCacheRegs) {
// This is a cache register, just use it.
__ PushRegister(param_reg);
} else {
// No cache register. Push to the stack.
__ Spill(param_idx, param_reg);
__ cache_state()->stack_state.emplace_back();
}
} else if (param_loc.IsCallerFrameSlot()) {
Register tmp_reg = __ GetUnusedRegister(__ local_type(param_idx));
__ LoadCallerFrameSlot(tmp_reg, -param_loc.AsCallerFrameSlot());
__ PushRegister(tmp_reg);
} else {
UNIMPLEMENTED();
}
}
for (; param_idx < __ num_locals(); ++param_idx) {
ValueType type = decoder->GetLocalType(param_idx);
switch (type) {
case kWasmI32:
__ cache_state()->stack_state.emplace_back(0);
break;
default:
UNIMPLEMENTED();
}
}
block->label_state.stack_base = __ num_locals();
DCHECK_EQ(__ num_locals(), param_idx);
DCHECK_EQ(__ num_locals(), __ cache_state()->stack_height());
CheckStackSizeLimit(decoder);
}
void FinishFunction(Decoder* decoder) {}
void OnFirstError(Decoder* decoder) {
ok_ = false;
BindUnboundLabels(decoder);
}
void Block(Decoder* decoder, Control* new_block) {
// Note: This is called for blocks and loops.
DCHECK_EQ(new_block, decoder->control_at(0));
new_block->label_state.stack_base = __ cache_state()->stack_height();
if (new_block->is_loop()) {
// Before entering a loop, spill all locals to the stack, in order to free
// the cache registers, and to avoid unnecessarily reloading stack values
// into registers at branches.
// TODO(clemensh): Come up with a better strategy here, involving
// pre-analysis of the function.
__ SpillLocals();
// Loop labels bind at the beginning of the block, block labels at the
// end.
__ bind(new_block->label.get());
new_block->label_state.Split(*__ cache_state());
}
}
void Loop(Decoder* decoder, Control* block) { Block(decoder, block); }
void Try(Decoder* decoder, Control* block) { unsupported(decoder, "try"); }
void If(Decoder* decoder, const Value& cond, Control* if_block) {
unsupported(decoder, "if");
}
void FallThruTo(Decoder* decoder, Control* c) {
if (c->merge.reached) {
__ MergeFullStackWith(c->label_state);
} else {
c->label_state.Split(*__ cache_state());
}
}
void PopControl(Decoder* decoder, Control* c) {
if (!c->is_loop() && c->merge.reached) {
__ cache_state()->Steal(c->label_state);
}
if (!c->label->is_bound()) {
__ bind(c->label.get());
}
}
void EndControl(Decoder* decoder, Control* c) {}
void UnOp(Decoder* decoder, WasmOpcode opcode, FunctionSig*,
const Value& value, Value* result) {
unsupported(decoder, "unary operation");
}
void BinOp(Decoder* decoder, WasmOpcode opcode, FunctionSig*,
const Value& lhs, const Value& rhs, Value* result) {
void (LiftoffAssembler::*emit_fn)(Register, Register, Register);
#define CASE_EMIT_FN(opcode, fn) \
case WasmOpcode::kExpr##opcode: \
emit_fn = &LiftoffAssembler::emit_##fn; \
break;
switch (opcode) {
CASE_EMIT_FN(I32Add, i32_add)
CASE_EMIT_FN(I32Sub, i32_sub)
CASE_EMIT_FN(I32Mul, i32_mul)
CASE_EMIT_FN(I32And, i32_and)
CASE_EMIT_FN(I32Ior, i32_or)
CASE_EMIT_FN(I32Xor, i32_xor)
default:
return unsupported(decoder, WasmOpcodes::OpcodeName(opcode));
}
#undef CASE_EMIT_FN
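// Pin the target register and the rhs register so that popping lhs cannot
// hand out a register that is already in use here.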
LiftoffAssembler::PinnedRegisterScope pinned_regs;
Register target_reg =
pinned_regs.pin(__ GetBinaryOpTargetRegister(kWasmI32));
Register rhs_reg = pinned_regs.pin(__ PopToRegister(kWasmI32, pinned_regs));
Register lhs_reg = __ PopToRegister(kWasmI32, pinned_regs);
(asm_->*emit_fn)(target_reg, lhs_reg, rhs_reg);
__ PushRegister(std::move(target_reg));
}
void I32Const(Decoder* decoder, Value* result, int32_t value) {
__ cache_state()->stack_state.emplace_back(value);
CheckStackSizeLimit(decoder);
}
void I64Const(Decoder* decoder, Value* result, int64_t value) {
unsupported(decoder, "i64.const");
}
void F32Const(Decoder* decoder, Value* result, float value) {
unsupported(decoder, "f32.const");
}
void F64Const(Decoder* decoder, Value* result, double value) {
unsupported(decoder, "f64.const");
}
void Drop(Decoder* decoder, const Value& value) {
__ DropStackSlot(&__ cache_state()->stack_state.back());
__ cache_state()->stack_state.pop_back();
}
void DoReturn(Decoder* decoder, Vector<Value> values, bool implicit) {
if (implicit) {
DCHECK_EQ(1, decoder->control_depth());
Control* func_block = decoder->control_at(0);
__ bind(func_block->label.get());
__ cache_state()->Steal(func_block->label_state);
}
if (!values.is_empty()) {
if (values.size() > 1) unsupported(decoder, "multi-return");
// TODO(clemensh): Handle other types.
DCHECK_EQ(kWasmI32, values[0].type);
Register reg = __ PopToRegister(kWasmI32);
__ MoveToReturnRegister(reg);
}
__ LeaveFrame(StackFrame::WASM_COMPILED);
__ Ret();
}
void GetLocal(Decoder* decoder, Value* result,
const LocalIndexOperand<validate>& operand) {
auto& slot = __ cache_state()->stack_state[operand.index];
switch (slot.loc) {
case kRegister:
__ PushRegister(slot.reg);
break;
case kConstant:
__ cache_state()->stack_state.emplace_back(slot.i32_const);
break;
case kStack: {
Register reg = __ GetUnusedRegister(__ local_type(operand.index));
__ Fill(reg, operand.index);
__ PushRegister(reg);
} break;
}
CheckStackSizeLimit(decoder);
}
void SetLocal(uint32_t local_index, bool is_tee) {
auto& state = *__ cache_state();
auto& source_slot = state.stack_state.back();
auto& target_slot = state.stack_state[local_index];
switch (source_slot.loc) {
case kRegister:
__ DropStackSlot(&target_slot);
target_slot = source_slot;
if (is_tee) state.inc_used(target_slot.reg);
break;
case kConstant:
__ DropStackSlot(&target_slot);
target_slot = source_slot;
break;
case kStack: {
switch (target_slot.loc) {
case kRegister:
if (state.register_use_count[target_slot.reg.code()] == 1) {
__ Fill(target_slot.reg, state.stack_height() - 1);
break;
} else {
state.dec_used(target_slot.reg);
// and fall through to use a new register.
}
case kConstant:
case kStack: {
Register target_reg =
__ GetUnusedRegister(__ local_type(local_index));
__ Fill(target_reg, state.stack_height() - 1);
target_slot = LiftoffAssembler::VarState(target_reg);
state.inc_used(target_reg);
} break;
}
break;
}
}
if (!is_tee) __ cache_state()->stack_state.pop_back();
}
void SetLocal(Decoder* decoder, const Value& value,
const LocalIndexOperand<validate>& operand) {
SetLocal(operand.index, false);
}
void TeeLocal(Decoder* decoder, const Value& value, Value* result,
const LocalIndexOperand<validate>& operand) {
SetLocal(operand.index, true);
}
void GetGlobal(Decoder* decoder, Value* result,
const GlobalIndexOperand<validate>& operand) {
unsupported(decoder, "get_global");
/*
auto* global = &env_->module->globals[operand.index];
Address global_addr =
reinterpret_cast<Address>(env_->globals_start + global->offset);
if (global->type != kWasmI32) return unsupported(decoder, "non-i32 global");
Register dst = __ GetUnusedRegister(global->type);
__ Load(dst, global_addr, RelocInfo::WASM_GLOBAL_REFERENCE);
__ PushRegister(dst);
*/
}
void SetGlobal(Decoder* decoder, const Value& value,
const GlobalIndexOperand<validate>& operand) {
unsupported(decoder, "set_global");
/*
auto* global = &env_->module->globals[operand.index];
Address global_addr =
reinterpret_cast<Address>(env_->globals_start + global->offset);
if (global->type != kWasmI32) return unsupported(decoder, "non-i32 global");
LiftoffAssembler::PinnedRegisterScope pinned_regs;
Register reg = pinned_regs.pin(__ PopToRegister(global->type));
__ Store(global_addr, reg, pinned_regs, RelocInfo::WASM_GLOBAL_REFERENCE);
*/
}
void Unreachable(Decoder* decoder) { unsupported(decoder, "unreachable"); }
void Select(Decoder* decoder, const Value& cond, const Value& fval,
const Value& tval, Value* result) {
unsupported(decoder, "select");
}
void BreakTo(Decoder* decoder, Control* target) {
if (!target->merge.reached) {
target->label_state.InitMerge(*__ cache_state(), __ num_locals(),
target->break_arity());
}
__ MergeStackWith(target->label_state, target->break_arity());
__ jmp(target->label.get());
}
void BrIf(Decoder* decoder, const Value& cond, Control* target) {
Label cont_false;
Register value = __ PopToRegister(kWasmI32);
__ JumpIfZero(value, &cont_false);
BreakTo(decoder, target);
__ bind(&cont_false);
}
void BrTable(Decoder* decoder, const BranchTableOperand<validate>& operand,
const Value& key) {
unsupported(decoder, "br_table");
}
void Else(Decoder* decoder, Control* if_block) {
unsupported(decoder, "else");
}
void LoadMem(Decoder* decoder, ValueType type, MachineType mem_type,
const MemoryAccessOperand<validate>& operand, const Value& index,
Value* result) {
unsupported(decoder, "memory load");
}
void StoreMem(Decoder* decoder, ValueType type, MachineType mem_type,
const MemoryAccessOperand<validate>& operand,
const Value& index, const Value& value) {
unsupported(decoder, "memory store");
}
void CurrentMemoryPages(Decoder* decoder, Value* result) {
unsupported(decoder, "current_memory");
}
void GrowMemory(Decoder* decoder, const Value& value, Value* result) {
unsupported(decoder, "grow_memory");
}
void CallDirect(Decoder* decoder,
const CallFunctionOperand<validate>& operand,
const Value args[], Value returns[]) {
unsupported(decoder, "call");
}
void CallIndirect(Decoder* decoder, const Value& index,
const CallIndirectOperand<validate>& operand,
const Value args[], Value returns[]) {
unsupported(decoder, "call_indirect");
}
void SimdOp(Decoder* decoder, WasmOpcode opcode, Vector<Value> args,
Value* result) {
unsupported(decoder, "simd");
}
void SimdLaneOp(Decoder* decoder, WasmOpcode opcode,
const SimdLaneOperand<validate>& operand,
const Vector<Value> inputs, Value* result) {
unsupported(decoder, "simd");
}
void SimdShiftOp(Decoder* decoder, WasmOpcode opcode,
const SimdShiftOperand<validate>& operand,
const Value& input, Value* result) {
unsupported(decoder, "simd");
}
void Simd8x16ShuffleOp(Decoder* decoder,
const Simd8x16ShuffleOperand<validate>& operand,
const Value& input0, const Value& input1,
Value* result) {
unsupported(decoder, "simd");
}
void Throw(Decoder* decoder, const ExceptionIndexOperand<validate>&,
Control* block, const Vector<Value>& args) {
unsupported(decoder, "throw");
}
void CatchException(Decoder* decoder,
const ExceptionIndexOperand<validate>& operand,
Control* block, Vector<Value> caught_values) {
unsupported(decoder, "catch");
}
void AtomicOp(Decoder* decoder, WasmOpcode opcode, Vector<Value> args,
const MemoryAccessOperand<validate>& operand, Value* result) {
unsupported(decoder, "atomicop");
}
private:
LiftoffAssembler* asm_;
compiler::CallDescriptor* call_desc_;
compiler::ModuleEnv* env_;
bool ok_ = true;
// TODO(clemensh): Remove this limitation by allocating more stack space if
// needed.
static constexpr int kMaxValueStackHeight = 8;
};
} // namespace
#undef __
#undef TRACE
} // namespace wasm
} // namespace internal
} // namespace v8
// Copyright 2017 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_MIPS_DEFS_H_
#define V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_MIPS_DEFS_H_
#include "src/reglist.h"
namespace v8 {
namespace internal {
namespace wasm {
// TODO(clemensh): Implement the LiftoffAssembler on this platform.
static constexpr bool kLiftoffAssemblerImplementedOnThisPlatform = false;
static constexpr RegList kLiftoffAssemblerGpCacheRegs = 0xff;
} // namespace wasm
} // namespace internal
} // namespace v8
#endif // V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_MIPS_DEFS_H_
// Copyright 2017 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_MIPS_H_
#define V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_MIPS_H_
#include "src/wasm/baseline/liftoff-assembler.h"
namespace v8 {
namespace internal {
namespace wasm {
void LiftoffAssembler::ReserveStackSpace(uint32_t space) { USE(stack_space_); }
void LiftoffAssembler::LoadConstant(Register reg, WasmValue value) {}
void LiftoffAssembler::Load(Register dst, Address addr,
RelocInfo::Mode reloc_mode) {}
void LiftoffAssembler::Store(Address addr, Register reg,
PinnedRegisterScope pinned_regs,
RelocInfo::Mode reloc_mode) {}
void LiftoffAssembler::LoadCallerFrameSlot(Register dst,
uint32_t caller_slot_idx) {}
void LiftoffAssembler::MoveStackValue(uint32_t dst_index, uint32_t src_index,
wasm::ValueType type) {}
void LiftoffAssembler::MoveToReturnRegister(Register reg) {}
void LiftoffAssembler::Spill(uint32_t index, Register reg) {}
void LiftoffAssembler::Spill(uint32_t index, WasmValue value) {}
void LiftoffAssembler::Fill(Register reg, uint32_t index) {}
void LiftoffAssembler::emit_i32_add(Register dst, Register lhs, Register rhs) {}
#define DEFAULT_I32_BINOP(name, internal_name) \
void LiftoffAssembler::emit_i32_##name(Register dst, Register lhs, \
Register rhs) {}
// clang-format off
DEFAULT_I32_BINOP(sub, sub)
DEFAULT_I32_BINOP(mul, imul)
DEFAULT_I32_BINOP(and, and)
DEFAULT_I32_BINOP(or, or)
DEFAULT_I32_BINOP(xor, xor)
// clang-format on
#undef DEFAULT_I32_BINOP
void LiftoffAssembler::JumpIfZero(Register reg, Label* label) {}
} // namespace wasm
} // namespace internal
} // namespace v8
#endif // V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_MIPS_H_
// Copyright 2017 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_MIPS64_DEFS_H_
#define V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_MIPS64_DEFS_H_
#include "src/reglist.h"
namespace v8 {
namespace internal {
namespace wasm {
// TODO(clemensh): Implement the LiftoffAssembler on this platform.
static constexpr bool kLiftoffAssemblerImplementedOnThisPlatform = false;
static constexpr RegList kLiftoffAssemblerGpCacheRegs = 0xff;
} // namespace wasm
} // namespace internal
} // namespace v8
#endif // V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_MIPS64_DEFS_H_
// Copyright 2017 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_MIPS64_H_
#define V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_MIPS64_H_
#include "src/wasm/baseline/liftoff-assembler.h"
namespace v8 {
namespace internal {
namespace wasm {
void LiftoffAssembler::ReserveStackSpace(uint32_t space) { USE(stack_space_); }
void LiftoffAssembler::LoadConstant(Register reg, WasmValue value) {}
void LiftoffAssembler::Load(Register dst, Address addr,
RelocInfo::Mode reloc_mode) {}
void LiftoffAssembler::Store(Address addr, Register reg,
PinnedRegisterScope pinned_regs,
RelocInfo::Mode reloc_mode) {}
void LiftoffAssembler::LoadCallerFrameSlot(Register dst,
uint32_t caller_slot_idx) {}
void LiftoffAssembler::MoveStackValue(uint32_t dst_index, uint32_t src_index,
wasm::ValueType type) {}
void LiftoffAssembler::MoveToReturnRegister(Register reg) {}
void LiftoffAssembler::Spill(uint32_t index, Register reg) {}
void LiftoffAssembler::Spill(uint32_t index, WasmValue value) {}
void LiftoffAssembler::Fill(Register reg, uint32_t index) {}
void LiftoffAssembler::emit_i32_add(Register dst, Register lhs, Register rhs) {}
#define DEFAULT_I32_BINOP(name, internal_name) \
void LiftoffAssembler::emit_i32_##name(Register dst, Register lhs, \
Register rhs) {}
// clang-format off
DEFAULT_I32_BINOP(sub, sub)
DEFAULT_I32_BINOP(mul, imul)
DEFAULT_I32_BINOP(and, and)
DEFAULT_I32_BINOP(or, or)
DEFAULT_I32_BINOP(xor, xor)
// clang-format on
#undef DEFAULT_I32_BINOP
void LiftoffAssembler::JumpIfZero(Register reg, Label* label) {}
} // namespace wasm
} // namespace internal
} // namespace v8
#endif // V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_MIPS64_H_
// Copyright 2017 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_PPC_DEFS_H_
#define V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_PPC_DEFS_H_
#include "src/reglist.h"
namespace v8 {
namespace internal {
namespace wasm {
// TODO(clemensh): Implement the LiftoffAssembler on this platform.
static constexpr bool kLiftoffAssemblerImplementedOnThisPlatform = false;
static constexpr RegList kLiftoffAssemblerGpCacheRegs = 0xff;
} // namespace wasm
} // namespace internal
} // namespace v8
#endif // V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_PPC_DEFS_H_
// Copyright 2017 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_PPC_H_
#define V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_PPC_H_
#include "src/wasm/baseline/liftoff-assembler.h"
namespace v8 {
namespace internal {
namespace wasm {
void LiftoffAssembler::ReserveStackSpace(uint32_t space) { USE(stack_space_); }
void LiftoffAssembler::LoadConstant(Register reg, WasmValue value) {}
void LiftoffAssembler::Load(Register dst, Address addr,
RelocInfo::Mode reloc_mode) {}
void LiftoffAssembler::Store(Address addr, Register reg,
PinnedRegisterScope pinned_regs,
RelocInfo::Mode reloc_mode) {}
void LiftoffAssembler::LoadCallerFrameSlot(Register dst,
uint32_t caller_slot_idx) {}
void LiftoffAssembler::MoveStackValue(uint32_t dst_index, uint32_t src_index,
wasm::ValueType type) {}
void LiftoffAssembler::MoveToReturnRegister(Register reg) {}
void LiftoffAssembler::Spill(uint32_t index, Register reg) {}
void LiftoffAssembler::Spill(uint32_t index, WasmValue value) {}
void LiftoffAssembler::Fill(Register reg, uint32_t index) {}
void LiftoffAssembler::emit_i32_add(Register dst, Register lhs, Register rhs) {}
#define DEFAULT_I32_BINOP(name, internal_name) \
void LiftoffAssembler::emit_i32_##name(Register dst, Register lhs, \
Register rhs) {}
// clang-format off
DEFAULT_I32_BINOP(sub, sub)
DEFAULT_I32_BINOP(mul, imul)
DEFAULT_I32_BINOP(and, and)
DEFAULT_I32_BINOP(or, or)
DEFAULT_I32_BINOP(xor, xor)
// clang-format on
#undef DEFAULT_I32_BINOP
void LiftoffAssembler::JumpIfZero(Register reg, Label* label) {}
} // namespace wasm
} // namespace internal
} // namespace v8
#endif // V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_PPC_H_
// Copyright 2017 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_X64_DEFS_H_
#define V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_X64_DEFS_H_
#include "src/reglist.h"
namespace v8 {
namespace internal {
namespace wasm {
static constexpr bool kLiftoffAssemblerImplementedOnThisPlatform = true;
static constexpr RegList kLiftoffAssemblerGpCacheRegs = 1 << 0 | // rax
1 << 1 | // rcx
1 << 2 | // rdx
1 << 3 | // rbx
1 << 6 | // rsi
1 << 7; // rdi
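// Note: rsp (code 4) and rbp (code 5) are deliberately absent from this list,
// as they serve as stack and frame pointer.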
} // namespace wasm
} // namespace internal
} // namespace v8
#endif // V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_X64_DEFS_H_
// Copyright 2017 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_X64_H_
#define V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_X64_H_
#include "src/wasm/baseline/liftoff-assembler.h"
#include "src/assembler.h"
#include "src/wasm/wasm-opcodes.h"
namespace v8 {
namespace internal {
namespace wasm {
namespace liftoff {
inline Operand GetStackSlot(uint32_t index) {
// rbp-8 holds the stack marker, first stack slot is located at rbp-16.
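  // Layout (8-byte slots):
  //   [rbp     ] caller's saved frame pointer
  //   [rbp -  8] stack marker
  //   [rbp - 16] wasm stack slot 0
  //   [rbp - 24] wasm stack slot 1, ...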
return Operand(rbp, -16 - 8 * index);
}
} // namespace liftoff
void LiftoffAssembler::ReserveStackSpace(uint32_t space) {
stack_space_ = space;
subl(rsp, Immediate(space));
}
void LiftoffAssembler::LoadConstant(Register reg, WasmValue value) {
switch (value.type()) {
case kWasmI32:
if (value.to_i32() == 0) {
xorl(reg, reg);
} else {
movl(reg, Immediate(value.to_i32()));
}
break;
default:
UNIMPLEMENTED();
}
}
void LiftoffAssembler::Load(Register dst, Address addr,
RelocInfo::Mode reloc_mode) {
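  // Materialize the (relocatable) address constant in dst, then load the
  // 32-bit value stored at that address.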
movp(dst, bit_cast<void*>(addr), reloc_mode);
movl(dst, Operand(dst, 0));
}
void LiftoffAssembler::Store(Address addr, Register reg,
PinnedRegisterScope pinned_regs,
RelocInfo::Mode reloc_mode) {
// TODO(clemensh): Change this to kPointerSizeT or something.
Register global_addr_reg = GetUnusedRegister(kWasmI32, pinned_regs);
DCHECK_NE(reg, global_addr_reg);
movp(global_addr_reg, static_cast<void*>(addr), reloc_mode);
movl(Operand(global_addr_reg, 0), reg);
}
void LiftoffAssembler::LoadCallerFrameSlot(Register dst,
uint32_t caller_slot_idx) {
movl(dst, Operand(rbp, 8 + 8 * caller_slot_idx));
}
void LiftoffAssembler::MoveStackValue(uint32_t dst_index, uint32_t src_index,
wasm::ValueType type) {
DCHECK_NE(dst_index, src_index);
DCHECK_EQ(kWasmI32, type);
if (cache_state_.has_unused_register()) {
Register reg = GetUnusedRegister(type);
Fill(reg, src_index);
Spill(dst_index, reg);
} else {
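    // No register is free: move the value via the machine stack. On x64 both
    // pushq and popq accept memory operands, so no scratch register is needed.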
pushq(liftoff::GetStackSlot(src_index));
popq(liftoff::GetStackSlot(dst_index));
}
}
void LiftoffAssembler::MoveToReturnRegister(Register reg) {
// TODO(clemensh): Handle different types here.
if (reg != rax) movl(rax, reg);
}
void LiftoffAssembler::Spill(uint32_t index, Register reg) {
// TODO(clemensh): Handle different types here.
movl(liftoff::GetStackSlot(index), reg);
}
void LiftoffAssembler::Spill(uint32_t index, WasmValue value) {
// TODO(clemensh): Handle different types here.
movl(liftoff::GetStackSlot(index), Immediate(value.to_i32()));
}
void LiftoffAssembler::Fill(Register reg, uint32_t index) {
// TODO(clemensh): Handle different types here.
movl(reg, liftoff::GetStackSlot(index));
}
void LiftoffAssembler::emit_i32_add(Register dst, Register lhs, Register rhs) {
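  // If dst differs from lhs, lea computes lhs + rhs directly into dst without
  // clobbering either operand; otherwise a plain add into dst suffices.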
if (lhs.code() != dst.code()) {
leal(dst, Operand(lhs, rhs, times_1, 0));
} else {
addl(dst, rhs);
}
}
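// For the remaining i32 binops: move lhs into dst (if distinct), then apply
// the operation with rhs. Note that this assumes dst != rhs whenever
// dst != lhs; otherwise the initial movl would clobber rhs before the
// non-commutative sub reads it.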
#define DEFAULT_I32_BINOP(name, internal_name) \
void LiftoffAssembler::emit_i32_##name(Register dst, Register lhs, \
Register rhs) { \
if (lhs.code() != dst.code()) { \
movl(dst, lhs); \
} \
internal_name##l(dst, rhs); \
}
// clang-format off
DEFAULT_I32_BINOP(sub, sub)
DEFAULT_I32_BINOP(mul, imul)
DEFAULT_I32_BINOP(and, and)
DEFAULT_I32_BINOP(or, or)
DEFAULT_I32_BINOP(xor, xor)
// clang-format on
#undef DEFAULT_I32_BINOP
void LiftoffAssembler::JumpIfZero(Register reg, Label* label) {
testl(reg, reg);
j(zero, label);
}
} // namespace wasm
} // namespace internal
} // namespace v8
#endif // V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_X64_H_
...@@ -459,6 +459,8 @@ struct ControlBase {
              bool merge_reached = false)
      : kind(kind), stack_depth(stack_depth), pc(pc), merge(merge_reached) {}
  uint32_t break_arity() const { return is_loop() ? 0 : merge.arity; }
  // Check whether the current block is reachable.
  bool reachable() const { return reachability == kReachable; }
...@@ -1221,6 +1223,7 @@ class WasmFullDecoder : public WasmDecoder<validate> {
  }
  inline uint32_t stack_size() const {
    DCHECK_GE(kMaxUInt32, stack_.size());
    return static_cast<uint32_t>(stack_.size());
  }
...@@ -1520,7 +1523,7 @@ class WasmFullDecoder : public WasmDecoder<validate> {
      }
      // Check that label types match up.
      Control* c = control_at(target);
-     uint32_t arity = c->is_loop() ? 0 : c->merge.arity;
+     uint32_t arity = c->break_arity();
      if (i == 0) {
        br_arity = arity;
      } else if (!VALIDATE(br_arity == arity)) {
...