Commit c354fb9c authored by Clemens Hammacher, committed by Commit Bot

[Liftoff] Add histogram for bailout reasons

This CL adds a new enum {LiftoffBailoutReason}, and tracks this reason
for each bailout. This will give us data to prioritize extensions of
Liftoff for new proposals or last missing instructions. Since we also
track the {kSuccess} case, we will also see what percentage of
functions can be compiled with Liftoff overall.

R=mstarzinger@chromium.org
CC=jwd@chromium.org

Change-Id: I42b6a14c5a298ddda7053c195e8b650dc1fe66dc
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/1634910
Reviewed-by: Andreas Haas <ahaas@chromium.org>
Commit-Queue: Clemens Hammacher <clemensh@chromium.org>
Cr-Commit-Position: refs/heads/master@{#61939}
parent 2f379994
...@@ -89,7 +89,9 @@ namespace internal { ...@@ -89,7 +89,9 @@ namespace internal {
0, 100, 32) \ 0, 100, 32) \
/* number of code GCs triggered per native module, collected on code GC */ \ /* number of code GCs triggered per native module, collected on code GC */ \
HR(wasm_module_num_triggered_code_gcs, \ HR(wasm_module_num_triggered_code_gcs, \
V8.WasmModuleNumberOfCodeGCsTriggered, 1, 128, 20) V8.WasmModuleNumberOfCodeGCsTriggered, 1, 128, 20) \
/* bailout reason if Liftoff failed, or {kSuccess} (per function) */ \
HR(liftoff_bailout_reasons, V8.LiftoffBailoutReasons, 0, 20, 21)
#define HISTOGRAM_TIMER_LIST(HT) \ #define HISTOGRAM_TIMER_LIST(HT) \
/* Timer histograms, not thread safe: HT(name, caption, max, unit) */ \ /* Timer histograms, not thread safe: HT(name, caption, max, unit) */ \
......
...@@ -7,8 +7,6 @@ ...@@ -7,8 +7,6 @@
#include "src/wasm/baseline/liftoff-assembler.h" #include "src/wasm/baseline/liftoff-assembler.h"
#define BAILOUT(reason) bailout("arm " reason)
namespace v8 { namespace v8 {
namespace internal { namespace internal {
namespace wasm { namespace wasm {
...@@ -223,7 +221,7 @@ inline void EmitFloatMinOrMax(LiftoffAssembler* assm, RegisterType dst, ...@@ -223,7 +221,7 @@ inline void EmitFloatMinOrMax(LiftoffAssembler* assm, RegisterType dst,
int LiftoffAssembler::PrepareStackFrame() { int LiftoffAssembler::PrepareStackFrame() {
if (!CpuFeatures::IsSupported(ARMv7)) { if (!CpuFeatures::IsSupported(ARMv7)) {
BAILOUT("Armv6 not supported"); bailout(kUnsupportedArchitecture, "Armv6 not supported");
return 0; return 0;
} }
uint32_t offset = static_cast<uint32_t>(pc_offset()); uint32_t offset = static_cast<uint32_t>(pc_offset());
...@@ -247,7 +245,8 @@ void LiftoffAssembler::PatchPrepareStackFrame(int offset, ...@@ -247,7 +245,8 @@ void LiftoffAssembler::PatchPrepareStackFrame(int offset,
// before checking it. // before checking it.
// TODO(arm): Remove this when the stack check mechanism will be updated. // TODO(arm): Remove this when the stack check mechanism will be updated.
if (bytes > KB / 2) { if (bytes > KB / 2) {
BAILOUT("Stack limited to 512 bytes to avoid a bug in StackCheck"); bailout(kOtherReason,
"Stack limited to 512 bytes to avoid a bug in StackCheck");
return; return;
} }
#endif #endif
...@@ -750,7 +749,7 @@ void LiftoffAssembler::emit_i32_divs(Register dst, Register lhs, Register rhs, ...@@ -750,7 +749,7 @@ void LiftoffAssembler::emit_i32_divs(Register dst, Register lhs, Register rhs,
Label* trap_div_by_zero, Label* trap_div_by_zero,
Label* trap_div_unrepresentable) { Label* trap_div_unrepresentable) {
if (!CpuFeatures::IsSupported(SUDIV)) { if (!CpuFeatures::IsSupported(SUDIV)) {
BAILOUT("i32_divs"); bailout(kMissingCPUFeature, "i32_divs");
return; return;
} }
CpuFeatureScope scope(this, SUDIV); CpuFeatureScope scope(this, SUDIV);
...@@ -778,7 +777,7 @@ void LiftoffAssembler::emit_i32_divs(Register dst, Register lhs, Register rhs, ...@@ -778,7 +777,7 @@ void LiftoffAssembler::emit_i32_divs(Register dst, Register lhs, Register rhs,
void LiftoffAssembler::emit_i32_divu(Register dst, Register lhs, Register rhs, void LiftoffAssembler::emit_i32_divu(Register dst, Register lhs, Register rhs,
Label* trap_div_by_zero) { Label* trap_div_by_zero) {
if (!CpuFeatures::IsSupported(SUDIV)) { if (!CpuFeatures::IsSupported(SUDIV)) {
BAILOUT("i32_divu"); bailout(kMissingCPUFeature, "i32_divu");
return; return;
} }
CpuFeatureScope scope(this, SUDIV); CpuFeatureScope scope(this, SUDIV);
...@@ -793,7 +792,7 @@ void LiftoffAssembler::emit_i32_rems(Register dst, Register lhs, Register rhs, ...@@ -793,7 +792,7 @@ void LiftoffAssembler::emit_i32_rems(Register dst, Register lhs, Register rhs,
if (!CpuFeatures::IsSupported(SUDIV)) { if (!CpuFeatures::IsSupported(SUDIV)) {
// When this case is handled, a check for ARMv7 is required to use mls. // When this case is handled, a check for ARMv7 is required to use mls.
// Mls support is implied with SUDIV support. // Mls support is implied with SUDIV support.
BAILOUT("i32_rems"); bailout(kMissingCPUFeature, "i32_rems");
return; return;
} }
CpuFeatureScope scope(this, SUDIV); CpuFeatureScope scope(this, SUDIV);
...@@ -814,7 +813,7 @@ void LiftoffAssembler::emit_i32_remu(Register dst, Register lhs, Register rhs, ...@@ -814,7 +813,7 @@ void LiftoffAssembler::emit_i32_remu(Register dst, Register lhs, Register rhs,
if (!CpuFeatures::IsSupported(SUDIV)) { if (!CpuFeatures::IsSupported(SUDIV)) {
// When this case is handled, a check for ARMv7 is required to use mls. // When this case is handled, a check for ARMv7 is required to use mls.
// Mls support is implied with SUDIV support. // Mls support is implied with SUDIV support.
BAILOUT("i32_remu"); bailout(kMissingCPUFeature, "i32_remu");
return; return;
} }
CpuFeatureScope scope(this, SUDIV); CpuFeatureScope scope(this, SUDIV);
...@@ -1564,6 +1563,4 @@ void LiftoffStackSlots::Construct() { ...@@ -1564,6 +1563,4 @@ void LiftoffStackSlots::Construct() {
} // namespace internal } // namespace internal
} // namespace v8 } // namespace v8
#undef BAILOUT
#endif // V8_WASM_BASELINE_ARM_LIFTOFF_ASSEMBLER_ARM_H_ #endif // V8_WASM_BASELINE_ARM_LIFTOFF_ASSEMBLER_ARM_H_
...@@ -7,8 +7,6 @@ ...@@ -7,8 +7,6 @@
#include "src/wasm/baseline/liftoff-assembler.h" #include "src/wasm/baseline/liftoff-assembler.h"
#define BAILOUT(reason) bailout("arm64 " reason)
namespace v8 { namespace v8 {
namespace internal { namespace internal {
namespace wasm { namespace wasm {
...@@ -135,7 +133,7 @@ void LiftoffAssembler::PatchPrepareStackFrame(int offset, ...@@ -135,7 +133,7 @@ void LiftoffAssembler::PatchPrepareStackFrame(int offset,
if (!IsImmAddSub(bytes)) { if (!IsImmAddSub(bytes)) {
// Stack greater than 4M! Because this is a quite improbable case, we // Stack greater than 4M! Because this is a quite improbable case, we
// just fallback to Turbofan. // just fallback to Turbofan.
BAILOUT("Stack too big"); bailout(kOtherReason, "Stack too big");
return; return;
} }
} }
...@@ -144,7 +142,8 @@ void LiftoffAssembler::PatchPrepareStackFrame(int offset, ...@@ -144,7 +142,8 @@ void LiftoffAssembler::PatchPrepareStackFrame(int offset,
// before checking it. // before checking it.
// TODO(arm): Remove this when the stack check mechanism will be updated. // TODO(arm): Remove this when the stack check mechanism will be updated.
if (bytes > KB / 2) { if (bytes > KB / 2) {
BAILOUT("Stack limited to 512 bytes to avoid a bug in StackCheck"); bailout(kOtherReason,
"Stack limited to 512 bytes to avoid a bug in StackCheck");
return; return;
} }
#endif #endif
...@@ -1088,6 +1087,4 @@ void LiftoffStackSlots::Construct() { ...@@ -1088,6 +1087,4 @@ void LiftoffStackSlots::Construct() {
} // namespace internal } // namespace internal
} // namespace v8 } // namespace v8
#undef BAILOUT
#endif // V8_WASM_BASELINE_ARM64_LIFTOFF_ASSEMBLER_ARM64_H_ #endif // V8_WASM_BASELINE_ARM64_LIFTOFF_ASSEMBLER_ARM64_H_
...@@ -14,11 +14,11 @@ namespace v8 { ...@@ -14,11 +14,11 @@ namespace v8 {
namespace internal { namespace internal {
namespace wasm { namespace wasm {
#define REQUIRE_CPU_FEATURE(name, ...) \ #define REQUIRE_CPU_FEATURE(name, ...) \
if (!CpuFeatures::IsSupported(name)) { \ if (!CpuFeatures::IsSupported(name)) { \
bailout("no " #name); \ bailout(kMissingCPUFeature, "no " #name); \
return __VA_ARGS__; \ return __VA_ARGS__; \
} \ } \
CpuFeatureScope feature(this, name); CpuFeatureScope feature(this, name);
namespace liftoff { namespace liftoff {
...@@ -1390,7 +1390,7 @@ template <typename dst_type, typename src_type> ...@@ -1390,7 +1390,7 @@ template <typename dst_type, typename src_type>
inline bool EmitTruncateFloatToInt(LiftoffAssembler* assm, Register dst, inline bool EmitTruncateFloatToInt(LiftoffAssembler* assm, Register dst,
DoubleRegister src, Label* trap) { DoubleRegister src, Label* trap) {
if (!CpuFeatures::IsSupported(SSE4_1)) { if (!CpuFeatures::IsSupported(SSE4_1)) {
assm->bailout("no SSE4.1"); assm->bailout(kMissingCPUFeature, "no SSE4.1");
return true; return true;
} }
CpuFeatureScope feature(assm, SSE4_1); CpuFeatureScope feature(assm, SSE4_1);
......
...@@ -13,6 +13,7 @@ ...@@ -13,6 +13,7 @@
#include "src/codegen/macro-assembler.h" #include "src/codegen/macro-assembler.h"
#include "src/execution/frames.h" #include "src/execution/frames.h"
#include "src/wasm/baseline/liftoff-assembler-defs.h" #include "src/wasm/baseline/liftoff-assembler-defs.h"
#include "src/wasm/baseline/liftoff-compiler.h"
#include "src/wasm/baseline/liftoff-register.h" #include "src/wasm/baseline/liftoff-register.h"
#include "src/wasm/function-body-decoder.h" #include "src/wasm/function-body-decoder.h"
#include "src/wasm/wasm-code-manager.h" #include "src/wasm/wasm-code-manager.h"
...@@ -635,13 +636,16 @@ class LiftoffAssembler : public TurboAssembler { ...@@ -635,13 +636,16 @@ class LiftoffAssembler : public TurboAssembler {
CacheState* cache_state() { return &cache_state_; } CacheState* cache_state() { return &cache_state_; }
const CacheState* cache_state() const { return &cache_state_; } const CacheState* cache_state() const { return &cache_state_; }
bool did_bailout() { return bailout_reason_ != nullptr; } bool did_bailout() { return bailout_reason_ != kSuccess; }
const char* bailout_reason() const { return bailout_reason_; } LiftoffBailoutReason bailout_reason() const { return bailout_reason_; }
const char* bailout_detail() const { return bailout_detail_; }
void bailout(const char* reason) { void bailout(LiftoffBailoutReason reason, const char* detail) {
if (bailout_reason_ != nullptr) return; DCHECK_NE(kSuccess, reason);
if (bailout_reason_ != kSuccess) return;
AbortCompilation(); AbortCompilation();
bailout_reason_ = reason; bailout_reason_ = reason;
bailout_detail_ = detail;
} }
private: private:
...@@ -655,7 +659,8 @@ class LiftoffAssembler : public TurboAssembler { ...@@ -655,7 +659,8 @@ class LiftoffAssembler : public TurboAssembler {
"Reconsider this inlining if ValueType gets bigger"); "Reconsider this inlining if ValueType gets bigger");
CacheState cache_state_; CacheState cache_state_;
uint32_t num_used_spill_slots_ = 0; uint32_t num_used_spill_slots_ = 0;
const char* bailout_reason_ = nullptr; LiftoffBailoutReason bailout_reason_ = kSuccess;
const char* bailout_detail_ = nullptr;
LiftoffRegister SpillOneRegister(LiftoffRegList candidates, LiftoffRegister SpillOneRegister(LiftoffRegList candidates,
LiftoffRegList pinned); LiftoffRegList pinned);
......
This diff is collapsed.
...@@ -19,6 +19,38 @@ struct CompilationEnv; ...@@ -19,6 +19,38 @@ struct CompilationEnv;
struct FunctionBody; struct FunctionBody;
struct WasmFeatures; struct WasmFeatures;
// Note: If this list changes, also the histogram "V8.LiftoffBailoutReasons"
// on the chromium side needs to be updated.
// Deprecating entries is always fine. Repurposing works if you don't care about
// temporary mix-ups. Increasing the number of reasons {kNumBailoutReasons} is
// more tricky, and might require introducing a new (updated) histogram.
// Reason why Liftoff compilation bailed out (or {kSuccess} if it did not),
// recorded per function in the "V8.LiftoffBailoutReasons" histogram.
enum LiftoffBailoutReason : int8_t {
  // Nothing actually failed.
  kSuccess = 0,
  // Compilation failed, but not because of Liftoff (e.g. invalid wasm input).
  kDecodeError = 1,
  // Liftoff is not implemented on that architecture.
  kUnsupportedArchitecture = 2,
  // More complex code would be needed because a CPU feature is not present.
  kMissingCPUFeature = 3,
  // Liftoff does not implement a complex (and rare) instruction.
  kComplexOperation = 4,
  // Unimplemented proposals (one bucket per wasm proposal):
  kSimd = 5,
  kAnyRef = 6,
  kExceptionHandling = 7,
  kMultiValue = 8,
  kTailCall = 9,
  kAtomics = 10,
  kBulkMemory = 11,
  kNonTrappingFloatToInt = 12,
  // A little gap (13..19), for forward compatibility.
  // Any other reason (use rarely; introduce new reasons if this spikes).
  kOtherReason = 20,
  // Marker: not a real reason, just the number of entries for histogram sizing.
  kNumBailoutReasons
};
WasmCompilationResult ExecuteLiftoffCompilation( WasmCompilationResult ExecuteLiftoffCompilation(
AccountingAllocator*, CompilationEnv*, const FunctionBody&, int func_index, AccountingAllocator*, CompilationEnv*, const FunctionBody&, int func_index,
Counters*, WasmFeatures* detected_features); Counters*, WasmFeatures* detected_features);
......
...@@ -7,8 +7,6 @@ ...@@ -7,8 +7,6 @@
#include "src/wasm/baseline/liftoff-assembler.h" #include "src/wasm/baseline/liftoff-assembler.h"
#define BAILOUT(reason) bailout("mips " reason)
namespace v8 { namespace v8 {
namespace internal { namespace internal {
namespace wasm { namespace wasm {
...@@ -854,7 +852,7 @@ void LiftoffAssembler::emit_f32_max(DoubleRegister dst, DoubleRegister lhs, ...@@ -854,7 +852,7 @@ void LiftoffAssembler::emit_f32_max(DoubleRegister dst, DoubleRegister lhs,
void LiftoffAssembler::emit_f32_copysign(DoubleRegister dst, DoubleRegister lhs, void LiftoffAssembler::emit_f32_copysign(DoubleRegister dst, DoubleRegister lhs,
DoubleRegister rhs) { DoubleRegister rhs) {
BAILOUT("f32_copysign"); bailout(kComplexOperation, "f32_copysign");
} }
void LiftoffAssembler::emit_f64_min(DoubleRegister dst, DoubleRegister lhs, void LiftoffAssembler::emit_f64_min(DoubleRegister dst, DoubleRegister lhs,
...@@ -881,7 +879,7 @@ void LiftoffAssembler::emit_f64_max(DoubleRegister dst, DoubleRegister lhs, ...@@ -881,7 +879,7 @@ void LiftoffAssembler::emit_f64_max(DoubleRegister dst, DoubleRegister lhs,
void LiftoffAssembler::emit_f64_copysign(DoubleRegister dst, DoubleRegister lhs, void LiftoffAssembler::emit_f64_copysign(DoubleRegister dst, DoubleRegister lhs,
DoubleRegister rhs) { DoubleRegister rhs) {
BAILOUT("f64_copysign"); bailout(kComplexOperation, "f64_copysign");
} }
#define FP_BINOP(name, instruction) \ #define FP_BINOP(name, instruction) \
...@@ -1026,10 +1024,9 @@ bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode, ...@@ -1026,10 +1024,9 @@ bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode,
TurboAssembler::CompareF64(EQ, rounded.fp(), converted_back.fp()); TurboAssembler::CompareF64(EQ, rounded.fp(), converted_back.fp());
TurboAssembler::BranchFalseF(trap); TurboAssembler::BranchFalseF(trap);
return true; return true;
} else {
BAILOUT("emit_type_conversion kExprI32SConvertF64");
return true;
} }
bailout(kUnsupportedArchitecture, "kExprI32SConvertF64");
return true;
} }
case kExprI32UConvertF64: { case kExprI32UConvertF64: {
if ((IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) && if ((IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) &&
...@@ -1049,10 +1046,9 @@ bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode, ...@@ -1049,10 +1046,9 @@ bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode,
TurboAssembler::CompareF64(EQ, rounded.fp(), converted_back.fp()); TurboAssembler::CompareF64(EQ, rounded.fp(), converted_back.fp());
TurboAssembler::BranchFalseF(trap); TurboAssembler::BranchFalseF(trap);
return true; return true;
} else {
BAILOUT("emit_type_conversion kExprI32UConvertF64");
return true;
} }
bailout(kUnsupportedArchitecture, "kExprI32UConvertF64");
return true;
} }
case kExprI32ReinterpretF32: case kExprI32ReinterpretF32:
mfc1(dst.gp(), src.fp()); mfc1(dst.gp(), src.fp());
...@@ -1116,26 +1112,26 @@ bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode, ...@@ -1116,26 +1112,26 @@ bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode,
} }
void LiftoffAssembler::emit_i32_signextend_i8(Register dst, Register src) { void LiftoffAssembler::emit_i32_signextend_i8(Register dst, Register src) {
BAILOUT("emit_i32_signextend_i8"); bailout(kComplexOperation, "i32_signextend_i8");
} }
void LiftoffAssembler::emit_i32_signextend_i16(Register dst, Register src) { void LiftoffAssembler::emit_i32_signextend_i16(Register dst, Register src) {
BAILOUT("emit_i32_signextend_i16"); bailout(kComplexOperation, "i32_signextend_i16");
} }
void LiftoffAssembler::emit_i64_signextend_i8(LiftoffRegister dst, void LiftoffAssembler::emit_i64_signextend_i8(LiftoffRegister dst,
LiftoffRegister src) { LiftoffRegister src) {
BAILOUT("emit_i64_signextend_i8"); bailout(kComplexOperation, "i64_signextend_i8");
} }
void LiftoffAssembler::emit_i64_signextend_i16(LiftoffRegister dst, void LiftoffAssembler::emit_i64_signextend_i16(LiftoffRegister dst,
LiftoffRegister src) { LiftoffRegister src) {
BAILOUT("emit_i64_signextend_i16"); bailout(kComplexOperation, "i64_signextend_i16");
} }
void LiftoffAssembler::emit_i64_signextend_i32(LiftoffRegister dst, void LiftoffAssembler::emit_i64_signextend_i32(LiftoffRegister dst,
LiftoffRegister src) { LiftoffRegister src) {
BAILOUT("emit_i64_signextend_i32"); bailout(kComplexOperation, "i64_signextend_i32");
} }
void LiftoffAssembler::emit_jump(Label* label) { void LiftoffAssembler::emit_jump(Label* label) {
...@@ -1511,6 +1507,4 @@ void LiftoffStackSlots::Construct() { ...@@ -1511,6 +1507,4 @@ void LiftoffStackSlots::Construct() {
} // namespace internal } // namespace internal
} // namespace v8 } // namespace v8
#undef BAILOUT
#endif // V8_WASM_BASELINE_MIPS_LIFTOFF_ASSEMBLER_MIPS_H_ #endif // V8_WASM_BASELINE_MIPS_LIFTOFF_ASSEMBLER_MIPS_H_
...@@ -7,8 +7,6 @@ ...@@ -7,8 +7,6 @@
#include "src/wasm/baseline/liftoff-assembler.h" #include "src/wasm/baseline/liftoff-assembler.h"
#define BAILOUT(reason) bailout("mips64 " reason)
namespace v8 { namespace v8 {
namespace internal { namespace internal {
namespace wasm { namespace wasm {
...@@ -742,7 +740,7 @@ void LiftoffAssembler::emit_f32_max(DoubleRegister dst, DoubleRegister lhs, ...@@ -742,7 +740,7 @@ void LiftoffAssembler::emit_f32_max(DoubleRegister dst, DoubleRegister lhs,
void LiftoffAssembler::emit_f32_copysign(DoubleRegister dst, DoubleRegister lhs, void LiftoffAssembler::emit_f32_copysign(DoubleRegister dst, DoubleRegister lhs,
DoubleRegister rhs) { DoubleRegister rhs) {
BAILOUT("f32_copysign"); bailout(kComplexOperation, "f32_copysign");
} }
void LiftoffAssembler::emit_f64_min(DoubleRegister dst, DoubleRegister lhs, void LiftoffAssembler::emit_f64_min(DoubleRegister dst, DoubleRegister lhs,
...@@ -769,7 +767,7 @@ void LiftoffAssembler::emit_f64_max(DoubleRegister dst, DoubleRegister lhs, ...@@ -769,7 +767,7 @@ void LiftoffAssembler::emit_f64_max(DoubleRegister dst, DoubleRegister lhs,
void LiftoffAssembler::emit_f64_copysign(DoubleRegister dst, DoubleRegister lhs, void LiftoffAssembler::emit_f64_copysign(DoubleRegister dst, DoubleRegister lhs,
DoubleRegister rhs) { DoubleRegister rhs) {
BAILOUT("f64_copysign"); bailout(kComplexOperation, "f64_copysign");
} }
#define FP_BINOP(name, instruction) \ #define FP_BINOP(name, instruction) \
...@@ -1010,26 +1008,26 @@ bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode, ...@@ -1010,26 +1008,26 @@ bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode,
} }
void LiftoffAssembler::emit_i32_signextend_i8(Register dst, Register src) { void LiftoffAssembler::emit_i32_signextend_i8(Register dst, Register src) {
BAILOUT("emit_i32_signextend_i8"); bailout(kComplexOperation, "i32_signextend_i8");
} }
void LiftoffAssembler::emit_i32_signextend_i16(Register dst, Register src) { void LiftoffAssembler::emit_i32_signextend_i16(Register dst, Register src) {
BAILOUT("emit_i32_signextend_i16"); bailout(kComplexOperation, "i32_signextend_i16");
} }
void LiftoffAssembler::emit_i64_signextend_i8(LiftoffRegister dst, void LiftoffAssembler::emit_i64_signextend_i8(LiftoffRegister dst,
LiftoffRegister src) { LiftoffRegister src) {
BAILOUT("emit_i64_signextend_i8"); bailout(kComplexOperation, "i64_signextend_i8");
} }
void LiftoffAssembler::emit_i64_signextend_i16(LiftoffRegister dst, void LiftoffAssembler::emit_i64_signextend_i16(LiftoffRegister dst,
LiftoffRegister src) { LiftoffRegister src) {
BAILOUT("emit_i64_signextend_i16"); bailout(kComplexOperation, "i64_signextend_i16");
} }
void LiftoffAssembler::emit_i64_signextend_i32(LiftoffRegister dst, void LiftoffAssembler::emit_i64_signextend_i32(LiftoffRegister dst,
LiftoffRegister src) { LiftoffRegister src) {
BAILOUT("emit_i64_signextend_i32"); bailout(kComplexOperation, "i64_signextend_i32");
} }
void LiftoffAssembler::emit_jump(Label* label) { void LiftoffAssembler::emit_jump(Label* label) {
...@@ -1351,6 +1349,4 @@ void LiftoffStackSlots::Construct() { ...@@ -1351,6 +1349,4 @@ void LiftoffStackSlots::Construct() {
} // namespace internal } // namespace internal
} // namespace v8 } // namespace v8
#undef BAILOUT
#endif // V8_WASM_BASELINE_MIPS64_LIFTOFF_ASSEMBLER_MIPS64_H_ #endif // V8_WASM_BASELINE_MIPS64_LIFTOFF_ASSEMBLER_MIPS64_H_
...@@ -14,11 +14,11 @@ namespace v8 { ...@@ -14,11 +14,11 @@ namespace v8 {
namespace internal { namespace internal {
namespace wasm { namespace wasm {
#define REQUIRE_CPU_FEATURE(name, ...) \ #define REQUIRE_CPU_FEATURE(name, ...) \
if (!CpuFeatures::IsSupported(name)) { \ if (!CpuFeatures::IsSupported(name)) { \
bailout("no " #name); \ bailout(kMissingCPUFeature, "no " #name); \
return __VA_ARGS__; \ return __VA_ARGS__; \
} \ } \
CpuFeatureScope feature(this, name); CpuFeatureScope feature(this, name);
namespace liftoff { namespace liftoff {
...@@ -1260,7 +1260,7 @@ template <typename dst_type, typename src_type> ...@@ -1260,7 +1260,7 @@ template <typename dst_type, typename src_type>
inline bool EmitTruncateFloatToInt(LiftoffAssembler* assm, Register dst, inline bool EmitTruncateFloatToInt(LiftoffAssembler* assm, Register dst,
DoubleRegister src, Label* trap) { DoubleRegister src, Label* trap) {
if (!CpuFeatures::IsSupported(SSE4_1)) { if (!CpuFeatures::IsSupported(SSE4_1)) {
assm->bailout("no SSE4.1"); assm->bailout(kMissingCPUFeature, "no SSE4.1");
return true; return true;
} }
CpuFeatureScope feature(assm, SSE4_1); CpuFeatureScope feature(assm, SSE4_1);
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment