Commit c354fb9c authored by Clemens Hammacher, committed by Commit Bot

[Liftoff] Add histogram for bailout reasons

This CL adds a new enum {LiftoffBailoutReason}, and tracks this reason
for each bailout. This will give us data to prioritize extensions of
Liftoff for new proposals or last missing instructions. Since we also
track the {kSuccess} case, we will also see what percentage of
functions can be compiled with Liftoff overall.

R=mstarzinger@chromium.org
CC=jwd@chromium.org

Change-Id: I42b6a14c5a298ddda7053c195e8b650dc1fe66dc
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/1634910
Reviewed-by: Andreas Haas <ahaas@chromium.org>
Commit-Queue: Clemens Hammacher <clemensh@chromium.org>
Cr-Commit-Position: refs/heads/master@{#61939}
parent 2f379994
......@@ -89,7 +89,9 @@ namespace internal {
0, 100, 32) \
/* number of code GCs triggered per native module, collected on code GC */ \
HR(wasm_module_num_triggered_code_gcs, \
V8.WasmModuleNumberOfCodeGCsTriggered, 1, 128, 20)
V8.WasmModuleNumberOfCodeGCsTriggered, 1, 128, 20) \
/* bailout reason if Liftoff failed, or {kSuccess} (per function) */ \
HR(liftoff_bailout_reasons, V8.LiftoffBailoutReasons, 0, 20, 21)
#define HISTOGRAM_TIMER_LIST(HT) \
/* Timer histograms, not thread safe: HT(name, caption, max, unit) */ \
......
......@@ -7,8 +7,6 @@
#include "src/wasm/baseline/liftoff-assembler.h"
#define BAILOUT(reason) bailout("arm " reason)
namespace v8 {
namespace internal {
namespace wasm {
......@@ -223,7 +221,7 @@ inline void EmitFloatMinOrMax(LiftoffAssembler* assm, RegisterType dst,
int LiftoffAssembler::PrepareStackFrame() {
if (!CpuFeatures::IsSupported(ARMv7)) {
BAILOUT("Armv6 not supported");
bailout(kUnsupportedArchitecture, "Armv6 not supported");
return 0;
}
uint32_t offset = static_cast<uint32_t>(pc_offset());
......@@ -247,7 +245,8 @@ void LiftoffAssembler::PatchPrepareStackFrame(int offset,
// before checking it.
// TODO(arm): Remove this when the stack check mechanism will be updated.
if (bytes > KB / 2) {
BAILOUT("Stack limited to 512 bytes to avoid a bug in StackCheck");
bailout(kOtherReason,
"Stack limited to 512 bytes to avoid a bug in StackCheck");
return;
}
#endif
......@@ -750,7 +749,7 @@ void LiftoffAssembler::emit_i32_divs(Register dst, Register lhs, Register rhs,
Label* trap_div_by_zero,
Label* trap_div_unrepresentable) {
if (!CpuFeatures::IsSupported(SUDIV)) {
BAILOUT("i32_divs");
bailout(kMissingCPUFeature, "i32_divs");
return;
}
CpuFeatureScope scope(this, SUDIV);
......@@ -778,7 +777,7 @@ void LiftoffAssembler::emit_i32_divs(Register dst, Register lhs, Register rhs,
void LiftoffAssembler::emit_i32_divu(Register dst, Register lhs, Register rhs,
Label* trap_div_by_zero) {
if (!CpuFeatures::IsSupported(SUDIV)) {
BAILOUT("i32_divu");
bailout(kMissingCPUFeature, "i32_divu");
return;
}
CpuFeatureScope scope(this, SUDIV);
......@@ -793,7 +792,7 @@ void LiftoffAssembler::emit_i32_rems(Register dst, Register lhs, Register rhs,
if (!CpuFeatures::IsSupported(SUDIV)) {
// When this case is handled, a check for ARMv7 is required to use mls.
// Mls support is implied with SUDIV support.
BAILOUT("i32_rems");
bailout(kMissingCPUFeature, "i32_rems");
return;
}
CpuFeatureScope scope(this, SUDIV);
......@@ -814,7 +813,7 @@ void LiftoffAssembler::emit_i32_remu(Register dst, Register lhs, Register rhs,
if (!CpuFeatures::IsSupported(SUDIV)) {
// When this case is handled, a check for ARMv7 is required to use mls.
// Mls support is implied with SUDIV support.
BAILOUT("i32_remu");
bailout(kMissingCPUFeature, "i32_remu");
return;
}
CpuFeatureScope scope(this, SUDIV);
......@@ -1564,6 +1563,4 @@ void LiftoffStackSlots::Construct() {
} // namespace internal
} // namespace v8
#undef BAILOUT
#endif // V8_WASM_BASELINE_ARM_LIFTOFF_ASSEMBLER_ARM_H_
......@@ -7,8 +7,6 @@
#include "src/wasm/baseline/liftoff-assembler.h"
#define BAILOUT(reason) bailout("arm64 " reason)
namespace v8 {
namespace internal {
namespace wasm {
......@@ -135,7 +133,7 @@ void LiftoffAssembler::PatchPrepareStackFrame(int offset,
if (!IsImmAddSub(bytes)) {
// Stack greater than 4M! Because this is a quite improbable case, we
// just fallback to Turbofan.
BAILOUT("Stack too big");
bailout(kOtherReason, "Stack too big");
return;
}
}
......@@ -144,7 +142,8 @@ void LiftoffAssembler::PatchPrepareStackFrame(int offset,
// before checking it.
// TODO(arm): Remove this when the stack check mechanism will be updated.
if (bytes > KB / 2) {
BAILOUT("Stack limited to 512 bytes to avoid a bug in StackCheck");
bailout(kOtherReason,
"Stack limited to 512 bytes to avoid a bug in StackCheck");
return;
}
#endif
......@@ -1088,6 +1087,4 @@ void LiftoffStackSlots::Construct() {
} // namespace internal
} // namespace v8
#undef BAILOUT
#endif // V8_WASM_BASELINE_ARM64_LIFTOFF_ASSEMBLER_ARM64_H_
......@@ -14,11 +14,11 @@ namespace v8 {
namespace internal {
namespace wasm {
#define REQUIRE_CPU_FEATURE(name, ...) \
if (!CpuFeatures::IsSupported(name)) { \
bailout("no " #name); \
return __VA_ARGS__; \
} \
#define REQUIRE_CPU_FEATURE(name, ...) \
if (!CpuFeatures::IsSupported(name)) { \
bailout(kMissingCPUFeature, "no " #name); \
return __VA_ARGS__; \
} \
CpuFeatureScope feature(this, name);
namespace liftoff {
......@@ -1390,7 +1390,7 @@ template <typename dst_type, typename src_type>
inline bool EmitTruncateFloatToInt(LiftoffAssembler* assm, Register dst,
DoubleRegister src, Label* trap) {
if (!CpuFeatures::IsSupported(SSE4_1)) {
assm->bailout("no SSE4.1");
assm->bailout(kMissingCPUFeature, "no SSE4.1");
return true;
}
CpuFeatureScope feature(assm, SSE4_1);
......
......@@ -13,6 +13,7 @@
#include "src/codegen/macro-assembler.h"
#include "src/execution/frames.h"
#include "src/wasm/baseline/liftoff-assembler-defs.h"
#include "src/wasm/baseline/liftoff-compiler.h"
#include "src/wasm/baseline/liftoff-register.h"
#include "src/wasm/function-body-decoder.h"
#include "src/wasm/wasm-code-manager.h"
......@@ -635,13 +636,16 @@ class LiftoffAssembler : public TurboAssembler {
CacheState* cache_state() { return &cache_state_; }
const CacheState* cache_state() const { return &cache_state_; }
bool did_bailout() { return bailout_reason_ != nullptr; }
const char* bailout_reason() const { return bailout_reason_; }
bool did_bailout() { return bailout_reason_ != kSuccess; }
LiftoffBailoutReason bailout_reason() const { return bailout_reason_; }
const char* bailout_detail() const { return bailout_detail_; }
void bailout(const char* reason) {
if (bailout_reason_ != nullptr) return;
void bailout(LiftoffBailoutReason reason, const char* detail) {
DCHECK_NE(kSuccess, reason);
if (bailout_reason_ != kSuccess) return;
AbortCompilation();
bailout_reason_ = reason;
bailout_detail_ = detail;
}
private:
......@@ -655,7 +659,8 @@ class LiftoffAssembler : public TurboAssembler {
"Reconsider this inlining if ValueType gets bigger");
CacheState cache_state_;
uint32_t num_used_spill_slots_ = 0;
const char* bailout_reason_ = nullptr;
LiftoffBailoutReason bailout_reason_ = kSuccess;
const char* bailout_detail_ = nullptr;
LiftoffRegister SpillOneRegister(LiftoffRegList candidates,
LiftoffRegList pinned);
......
This diff is collapsed.
......@@ -19,6 +19,38 @@ struct CompilationEnv;
struct FunctionBody;
struct WasmFeatures;
// Per-function result of a Liftoff compilation attempt, recorded in the
// "V8.LiftoffBailoutReasons" histogram (see liftoff_bailout_reasons in the
// histogram list). Values are persisted UMA bucket indices and must stay
// stable.
// Note: If this list changes, also the histogram "V8.LiftoffBailoutReasons"
// on the chromium side needs to be updated.
// Deprecating entries is always fine. Repurposing works if you don't care about
// temporary mix-ups. Increasing the number of reasons {kNumBailoutReasons} is
// more tricky, and might require introducing a new (updated) histogram.
enum LiftoffBailoutReason : int8_t {
  // Nothing actually failed.
  kSuccess = 0,
  // Compilation failed, but not because of Liftoff.
  kDecodeError = 1,
  // Liftoff is not implemented on that architecture.
  kUnsupportedArchitecture = 2,
  // More complex code would be needed because a CPU feature is not present.
  kMissingCPUFeature = 3,
  // Liftoff does not implement a complex (and rare) instruction.
  kComplexOperation = 4,
  // Unimplemented proposals:
  kSimd = 5,
  kAnyRef = 6,
  kExceptionHandling = 7,
  kMultiValue = 8,
  kTailCall = 9,
  kAtomics = 10,
  kBulkMemory = 11,
  kNonTrappingFloatToInt = 12,
  // A little gap, for forward compatibility.
  // Any other reason (use rarely; introduce new reasons if this spikes).
  kOtherReason = 20,
  // Marker: one past the last used value (21) — presumably the bucket count
  // of the histogram, which is registered with max 20 / 21 buckets.
  kNumBailoutReasons
};
WasmCompilationResult ExecuteLiftoffCompilation(
AccountingAllocator*, CompilationEnv*, const FunctionBody&, int func_index,
Counters*, WasmFeatures* detected_features);
......
......@@ -7,8 +7,6 @@
#include "src/wasm/baseline/liftoff-assembler.h"
#define BAILOUT(reason) bailout("mips " reason)
namespace v8 {
namespace internal {
namespace wasm {
......@@ -854,7 +852,7 @@ void LiftoffAssembler::emit_f32_max(DoubleRegister dst, DoubleRegister lhs,
void LiftoffAssembler::emit_f32_copysign(DoubleRegister dst, DoubleRegister lhs,
DoubleRegister rhs) {
BAILOUT("f32_copysign");
bailout(kComplexOperation, "f32_copysign");
}
void LiftoffAssembler::emit_f64_min(DoubleRegister dst, DoubleRegister lhs,
......@@ -881,7 +879,7 @@ void LiftoffAssembler::emit_f64_max(DoubleRegister dst, DoubleRegister lhs,
void LiftoffAssembler::emit_f64_copysign(DoubleRegister dst, DoubleRegister lhs,
DoubleRegister rhs) {
BAILOUT("f64_copysign");
bailout(kComplexOperation, "f64_copysign");
}
#define FP_BINOP(name, instruction) \
......@@ -1026,10 +1024,9 @@ bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode,
TurboAssembler::CompareF64(EQ, rounded.fp(), converted_back.fp());
TurboAssembler::BranchFalseF(trap);
return true;
} else {
BAILOUT("emit_type_conversion kExprI32SConvertF64");
return true;
}
bailout(kUnsupportedArchitecture, "kExprI32SConvertF64");
return true;
}
case kExprI32UConvertF64: {
if ((IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) &&
......@@ -1049,10 +1046,9 @@ bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode,
TurboAssembler::CompareF64(EQ, rounded.fp(), converted_back.fp());
TurboAssembler::BranchFalseF(trap);
return true;
} else {
BAILOUT("emit_type_conversion kExprI32UConvertF64");
return true;
}
bailout(kUnsupportedArchitecture, "kExprI32UConvertF64");
return true;
}
case kExprI32ReinterpretF32:
mfc1(dst.gp(), src.fp());
......@@ -1116,26 +1112,26 @@ bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode,
}
void LiftoffAssembler::emit_i32_signextend_i8(Register dst, Register src) {
BAILOUT("emit_i32_signextend_i8");
bailout(kComplexOperation, "i32_signextend_i8");
}
void LiftoffAssembler::emit_i32_signextend_i16(Register dst, Register src) {
BAILOUT("emit_i32_signextend_i16");
bailout(kComplexOperation, "i32_signextend_i16");
}
void LiftoffAssembler::emit_i64_signextend_i8(LiftoffRegister dst,
LiftoffRegister src) {
BAILOUT("emit_i64_signextend_i8");
bailout(kComplexOperation, "i64_signextend_i8");
}
void LiftoffAssembler::emit_i64_signextend_i16(LiftoffRegister dst,
LiftoffRegister src) {
BAILOUT("emit_i64_signextend_i16");
bailout(kComplexOperation, "i64_signextend_i16");
}
void LiftoffAssembler::emit_i64_signextend_i32(LiftoffRegister dst,
LiftoffRegister src) {
BAILOUT("emit_i64_signextend_i32");
bailout(kComplexOperation, "i64_signextend_i32");
}
void LiftoffAssembler::emit_jump(Label* label) {
......@@ -1511,6 +1507,4 @@ void LiftoffStackSlots::Construct() {
} // namespace internal
} // namespace v8
#undef BAILOUT
#endif // V8_WASM_BASELINE_MIPS_LIFTOFF_ASSEMBLER_MIPS_H_
......@@ -7,8 +7,6 @@
#include "src/wasm/baseline/liftoff-assembler.h"
#define BAILOUT(reason) bailout("mips64 " reason)
namespace v8 {
namespace internal {
namespace wasm {
......@@ -742,7 +740,7 @@ void LiftoffAssembler::emit_f32_max(DoubleRegister dst, DoubleRegister lhs,
void LiftoffAssembler::emit_f32_copysign(DoubleRegister dst, DoubleRegister lhs,
DoubleRegister rhs) {
BAILOUT("f32_copysign");
bailout(kComplexOperation, "f32_copysign");
}
void LiftoffAssembler::emit_f64_min(DoubleRegister dst, DoubleRegister lhs,
......@@ -769,7 +767,7 @@ void LiftoffAssembler::emit_f64_max(DoubleRegister dst, DoubleRegister lhs,
void LiftoffAssembler::emit_f64_copysign(DoubleRegister dst, DoubleRegister lhs,
DoubleRegister rhs) {
BAILOUT("f64_copysign");
bailout(kComplexOperation, "f64_copysign");
}
#define FP_BINOP(name, instruction) \
......@@ -1010,26 +1008,26 @@ bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode,
}
void LiftoffAssembler::emit_i32_signextend_i8(Register dst, Register src) {
BAILOUT("emit_i32_signextend_i8");
bailout(kComplexOperation, "i32_signextend_i8");
}
void LiftoffAssembler::emit_i32_signextend_i16(Register dst, Register src) {
BAILOUT("emit_i32_signextend_i16");
bailout(kComplexOperation, "i32_signextend_i16");
}
void LiftoffAssembler::emit_i64_signextend_i8(LiftoffRegister dst,
LiftoffRegister src) {
BAILOUT("emit_i64_signextend_i8");
bailout(kComplexOperation, "i64_signextend_i8");
}
void LiftoffAssembler::emit_i64_signextend_i16(LiftoffRegister dst,
LiftoffRegister src) {
BAILOUT("emit_i64_signextend_i16");
bailout(kComplexOperation, "i64_signextend_i16");
}
void LiftoffAssembler::emit_i64_signextend_i32(LiftoffRegister dst,
LiftoffRegister src) {
BAILOUT("emit_i64_signextend_i32");
bailout(kComplexOperation, "i64_signextend_i32");
}
void LiftoffAssembler::emit_jump(Label* label) {
......@@ -1351,6 +1349,4 @@ void LiftoffStackSlots::Construct() {
} // namespace internal
} // namespace v8
#undef BAILOUT
#endif // V8_WASM_BASELINE_MIPS64_LIFTOFF_ASSEMBLER_MIPS64_H_
......@@ -14,11 +14,11 @@ namespace v8 {
namespace internal {
namespace wasm {
#define REQUIRE_CPU_FEATURE(name, ...) \
if (!CpuFeatures::IsSupported(name)) { \
bailout("no " #name); \
return __VA_ARGS__; \
} \
#define REQUIRE_CPU_FEATURE(name, ...) \
if (!CpuFeatures::IsSupported(name)) { \
bailout(kMissingCPUFeature, "no " #name); \
return __VA_ARGS__; \
} \
CpuFeatureScope feature(this, name);
namespace liftoff {
......@@ -1260,7 +1260,7 @@ template <typename dst_type, typename src_type>
inline bool EmitTruncateFloatToInt(LiftoffAssembler* assm, Register dst,
DoubleRegister src, Label* trap) {
if (!CpuFeatures::IsSupported(SSE4_1)) {
assm->bailout("no SSE4.1");
assm->bailout(kMissingCPUFeature, "no SSE4.1");
return true;
}
CpuFeatureScope feature(assm, SSE4_1);
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment