Commit c354fb9c authored by Clemens Hammacher, committed by Commit Bot

[Liftoff] Add histogram for bailout reasons

This CL adds a new enum {LiftoffBailoutReason} and tracks this reason
for each bailout. This will give us data to prioritize extensions of
Liftoff for new proposals or the last missing instructions. Since we
also track the {kSuccess} case, we will see what percentage of
functions can be compiled with Liftoff overall.

R=mstarzinger@chromium.org
CC=jwd@chromium.org

Change-Id: I42b6a14c5a298ddda7053c195e8b650dc1fe66dc
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/1634910
Reviewed-by: Andreas Haas <ahaas@chromium.org>
Commit-Queue: Clemens Hammacher <clemensh@chromium.org>
Cr-Commit-Position: refs/heads/master@{#61939}
parent 2f379994
@@ -89,7 +89,9 @@ namespace internal {
0, 100, 32) \
/* number of code GCs triggered per native module, collected on code GC */ \
HR(wasm_module_num_triggered_code_gcs, \
V8.WasmModuleNumberOfCodeGCsTriggered, 1, 128, 20)
V8.WasmModuleNumberOfCodeGCsTriggered, 1, 128, 20) \
/* bailout reason if Liftoff failed, or {kSuccess} (per function) */ \
HR(liftoff_bailout_reasons, V8.LiftoffBailoutReasons, 0, 20, 21)
#define HISTOGRAM_TIMER_LIST(HT) \
/* Timer histograms, not thread safe: HT(name, caption, max, unit) */ \
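For reference, the trailing arguments of the HR macro are (min, max, num_buckets), an ordering consistent with the DCHECKs added in liftoff-compiler.cc further down: min 0 is {kSuccess}, max 20 is {kOtherReason} (= kNumBailoutReasons - 1), and 21 buckets give every sample value its own bucket. A minimal sketch of the invariant, assuming the {LiftoffBailoutReason} enum added below in liftoff-compiler.h:

// Sketch only; the real enum lives in src/wasm/baseline/liftoff-compiler.h.
// With range [0, 20] and 21 buckets, every enum value, including the gap
// reserved before {kOtherReason}, maps to exactly one bucket.
static_assert(kNumBailoutReasons == 21,
              "keep V8.LiftoffBailoutReasons in sync with LiftoffBailoutReason");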
......
@@ -7,8 +7,6 @@
#include "src/wasm/baseline/liftoff-assembler.h"
#define BAILOUT(reason) bailout("arm " reason)
namespace v8 {
namespace internal {
namespace wasm {
@@ -223,7 +221,7 @@ inline void EmitFloatMinOrMax(LiftoffAssembler* assm, RegisterType dst,
int LiftoffAssembler::PrepareStackFrame() {
if (!CpuFeatures::IsSupported(ARMv7)) {
BAILOUT("Armv6 not supported");
bailout(kUnsupportedArchitecture, "Armv6 not supported");
return 0;
}
uint32_t offset = static_cast<uint32_t>(pc_offset());
@@ -247,7 +245,8 @@ void LiftoffAssembler::PatchPrepareStackFrame(int offset,
// before checking it.
// TODO(arm): Remove this when the stack check mechanism is updated.
if (bytes > KB / 2) {
BAILOUT("Stack limited to 512 bytes to avoid a bug in StackCheck");
bailout(kOtherReason,
"Stack limited to 512 bytes to avoid a bug in StackCheck");
return;
}
#endif
@@ -750,7 +749,7 @@ void LiftoffAssembler::emit_i32_divs(Register dst, Register lhs, Register rhs,
Label* trap_div_by_zero,
Label* trap_div_unrepresentable) {
if (!CpuFeatures::IsSupported(SUDIV)) {
BAILOUT("i32_divs");
bailout(kMissingCPUFeature, "i32_divs");
return;
}
CpuFeatureScope scope(this, SUDIV);
@@ -778,7 +777,7 @@ void LiftoffAssembler::emit_i32_divs(Register dst, Register lhs, Register rhs,
void LiftoffAssembler::emit_i32_divu(Register dst, Register lhs, Register rhs,
Label* trap_div_by_zero) {
if (!CpuFeatures::IsSupported(SUDIV)) {
BAILOUT("i32_divu");
bailout(kMissingCPUFeature, "i32_divu");
return;
}
CpuFeatureScope scope(this, SUDIV);
@@ -793,7 +792,7 @@ void LiftoffAssembler::emit_i32_rems(Register dst, Register lhs, Register rhs,
if (!CpuFeatures::IsSupported(SUDIV)) {
// When this case is handled, a check for ARMv7 is required to use mls.
// Mls support is implied by SUDIV support.
BAILOUT("i32_rems");
bailout(kMissingCPUFeature, "i32_rems");
return;
}
CpuFeatureScope scope(this, SUDIV);
@@ -814,7 +813,7 @@ void LiftoffAssembler::emit_i32_remu(Register dst, Register lhs, Register rhs,
if (!CpuFeatures::IsSupported(SUDIV)) {
// When this case is handled, a check for ARMv7 is required to use mls.
// Mls support is implied by SUDIV support.
BAILOUT("i32_remu");
bailout(kMissingCPUFeature, "i32_remu");
return;
}
CpuFeatureScope scope(this, SUDIV);
@@ -1564,6 +1563,4 @@ void LiftoffStackSlots::Construct() {
} // namespace internal
} // namespace v8
#undef BAILOUT
#endif // V8_WASM_BASELINE_ARM_LIFTOFF_ASSEMBLER_ARM_H_
@@ -7,8 +7,6 @@
#include "src/wasm/baseline/liftoff-assembler.h"
#define BAILOUT(reason) bailout("arm64 " reason)
namespace v8 {
namespace internal {
namespace wasm {
@@ -135,7 +133,7 @@ void LiftoffAssembler::PatchPrepareStackFrame(int offset,
if (!IsImmAddSub(bytes)) {
// Stack greater than 4M! Because this is quite an improbable case, we
// just fall back to TurboFan.
BAILOUT("Stack too big");
bailout(kOtherReason, "Stack too big");
return;
}
}
@@ -144,7 +142,8 @@ void LiftoffAssembler::PatchPrepareStackFrame(int offset,
// before checking it.
// TODO(arm): Remove this when the stack check mechanism is updated.
if (bytes > KB / 2) {
BAILOUT("Stack limited to 512 bytes to avoid a bug in StackCheck");
bailout(kOtherReason,
"Stack limited to 512 bytes to avoid a bug in StackCheck");
return;
}
#endif
@@ -1088,6 +1087,4 @@ void LiftoffStackSlots::Construct() {
} // namespace internal
} // namespace v8
#undef BAILOUT
#endif // V8_WASM_BASELINE_ARM64_LIFTOFF_ASSEMBLER_ARM64_H_
@@ -16,7 +16,7 @@ namespace wasm {
#define REQUIRE_CPU_FEATURE(name, ...) \
if (!CpuFeatures::IsSupported(name)) { \
bailout("no " #name); \
bailout(kMissingCPUFeature, "no " #name); \
return __VA_ARGS__; \
} \
CpuFeatureScope feature(this, name);
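Hand-expanding the macro gives the same guard pattern that the arm and mips ports spell out explicitly; a sketch for SSE4_1 in a void member function (no extra return value, so __VA_ARGS__ is empty):

if (!CpuFeatures::IsSupported(SSE4_1)) {
  bailout(kMissingCPUFeature, "no SSE4_1");  // "no " #name stringizes to this
  return;
}
CpuFeatureScope feature(this, SSE4_1);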
@@ -1390,7 +1390,7 @@ template <typename dst_type, typename src_type>
inline bool EmitTruncateFloatToInt(LiftoffAssembler* assm, Register dst,
DoubleRegister src, Label* trap) {
if (!CpuFeatures::IsSupported(SSE4_1)) {
assm->bailout("no SSE4.1");
assm->bailout(kMissingCPUFeature, "no SSE4.1");
return true;
}
CpuFeatureScope feature(assm, SSE4_1);
......
@@ -13,6 +13,7 @@
#include "src/codegen/macro-assembler.h"
#include "src/execution/frames.h"
#include "src/wasm/baseline/liftoff-assembler-defs.h"
#include "src/wasm/baseline/liftoff-compiler.h"
#include "src/wasm/baseline/liftoff-register.h"
#include "src/wasm/function-body-decoder.h"
#include "src/wasm/wasm-code-manager.h"
@@ -635,13 +636,16 @@ class LiftoffAssembler : public TurboAssembler {
CacheState* cache_state() { return &cache_state_; }
const CacheState* cache_state() const { return &cache_state_; }
bool did_bailout() { return bailout_reason_ != nullptr; }
const char* bailout_reason() const { return bailout_reason_; }
bool did_bailout() { return bailout_reason_ != kSuccess; }
LiftoffBailoutReason bailout_reason() const { return bailout_reason_; }
const char* bailout_detail() const { return bailout_detail_; }
void bailout(const char* reason) {
if (bailout_reason_ != nullptr) return;
void bailout(LiftoffBailoutReason reason, const char* detail) {
DCHECK_NE(kSuccess, reason);
if (bailout_reason_ != kSuccess) return;
AbortCompilation();
bailout_reason_ = reason;
bailout_detail_ = detail;
}
private:
@@ -655,7 +659,8 @@ class LiftoffAssembler : public TurboAssembler {
"Reconsider this inlining if ValueType gets bigger");
CacheState cache_state_;
uint32_t num_used_spill_slots_ = 0;
const char* bailout_reason_ = nullptr;
LiftoffBailoutReason bailout_reason_ = kSuccess;
const char* bailout_detail_ = nullptr;
LiftoffRegister SpillOneRegister(LiftoffRegList candidates,
LiftoffRegList pinned);
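Note that {bailout_reason_} is sticky: once it leaves {kSuccess}, later calls to bailout() return early, so the histogram records whichever reason first aborted compilation. A hypothetical call sequence illustrating this:

assm->bailout(kMissingCPUFeature, "no SSE4.1");  // recorded; aborts compilation
assm->bailout(kOtherReason, "later failure");    // ignored: reason already set
DCHECK(assm->did_bailout());
DCHECK_EQ(kMissingCPUFeature, assm->bailout_reason());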
......
@@ -174,7 +174,8 @@ class LiftoffCompiler {
compilation_zone_(compilation_zone),
safepoint_table_builder_(compilation_zone_) {}
bool ok() const { return ok_; }
bool did_bailout() const { return bailout_reason_ != kSuccess; }
LiftoffBailoutReason bailout_reason() const { return bailout_reason_; }
void GetCode(CodeDesc* desc) {
asm_.GetCode(nullptr, desc, &safepoint_table_builder_,
@@ -195,30 +196,51 @@ class LiftoffCompiler {
return __ GetTotalFrameSlotCount();
}
void unsupported(FullDecoder* decoder, const char* reason) {
ok_ = false;
TRACE("unsupported: %s\n", reason);
void unsupported(FullDecoder* decoder, LiftoffBailoutReason reason,
const char* detail) {
DCHECK_NE(kSuccess, reason);
if (did_bailout()) return;
bailout_reason_ = reason;
TRACE("unsupported: %s\n", detail);
decoder->errorf(decoder->pc_offset(), "unsupported liftoff operation: %s",
reason);
detail);
UnuseLabels(decoder);
}
bool DidAssemblerBailout(FullDecoder* decoder) {
if (decoder->failed() || !__ did_bailout()) return false;
unsupported(decoder, __ bailout_reason());
unsupported(decoder, __ bailout_reason(), __ bailout_detail());
return true;
}
LiftoffBailoutReason BailoutReasonForType(ValueType type) {
switch (type) {
case kWasmS128:
return kSimd;
case kWasmAnyRef:
case kWasmAnyFunc:
case kWasmNullRef:
return kAnyRef;
case kWasmExceptRef:
return kExceptionHandling;
case kWasmVar:
return kMultiValue;
default:
return kOtherReason;
}
}
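This attributes a bailout on an unsupported type to the proposal that introduced the type, so such bailouts land in a meaningful bucket instead of {kOtherReason}. Hypothetical examples of the mapping:

DCHECK_EQ(kSimd, BailoutReasonForType(kWasmS128));
DCHECK_EQ(kExceptionHandling, BailoutReasonForType(kWasmExceptRef));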
bool CheckSupportedType(FullDecoder* decoder,
Vector<const ValueType> supported_types,
ValueType type, const char* context) {
char buffer[128];
// Check supported types.
for (ValueType supported : supported_types) {
if (type == supported) return true;
}
SNPrintF(ArrayVector(buffer), "%s %s", ValueTypes::TypeName(type), context);
unsupported(decoder, buffer);
LiftoffBailoutReason bailout_reason = BailoutReasonForType(type);
EmbeddedVector<char, 128> buffer;
SNPrintF(buffer, "%s %s", ValueTypes::TypeName(type), context);
unsupported(decoder, bailout_reason, buffer.begin());
return false;
}
@@ -449,7 +471,7 @@ class LiftoffCompiler {
}
void OnFirstError(FullDecoder* decoder) {
ok_ = false;
if (!did_bailout()) bailout_reason_ = kDecodeError;
UnuseLabels(decoder);
asm_.AbortCompilation();
}
@@ -481,19 +503,20 @@
}
void Try(FullDecoder* decoder, Control* block) {
unsupported(decoder, "try");
unsupported(decoder, kExceptionHandling, "try");
}
void Catch(FullDecoder* decoder, Control* block, Value* exception) {
unsupported(decoder, "catch");
unsupported(decoder, kExceptionHandling, "catch");
}
void If(FullDecoder* decoder, const Value& cond, Control* if_block) {
DCHECK_EQ(if_block, decoder->control_at(0));
DCHECK(if_block->is_if());
if (if_block->start_merge.arity > 0 || if_block->end_merge.arity > 1)
return unsupported(decoder, "multi-value if");
if (if_block->start_merge.arity > 0 || if_block->end_merge.arity > 1) {
return unsupported(decoder, kMultiValue, "multi-value if");
}
// Allocate the else state.
if_block->else_state = base::make_unique<ElseState>();
@@ -776,6 +799,8 @@
case WasmOpcode::kExprI64Clz:
case WasmOpcode::kExprI64Ctz:
case WasmOpcode::kExprI64Popcnt:
return unsupported(decoder, kComplexOperation,
WasmOpcodes::OpcodeName(opcode));
case WasmOpcode::kExprI32SConvertSatF32:
case WasmOpcode::kExprI32UConvertSatF32:
case WasmOpcode::kExprI32SConvertSatF64:
@@ -784,7 +809,8 @@
case WasmOpcode::kExprI64UConvertSatF32:
case WasmOpcode::kExprI64SConvertSatF64:
case WasmOpcode::kExprI64UConvertSatF64:
return unsupported(decoder, WasmOpcodes::OpcodeName(opcode));
return unsupported(decoder, kNonTrappingFloatToInt,
WasmOpcodes::OpcodeName(opcode));
default:
UNREACHABLE();
}
@@ -1118,7 +1144,8 @@
break;
case WasmOpcode::kExprI64Rol:
case WasmOpcode::kExprI64Ror:
return unsupported(decoder, WasmOpcodes::OpcodeName(opcode));
return unsupported(decoder, kComplexOperation,
WasmOpcodes::OpcodeName(opcode));
default:
UNREACHABLE();
}
@@ -1168,11 +1195,11 @@
}
void RefNull(FullDecoder* decoder, Value* result) {
unsupported(decoder, "ref_null");
unsupported(decoder, kAnyRef, "ref_null");
}
void RefFunc(FullDecoder* decoder, uint32_t function_index, Value* result) {
unsupported(decoder, "func");
unsupported(decoder, kAnyRef, "func");
}
void Drop(FullDecoder* decoder, const Value& value) {
@@ -1184,7 +1211,9 @@
void ReturnImpl(FullDecoder* decoder) {
size_t num_returns = decoder->sig_->return_count();
if (num_returns > 1) return unsupported(decoder, "multi-return");
if (num_returns > 1) {
return unsupported(decoder, kMultiValue, "multi-return");
}
if (num_returns > 0) __ MoveToReturnRegisters(decoder->sig_);
__ LeaveFrame(StackFrame::WASM_COMPILED);
__ DropStackSlotsAndRet(
@@ -1313,12 +1342,12 @@
void GetTable(FullDecoder* decoder, const Value& index, Value* result,
TableIndexImmediate<validate>& imm) {
unsupported(decoder, "table_get");
unsupported(decoder, kAnyRef, "table_get");
}
void SetTable(FullDecoder* decoder, const Value& index, const Value& value,
TableIndexImmediate<validate>& imm) {
unsupported(decoder, "table_set");
unsupported(decoder, kAnyRef, "table_set");
}
void Unreachable(FullDecoder* decoder) {
@@ -1735,12 +1764,14 @@
void CallDirect(FullDecoder* decoder,
const CallFunctionImmediate<validate>& imm,
const Value args[], Value returns[]) {
if (imm.sig->return_count() > 1)
return unsupported(decoder, "multi-return");
if (imm.sig->return_count() > 1) {
return unsupported(decoder, kMultiValue, "multi-return");
}
if (imm.sig->return_count() == 1 &&
!CheckSupportedType(decoder, kSupportedTypes, imm.sig->GetReturn(0),
"return"))
"return")) {
return;
}
auto call_descriptor =
compiler::GetWasmCallDescriptor(compilation_zone_, imm.sig);
@@ -1798,10 +1829,10 @@
const CallIndirectImmediate<validate>& imm,
const Value args[], Value returns[]) {
if (imm.sig->return_count() > 1) {
return unsupported(decoder, "multi-return");
return unsupported(decoder, kMultiValue, "multi-return");
}
if (imm.table_index != 0) {
return unsupported(decoder, "table index != 0");
return unsupported(decoder, kAnyRef, "table index != 0");
}
if (imm.sig->return_count() == 1 &&
!CheckSupportedType(decoder, kSupportedTypes, imm.sig->GetReturn(0),
@@ -1933,96 +1964,96 @@
void ReturnCall(FullDecoder* decoder,
const CallFunctionImmediate<validate>& imm,
const Value args[]) {
unsupported(decoder, "return_call");
unsupported(decoder, kTailCall, "return_call");
}
void ReturnCallIndirect(FullDecoder* decoder, const Value& index_val,
const CallIndirectImmediate<validate>& imm,
const Value args[]) {
unsupported(decoder, "return_call_indirect");
unsupported(decoder, kTailCall, "return_call_indirect");
}
void SimdOp(FullDecoder* decoder, WasmOpcode opcode, Vector<Value> args,
Value* result) {
unsupported(decoder, "simd");
unsupported(decoder, kSimd, "simd");
}
void SimdLaneOp(FullDecoder* decoder, WasmOpcode opcode,
const SimdLaneImmediate<validate>& imm,
const Vector<Value> inputs, Value* result) {
unsupported(decoder, "simd");
unsupported(decoder, kSimd, "simd");
}
void SimdShiftOp(FullDecoder* decoder, WasmOpcode opcode,
const SimdShiftImmediate<validate>& imm, const Value& input,
Value* result) {
unsupported(decoder, "simd");
unsupported(decoder, kSimd, "simd");
}
void Simd8x16ShuffleOp(FullDecoder* decoder,
const Simd8x16ShuffleImmediate<validate>& imm,
const Value& input0, const Value& input1,
Value* result) {
unsupported(decoder, "simd");
unsupported(decoder, kSimd, "simd");
}
void Throw(FullDecoder* decoder, const ExceptionIndexImmediate<validate>&,
const Vector<Value>& args) {
unsupported(decoder, "throw");
unsupported(decoder, kExceptionHandling, "throw");
}
void Rethrow(FullDecoder* decoder, const Value& exception) {
unsupported(decoder, "rethrow");
unsupported(decoder, kExceptionHandling, "rethrow");
}
void BrOnException(FullDecoder* decoder, const Value& exception,
const ExceptionIndexImmediate<validate>& imm,
uint32_t depth, Vector<Value> values) {
unsupported(decoder, "br_on_exn");
unsupported(decoder, kExceptionHandling, "br_on_exn");
}
void AtomicOp(FullDecoder* decoder, WasmOpcode opcode, Vector<Value> args,
const MemoryAccessImmediate<validate>& imm, Value* result) {
unsupported(decoder, "atomicop");
unsupported(decoder, kAtomics, "atomicop");
}
void MemoryInit(FullDecoder* decoder,
const MemoryInitImmediate<validate>& imm, const Value& dst,
const Value& src, const Value& size) {
unsupported(decoder, "memory.init");
unsupported(decoder, kBulkMemory, "memory.init");
}
void DataDrop(FullDecoder* decoder, const DataDropImmediate<validate>& imm) {
unsupported(decoder, "data.drop");
unsupported(decoder, kBulkMemory, "data.drop");
}
void MemoryCopy(FullDecoder* decoder,
const MemoryCopyImmediate<validate>& imm, const Value& dst,
const Value& src, const Value& size) {
unsupported(decoder, "memory.copy");
unsupported(decoder, kBulkMemory, "memory.copy");
}
void MemoryFill(FullDecoder* decoder,
const MemoryIndexImmediate<validate>& imm, const Value& dst,
const Value& value, const Value& size) {
unsupported(decoder, "memory.fill");
unsupported(decoder, kBulkMemory, "memory.fill");
}
void TableInit(FullDecoder* decoder, const TableInitImmediate<validate>& imm,
Vector<Value> args) {
unsupported(decoder, "table.init");
unsupported(decoder, kBulkMemory, "table.init");
}
void ElemDrop(FullDecoder* decoder, const ElemDropImmediate<validate>& imm) {
unsupported(decoder, "elem.drop");
unsupported(decoder, kBulkMemory, "elem.drop");
}
void TableCopy(FullDecoder* decoder, const TableCopyImmediate<validate>& imm,
Vector<Value> args) {
unsupported(decoder, "table.copy");
unsupported(decoder, kBulkMemory, "table.copy");
}
void TableGrow(FullDecoder* decoder, const TableIndexImmediate<validate>& imm,
Value& value, Value& delta, Value* result) {
unsupported(decoder, "table.grow");
unsupported(decoder, kAnyRef, "table.grow");
}
void TableSize(FullDecoder* decoder, const TableIndexImmediate<validate>& imm,
Value* result) {
unsupported(decoder, "table.size");
unsupported(decoder, kAnyRef, "table.size");
}
void TableFill(FullDecoder* decoder, const TableIndexImmediate<validate>& imm,
Value& start, Value& value, Value& count) {
unsupported(decoder, "table.fill");
unsupported(decoder, kAnyRef, "table.fill");
}
private:
LiftoffAssembler asm_;
compiler::CallDescriptor* const descriptor_;
CompilationEnv* const env_;
bool ok_ = true;
LiftoffBailoutReason bailout_reason_ = kSuccess;
std::vector<OutOfLineCode> out_of_line_code_;
SourcePositionTableBuilder source_position_table_builder_;
std::vector<trap_handler::ProtectedInstructionData> protected_instructions_;
@@ -2081,11 +2112,17 @@ WasmCompilationResult ExecuteLiftoffCompilation(AccountingAllocator* allocator,
decoder.Decode();
liftoff_compile_time_scope.reset();
LiftoffCompiler* compiler = &decoder.interface();
if (decoder.failed()) {
compiler->OnFirstError(&decoder);
return WasmCompilationResult{};
}
if (!compiler->ok()) {
if (decoder.failed()) compiler->OnFirstError(&decoder);
// Check that the histogram for the bailout reasons has the correct size.
DCHECK_EQ(0, counters->liftoff_bailout_reasons()->min());
DCHECK_EQ(kNumBailoutReasons - 1, counters->liftoff_bailout_reasons()->max());
DCHECK_EQ(kNumBailoutReasons,
counters->liftoff_bailout_reasons()->num_buckets());
// Register the bailout reason (can also be {kSuccess}).
counters->liftoff_bailout_reasons()->AddSample(
static_cast<int>(compiler->bailout_reason()));
if (compiler->did_bailout()) {
// Liftoff compilation failed.
counters->liftoff_unsupported_functions()->Increment();
return WasmCompilationResult{};
......
@@ -19,6 +19,38 @@ struct CompilationEnv;
struct FunctionBody;
struct WasmFeatures;
// Note: If this list changes, the histogram "V8.LiftoffBailoutReasons" on the
// Chromium side also needs to be updated.
// Deprecating entries is always fine. Repurposing works if you don't care
// about temporary mix-ups. Increasing the number of reasons
// {kNumBailoutReasons} is trickier, and might require introducing a new
// (updated) histogram.
enum LiftoffBailoutReason : int8_t {
// Nothing actually failed.
kSuccess = 0,
// Compilation failed, but not because of Liftoff.
kDecodeError = 1,
// Liftoff is not implemented on that architecture.
kUnsupportedArchitecture = 2,
// More complex code would be needed because a CPU feature is not present.
kMissingCPUFeature = 3,
// Liftoff does not implement a complex (and rare) instruction.
kComplexOperation = 4,
// Unimplemented proposals:
kSimd = 5,
kAnyRef = 6,
kExceptionHandling = 7,
kMultiValue = 8,
kTailCall = 9,
kAtomics = 10,
kBulkMemory = 11,
kNonTrappingFloatToInt = 12,
// A little gap, for forward compatibility.
// Any other reason (use rarely; introduce new reasons if this spikes).
kOtherReason = 20,
// Marker:
kNumBailoutReasons
};
WasmCompilationResult ExecuteLiftoffCompilation(
AccountingAllocator*, CompilationEnv*, const FunctionBody&, int func_index,
Counters*, WasmFeatures* detected_features);
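When analyzing the histogram it helps to map samples back to labels; a hypothetical helper (not part of this CL), with the label strings invented here and the enumerators mirroring the list above:

const char* LiftoffBailoutReasonName(LiftoffBailoutReason reason) {
  switch (reason) {
    case kSuccess: return "success";
    case kDecodeError: return "decode error";
    case kUnsupportedArchitecture: return "unsupported architecture";
    case kMissingCPUFeature: return "missing CPU feature";
    case kComplexOperation: return "complex operation";
    case kSimd: return "simd";
    case kAnyRef: return "anyref";
    case kExceptionHandling: return "exception handling";
    case kMultiValue: return "multi-value";
    case kTailCall: return "tail call";
    case kAtomics: return "atomics";
    case kBulkMemory: return "bulk memory";
    case kNonTrappingFloatToInt: return "non-trapping float-to-int";
    case kOtherReason: return "other reason";
    default: return "<unknown>";  // values in the reserved gap
  }
}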
......
@@ -7,8 +7,6 @@
#include "src/wasm/baseline/liftoff-assembler.h"
#define BAILOUT(reason) bailout("mips " reason)
namespace v8 {
namespace internal {
namespace wasm {
@@ -854,7 +852,7 @@ void LiftoffAssembler::emit_f32_max(DoubleRegister dst, DoubleRegister lhs,
void LiftoffAssembler::emit_f32_copysign(DoubleRegister dst, DoubleRegister lhs,
DoubleRegister rhs) {
BAILOUT("f32_copysign");
bailout(kComplexOperation, "f32_copysign");
}
void LiftoffAssembler::emit_f64_min(DoubleRegister dst, DoubleRegister lhs,
@@ -881,7 +879,7 @@ void LiftoffAssembler::emit_f64_max(DoubleRegister dst, DoubleRegister lhs,
void LiftoffAssembler::emit_f64_copysign(DoubleRegister dst, DoubleRegister lhs,
DoubleRegister rhs) {
BAILOUT("f64_copysign");
bailout(kComplexOperation, "f64_copysign");
}
#define FP_BINOP(name, instruction) \
@@ -1026,10 +1024,9 @@ bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode,
TurboAssembler::CompareF64(EQ, rounded.fp(), converted_back.fp());
TurboAssembler::BranchFalseF(trap);
return true;
} else {
BAILOUT("emit_type_conversion kExprI32SConvertF64");
return true;
}
bailout(kUnsupportedArchitecture, "kExprI32SConvertF64");
return true;
}
case kExprI32UConvertF64: {
if ((IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) &&
@@ -1049,10 +1046,9 @@ bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode,
TurboAssembler::CompareF64(EQ, rounded.fp(), converted_back.fp());
TurboAssembler::BranchFalseF(trap);
return true;
} else {
BAILOUT("emit_type_conversion kExprI32UConvertF64");
return true;
}
bailout(kUnsupportedArchitecture, "kExprI32UConvertF64");
return true;
}
case kExprI32ReinterpretF32:
mfc1(dst.gp(), src.fp());
@@ -1116,26 +1112,26 @@ bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode,
}
void LiftoffAssembler::emit_i32_signextend_i8(Register dst, Register src) {
BAILOUT("emit_i32_signextend_i8");
bailout(kComplexOperation, "i32_signextend_i8");
}
void LiftoffAssembler::emit_i32_signextend_i16(Register dst, Register src) {
BAILOUT("emit_i32_signextend_i16");
bailout(kComplexOperation, "i32_signextend_i16");
}
void LiftoffAssembler::emit_i64_signextend_i8(LiftoffRegister dst,
LiftoffRegister src) {
BAILOUT("emit_i64_signextend_i8");
bailout(kComplexOperation, "i64_signextend_i8");
}
void LiftoffAssembler::emit_i64_signextend_i16(LiftoffRegister dst,
LiftoffRegister src) {
BAILOUT("emit_i64_signextend_i16");
bailout(kComplexOperation, "i64_signextend_i16");
}
void LiftoffAssembler::emit_i64_signextend_i32(LiftoffRegister dst,
LiftoffRegister src) {
BAILOUT("emit_i64_signextend_i32");
bailout(kComplexOperation, "i64_signextend_i32");
}
void LiftoffAssembler::emit_jump(Label* label) {
@@ -1511,6 +1507,4 @@ void LiftoffStackSlots::Construct() {
} // namespace internal
} // namespace v8
#undef BAILOUT
#endif // V8_WASM_BASELINE_MIPS_LIFTOFF_ASSEMBLER_MIPS_H_
@@ -7,8 +7,6 @@
#include "src/wasm/baseline/liftoff-assembler.h"
#define BAILOUT(reason) bailout("mips64 " reason)
namespace v8 {
namespace internal {
namespace wasm {
@@ -742,7 +740,7 @@ void LiftoffAssembler::emit_f32_max(DoubleRegister dst, DoubleRegister lhs,
void LiftoffAssembler::emit_f32_copysign(DoubleRegister dst, DoubleRegister lhs,
DoubleRegister rhs) {
BAILOUT("f32_copysign");
bailout(kComplexOperation, "f32_copysign");
}
void LiftoffAssembler::emit_f64_min(DoubleRegister dst, DoubleRegister lhs,
@@ -769,7 +767,7 @@ void LiftoffAssembler::emit_f64_max(DoubleRegister dst, DoubleRegister lhs,
void LiftoffAssembler::emit_f64_copysign(DoubleRegister dst, DoubleRegister lhs,
DoubleRegister rhs) {
BAILOUT("f64_copysign");
bailout(kComplexOperation, "f64_copysign");
}
#define FP_BINOP(name, instruction) \
@@ -1010,26 +1008,26 @@ bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode,
}
void LiftoffAssembler::emit_i32_signextend_i8(Register dst, Register src) {
BAILOUT("emit_i32_signextend_i8");
bailout(kComplexOperation, "i32_signextend_i8");
}
void LiftoffAssembler::emit_i32_signextend_i16(Register dst, Register src) {
BAILOUT("emit_i32_signextend_i16");
bailout(kComplexOperation, "i32_signextend_i16");
}
void LiftoffAssembler::emit_i64_signextend_i8(LiftoffRegister dst,
LiftoffRegister src) {
BAILOUT("emit_i64_signextend_i8");
bailout(kComplexOperation, "i64_signextend_i8");
}
void LiftoffAssembler::emit_i64_signextend_i16(LiftoffRegister dst,
LiftoffRegister src) {
BAILOUT("emit_i64_signextend_i16");
bailout(kComplexOperation, "i64_signextend_i16");
}
void LiftoffAssembler::emit_i64_signextend_i32(LiftoffRegister dst,
LiftoffRegister src) {
BAILOUT("emit_i64_signextend_i32");
bailout(kComplexOperation, "i64_signextend_i32");
}
void LiftoffAssembler::emit_jump(Label* label) {
@@ -1351,6 +1349,4 @@ void LiftoffStackSlots::Construct() {
} // namespace internal
} // namespace v8
#undef BAILOUT
#endif // V8_WASM_BASELINE_MIPS64_LIFTOFF_ASSEMBLER_MIPS64_H_
@@ -16,7 +16,7 @@ namespace wasm {
#define REQUIRE_CPU_FEATURE(name, ...) \
if (!CpuFeatures::IsSupported(name)) { \
bailout("no " #name); \
bailout(kMissingCPUFeature, "no " #name); \
return __VA_ARGS__; \
} \
CpuFeatureScope feature(this, name);
@@ -1260,7 +1260,7 @@ template <typename dst_type, typename src_type>
inline bool EmitTruncateFloatToInt(LiftoffAssembler* assm, Register dst,
DoubleRegister src, Label* trap) {
if (!CpuFeatures::IsSupported(SSE4_1)) {
assm->bailout("no SSE4.1");
assm->bailout(kMissingCPUFeature, "no SSE4.1");
return true;
}
CpuFeatureScope feature(assm, SSE4_1);
......