Commit 983ca744 authored by Clemens Hammacher, committed by Commit Bot

[Liftoff] Implement f32 values and operations

This CL adds support for f32 locals and parameters, and implements the
basic f32 binary operations.

R=titzer@chromium.org

Bug: v8:6600
Change-Id: Ia2d792dd1a6f7e97eab52a4ac49543b128fe3041
Reviewed-on: https://chromium-review.googlesource.com/796854
Reviewed-by: Ben Titzer <titzer@chromium.org>
Commit-Queue: Clemens Hammacher <clemensh@chromium.org>
Cr-Commit-Position: refs/heads/master@{#49802}
parent 7e6bde90
@@ -58,6 +58,12 @@ DEFAULT_I32_BINOP(xor, xor)
 #undef DEFAULT_I32_BINOP
+void LiftoffAssembler::emit_f32_add(DoubleRegister dst, DoubleRegister lhs,
+                                    DoubleRegister rhs) {}
+void LiftoffAssembler::emit_f32_sub(DoubleRegister dst, DoubleRegister lhs,
+                                    DoubleRegister rhs) {}
+void LiftoffAssembler::emit_f32_mul(DoubleRegister dst, DoubleRegister lhs,
+                                    DoubleRegister rhs) {}
 void LiftoffAssembler::JumpIfZero(Register reg, Label* label) {}
 } // namespace wasm
......
@@ -58,6 +58,12 @@ DEFAULT_I32_BINOP(xor, xor)
 #undef DEFAULT_I32_BINOP
+void LiftoffAssembler::emit_f32_add(DoubleRegister dst, DoubleRegister lhs,
+                                    DoubleRegister rhs) {}
+void LiftoffAssembler::emit_f32_sub(DoubleRegister dst, DoubleRegister lhs,
+                                    DoubleRegister rhs) {}
+void LiftoffAssembler::emit_f32_mul(DoubleRegister dst, DoubleRegister lhs,
+                                    DoubleRegister rhs) {}
 void LiftoffAssembler::JumpIfZero(Register reg, Label* label) {}
 } // namespace wasm
......
@@ -29,6 +29,8 @@ inline Operand GetContextOperand() { return Operand(ebp, -16); }
 } // namespace liftoff
+static constexpr DoubleRegister kScratchDoubleReg = xmm7;
 void LiftoffAssembler::ReserveStackSpace(uint32_t space) {
   stack_space_ = space;
   sub(esp, Immediate(space));
@@ -43,6 +45,9 @@ void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value) {
         mov(reg.gp(), Immediate(value.to_i32()));
       }
       break;
+    case kWasmF32:
+      TurboAssembler::Move(reg.fp(), value.to_f32_boxed().get_bits());
+      break;
     default:
       UNREACHABLE();
   }
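An f32 constant is handled by its raw IEEE-754 bit pattern: to_f32_boxed().get_bits() exposes the 32 bits so the constant can be materialized like an integer. A standalone sketch of that extraction (not V8 code; F32Bits is a hypothetical helper):

```cpp
// Standalone sketch, not V8 code: extracting the raw IEEE-754 bits of an
// f32 constant, as value.to_f32_boxed().get_bits() does above.
#include <cstdint>
#include <cstdio>
#include <cstring>

uint32_t F32Bits(float value) {
  uint32_t bits;
  std::memcpy(&bits, &value, sizeof bits);  // well-defined type pun
  return bits;
}

int main() {
  std::printf("bits(1.0f)  = 0x%08x\n", F32Bits(1.0f));   // 0x3f800000
  std::printf("bits(-2.5f) = 0x%08x\n", F32Bits(-2.5f));  // 0xc0200000
}
```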
@@ -92,8 +97,14 @@ void LiftoffAssembler::Store(Register dst_addr, uint32_t offset_imm,
 void LiftoffAssembler::LoadCallerFrameSlot(LiftoffRegister dst,
                                            uint32_t caller_slot_idx) {
-  constexpr int32_t kCallerStackSlotSize = 4;
-  mov(dst.gp(), Operand(ebp, kCallerStackSlotSize * (caller_slot_idx + 1)));
+  constexpr int32_t kStackSlotSize = 4;
+  Operand src(ebp, kStackSlotSize * (caller_slot_idx + 1));
+  // TODO(clemensh): Handle different sizes here.
+  if (dst.is_gp()) {
+    mov(dst.gp(), src);
+  } else {
+    movsd(dst.fp(), src);
+  }
 }
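The addressing is a fixed stride above the frame pointer. A minimal sketch of just the offset computation, assuming the 4-byte ia32 slot size used above:

```cpp
// Sketch: the byte offsets LoadCallerFrameSlot reads from, relative to ebp.
#include <cstdint>
#include <cstdio>

constexpr int32_t kStackSlotSize = 4;  // ia32 slot size, as above

constexpr int32_t CallerSlotOffset(uint32_t caller_slot_idx) {
  return kStackSlotSize * static_cast<int32_t>(caller_slot_idx + 1);
}

int main() {
  for (uint32_t idx = 0; idx < 3; ++idx) {
    std::printf("caller slot %u -> [ebp + %d]\n", idx, CallerSlotOffset(idx));
  }
}
```

The TODO matters here: the fp path issues movsd, an 8-byte load, against these 4-byte slots; on little-endian x86 the f32 still lands in the low 32 bits of the xmm register, which is presumably why deferring per-type sizes is tolerable for now.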
 void LiftoffAssembler::MoveStackValue(uint32_t dst_index, uint32_t src_index) {
@@ -109,7 +120,11 @@ void LiftoffAssembler::MoveStackValue(uint32_t dst_index, uint32_t src_index) {
 }
 void LiftoffAssembler::MoveToReturnRegister(LiftoffRegister reg) {
-  if (reg.gp() != eax) mov(eax, reg.gp());
+  // TODO(wasm): Extract the destination register from the CallDescriptor.
+  // TODO(wasm): Add multi-return support.
+  LiftoffRegister dst =
+      reg.is_gp() ? LiftoffRegister(eax) : LiftoffRegister(xmm1);
+  if (reg != dst) Move(dst, reg);
 }
 void LiftoffAssembler::Move(LiftoffRegister dst, LiftoffRegister src) {
@@ -127,16 +142,37 @@ void LiftoffAssembler::Move(LiftoffRegister dst, LiftoffRegister src) {
 }
 void LiftoffAssembler::Spill(uint32_t index, LiftoffRegister reg) {
-  mov(liftoff::GetStackSlot(index), reg.gp());
+  Operand dst = liftoff::GetStackSlot(index);
+  // TODO(clemensh): Handle different sizes here.
+  if (reg.is_gp()) {
+    mov(dst, reg.gp());
+  } else {
+    movsd(dst, reg.fp());
+  }
 }
 void LiftoffAssembler::Spill(uint32_t index, WasmValue value) {
-  // TODO(clemensh): Handle different types here.
-  mov(liftoff::GetStackSlot(index), Immediate(value.to_i32()));
+  Operand dst = liftoff::GetStackSlot(index);
+  switch (value.type()) {
+    case kWasmI32:
+      mov(dst, Immediate(value.to_i32()));
+      break;
+    case kWasmF32:
+      mov(dst, Immediate(value.to_f32_boxed().get_bits()));
+      break;
+    default:
+      UNREACHABLE();
+  }
 }
 void LiftoffAssembler::Fill(LiftoffRegister reg, uint32_t index) {
-  mov(reg.gp(), liftoff::GetStackSlot(index));
+  Operand src = liftoff::GetStackSlot(index);
+  // TODO(clemensh): Handle different sizes here.
+  if (reg.is_gp()) {
+    mov(reg.gp(), src);
+  } else {
+    movsd(reg.fp(), src);
+  }
 }
 void LiftoffAssembler::emit_i32_add(Register dst, Register lhs, Register rhs) {
@@ -177,6 +213,53 @@ COMMUTATIVE_I32_BINOP(xor, xor_)
 #undef DEFAULT_I32_BINOP
+void LiftoffAssembler::emit_f32_add(DoubleRegister dst, DoubleRegister lhs,
+                                    DoubleRegister rhs) {
+  if (CpuFeatures::IsSupported(AVX)) {
+    CpuFeatureScope scope(this, AVX);
+    vaddss(dst, lhs, rhs);
+    return;
+  }
+  if (dst == rhs) {
+    addss(dst, lhs);
+  } else {
+    if (dst != lhs) movss(dst, lhs);
+    addss(dst, rhs);
+  }
+}
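The two paths exist because AVX vaddss is a non-destructive three-operand instruction while SSE addss is two-operand and overwrites its destination; addition being commutative, the dst == rhs case can simply add lhs into dst. A plain-C++ model of the two forms (function names only mirror the mnemonics):

```cpp
// Toy model of the two instruction forms handled above; not assembly.
#include <cassert>

void Vaddss(float* dst, float lhs, float rhs) { *dst = lhs + rhs; }  // AVX, 3-op
void Addss(float* dst, float rhs) { *dst += rhs; }  // SSE, 2-op, destructive

int main() {
  float d = 0.0f;
  Vaddss(&d, 1.0f, 2.0f);  // no aliasing constraints at all
  assert(d == 3.0f);

  // The SSE path must first ensure dst already holds one operand; since
  // addition commutes, dst == rhs can add lhs into dst instead.
  float x = 1.0f;
  Addss(&x, 2.0f);
  assert(x == 3.0f);
}
```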
+void LiftoffAssembler::emit_f32_sub(DoubleRegister dst, DoubleRegister lhs,
+                                    DoubleRegister rhs) {
+  if (CpuFeatures::IsSupported(AVX)) {
+    CpuFeatureScope scope(this, AVX);
+    vsubss(dst, lhs, rhs);
+    return;
+  }
+  if (dst == rhs) {
+    movss(kScratchDoubleReg, rhs);
+    movss(dst, lhs);
+    subss(dst, kScratchDoubleReg);
+  } else {
+    if (dst != lhs) movss(dst, lhs);
+    subss(dst, rhs);
+  }
+}
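Subtraction is the delicate case: it is not commutative, so when dst aliases rhs the operands cannot simply be flipped; rhs is parked in kScratchDoubleReg before dst is overwritten with lhs. A plain-C++ model of the aliasing cases (pointers stand in for registers):

```cpp
// Toy model: two-operand "subss dst, src" computes dst -= src, so the
// dst == rhs case needs the scratch copy emitted above.
#include <cassert>

void EmitF32SubModel(float* dst, float* lhs, float* rhs) {
  float scratch;                  // stands in for kScratchDoubleReg
  if (dst == rhs) {
    scratch = *rhs;               // movss kScratchDoubleReg, rhs
    *dst = *lhs;                  // movss dst, lhs (this clobbers rhs!)
    *dst -= scratch;              // subss dst, kScratchDoubleReg
  } else {
    if (dst != lhs) *dst = *lhs;  // movss dst, lhs
    *dst -= *rhs;                 // subss dst, rhs
  }
}

int main() {
  float a = 5.0f, b = 2.0f;
  EmitF32SubModel(&a, &a, &b);    // dst == lhs
  assert(a == 3.0f);
  float c = 5.0f, d = 2.0f;
  EmitF32SubModel(&d, &c, &d);    // dst == rhs: scratch copy required
  assert(d == 3.0f);
}
```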
+void LiftoffAssembler::emit_f32_mul(DoubleRegister dst, DoubleRegister lhs,
+                                    DoubleRegister rhs) {
+  if (CpuFeatures::IsSupported(AVX)) {
+    CpuFeatureScope scope(this, AVX);
+    vmulss(dst, lhs, rhs);
+    return;
+  }
+  if (dst == rhs) {
+    mulss(dst, lhs);
+  } else {
+    if (dst != lhs) movss(dst, lhs);
+    mulss(dst, rhs);
+  }
+}
 void LiftoffAssembler::JumpIfZero(Register reg, Label* label) {
   test(reg, reg);
   j(zero, label);
......
@@ -207,6 +207,7 @@ class LiftoffAssembler : public TurboAssembler {
   LiftoffRegister PopToRegister(RegClass, LiftoffRegList pinned = {});
   void PushRegister(ValueType type, LiftoffRegister reg) {
     DCHECK_EQ(reg_class_for(type), reg.reg_class());
+    cache_state_.inc_used(reg);
     cache_state_.stack_state.emplace_back(type, reg);
   }
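The new inc_used call keeps the per-register use count in step with the value stack: a register backing a pushed value must stay pinned until that value is consumed, or the allocator could hand it out again. A simplified stand-in for the counting (not Liftoff's real CacheState):

```cpp
// Simplified model of per-register use counting in a Liftoff-style cache.
#include <array>
#include <cassert>

struct MiniCacheState {
  std::array<int, 16> use_count{};
  void inc_used(int reg) { ++use_count[reg]; }
  void dec_used(int reg) { assert(use_count[reg] > 0); --use_count[reg]; }
  bool is_used(int reg) const { return use_count[reg] != 0; }
};

int main() {
  MiniCacheState state;
  state.inc_used(3);         // PushRegister: a value now lives in register 3
  assert(state.is_used(3));  // the allocator must not reuse register 3
  state.dec_used(3);         // value popped / consumed
  assert(!state.is_used(3));
}
```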
@@ -269,6 +270,13 @@ class LiftoffAssembler : public TurboAssembler {
   inline void emit_i32_or(Register dst, Register lhs, Register rhs);
   inline void emit_i32_xor(Register dst, Register lhs, Register rhs);
+  inline void emit_f32_add(DoubleRegister dst, DoubleRegister lhs,
+                           DoubleRegister rhs);
+  inline void emit_f32_sub(DoubleRegister dst, DoubleRegister lhs,
+                           DoubleRegister rhs);
+  inline void emit_f32_mul(DoubleRegister dst, DoubleRegister lhs,
+                           DoubleRegister rhs);
   inline void JumpIfZero(Register, Label*);
   ////////////////////////////////////
......
@@ -113,27 +113,32 @@ class LiftoffCompiler {
   }
   void ProcessParameter(uint32_t param_idx, uint32_t input_location) {
-    DCHECK_EQ(kWasmI32, __ local_type(param_idx));
+    ValueType type = __ local_type(param_idx);
+    RegClass rc = reg_class_for(type);
     compiler::LinkageLocation param_loc =
         call_desc_->GetInputLocation(input_location);
     if (param_loc.IsRegister()) {
       DCHECK(!param_loc.IsAnyRegister());
       int reg_code = param_loc.AsRegister();
-      LiftoffRegister reg = LiftoffRegister(Register::from_code(reg_code));
-      if (kGpCacheRegList.has(reg)) {
+      LiftoffRegister reg =
+          rc == kGpReg ? LiftoffRegister(Register::from_code(reg_code))
+                       : LiftoffRegister(DoubleRegister::from_code(reg_code));
+      LiftoffRegList cache_regs =
+          rc == kGpReg ? kGpCacheRegList : kFpCacheRegList;
+      if (cache_regs.has(reg)) {
         // This is a cache register, just use it.
-        __ PushRegister(kWasmI32, reg);
+        __ PushRegister(type, reg);
         return;
       }
       // No cache register. Push to the stack.
       __ Spill(param_idx, reg);
-      __ cache_state()->stack_state.emplace_back(kWasmI32);
+      __ cache_state()->stack_state.emplace_back(type);
       return;
     }
     if (param_loc.IsCallerFrameSlot()) {
-      LiftoffRegister tmp_reg = __ GetUnusedRegister(kGpReg);
+      LiftoffRegister tmp_reg = __ GetUnusedRegister(rc);
       __ LoadCallerFrameSlot(tmp_reg, -param_loc.AsCallerFrameSlot());
-      __ PushRegister(kWasmI32, tmp_reg);
+      __ PushRegister(type, tmp_reg);
       return;
     }
     UNREACHABLE();
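The heart of this hunk: a LinkageLocation carries only a numeric register code, and the parameter's value type now decides whether that code is decoded as a general-purpose or a floating-point register. A toy illustration (the register name tables are illustrative ia32 conventions, not V8 data structures):

```cpp
// Sketch: the same register code names different registers per class.
#include <cstdio>

enum RegClass { kGpReg, kFpReg };

const char* RegName(RegClass rc, int code) {
  static const char* const kGpNames[] = {"eax", "ecx", "edx", "ebx"};
  static const char* const kFpNames[] = {"xmm0", "xmm1", "xmm2", "xmm3"};
  return rc == kGpReg ? kGpNames[code] : kFpNames[code];
}

int main() {
  // An i32 parameter with register code 1 arrives in ecx,
  // while an f32 parameter with the same code arrives in xmm1.
  std::printf("code 1 as gp: %s\n", RegName(kGpReg, 1));
  std::printf("code 1 as fp: %s\n", RegName(kFpReg, 1));
}
```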
@@ -150,10 +155,20 @@ class LiftoffCompiler {
     uint32_t num_params =
         static_cast<uint32_t>(call_desc_->ParameterCount()) - 1;
     for (uint32_t i = 0; i < __ num_locals(); ++i) {
-      // We can currently only handle i32 parameters and locals.
-      if (__ local_type(i) != kWasmI32) {
-        unsupported(decoder, "non-i32 param/local");
-        return;
+      switch (__ local_type(i)) {
+        case kWasmI32:
+        case kWasmF32:
+          // supported.
+          break;
+        case kWasmI64:
+          unsupported(decoder, "i64 param/local");
+          return;
+        case kWasmF64:
+          unsupported(decoder, "f64 param/local");
+          return;
+        default:
+          unsupported(decoder, "exotic param/local");
+          return;
       }
     }
     // Input 0 is the call target, the context is at 1.
@@ -170,12 +185,24 @@
       constexpr int kFirstActualParameterIndex = kContextParameterIndex + 1;
       ProcessParameter(param_idx, param_idx + kFirstActualParameterIndex);
     }
+    // Set to a gp register, to mark this uninitialized.
+    LiftoffRegister zero_double_reg(Register::from_code<0>());
+    DCHECK(zero_double_reg.is_gp());
     for (; param_idx < __ num_locals(); ++param_idx) {
       ValueType type = decoder->GetLocalType(param_idx);
       switch (type) {
        case kWasmI32:
          __ cache_state()->stack_state.emplace_back(kWasmI32, uint32_t{0});
          break;
+        case kWasmF32:
+          if (zero_double_reg.is_gp()) {
+            // Use CacheState::unused_register directly. There must be an unused
+            // register, no spilling allowed here.
+            zero_double_reg = __ cache_state()->unused_register(kFpReg);
+            __ LoadConstant(zero_double_reg, WasmValue(0.f));
+          }
+          __ PushRegister(kWasmF32, zero_double_reg);
+          break;
        default:
          UNIMPLEMENTED();
      }
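The zero_double_reg dance is lazy, shared zero-initialization: 0.0f is materialized into an fp register at most once, and every subsequent f32 local pushes that same register; holding a gp-class register doubles as the "not yet initialized" sentinel. A standalone model of the control flow (std::optional stands in for the sentinel trick):

```cpp
// Model of the shared-zero-register initialization for f32 locals.
#include <cstdio>
#include <optional>
#include <vector>

int main() {
  std::optional<int> zero_reg;  // stands in for zero_double_reg
  std::vector<int> value_stack;
  int constant_loads = 0;

  for (int local = 0; local < 4; ++local) {  // pretend: four f32 locals
    if (!zero_reg) {
      zero_reg = 7;      // grab some unused fp register once...
      ++constant_loads;  // ...and LoadConstant(WasmValue(0.f)) into it
    }
    value_stack.push_back(*zero_reg);  // PushRegister(kWasmF32, zero_reg)
  }
  std::printf("%d constant load(s) for %zu locals\n", constant_loads,
              value_stack.size());  // 1 constant load for 4 locals
}
```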
@@ -243,35 +270,53 @@
   void UnOp(Decoder* decoder, WasmOpcode opcode, FunctionSig*,
             const Value& value, Value* result) {
-    unsupported(decoder, "unary operation");
+    unsupported(decoder, WasmOpcodes::OpcodeName(opcode));
   }
+  void I32BinOp(void (LiftoffAssembler::*emit_fn)(Register, Register,
+                                                  Register)) {
+    LiftoffRegList pinned_regs;
+    LiftoffRegister target_reg =
+        pinned_regs.set(__ GetBinaryOpTargetRegister(kGpReg));
+    LiftoffRegister rhs_reg =
+        pinned_regs.set(__ PopToRegister(kGpReg, pinned_regs));
+    LiftoffRegister lhs_reg = __ PopToRegister(kGpReg, pinned_regs);
+    (asm_->*emit_fn)(target_reg.gp(), lhs_reg.gp(), rhs_reg.gp());
+    __ PushRegister(kWasmI32, target_reg);
+  }
+  void F32BinOp(void (LiftoffAssembler::*emit_fn)(DoubleRegister,
+                                                  DoubleRegister,
+                                                  DoubleRegister)) {
+    LiftoffRegList pinned_regs;
+    LiftoffRegister target_reg =
+        pinned_regs.set(__ GetBinaryOpTargetRegister(kFpReg));
+    LiftoffRegister rhs_reg =
+        pinned_regs.set(__ PopToRegister(kFpReg, pinned_regs));
+    LiftoffRegister lhs_reg = __ PopToRegister(kFpReg, pinned_regs);
+    (asm_->*emit_fn)(target_reg.fp(), lhs_reg.fp(), rhs_reg.fp());
+    __ PushRegister(kWasmF32, target_reg);
+  }
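I32BinOp and F32BinOp are identical in shape; only the register class and result type differ, so the instruction choice is passed as a pointer to member function and invoked with (asm_->*emit_fn)(...). That dispatch in isolation (toy assembler, not V8):

```cpp
// Toy demonstration of pointer-to-member-function dispatch.
#include <cstdio>

struct ToyAssembler {
  void emit_f32_add(int dst, int lhs, int rhs) {
    std::printf("addss  xmm%d <- xmm%d, xmm%d\n", dst, lhs, rhs);
  }
  void emit_f32_sub(int dst, int lhs, int rhs) {
    std::printf("subss  xmm%d <- xmm%d, xmm%d\n", dst, lhs, rhs);
  }
};

void F32BinOp(ToyAssembler* asm_,
              void (ToyAssembler::*emit_fn)(int, int, int)) {
  // Popping operands and picking a target register would happen here;
  // then the selected emitter is dispatched:
  (asm_->*emit_fn)(/*dst=*/0, /*lhs=*/1, /*rhs=*/2);
}

int main() {
  ToyAssembler a;
  F32BinOp(&a, &ToyAssembler::emit_f32_add);
  F32BinOp(&a, &ToyAssembler::emit_f32_sub);
}
```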
   void BinOp(Decoder* decoder, WasmOpcode opcode, FunctionSig*,
              const Value& lhs, const Value& rhs, Value* result) {
-    void (LiftoffAssembler::*emit_fn)(Register, Register, Register);
-#define CASE_EMIT_FN(opcode, fn)  \
-  case WasmOpcode::kExpr##opcode: \
-    emit_fn = &LiftoffAssembler::emit_##fn; \
-    break;
+#define CASE_BINOP(opcode, type, fn) \
+  case WasmOpcode::kExpr##opcode:    \
+    return type##BinOp(&LiftoffAssembler::emit_##fn);
     switch (opcode) {
-      CASE_EMIT_FN(I32Add, i32_add)
-      CASE_EMIT_FN(I32Sub, i32_sub)
-      CASE_EMIT_FN(I32Mul, i32_mul)
-      CASE_EMIT_FN(I32And, i32_and)
-      CASE_EMIT_FN(I32Ior, i32_or)
-      CASE_EMIT_FN(I32Xor, i32_xor)
+      CASE_BINOP(I32Add, I32, i32_add)
+      CASE_BINOP(I32Sub, I32, i32_sub)
+      CASE_BINOP(I32Mul, I32, i32_mul)
+      CASE_BINOP(I32And, I32, i32_and)
+      CASE_BINOP(I32Ior, I32, i32_or)
+      CASE_BINOP(I32Xor, I32, i32_xor)
+      CASE_BINOP(F32Add, F32, f32_add)
+      CASE_BINOP(F32Sub, F32, f32_sub)
+      CASE_BINOP(F32Mul, F32, f32_mul)
       default:
         return unsupported(decoder, WasmOpcodes::OpcodeName(opcode));
     }
-#undef CASE_EMIT_FN
-    LiftoffRegList pinned;
-    LiftoffRegister target_reg =
-        pinned.set(__ GetBinaryOpTargetRegister(kGpReg));
-    LiftoffRegister rhs_reg = pinned.set(__ PopToRegister(kGpReg, pinned));
-    LiftoffRegister lhs_reg = __ PopToRegister(kGpReg, pinned);
-    (asm_->*emit_fn)(target_reg.gp(), lhs_reg.gp(), rhs_reg.gp());
-    __ PushRegister(kWasmI32, target_reg);
+#undef CASE_BINOP
   }
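Hand-expanding one use shows what the macro builds: CASE_BINOP(F32Add, F32, f32_add) becomes `case WasmOpcode::kExprF32Add: return F32BinOp(&LiftoffAssembler::emit_f32_add);`. A self-contained miniature of the same ##-pasting pattern (toy enum and helpers, not the V8 types):

```cpp
// Miniature of the CASE_BINOP token-pasting pattern.
#include <cstdio>

enum Opcode { kExprI32Add, kExprF32Add };

void I32BinOp(const char* fn) { std::printf("I32BinOp(%s)\n", fn); }
void F32BinOp(const char* fn) { std::printf("F32BinOp(%s)\n", fn); }

#define CASE_BINOP(opcode, type, fn) \
  case kExpr##opcode:                \
    return type##BinOp(#fn);

void BinOp(Opcode opcode) {
  switch (opcode) {
    CASE_BINOP(I32Add, I32, i32_add)  // expands to: case kExprI32Add: ...
    CASE_BINOP(F32Add, F32, f32_add)  // expands to: case kExprF32Add: ...
  }
}
#undef CASE_BINOP

int main() {
  BinOp(kExprF32Add);  // prints: F32BinOp(f32_add)
}
```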
   void I32Const(Decoder* decoder, Value* result, int32_t value) {
@@ -282,9 +327,14 @@
   void I64Const(Decoder* decoder, Value* result, int64_t value) {
     unsupported(decoder, "i64.const");
   }
   void F32Const(Decoder* decoder, Value* result, float value) {
-    unsupported(decoder, "f32.const");
+    LiftoffRegister reg = __ GetUnusedRegister(kFpReg);
+    __ LoadConstant(reg, WasmValue(value));
+    __ PushRegister(kWasmF32, reg);
+    CheckStackSizeLimit(decoder);
   }
   void F64Const(Decoder* decoder, Value* result, double value) {
     unsupported(decoder, "f64.const");
   }
@@ -303,10 +353,8 @@
     }
     if (!values.is_empty()) {
       if (values.size() > 1) return unsupported(decoder, "multi-return");
-      // TODO(clemensh): Handle other types.
-      if (values[0].type != kWasmI32)
-        return unsupported(decoder, "non-i32 return");
-      LiftoffRegister reg = __ PopToRegister(kGpReg);
+      RegClass rc = reg_class_for(values[0].type);
+      LiftoffRegister reg = __ PopToRegister(rc);
       __ MoveToReturnRegister(reg);
     }
     __ LeaveFrame(StackFrame::WASM_COMPILED);
......
@@ -28,6 +28,13 @@ static inline constexpr RegClass reg_class_for(ValueType type) {
              : kNoReg;  // other (unsupported) types
 }
+// RegForClass<rc>: Register for rc==kGpReg, DoubleRegister for rc==kFpReg, void
+// for all other values of rc.
+template <RegClass rc>
+using RegForClass = typename std::conditional<
+    rc == kGpReg, Register,
+    typename std::conditional<rc == kFpReg, DoubleRegister, void>::type>::type;
 // Maximum code of a gp cache register.
 static constexpr int kMaxGpRegCode =
     8 * sizeof(kLiftoffAssemblerGpCacheRegs) -
......
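RegForClass resolves entirely at compile time, which makes it easy to check with static_asserts against stand-in types (the struct names below merely mirror the assembler headers):

```cpp
// Compile-time check of the RegForClass alias, with stand-in types.
#include <type_traits>

enum RegClass { kNoReg, kGpReg, kFpReg };
struct Register {};
struct DoubleRegister {};

template <RegClass rc>
using RegForClass = typename std::conditional<
    rc == kGpReg, Register,
    typename std::conditional<rc == kFpReg, DoubleRegister, void>::type>::type;

static_assert(std::is_same<RegForClass<kGpReg>, Register>::value, "gp");
static_assert(std::is_same<RegForClass<kFpReg>, DoubleRegister>::value, "fp");
static_assert(std::is_same<RegForClass<kNoReg>, void>::value, "other");

int main() {}
```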
@@ -58,6 +58,12 @@ DEFAULT_I32_BINOP(xor, xor)
 #undef DEFAULT_I32_BINOP
+void LiftoffAssembler::emit_f32_add(DoubleRegister dst, DoubleRegister lhs,
+                                    DoubleRegister rhs) {}
+void LiftoffAssembler::emit_f32_sub(DoubleRegister dst, DoubleRegister lhs,
+                                    DoubleRegister rhs) {}
+void LiftoffAssembler::emit_f32_mul(DoubleRegister dst, DoubleRegister lhs,
+                                    DoubleRegister rhs) {}
 void LiftoffAssembler::JumpIfZero(Register reg, Label* label) {}
 } // namespace wasm
......
@@ -58,6 +58,12 @@ DEFAULT_I32_BINOP(xor, xor)
 #undef DEFAULT_I32_BINOP
+void LiftoffAssembler::emit_f32_add(DoubleRegister dst, DoubleRegister lhs,
+                                    DoubleRegister rhs) {}
+void LiftoffAssembler::emit_f32_sub(DoubleRegister dst, DoubleRegister lhs,
+                                    DoubleRegister rhs) {}
+void LiftoffAssembler::emit_f32_mul(DoubleRegister dst, DoubleRegister lhs,
+                                    DoubleRegister rhs) {}
 void LiftoffAssembler::JumpIfZero(Register reg, Label* label) {}
 } // namespace wasm
......
@@ -58,6 +58,12 @@ DEFAULT_I32_BINOP(xor, xor)
 #undef DEFAULT_I32_BINOP
+void LiftoffAssembler::emit_f32_add(DoubleRegister dst, DoubleRegister lhs,
+                                    DoubleRegister rhs) {}
+void LiftoffAssembler::emit_f32_sub(DoubleRegister dst, DoubleRegister lhs,
+                                    DoubleRegister rhs) {}
+void LiftoffAssembler::emit_f32_mul(DoubleRegister dst, DoubleRegister lhs,
+                                    DoubleRegister rhs) {}
 void LiftoffAssembler::JumpIfZero(Register reg, Label* label) {}
 } // namespace wasm
......
@@ -58,6 +58,12 @@ DEFAULT_I32_BINOP(xor, xor)
 #undef DEFAULT_I32_BINOP
+void LiftoffAssembler::emit_f32_add(DoubleRegister dst, DoubleRegister lhs,
+                                    DoubleRegister rhs) {}
+void LiftoffAssembler::emit_f32_sub(DoubleRegister dst, DoubleRegister lhs,
+                                    DoubleRegister rhs) {}
+void LiftoffAssembler::emit_f32_mul(DoubleRegister dst, DoubleRegister lhs,
+                                    DoubleRegister rhs) {}
 void LiftoffAssembler::JumpIfZero(Register reg, Label* label) {}
 } // namespace wasm
......
@@ -43,6 +43,9 @@ void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value) {
         movl(reg.gp(), Immediate(value.to_i32()));
       }
       break;
+    case kWasmF32:
+      TurboAssembler::Move(reg.fp(), value.to_f32_boxed().get_bits());
+      break;
     default:
       UNREACHABLE();
   }
@@ -106,7 +109,13 @@ void LiftoffAssembler::Store(Register dst_addr, uint32_t offset_imm,
 void LiftoffAssembler::LoadCallerFrameSlot(LiftoffRegister dst,
                                            uint32_t caller_slot_idx) {
   constexpr int32_t kStackSlotSize = 8;
-  movl(dst.gp(), Operand(rbp, kStackSlotSize * (caller_slot_idx + 1)));
+  Operand src(rbp, kStackSlotSize * (caller_slot_idx + 1));
+  // TODO(clemensh): Handle different sizes here.
+  if (dst.is_gp()) {
+    movq(dst.gp(), src);
+  } else {
+    Movsd(dst.fp(), src);
+  }
 }
 void LiftoffAssembler::MoveStackValue(uint32_t dst_index, uint32_t src_index) {
@@ -122,7 +131,11 @@ void LiftoffAssembler::MoveStackValue(uint32_t dst_index, uint32_t src_index) {
 }
 void LiftoffAssembler::MoveToReturnRegister(LiftoffRegister reg) {
-  if (reg.gp() != rax) movl(rax, reg.gp());
+  // TODO(wasm): Extract the destination register from the CallDescriptor.
+  // TODO(wasm): Add multi-return support.
+  LiftoffRegister dst =
+      reg.is_gp() ? LiftoffRegister(rax) : LiftoffRegister(xmm1);
+  if (reg != dst) Move(dst, reg);
 }
 void LiftoffAssembler::Move(LiftoffRegister dst, LiftoffRegister src) {
@@ -140,16 +153,37 @@ void LiftoffAssembler::Move(LiftoffRegister dst, LiftoffRegister src) {
 }
 void LiftoffAssembler::Spill(uint32_t index, LiftoffRegister reg) {
-  movl(liftoff::GetStackSlot(index), reg.gp());
+  Operand dst = liftoff::GetStackSlot(index);
+  // TODO(clemensh): Handle different sizes here.
+  if (reg.is_gp()) {
+    movq(dst, reg.gp());
+  } else {
+    Movsd(dst, reg.fp());
+  }
 }
 void LiftoffAssembler::Spill(uint32_t index, WasmValue value) {
-  // TODO(clemensh): Handle different types here.
-  movl(liftoff::GetStackSlot(index), Immediate(value.to_i32()));
+  Operand dst = liftoff::GetStackSlot(index);
+  switch (value.type()) {
+    case kWasmI32:
+      movl(dst, Immediate(value.to_i32()));
+      break;
+    case kWasmF32:
+      movl(dst, Immediate(value.to_f32_boxed().get_bits()));
+      break;
+    default:
+      UNREACHABLE();
+  }
 }
 void LiftoffAssembler::Fill(LiftoffRegister reg, uint32_t index) {
-  movl(reg.gp(), liftoff::GetStackSlot(index));
+  Operand src = liftoff::GetStackSlot(index);
+  // TODO(clemensh): Handle different sizes here.
+  if (reg.is_gp()) {
+    movq(reg.gp(), src);
+  } else {
+    Movsd(reg.fp(), src);
+  }
 }
 void LiftoffAssembler::emit_i32_add(Register dst, Register lhs, Register rhs) {
@@ -188,7 +222,48 @@ COMMUTATIVE_I32_BINOP(or, or)
 COMMUTATIVE_I32_BINOP(xor, xor)
 // clang-format on
 #undef DEFAULT_I32_BINOP
 #undef COMMUTATIVE_I32_BINOP
+void LiftoffAssembler::emit_f32_add(DoubleRegister dst, DoubleRegister lhs,
+                                    DoubleRegister rhs) {
+  if (CpuFeatures::IsSupported(AVX)) {
+    CpuFeatureScope scope(this, AVX);
+    vaddss(dst, lhs, rhs);
+  } else if (dst == rhs) {
+    addss(dst, lhs);
+  } else {
+    if (dst != lhs) movss(dst, lhs);
+    addss(dst, rhs);
+  }
+}
+void LiftoffAssembler::emit_f32_sub(DoubleRegister dst, DoubleRegister lhs,
+                                    DoubleRegister rhs) {
+  if (CpuFeatures::IsSupported(AVX)) {
+    CpuFeatureScope scope(this, AVX);
+    vsubss(dst, lhs, rhs);
+  } else if (dst == rhs) {
+    movss(kScratchDoubleReg, rhs);
+    movss(dst, lhs);
+    subss(dst, kScratchDoubleReg);
+  } else {
+    if (dst != lhs) movss(dst, lhs);
+    subss(dst, rhs);
+  }
+}
+void LiftoffAssembler::emit_f32_mul(DoubleRegister dst, DoubleRegister lhs,
+                                    DoubleRegister rhs) {
+  if (CpuFeatures::IsSupported(AVX)) {
+    CpuFeatureScope scope(this, AVX);
+    vmulss(dst, lhs, rhs);
+  } else if (dst == rhs) {
+    mulss(dst, lhs);
+  } else {
+    if (dst != lhs) movss(dst, lhs);
+    mulss(dst, rhs);
+  }
+}
 void LiftoffAssembler::JumpIfZero(Register reg, Label* label) {
   testl(reg, reg);
......
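The hunk is truncated here, but the ia32 version above shows the complete body: test followed by j(zero, label). Semantically, `test reg, reg` ANDs the register with itself, setting ZF exactly when the register is zero, and the conditional jump fires on ZF. Stated as plain C++:

```cpp
// Plain-C++ statement of JumpIfZero's branch condition.
#include <cstdio>

bool JumpIfZeroTaken(int reg) {
  // test reg, reg computes reg & reg (result discarded) and sets ZF iff
  // it is zero; j(zero, label) then branches exactly when ZF is set.
  return (reg & reg) == 0;
}

int main() {
  std::printf("taken(0)  = %d\n", JumpIfZeroTaken(0));   // 1
  std::printf("taken(42) = %d\n", JumpIfZeroTaken(42));  // 0
}
```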