Commit 8d2d0513 authored by Clemens Hammacher, committed by Commit Bot

[wasm] Simplify C calls

Instead of passing multiple pointers to input and output, or to two
input values, just pass one pointer which holds all inputs and where
the output is written.
This also reduces the size of generated Turbofan graphs, since only one
stack slot is needed and fewer arguments are passed to the call.
It also fixes undefined behaviour: we were passing a pointer typed e.g.
as {uint64_t*}, but accessing it via {ReadUnalignedValue}. Now we pass
an Address, which does not carry any alignment constraints.

R=ahaas@chromium.org

Bug: v8:3770, v8:6600
Change-Id: I54ef80b7e27f77587a9062560c0b3e01d6593e6d
Reviewed-on: https://chromium-review.googlesource.com/1019147
Commit-Queue: Clemens Hammacher <clemensh@chromium.org>
Reviewed-by: Andreas Haas <ahaas@chromium.org>
Cr-Commit-Position: refs/heads/master@{#52702}
parent cccc7abf
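
To make the new convention concrete, here is a minimal stand-alone sketch of the pattern (illustrative only: plain memcpy stands in for V8's ReadUnalignedValue/WriteUnalignedValue helpers, uint8_t* stands in for Address, and the function names are made up, not the ones in this change):

    #include <cstdint>
    #include <cstring>

    // C callee: receives one pointer to a buffer that holds the input and is
    // overwritten with the output; no alignment of the buffer is assumed.
    void uint64_to_float_wrapper(uint8_t* data) {
      uint64_t input;
      std::memcpy(&input, data, sizeof(input));    // unaligned read
      float result = static_cast<float>(input);
      std::memcpy(data, &result, sizeof(result));  // unaligned write
    }

    // Caller: reserve a single buffer big enough for both input and output,
    // write the input, pass the buffer address, read the output back.
    float ConvertViaBuffer(uint64_t value) {
      uint8_t buffer[sizeof(uint64_t) > sizeof(float) ? sizeof(uint64_t)
                                                      : sizeof(float)];
      std::memcpy(buffer, &value, sizeof(value));
      uint64_to_float_wrapper(buffer);
      float result;
      std::memcpy(&result, buffer, sizeof(result));
      return result;
    }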
@@ -1922,25 +1922,22 @@ Node* WasmGraphBuilder::BuildIntToFloatConversionInstruction(
     Node* input, ExternalReference ref,
     MachineRepresentation parameter_representation,
     const MachineType result_type) {
-  Node* stack_slot_param = graph()->NewNode(
-      jsgraph()->machine()->StackSlot(parameter_representation));
-  Node* stack_slot_result = graph()->NewNode(
-      jsgraph()->machine()->StackSlot(result_type.representation()));
-
+  int stack_slot_size =
+      1 << std::max(ElementSizeLog2Of(parameter_representation),
+                    ElementSizeLog2Of(result_type.representation()));
+  Node* stack_slot =
+      graph()->NewNode(jsgraph()->machine()->StackSlot(stack_slot_size));
   const Operator* store_op = jsgraph()->machine()->Store(
       StoreRepresentation(parameter_representation, kNoWriteBarrier));
-  *effect_ =
-      graph()->NewNode(store_op, stack_slot_param, jsgraph()->Int32Constant(0),
-                       input, *effect_, *control_);
-  MachineSignature::Builder sig_builder(jsgraph()->zone(), 0, 2);
-  sig_builder.AddParam(MachineType::Pointer());
-  sig_builder.AddParam(MachineType::Pointer());
+  *effect_ = graph()->NewNode(store_op, stack_slot, jsgraph()->Int32Constant(0),
+                              input, *effect_, *control_);
+  MachineType sig_types[] = {MachineType::Pointer()};
+  MachineSignature sig(0, 1, sig_types);
   Node* function = graph()->NewNode(jsgraph()->common()->ExternalConstant(ref));
-  BuildCCall(sig_builder.Build(), function, stack_slot_param,
-             stack_slot_result);
+  BuildCCall(&sig, function, stack_slot);
   const Operator* load_op = jsgraph()->machine()->Load(result_type);
-  Node* load =
-      graph()->NewNode(load_op, stack_slot_result, jsgraph()->Int32Constant(0),
-                       *effect_, *control_);
+  Node* load = graph()->NewNode(
+      load_op, stack_slot, jsgraph()->Int32Constant(0), *effect_, *control_);
   *effect_ = load;
   return load;
 }
@@ -1979,26 +1976,24 @@ Node* WasmGraphBuilder::BuildCcallConvertFloat(Node* input,
   const MachineType int_ty = IntConvertType(opcode);
   const MachineType float_ty = FloatConvertType(opcode);
   ExternalReference call_ref = convert_ccall_ref(this, opcode);
-  Node* stack_slot_param = graph()->NewNode(
-      jsgraph()->machine()->StackSlot(float_ty.representation()));
-  Node* stack_slot_result = graph()->NewNode(
-      jsgraph()->machine()->StackSlot(int_ty.representation()));
-
+  int stack_slot_size =
+      1 << std::max(ElementSizeLog2Of(int_ty.representation()),
+                    ElementSizeLog2Of(float_ty.representation()));
+  Node* stack_slot =
+      graph()->NewNode(jsgraph()->machine()->StackSlot(stack_slot_size));
   const Operator* store_op = jsgraph()->machine()->Store(
       StoreRepresentation(float_ty.representation(), kNoWriteBarrier));
-  *effect_ = graph()->NewNode(store_op, stack_slot_param, Int32Constant(0),
-                              input, *effect_, *control_);
-  MachineSignature::Builder sig_builder(jsgraph()->zone(), 1, 2);
-  sig_builder.AddReturn(MachineType::Int32());
-  sig_builder.AddParam(MachineType::Pointer());
-  sig_builder.AddParam(MachineType::Pointer());
+  *effect_ = graph()->NewNode(store_op, stack_slot, Int32Constant(0), input,
+                              *effect_, *control_);
+  MachineType sig_types[] = {MachineType::Int32(), MachineType::Pointer()};
+  MachineSignature sig(1, 1, sig_types);
   Node* function =
       graph()->NewNode(jsgraph()->common()->ExternalConstant(call_ref));
-  Node* overflow = BuildCCall(sig_builder.Build(), function, stack_slot_param,
-                              stack_slot_result);
+  Node* overflow = BuildCCall(&sig, function, stack_slot);
   if (IsTrappingConvertOp(opcode)) {
     ZeroCheck32(wasm::kTrapFloatUnrepresentable, overflow, position);
     const Operator* load_op = jsgraph()->machine()->Load(int_ty);
-    Node* load = graph()->NewNode(load_op, stack_slot_result, Int32Constant(0),
-                                  *effect_, *control_);
+    Node* load = graph()->NewNode(load_op, stack_slot, Int32Constant(0),
+                                  *effect_, *control_);
     *effect_ = load;
     return load;
@@ -2015,8 +2010,8 @@ Node* WasmGraphBuilder::BuildCcallConvertFloat(Node* input,
   Node* sat_val =
       sat_d.Phi(int_ty.representation(), Min(this, int_ty), Max(this, int_ty));
   const Operator* load_op = jsgraph()->machine()->Load(int_ty);
-  Node* load = graph()->NewNode(load_op, stack_slot_result, Int32Constant(0),
-                                *effect_, *control_);
+  Node* load = graph()->NewNode(load_op, stack_slot, Int32Constant(0), *effect_,
+                                *control_);
   Node* nan_val =
       nan_d.Phi(int_ty.representation(), Zero(this, int_ty), sat_val);
   return tl_d.Phi(int_ty.representation(), nan_val, load);
......
@@ -286,6 +286,7 @@ class ExternalReference BASE_EMBEDDED {
   Address address_;
 };
 
+ASSERT_TRIVIALLY_COPYABLE(ExternalReference);
 
 V8_EXPORT_PRIVATE bool operator==(ExternalReference, ExternalReference);
 bool operator!=(ExternalReference, ExternalReference);
......
@@ -276,34 +276,14 @@ void LiftoffAssembler::DropStackSlotsAndRet(uint32_t num_stack_slots) {
   BAILOUT("DropStackSlotsAndRet");
 }
 
-void LiftoffAssembler::PrepareCCall(wasm::FunctionSig* sig,
-                                    const LiftoffRegister* args,
-                                    ValueType out_argument_type) {
-  BAILOUT("PrepareCCall");
-}
-
-void LiftoffAssembler::SetCCallRegParamAddr(Register dst, int param_byte_offset,
-                                            ValueType type) {
-  BAILOUT("SetCCallRegParamAddr");
-}
-
-void LiftoffAssembler::SetCCallStackParamAddr(int stack_param_idx,
-                                              int param_byte_offset,
-                                              ValueType type) {
-  BAILOUT("SetCCallStackParamAddr");
-}
-
-void LiftoffAssembler::LoadCCallOutArgument(LiftoffRegister dst, ValueType type,
-                                            int param_byte_offset) {
-  BAILOUT("LoadCCallOutArgument");
-}
-
-void LiftoffAssembler::CallC(ExternalReference ext_ref, uint32_t num_params) {
+void LiftoffAssembler::CallC(wasm::FunctionSig* sig,
+                             const LiftoffRegister* args,
+                             const LiftoffRegister* rets,
+                             ValueType out_argument_type, int stack_bytes,
+                             ExternalReference ext_ref) {
   BAILOUT("CallC");
 }
 
-void LiftoffAssembler::FinishCCall() { BAILOUT("FinishCCall"); }
-
 void LiftoffAssembler::CallNativeWasmCode(Address addr) {
   BAILOUT("CallNativeWasmCode");
 }
......
@@ -407,34 +407,14 @@ void LiftoffAssembler::DropStackSlotsAndRet(uint32_t num_stack_slots) {
   Ret();
 }
 
-void LiftoffAssembler::PrepareCCall(wasm::FunctionSig* sig,
-                                    const LiftoffRegister* args,
-                                    ValueType out_argument_type) {
-  BAILOUT("PrepareCCall");
-}
-
-void LiftoffAssembler::SetCCallRegParamAddr(Register dst, int param_byte_offset,
-                                            ValueType type) {
-  BAILOUT("SetCCallRegParamAddr");
-}
-
-void LiftoffAssembler::SetCCallStackParamAddr(int stack_param_idx,
-                                              int param_byte_offset,
-                                              ValueType type) {
-  BAILOUT("SetCCallStackParamAddr");
-}
-
-void LiftoffAssembler::LoadCCallOutArgument(LiftoffRegister dst, ValueType type,
-                                            int param_byte_offset) {
-  BAILOUT("LoadCCallOutArgument");
-}
-
-void LiftoffAssembler::CallC(ExternalReference ext_ref, uint32_t num_params) {
+void LiftoffAssembler::CallC(wasm::FunctionSig* sig,
+                             const LiftoffRegister* args,
+                             const LiftoffRegister* rets,
+                             ValueType out_argument_type, int stack_bytes,
+                             ExternalReference ext_ref) {
   BAILOUT("CallC");
 }
 
-void LiftoffAssembler::FinishCCall() { BAILOUT("FinishCCall"); }
-
 void LiftoffAssembler::CallNativeWasmCode(Address addr) {
   BAILOUT("CallNativeWasmCode");
 }
......
@@ -45,11 +45,6 @@ static_assert(kByteRegs.GetNumRegsSet() == 4, "should have four byte regs");
 static_assert((kByteRegs & kGpCacheRegList) == kByteRegs,
               "kByteRegs only contains gp cache registers");
 
-// Use this register to store the address of the last argument pushed on the
-// stack for a call to C. This register must be callee saved according to the c
-// calling convention.
-static constexpr Register kCCallLastArgAddrReg = ebx;
-
 inline void Load(LiftoffAssembler* assm, LiftoffRegister dst, Operand src,
                  ValueType type) {
   switch (type) {
@@ -67,6 +62,28 @@ inline void Load(LiftoffAssembler* assm, LiftoffRegister dst, Operand src,
   }
 }
 
+inline void Store(LiftoffAssembler* assm, Register base, int32_t offset,
+                  LiftoffRegister src, ValueType type) {
+  Operand dst(base, offset);
+  switch (type) {
+    case kWasmI32:
+      assm->mov(dst, src.gp());
+      break;
+    case kWasmI64:
+      assm->mov(dst, src.low_gp());
+      assm->mov(Operand(base, offset + 4), src.high_gp());
+      break;
+    case kWasmF32:
+      assm->movss(dst, src.fp());
+      break;
+    case kWasmF64:
+      assm->movsd(dst, src.fp());
+      break;
+    default:
+      UNREACHABLE();
+  }
+}
+
 inline void push(LiftoffAssembler* assm, LiftoffRegister reg, ValueType type) {
   switch (type) {
     case kWasmI32:
@@ -1314,59 +1331,50 @@ void LiftoffAssembler::DropStackSlotsAndRet(uint32_t num_stack_slots) {
   ret(static_cast<int>(num_stack_slots * kPointerSize));
 }
 
-void LiftoffAssembler::PrepareCCall(wasm::FunctionSig* sig,
-                                    const LiftoffRegister* args,
-                                    ValueType out_argument_type) {
-  int pushed_bytes = 0;
-  for (ValueType param_type : sig->parameters()) {
-    pushed_bytes += RoundUp<kPointerSize>(WasmOpcodes::MemSize(param_type));
-    liftoff::push(this, *args++, param_type);
-  }
-  if (out_argument_type != kWasmStmt) {
-    int size = RoundUp<kPointerSize>(WasmOpcodes::MemSize(out_argument_type));
-    sub(esp, Immediate(size));
-    pushed_bytes += size;
-  }
-  // Save the original sp (before the first push), such that we can later
-  // compute pointers to the pushed values. Do this only *after* pushing the
-  // values, because {kCCallLastArgAddrReg} might collide with an arg register.
-  lea(liftoff::kCCallLastArgAddrReg, Operand(esp, pushed_bytes));
-  constexpr Register kScratch = ecx;
-  static_assert(kScratch != liftoff::kCCallLastArgAddrReg, "collision");
-  int num_c_call_arguments = static_cast<int>(sig->parameter_count()) +
-                             (out_argument_type != kWasmStmt);
-  PrepareCallCFunction(num_c_call_arguments, kScratch);
-}
-
-void LiftoffAssembler::SetCCallRegParamAddr(Register dst, int param_byte_offset,
-                                            ValueType type) {
-  // Check that we don't accidentally override kCCallLastArgAddrReg.
-  DCHECK_NE(liftoff::kCCallLastArgAddrReg, dst);
-  lea(dst, Operand(liftoff::kCCallLastArgAddrReg, -param_byte_offset));
-}
-
-void LiftoffAssembler::SetCCallStackParamAddr(int stack_param_idx,
-                                              int param_byte_offset,
-                                              ValueType type) {
-  static constexpr Register kScratch = ecx;
-  SetCCallRegParamAddr(kScratch, param_byte_offset, type);
-  mov(Operand(esp, stack_param_idx * kPointerSize), kScratch);
-}
-
-void LiftoffAssembler::LoadCCallOutArgument(LiftoffRegister dst, ValueType type,
-                                            int param_byte_offset) {
-  // Check that we don't accidentally override kCCallLastArgAddrReg.
-  DCHECK_NE(LiftoffRegister(liftoff::kCCallLastArgAddrReg), dst);
-  Operand src(liftoff::kCCallLastArgAddrReg, -param_byte_offset);
-  liftoff::Load(this, dst, src, type);
-}
-
-void LiftoffAssembler::CallC(ExternalReference ext_ref, uint32_t num_params) {
-  CallCFunction(ext_ref, static_cast<int>(num_params));
-}
-
-void LiftoffAssembler::FinishCCall() {
-  mov(esp, liftoff::kCCallLastArgAddrReg);
+void LiftoffAssembler::CallC(wasm::FunctionSig* sig,
+                             const LiftoffRegister* args,
+                             const LiftoffRegister* rets,
+                             ValueType out_argument_type, int stack_bytes,
+                             ExternalReference ext_ref) {
+  sub(esp, Immediate(stack_bytes));
+
+  int arg_bytes = 0;
+  for (ValueType param_type : sig->parameters()) {
+    liftoff::Store(this, esp, arg_bytes, *args++, param_type);
+    arg_bytes += WasmOpcodes::MemSize(param_type);
+  }
+  DCHECK_LE(arg_bytes, stack_bytes);
+
+  constexpr Register kScratch = eax;
+  constexpr Register kArgumentBuffer = ecx;
+  constexpr int kNumCCallArgs = 1;
+  mov(kArgumentBuffer, esp);
+  PrepareCallCFunction(kNumCCallArgs, kScratch);
+
+  // Pass a pointer to the buffer with the arguments to the C function. ia32
+  // does not use registers here, so push to the stack.
+  mov(Operand(esp, 0), kArgumentBuffer);
+
+  // Now call the C function.
+  CallCFunction(ext_ref, kNumCCallArgs);
+
+  // Move return value to the right register.
+  const LiftoffRegister* next_result_reg = rets;
+  if (sig->return_count() > 0) {
+    DCHECK_EQ(1, sig->return_count());
+    constexpr Register kReturnReg = eax;
+    if (kReturnReg != next_result_reg->gp()) {
+      Move(*next_result_reg, LiftoffRegister(kReturnReg), sig->GetReturn(0));
+    }
+    ++next_result_reg;
+  }
+
+  // Load potential output value from the buffer on the stack.
+  if (out_argument_type != kWasmStmt) {
+    liftoff::Load(this, *next_result_reg, Operand(esp, 0), out_argument_type);
+  }
+
+  add(esp, Immediate(stack_bytes));
 }
 
 void LiftoffAssembler::CallNativeWasmCode(Address addr) {
......
@@ -505,25 +505,14 @@ class LiftoffAssembler : public TurboAssembler {
   inline void DropStackSlotsAndRet(uint32_t num_stack_slots);
 
-  // {PrepareCCall} pushes the arguments on the stack (in the caller frame),
-  // then aligns the stack to do a c call. Pointers to the pushed arguments are
-  // later loaded to registers or stack slots via {SetCCall*ParamAddr}. After
-  // the c call, the output parameter (if it exists) can be loaded via
-  // {LoadCCallOutArgument}. {FinishCCall} resets the stack pointer to the state
-  // before {PrepareCCall}.
-  // The {FunctionSig} passed to {PrepareCCall} describes the types of
-  // parameters which are then passed ot the C function via pointers, excluding
-  // the out argument.
-  inline void PrepareCCall(wasm::FunctionSig* sig, const LiftoffRegister* args,
-                           ValueType out_argument_type);
-  inline void SetCCallRegParamAddr(Register dst, int param_byte_offset,
-                                   ValueType type);
-  inline void SetCCallStackParamAddr(int stack_param_idx, int param_byte_offset,
-                                     ValueType type);
-  inline void LoadCCallOutArgument(LiftoffRegister dst, ValueType type,
-                                   int param_byte_offset);
-  inline void CallC(ExternalReference ext_ref, uint32_t num_params);
-  inline void FinishCCall();
+  // Execute a C call. Arguments are pushed to the stack and a pointer to this
+  // region is passed to the C function. If {out_argument_type != kWasmStmt},
+  // this is the return value of the C function, stored in {rets[0]}. Further
+  // outputs (specified in {sig->returns()}) are read from the buffer and stored
+  // in the remaining {rets} registers.
+  inline void CallC(wasm::FunctionSig* sig, const LiftoffRegister* args,
+                    const LiftoffRegister* rets, ValueType out_argument_type,
+                    int stack_bytes, ExternalReference ext_ref);
 
   inline void CallNativeWasmCode(Address addr);
   inline void CallRuntime(Zone* zone, Runtime::FunctionId fid);
......
@@ -572,90 +572,20 @@ class LiftoffCompiler {
                      ValueType out_argument_type,
                      const LiftoffRegister* arg_regs,
                      ExternalReference ext_ref) {
-    static constexpr int kMaxReturns = 1;
-    static constexpr int kMaxArgs = 2;
-    static constexpr MachineType kReps[]{
-        MachineType::Uint32(), MachineType::Pointer(), MachineType::Pointer()};
-    static_assert(arraysize(kReps) == kMaxReturns + kMaxArgs, "mismatch");
-
-    const bool has_out_argument = out_argument_type != kWasmStmt;
-    const uint32_t num_returns = static_cast<uint32_t>(sig->return_count());
-    // {total_num_args} is {num_args + 1} if the return value is stored in an
-    // out parameter, or {num_args} otherwise.
-    const uint32_t num_args = static_cast<uint32_t>(sig->parameter_count());
-    const uint32_t total_num_args = num_args + has_out_argument;
-    DCHECK_LE(num_args, kMaxArgs);
-    DCHECK_LE(num_returns, kMaxReturns);
-
-    MachineSignature machine_sig(num_returns, total_num_args,
-                                 kReps + (kMaxReturns - num_returns));
-    auto* call_descriptor = compiler::Linkage::GetSimplifiedCDescriptor(
-        compilation_zone_, &machine_sig);
-
     // Before making a call, spill all cache registers.
     __ SpillAllRegisters();
 
     // Store arguments on our stack, then align the stack for calling to C.
-    __ PrepareCCall(sig, arg_regs, out_argument_type);
-
-    // The arguments to the c function are pointers to the stack slots we just
-    // pushed.
-    int num_stack_params = 0;   // Number of stack parameters.
-    int input_idx = 1;          // Input 0 is the call target.
-    int param_byte_offset = 0;  // Byte offset into the pushed arguments.
-    auto add_argument = [&](ValueType arg_type) {
-      compiler::LinkageLocation loc =
-          call_descriptor->GetInputLocation(input_idx);
-      param_byte_offset +=
-          RoundUp<kPointerSize>(WasmOpcodes::MemSize(arg_type));
-      ++input_idx;
-      if (loc.IsRegister()) {
-        Register reg = Register::from_code(loc.AsRegister());
-        // Load address of that parameter to the register.
-        __ SetCCallRegParamAddr(reg, param_byte_offset, arg_type);
-      } else {
-        DCHECK(loc.IsCallerFrameSlot());
-        __ SetCCallStackParamAddr(num_stack_params, param_byte_offset,
-                                  arg_type);
-        ++num_stack_params;
-      }
-    };
-    for (ValueType arg_type : sig->parameters()) {
-      add_argument(arg_type);
-    }
-    if (has_out_argument) {
-      add_argument(out_argument_type);
-    }
-    DCHECK_EQ(input_idx, call_descriptor->InputCount());
-
-    // Now execute the call.
-    uint32_t c_call_arg_count =
-        static_cast<uint32_t>(sig->parameter_count()) + has_out_argument;
-    __ CallC(ext_ref, c_call_arg_count);
-
-    // Reset the stack pointer.
-    __ FinishCCall();
-
-    // Load return value.
-    const LiftoffRegister* next_result_reg = result_regs;
-    if (sig->return_count() > 0) {
-      DCHECK_EQ(1, sig->return_count());
-      compiler::LinkageLocation return_loc =
-          call_descriptor->GetReturnLocation(0);
-      DCHECK(return_loc.IsRegister());
-      Register return_reg = Register::from_code(return_loc.AsRegister());
-      if (return_reg != next_result_reg->gp()) {
-        __ Move(*next_result_reg, LiftoffRegister(return_reg),
-                sig->GetReturn(0));
-      }
-      ++next_result_reg;
-    }
-
-    // Load potential return value from output argument.
-    if (has_out_argument) {
-      __ LoadCCallOutArgument(*next_result_reg, out_argument_type,
-                              param_byte_offset);
-    }
+    int param_bytes = 0;
+    for (ValueType param_type : sig->parameters()) {
+      param_bytes += WasmOpcodes::MemSize(param_type);
+    }
+    int out_arg_bytes = out_argument_type == kWasmStmt
+                            ? 0
+                            : WasmOpcodes::MemSize(out_argument_type);
+    int stack_bytes = std::max(param_bytes, out_arg_bytes);
+    __ CallC(sig, arg_regs, result_regs, out_argument_type, stack_bytes,
+             ext_ref);
   }
 
   template <ValueType src_type, ValueType result_type, class EmitFn>
......
@@ -33,11 +33,6 @@ inline MemOperand GetHalfStackSlot(uint32_t half_index) {
 
 inline MemOperand GetInstanceOperand() { return MemOperand(fp, -8); }
 
-// Use this register to store the address of the last argument pushed on the
-// stack for a call to C. This register must be callee saved according to the c
-// calling convention.
-static constexpr Register kCCallLastArgAddrReg = s1;
-
 inline void Load(LiftoffAssembler* assm, LiftoffRegister dst, MemOperand src,
                  ValueType type) {
   switch (type) {
@@ -55,6 +50,28 @@ inline void Load(LiftoffAssembler* assm, LiftoffRegister dst, MemOperand src,
   }
 }
 
+inline void Store(LiftoffAssembler* assm, Register base, int32_t offset,
+                  LiftoffRegister src, ValueType type) {
+  MemOperand dst(base, offset);
+  switch (type) {
+    case kWasmI32:
+      assm->Usw(src.gp(), dst);
+      break;
+    case kWasmI64:
+      assm->Usw(src.low_gp(), dst);
+      assm->Usw(src.high_gp(), MemOperand(base, offset + 4));
+      break;
+    case kWasmF32:
+      assm->Uswc1(src.fp(), dst, t8);
+      break;
+    case kWasmF64:
+      assm->Usdc1(src.fp(), dst, t8);
+      break;
+    default:
+      UNREACHABLE();
+  }
+}
+
 inline void push(LiftoffAssembler* assm, LiftoffRegister reg, ValueType type) {
   switch (type) {
     case kWasmI32:
@@ -1110,59 +1127,48 @@ void LiftoffAssembler::DropStackSlotsAndRet(uint32_t num_stack_slots) {
   TurboAssembler::DropAndRet(static_cast<int>(num_stack_slots));
 }
 
-void LiftoffAssembler::PrepareCCall(wasm::FunctionSig* sig,
-                                    const LiftoffRegister* args,
-                                    ValueType out_argument_type) {
-  int pushed_bytes = 0;
-  for (ValueType param_type : sig->parameters()) {
-    pushed_bytes += RoundUp<kPointerSize>(WasmOpcodes::MemSize(param_type));
-    liftoff::push(this, *args++, param_type);
-  }
-  if (out_argument_type != kWasmStmt) {
-    int size = RoundUp<kPointerSize>(WasmOpcodes::MemSize(out_argument_type));
-    addiu(sp, sp, -size);
-    pushed_bytes += size;
-  }
-  // Save the original sp (before the first push), such that we can later
-  // compute pointers to the pushed values. Do this only *after* pushing the
-  // values, because {kCCallLastArgAddrReg} might collide with an arg register.
-  addiu(liftoff::kCCallLastArgAddrReg, sp, pushed_bytes);
-  constexpr Register kScratch = at;
-  static_assert(kScratch != liftoff::kCCallLastArgAddrReg, "collision");
-  int num_c_call_arguments = static_cast<int>(sig->parameter_count()) +
-                             (out_argument_type != kWasmStmt);
-  PrepareCallCFunction(num_c_call_arguments, kScratch);
-}
-
-void LiftoffAssembler::SetCCallRegParamAddr(Register dst, int param_byte_offset,
-                                            ValueType type) {
-  // Check that we don't accidentally override kCCallLastArgAddrReg.
-  DCHECK_NE(liftoff::kCCallLastArgAddrReg, dst);
-  addiu(dst, liftoff::kCCallLastArgAddrReg, -param_byte_offset);
-}
-
-void LiftoffAssembler::SetCCallStackParamAddr(int stack_param_idx,
-                                              int param_byte_offset,
-                                              ValueType type) {
-  static constexpr Register kScratch = at;
-  SetCCallRegParamAddr(kScratch, param_byte_offset, type);
-  sw(kScratch, MemOperand(sp, stack_param_idx * kPointerSize));
-}
-
-void LiftoffAssembler::LoadCCallOutArgument(LiftoffRegister dst, ValueType type,
-                                            int param_byte_offset) {
-  // Check that we don't accidentally override kCCallLastArgAddrReg.
-  DCHECK_NE(LiftoffRegister(liftoff::kCCallLastArgAddrReg), dst);
-  MemOperand src(liftoff::kCCallLastArgAddrReg, -param_byte_offset);
-  liftoff::Load(this, dst, src, type);
-}
-
-void LiftoffAssembler::CallC(ExternalReference ext_ref, uint32_t num_params) {
-  CallCFunction(ext_ref, static_cast<int>(num_params));
-}
-
-void LiftoffAssembler::FinishCCall() {
-  TurboAssembler::Move(sp, liftoff::kCCallLastArgAddrReg);
+void LiftoffAssembler::CallC(wasm::FunctionSig* sig,
+                             const LiftoffRegister* args,
+                             const LiftoffRegister* rets,
+                             ValueType out_argument_type, int stack_bytes,
+                             ExternalReference ext_ref) {
+  addiu(sp, sp, -stack_bytes);
+
+  int arg_bytes = 0;
+  for (ValueType param_type : sig->parameters()) {
+    liftoff::Store(this, sp, arg_bytes, *args++, param_type);
+    arg_bytes += WasmOpcodes::MemSize(param_type);
+  }
+  DCHECK_LE(arg_bytes, stack_bytes);
+
+  // Pass a pointer to the buffer with the arguments to the C function.
+  // On mips, the first argument is passed in {a0}.
+  constexpr Register kFirstArgReg = a0;
+  mov(kFirstArgReg, sp);
+
+  // Now call the C function.
+  constexpr Register kScratch = at;
+  constexpr int kNumCCallArgs = 1;
+  PrepareCallCFunction(kNumCCallArgs, kScratch);
+  CallCFunction(ext_ref, kNumCCallArgs);
+
+  // Move return value to the right register.
+  const LiftoffRegister* next_result_reg = rets;
+  if (sig->return_count() > 0) {
+    DCHECK_EQ(1, sig->return_count());
+    constexpr Register kReturnReg = v0;
+    if (kReturnReg != next_result_reg->gp()) {
+      Move(*next_result_reg, LiftoffRegister(kReturnReg), sig->GetReturn(0));
+    }
+    ++next_result_reg;
+  }
+
+  // Load potential output value from the buffer on the stack.
+  if (out_argument_type != kWasmStmt) {
+    liftoff::Load(this, *next_result_reg, MemOperand(sp, 0), out_argument_type);
+  }
+
+  addiu(sp, sp, stack_bytes);
 }
 
 void LiftoffAssembler::CallNativeWasmCode(Address addr) {
......
@@ -28,11 +28,6 @@ inline MemOperand GetStackSlot(uint32_t index) {
 
 inline MemOperand GetInstanceOperand() { return MemOperand(fp, -16); }
 
-// Use this register to store the address of the last argument pushed on the
-// stack for a call to C. This register must be callee saved according to the c
-// calling convention.
-static constexpr Register kCCallLastArgAddrReg = s1;
-
 inline void Load(LiftoffAssembler* assm, LiftoffRegister dst, MemOperand src,
                  ValueType type) {
   switch (type) {
@@ -53,6 +48,27 @@ inline void Load(LiftoffAssembler* assm, LiftoffRegister dst, MemOperand src,
   }
 }
 
+inline void Store(LiftoffAssembler* assm, Register base, int32_t offset,
+                  LiftoffRegister src, ValueType type) {
+  MemOperand dst(base, offset);
+  switch (type) {
+    case kWasmI32:
+      assm->Usw(src.gp(), dst);
+      break;
+    case kWasmI64:
+      assm->Usd(src.gp(), dst);
+      break;
+    case kWasmF32:
+      assm->Uswc1(src.fp(), dst, t8);
+      break;
+    case kWasmF64:
+      assm->Usdc1(src.fp(), dst, t8);
+      break;
+    default:
+      UNREACHABLE();
+  }
+}
+
 inline void push(LiftoffAssembler* assm, LiftoffRegister reg, ValueType type) {
   switch (type) {
     case kWasmI32:
@@ -889,56 +905,48 @@ void LiftoffAssembler::DropStackSlotsAndRet(uint32_t num_stack_slots) {
   TurboAssembler::DropAndRet(static_cast<int>(num_stack_slots));
 }
 
-void LiftoffAssembler::PrepareCCall(wasm::FunctionSig* sig,
-                                    const LiftoffRegister* args,
-                                    ValueType out_argument_type) {
-  for (ValueType param_type : sig->parameters()) {
-    liftoff::push(this, *args++, param_type);
-  }
-  if (out_argument_type != kWasmStmt) {
-    daddiu(sp, sp, -kPointerSize);
-  }
-  // Save the original sp (before the first push), such that we can later
-  // compute pointers to the pushed values. Do this only *after* pushing the
-  // values, because {kCCallLastArgAddrReg} might collide with an arg register.
-  int num_c_call_arguments = static_cast<int>(sig->parameter_count()) +
-                             (out_argument_type != kWasmStmt);
-  int pushed_bytes = kPointerSize * num_c_call_arguments;
-  daddiu(liftoff::kCCallLastArgAddrReg, sp, pushed_bytes);
-  constexpr Register kScratch = at;
-  static_assert(kScratch != liftoff::kCCallLastArgAddrReg, "collision");
-  PrepareCallCFunction(num_c_call_arguments, kScratch);
-}
-
-void LiftoffAssembler::SetCCallRegParamAddr(Register dst, int param_byte_offset,
-                                            ValueType type) {
-  // Check that we don't accidentally override kCCallLastArgAddrReg.
-  DCHECK_NE(liftoff::kCCallLastArgAddrReg, dst);
-  daddiu(dst, liftoff::kCCallLastArgAddrReg, -param_byte_offset);
-}
-
-void LiftoffAssembler::SetCCallStackParamAddr(int stack_param_idx,
-                                              int param_byte_offset,
-                                              ValueType type) {
-  static constexpr Register kScratch = at;
-  SetCCallRegParamAddr(kScratch, param_byte_offset, type);
-  sd(kScratch, MemOperand(sp, stack_param_idx * kPointerSize));
-}
-
-void LiftoffAssembler::LoadCCallOutArgument(LiftoffRegister dst, ValueType type,
-                                            int param_byte_offset) {
-  // Check that we don't accidentally override kCCallLastArgAddrReg.
-  DCHECK_NE(LiftoffRegister(liftoff::kCCallLastArgAddrReg), dst);
-  MemOperand src(liftoff::kCCallLastArgAddrReg, -param_byte_offset);
-  liftoff::Load(this, dst, src, type);
-}
-
-void LiftoffAssembler::CallC(ExternalReference ext_ref, uint32_t num_params) {
-  CallCFunction(ext_ref, static_cast<int>(num_params));
-}
-
-void LiftoffAssembler::FinishCCall() {
-  TurboAssembler::Move(sp, liftoff::kCCallLastArgAddrReg);
+void LiftoffAssembler::CallC(wasm::FunctionSig* sig,
+                             const LiftoffRegister* args,
+                             const LiftoffRegister* rets,
+                             ValueType out_argument_type, int stack_bytes,
+                             ExternalReference ext_ref) {
+  daddiu(sp, sp, -stack_bytes);
+
+  int arg_bytes = 0;
+  for (ValueType param_type : sig->parameters()) {
+    liftoff::Store(this, sp, arg_bytes, *args++, param_type);
+    arg_bytes += WasmOpcodes::MemSize(param_type);
+  }
+  DCHECK_LE(arg_bytes, stack_bytes);
+
+  // Pass a pointer to the buffer with the arguments to the C function.
+  // On mips, the first argument is passed in {a0}.
+  constexpr Register kFirstArgReg = a0;
+  mov(kFirstArgReg, sp);
+
+  // Now call the C function.
+  constexpr Register kScratch = at;
+  constexpr int kNumCCallArgs = 1;
+  PrepareCallCFunction(kNumCCallArgs, kScratch);
+  CallCFunction(ext_ref, kNumCCallArgs);
+
+  // Move return value to the right register.
+  const LiftoffRegister* next_result_reg = rets;
+  if (sig->return_count() > 0) {
+    DCHECK_EQ(1, sig->return_count());
+    constexpr Register kReturnReg = v0;
+    if (kReturnReg != next_result_reg->gp()) {
+      Move(*next_result_reg, LiftoffRegister(kReturnReg), sig->GetReturn(0));
+    }
+    ++next_result_reg;
+  }
+
+  // Load potential output value from the buffer on the stack.
+  if (out_argument_type != kWasmStmt) {
+    liftoff::Load(this, *next_result_reg, MemOperand(sp, 0), out_argument_type);
+  }
+
+  daddiu(sp, sp, stack_bytes);
 }
 
 void LiftoffAssembler::CallNativeWasmCode(Address addr) {
......
@@ -281,34 +281,14 @@ void LiftoffAssembler::DropStackSlotsAndRet(uint32_t num_stack_slots) {
   BAILOUT("DropStackSlotsAndRet");
 }
 
-void LiftoffAssembler::PrepareCCall(wasm::FunctionSig* sig,
-                                    const LiftoffRegister* args,
-                                    ValueType out_argument_type) {
-  BAILOUT("PrepareCCall");
-}
-
-void LiftoffAssembler::SetCCallRegParamAddr(Register dst, int param_byte_offset,
-                                            ValueType type) {
-  BAILOUT("SetCCallRegParamAddr");
-}
-
-void LiftoffAssembler::SetCCallStackParamAddr(int stack_param_idx,
-                                              int param_byte_offset,
-                                              ValueType type) {
-  BAILOUT("SetCCallStackParamAddr");
-}
-
-void LiftoffAssembler::LoadCCallOutArgument(LiftoffRegister dst, ValueType type,
-                                            int param_byte_offset) {
-  BAILOUT("LoadCCallOutArgument");
-}
-
-void LiftoffAssembler::CallC(ExternalReference ext_ref, uint32_t num_params) {
+void LiftoffAssembler::CallC(wasm::FunctionSig* sig,
+                             const LiftoffRegister* args,
+                             const LiftoffRegister* rets,
+                             ValueType out_argument_type, int stack_bytes,
+                             ExternalReference ext_ref) {
   BAILOUT("CallC");
 }
 
-void LiftoffAssembler::FinishCCall() { BAILOUT("FinishCCall"); }
-
 void LiftoffAssembler::CallNativeWasmCode(Address addr) {
   BAILOUT("CallNativeWasmCode");
 }
......
@@ -281,34 +281,14 @@ void LiftoffAssembler::DropStackSlotsAndRet(uint32_t num_stack_slots) {
   BAILOUT("DropStackSlotsAndRet");
 }
 
-void LiftoffAssembler::PrepareCCall(wasm::FunctionSig* sig,
-                                    const LiftoffRegister* args,
-                                    ValueType out_argument_type) {
-  BAILOUT("PrepareCCall");
-}
-
-void LiftoffAssembler::SetCCallRegParamAddr(Register dst, int param_byte_offset,
-                                            ValueType type) {
-  BAILOUT("SetCCallRegParamAddr");
-}
-
-void LiftoffAssembler::SetCCallStackParamAddr(int stack_param_idx,
-                                              int param_byte_offset,
-                                              ValueType type) {
-  BAILOUT("SetCCallStackParamAddr");
-}
-
-void LiftoffAssembler::LoadCCallOutArgument(LiftoffRegister dst, ValueType type,
-                                            int param_byte_offset) {
-  BAILOUT("LoadCCallOutArgument");
-}
-
-void LiftoffAssembler::CallC(ExternalReference ext_ref, uint32_t num_params) {
+void LiftoffAssembler::CallC(wasm::FunctionSig* sig,
+                             const LiftoffRegister* args,
+                             const LiftoffRegister* rets,
+                             ValueType out_argument_type, int stack_bytes,
+                             ExternalReference ext_ref) {
   BAILOUT("CallC");
 }
 
-void LiftoffAssembler::FinishCCall() { BAILOUT("FinishCCall"); }
-
 void LiftoffAssembler::CallNativeWasmCode(Address addr) {
   BAILOUT("CallNativeWasmCode");
 }
......
@@ -34,11 +34,6 @@ inline Operand GetStackSlot(uint32_t index) {
 
 // TODO(clemensh): Make this a constexpr variable once Operand is constexpr.
 inline Operand GetInstanceOperand() { return Operand(rbp, -16); }
 
-// Use this register to store the address of the last argument pushed on the
-// stack for a call to C. This register must be callee saved according to the c
-// calling convention.
-static constexpr Register kCCallLastArgAddrReg = rbx;
-
 inline Operand GetMemOp(LiftoffAssembler* assm, Register addr, Register offset,
                         uint32_t offset_imm, LiftoffRegList pinned) {
   // Wasm memory is limited to a size <2GB, so all offsets can be encoded as
@@ -69,6 +64,26 @@ inline void Load(LiftoffAssembler* assm, LiftoffRegister dst, Operand src,
   }
 }
 
+inline void Store(LiftoffAssembler* assm, Operand dst, LiftoffRegister src,
+                  ValueType type) {
+  switch (type) {
+    case kWasmI32:
+      assm->movl(dst, src.gp());
+      break;
+    case kWasmI64:
+      assm->movq(dst, src.gp());
+      break;
+    case kWasmF32:
+      assm->Movss(dst, src.fp());
+      break;
+    case kWasmF64:
+      assm->Movsd(dst, src.fp());
+      break;
+    default:
+      UNREACHABLE();
+  }
+}
+
 inline void push(LiftoffAssembler* assm, LiftoffRegister reg, ValueType type) {
   switch (type) {
     case kWasmI32:
@@ -1108,53 +1123,52 @@ void LiftoffAssembler::DropStackSlotsAndRet(uint32_t num_stack_slots) {
   ret(static_cast<int>(num_stack_slots * kPointerSize));
 }
 
-void LiftoffAssembler::PrepareCCall(wasm::FunctionSig* sig,
-                                    const LiftoffRegister* args,
-                                    ValueType out_argument_type) {
-  for (ValueType param_type : sig->parameters()) {
-    liftoff::push(this, *args++, param_type);
-  }
-  if (out_argument_type != kWasmStmt) {
-    subq(rsp, Immediate(kPointerSize));
-  }
-  // Save the original sp (before the first push), such that we can later
-  // compute pointers to the pushed values. Do this only *after* pushing the
-  // values, because {kCCallLastArgAddrReg} might collide with an arg register.
-  int num_c_call_arguments = static_cast<int>(sig->parameter_count()) +
-                             (out_argument_type != kWasmStmt);
-  int pushed_bytes = kPointerSize * num_c_call_arguments;
-  leaq(liftoff::kCCallLastArgAddrReg, Operand(rsp, pushed_bytes));
-  PrepareCallCFunction(num_c_call_arguments);
-}
-
-void LiftoffAssembler::SetCCallRegParamAddr(Register dst, int param_byte_offset,
-                                            ValueType type) {
-  // Check that we don't accidentally override kCCallLastArgAddrReg.
-  DCHECK_NE(liftoff::kCCallLastArgAddrReg, dst);
-  leaq(dst, Operand(liftoff::kCCallLastArgAddrReg, -param_byte_offset));
-}
-
-void LiftoffAssembler::SetCCallStackParamAddr(int stack_param_idx,
-                                              int param_byte_offset,
-                                              ValueType type) {
-  // On x64, all C call arguments fit in registers.
-  UNREACHABLE();
-}
-
-void LiftoffAssembler::LoadCCallOutArgument(LiftoffRegister dst, ValueType type,
-                                            int param_byte_offset) {
-  // Check that we don't accidentally override kCCallLastArgAddrReg.
-  DCHECK_NE(LiftoffRegister(liftoff::kCCallLastArgAddrReg), dst);
-  Operand src(liftoff::kCCallLastArgAddrReg, -param_byte_offset);
-  liftoff::Load(this, dst, src, type);
-}
-
-void LiftoffAssembler::CallC(ExternalReference ext_ref, uint32_t num_params) {
-  CallCFunction(ext_ref, static_cast<int>(num_params));
-}
-
-void LiftoffAssembler::FinishCCall() {
-  movp(rsp, liftoff::kCCallLastArgAddrReg);
+void LiftoffAssembler::CallC(wasm::FunctionSig* sig,
+                             const LiftoffRegister* args,
+                             const LiftoffRegister* rets,
+                             ValueType out_argument_type, int stack_bytes,
+                             ExternalReference ext_ref) {
+  subp(rsp, Immediate(stack_bytes));
+
+  int arg_bytes = 0;
+  for (ValueType param_type : sig->parameters()) {
+    liftoff::Store(this, Operand(rsp, arg_bytes), *args++, param_type);
+    arg_bytes += WasmOpcodes::MemSize(param_type);
+  }
+  DCHECK_LE(arg_bytes, stack_bytes);
+
+  // Pass a pointer to the buffer with the arguments to the C function.
+  // On win64, the first argument is in {rcx}, otherwise it is {rdi}.
+#ifdef _WIN64
+  constexpr Register kFirstArgReg = rcx;
+#else
+  constexpr Register kFirstArgReg = rdi;
+#endif
+  movp(kFirstArgReg, rsp);
+
+  constexpr int kNumCCallArgs = 1;
+
+  // Now call the C function.
+  PrepareCallCFunction(kNumCCallArgs);
+  CallCFunction(ext_ref, kNumCCallArgs);
+
+  // Move return value to the right register.
+  const LiftoffRegister* next_result_reg = rets;
+  if (sig->return_count() > 0) {
+    DCHECK_EQ(1, sig->return_count());
+    constexpr Register kReturnReg = rax;
+    if (kReturnReg != next_result_reg->gp()) {
+      Move(*next_result_reg, LiftoffRegister(kReturnReg), sig->GetReturn(0));
+    }
+    ++next_result_reg;
+  }
+
+  // Load potential output value from the buffer on the stack.
+  if (out_argument_type != kWasmStmt) {
+    liftoff::Load(this, *next_result_reg, Operand(rsp, 0), out_argument_type);
+  }
+
+  addp(rsp, Immediate(stack_bytes));
 }
 
 void LiftoffAssembler::CallNativeWasmCode(Address addr) {
......
@@ -62,11 +62,15 @@ void f64_nearest_int_wrapper(double* param) {
   WriteDoubleValue(param, nearbyint(ReadDoubleValue(param)));
 }
 
-void int64_to_float32_wrapper(int64_t* input, float* output) {
-  *output = static_cast<float>(ReadUnalignedInt64(input));
+void int64_to_float32_wrapper(Address data) {
+  int64_t input = ReadUnalignedValue<int64_t>(data);
+  WriteUnalignedValue<float>(data, static_cast<float>(input));
 }
 
-void uint64_to_float32_wrapper(uint64_t* input, float* output) {
+void uint64_to_float32_wrapper(Address data) {
+  uint64_t input = ReadUnalignedValue<uint64_t>(data);
+  float result = static_cast<float>(input);
+
 #if V8_CC_MSVC
   // With MSVC we use static_cast<float>(uint32_t) instead of
   // static_cast<float>(uint64_t) to achieve round-to-nearest-ties-even
@@ -75,8 +79,8 @@ void uint64_to_float32_wrapper(uint64_t* input, float* output) {
   // achieve proper rounding in all cases we have to adjust the high_word
   // with a "rounding bit" sometimes. The rounding bit is stored in the LSB of
   // the high_word if the low_word may affect the rounding of the high_word.
-  uint32_t low_word = static_cast<uint32_t>(*input & 0xFFFFFFFF);
-  uint32_t high_word = static_cast<uint32_t>(*input >> 32);
+  uint32_t low_word = static_cast<uint32_t>(input & 0xFFFFFFFF);
+  uint32_t high_word = static_cast<uint32_t>(input >> 32);
 
   float shift = static_cast<float>(1ull << 32);
   // If the MSB of the high_word is set, then we make space for a rounding bit.
@@ -90,86 +94,88 @@ void uint64_to_float32_wrapper(uint64_t* input, float* output) {
     high_word |= 1;
   }
 
-  float result = static_cast<float>(high_word);
+  result = static_cast<float>(high_word);
   result *= shift;
   result += static_cast<float>(low_word);
-  *output = result;
-#else
-  *output = static_cast<float>(ReadUnalignedUint64(input));
 #endif
+  WriteUnalignedValue<float>(data, result);
 }
 
-void int64_to_float64_wrapper(int64_t* input, double* output) {
-  WriteDoubleValue(output, static_cast<double>(ReadUnalignedInt64(input)));
+void int64_to_float64_wrapper(Address data) {
+  int64_t input = ReadUnalignedValue<int64_t>(data);
+  WriteUnalignedValue<double>(data, static_cast<double>(input));
 }
 
-void uint64_to_float64_wrapper(uint64_t* input, double* output) {
+void uint64_to_float64_wrapper(Address data) {
+  uint64_t input = ReadUnalignedValue<uint64_t>(data);
+  double result = static_cast<double>(input);
+
 #if V8_CC_MSVC
   // With MSVC we use static_cast<double>(uint32_t) instead of
   // static_cast<double>(uint64_t) to achieve round-to-nearest-ties-even
   // semantics. The idea is to calculate
   // static_cast<double>(high_word) * 2^32 + static_cast<double>(low_word).
-  uint32_t low_word = static_cast<uint32_t>(*input & 0xFFFFFFFF);
-  uint32_t high_word = static_cast<uint32_t>(*input >> 32);
+  uint32_t low_word = static_cast<uint32_t>(input & 0xFFFFFFFF);
+  uint32_t high_word = static_cast<uint32_t>(input >> 32);
 
   double shift = static_cast<double>(1ull << 32);
 
-  double result = static_cast<double>(high_word);
+  result = static_cast<double>(high_word);
   result *= shift;
   result += static_cast<double>(low_word);
-  *output = result;
-#else
-  WriteDoubleValue(output, static_cast<double>(ReadUnalignedUint64(input)));
 #endif
+  WriteUnalignedValue<double>(data, result);
 }
 
-int32_t float32_to_int64_wrapper(float* input, int64_t* output) {
+int32_t float32_to_int64_wrapper(Address data) {
   // We use "<" here to check the upper bound because of rounding problems: With
   // "<=" some inputs would be considered within int64 range which are actually
   // not within int64 range.
-  if (*input >= static_cast<float>(std::numeric_limits<int64_t>::min()) &&
-      *input < static_cast<float>(std::numeric_limits<int64_t>::max())) {
-    WriteUnalignedInt64(output, static_cast<int64_t>(*input));
+  float input = ReadUnalignedValue<float>(data);
+  if (input >= static_cast<float>(std::numeric_limits<int64_t>::min()) &&
+      input < static_cast<float>(std::numeric_limits<int64_t>::max())) {
+    WriteUnalignedValue<int64_t>(data, static_cast<int64_t>(input));
     return 1;
   }
   return 0;
 }
 
-int32_t float32_to_uint64_wrapper(float* input, uint64_t* output) {
+int32_t float32_to_uint64_wrapper(Address data) {
+  float input = ReadUnalignedValue<float>(data);
   // We use "<" here to check the upper bound because of rounding problems: With
   // "<=" some inputs would be considered within uint64 range which are actually
   // not within uint64 range.
-  if (*input > -1.0 &&
-      *input < static_cast<float>(std::numeric_limits<uint64_t>::max())) {
-    WriteUnalignedUint64(output, static_cast<uint64_t>(*input));
+  if (input > -1.0 &&
+      input < static_cast<float>(std::numeric_limits<uint64_t>::max())) {
+    WriteUnalignedValue<uint64_t>(data, static_cast<uint64_t>(input));
     return 1;
   }
   return 0;
 }
 
-int32_t float64_to_int64_wrapper(double* input, int64_t* output) {
+int32_t float64_to_int64_wrapper(Address data) {
   // We use "<" here to check the upper bound because of rounding problems: With
   // "<=" some inputs would be considered within int64 range which are actually
   // not within int64 range.
-  double input_val = ReadDoubleValue(input);
-  if (input_val >= static_cast<double>(std::numeric_limits<int64_t>::min()) &&
-      input_val < static_cast<double>(std::numeric_limits<int64_t>::max())) {
-    WriteUnalignedInt64(output, static_cast<int64_t>(input_val));
+  double input = ReadUnalignedValue<double>(data);
+  if (input >= static_cast<double>(std::numeric_limits<int64_t>::min()) &&
+      input < static_cast<double>(std::numeric_limits<int64_t>::max())) {
+    WriteUnalignedValue<int64_t>(data, static_cast<int64_t>(input));
     return 1;
   }
   return 0;
 }
 
-int32_t float64_to_uint64_wrapper(double* input, uint64_t* output) {
+int32_t float64_to_uint64_wrapper(Address data) {
  // We use "<" here to check the upper bound because of rounding problems: With
   // "<=" some inputs would be considered within uint64 range which are actually
   // not within uint64 range.
-  double input_val = ReadDoubleValue(input);
-  if (input_val > -1.0 &&
-      input_val < static_cast<double>(std::numeric_limits<uint64_t>::max())) {
-    WriteUnalignedUint64(output, static_cast<uint64_t>(input_val));
+  double input = ReadUnalignedValue<double>(data);
+  if (input > -1.0 &&
+      input < static_cast<double>(std::numeric_limits<uint64_t>::max())) {
+    WriteUnalignedValue<uint64_t>(data, static_cast<uint64_t>(input));
     return 1;
   }
   return 0;
@@ -218,30 +224,32 @@ int32_t uint64_mod_wrapper(uint64_t* dst, uint64_t* src) {
   return 1;
 }
 
-uint32_t word32_ctz_wrapper(uint32_t* input) {
-  return base::bits::CountTrailingZeros(*input);
+uint32_t word32_ctz_wrapper(Address data) {
+  return base::bits::CountTrailingZeros(ReadUnalignedValue<uint32_t>(data));
 }
 
-uint32_t word64_ctz_wrapper(uint64_t* input) {
-  return base::bits::CountTrailingZeros(ReadUnalignedUint64(input));
+uint32_t word64_ctz_wrapper(Address data) {
  return base::bits::CountTrailingZeros(ReadUnalignedValue<uint64_t>(data));
 }
 
-uint32_t word32_popcnt_wrapper(uint32_t* input) {
-  return base::bits::CountPopulation(*input);
+uint32_t word32_popcnt_wrapper(Address data) {
  return base::bits::CountPopulation(ReadUnalignedValue<uint32_t>(data));
 }
 
-uint32_t word64_popcnt_wrapper(uint64_t* input) {
-  return base::bits::CountPopulation(ReadUnalignedUint64(input));
+uint32_t word64_popcnt_wrapper(Address data) {
  return base::bits::CountPopulation(ReadUnalignedValue<uint64_t>(data));
 }
 
-uint32_t word32_rol_wrapper(uint32_t* input_p, uint32_t* shift_p) {
-  uint32_t shift = (*shift_p & 31);
-  return (*input_p << shift) | (*input_p >> (32 - shift));
+uint32_t word32_rol_wrapper(Address data) {
+  uint32_t input = ReadUnalignedValue<uint32_t>(data);
+  uint32_t shift = ReadUnalignedValue<uint32_t>(data + sizeof(input)) & 31;
+  return (input << shift) | (input >> (32 - shift));
 }
 
-uint32_t word32_ror_wrapper(uint32_t* input_p, uint32_t* shift_p) {
-  uint32_t shift = (*shift_p & 31);
-  return (*input_p >> shift) | (*input_p << (32 - shift));
+uint32_t word32_ror_wrapper(Address data) {
+  uint32_t input = ReadUnalignedValue<uint32_t>(data);
+  uint32_t shift = ReadUnalignedValue<uint32_t>(data + sizeof(input)) & 31;
  return (input >> shift) | (input << (32 - shift));
 }
 
 void float64_pow_wrapper(double* param0, double* param1) {
......
@@ -2,11 +2,13 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
-#include <stdint.h>
-
 #ifndef V8_WASM_WASM_EXTERNAL_REFS_H_
 #define V8_WASM_WASM_EXTERNAL_REFS_H_
 
+#include <stdint.h>
+
+#include "src/globals.h"
+
 namespace v8 {
 namespace internal {
 namespace wasm {
@@ -27,21 +29,21 @@ void f64_ceil_wrapper(double* param);
 void f64_nearest_int_wrapper(double* param);
 
-void int64_to_float32_wrapper(int64_t* input, float* output);
+void int64_to_float32_wrapper(Address data);
 
-void uint64_to_float32_wrapper(uint64_t* input, float* output);
+void uint64_to_float32_wrapper(Address data);
 
-void int64_to_float64_wrapper(int64_t* input, double* output);
+void int64_to_float64_wrapper(Address data);
 
-void uint64_to_float64_wrapper(uint64_t* input, double* output);
+void uint64_to_float64_wrapper(Address data);
 
-int32_t float32_to_int64_wrapper(float* input, int64_t* output);
+int32_t float32_to_int64_wrapper(Address data);
 
-int32_t float32_to_uint64_wrapper(float* input, uint64_t* output);
+int32_t float32_to_uint64_wrapper(Address data);
 
-int32_t float64_to_int64_wrapper(double* input, int64_t* output);
+int32_t float64_to_int64_wrapper(Address data);
 
-int32_t float64_to_uint64_wrapper(double* input, uint64_t* output);
+int32_t float64_to_uint64_wrapper(Address data);
 
 int32_t int64_div_wrapper(int64_t* dst, int64_t* src);
@@ -51,17 +53,17 @@ int32_t uint64_div_wrapper(uint64_t* dst, uint64_t* src);
 int32_t uint64_mod_wrapper(uint64_t* dst, uint64_t* src);
 
-uint32_t word32_ctz_wrapper(uint32_t* input);
+uint32_t word32_ctz_wrapper(Address data);
 
-uint32_t word64_ctz_wrapper(uint64_t* input);
+uint32_t word64_ctz_wrapper(Address data);
 
-uint32_t word32_popcnt_wrapper(uint32_t* input);
+uint32_t word32_popcnt_wrapper(Address data);
 
-uint32_t word64_popcnt_wrapper(uint64_t* input);
+uint32_t word64_popcnt_wrapper(Address data);
 
-uint32_t word32_rol_wrapper(uint32_t* input_p, uint32_t* shift_p);
+uint32_t word32_rol_wrapper(Address data);
 
-uint32_t word32_ror_wrapper(uint32_t* input_p, uint32_t* shift_p);
+uint32_t word32_ror_wrapper(Address data);
 
 void float64_pow_wrapper(double* param0, double* param1);
......
@@ -378,7 +378,7 @@ uint32_t ExecuteI32Ctz(uint32_t val, TrapReason* trap) {
 }
 
 uint32_t ExecuteI32Popcnt(uint32_t val, TrapReason* trap) {
-  return word32_popcnt_wrapper(&val);
+  return base::bits::CountPopulation(val);
 }
 
 inline uint32_t ExecuteI32Eqz(uint32_t val, TrapReason* trap) {
@@ -394,7 +394,7 @@ inline uint64_t ExecuteI64Ctz(uint64_t val, TrapReason* trap) {
 }
 
 inline int64_t ExecuteI64Popcnt(uint64_t val, TrapReason* trap) {
-  return word64_popcnt_wrapper(&val);
+  return base::bits::CountPopulation(val);
 }
 
 inline int32_t ExecuteI64Eqz(uint64_t val, TrapReason* trap) {
@@ -466,16 +466,32 @@ int_type ExecuteConvertSaturate(float_type a) {
                   : std::numeric_limits<int_type>::max());
 }
 
+template <typename dst_type, typename src_type, void (*fn)(Address)>
+inline dst_type CallExternalIntToFloatFunction(src_type input) {
+  uint8_t data[std::max(sizeof(dst_type), sizeof(src_type))];
+  Address data_addr = reinterpret_cast<Address>(data);
+  WriteUnalignedValue<src_type>(data_addr, input);
+  fn(data_addr);
+  return ReadUnalignedValue<dst_type>(data_addr);
+}
+
+template <typename dst_type, typename src_type, int32_t (*fn)(Address)>
+inline dst_type CallExternalFloatToIntFunction(src_type input,
+                                               TrapReason* trap) {
+  uint8_t data[std::max(sizeof(dst_type), sizeof(src_type))];
+  Address data_addr = reinterpret_cast<Address>(data);
+  WriteUnalignedValue<src_type>(data_addr, input);
+  if (!fn(data_addr)) *trap = kTrapFloatUnrepresentable;
+  return ReadUnalignedValue<dst_type>(data_addr);
+}
+
 inline uint32_t ExecuteI32ConvertI64(int64_t a, TrapReason* trap) {
   return static_cast<uint32_t>(a & 0xFFFFFFFF);
 }
 
 int64_t ExecuteI64SConvertF32(float a, TrapReason* trap) {
-  int64_t output;
-  if (!float32_to_int64_wrapper(&a, &output)) {
-    *trap = kTrapFloatUnrepresentable;
-  }
-  return output;
+  return CallExternalFloatToIntFunction<int64_t, float,
+                                        float32_to_int64_wrapper>(a, trap);
 }
 
 int64_t ExecuteI64SConvertSatF32(float a) {
@@ -490,11 +506,8 @@ int64_t ExecuteI64SConvertSatF32(float a) {
 }
 
 int64_t ExecuteI64SConvertF64(double a, TrapReason* trap) {
-  int64_t output;
-  if (!float64_to_int64_wrapper(&a, &output)) {
-    *trap = kTrapFloatUnrepresentable;
-  }
-  return output;
+  return CallExternalFloatToIntFunction<int64_t, double,
+                                        float64_to_int64_wrapper>(a, trap);
 }
 
 int64_t ExecuteI64SConvertSatF64(double a) {
@@ -509,11 +522,8 @@ int64_t ExecuteI64SConvertSatF64(double a) {
 }
 
 uint64_t ExecuteI64UConvertF32(float a, TrapReason* trap) {
-  uint64_t output;
-  if (!float32_to_uint64_wrapper(&a, &output)) {
-    *trap = kTrapFloatUnrepresentable;
-  }
-  return output;
+  return CallExternalFloatToIntFunction<uint64_t, float,
+                                        float32_to_uint64_wrapper>(a, trap);
 }
 
 uint64_t ExecuteI64UConvertSatF32(float a) {
@@ -528,11 +538,8 @@ uint64_t ExecuteI64UConvertSatF32(float a) {
 }
 
 uint64_t ExecuteI64UConvertF64(double a, TrapReason* trap) {
-  uint64_t output;
-  if (!float64_to_uint64_wrapper(&a, &output)) {
-    *trap = kTrapFloatUnrepresentable;
-  }
-  return output;
+  return CallExternalFloatToIntFunction<uint64_t, double,
+                                        float64_to_uint64_wrapper>(a, trap);
 }
 
 uint64_t ExecuteI64UConvertSatF64(double a) {
@@ -563,15 +570,12 @@ inline float ExecuteF32UConvertI32(uint32_t a, TrapReason* trap) {
 }
 
 inline float ExecuteF32SConvertI64(int64_t a, TrapReason* trap) {
-  float output;
-  int64_to_float32_wrapper(&a, &output);
-  return output;
+  return static_cast<float>(a);
 }
 
 inline float ExecuteF32UConvertI64(uint64_t a, TrapReason* trap) {
-  float output;
-  uint64_to_float32_wrapper(&a, &output);
-  return output;
+  return CallExternalIntToFloatFunction<float, uint64_t,
+                                        uint64_to_float32_wrapper>(a);
 }
 
 inline float ExecuteF32ConvertF64(double a, TrapReason* trap) {
@@ -591,15 +595,12 @@ inline double ExecuteF64UConvertI32(uint32_t a, TrapReason* trap) {
 }
 
 inline double ExecuteF64SConvertI64(int64_t a, TrapReason* trap) {
-  double output;
-  int64_to_float64_wrapper(&a, &output);
-  return output;
+  return static_cast<double>(a);
 }
 
 inline double ExecuteF64UConvertI64(uint64_t a, TrapReason* trap) {
-  double output;
-  uint64_to_float64_wrapper(&a, &output);
-  return output;
+  return CallExternalIntToFloatFunction<double, uint64_t,
+                                        uint64_to_float64_wrapper>(a);
 }
 
 inline double ExecuteF64ConvertF32(float a, TrapReason* trap) {
......
...@@ -82,6 +82,81 @@ void TestExternalReference(BufferedRawMachineAssemblerTester<R>* m, ...@@ -82,6 +82,81 @@ void TestExternalReference(BufferedRawMachineAssemblerTester<R>* m,
CHECK_EQ(comparison_param2, param2); CHECK_EQ(comparison_param2, param2);
} }
+
+template <typename InType, typename OutType, typename Iterable>
+void TestExternalReference_ConvertOp(
+    BufferedRawMachineAssemblerTester<int32_t>* m, ExternalReference ref,
+    void (*wrapper)(Address), Iterable inputs) {
+  uint8_t buffer[std::max(sizeof(InType), sizeof(OutType))];
+  Address buffer_addr = reinterpret_cast<Address>(buffer);
+  Node* function = m->ExternalConstant(ref);
+  m->CallCFunction1(MachineType::Pointer(), MachineType::Pointer(), function,
+                    m->PointerConstant(buffer));
+  m->Return(m->Int32Constant(4356));
+  for (InType input : inputs) {
+    WriteUnalignedValue<InType>(buffer_addr, input);
+    m->Call();
+    OutType output = ReadUnalignedValue<OutType>(buffer_addr);
+    WriteUnalignedValue<InType>(buffer_addr, input);
+    wrapper(buffer_addr);
+    OutType expected_output = ReadUnalignedValue<OutType>(buffer_addr);
+    CHECK_EQ(expected_output, output);
+  }
+}
+
+template <typename InType, typename OutType, typename Iterable>
+void TestExternalReference_ConvertOpWithOutputAndReturn(
+    BufferedRawMachineAssemblerTester<int32_t>* m, ExternalReference ref,
+    int32_t (*wrapper)(Address), Iterable inputs) {
+  uint8_t buffer[std::max(sizeof(InType), sizeof(OutType))];
+  Address buffer_addr = reinterpret_cast<Address>(buffer);
+  Node* function = m->ExternalConstant(ref);
+  m->Return(m->CallCFunction1(MachineType::Int32(), MachineType::Pointer(),
+                              function, m->PointerConstant(buffer)));
+  for (InType input : inputs) {
+    WriteUnalignedValue<InType>(buffer_addr, input);
+    int32_t ret = m->Call();
+    OutType output = ReadUnalignedValue<OutType>(buffer_addr);
+    WriteUnalignedValue<InType>(buffer_addr, input);
+    int32_t expected_ret = wrapper(buffer_addr);
+    OutType expected_output = ReadUnalignedValue<OutType>(buffer_addr);
+    CHECK_EQ(expected_ret, ret);
+    CHECK_EQ(expected_output, output);
+  }
+}
+
+template <typename InType, typename OutType, typename Iterable>
+void TestExternalReference_ConvertOpWithReturn(
+    BufferedRawMachineAssemblerTester<OutType>* m, ExternalReference ref,
+    OutType (*wrapper)(Address), Iterable inputs) {
+  uint8_t buffer[sizeof(InType)];
+  Address buffer_addr = reinterpret_cast<Address>(buffer);
+  Node* function = m->ExternalConstant(ref);
+  m->Return(m->CallCFunction1(MachineType::Int32(), MachineType::Pointer(),
+                              function, m->PointerConstant(buffer)));
+  for (InType input : inputs) {
+    WriteUnalignedValue<InType>(buffer_addr, input);
+    OutType ret = m->Call();
+    WriteUnalignedValue<InType>(buffer_addr, input);
+    OutType expected_ret = wrapper(buffer_addr);
+    CHECK_EQ(expected_ret, ret);
+  }
+}
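Each of the three helpers corresponds to one wrapper shape exercised below: TestExternalReference_ConvertOp drives void(Address) wrappers whose result is read back out of the buffer (the int64/uint64-to-float conversions), TestExternalReference_ConvertOpWithOutputAndReturn drives int32_t(Address) wrappers that also report success (the float-to-int64/uint64 conversions), and TestExternalReference_ConvertOpWithReturn drives wrappers that return their result directly without writing to the buffer (ctz and popcnt). All of them now check against a full ValueHelper input vector rather than a single hand-picked value.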
 TEST(RunCallF32Trunc) {
   BufferedRawMachineAssemblerTester<int32_t> m;
   ExternalReference ref = ExternalReference::wasm_f32_trunc(m.isolate());
...
@@ -133,61 +208,61 @@ TEST(RunCallF64RoundTiesEven) {
 TEST(RunCallInt64ToFloat32) {
   BufferedRawMachineAssemblerTester<int32_t> m;
   ExternalReference ref = ExternalReference::wasm_int64_to_float32(m.isolate());
-  TestExternalReference(&m, ref, wasm::int64_to_float32_wrapper, int64_t(-2124),
-                        1.25f);
+  TestExternalReference_ConvertOp<int64_t, float>(
+      &m, ref, wasm::int64_to_float32_wrapper, ValueHelper::int64_vector());
 }

 TEST(RunCallUint64ToFloat32) {
   BufferedRawMachineAssemblerTester<int32_t> m;
   ExternalReference ref =
       ExternalReference::wasm_uint64_to_float32(m.isolate());
-  TestExternalReference(&m, ref, wasm::uint64_to_float32_wrapper,
-                        uint64_t(2124), 1.25f);
+  TestExternalReference_ConvertOp<uint64_t, float>(
+      &m, ref, wasm::uint64_to_float32_wrapper, ValueHelper::uint64_vector());
 }

 TEST(RunCallInt64ToFloat64) {
   BufferedRawMachineAssemblerTester<int32_t> m;
   ExternalReference ref = ExternalReference::wasm_int64_to_float64(m.isolate());
-  TestExternalReference(&m, ref, wasm::int64_to_float64_wrapper, int64_t(2124),
-                        1.25);
+  TestExternalReference_ConvertOp<int64_t, double>(
+      &m, ref, wasm::int64_to_float64_wrapper, ValueHelper::int64_vector());
 }

 TEST(RunCallUint64ToFloat64) {
   BufferedRawMachineAssemblerTester<int32_t> m;
   ExternalReference ref =
       ExternalReference::wasm_uint64_to_float64(m.isolate());
-  TestExternalReference(&m, ref, wasm::uint64_to_float64_wrapper,
-                        uint64_t(2124), 1.25);
+  TestExternalReference_ConvertOp<uint64_t, double>(
+      &m, ref, wasm::uint64_to_float64_wrapper, ValueHelper::uint64_vector());
 }

 TEST(RunCallFloat32ToInt64) {
   BufferedRawMachineAssemblerTester<int32_t> m;
   ExternalReference ref = ExternalReference::wasm_float32_to_int64(m.isolate());
-  TestExternalReference(&m, ref, wasm::float32_to_int64_wrapper, 1.25f,
-                        int64_t(2124));
+  TestExternalReference_ConvertOpWithOutputAndReturn<float, int64_t>(
+      &m, ref, wasm::float32_to_int64_wrapper, ValueHelper::float32_vector());
 }

 TEST(RunCallFloat32ToUint64) {
   BufferedRawMachineAssemblerTester<int32_t> m;
   ExternalReference ref =
       ExternalReference::wasm_float32_to_uint64(m.isolate());
-  TestExternalReference(&m, ref, wasm::float32_to_uint64_wrapper, 1.25f,
-                        uint64_t(2124));
+  TestExternalReference_ConvertOpWithOutputAndReturn<float, uint64_t>(
+      &m, ref, wasm::float32_to_uint64_wrapper, ValueHelper::float32_vector());
 }

 TEST(RunCallFloat64ToInt64) {
   BufferedRawMachineAssemblerTester<int32_t> m;
   ExternalReference ref = ExternalReference::wasm_float64_to_int64(m.isolate());
-  TestExternalReference(&m, ref, wasm::float64_to_int64_wrapper, 1.25,
-                        int64_t(2124));
+  TestExternalReference_ConvertOpWithOutputAndReturn<double, int64_t>(
+      &m, ref, wasm::float64_to_int64_wrapper, ValueHelper::float64_vector());
 }

 TEST(RunCallFloat64ToUint64) {
   BufferedRawMachineAssemblerTester<int32_t> m;
   ExternalReference ref =
       ExternalReference::wasm_float64_to_uint64(m.isolate());
-  TestExternalReference(&m, ref, wasm::float64_to_uint64_wrapper, 1.25,
-                        uint64_t(2124));
+  TestExternalReference_ConvertOpWithOutputAndReturn<double, uint64_t>(
+      &m, ref, wasm::float64_to_uint64_wrapper, ValueHelper::float64_vector());
 }

 TEST(RunCallInt64Div) {
...
@@ -221,25 +296,29 @@ TEST(RunCallUint64Mod) {
 TEST(RunCallWord32Ctz) {
   BufferedRawMachineAssemblerTester<uint32_t> m;
   ExternalReference ref = ExternalReference::wasm_word32_ctz(m.isolate());
-  TestExternalReference(&m, ref, wasm::word32_ctz_wrapper, uint32_t(1774));
+  TestExternalReference_ConvertOpWithReturn<int32_t, uint32_t>(
+      &m, ref, wasm::word32_ctz_wrapper, ValueHelper::int32_vector());
 }

 TEST(RunCallWord64Ctz) {
   BufferedRawMachineAssemblerTester<uint32_t> m;
   ExternalReference ref = ExternalReference::wasm_word64_ctz(m.isolate());
-  TestExternalReference(&m, ref, wasm::word64_ctz_wrapper, uint64_t(1774));
+  TestExternalReference_ConvertOpWithReturn<int64_t, uint32_t>(
+      &m, ref, wasm::word64_ctz_wrapper, ValueHelper::int64_vector());
 }

 TEST(RunCallWord32Popcnt) {
   BufferedRawMachineAssemblerTester<uint32_t> m;
   ExternalReference ref = ExternalReference::wasm_word32_popcnt(m.isolate());
-  TestExternalReference(&m, ref, wasm::word32_popcnt_wrapper, uint32_t(1774));
+  TestExternalReference_ConvertOpWithReturn<uint32_t, uint32_t>(
+      &m, ref, wasm::word32_popcnt_wrapper, ValueHelper::int32_vector());
 }

 TEST(RunCallWord64Popcnt) {
   BufferedRawMachineAssemblerTester<uint32_t> m;
   ExternalReference ref = ExternalReference::wasm_word64_popcnt(m.isolate());
-  TestExternalReference(&m, ref, wasm::word64_popcnt_wrapper, uint64_t(1774));
+  TestExternalReference_ConvertOpWithReturn<int64_t, uint32_t>(
+      &m, ref, wasm::word64_popcnt_wrapper, ValueHelper::int64_vector());
 }

 TEST(RunCallFloat64Pow) {
...