Commit 7ad6b04e authored by Andreas Haas, committed by Commit Bot

Reland "[wasm] Refactor AtomicWait implementation"

Stack parameters in the StubCallDescriptor were set to the wrong type. I
changed it so that stack parameters that are specified in the
CallInterfaceDescriptor now use the type specified there. All other
parameters are assumed to be tagged, as has been the case until now.
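
For illustration, a minimal sketch of the resulting behavior (the actual change is in the Linkage::GetStubCallDescriptor hunk below; the helper name StackParamType exists only for this sketch):

// Stack parameters that are declared in the CallInterfaceDescriptor keep
// their declared MachineType; parameters beyond the descriptor's declared
// count stay AnyTagged, as before.
MachineType StackParamType(const CallInterfaceDescriptor& descriptor, int i) {
  return i < descriptor.GetParameterCount() ? descriptor.GetParameterType(i)
                                            : MachineType::AnyTagged();
}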

Original change's description:
> [wasm] Refactor AtomicWait implementation
>
> The existing implementation included aspects that are not
> straightforward to implement in Liftoff and seemed inefficient:
> * Convert the timeout in WebAssembly code from I64 to F64, just to
>   convert it back in the runtime.
>   * On 32-bit platforms this conversion needs an additional C-call.
> * Split the I64 expected value into two I32 values in the
>   wasm-compiler.
>   * Ideally the int64-lowering takes care of 32-bit specific handling.
>
> With this CL the timeout and the expected value are passed as I64 to
> the runtime (a builtin moves the I64 into a bigint for that). The
> int64-lowering takes care of 32-bit platforms. There are special
> builtins for 32-bit platforms, but they are written such that ideally
> also the int64-lowering could create them.
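
As a sketch of the mechanism described above (restating the WasmGraphBuilder change further down): the graph is built against the call descriptor of the 64-bit builtin, and on 32-bit targets the Int64Lowering swaps in the descriptor of the 32-bit builtin, which receives each I64 operand as a pair of I32 words.

CallDescriptor* WasmGraphBuilder::GetI64AtomicWaitCallDescriptor() {
  if (i64_atomic_wait_descriptor_) return i64_atomic_wait_descriptor_;
  // Default: the 64-bit builtin, which takes the expected value and the
  // timeout as plain I64 values.
  i64_atomic_wait_descriptor_ =
      GetBuiltinCallDescriptor<WasmI64AtomicWait64Descriptor>(
          this, StubCallMode::kCallWasmRuntimeStub);
  // Registered replacement: the int64-lowering rewrites the call on 32-bit
  // targets to use the 32-bit builtin, splitting each I64 into two I32s.
  AddInt64LoweringReplacement(
      i64_atomic_wait_descriptor_,
      GetBuiltinCallDescriptor<WasmI64AtomicWait32Descriptor>(
          this, StubCallMode::kCallWasmRuntimeStub));
  return i64_atomic_wait_descriptor_;
}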

Bug: v8:10108
Change-Id: Ib87b543666708457c0d686208a86e46cdca3f9a2
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2080362
Reviewed-by: Jakob Kummerow <jkummerow@chromium.org>
Reviewed-by: Tobias Tebbi <tebbi@chromium.org>
Commit-Queue: Andreas Haas <ahaas@chromium.org>
Cr-Commit-Position: refs/heads/master@{#66533}
parent 97d90590
@@ -852,8 +852,10 @@ namespace internal {
ASM(WasmCompileLazy, Dummy) \
ASM(WasmDebugBreak, Dummy) \
TFC(WasmAtomicNotify, WasmAtomicNotify) \
TFC(WasmI32AtomicWait, WasmI32AtomicWait) \
TFC(WasmI64AtomicWait, WasmI64AtomicWait) \
TFC(WasmI32AtomicWait32, WasmI32AtomicWait32) \
TFC(WasmI32AtomicWait64, WasmI32AtomicWait64) \
TFC(WasmI64AtomicWait32, WasmI64AtomicWait32) \
TFC(WasmI64AtomicWait64, WasmI64AtomicWait64) \
TFC(WasmMemoryGrow, WasmMemoryGrow) \
TFC(WasmTableGet, WasmTableGet) \
TFC(WasmTableSet, WasmTableSet) \
......
@@ -79,48 +79,118 @@ TF_BUILTIN(WasmAtomicNotify, WasmBuiltinsAssembler) {
Return(Unsigned(SmiToInt32(result_smi)));
}
TF_BUILTIN(WasmI32AtomicWait, WasmBuiltinsAssembler) {
TF_BUILTIN(WasmI32AtomicWait32, WasmBuiltinsAssembler) {
if (!Is32()) {
Unreachable();
return;
}
TNode<Uint32T> address =
UncheckedCast<Uint32T>(Parameter(Descriptor::kAddress));
TNode<Number> address_number = ChangeUint32ToTagged(address);
TNode<Int32T> expected_value =
UncheckedCast<Int32T>(Parameter(Descriptor::kExpectedValue));
TNode<Float64T> timeout =
UncheckedCast<Float64T>(Parameter(Descriptor::kTimeout));
TNode<Number> expected_value_number = ChangeInt32ToTagged(expected_value);
TNode<IntPtrT> timeout_low =
UncheckedCast<IntPtrT>(Parameter(Descriptor::kTimeoutLow));
TNode<IntPtrT> timeout_high =
UncheckedCast<IntPtrT>(Parameter(Descriptor::kTimeoutHigh));
TNode<BigInt> timeout = BigIntFromInt32Pair(timeout_low, timeout_high);
TNode<WasmInstanceObject> instance = LoadInstanceFromFrame();
TNode<Context> context = LoadContextFromInstance(instance);
TNode<Smi> result_smi =
CAST(CallRuntime(Runtime::kWasmI32AtomicWait, context, instance,
address_number, expected_value_number, timeout));
Return(Unsigned(SmiToInt32(result_smi)));
}
TF_BUILTIN(WasmI32AtomicWait64, WasmBuiltinsAssembler) {
if (!Is64()) {
Unreachable();
return;
}
TNode<Uint32T> address =
UncheckedCast<Uint32T>(Parameter(Descriptor::kAddress));
TNode<Number> address_number = ChangeUint32ToTagged(address);
TNode<Int32T> expected_value =
UncheckedCast<Int32T>(Parameter(Descriptor::kExpectedValue));
TNode<Number> expected_value_number = ChangeInt32ToTagged(expected_value);
TNode<Number> timeout_number = ChangeFloat64ToTagged(timeout);
TNode<IntPtrT> timeout_raw =
UncheckedCast<IntPtrT>(Parameter(Descriptor::kTimeout));
TNode<BigInt> timeout = BigIntFromInt64(timeout_raw);
TNode<WasmInstanceObject> instance = LoadInstanceFromFrame();
TNode<Context> context = LoadContextFromInstance(instance);
TNode<Smi> result_smi =
CAST(CallRuntime(Runtime::kWasmI32AtomicWait, context, instance,
address_number, expected_value_number, timeout_number));
address_number, expected_value_number, timeout));
Return(Unsigned(SmiToInt32(result_smi)));
}
TF_BUILTIN(WasmI64AtomicWait, WasmBuiltinsAssembler) {
TF_BUILTIN(WasmI64AtomicWait32, WasmBuiltinsAssembler) {
if (!Is32()) {
Unreachable();
return;
}
TNode<Uint32T> address =
UncheckedCast<Uint32T>(Parameter(Descriptor::kAddress));
TNode<Uint32T> expected_value_high =
UncheckedCast<Uint32T>(Parameter(Descriptor::kExpectedValueHigh));
TNode<Uint32T> expected_value_low =
UncheckedCast<Uint32T>(Parameter(Descriptor::kExpectedValueLow));
TNode<Float64T> timeout =
UncheckedCast<Float64T>(Parameter(Descriptor::kTimeout));
TNode<Number> address_number = ChangeUint32ToTagged(address);
TNode<IntPtrT> expected_value_low =
UncheckedCast<IntPtrT>(Parameter(Descriptor::kExpectedValueLow));
TNode<IntPtrT> expected_value_high =
UncheckedCast<IntPtrT>(Parameter(Descriptor::kExpectedValueHigh));
TNode<BigInt> expected_value =
BigIntFromInt32Pair(expected_value_low, expected_value_high);
TNode<IntPtrT> timeout_low =
UncheckedCast<IntPtrT>(Parameter(Descriptor::kTimeoutLow));
TNode<IntPtrT> timeout_high =
UncheckedCast<IntPtrT>(Parameter(Descriptor::kTimeoutHigh));
TNode<BigInt> timeout = BigIntFromInt32Pair(timeout_low, timeout_high);
TNode<WasmInstanceObject> instance = LoadInstanceFromFrame();
TNode<Context> context = LoadContextFromInstance(instance);
TNode<Smi> result_smi =
CAST(CallRuntime(Runtime::kWasmI64AtomicWait, context, instance,
address_number, expected_value, timeout));
Return(Unsigned(SmiToInt32(result_smi)));
}
TF_BUILTIN(WasmI64AtomicWait64, WasmBuiltinsAssembler) {
if (!Is64()) {
Unreachable();
return;
}
TNode<Uint32T> address =
UncheckedCast<Uint32T>(Parameter(Descriptor::kAddress));
TNode<Number> address_number = ChangeUint32ToTagged(address);
TNode<Number> expected_value_high_number =
ChangeUint32ToTagged(expected_value_high);
TNode<Number> expected_value_low_number =
ChangeUint32ToTagged(expected_value_low);
TNode<Number> timeout_number = ChangeFloat64ToTagged(timeout);
TNode<IntPtrT> expected_value_raw =
UncheckedCast<IntPtrT>(Parameter(Descriptor::kExpectedValue));
TNode<BigInt> expected_value = BigIntFromInt64(expected_value_raw);
TNode<IntPtrT> timeout_raw =
UncheckedCast<IntPtrT>(Parameter(Descriptor::kTimeout));
TNode<BigInt> timeout = BigIntFromInt64(timeout_raw);
TNode<WasmInstanceObject> instance = LoadInstanceFromFrame();
TNode<Context> context = LoadContextFromInstance(instance);
TNode<Smi> result_smi = CAST(CallRuntime(
Runtime::kWasmI64AtomicWait, context, instance, address_number,
expected_value_high_number, expected_value_low_number, timeout_number));
TNode<Smi> result_smi =
CAST(CallRuntime(Runtime::kWasmI64AtomicWait, context, instance,
address_number, expected_value, timeout));
Return(Unsigned(SmiToInt32(result_smi)));
}
......
@@ -401,12 +401,23 @@ void WasmAtomicNotifyDescriptor::InitializePlatformSpecific(
}
#if !defined(V8_TARGET_ARCH_MIPS) && !defined(V8_TARGET_ARCH_MIPS64)
void WasmI32AtomicWaitDescriptor::InitializePlatformSpecific(
void WasmI32AtomicWait32Descriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
DefaultInitializePlatformSpecific(data, kParameterCount);
}
void WasmI64AtomicWaitDescriptor::InitializePlatformSpecific(
void WasmI32AtomicWait64Descriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
DefaultInitializePlatformSpecific(data, kParameterCount);
}
void WasmI64AtomicWait32Descriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
DefaultInitializePlatformSpecific(data,
kParameterCount - kStackArgumentsCount);
}
void WasmI64AtomicWait64Descriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
DefaultInitializePlatformSpecific(data, kParameterCount);
}
......
@@ -91,8 +91,10 @@ namespace internal {
V(Typeof) \
V(Void) \
V(WasmAtomicNotify) \
V(WasmI32AtomicWait) \
V(WasmI64AtomicWait) \
V(WasmI32AtomicWait32) \
V(WasmI32AtomicWait64) \
V(WasmI64AtomicWait32) \
V(WasmI64AtomicWait64) \
V(WasmMemoryGrow) \
V(WasmTableGet) \
V(WasmTableSet) \
@@ -454,18 +456,22 @@ STATIC_ASSERT(kMaxTFSBuiltinRegisterParams <= kMaxBuiltinRegisterParams);
DEFINE_FLAGS_AND_RESULT_AND_PARAMETERS( \
CallInterfaceDescriptorData::kAllowVarArgs, 1, ##__VA_ARGS__)
#define DEFINE_RESULT_AND_PARAMETER_TYPES(...) \
void InitializePlatformIndependent(CallInterfaceDescriptorData* data) \
override { \
MachineType machine_types[] = {__VA_ARGS__}; \
static_assert( \
kReturnCount + kParameterCount == arraysize(machine_types), \
"Parameter names definition is not consistent with parameter types"); \
data->InitializePlatformIndependent(Flags(kDescriptorFlags), kReturnCount, \
kParameterCount, machine_types, \
arraysize(machine_types)); \
#define DEFINE_RESULT_AND_PARAMETER_TYPES_WITH_FLAG(flag, ...) \
void InitializePlatformIndependent(CallInterfaceDescriptorData* data) \
override { \
MachineType machine_types[] = {__VA_ARGS__}; \
static_assert( \
kReturnCount + kParameterCount == arraysize(machine_types), \
"Parameter names definition is not consistent with parameter types"); \
data->InitializePlatformIndependent( \
Flags(flag | kDescriptorFlags), kReturnCount, kParameterCount, \
machine_types, arraysize(machine_types)); \
}
#define DEFINE_RESULT_AND_PARAMETER_TYPES(...) \
DEFINE_RESULT_AND_PARAMETER_TYPES_WITH_FLAG( \
CallInterfaceDescriptorData::kNoFlags, __VA_ARGS__)
#define DEFINE_PARAMETER_TYPES(...) \
DEFINE_RESULT_AND_PARAMETER_TYPES(MachineType::AnyTagged() /* result */, \
##__VA_ARGS__)
@@ -1361,27 +1367,62 @@ class WasmAtomicNotifyDescriptor final : public CallInterfaceDescriptor {
DECLARE_DESCRIPTOR(WasmAtomicNotifyDescriptor, CallInterfaceDescriptor)
};
class WasmI32AtomicWaitDescriptor final : public CallInterfaceDescriptor {
class WasmI32AtomicWait32Descriptor final : public CallInterfaceDescriptor {
public:
DEFINE_PARAMETERS_NO_CONTEXT(kAddress, kExpectedValue, kTimeoutLow,
kTimeoutHigh)
DEFINE_RESULT_AND_PARAMETER_TYPES(MachineType::Uint32(), // result 1
MachineType::Uint32(), // kAddress
MachineType::Int32(), // kExpectedValue
MachineType::Uint32(), // kTimeoutLow
MachineType::Uint32()) // kTimeoutHigh
DECLARE_DESCRIPTOR(WasmI32AtomicWait32Descriptor, CallInterfaceDescriptor)
};
class WasmI64AtomicWait32Descriptor final : public CallInterfaceDescriptor {
public:
DEFINE_PARAMETERS_NO_CONTEXT(kAddress, kExpectedValueLow, kExpectedValueHigh,
kTimeoutLow, kTimeoutHigh)
DEFINE_RESULT_AND_PARAMETER_TYPES_WITH_FLAG(
CallInterfaceDescriptorData::kNoStackScan, // allow untagged stack params
MachineType::Uint32(), // result 1
MachineType::Uint32(), // kAddress
MachineType::Uint32(), // kExpectedValueLow
MachineType::Uint32(), // kExpectedValueHigh
MachineType::Uint32(), // kTimeoutLow
MachineType::Uint32()) // kTimeoutHigh
#if V8_TARGET_ARCH_IA32
static constexpr bool kPassLastArgOnStack = true;
#else
static constexpr bool kPassLastArgOnStack = false;
#endif
// Pass the last parameter through the stack.
static constexpr int kStackArgumentsCount = kPassLastArgOnStack ? 1 : 0;
DECLARE_DESCRIPTOR(WasmI64AtomicWait32Descriptor, CallInterfaceDescriptor)
};
class WasmI32AtomicWait64Descriptor final : public CallInterfaceDescriptor {
public:
DEFINE_PARAMETERS_NO_CONTEXT(kAddress, kExpectedValue, kTimeout)
DEFINE_RESULT_AND_PARAMETER_TYPES(MachineType::Uint32(), // result 1
MachineType::Uint32(), // kAddress
MachineType::Int32(), // kExpectedValue
MachineType::Float64()) // kTimeout
DECLARE_DESCRIPTOR(WasmI32AtomicWaitDescriptor, CallInterfaceDescriptor)
DEFINE_RESULT_AND_PARAMETER_TYPES(MachineType::Uint32(), // result 1
MachineType::Uint32(), // kAddress
MachineType::Int32(), // kExpectedValue
MachineType::Uint64()) // kTimeout
DECLARE_DESCRIPTOR(WasmI32AtomicWait64Descriptor, CallInterfaceDescriptor)
};
class WasmI64AtomicWaitDescriptor final : public CallInterfaceDescriptor {
class WasmI64AtomicWait64Descriptor final : public CallInterfaceDescriptor {
public:
DEFINE_PARAMETERS_NO_CONTEXT(kAddress, kExpectedValueHigh, kExpectedValueLow,
kTimeout)
DEFINE_RESULT_AND_PARAMETER_TYPES(
MachineType::Uint32(), // result 1
MachineType::Uint32(), // kAddress
MachineType::Uint32(), // kExpectedValueHigh
MachineType::Uint32(), // kExpectedValueLow
MachineType::Float64()) // kTimeout
DECLARE_DESCRIPTOR(WasmI64AtomicWaitDescriptor, CallInterfaceDescriptor)
DEFINE_PARAMETERS_NO_CONTEXT(kAddress, kExpectedValue, kTimeout)
DEFINE_RESULT_AND_PARAMETER_TYPES(MachineType::Uint32(), // result 1
MachineType::Uint32(), // kAddress
MachineType::Uint64(), // kExpectedValue
MachineType::Uint64()) // kTimeout
DECLARE_DESCRIPTOR(WasmI64AtomicWait64Descriptor, CallInterfaceDescriptor)
};
class CloneObjectWithVectorDescriptor final : public CallInterfaceDescriptor {
......
@@ -401,7 +401,9 @@ CallDescriptor* Linkage::GetStubCallDescriptor(
// The rest of the parameters go on the stack.
int stack_slot = i - register_parameter_count - stack_parameter_count;
locations.AddParam(LinkageLocation::ForCallerFrameSlot(
stack_slot, MachineType::AnyTagged()));
stack_slot, i < descriptor.GetParameterCount()
? descriptor.GetParameterType(i)
: MachineType::AnyTagged()));
}
}
// Add context.
......
@@ -4061,8 +4061,60 @@ Signature<MachineRepresentation>* CreateMachineSignature(
}
return builder.Build();
}
template <typename BuiltinDescriptor>
CallDescriptor* GetBuiltinCallDescriptor(WasmGraphBuilder* builder,
StubCallMode stub_mode) {
BuiltinDescriptor interface_descriptor;
return Linkage::GetStubCallDescriptor(
builder->mcgraph()->zone(), // zone
interface_descriptor, // descriptor
interface_descriptor.GetStackParameterCount(), // stack parameter count
CallDescriptor::kNoFlags, // flags
Operator::kNoProperties, // properties
stub_mode); // stub call mode
}
} // namespace
void WasmGraphBuilder::AddInt64LoweringReplacement(
CallDescriptor* original, CallDescriptor* replacement) {
if (!lowering_special_case_) {
lowering_special_case_ = std::make_unique<Int64LoweringSpecialCase>();
}
lowering_special_case_->replacements.insert({original, replacement});
}
CallDescriptor* WasmGraphBuilder::GetI32AtomicWaitCallDescriptor() {
if (i32_atomic_wait_descriptor_) return i32_atomic_wait_descriptor_;
i32_atomic_wait_descriptor_ =
GetBuiltinCallDescriptor<WasmI32AtomicWait64Descriptor>(
this, StubCallMode::kCallWasmRuntimeStub);
AddInt64LoweringReplacement(
i32_atomic_wait_descriptor_,
GetBuiltinCallDescriptor<WasmI32AtomicWait32Descriptor>(
this, StubCallMode::kCallWasmRuntimeStub));
return i32_atomic_wait_descriptor_;
}
CallDescriptor* WasmGraphBuilder::GetI64AtomicWaitCallDescriptor() {
if (i64_atomic_wait_descriptor_) return i64_atomic_wait_descriptor_;
i64_atomic_wait_descriptor_ =
GetBuiltinCallDescriptor<WasmI64AtomicWait64Descriptor>(
this, StubCallMode::kCallWasmRuntimeStub);
AddInt64LoweringReplacement(
i64_atomic_wait_descriptor_,
GetBuiltinCallDescriptor<WasmI64AtomicWait32Descriptor>(
this, StubCallMode::kCallWasmRuntimeStub));
return i64_atomic_wait_descriptor_;
}
void WasmGraphBuilder::LowerInt64(CallOrigin origin) {
if (mcgraph()->machine()->Is64()) return;
Int64Lowering r(mcgraph()->graph(), mcgraph()->machine(), mcgraph()->common(),
@@ -4804,23 +4856,17 @@ Node* WasmGraphBuilder::AtomicOp(wasm::WasmOpcode opcode, Node* const* inputs,
// Now that we've bounds-checked, compute the effective address.
Node* address = graph()->NewNode(mcgraph()->machine()->Int32Add(),
Uint32Constant(offset), index);
Node* timeout;
if (mcgraph()->machine()->Is32()) {
timeout = BuildF64SConvertI64(inputs[2]);
} else {
timeout = graph()->NewNode(mcgraph()->machine()->RoundInt64ToFloat64(),
inputs[2]);
}
WasmI32AtomicWaitDescriptor interface_descriptor;
auto call_descriptor = Linkage::GetStubCallDescriptor(
mcgraph()->zone(), interface_descriptor,
interface_descriptor.GetStackParameterCount(),
CallDescriptor::kNoFlags, Operator::kNoProperties,
StubCallMode::kCallWasmRuntimeStub);
auto call_descriptor = GetI32AtomicWaitCallDescriptor();
intptr_t target = mcgraph()->machine()->Is64()
? wasm::WasmCode::kWasmI32AtomicWait64
: wasm::WasmCode::kWasmI32AtomicWait32;
Node* call_target = mcgraph()->RelocatableIntPtrConstant(
wasm::WasmCode::kWasmI32AtomicWait, RelocInfo::WASM_STUB_CALL);
target, RelocInfo::WASM_STUB_CALL);
node = graph()->NewNode(mcgraph()->common()->Call(call_descriptor),
call_target, address, inputs[1], timeout,
call_target, address, inputs[1], inputs[2],
effect(), control());
break;
}
@@ -4832,30 +4878,18 @@ Node* WasmGraphBuilder::AtomicOp(wasm::WasmOpcode opcode, Node* const* inputs,
// Now that we've bounds-checked, compute the effective address.
Node* address = graph()->NewNode(mcgraph()->machine()->Int32Add(),
Uint32Constant(offset), index);
Node* timeout;
if (mcgraph()->machine()->Is32()) {
timeout = BuildF64SConvertI64(inputs[2]);
} else {
timeout = graph()->NewNode(mcgraph()->machine()->RoundInt64ToFloat64(),
inputs[2]);
}
Node* expected_value_low = graph()->NewNode(
mcgraph()->machine()->TruncateInt64ToInt32(), inputs[1]);
Node* tmp = graph()->NewNode(mcgraph()->machine()->Word64Shr(), inputs[1],
Int64Constant(32));
Node* expected_value_high =
graph()->NewNode(mcgraph()->machine()->TruncateInt64ToInt32(), tmp);
WasmI64AtomicWaitDescriptor interface_descriptor;
auto call_descriptor = Linkage::GetStubCallDescriptor(
mcgraph()->zone(), interface_descriptor,
interface_descriptor.GetStackParameterCount(),
CallDescriptor::kNoFlags, Operator::kNoProperties,
StubCallMode::kCallWasmRuntimeStub);
CallDescriptor* call_descriptor = GetI64AtomicWaitCallDescriptor();
intptr_t target = mcgraph()->machine()->Is64()
? wasm::WasmCode::kWasmI64AtomicWait64
: wasm::WasmCode::kWasmI64AtomicWait32;
Node* call_target = mcgraph()->RelocatableIntPtrConstant(
wasm::WasmCode::kWasmI64AtomicWait, RelocInfo::WASM_STUB_CALL);
target, RelocInfo::WASM_STUB_CALL);
node = graph()->NewNode(mcgraph()->common()->Call(call_descriptor),
call_target, address, expected_value_high,
expected_value_low, timeout, effect(), control());
call_target, address, inputs[1], inputs[2],
effect(), control());
break;
}
@@ -5086,18 +5120,6 @@ void WasmGraphBuilder::RemoveBytecodePositionDecorator() {
}
namespace {
template <typename BuiltinDescriptor>
CallDescriptor* GetBuiltinCallDescriptor(WasmGraphBuilder* builder,
StubCallMode stub_mode) {
BuiltinDescriptor interface_descriptor;
return Linkage::GetStubCallDescriptor(
builder->mcgraph()->zone(), // zone
interface_descriptor, // descriptor
interface_descriptor.GetStackParameterCount(), // stack parameter count
CallDescriptor::kNoFlags, // flags
Operator::kNoProperties, // properties
stub_mode); // stub call mode
}
class WasmWrapperGraphBuilder : public WasmGraphBuilder {
public:
@@ -5111,31 +5133,25 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
CallDescriptor* GetI64ToBigIntCallDescriptor() {
if (i64_to_bigint_descriptor_) return i64_to_bigint_descriptor_;
i64_to_bigint_descriptor_ =
GetBuiltinCallDescriptor<I64ToBigIntDescriptor>(this, stub_mode_);
if (!lowering_special_case_) {
lowering_special_case_ = std::make_unique<Int64LoweringSpecialCase>();
}
lowering_special_case_->replacements.insert(
{i64_to_bigint_descriptor_,
GetBuiltinCallDescriptor<I32PairToBigIntDescriptor>(this,
stub_mode_)});
AddInt64LoweringReplacement(
i64_to_bigint_descriptor_,
GetBuiltinCallDescriptor<I32PairToBigIntDescriptor>(this, stub_mode_));
return i64_to_bigint_descriptor_;
}
CallDescriptor* GetBigIntToI64CallDescriptor() {
if (bigint_to_i64_descriptor_) return bigint_to_i64_descriptor_;
if (!lowering_special_case_) {
lowering_special_case_ = std::make_unique<Int64LoweringSpecialCase>();
}
bigint_to_i64_descriptor_ =
GetBuiltinCallDescriptor<BigIntToI64Descriptor>(this, stub_mode_);
lowering_special_case_->replacements.insert(
{bigint_to_i64_descriptor_,
GetBuiltinCallDescriptor<BigIntToI32PairDescriptor>(this,
stub_mode_)});
AddInt64LoweringReplacement(
bigint_to_i64_descriptor_,
GetBuiltinCallDescriptor<BigIntToI32PairDescriptor>(this, stub_mode_));
return bigint_to_i64_descriptor_;
}
......
@@ -558,6 +558,13 @@ class WasmGraphBuilder {
Node** parameters, int parameter_count);
TrapId GetTrapIdForTrap(wasm::TrapReason reason);
void AddInt64LoweringReplacement(CallDescriptor* original,
CallDescriptor* replacement);
CallDescriptor* GetI32AtomicWaitCallDescriptor();
CallDescriptor* GetI64AtomicWaitCallDescriptor();
std::unique_ptr<WasmGraphAssembler> gasm_;
Zone* const zone_;
MachineGraph* const mcgraph_;
@@ -583,6 +590,8 @@
compiler::SourcePositionTable* const source_position_table_ = nullptr;
std::unique_ptr<Int64LoweringSpecialCase> lowering_special_case_;
CallDescriptor* i32_atomic_wait_descriptor_ = nullptr;
CallDescriptor* i64_atomic_wait_descriptor_ = nullptr;
};
enum WasmCallKind { kWasmFunction, kWasmImportWrapper, kWasmCapiFunction };
......
@@ -106,55 +106,79 @@ Object WaitJsTranslateReturn(Isolate* isolate, Object res) {
Object FutexEmulation::WaitJs32(Isolate* isolate,
Handle<JSArrayBuffer> array_buffer, size_t addr,
int32_t value, double rel_timeout_ms) {
Object res = Wait32(isolate, array_buffer, addr, value, rel_timeout_ms);
Object res =
Wait<int32_t>(isolate, array_buffer, addr, value, rel_timeout_ms);
return WaitJsTranslateReturn(isolate, res);
}
Object FutexEmulation::WaitJs64(Isolate* isolate,
Handle<JSArrayBuffer> array_buffer, size_t addr,
int64_t value, double rel_timeout_ms) {
Object res = Wait64(isolate, array_buffer, addr, value, rel_timeout_ms);
Object res =
Wait<int64_t>(isolate, array_buffer, addr, value, rel_timeout_ms);
return WaitJsTranslateReturn(isolate, res);
}
Object FutexEmulation::Wait32(Isolate* isolate,
Handle<JSArrayBuffer> array_buffer, size_t addr,
int32_t value, double rel_timeout_ms) {
return Wait<int32_t>(isolate, array_buffer, addr, value, rel_timeout_ms);
Object FutexEmulation::WaitWasm32(Isolate* isolate,
Handle<JSArrayBuffer> array_buffer,
size_t addr, int32_t value,
int64_t rel_timeout_ns) {
return Wait<int32_t>(isolate, array_buffer, addr, value, rel_timeout_ns >= 0,
rel_timeout_ns);
}
Object FutexEmulation::Wait64(Isolate* isolate,
Handle<JSArrayBuffer> array_buffer, size_t addr,
int64_t value, double rel_timeout_ms) {
return Wait<int64_t>(isolate, array_buffer, addr, value, rel_timeout_ms);
Object FutexEmulation::WaitWasm64(Isolate* isolate,
Handle<JSArrayBuffer> array_buffer,
size_t addr, int64_t value,
int64_t rel_timeout_ns) {
return Wait<int64_t>(isolate, array_buffer, addr, value, rel_timeout_ns >= 0,
rel_timeout_ns);
}
template <typename T>
Object FutexEmulation::Wait(Isolate* isolate,
Handle<JSArrayBuffer> array_buffer, size_t addr,
T value, double rel_timeout_ms) {
VMState<ATOMICS_WAIT> state(isolate);
DCHECK_LT(addr, array_buffer->byte_length());
bool use_timeout = rel_timeout_ms != V8_INFINITY;
int64_t rel_timeout_ns = -1;
base::TimeDelta rel_timeout;
if (use_timeout) {
// Convert to nanoseconds.
double rel_timeout_ns = rel_timeout_ms *
base::Time::kNanosecondsPerMicrosecond *
base::Time::kMicrosecondsPerMillisecond;
if (rel_timeout_ns >
static_cast<double>(std::numeric_limits<int64_t>::max())) {
double timeout_ns = rel_timeout_ms *
base::Time::kNanosecondsPerMicrosecond *
base::Time::kMicrosecondsPerMillisecond;
if (timeout_ns > static_cast<double>(std::numeric_limits<int64_t>::max())) {
// 2**63 nanoseconds is 292 years. Let's just treat anything greater as
// infinite.
use_timeout = false;
} else {
rel_timeout = base::TimeDelta::FromNanoseconds(
static_cast<int64_t>(rel_timeout_ns));
rel_timeout_ns = static_cast<int64_t>(timeout_ns);
}
}
return Wait(isolate, array_buffer, addr, value, use_timeout, rel_timeout_ns);
}
namespace {
double WaitTimeoutInMs(double timeout_ns) {
return timeout_ns < 0
? V8_INFINITY
: timeout_ns / (base::Time::kNanosecondsPerMicrosecond *
base::Time::kMicrosecondsPerMillisecond);
}
} // namespace
template <typename T>
Object FutexEmulation::Wait(Isolate* isolate,
Handle<JSArrayBuffer> array_buffer, size_t addr,
T value, bool use_timeout, int64_t rel_timeout_ns) {
VMState<ATOMICS_WAIT> state(isolate);
base::TimeDelta rel_timeout =
base::TimeDelta::FromNanoseconds(rel_timeout_ns);
// We have to convert the timeout back to double for the AtomicsWaitCallback.
double rel_timeout_ms = WaitTimeoutInMs(static_cast<double>(rel_timeout_ns));
AtomicsWaitWakeHandle stop_handle(isolate);
isolate->RunAtomicsWaitCallback(AtomicsWaitEvent::kStartWait, array_buffer,
......
@@ -126,13 +126,13 @@ class FutexEmulation : public AllStatic {
// Same as WaitJs above except it returns 0 (ok), 1 (not equal) and 2 (timed
// out) as expected by Wasm.
static Object Wait32(Isolate* isolate, Handle<JSArrayBuffer> array_buffer,
size_t addr, int32_t value, double rel_timeout_ms);
static Object WaitWasm32(Isolate* isolate, Handle<JSArrayBuffer> array_buffer,
size_t addr, int32_t value, int64_t rel_timeout_ns);
// Same as Wait32 above except it checks for an int64_t value in the
// array_buffer.
static Object Wait64(Isolate* isolate, Handle<JSArrayBuffer> array_buffer,
size_t addr, int64_t value, double rel_timeout_ms);
static Object WaitWasm64(Isolate* isolate, Handle<JSArrayBuffer> array_buffer,
size_t addr, int64_t value, int64_t rel_timeout_ns);
// Wake |num_waiters_to_wake| threads that are waiting on the given |addr|.
// |num_waiters_to_wake| can be kWakeAll, in which case all waiters are
@@ -154,6 +154,11 @@
static Object Wait(Isolate* isolate, Handle<JSArrayBuffer> array_buffer,
size_t addr, T value, double rel_timeout_ms);
template <typename T>
static Object Wait(Isolate* isolate, Handle<JSArrayBuffer> array_buffer,
size_t addr, T value, bool use_timeout,
int64_t rel_timeout_ns);
// `mutex_` protects the composition of `wait_list_` (i.e. no elements may be
// added or removed without holding this mutex), as well as the `waiting_`
// and `interrupted_` fields for each individual list node that is currently
......
@@ -391,13 +391,6 @@ RUNTIME_FUNCTION(Runtime_WasmAtomicNotify) {
return FutexEmulation::Wake(array_buffer, address, count);
}
double WaitTimeoutInMs(double timeout_ns) {
return timeout_ns < 0
? V8_INFINITY
: timeout_ns / (base::Time::kNanosecondsPerMicrosecond *
base::Time::kMicrosecondsPerMillisecond);
}
RUNTIME_FUNCTION(Runtime_WasmI32AtomicWait) {
ClearThreadInWasmScope clear_wasm_flag;
HandleScope scope(isolate);
@@ -405,30 +398,28 @@ RUNTIME_FUNCTION(Runtime_WasmI32AtomicWait) {
CONVERT_ARG_HANDLE_CHECKED(WasmInstanceObject, instance, 0);
CONVERT_NUMBER_CHECKED(uint32_t, address, Uint32, args[1]);
CONVERT_NUMBER_CHECKED(int32_t, expected_value, Int32, args[2]);
CONVERT_DOUBLE_ARG_CHECKED(timeout_ns, 3);
double timeout_ms = WaitTimeoutInMs(timeout_ns);
CONVERT_ARG_HANDLE_CHECKED(BigInt, timeout_ns, 3);
Handle<JSArrayBuffer> array_buffer =
getSharedArrayBuffer(instance, isolate, address);
return FutexEmulation::Wait32(isolate, array_buffer, address, expected_value,
timeout_ms);
return FutexEmulation::WaitWasm32(isolate, array_buffer, address,
expected_value, timeout_ns->AsInt64());
}
RUNTIME_FUNCTION(Runtime_WasmI64AtomicWait) {
ClearThreadInWasmScope clear_wasm_flag;
HandleScope scope(isolate);
DCHECK_EQ(5, args.length());
DCHECK_EQ(4, args.length());
CONVERT_ARG_HANDLE_CHECKED(WasmInstanceObject, instance, 0);
CONVERT_NUMBER_CHECKED(uint32_t, address, Uint32, args[1]);
CONVERT_NUMBER_CHECKED(uint32_t, expected_value_high, Uint32, args[2]);
CONVERT_NUMBER_CHECKED(uint32_t, expected_value_low, Uint32, args[3]);
CONVERT_DOUBLE_ARG_CHECKED(timeout_ns, 4);
int64_t expected_value = (static_cast<uint64_t>(expected_value_high) << 32) |
static_cast<uint64_t>(expected_value_low);
double timeout_ms = WaitTimeoutInMs(timeout_ns);
CONVERT_ARG_HANDLE_CHECKED(BigInt, expected_value, 2);
CONVERT_ARG_HANDLE_CHECKED(BigInt, timeout_ns, 3);
Handle<JSArrayBuffer> array_buffer =
getSharedArrayBuffer(instance, isolate, address);
return FutexEmulation::Wait64(isolate, array_buffer, address, expected_value,
timeout_ms);
return FutexEmulation::WaitWasm64(isolate, array_buffer, address,
expected_value->AsInt64(),
timeout_ns->AsInt64());
}
namespace {
......
@@ -50,8 +50,10 @@ struct WasmModule;
V(WasmCompileLazy) \
V(WasmDebugBreak) \
V(WasmAtomicNotify) \
V(WasmI32AtomicWait) \
V(WasmI64AtomicWait) \
V(WasmI32AtomicWait32) \
V(WasmI32AtomicWait64) \
V(WasmI64AtomicWait32) \
V(WasmI64AtomicWait64) \
V(WasmMemoryGrow) \
V(WasmTableGet) \
V(WasmTableSet) \
......
@@ -1752,15 +1752,11 @@ class ThreadImpl {
bool ExtractAtomicWaitNotifyParams(Decoder* decoder, InterpreterCode* code,
pc_t pc, int* const len,
uint32_t* buffer_offset, type* val,
double* timeout = nullptr) {
int64_t* timeout = nullptr) {
MemoryAccessImmediate<Decoder::kValidate> imm(decoder, code->at(pc + 1),
sizeof(type));
if (timeout) {
double timeout_ns = Pop().to<int64_t>();
*timeout = (timeout_ns < 0)
? V8_INFINITY
: timeout_ns / (base::Time::kNanosecondsPerMicrosecond *
base::Time::kMicrosecondsPerMillisecond);
*timeout = Pop().to<int64_t>();
}
*val = Pop().to<type>();
auto index = Pop().to<uint32_t>();
@@ -2169,7 +2165,7 @@ class ThreadImpl {
break;
case kExprI32AtomicWait: {
int32_t val;
double timeout;
int64_t timeout;
uint32_t buffer_offset;
if (!ExtractAtomicWaitNotifyParams<int32_t>(
decoder, code, pc, len, &buffer_offset, &val, &timeout)) {
@@ -2178,14 +2174,14 @@ class ThreadImpl {
HandleScope handle_scope(isolate_);
Handle<JSArrayBuffer> array_buffer(
instance_object_->memory_object().array_buffer(), isolate_);
auto result = FutexEmulation::Wait32(isolate_, array_buffer,
buffer_offset, val, timeout);
auto result = FutexEmulation::WaitWasm32(isolate_, array_buffer,
buffer_offset, val, timeout);
Push(WasmValue(result.ToSmi().value()));
break;
}
case kExprI64AtomicWait: {
int64_t val;
double timeout;
int64_t timeout;
uint32_t buffer_offset;
if (!ExtractAtomicWaitNotifyParams<int64_t>(
decoder, code, pc, len, &buffer_offset, &val, &timeout)) {
@@ -2194,8 +2190,8 @@ class ThreadImpl {
HandleScope handle_scope(isolate_);
Handle<JSArrayBuffer> array_buffer(
instance_object_->memory_object().array_buffer(), isolate_);
auto result = FutexEmulation::Wait64(isolate_, array_buffer,
buffer_offset, val, timeout);
auto result = FutexEmulation::WaitWasm64(isolate_, array_buffer,
buffer_offset, val, timeout);
Push(WasmValue(result.ToSmi().value()));
break;
}
......