Commit 9945e908 authored by Sathya Gunasekaran, committed by Commit Bot

Revert "[wasm] Refactor AtomicWait implementation"

This reverts commit 77d4e230.

Reason for revert: the 'V8 Linux - verify csa' build bot is broken:
https://ci.chromium.org/p/v8/builders/ci/V8%20Linux%20-%20verify%20csa/16218?

Original change's description:
> [wasm] Refactor AtomicWait implementation
> 
> The existing implementation included aspects that are not
> straight-forward to implement in Liftoff and seemed inefficient:
> * Convert the timeout in WebAssembly code from I64 to F64, just to
>   convert it back in the runtime.
>   * On 32-bit platforms this conversion needs an additional C-call.
> * Split the I64 expected value into two I32 values in the
>   wasm-compiler.
>   * Ideally the int64-lowering takes care of 32-bit specific handling.
> 
> With this CL the timeout and the expected value are passed as I64 to
> the runtime (a builtin moves the I64 into a bigint for that). The
> int64-lowering takes care of 32-bit platforms. There are special
> builtins for 32-bit platforms, but they are written such that ideally
> also the int64-lowering could create them.
> 
> R=jkummerow@chromium.org, binji@chromium.org
> 
> Bug: v8:10108
> Change-Id: I2dbba5839779961b1c5bde4c23fc3f38f1895a52
> Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2071867
> Commit-Queue: Andreas Haas <ahaas@chromium.org>
> Reviewed-by: Clemens Backes <clemensb@chromium.org>
> Reviewed-by: Jakob Kummerow <jkummerow@chromium.org>
> Reviewed-by: Ben Smith <binji@chromium.org>
> Cr-Commit-Position: refs/heads/master@{#66497}

TBR=binji@chromium.org,jkummerow@chromium.org,ahaas@chromium.org,clemensb@chromium.org

Change-Id: If284aa07eedddd2fbea4df8c53c7d371cac1d42e
No-Presubmit: true
No-Tree-Checks: true
No-Try: true
Bug: v8:10108
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2080250
Reviewed-by: Sathya Gunasekaran <gsathya@chromium.org>
Commit-Queue: Sathya Gunasekaran <gsathya@chromium.org>
Cr-Commit-Position: refs/heads/master@{#66498}
parent 77d4e230
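
Concretely, the code restored by this revert converts the i64 timeout to an f64 nanosecond count in generated code and lets the runtime turn that into the millisecond double that FutexEmulation expects, treating negative values as an infinite wait; the reverted CL instead carried the value as i64 all the way to the runtime by boxing it into a BigInt in a builtin. A minimal standalone sketch of the restored conversion (names are assumptions for illustration, not V8 identifiers):

// Sketch only -- assumed names, not V8 code.
#include <cstdint>
#include <limits>

// The compiler converts the i64 timeout to float64 nanoseconds
// (RoundInt64ToFloat64 on 64-bit targets; per the reverted description,
// 32-bit targets need an extra C call for this conversion). The runtime
// then converts nanoseconds to milliseconds, with a negative value
// meaning "wait forever".
double WaitTimeoutNanosToMs(int64_t timeout_ns) {
  double ns = static_cast<double>(timeout_ns);
  if (ns < 0) return std::numeric_limits<double>::infinity();
  // 1e6 ns per ms, i.e. kNanosecondsPerMicrosecond * kMicrosecondsPerMillisecond.
  return ns / (1000.0 * 1000.0);
}
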
......@@ -852,10 +852,8 @@ namespace internal {
ASM(WasmCompileLazy, Dummy) \
ASM(WasmDebugBreak, Dummy) \
TFC(WasmAtomicNotify, WasmAtomicNotify) \
TFC(WasmI32AtomicWait32, WasmI32AtomicWait32) \
TFC(WasmI32AtomicWait64, WasmI32AtomicWait64) \
TFC(WasmI64AtomicWait32, WasmI64AtomicWait32) \
TFC(WasmI64AtomicWait64, WasmI64AtomicWait64) \
TFC(WasmI32AtomicWait, WasmI32AtomicWait) \
TFC(WasmI64AtomicWait, WasmI64AtomicWait) \
TFC(WasmMemoryGrow, WasmMemoryGrow) \
TFC(WasmTableGet, WasmTableGet) \
TFC(WasmTableSet, WasmTableSet) \
......
......@@ -79,118 +79,48 @@ TF_BUILTIN(WasmAtomicNotify, WasmBuiltinsAssembler) {
Return(Unsigned(SmiToInt32(result_smi)));
}
TF_BUILTIN(WasmI32AtomicWait32, WasmBuiltinsAssembler) {
if (!Is32()) {
Unreachable();
return;
}
TF_BUILTIN(WasmI32AtomicWait, WasmBuiltinsAssembler) {
TNode<Uint32T> address =
UncheckedCast<Uint32T>(Parameter(Descriptor::kAddress));
TNode<Number> address_number = ChangeUint32ToTagged(address);
TNode<Int32T> expected_value =
UncheckedCast<Int32T>(Parameter(Descriptor::kExpectedValue));
TNode<Number> expected_value_number = ChangeInt32ToTagged(expected_value);
TNode<IntPtrT> timeout_low =
UncheckedCast<IntPtrT>(Parameter(Descriptor::kTimeoutLow));
TNode<IntPtrT> timeout_high =
UncheckedCast<IntPtrT>(Parameter(Descriptor::kTimeoutHigh));
TNode<BigInt> timeout = BigIntFromInt32Pair(timeout_low, timeout_high);
TNode<Float64T> timeout =
UncheckedCast<Float64T>(Parameter(Descriptor::kTimeout));
TNode<WasmInstanceObject> instance = LoadInstanceFromFrame();
TNode<Context> context = LoadContextFromInstance(instance);
TNode<Smi> result_smi =
CAST(CallRuntime(Runtime::kWasmI32AtomicWait, context, instance,
address_number, expected_value_number, timeout));
Return(Unsigned(SmiToInt32(result_smi)));
}
TF_BUILTIN(WasmI32AtomicWait64, WasmBuiltinsAssembler) {
if (!Is64()) {
Unreachable();
return;
}
TNode<Uint32T> address =
UncheckedCast<Uint32T>(Parameter(Descriptor::kAddress));
TNode<Number> address_number = ChangeUint32ToTagged(address);
TNode<Int32T> expected_value =
UncheckedCast<Int32T>(Parameter(Descriptor::kExpectedValue));
TNode<Number> expected_value_number = ChangeInt32ToTagged(expected_value);
TNode<IntPtrT> timeout_raw =
UncheckedCast<IntPtrT>(Parameter(Descriptor::kTimeout));
TNode<BigInt> timeout = BigIntFromInt64(timeout_raw);
TNode<WasmInstanceObject> instance = LoadInstanceFromFrame();
TNode<Number> timeout_number = ChangeFloat64ToTagged(timeout);
TNode<Context> context = LoadContextFromInstance(instance);
TNode<Smi> result_smi =
CAST(CallRuntime(Runtime::kWasmI32AtomicWait, context, instance,
address_number, expected_value_number, timeout));
address_number, expected_value_number, timeout_number));
Return(Unsigned(SmiToInt32(result_smi)));
}
TF_BUILTIN(WasmI64AtomicWait32, WasmBuiltinsAssembler) {
if (!Is32()) {
Unreachable();
return;
}
TF_BUILTIN(WasmI64AtomicWait, WasmBuiltinsAssembler) {
TNode<Uint32T> address =
UncheckedCast<Uint32T>(Parameter(Descriptor::kAddress));
TNode<Number> address_number = ChangeUint32ToTagged(address);
TNode<IntPtrT> expected_value_low =
UncheckedCast<IntPtrT>(Parameter(Descriptor::kExpectedValueLow));
TNode<IntPtrT> expected_value_high =
UncheckedCast<IntPtrT>(Parameter(Descriptor::kExpectedValueHigh));
TNode<BigInt> expected_value =
BigIntFromInt32Pair(expected_value_low, expected_value_high);
TNode<IntPtrT> timeout_low =
UncheckedCast<IntPtrT>(Parameter(Descriptor::kTimeoutLow));
TNode<IntPtrT> timeout_high =
UncheckedCast<IntPtrT>(Parameter(Descriptor::kTimeoutHigh));
TNode<BigInt> timeout = BigIntFromInt32Pair(timeout_low, timeout_high);
TNode<Uint32T> expected_value_high =
UncheckedCast<Uint32T>(Parameter(Descriptor::kExpectedValueHigh));
TNode<Uint32T> expected_value_low =
UncheckedCast<Uint32T>(Parameter(Descriptor::kExpectedValueLow));
TNode<Float64T> timeout =
UncheckedCast<Float64T>(Parameter(Descriptor::kTimeout));
TNode<WasmInstanceObject> instance = LoadInstanceFromFrame();
TNode<Context> context = LoadContextFromInstance(instance);
TNode<Smi> result_smi =
CAST(CallRuntime(Runtime::kWasmI64AtomicWait, context, instance,
address_number, expected_value, timeout));
Return(Unsigned(SmiToInt32(result_smi)));
}
TF_BUILTIN(WasmI64AtomicWait64, WasmBuiltinsAssembler) {
if (!Is64()) {
Unreachable();
return;
}
TNode<Uint32T> address =
UncheckedCast<Uint32T>(Parameter(Descriptor::kAddress));
TNode<Number> address_number = ChangeUint32ToTagged(address);
TNode<IntPtrT> expected_value_raw =
UncheckedCast<IntPtrT>(Parameter(Descriptor::kExpectedValue));
TNode<BigInt> expected_value = BigIntFromInt64(expected_value_raw);
TNode<IntPtrT> timeout_raw =
UncheckedCast<IntPtrT>(Parameter(Descriptor::kTimeout));
TNode<BigInt> timeout = BigIntFromInt64(timeout_raw);
TNode<WasmInstanceObject> instance = LoadInstanceFromFrame();
TNode<Number> expected_value_high_number =
ChangeUint32ToTagged(expected_value_high);
TNode<Number> expected_value_low_number =
ChangeUint32ToTagged(expected_value_low);
TNode<Number> timeout_number = ChangeFloat64ToTagged(timeout);
TNode<Context> context = LoadContextFromInstance(instance);
TNode<Smi> result_smi =
CAST(CallRuntime(Runtime::kWasmI64AtomicWait, context, instance,
address_number, expected_value, timeout));
TNode<Smi> result_smi = CAST(CallRuntime(
Runtime::kWasmI64AtomicWait, context, instance, address_number,
expected_value_high_number, expected_value_low_number, timeout_number));
Return(Unsigned(SmiToInt32(result_smi)));
}
......
......@@ -401,23 +401,12 @@ void WasmAtomicNotifyDescriptor::InitializePlatformSpecific(
}
#if !defined(V8_TARGET_ARCH_MIPS) && !defined(V8_TARGET_ARCH_MIPS64)
void WasmI32AtomicWait32Descriptor::InitializePlatformSpecific(
void WasmI32AtomicWaitDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
DefaultInitializePlatformSpecific(data, kParameterCount);
}
void WasmI32AtomicWait64Descriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
DefaultInitializePlatformSpecific(data, kParameterCount);
}
void WasmI64AtomicWait32Descriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
DefaultInitializePlatformSpecific(data,
kParameterCount - kStackArgumentsCount);
}
void WasmI64AtomicWait64Descriptor::InitializePlatformSpecific(
void WasmI64AtomicWaitDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
DefaultInitializePlatformSpecific(data, kParameterCount);
}
......
......@@ -91,10 +91,8 @@ namespace internal {
V(Typeof) \
V(Void) \
V(WasmAtomicNotify) \
V(WasmI32AtomicWait32) \
V(WasmI32AtomicWait64) \
V(WasmI64AtomicWait32) \
V(WasmI64AtomicWait64) \
V(WasmI32AtomicWait) \
V(WasmI64AtomicWait) \
V(WasmMemoryGrow) \
V(WasmTableGet) \
V(WasmTableSet) \
......@@ -456,22 +454,18 @@ STATIC_ASSERT(kMaxTFSBuiltinRegisterParams <= kMaxBuiltinRegisterParams);
DEFINE_FLAGS_AND_RESULT_AND_PARAMETERS( \
CallInterfaceDescriptorData::kAllowVarArgs, 1, ##__VA_ARGS__)
#define DEFINE_RESULT_AND_PARAMETER_TYPES_WITH_FLAG(flag, ...) \
void InitializePlatformIndependent(CallInterfaceDescriptorData* data) \
override { \
MachineType machine_types[] = {__VA_ARGS__}; \
static_assert( \
kReturnCount + kParameterCount == arraysize(machine_types), \
"Parameter names definition is not consistent with parameter types"); \
data->InitializePlatformIndependent( \
Flags(flag | kDescriptorFlags), kReturnCount, kParameterCount, \
machine_types, arraysize(machine_types)); \
#define DEFINE_RESULT_AND_PARAMETER_TYPES(...) \
void InitializePlatformIndependent(CallInterfaceDescriptorData* data) \
override { \
MachineType machine_types[] = {__VA_ARGS__}; \
static_assert( \
kReturnCount + kParameterCount == arraysize(machine_types), \
"Parameter names definition is not consistent with parameter types"); \
data->InitializePlatformIndependent(Flags(kDescriptorFlags), kReturnCount, \
kParameterCount, machine_types, \
arraysize(machine_types)); \
}
#define DEFINE_RESULT_AND_PARAMETER_TYPES(...) \
DEFINE_RESULT_AND_PARAMETER_TYPES_WITH_FLAG( \
CallInterfaceDescriptorData::kNoFlags, __VA_ARGS__)
#define DEFINE_PARAMETER_TYPES(...) \
DEFINE_RESULT_AND_PARAMETER_TYPES(MachineType::AnyTagged() /* result */, \
##__VA_ARGS__)
......@@ -1367,62 +1361,27 @@ class WasmAtomicNotifyDescriptor final : public CallInterfaceDescriptor {
DECLARE_DESCRIPTOR(WasmAtomicNotifyDescriptor, CallInterfaceDescriptor)
};
class WasmI32AtomicWait32Descriptor final : public CallInterfaceDescriptor {
public:
DEFINE_PARAMETERS_NO_CONTEXT(kAddress, kExpectedValue, kTimeoutLow,
kTimeoutHigh)
DEFINE_RESULT_AND_PARAMETER_TYPES(MachineType::Uint32(), // result 1
MachineType::Uint32(), // kAddress
MachineType::Int32(), // kExpectedValue
MachineType::Uint32(), // kTimeoutLow
MachineType::Uint32()) // kTimeoutHigh
DECLARE_DESCRIPTOR(WasmI32AtomicWait32Descriptor, CallInterfaceDescriptor)
};
class WasmI64AtomicWait32Descriptor final : public CallInterfaceDescriptor {
public:
DEFINE_PARAMETERS_NO_CONTEXT(kAddress, kExpectedValueLow, kExpectedValueHigh,
kTimeoutLow, kTimeoutHigh)
DEFINE_RESULT_AND_PARAMETER_TYPES_WITH_FLAG(
CallInterfaceDescriptorData::kNoStackScan, // allow untagged stack params
MachineType::Uint32(), // result 1
MachineType::Uint32(), // kAddress
MachineType::Uint32(), // kExpectedValueLow
MachineType::Uint32(), // kExpectedValueHigh
MachineType::Uint32(), // kTimeoutLow
MachineType::Uint32()) // kTimeoutHigh
#if V8_TARGET_ARCH_IA32
static constexpr bool kPassLastArgOnStack = true;
#else
static constexpr bool kPassLastArgOnStack = false;
#endif
// Pass the last parameter through the stack.
static constexpr int kStackArgumentsCount = kPassLastArgOnStack ? 1 : 0;
DECLARE_DESCRIPTOR(WasmI64AtomicWait32Descriptor, CallInterfaceDescriptor)
};
class WasmI32AtomicWait64Descriptor final : public CallInterfaceDescriptor {
class WasmI32AtomicWaitDescriptor final : public CallInterfaceDescriptor {
public:
DEFINE_PARAMETERS_NO_CONTEXT(kAddress, kExpectedValue, kTimeout)
DEFINE_RESULT_AND_PARAMETER_TYPES(MachineType::Uint32(), // result 1
MachineType::Uint32(), // kAddress
MachineType::Int32(), // kExpectedValue
MachineType::Uint64()) // kTimeout
DECLARE_DESCRIPTOR(WasmI32AtomicWait64Descriptor, CallInterfaceDescriptor)
DEFINE_RESULT_AND_PARAMETER_TYPES(MachineType::Uint32(), // result 1
MachineType::Uint32(), // kAddress
MachineType::Int32(), // kExpectedValue
MachineType::Float64()) // kTimeout
DECLARE_DESCRIPTOR(WasmI32AtomicWaitDescriptor, CallInterfaceDescriptor)
};
class WasmI64AtomicWait64Descriptor final : public CallInterfaceDescriptor {
class WasmI64AtomicWaitDescriptor final : public CallInterfaceDescriptor {
public:
DEFINE_PARAMETERS_NO_CONTEXT(kAddress, kExpectedValue, kTimeout)
DEFINE_RESULT_AND_PARAMETER_TYPES(MachineType::Uint32(), // result 1
MachineType::Uint32(), // kAddress
MachineType::Uint64(), // kExpectedValue
MachineType::Uint64()) // kTimeout
DECLARE_DESCRIPTOR(WasmI64AtomicWait64Descriptor, CallInterfaceDescriptor)
DEFINE_PARAMETERS_NO_CONTEXT(kAddress, kExpectedValueHigh, kExpectedValueLow,
kTimeout)
DEFINE_RESULT_AND_PARAMETER_TYPES(
MachineType::Uint32(), // result 1
MachineType::Uint32(), // kAddress
MachineType::Uint32(), // kExpectedValueHigh
MachineType::Uint32(), // kExpectedValueLow
MachineType::Float64()) // kTimeout
DECLARE_DESCRIPTOR(WasmI64AtomicWaitDescriptor, CallInterfaceDescriptor)
};
class CloneObjectWithVectorDescriptor final : public CallInterfaceDescriptor {
......
......@@ -4061,60 +4061,8 @@ Signature<MachineRepresentation>* CreateMachineSignature(
}
return builder.Build();
}
template <typename BuiltinDescriptor>
CallDescriptor* GetBuiltinCallDescriptor(WasmGraphBuilder* builder,
StubCallMode stub_mode) {
BuiltinDescriptor interface_descriptor;
return Linkage::GetStubCallDescriptor(
builder->mcgraph()->zone(), // zone
interface_descriptor, // descriptor
interface_descriptor.GetStackParameterCount(), // stack parameter count
CallDescriptor::kNoFlags, // flags
Operator::kNoProperties, // properties
stub_mode); // stub call mode
}
} // namespace
void WasmGraphBuilder::AddInt64LoweringReplacement(
CallDescriptor* original, CallDescriptor* replacement) {
if (!lowering_special_case_) {
lowering_special_case_ = std::make_unique<Int64LoweringSpecialCase>();
}
lowering_special_case_->replacements.insert({original, replacement});
}
CallDescriptor* WasmGraphBuilder::GetI32AtomicWaitCallDescriptor() {
if (i32_atomic_wait_descriptor_) return i32_atomic_wait_descriptor_;
i32_atomic_wait_descriptor_ =
GetBuiltinCallDescriptor<WasmI32AtomicWait64Descriptor>(
this, StubCallMode::kCallWasmRuntimeStub);
AddInt64LoweringReplacement(
i32_atomic_wait_descriptor_,
GetBuiltinCallDescriptor<WasmI32AtomicWait32Descriptor>(
this, StubCallMode::kCallWasmRuntimeStub));
return i32_atomic_wait_descriptor_;
}
CallDescriptor* WasmGraphBuilder::GetI64AtomicWaitCallDescriptor() {
if (i64_atomic_wait_descriptor_) return i64_atomic_wait_descriptor_;
i64_atomic_wait_descriptor_ =
GetBuiltinCallDescriptor<WasmI64AtomicWait64Descriptor>(
this, StubCallMode::kCallWasmRuntimeStub);
AddInt64LoweringReplacement(
i64_atomic_wait_descriptor_,
GetBuiltinCallDescriptor<WasmI64AtomicWait32Descriptor>(
this, StubCallMode::kCallWasmRuntimeStub));
return i64_atomic_wait_descriptor_;
}
void WasmGraphBuilder::LowerInt64(CallOrigin origin) {
if (mcgraph()->machine()->Is64()) return;
Int64Lowering r(mcgraph()->graph(), mcgraph()->machine(), mcgraph()->common(),
......@@ -4856,17 +4804,23 @@ Node* WasmGraphBuilder::AtomicOp(wasm::WasmOpcode opcode, Node* const* inputs,
// Now that we've bounds-checked, compute the effective address.
Node* address = graph()->NewNode(mcgraph()->machine()->Int32Add(),
Uint32Constant(offset), index);
auto call_descriptor = GetI32AtomicWaitCallDescriptor();
intptr_t target = mcgraph()->machine()->Is64()
? wasm::WasmCode::kWasmI32AtomicWait64
: wasm::WasmCode::kWasmI32AtomicWait32;
Node* timeout;
if (mcgraph()->machine()->Is32()) {
timeout = BuildF64SConvertI64(inputs[2]);
} else {
timeout = graph()->NewNode(mcgraph()->machine()->RoundInt64ToFloat64(),
inputs[2]);
}
WasmI32AtomicWaitDescriptor interface_descriptor;
auto call_descriptor = Linkage::GetStubCallDescriptor(
mcgraph()->zone(), interface_descriptor,
interface_descriptor.GetStackParameterCount(),
CallDescriptor::kNoFlags, Operator::kNoProperties,
StubCallMode::kCallWasmRuntimeStub);
Node* call_target = mcgraph()->RelocatableIntPtrConstant(
target, RelocInfo::WASM_STUB_CALL);
wasm::WasmCode::kWasmI32AtomicWait, RelocInfo::WASM_STUB_CALL);
node = graph()->NewNode(mcgraph()->common()->Call(call_descriptor),
call_target, address, inputs[1], inputs[2],
call_target, address, inputs[1], timeout,
effect(), control());
break;
}
......@@ -4878,18 +4832,30 @@ Node* WasmGraphBuilder::AtomicOp(wasm::WasmOpcode opcode, Node* const* inputs,
// Now that we've bounds-checked, compute the effective address.
Node* address = graph()->NewNode(mcgraph()->machine()->Int32Add(),
Uint32Constant(offset), index);
CallDescriptor* call_descriptor = GetI64AtomicWaitCallDescriptor();
intptr_t target = mcgraph()->machine()->Is64()
? wasm::WasmCode::kWasmI64AtomicWait64
: wasm::WasmCode::kWasmI64AtomicWait32;
Node* timeout;
if (mcgraph()->machine()->Is32()) {
timeout = BuildF64SConvertI64(inputs[2]);
} else {
timeout = graph()->NewNode(mcgraph()->machine()->RoundInt64ToFloat64(),
inputs[2]);
}
Node* expected_value_low = graph()->NewNode(
mcgraph()->machine()->TruncateInt64ToInt32(), inputs[1]);
Node* tmp = graph()->NewNode(mcgraph()->machine()->Word64Shr(), inputs[1],
Int64Constant(32));
Node* expected_value_high =
graph()->NewNode(mcgraph()->machine()->TruncateInt64ToInt32(), tmp);
WasmI64AtomicWaitDescriptor interface_descriptor;
auto call_descriptor = Linkage::GetStubCallDescriptor(
mcgraph()->zone(), interface_descriptor,
interface_descriptor.GetStackParameterCount(),
CallDescriptor::kNoFlags, Operator::kNoProperties,
StubCallMode::kCallWasmRuntimeStub);
Node* call_target = mcgraph()->RelocatableIntPtrConstant(
target, RelocInfo::WASM_STUB_CALL);
wasm::WasmCode::kWasmI64AtomicWait, RelocInfo::WASM_STUB_CALL);
node = graph()->NewNode(mcgraph()->common()->Call(call_descriptor),
call_target, address, inputs[1], inputs[2],
effect(), control());
call_target, address, expected_value_high,
expected_value_low, timeout, effect(), control());
break;
}
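
The restored graph code above splits the 64-bit expected value into two 32-bit halves (TruncateInt64ToInt32 for the low word, Word64Shr by 32 followed by truncation for the high word), and Runtime_WasmI64AtomicWait reassembles them further down. The equivalent scalar arithmetic as a standalone sketch (assumed names, illustration only):

// Sketch only -- assumed names, not V8 code.
#include <cstdint>

struct Int32Pair { uint32_t high; uint32_t low; };

// What the graph nodes above compute from the i64 expected value.
Int32Pair SplitExpectedValue(int64_t value) {
  uint64_t bits = static_cast<uint64_t>(value);
  return {static_cast<uint32_t>(bits >> 32),  // high word
          static_cast<uint32_t>(bits)};       // low word
}

// What the runtime does with the two Uint32 arguments it receives.
int64_t CombineExpectedValue(uint32_t high, uint32_t low) {
  return static_cast<int64_t>((static_cast<uint64_t>(high) << 32) |
                              static_cast<uint64_t>(low));
}

Under the reverted scheme this split was meant to be handled by the int64-lowering pass rather than written out in the wasm-compiler.
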
......@@ -5120,6 +5086,18 @@ void WasmGraphBuilder::RemoveBytecodePositionDecorator() {
}
namespace {
template <typename BuiltinDescriptor>
CallDescriptor* GetBuiltinCallDescriptor(WasmGraphBuilder* builder,
StubCallMode stub_mode) {
BuiltinDescriptor interface_descriptor;
return Linkage::GetStubCallDescriptor(
builder->mcgraph()->zone(), // zone
interface_descriptor, // descriptor
interface_descriptor.GetStackParameterCount(), // stack parameter count
CallDescriptor::kNoFlags, // flags
Operator::kNoProperties, // properties
stub_mode); // stub call mode
}
class WasmWrapperGraphBuilder : public WasmGraphBuilder {
public:
......@@ -5133,25 +5111,31 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
CallDescriptor* GetI64ToBigIntCallDescriptor() {
if (i64_to_bigint_descriptor_) return i64_to_bigint_descriptor_;
i64_to_bigint_descriptor_ =
GetBuiltinCallDescriptor<I64ToBigIntDescriptor>(this, stub_mode_);
AddInt64LoweringReplacement(
i64_to_bigint_descriptor_,
GetBuiltinCallDescriptor<I32PairToBigIntDescriptor>(this, stub_mode_));
if (!lowering_special_case_) {
lowering_special_case_ = std::make_unique<Int64LoweringSpecialCase>();
}
lowering_special_case_->replacements.insert(
{i64_to_bigint_descriptor_,
GetBuiltinCallDescriptor<I32PairToBigIntDescriptor>(this,
stub_mode_)});
return i64_to_bigint_descriptor_;
}
CallDescriptor* GetBigIntToI64CallDescriptor() {
if (bigint_to_i64_descriptor_) return bigint_to_i64_descriptor_;
if (!lowering_special_case_) {
lowering_special_case_ = std::make_unique<Int64LoweringSpecialCase>();
}
bigint_to_i64_descriptor_ =
GetBuiltinCallDescriptor<BigIntToI64Descriptor>(this, stub_mode_);
AddInt64LoweringReplacement(
bigint_to_i64_descriptor_,
GetBuiltinCallDescriptor<BigIntToI32PairDescriptor>(this, stub_mode_));
lowering_special_case_->replacements.insert(
{bigint_to_i64_descriptor_,
GetBuiltinCallDescriptor<BigIntToI32PairDescriptor>(this,
stub_mode_)});
return bigint_to_i64_descriptor_;
}
......
......@@ -558,13 +558,6 @@ class WasmGraphBuilder {
Node** parameters, int parameter_count);
TrapId GetTrapIdForTrap(wasm::TrapReason reason);
void AddInt64LoweringReplacement(CallDescriptor* original,
CallDescriptor* replacement);
CallDescriptor* GetI32AtomicWaitCallDescriptor();
CallDescriptor* GetI64AtomicWaitCallDescriptor();
std::unique_ptr<WasmGraphAssembler> gasm_;
Zone* const zone_;
MachineGraph* const mcgraph_;
......@@ -590,8 +583,6 @@ class WasmGraphBuilder {
compiler::SourcePositionTable* const source_position_table_ = nullptr;
std::unique_ptr<Int64LoweringSpecialCase> lowering_special_case_;
CallDescriptor* i32_atomic_wait_descriptor_ = nullptr;
CallDescriptor* i64_atomic_wait_descriptor_ = nullptr;
};
enum WasmCallKind { kWasmFunction, kWasmImportWrapper, kWasmCapiFunction };
......
......@@ -106,79 +106,55 @@ Object WaitJsTranslateReturn(Isolate* isolate, Object res) {
Object FutexEmulation::WaitJs32(Isolate* isolate,
Handle<JSArrayBuffer> array_buffer, size_t addr,
int32_t value, double rel_timeout_ms) {
Object res =
Wait<int32_t>(isolate, array_buffer, addr, value, rel_timeout_ms);
Object res = Wait32(isolate, array_buffer, addr, value, rel_timeout_ms);
return WaitJsTranslateReturn(isolate, res);
}
Object FutexEmulation::WaitJs64(Isolate* isolate,
Handle<JSArrayBuffer> array_buffer, size_t addr,
int64_t value, double rel_timeout_ms) {
Object res =
Wait<int64_t>(isolate, array_buffer, addr, value, rel_timeout_ms);
Object res = Wait64(isolate, array_buffer, addr, value, rel_timeout_ms);
return WaitJsTranslateReturn(isolate, res);
}
Object FutexEmulation::WaitWasm32(Isolate* isolate,
Handle<JSArrayBuffer> array_buffer,
size_t addr, int32_t value,
int64_t rel_timeout_ns) {
return Wait<int32_t>(isolate, array_buffer, addr, value, rel_timeout_ns >= 0,
rel_timeout_ns);
Object FutexEmulation::Wait32(Isolate* isolate,
Handle<JSArrayBuffer> array_buffer, size_t addr,
int32_t value, double rel_timeout_ms) {
return Wait<int32_t>(isolate, array_buffer, addr, value, rel_timeout_ms);
}
Object FutexEmulation::WaitWasm64(Isolate* isolate,
Handle<JSArrayBuffer> array_buffer,
size_t addr, int64_t value,
int64_t rel_timeout_ns) {
return Wait<int64_t>(isolate, array_buffer, addr, value, rel_timeout_ns >= 0,
rel_timeout_ns);
Object FutexEmulation::Wait64(Isolate* isolate,
Handle<JSArrayBuffer> array_buffer, size_t addr,
int64_t value, double rel_timeout_ms) {
return Wait<int64_t>(isolate, array_buffer, addr, value, rel_timeout_ms);
}
template <typename T>
Object FutexEmulation::Wait(Isolate* isolate,
Handle<JSArrayBuffer> array_buffer, size_t addr,
T value, double rel_timeout_ms) {
VMState<ATOMICS_WAIT> state(isolate);
DCHECK_LT(addr, array_buffer->byte_length());
bool use_timeout = rel_timeout_ms != V8_INFINITY;
int64_t rel_timeout_ns = -1;
base::TimeDelta rel_timeout;
if (use_timeout) {
// Convert to nanoseconds.
double timeout_ns = rel_timeout_ms *
base::Time::kNanosecondsPerMicrosecond *
base::Time::kMicrosecondsPerMillisecond;
if (timeout_ns > static_cast<double>(std::numeric_limits<int64_t>::max())) {
double rel_timeout_ns = rel_timeout_ms *
base::Time::kNanosecondsPerMicrosecond *
base::Time::kMicrosecondsPerMillisecond;
if (rel_timeout_ns >
static_cast<double>(std::numeric_limits<int64_t>::max())) {
// 2**63 nanoseconds is 292 years. Let's just treat anything greater as
// infinite.
use_timeout = false;
} else {
rel_timeout_ns = static_cast<int64_t>(timeout_ns);
rel_timeout = base::TimeDelta::FromNanoseconds(
static_cast<int64_t>(rel_timeout_ns));
}
}
return Wait(isolate, array_buffer, addr, value, use_timeout, rel_timeout_ns);
}
namespace {
double WaitTimeoutInMs(double timeout_ns) {
return timeout_ns < 0
? V8_INFINITY
: timeout_ns / (base::Time::kNanosecondsPerMicrosecond *
base::Time::kMicrosecondsPerMillisecond);
}
} // namespace
template <typename T>
Object FutexEmulation::Wait(Isolate* isolate,
Handle<JSArrayBuffer> array_buffer, size_t addr,
T value, bool use_timeout, int64_t rel_timeout_ns) {
VMState<ATOMICS_WAIT> state(isolate);
base::TimeDelta rel_timeout =
base::TimeDelta::FromNanoseconds(rel_timeout_ns);
// We have to convert the timeout back to double for the AtomicsWaitCallback.
double rel_timeout_ms = WaitTimeoutInMs(static_cast<double>(rel_timeout_ns));
AtomicsWaitWakeHandle stop_handle(isolate);
isolate->RunAtomicsWaitCallback(AtomicsWaitEvent::kStartWait, array_buffer,
......
......@@ -126,13 +126,13 @@ class FutexEmulation : public AllStatic {
// Same as WaitJs above except it returns 0 (ok), 1 (not equal) and 2 (timed
// out) as expected by Wasm.
static Object WaitWasm32(Isolate* isolate, Handle<JSArrayBuffer> array_buffer,
size_t addr, int32_t value, int64_t rel_timeout_ns);
static Object Wait32(Isolate* isolate, Handle<JSArrayBuffer> array_buffer,
size_t addr, int32_t value, double rel_timeout_ms);
// Same as Wait32 above except it checks for an int64_t value in the
// array_buffer.
static Object WaitWasm64(Isolate* isolate, Handle<JSArrayBuffer> array_buffer,
size_t addr, int64_t value, int64_t rel_timeout_ns);
static Object Wait64(Isolate* isolate, Handle<JSArrayBuffer> array_buffer,
size_t addr, int64_t value, double rel_timeout_ms);
// Wake |num_waiters_to_wake| threads that are waiting on the given |addr|.
// |num_waiters_to_wake| can be kWakeAll, in which case all waiters are
......@@ -154,11 +154,6 @@ class FutexEmulation : public AllStatic {
static Object Wait(Isolate* isolate, Handle<JSArrayBuffer> array_buffer,
size_t addr, T value, double rel_timeout_ms);
template <typename T>
static Object Wait(Isolate* isolate, Handle<JSArrayBuffer> array_buffer,
size_t addr, T value, bool use_timeout,
int64_t rel_timeout_ns);
// `mutex_` protects the composition of `wait_list_` (i.e. no elements may be
// added or removed without holding this mutex), as well as the `waiting_`
// and `interrupted_` fields for each individual list node that is currently
......
......@@ -391,6 +391,13 @@ RUNTIME_FUNCTION(Runtime_WasmAtomicNotify) {
return FutexEmulation::Wake(array_buffer, address, count);
}
double WaitTimeoutInMs(double timeout_ns) {
return timeout_ns < 0
? V8_INFINITY
: timeout_ns / (base::Time::kNanosecondsPerMicrosecond *
base::Time::kMicrosecondsPerMillisecond);
}
RUNTIME_FUNCTION(Runtime_WasmI32AtomicWait) {
ClearThreadInWasmScope clear_wasm_flag;
HandleScope scope(isolate);
......@@ -398,28 +405,30 @@ RUNTIME_FUNCTION(Runtime_WasmI32AtomicWait) {
CONVERT_ARG_HANDLE_CHECKED(WasmInstanceObject, instance, 0);
CONVERT_NUMBER_CHECKED(uint32_t, address, Uint32, args[1]);
CONVERT_NUMBER_CHECKED(int32_t, expected_value, Int32, args[2]);
CONVERT_ARG_HANDLE_CHECKED(BigInt, timeout_ns, 3);
CONVERT_DOUBLE_ARG_CHECKED(timeout_ns, 3);
double timeout_ms = WaitTimeoutInMs(timeout_ns);
Handle<JSArrayBuffer> array_buffer =
getSharedArrayBuffer(instance, isolate, address);
return FutexEmulation::WaitWasm32(isolate, array_buffer, address,
expected_value, timeout_ns->AsInt64());
return FutexEmulation::Wait32(isolate, array_buffer, address, expected_value,
timeout_ms);
}
RUNTIME_FUNCTION(Runtime_WasmI64AtomicWait) {
ClearThreadInWasmScope clear_wasm_flag;
HandleScope scope(isolate);
DCHECK_EQ(4, args.length());
DCHECK_EQ(5, args.length());
CONVERT_ARG_HANDLE_CHECKED(WasmInstanceObject, instance, 0);
CONVERT_NUMBER_CHECKED(uint32_t, address, Uint32, args[1]);
CONVERT_ARG_HANDLE_CHECKED(BigInt, expected_value, 2);
CONVERT_ARG_HANDLE_CHECKED(BigInt, timeout_ns, 3);
CONVERT_NUMBER_CHECKED(uint32_t, expected_value_high, Uint32, args[2]);
CONVERT_NUMBER_CHECKED(uint32_t, expected_value_low, Uint32, args[3]);
CONVERT_DOUBLE_ARG_CHECKED(timeout_ns, 4);
int64_t expected_value = (static_cast<uint64_t>(expected_value_high) << 32) |
static_cast<uint64_t>(expected_value_low);
double timeout_ms = WaitTimeoutInMs(timeout_ns);
Handle<JSArrayBuffer> array_buffer =
getSharedArrayBuffer(instance, isolate, address);
return FutexEmulation::WaitWasm64(isolate, array_buffer, address,
expected_value->AsInt64(),
timeout_ns->AsInt64());
return FutexEmulation::Wait64(isolate, array_buffer, address, expected_value,
timeout_ms);
}
namespace {
......
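
Both runtime functions return the FutexEmulation result to the caller unchanged; as the comment in futex-emulation.h above notes, wasm code interprets that Smi as 0 (ok), 1 (not equal) or 2 (timed out). As a sketch (the enum is illustrative only; V8 passes raw Smi values):

// Illustrative only -- V8 has no such enum; it returns raw Smi values.
enum class WasmWaitResult : int {
  kOk = 0,        // the waiter was woken before the timeout
  kNotEqual = 1,  // the memory value did not match the expected value
  kTimedOut = 2   // the relative timeout expired
};
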
......@@ -50,10 +50,8 @@ struct WasmModule;
V(WasmCompileLazy) \
V(WasmDebugBreak) \
V(WasmAtomicNotify) \
V(WasmI32AtomicWait32) \
V(WasmI32AtomicWait64) \
V(WasmI64AtomicWait32) \
V(WasmI64AtomicWait64) \
V(WasmI32AtomicWait) \
V(WasmI64AtomicWait) \
V(WasmMemoryGrow) \
V(WasmTableGet) \
V(WasmTableSet) \
......
......@@ -1752,11 +1752,15 @@ class ThreadImpl {
bool ExtractAtomicWaitNotifyParams(Decoder* decoder, InterpreterCode* code,
pc_t pc, int* const len,
uint32_t* buffer_offset, type* val,
int64_t* timeout = nullptr) {
double* timeout = nullptr) {
MemoryAccessImmediate<Decoder::kValidate> imm(decoder, code->at(pc + 1),
sizeof(type));
if (timeout) {
*timeout = Pop().to<int64_t>();
double timeout_ns = Pop().to<int64_t>();
*timeout = (timeout_ns < 0)
? V8_INFINITY
: timeout_ns / (base::Time::kNanosecondsPerMicrosecond *
base::Time::kMicrosecondsPerMillisecond);
}
*val = Pop().to<type>();
auto index = Pop().to<uint32_t>();
......@@ -2165,7 +2169,7 @@ class ThreadImpl {
break;
case kExprI32AtomicWait: {
int32_t val;
int64_t timeout;
double timeout;
uint32_t buffer_offset;
if (!ExtractAtomicWaitNotifyParams<int32_t>(
decoder, code, pc, len, &buffer_offset, &val, &timeout)) {
......@@ -2174,14 +2178,14 @@ class ThreadImpl {
HandleScope handle_scope(isolate_);
Handle<JSArrayBuffer> array_buffer(
instance_object_->memory_object().array_buffer(), isolate_);
auto result = FutexEmulation::WaitWasm32(isolate_, array_buffer,
buffer_offset, val, timeout);
auto result = FutexEmulation::Wait32(isolate_, array_buffer,
buffer_offset, val, timeout);
Push(WasmValue(result.ToSmi().value()));
break;
}
case kExprI64AtomicWait: {
int64_t val;
int64_t timeout;
double timeout;
uint32_t buffer_offset;
if (!ExtractAtomicWaitNotifyParams<int64_t>(
decoder, code, pc, len, &buffer_offset, &val, &timeout)) {
......@@ -2190,8 +2194,8 @@ class ThreadImpl {
HandleScope handle_scope(isolate_);
Handle<JSArrayBuffer> array_buffer(
instance_object_->memory_object().array_buffer(), isolate_);
auto result = FutexEmulation::WaitWasm64(isolate_, array_buffer,
buffer_offset, val, timeout);
auto result = FutexEmulation::Wait64(isolate_, array_buffer,
buffer_offset, val, timeout);
Push(WasmValue(result.ToSmi().value()));
break;
}
......