Commit 729235c2 authored by Clemens Backes, committed by Commit Bot

[wasm][memory64] Prepare atomic builtins for i64 indexes

The offset (also called "address") passed to the atomic builtins was
32-bit until now. With memory64, we will also have to accept 64-bit
values there, or at least values within the allocatable memory range.
This CL thus changes the builtins to receive uintptr values instead of
uint32, and to pass them on to the runtime functions as double (as
before). The runtime then casts them back to uintptr_t instead of
uint32_t.

Liftoff is extended to zero-extend the uint32 value where needed;
TurboFan already passed a machine-word-sized integer before.

Drive-by: Rename some "address" identifiers to "offset" to make the
semantics clearer.
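
To illustrate the round-trip, here is a minimal standalone C++ sketch
(not V8 code; the helper names are hypothetical):

  #include <cassert>
  #include <cstdint>

  // Offsets are bounded by the allocatable memory range, which lies far
  // below 2^53, so encoding them in a float64 is lossless.
  double OffsetToDouble(uintptr_t offset) {
    double encoded = static_cast<double>(offset);
    // Exact for all values up to 2^53 (the float64 mantissa width).
    assert(static_cast<uintptr_t>(encoded) == offset);
    return encoded;
  }

  uintptr_t DoubleToOffset(double encoded) {
    return static_cast<uintptr_t>(encoded);
  }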

R=ahaas@chromium.org
CC=manoskouk@chromium.org

Bug: v8:10949
Change-Id: I66968cc99a908775156c952da46d2f26219ffb58
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2489685
Commit-Queue: Clemens Backes <clemensb@chromium.org>
Reviewed-by: Andreas Haas <ahaas@chromium.org>
Cr-Commit-Position: refs/heads/master@{#70700}
parent 365948ed
@@ -282,24 +282,32 @@ builtin WasmUint32ToNumber(value: uint32): Number {
   return ChangeUint32ToTagged(value);
 }
 
+builtin UintPtr53ToNumber(value: uintptr): Number {
+  if (value <= kSmiMaxValue) return Convert<Smi>(Convert<intptr>(value));
+  const valueFloat = ChangeUintPtrToFloat64(value);
+  // Values need to be within [0..2^53], such that they can be represented as
+  // float64.
+  assert(ChangeFloat64ToUintPtr(valueFloat) == value);
+  return AllocateHeapNumberWithValue(valueFloat);
+}
+
 extern builtin I64ToBigInt(intptr): BigInt;
 
-builtin WasmAtomicNotify(address: uint32, count: uint32): uint32 {
+builtin WasmAtomicNotify(offset: uintptr, count: uint32): uint32 {
   const instance: WasmInstanceObject = LoadInstanceFromFrame();
   const result: Smi = runtime::WasmAtomicNotify(
-      LoadContextFromInstance(instance), instance, WasmUint32ToNumber(address),
+      LoadContextFromInstance(instance), instance, UintPtr53ToNumber(offset),
       WasmUint32ToNumber(count));
   return Unsigned(SmiToInt32(result));
 }
 
 builtin WasmI32AtomicWait64(
-    address: uint32, expectedValue: int32, timeout: intptr): uint32 {
+    offset: uintptr, expectedValue: int32, timeout: intptr): uint32 {
   if constexpr (Is64()) {
     const instance: WasmInstanceObject = LoadInstanceFromFrame();
     const result: Smi = runtime::WasmI32AtomicWait(
-        LoadContextFromInstance(instance), instance,
-        WasmUint32ToNumber(address), WasmInt32ToNumber(expectedValue),
-        I64ToBigInt(timeout));
+        LoadContextFromInstance(instance), instance, UintPtr53ToNumber(offset),
+        WasmInt32ToNumber(expectedValue), I64ToBigInt(timeout));
     return Unsigned(SmiToInt32(result));
   } else {
     unreachable;
@@ -307,13 +315,12 @@ builtin WasmI32AtomicWait64(
   }
 }
 
 builtin WasmI64AtomicWait64(
-    address: uint32, expectedValue: intptr, timeout: intptr): uint32 {
+    offset: uintptr, expectedValue: intptr, timeout: intptr): uint32 {
   if constexpr (Is64()) {
     const instance: WasmInstanceObject = LoadInstanceFromFrame();
     const result: Smi = runtime::WasmI64AtomicWait(
-        LoadContextFromInstance(instance), instance,
-        WasmUint32ToNumber(address), I64ToBigInt(expectedValue),
-        I64ToBigInt(timeout));
+        LoadContextFromInstance(instance), instance, UintPtr53ToNumber(offset),
+        I64ToBigInt(expectedValue), I64ToBigInt(timeout));
     return Unsigned(SmiToInt32(result));
   } else {
     unreachable;
@@ -5226,8 +5226,9 @@ Node* WasmGraphBuilder::AtomicOp(wasm::WasmOpcode opcode, Node* const* inputs,
         graph()->NewNode(op, num_actual_inputs + 4, input_nodes));
   }
 
-  // After we've bounds-checked, compute the effective address.
-  Node* address = gasm_->IntAdd(gasm_->UintPtrConstant(capped_offset), index);
+  // After we've bounds-checked, compute the effective offset.
+  Node* effective_offset =
+      gasm_->IntAdd(gasm_->UintPtrConstant(capped_offset), index);
 
   switch (opcode) {
     case wasm::kExprAtomicNotify: {
@@ -5236,7 +5237,8 @@ Node* WasmGraphBuilder::AtomicOp(wasm::WasmOpcode opcode, Node* const* inputs,
           this, StubCallMode::kCallWasmRuntimeStub);
       Node* call_target = mcgraph()->RelocatableIntPtrConstant(
           wasm::WasmCode::kWasmAtomicNotify, RelocInfo::WASM_STUB_CALL);
-      return gasm_->Call(call_descriptor, call_target, address, inputs[1]);
+      return gasm_->Call(call_descriptor, call_target, effective_offset,
+                         inputs[1]);
     }
 
     case wasm::kExprI32AtomicWait: {
@@ -5248,8 +5250,8 @@ Node* WasmGraphBuilder::AtomicOp(wasm::WasmOpcode opcode, Node* const* inputs,
       Node* call_target = mcgraph()->RelocatableIntPtrConstant(
           target, RelocInfo::WASM_STUB_CALL);
-      return gasm_->Call(call_descriptor, call_target, address, inputs[1],
-                         inputs[2]);
+      return gasm_->Call(call_descriptor, call_target, effective_offset,
+                         inputs[1], inputs[2]);
     }
 
     case wasm::kExprI64AtomicWait: {
@@ -5261,8 +5263,8 @@ Node* WasmGraphBuilder::AtomicOp(wasm::WasmOpcode opcode, Node* const* inputs,
       Node* call_target = mcgraph()->RelocatableIntPtrConstant(
           target, RelocInfo::WASM_STUB_CALL);
-      return gasm_->Call(call_descriptor, call_target, address, inputs[1],
-                         inputs[2]);
+      return gasm_->Call(call_descriptor, call_target, effective_offset,
+                         inputs[1], inputs[2]);
     }
 
     default:
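
For reference, the value the stubs now receive combines the static and
dynamic parts of the memory access. A sketch of the computation, assuming
both operands are pointer-sized after the bounds check (the function name
is hypothetical):

  #include <cstdint>

  // effective_offset = statically-known offset (capped to the memory
  // size) + dynamic index; this cannot wrap for accesses that passed
  // the bounds check.
  uintptr_t EffectiveOffset(uintptr_t capped_offset, uintptr_t index) {
    return capped_offset + index;
  }
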
@@ -255,32 +255,20 @@ RUNTIME_FUNCTION(Runtime_WasmTriggerTierUp) {
   return ReadOnlyRoots(isolate).undefined_value();
 }
 
-// Should be called from within a handle scope
-Handle<JSArrayBuffer> GetArrayBuffer(Handle<WasmInstanceObject> instance,
-                                     Isolate* isolate, uint32_t address) {
-  DCHECK(instance->has_memory_object());
-  Handle<JSArrayBuffer> array_buffer(instance->memory_object().array_buffer(),
-                                     isolate);
-  // Should have trapped if address was OOB
-  DCHECK_LT(address, array_buffer->byte_length());
-  return array_buffer;
-}
-
 RUNTIME_FUNCTION(Runtime_WasmAtomicNotify) {
   ClearThreadInWasmScope clear_wasm_flag;
   HandleScope scope(isolate);
   DCHECK_EQ(3, args.length());
   CONVERT_ARG_HANDLE_CHECKED(WasmInstanceObject, instance, 0);
-  CONVERT_NUMBER_CHECKED(uint32_t, address, Uint32, args[1]);
+  CONVERT_DOUBLE_ARG_CHECKED(offset_double, 1);
+  uintptr_t offset = static_cast<uintptr_t>(offset_double);
   CONVERT_NUMBER_CHECKED(uint32_t, count, Uint32, args[2]);
-  Handle<JSArrayBuffer> array_buffer =
-      GetArrayBuffer(instance, isolate, address);
-  if (array_buffer->is_shared()) {
-    return FutexEmulation::Wake(array_buffer, address, count);
-  } else {
-    return Smi::FromInt(0);
-  }
+  Handle<JSArrayBuffer> array_buffer{instance->memory_object().array_buffer(),
+                                     isolate};
+  // Should have trapped if address was OOB.
+  DCHECK_LT(offset, array_buffer->byte_length());
+  if (!array_buffer->is_shared()) return Smi::FromInt(0);
+  return FutexEmulation::Wake(array_buffer, offset, count);
 }
@@ -288,18 +276,21 @@ RUNTIME_FUNCTION(Runtime_WasmI32AtomicWait) {
   HandleScope scope(isolate);
   DCHECK_EQ(4, args.length());
   CONVERT_ARG_HANDLE_CHECKED(WasmInstanceObject, instance, 0);
-  CONVERT_NUMBER_CHECKED(uint32_t, address, Uint32, args[1]);
+  CONVERT_DOUBLE_ARG_CHECKED(offset_double, 1);
+  uintptr_t offset = static_cast<uintptr_t>(offset_double);
   CONVERT_NUMBER_CHECKED(int32_t, expected_value, Int32, args[2]);
   CONVERT_ARG_HANDLE_CHECKED(BigInt, timeout_ns, 3);
-  Handle<JSArrayBuffer> array_buffer =
-      GetArrayBuffer(instance, isolate, address);
+  Handle<JSArrayBuffer> array_buffer{instance->memory_object().array_buffer(),
+                                     isolate};
+  // Should have trapped if address was OOB.
+  DCHECK_LT(offset, array_buffer->byte_length());
 
-  // Trap if memory is not shared
+  // Trap if memory is not shared.
   if (!array_buffer->is_shared()) {
     return ThrowWasmError(isolate, MessageTemplate::kAtomicsWaitNotAllowed);
   }
-  return FutexEmulation::WaitWasm32(isolate, array_buffer, address,
+  return FutexEmulation::WaitWasm32(isolate, array_buffer, offset,
                                     expected_value, timeout_ns->AsInt64());
 }
@@ -308,18 +299,21 @@ RUNTIME_FUNCTION(Runtime_WasmI64AtomicWait) {
   HandleScope scope(isolate);
   DCHECK_EQ(4, args.length());
   CONVERT_ARG_HANDLE_CHECKED(WasmInstanceObject, instance, 0);
-  CONVERT_NUMBER_CHECKED(uint32_t, address, Uint32, args[1]);
+  CONVERT_DOUBLE_ARG_CHECKED(offset_double, 1);
+  uintptr_t offset = static_cast<uintptr_t>(offset_double);
   CONVERT_ARG_HANDLE_CHECKED(BigInt, expected_value, 2);
   CONVERT_ARG_HANDLE_CHECKED(BigInt, timeout_ns, 3);
-  Handle<JSArrayBuffer> array_buffer =
-      GetArrayBuffer(instance, isolate, address);
+  Handle<JSArrayBuffer> array_buffer{instance->memory_object().array_buffer(),
+                                     isolate};
+  // Should have trapped if address was OOB.
+  DCHECK_LT(offset, array_buffer->byte_length());
 
-  // Trap if memory is not shared
+  // Trap if memory is not shared.
   if (!array_buffer->is_shared()) {
     return ThrowWasmError(isolate, MessageTemplate::kAtomicsWaitNotAllowed);
   }
-  return FutexEmulation::WaitWasm64(isolate, array_buffer, address,
+  return FutexEmulation::WaitWasm64(isolate, array_buffer, offset,
                                     expected_value->AsInt64(),
                                     timeout_ns->AsInt64());
 }
@@ -675,6 +675,15 @@ class LiftoffAssembler : public TurboAssembler {
     }
   }
 
+  inline void emit_ptrsize_zeroextend_i32(Register dst, Register src) {
+    if (kSystemPointerSize == 8) {
+      emit_type_conversion(kExprI64UConvertI32, LiftoffRegister(dst),
+                           LiftoffRegister(src));
+    } else if (dst != src) {
+      Move(dst, src, kWasmI32);
+    }
+  }
+
   // f32 binops.
   inline void emit_f32_add(DoubleRegister dst, DoubleRegister lhs,
                            DoubleRegister rhs);
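
A value-level sketch of what the new helper guarantees, assuming that on
64-bit targets the upper half of a register holding a wasm i32 is not
guaranteed to be zero (standalone C++, hypothetical name):

  #include <cstdint>

  // Widen a 32-bit index to pointer width with zero extension so it can
  // be passed as a uintptr builtin argument; on 32-bit targets the helper
  // is just a register move (or a no-op when dst == src).
  uint64_t PtrsizeZeroExtendI32(uint32_t src) {
    return static_cast<uint64_t>(src);  // upper 32 bits become zero
  }
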
@@ -3249,13 +3249,15 @@ class LiftoffCompiler {
     uint32_t offset = imm.offset;
     index_reg = AddMemoryMasking(index_reg, &offset, &pinned);
-    Register index_plus_offset = index_reg;
+    Register index_plus_offset =
+        __ cache_state()->is_used(LiftoffRegister(index_reg))
+            ? pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp()
+            : index_reg;
     if (offset) {
-      if (__ cache_state()->is_used(LiftoffRegister(index_reg))) {
-        index_plus_offset =
-            pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
-      }
       __ emit_i32_addi(index_plus_offset, index_reg, offset);
+      __ emit_ptrsize_zeroextend_i32(index_plus_offset, index_plus_offset);
+    } else {
+      __ emit_ptrsize_zeroextend_i32(index_plus_offset, index_reg);
     }
 
     LiftoffAssembler::VarState timeout =
@@ -3296,7 +3298,7 @@ class LiftoffCompiler {
       }
     }
 
-    ValueType sig_reps[] = {kWasmI32, type, kWasmI64};
+    ValueType sig_reps[] = {kPointerValueType, type, kWasmI64};
     FunctionSig sig(0, 3, sig_reps);
 
     __ PrepareBuiltinCall(&sig, call_descriptor,
@@ -3324,16 +3326,18 @@ class LiftoffCompiler {
     uint32_t offset = imm.offset;
     index_reg = AddMemoryMasking(index_reg, &offset, &pinned);
-    Register index_plus_offset = index_reg;
+    Register index_plus_offset =
+        __ cache_state()->is_used(LiftoffRegister(index_reg))
+            ? pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp()
+            : index_reg;
     if (offset) {
-      if (__ cache_state()->is_used(LiftoffRegister(index_reg))) {
-        index_plus_offset =
-            pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
-      }
       __ emit_i32_addi(index_plus_offset, index_reg, offset);
+      __ emit_ptrsize_zeroextend_i32(index_plus_offset, index_plus_offset);
+    } else {
+      __ emit_ptrsize_zeroextend_i32(index_plus_offset, index_reg);
     }
 
-    ValueType sig_reps[] = {kWasmI32, kWasmI32, kWasmI32};
+    ValueType sig_reps[] = {kWasmI32, kPointerValueType, kWasmI32};
     FunctionSig sig(1, 2, sig_reps);
     auto call_descriptor =
         GetBuiltinCallDescriptor<WasmAtomicNotifyDescriptor>(compilation_zone_);
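
kPointerValueType selects the machine-word value type for the call
descriptor, matching the uintptr parameter of the builtins. Its assumed
definition (paraphrased; not shown in this diff):

  // Assumption: pointer-sized wasm value type used in call descriptors.
  constexpr ValueType kPointerValueType =
      kSystemPointerSize == 8 ? kWasmI64 : kWasmI32;
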
@@ -58,7 +58,8 @@ CAST_ACCESSOR(WasmArray)
     Object value = TaggedField<Object, offset>::load(isolate, *this);        \
     return !value.IsUndefined(GetReadOnlyRoots(isolate));                    \
   }                                                                          \
-  ACCESSORS(holder, name, type, offset)
+  ACCESSORS_CHECKED2(holder, name, type, offset,                             \
+                     !value.IsUndefined(GetReadOnlyRoots(isolate)), true)
 
 #define PRIMITIVE_ACCESSORS(holder, name, type, offset) \
   type holder::name() const {                           \