Commit ec5b796f authored by Jakob Kummerow, committed by Commit Bot

[wasm-gc] Liftoff support part 6: funcrefs

This implements support for the following instructions:
ref.func, call_ref, return_call_ref

Bug: v8:7748,v8:9495
Change-Id: If5bdc2b9bc2347de056de2917430b8d9dc901c53
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2632591
Commit-Queue: Jakob Kummerow <jkummerow@chromium.org>
Reviewed-by: Clemens Backes <clemensb@chromium.org>
Cr-Commit-Position: refs/heads/master@{#72270}
parent 6d11bcda
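
For readers unfamiliar with the typed function references proposal: ref.func creates a first-class reference to a declared function, and call_ref / return_call_ref invoke (or tail-call) such a reference directly, without going through a table as call_indirect does. The following standalone C++ sketch (not V8 code; all names are illustrative) models only the data flow: a funcref carries a call target plus an implicit "ref" argument, which for imported callables is the {instance, callable} pair that the WasmAllocatePair builtin below creates.

    // Standalone model (not V8 code) of what a funcref carries and what
    // call_ref does with it. All names here are illustrative only.
    #include <cstdio>

    struct Instance {  // stands in for WasmInstanceObject
      int global = 40;
    };

    struct FuncRef {
      int (*target)(Instance*, int);  // call target
      Instance* ref;                  // implicit first argument of the call
    };

    // A declared function; ref.func produces a reference to it.
    static int AddGlobal(Instance* instance, int x) {
      return instance->global + x;
    }

    // call_ref: after a null check (omitted here), an indirect call through
    // the reference, passing its implicit "ref" argument along.
    static int CallRef(const FuncRef& funcref, int arg) {
      return funcref.target(funcref.ref, arg);
    }

    int main() {
      Instance instance;
      FuncRef funcref{AddGlobal, &instance};     // ref.func
      std::printf("%d\n", CallRef(funcref, 2));  // call_ref; prints 42
    }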
@@ -815,7 +815,6 @@ namespace internal {
   TFC(WasmFloat64ToNumber, WasmFloat64ToNumber)        \
   TFC(WasmI32AtomicWait32, WasmI32AtomicWait32)        \
   TFC(WasmI64AtomicWait32, WasmI64AtomicWait32)        \
-  TFS(WasmAllocatePair, kValue1, kValue2)              \
                                                        \
   /* WeakMap */                                        \
   TFJ(WeakMapConstructor, kDontAdaptArgumentsSentinel) \

@@ -107,27 +107,5 @@ TF_BUILTIN(WasmI64AtomicWait32, WasmBuiltinsAssembler) {
   Return(Unsigned(SmiToInt32(result_smi)));
 }
-
-TF_BUILTIN(WasmAllocatePair, WasmBuiltinsAssembler) {
-  TNode<WasmInstanceObject> instance = LoadInstanceFromFrame();
-  TNode<HeapObject> value1 = Parameter<HeapObject>(Descriptor::kValue1);
-  TNode<HeapObject> value2 = Parameter<HeapObject>(Descriptor::kValue2);
-  TNode<IntPtrT> roots = LoadObjectField<IntPtrT>(
-      instance, WasmInstanceObject::kIsolateRootOffset);
-  TNode<Map> map = CAST(Load(
-      MachineType::AnyTagged(), roots,
-      IntPtrConstant(IsolateData::root_slot_offset(RootIndex::kTuple2Map))));
-  TNode<IntPtrT> instance_size =
-      TimesTaggedSize(LoadMapInstanceSizeInWords(map));
-  TNode<Tuple2> result = UncheckedCast<Tuple2>(Allocate(instance_size));
-  StoreMap(result, map);
-  StoreObjectField(result, Tuple2::kValue1Offset, value1);
-  StoreObjectField(result, Tuple2::kValue2Offset, value2);
-  Return(result);
-}

 }  // namespace internal
 }  // namespace v8

@@ -231,6 +231,11 @@ builtin WasmAllocateJSArray(implicit context: Context)(size: Smi): JSArray {
   return AllocateJSArray(ElementsKind::PACKED_ELEMENTS, map, size, size);
 }

+builtin WasmAllocatePair(first: Object, second: Object): Tuple2 {
+  const tuple2Map: Map = %GetClassMapConstant<Tuple2>();
+  return new Tuple2{map: tuple2Map, value1: first, value2: second};
+}
+
 builtin WasmAllocateRtt(typeIndex: intptr, parent: Map): Map {
   tail runtime::WasmAllocateRtt(
       LoadContextFromFrame(), SmiTag(typeIndex), parent);
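
Why the Torque replacement no longer needs a context argument: %GetClassMapConstant<Tuple2>() embeds the Tuple2 map as a compile-time constant, whereas the removed CSA builtin had to fetch it from the isolate's root table at runtime. That is also why the call site in wasm-compiler.cc, in the next hunk, drops the NativeContext argument. A standalone C++ sketch of the difference (not V8 code; names are illustrative):

    #include <cstdio>

    struct Map { const char* name; };

    // Runtime lookup, as in the removed CSA builtin: the map is read out
    // of a per-isolate root table reachable from the instance.
    static const Map kRootTable[] = {{"Tuple2Map"}};
    static const Map* LoadMapFromRoots(int root_slot) {
      return &kRootTable[root_slot];
    }

    // Compile-time constant, as %GetClassMapConstant provides in Torque:
    // no table lookup, hence no context or instance parameter needed.
    static constexpr Map kTuple2Map{"Tuple2Map"};

    int main() {
      std::printf("%s == %s\n", LoadMapFromRoots(0)->name, kTuple2Map.name);
    }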

@@ -3221,9 +3221,8 @@ Node* WasmGraphBuilder::BuildCallRef(uint32_t sig_index, Vector<Node*> args,
           wasm::ObjectAccess::ToTagged(WasmJSFunctionData::kCallableOffset));
       // TODO(manoskouk): Find an elegant way to avoid allocating this pair
       // for every call.
-      Node* function_instance_node = CALL_BUILTIN(
-          WasmAllocatePair, instance_node_.get(), callable,
-          LOAD_INSTANCE_FIELD(NativeContext, MachineType::TaggedPointer()));
+      Node* function_instance_node =
+          CALL_BUILTIN(WasmAllocatePair, instance_node_.get(), callable);
       gasm_->Goto(&end_label, call_target, function_instance_node);
     }

@@ -90,8 +90,24 @@ class StackTransferRecipe {
     DCHECK(load_dst_regs_.is_empty());
   }

+#if DEBUG
+  bool CheckCompatibleStackSlotTypes(ValueType dst, ValueType src) {
+    if (dst.is_object_reference_type()) {
+      // Since Liftoff doesn't do accurate type tracking (e.g. on loop back
+      // edges), we only care that pointer types stay amongst pointer types.
+      // It's fine if ref/optref overwrite each other.
+      DCHECK(src.is_object_reference_type());
+      // TODO(7748): Check that one type is subtype of the other?
+    } else {
+      // All other types (primitive numbers, RTTs, bottom/stmt) must be equal.
+      DCHECK_EQ(dst, src);
+    }
+    return true;  // Dummy so this can be called via DCHECK.
+  }
+#endif
+
   V8_INLINE void TransferStackSlot(const VarState& dst, const VarState& src) {
-    DCHECK_EQ(dst.type(), src.type());
+    DCHECK(CheckCompatibleStackSlotTypes(dst.type(), src.type()));
     if (dst.is_reg()) {
       LoadIntoRegister(dst.reg(), src, src.offset());
       return;
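
The rule the new DCHECK enforces can be summarized in a standalone sketch (not V8 code; the enum is illustrative): reference types may overwrite each other, because Liftoff merges abstract states without tracking precise ref types, while every other value type must match exactly.

    #include <cassert>

    enum class ValueKind { kI32, kI64, kF32, kF64, kRef, kOptRef, kRtt };

    static bool IsObjectReference(ValueKind k) {
      return k == ValueKind::kRef || k == ValueKind::kOptRef;
    }

    // Mirrors the check above: refs may replace refs (nullability can
    // differ across merge points), everything else must be identical.
    static bool CompatibleStackSlotTypes(ValueKind dst, ValueKind src) {
      if (IsObjectReference(dst)) return IsObjectReference(src);
      return dst == src;
    }

    int main() {
      assert(CompatibleStackSlotTypes(ValueKind::kRef, ValueKind::kOptRef));
      assert(CompatibleStackSlotTypes(ValueKind::kI32, ValueKind::kI32));
      assert(!CompatibleStackSlotTypes(ValueKind::kI32, ValueKind::kI64));
    }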

@@ -561,15 +561,19 @@ class LiftoffAssembler : public TurboAssembler {
   inline void LoadFixedArrayLengthAsInt32(LiftoffRegister dst, Register array,
                                           LiftoffRegList pinned) {
     int offset = FixedArray::kLengthOffset - kHeapObjectTag;
+    LoadTaggedSignedAsInt32(dst, array, offset, pinned);
+  }
+
+  inline void LoadTaggedSignedAsInt32(LiftoffRegister dst, Register src_addr,
+                                      int32_t offset, LiftoffRegList pinned) {
     if (SmiValuesAre32Bits()) {
 #if V8_TARGET_LITTLE_ENDIAN
       DCHECK_EQ(kSmiShiftSize + kSmiTagSize, 4 * kBitsPerByte);
       offset += 4;
 #endif
-      Load(dst, array, no_reg, offset, LoadType::kI32Load, pinned);
+      Load(dst, src_addr, no_reg, offset, LoadType::kI32Load, pinned);
     } else {
       DCHECK(SmiValuesAre31Bits());
-      Load(dst, array, no_reg, offset, LoadType::kI32Load, pinned);
+      Load(dst, src_addr, no_reg, offset, LoadType::kI32Load, pinned);
       emit_i32_sari(dst.gp(), dst.gp(), kSmiTagSize);
     }
   }
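
The two branches correspond to V8's two Smi encodings. With 32-bit Smis (shift size 31 plus one tag bit, so the payload fills the upper half of a 64-bit word), a plain 32-bit load at offset + 4 on a little-endian target reads the value directly, which is what the DCHECK above asserts. With 31-bit Smis, the payload is shifted left by the single tag bit, and an arithmetic shift right restores it. A standalone sketch (not V8 code):

    #include <cstdint>
    #include <cstdio>

    static int32_t LoadSmi32BitPayload(uint64_t tagged) {
      // 32-bit Smis: payload lives in the upper 32 bits of the 64-bit
      // word, hence the "offset += 4" on little-endian targets above.
      return static_cast<int32_t>(tagged >> 32);
    }

    static int32_t LoadSmi31BitPayload(uint32_t tagged) {
      // 31-bit Smis: payload is shifted left by the one tag bit; an
      // arithmetic shift right (emit_i32_sari above) restores it.
      return static_cast<int32_t>(tagged) >> 1;
    }

    int main() {
      uint64_t smi32 = static_cast<uint64_t>(123) << 32;  // Smi-tagged 123
      uint32_t smi31 = 123u << 1;                         // Smi-tagged 123
      std::printf("%d %d\n", LoadSmi32BitPayload(smi32),
                  LoadSmi31BitPayload(smi31));  // prints: 123 123
    }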
This diff is collapsed.

@@ -1000,8 +1000,9 @@ WASM_COMPILED_EXEC_TEST(ArrayNewMap) {
   CHECK_EQ(Handle<WasmArray>::cast(result)->map(), *map);
 }

-TEST(FunctionRefs) {
-  WasmGCTester tester;
+WASM_COMPILED_EXEC_TEST(FunctionRefs) {
+  WasmGCTester tester(execution_tier);
+  FLAG_experimental_liftoff_extern_ref = true;
   const byte func_index =
       tester.DefineFunction(tester.sigs.i_v(), {}, {WASM_I32V(42), kExprEnd});
   const byte sig_index = 0;

@@ -1072,8 +1073,9 @@ TEST(FunctionRefs) {
   tester.CheckResult(test, 0);
 }

-TEST(CallRef) {
-  WasmGCTester tester;
+WASM_COMPILED_EXEC_TEST(CallRef) {
+  WasmGCTester tester(execution_tier);
+  FLAG_experimental_liftoff_extern_ref = true;
   byte callee = tester.DefineFunction(
       tester.sigs.i_ii(), {},
       {WASM_I32_ADD(WASM_LOCAL_GET(0), WASM_LOCAL_GET(1)), kExprEnd});
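
Switching these tests from TEST to WASM_COMPILED_EXEC_TEST means each body now runs once per execution tier (Liftoff as well as TurboFan), with the tier threaded through to WasmGCTester. A standalone sketch of that pattern (not V8 code; the enum and function names are illustrative):

    // Standalone model (not V8 code) of what WASM_COMPILED_EXEC_TEST
    // expands to: the same test body is instantiated for every tier.
    #include <cstdio>

    enum class ExecutionTier { kLiftoff, kTurbofan };

    static void RunCallRefTest(ExecutionTier tier) {
      // ... build the module, compile it with the given tier, check results ...
      std::printf("ran CallRef test on tier %d\n", static_cast<int>(tier));
    }

    int main() {
      RunCallRefTest(ExecutionTier::kLiftoff);   // newly exercised by this CL
      RunCallRefTest(ExecutionTier::kTurbofan);  // previously the only tier
    }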