Commit bee5992a authored by Jakob Kummerow, committed by Commit Bot

[wasm-gc] Initial Liftoff support

This CL implements Liftoff support for struct.get/set,
struct.new_with_rtt, rtt.canon, and ref.is_null, which
is enough to make the first testcase pass.

Bug: v8:7748
Change-Id: Id09e9872d2126127192c852b3cb6d57ff9417582
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2584951
Commit-Queue: Jakob Kummerow <jkummerow@chromium.org>
Reviewed-by: Clemens Backes <clemensb@chromium.org>
Cr-Commit-Position: refs/heads/master@{#71744}
parent 1156c90f
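
The operations named in the message are instructions from the draft wasm-gc and reference-types proposals. For orientation before the diff, a module along the following lines is the kind of input the new Liftoff paths handle. This is an illustrative sketch in the proposals' text format as of late 2020, not a test case from this CL; the type and function names are invented, and the exact text-format spellings follow the draft proposal rather than the final Wasm GC spec:

    (module
      ;; A two-field mutable struct type, mirroring the shape used by the
      ;; first test case (two mutable i32 fields).
      (type $pair (struct (field (mut i32)) (field (mut i32))))

      (func (export "main") (result i32)
        (local $p (ref null $pair))
        ;; struct.new_with_rtt pops the field values plus an RTT (produced
        ;; here by rtt.canon); Liftoff compiles the allocation as a call to
        ;; the WasmAllocateStructWithRtt runtime stub added below.
        (local.set $p
          (struct.new_with_rtt $pair (i32.const 10) (i32.const 20)
                               (rtt.canon $pair)))
        ;; struct.set and struct.get take the field index as an immediate.
        (struct.set $pair 0 (local.get $p) (i32.const 22))
        (i32.add (struct.get $pair 0 (local.get $p))
                 (struct.get $pair 1 (local.get $p))))

      ;; ref.is_null is the fourth newly supported operation. In Liftoff it
      ;; is additionally gated on --experimental-liftoff-extern-ref, which
      ;; the updated test below enables via a FlagScope.
      (func (export "null_check") (param $r externref) (result i32)
        (ref.is_null (local.get $r))))
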
@@ -236,8 +236,7 @@ builtin WasmAllocateRtt(implicit context: Context)(
   tail runtime::WasmAllocateRtt(context, typeIndex, parent);
 }
-builtin WasmAllocateStructWithRtt(implicit context: Context)(rtt: Map):
-    HeapObject {
+builtin WasmAllocateStructWithRtt(rtt: Map): HeapObject {
   const instanceSize: intptr =
       unsafe::TimesTaggedSize(Convert<intptr>(rtt.instance_size_in_words));
   const result: HeapObject = unsafe::Allocate(instanceSize);
@@ -5625,9 +5625,7 @@ Node* WasmGraphBuilder::TableFill(uint32_t table_index, Node* start,
 Node* WasmGraphBuilder::StructNewWithRtt(uint32_t struct_index,
                                          const wasm::StructType* type,
                                          Node* rtt, Vector<Node*> fields) {
-  Node* s = CALL_BUILTIN(
-      WasmAllocateStructWithRtt, rtt,
-      LOAD_INSTANCE_FIELD(NativeContext, MachineType::TaggedPointer()));
+  Node* s = CALL_BUILTIN(WasmAllocateStructWithRtt, rtt);
   for (uint32_t i = 0; i < type->field_count(); i++) {
     gasm_->StoreStructField(s, type, i, fields[i]);
   }
@@ -303,6 +303,7 @@ inline void Store(LiftoffAssembler* assm, LiftoffRegister src, MemOperand dst,
     case ValueType::kI32:
     case ValueType::kOptRef:
     case ValueType::kRef:
+    case ValueType::kRtt:
       assm->str(src.gp(), dst);
       break;
     case ValueType::kI64:
@@ -336,6 +337,7 @@ inline void Load(LiftoffAssembler* assm, LiftoffRegister dst, MemOperand src,
     case ValueType::kI32:
     case ValueType::kOptRef:
     case ValueType::kRef:
+    case ValueType::kRtt:
       assm->ldr(dst.gp(), src);
       break;
     case ValueType::kI64:
@@ -2155,10 +2157,14 @@ void LiftoffAssembler::emit_cond_jump(LiftoffCondition liftoff_cond,
                                       Label* label, ValueType type,
                                       Register lhs, Register rhs) {
   Condition cond = liftoff::ToCondition(liftoff_cond);
-  DCHECK_EQ(type, kWasmI32);
   if (rhs == no_reg) {
+    DCHECK_EQ(type, kWasmI32);
     cmp(lhs, Operand(0));
   } else {
+    DCHECK(type == kWasmI32 ||
+           (type.is_reference_type() &&
+            (liftoff_cond == kEqual || liftoff_cond == kUnequal)));
     cmp(lhs, rhs);
   }
   b(label, cond);
@@ -79,6 +79,7 @@ inline CPURegister GetRegFromType(const LiftoffRegister& reg, ValueType type) {
     case ValueType::kI64:
     case ValueType::kRef:
     case ValueType::kOptRef:
+    case ValueType::kRtt:
       return reg.gp().X();
     case ValueType::kF32:
       return reg.fp().S();
@@ -1452,6 +1453,12 @@ void LiftoffAssembler::emit_cond_jump(LiftoffCondition liftoff_cond,
         Cmp(lhs.W(), wzr);
       }
       break;
+    case ValueType::kRef:
+    case ValueType::kOptRef:
+    case ValueType::kRtt:
+      DCHECK(rhs.is_valid());
+      DCHECK(liftoff_cond == kEqual || liftoff_cond == kUnequal);
+      V8_FALLTHROUGH;
     case ValueType::kI64:
       if (rhs.is_valid()) {
         Cmp(lhs.X(), rhs.X());
@@ -1121,6 +1121,7 @@ void LiftoffAssembler::Spill(int offset, LiftoffRegister reg, ValueType type) {
     case ValueType::kI32:
     case ValueType::kOptRef:
     case ValueType::kRef:
+    case ValueType::kRtt:
       mov(dst, reg.gp());
       break;
     case ValueType::kI64:
@@ -2372,6 +2373,11 @@ void LiftoffAssembler::emit_cond_jump(LiftoffCondition liftoff_cond,
   Condition cond = liftoff::ToCondition(liftoff_cond);
   if (rhs != no_reg) {
     switch (type.kind()) {
+      case ValueType::kRef:
+      case ValueType::kOptRef:
+      case ValueType::kRtt:
+        DCHECK(liftoff_cond == kEqual || liftoff_cond == kUnequal);
+        V8_FALLTHROUGH;
       case ValueType::kI32:
         cmp(lhs, rhs);
         break;
@@ -730,6 +730,15 @@ class LiftoffAssembler : public TurboAssembler {
     }
   }
 
+  inline void emit_ptrsize_set_cond(LiftoffCondition condition, Register dst,
+                                    LiftoffRegister lhs, LiftoffRegister rhs) {
+    if (kSystemPointerSize == 8) {
+      emit_i64_set_cond(condition, dst, lhs, rhs);
+    } else {
+      emit_i32_set_cond(condition, dst, lhs.gp(), rhs.gp());
+    }
+  }
+
   inline void emit_ptrsize_zeroextend_i32(Register dst, Register src) {
     if (kSystemPointerSize == 8) {
       emit_type_conversion(kExprI64UConvertI32, LiftoffRegister(dst),
@@ -652,16 +652,9 @@ class LiftoffCompiler {
          ++param_idx) {
       ValueType type = decoder->local_type(param_idx);
       if (type.is_reference_type()) {
-        Register isolate_root = __ GetUnusedRegister(kGpReg, {}).gp();
-        // We can re-use the isolate_root register as result register.
-        Register result = isolate_root;
-        LOAD_INSTANCE_FIELD(isolate_root, IsolateRoot, kSystemPointerSize);
-        __ LoadTaggedPointer(
-            result, isolate_root, no_reg,
-            IsolateData::root_slot_offset(RootIndex::kNullValue), {});
-        __ Spill(__ cache_state()->stack_state.back().offset(),
-                 LiftoffRegister(result), type);
+        LiftoffRegister result = __ GetUnusedRegister(kGpReg, {});
+        LoadNullValue(result.gp(), {});
+        __ Spill(__ cache_state()->stack_state.back().offset(), result, type);
       }
     }
   }
@@ -1282,9 +1275,22 @@ class LiftoffCompiler {
             __ emit_type_conversion(kExprI64UConvertI32, dst, c_call_dst,
                                     nullptr);
           });
-      case kExprRefIsNull:
-        unsupported(decoder, kRefTypes, "ref_is_null");
-        return;
+      case kExprRefIsNull: {
+        if (!FLAG_experimental_liftoff_extern_ref) {
+          unsupported(decoder, kRefTypes, "ref_is_null");
+          return;
+        }
+        LiftoffRegList pinned;
+        LiftoffRegister ref = pinned.set(__ PopToRegister());
+        LiftoffRegister null = __ GetUnusedRegister(kGpReg, pinned);
+        LoadNullValue(null.gp(), pinned);
+        // Prefer to overwrite one of the input registers with the result
+        // of the comparison.
+        LiftoffRegister dst = __ GetUnusedRegister(kGpReg, {ref, null}, {});
+        __ emit_ptrsize_set_cond(kEqual, dst.gp(), ref, null);
+        __ PushRegister(kWasmI32, dst);
+        return;
+      }
       default:
         UNREACHABLE();
     }
@@ -1696,15 +1702,9 @@ class LiftoffCompiler {
       unsupported(decoder, kRefTypes, "ref_null");
       return;
     }
-    Register isolate_root = __ GetUnusedRegister(kGpReg, {}).gp();
-    // We can re-use the isolate_root register as result register.
-    Register result = isolate_root;
-    LOAD_INSTANCE_FIELD(isolate_root, IsolateRoot, kSystemPointerSize);
-    __ LoadTaggedPointer(result, isolate_root, no_reg,
-                         IsolateData::root_slot_offset(RootIndex::kNullValue),
-                         {});
-    __ PushRegister(type, LiftoffRegister(result));
+    LiftoffRegister null = __ GetUnusedRegister(kGpReg, {});
+    LoadNullValue(null.gp(), {});
+    __ PushRegister(type, null);
   }
 
   void RefFunc(FullDecoder* decoder, uint32_t function_index, Value* result) {
@@ -3837,26 +3837,67 @@ class LiftoffCompiler {
   void StructNewWithRtt(FullDecoder* decoder,
                         const StructIndexImmediate<validate>& imm,
                         const Value& rtt, const Value args[], Value* result) {
-    // TODO(7748): Implement.
-    unsupported(decoder, kGC, "struct.new_with_rtt");
+    ValueType struct_value_type = ValueType::Ref(imm.index, kNonNullable);
+    WasmCode::RuntimeStubId target = WasmCode::kWasmAllocateStructWithRtt;
+    compiler::CallDescriptor* call_descriptor =
+        GetBuiltinCallDescriptor<WasmAllocateStructWithRttDescriptor>(
+            compilation_zone_);
+    ValueType sig_reps[] = {struct_value_type, rtt.type};
+    FunctionSig sig(1, 1, sig_reps);
+    LiftoffAssembler::VarState rtt_value =
+        __ cache_state()->stack_state.end()[-1];
+    __ PrepareBuiltinCall(&sig, call_descriptor, {rtt_value});
+    __ CallRuntimeStub(target);
+    DefineSafepoint();
+    // Drop the RTT.
+    __ cache_state()->stack_state.pop_back(1);
+    LiftoffRegister obj(kReturnRegister0);
+    LiftoffRegList pinned = LiftoffRegList::ForRegs(obj);
+    for (uint32_t i = imm.struct_type->field_count(); i > 0;) {
+      i--;
+      int offset = StructFieldOffset(imm.struct_type, i);
+      LiftoffRegister value = pinned.set(__ PopToRegister(pinned));
+      ValueType field_type = imm.struct_type->field(i);
+      StoreObjectField(obj.gp(), offset, value, pinned, field_type);
+      pinned.clear(value);
+    }
+    __ PushRegister(struct_value_type, obj);
   }
 
   void StructNewDefault(FullDecoder* decoder,
                         const StructIndexImmediate<validate>& imm,
                         const Value& rtt, Value* result) {
     // TODO(7748): Implement.
     unsupported(decoder, kGC, "struct.new_default_with_rtt");
   }
 
   void StructGet(FullDecoder* decoder, const Value& struct_obj,
                  const FieldIndexImmediate<validate>& field, bool is_signed,
                  Value* result) {
-    // TODO(7748): Implement.
-    unsupported(decoder, kGC, "struct.get");
+    const StructType* struct_type = field.struct_index.struct_type;
+    ValueType field_type = struct_type->field(field.index);
+    int offset = StructFieldOffset(struct_type, field.index);
+    LiftoffRegList pinned;
+    LiftoffRegister obj = pinned.set(__ PopToRegister(pinned));
+    MaybeEmitNullCheck(decoder, obj.gp(), pinned, struct_obj.type);
+    LiftoffRegister value =
+        pinned.set(__ GetUnusedRegister(reg_class_for(field_type), pinned));
+    LoadObjectField(value, obj.gp(), offset, field_type, is_signed, pinned);
+    __ PushRegister(field_type, value);
   }
 
   void StructSet(FullDecoder* decoder, const Value& struct_obj,
                  const FieldIndexImmediate<validate>& field,
                  const Value& field_value) {
-    // TODO(7748): Implement.
-    unsupported(decoder, kGC, "struct.set");
+    const StructType* struct_type = field.struct_index.struct_type;
+    ValueType field_type = struct_type->field(field.index);
+    int offset = StructFieldOffset(struct_type, field.index);
+    LiftoffRegList pinned;
+    LiftoffRegister value = pinned.set(__ PopToRegister(pinned));
+    LiftoffRegister obj = pinned.set(__ PopToRegister(pinned));
+    MaybeEmitNullCheck(decoder, obj.gp(), pinned, struct_obj.type);
+    StoreObjectField(obj.gp(), offset, value, pinned, field_type);
   }
 
   void ArrayNewWithRtt(FullDecoder* decoder,
@@ -3904,8 +3945,41 @@
   void RttCanon(FullDecoder* decoder, const HeapTypeImmediate<validate>& imm,
                 Value* result) {
-    // TODO(7748): Implement.
-    unsupported(decoder, kGC, "rtt.canon");
+    LiftoffRegister rtt = __ GetUnusedRegister(kGpReg, {});
+    RootIndex index;
+    switch (imm.type.representation()) {
+      case wasm::HeapType::kEq:
+        index = RootIndex::kWasmRttEqrefMap;
+        break;
+      case wasm::HeapType::kExtern:
+        index = RootIndex::kWasmRttExternrefMap;
+        break;
+      case wasm::HeapType::kFunc:
+        index = RootIndex::kWasmRttFuncrefMap;
+        break;
+      case wasm::HeapType::kI31:
+        index = RootIndex::kWasmRttI31refMap;
+        break;
+      case wasm::HeapType::kAny:
+        index = RootIndex::kWasmRttAnyrefMap;
+        break;
+      case wasm::HeapType::kBottom:
+        UNREACHABLE();
+      default:
+        // User-defined type.
+        LOAD_TAGGED_PTR_INSTANCE_FIELD(rtt.gp(), ManagedObjectMaps);
+        __ LoadTaggedPointer(
+            rtt.gp(), rtt.gp(), no_reg,
+            wasm::ObjectAccess::ElementOffsetInTaggedFixedArray(
+                imm.type.ref_index()),
+            {});
+        __ PushRegister(ValueType::Rtt(imm.type, 1), rtt);
+        return;
+    }
+    LOAD_INSTANCE_FIELD(rtt.gp(), IsolateRoot, kSystemPointerSize);
+    __ LoadTaggedPointer(rtt.gp(), rtt.gp(), no_reg,
+                         IsolateData::root_slot_offset(index), {});
+    __ PushRegister(ValueType::Rtt(imm.type, 1), rtt);
   }
 
   void RttSub(FullDecoder* decoder, const HeapTypeImmediate<validate>& imm,
               const Value& parent, Value* result) {
@@ -4141,6 +4215,51 @@ class LiftoffCompiler {
     __ FinishCall(imm.sig, call_descriptor);
   }
 
+  void LoadNullValue(Register null, LiftoffRegList pinned) {
+    LOAD_INSTANCE_FIELD(null, IsolateRoot, kSystemPointerSize);
+    __ LoadTaggedPointer(null, null, no_reg,
+                         IsolateData::root_slot_offset(RootIndex::kNullValue),
+                         pinned);
+  }
+
+  void MaybeEmitNullCheck(FullDecoder* decoder, Register object,
+                          LiftoffRegList pinned, ValueType type) {
+    if (!type.is_nullable()) return;
+    Label* trap_label = AddOutOfLineTrap(
+        decoder->position(), WasmCode::kThrowWasmTrapNullDereference);
+    LiftoffRegister null = __ GetUnusedRegister(kGpReg, pinned);
+    LoadNullValue(null.gp(), pinned);
+    __ emit_cond_jump(LiftoffCondition::kEqual, trap_label, type, object,
+                      null.gp());
+  }
+
+  int StructFieldOffset(const StructType* struct_type, int field_index) {
+    return wasm::ObjectAccess::ToTagged(WasmStruct::kHeaderSize +
+                                        struct_type->field_offset(field_index));
+  }
+
+  void LoadObjectField(LiftoffRegister dst, Register src, int offset,
+                       ValueType type, bool is_signed, LiftoffRegList pinned) {
+    if (type.is_reference_type()) {
+      __ LoadTaggedPointer(dst.gp(), src, no_reg, offset, pinned);
+    } else {
+      // Primitive type.
+      LoadType load_type = LoadType::ForValueType(type, is_signed);
+      __ Load(dst, src, no_reg, offset, load_type, pinned);
+    }
+  }
+
+  void StoreObjectField(Register obj, int offset, LiftoffRegister value,
+                        LiftoffRegList pinned, ValueType type) {
+    if (type.is_reference_type()) {
+      __ StoreTaggedPointer(obj, offset, value, pinned);
+    } else {
+      // Primitive type.
+      StoreType store_type = StoreType::ForValueType(type);
+      __ Store(obj, no_reg, offset, value, store_type, pinned);
+    }
+  }
+
   static constexpr WasmOpcode kNoOutstandingOp = kExprUnreachable;
 
   LiftoffAssembler asm_;
@@ -66,6 +66,7 @@ static inline constexpr RegClass reg_class_for(ValueType::Kind kind) {
       return kNeedS128RegPair ? kFpRegPair : kFpReg;
     case ValueType::kRef:
     case ValueType::kOptRef:
+    case ValueType::kRtt:
      return kGpReg;
    default:
      return kNoReg;  // unsupported type
@@ -841,6 +841,7 @@ void LiftoffAssembler::Spill(int offset, LiftoffRegister reg, ValueType type) {
     case ValueType::kI64:
     case ValueType::kOptRef:
     case ValueType::kRef:
+    case ValueType::kRtt:
       movq(dst, reg.gp());
       break;
     case ValueType::kF32:
@@ -2043,6 +2044,11 @@ void LiftoffAssembler::emit_cond_jump(LiftoffCondition liftoff_cond,
       case ValueType::kI32:
         cmpl(lhs, rhs);
         break;
+      case ValueType::kRef:
+      case ValueType::kOptRef:
+      case ValueType::kRtt:
+        DCHECK(liftoff_cond == kEqual || liftoff_cond == kUnequal);
+        V8_FALLTHROUGH;
       case ValueType::kI64:
         cmpq(lhs, rhs);
         break;
@@ -558,7 +558,7 @@ class LoadType {
   constexpr ValueType value_type() const { return kValueType[val_]; }
   constexpr MachineType mem_type() const { return kMemType[val_]; }
 
-  static LoadType ForValueType(ValueType type) {
+  static LoadType ForValueType(ValueType type, bool is_signed = false) {
     switch (type.kind()) {
       case ValueType::kI32:
         return kI32Load;
@@ -570,6 +570,10 @@
         return kF64Load;
       case ValueType::kS128:
         return kS128Load;
+      case ValueType::kI8:
+        return is_signed ? kI32Load8S : kI32Load8U;
+      case ValueType::kI16:
+        return is_signed ? kI32Load16S : kI32Load16U;
       default:
         UNREACHABLE();
     }
@@ -642,6 +646,10 @@
         return kF64Store;
       case ValueType::kS128:
         return kS128Store;
+      case ValueType::kI8:
+        return kI32Store8;
+      case ValueType::kI16:
+        return kI32Store16;
       default:
         UNREACHABLE();
     }
@@ -84,7 +84,8 @@ struct WasmModule;
   V(I32PairToBigInt) \
   V(I64ToBigInt)     \
   V(RecordWrite)     \
-  V(ToNumber)
+  V(ToNumber)        \
+  V(WasmAllocateStructWithRtt)
 
 // Sorted, disjoint and non-overlapping memory regions. A region is of the
 // form [start, end). So there's no [start, end), [end, other_end),
@@ -30,11 +30,18 @@ using F = std::pair<ValueType, bool>;
 class WasmGCTester {
  public:
-  WasmGCTester()
+  explicit WasmGCTester(
+      TestExecutionTier execution_tier = TestExecutionTier::kTurbofan)
       : flag_gc(&v8::internal::FLAG_experimental_wasm_gc, true),
         flag_reftypes(&v8::internal::FLAG_experimental_wasm_reftypes, true),
         flag_typedfuns(&v8::internal::FLAG_experimental_wasm_typed_funcref,
                        true),
+        flag_liftoff(
+            &v8::internal::FLAG_liftoff,
+            execution_tier == TestExecutionTier::kTurbofan ? false : true),
+        flag_liftoff_only(
+            &v8::internal::FLAG_liftoff_only,
+            execution_tier == TestExecutionTier::kLiftoff ? true : false),
         zone(&allocator, ZONE_NAME),
         builder_(&zone),
         isolate_(CcTest::InitIsolateOnce()),
@@ -173,6 +180,8 @@ class WasmGCTester {
   const FlagScope<bool> flag_gc;
   const FlagScope<bool> flag_reftypes;
   const FlagScope<bool> flag_typedfuns;
+  const FlagScope<bool> flag_liftoff;
+  const FlagScope<bool> flag_liftoff_only;
 
   v8::internal::AccountingAllocator allocator;
   Zone zone;
@@ -191,9 +200,10 @@ ValueType optref(uint32_t type_index) {
   return ValueType::Ref(type_index, kNullable);
 }
 
 // TODO(7748): Use WASM_EXEC_TEST once interpreter and liftoff are supported.
-TEST(WasmBasicStruct) {
-  WasmGCTester tester;
+WASM_COMPILED_EXEC_TEST(WasmBasicStruct) {
+  WasmGCTester tester(execution_tier);
+  FlagScope<bool> flag_liftoff_reftypes(
+      &v8::internal::FLAG_experimental_liftoff_extern_ref, true);
   const byte type_index =
       tester.DefineStruct({F(kWasmI32, true), F(kWasmI32, true)});
   const byte empty_struct_index = tester.DefineStruct({});