Commit dc369749 authored by Jakob Kummerow, committed by Commit Bot

[wasm-gc] Liftoff support part 4: subtyping

This adds support for the following instructions:
struct.new_default, rtt.sub, ref.test, ref.cast

Bug: v8:7748
Change-Id: I7423ddd7a83c80cb1e82c620780c27bec59ec762
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2593341
Commit-Queue: Jakob Kummerow <jkummerow@chromium.org>
Reviewed-by: Clemens Backes <clemensb@chromium.org>
Cr-Commit-Position: refs/heads/master@{#71805}
parent e63347c5
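
For orientation, here is a minimal sketch (plain C++ with illustrative names, not V8 types) of the runtime type information these instructions operate on: rtt.canon produces a canonical RTT of depth 0, rtt.sub derives a child RTT whose depth is the parent's plus one, and ref.test / ref.cast compare an object's RTT against such a value. The Liftoff RttSub implementation below computes exactly this depth via ValueType::Rtt(imm.type, parent.type.depth() + 1).

#include <cstdint>

// Illustrative model only; V8 represents RTTs as Map objects with a
// supertype list, not as this struct.
struct RttModel {
  uint32_t type_index;
  const RttModel* parent;  // nullptr for a canonical RTT (rtt.canon)
  uint32_t depth;          // 0 for canonical, parent->depth + 1 otherwise
};

// rtt.canon: depth-0 RTT for a type index.
RttModel RttCanonSketch(uint32_t type_index) {
  return RttModel{type_index, nullptr, 0};
}

// rtt.sub: derive a child RTT one level deeper than its parent.
RttModel RttSubSketch(uint32_t type_index, const RttModel& parent) {
  return RttModel{type_index, &parent, parent.depth + 1};
}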
@@ -231,9 +231,9 @@ builtin WasmAllocateJSArray(implicit context: Context)(size: Smi): JSArray {
   return AllocateJSArray(ElementsKind::PACKED_ELEMENTS, map, size, size);
 }
 
-builtin WasmAllocateRtt(implicit context: Context)(
-    typeIndex: Smi, parent: Map): Map {
-  tail runtime::WasmAllocateRtt(context, typeIndex, parent);
+builtin WasmAllocateRtt(typeIndex: intptr, parent: Map): Map {
+  tail runtime::WasmAllocateRtt(
+      LoadContextFromFrame(), SmiTag(typeIndex), parent);
 }
 
 builtin WasmAllocateStructWithRtt(rtt: Map): HeapObject {
...
@@ -5701,12 +5701,10 @@ Node* WasmGraphBuilder::RttCanon(wasm::HeapType type) {
 }
 
 Node* WasmGraphBuilder::RttSub(wasm::HeapType type, Node* parent_rtt) {
-  return CALL_BUILTIN(
-      WasmAllocateRtt,
-      graph()->NewNode(
-          mcgraph()->common()->NumberConstant(type.representation())),
-      parent_rtt,
-      LOAD_INSTANCE_FIELD(NativeContext, MachineType::TaggedPointer()));
+  return CALL_BUILTIN(WasmAllocateRtt,
+                      graph()->NewNode(mcgraph()->common()->Int32Constant(
+                          type.representation())),
+                      parent_rtt);
 }
 
 void AssertFalse(MachineGraph* mcgraph, GraphAssembler* gasm, Node* condition) {
...
@@ -3859,9 +3859,9 @@ class LiftoffCompiler {
     unsupported(decoder, kRefTypes, "table.fill");
   }
 
-  void StructNewWithRtt(FullDecoder* decoder,
-                        const StructIndexImmediate<validate>& imm,
-                        const Value& rtt, const Value args[], Value* result) {
+  void StructNew(FullDecoder* decoder,
+                 const StructIndexImmediate<validate>& imm, const Value& rtt,
+                 bool initial_values_on_stack) {
     ValueType struct_value_type = ValueType::Ref(imm.index, kNonNullable);
     WasmCode::RuntimeStubId target = WasmCode::kWasmAllocateStructWithRtt;
     compiler::CallDescriptor* call_descriptor =
@@ -3882,19 +3882,31 @@ class LiftoffCompiler {
     for (uint32_t i = imm.struct_type->field_count(); i > 0;) {
       i--;
       int offset = StructFieldOffset(imm.struct_type, i);
-      LiftoffRegister value = pinned.set(__ PopToRegister(pinned));
       ValueType field_type = imm.struct_type->field(i);
+      LiftoffRegister value = initial_values_on_stack
+                                  ? pinned.set(__ PopToRegister(pinned))
+                                  : pinned.set(__ GetUnusedRegister(
+                                        reg_class_for(field_type), pinned));
+      if (!initial_values_on_stack) {
+        if (!CheckSupportedType(decoder, field_type, "default value")) return;
+        SetDefaultValue(value, field_type, pinned);
+      }
       StoreObjectField(obj.gp(), no_reg, offset, value, pinned, field_type);
       pinned.clear(value);
     }
     __ PushRegister(struct_value_type, obj);
   }
 
+  void StructNewWithRtt(FullDecoder* decoder,
+                        const StructIndexImmediate<validate>& imm,
+                        const Value& rtt, const Value args[], Value* result) {
+    StructNew(decoder, imm, rtt, true);
+  }
+
   void StructNewDefault(FullDecoder* decoder,
                         const StructIndexImmediate<validate>& imm,
                         const Value& rtt, Value* result) {
-    // TODO(7748): Implement.
-    unsupported(decoder, kGC, "struct.new_default_with_rtt");
+    StructNew(decoder, imm, rtt, false);
   }
 
   void StructGet(FullDecoder* decoder, const Value& struct_obj,
@@ -4125,33 +4137,37 @@ class LiftoffCompiler {
                     IsolateData::root_slot_offset(index), {});
     __ PushRegister(ValueType::Rtt(imm.type, 1), rtt);
   }
 
   void RttSub(FullDecoder* decoder, const HeapTypeImmediate<validate>& imm,
               const Value& parent, Value* result) {
-    // TODO(7748): Implement.
-    unsupported(decoder, kGC, "rtt.sub");
-  }
-
-  void RefTest(FullDecoder* decoder, const Value& obj, const Value& rtt,
-               Value* result) {
-    // TODO(7748): Implement.
-    unsupported(decoder, kGC, "ref.test");
-  }
-
-  void RefCast(FullDecoder* decoder, const Value& obj, const Value& rtt,
-               Value* result) {
-    // TODO(7748): Implement.
-    unsupported(decoder, kGC, "ref.cast");
-  }
-
-  void BrOnCast(FullDecoder* decoder, const Value& obj, const Value& rtt,
-                Value* result_on_branch, uint32_t depth) {
-    // Before branching, materialize all constants. This avoids repeatedly
-    // materializing them for each conditional branch.
-    if (depth != decoder->control_depth() - 1) {
-      __ MaterializeMergedConstants(
-          decoder->control_at(depth)->br_merge()->arity);
-    }
-    Label branch, cont_false;
-    LiftoffRegList pinned;
+    ValueType parent_value_type = parent.type;
+    ValueType rtt_value_type =
+        ValueType::Rtt(imm.type, parent_value_type.depth() + 1);
+    WasmCode::RuntimeStubId target = WasmCode::kWasmAllocateRtt;
+    compiler::CallDescriptor* call_descriptor =
+        GetBuiltinCallDescriptor<WasmAllocateRttDescriptor>(compilation_zone_);
+    ValueType sig_reps[] = {rtt_value_type, kWasmI32, parent_value_type};
+    FunctionSig sig(1, 2, sig_reps);
+    LiftoffAssembler::VarState parent_var =
+        __ cache_state()->stack_state.end()[-1];
+    LiftoffRegister type_reg = __ GetUnusedRegister(kGpReg, {});
+    __ LoadConstant(type_reg, WasmValue(imm.type.representation()));
+    LiftoffAssembler::VarState type_var(kWasmI32, type_reg, 0);
+    __ PrepareBuiltinCall(&sig, call_descriptor, {type_var, parent_var});
+    __ CallRuntimeStub(target);
+    DefineSafepoint();
+    // Drop the parent RTT.
+    __ cache_state()->stack_state.pop_back(1);
+    __ PushRegister(rtt_value_type, LiftoffRegister(kReturnRegister0));
+  }
+
+  // Falls through on match (=successful type check).
+  // Returns the register containing the object.
+  LiftoffRegister SubtypeCheck(FullDecoder* decoder, const Value& obj,
+                               const Value& rtt, Label* no_match,
+                               LiftoffRegList pinned = {},
+                               Register opt_scratch = no_reg) {
+    Label match;
     LiftoffRegister rtt_reg = pinned.set(__ PopToRegister(pinned));
     LiftoffRegister obj_reg = pinned.set(__ PopToRegister(pinned));
@@ -4159,35 +4175,36 @@ class LiftoffCompiler {
     bool rtt_is_i31 = rtt.type.heap_representation() == HeapType::kI31;
     bool i31_check_only = obj_can_be_i31 && rtt_is_i31;
     if (i31_check_only) {
-      __ emit_smi_check(obj_reg.gp(), &cont_false,
-                        LiftoffAssembler::kJumpOnNotSmi);
-      // Emit no further code, just fall through to taking the branch.
+      __ emit_smi_check(obj_reg.gp(), no_match,
+                        LiftoffAssembler::kJumpOnNotSmi);
+      // Emit no further code, just fall through to {match}.
     } else {
       // Reserve all temporary registers up front, so that the cache state
      // tracking doesn't get confused by the following conditional jumps.
-      LiftoffRegister tmp1 = pinned.set(__ GetUnusedRegister(kGpReg, pinned));
+      LiftoffRegister tmp1 =
+          opt_scratch != no_reg
+              ? LiftoffRegister(opt_scratch)
+              : pinned.set(__ GetUnusedRegister(kGpReg, pinned));
       LiftoffRegister tmp2 = pinned.set(__ GetUnusedRegister(kGpReg, pinned));
       if (obj_can_be_i31) {
         DCHECK(!rtt_is_i31);
-        __ emit_smi_check(obj_reg.gp(), &cont_false,
-                          LiftoffAssembler::kJumpOnSmi);
+        __ emit_smi_check(obj_reg.gp(), no_match, LiftoffAssembler::kJumpOnSmi);
       }
       if (obj.type.is_nullable()) {
         LoadNullValue(tmp1.gp(), pinned);
-        __ emit_cond_jump(kEqual, &cont_false, obj.type, obj_reg.gp(),
-                          tmp1.gp());
+        __ emit_cond_jump(kEqual, no_match, obj.type, obj_reg.gp(), tmp1.gp());
       }
 
       // At this point, the object is neither null nor an i31ref. Perform
       // a regular type check. Check for exact match first.
       __ LoadMap(tmp1.gp(), obj_reg.gp());
       // {tmp1} now holds the object's map.
-      __ emit_cond_jump(kEqual, &branch, rtt.type, tmp1.gp(), rtt_reg.gp());
+      __ emit_cond_jump(kEqual, &match, rtt.type, tmp1.gp(), rtt_reg.gp());
 
       // If the object isn't guaranteed to be an array or struct, check that.
       // Subsequent code wouldn't handle e.g. funcrefs.
       if (!is_data_ref_type(obj.type, decoder->module_)) {
-        EmitDataRefCheck(tmp1.gp(), &cont_false, tmp2, pinned);
+        EmitDataRefCheck(tmp1.gp(), no_match, tmp2, pinned);
       }
 
       // Constant-time subtyping check: load exactly one candidate RTT from the
@@ -4205,19 +4222,59 @@ class LiftoffCompiler {
       // Step 3: check the list's length.
       LiftoffRegister list_length = tmp2;
       __ LoadFixedArrayLengthAsInt32(list_length, tmp1.gp(), pinned);
-      __ emit_i32_cond_jumpi(kUnsignedLessEqual, &cont_false, list_length.gp(),
+      __ emit_i32_cond_jumpi(kUnsignedLessEqual, no_match, list_length.gp(),
                              rtt.type.depth());
       // Step 4: load the candidate list slot into {tmp1}, and compare it.
       __ LoadTaggedPointer(
           tmp1.gp(), tmp1.gp(), no_reg,
           wasm::ObjectAccess::ElementOffsetInTaggedFixedArray(rtt.type.depth()),
          pinned);
-      __ emit_cond_jump(kUnequal, &cont_false, rtt.type, tmp1.gp(),
-                        rtt_reg.gp());
-      // Fall through to taking the branch.
+      __ emit_cond_jump(kUnequal, no_match, rtt.type, tmp1.gp(), rtt_reg.gp());
+      // Fall through to {match}.
     }
-    __ bind(&branch);
+    __ bind(&match);
+    return obj_reg;
+  }
+
+  void RefTest(FullDecoder* decoder, const Value& obj, const Value& rtt,
+               Value* result_val) {
+    Label return_false, done;
+    LiftoffRegList pinned;
+    LiftoffRegister result = pinned.set(__ GetUnusedRegister(kGpReg, {}));
+    SubtypeCheck(decoder, obj, rtt, &return_false, pinned, result.gp());
+    __ LoadConstant(result, WasmValue(1));
+    // TODO(jkummerow): Emit near jumps on platforms where it's more efficient.
+    __ emit_jump(&done);
+    __ bind(&return_false);
+    __ LoadConstant(result, WasmValue(0));
+    __ bind(&done);
+    __ PushRegister(kWasmI32, result);
+  }
+
+  void RefCast(FullDecoder* decoder, const Value& obj, const Value& rtt,
+               Value* result) {
+    Label* trap_label = AddOutOfLineTrap(decoder->position(),
+                                         WasmCode::kThrowWasmTrapIllegalCast);
+    LiftoffRegister obj_reg = SubtypeCheck(decoder, obj, rtt, trap_label);
+    __ PushRegister(ValueType::Ref(rtt.type.heap_type(), kNonNullable),
+                    obj_reg);
+  }
+
+  void BrOnCast(FullDecoder* decoder, const Value& obj, const Value& rtt,
+                Value* result_on_branch, uint32_t depth) {
+    // Before branching, materialize all constants. This avoids repeatedly
+    // materializing them for each conditional branch.
+    if (depth != decoder->control_depth() - 1) {
+      __ MaterializeMergedConstants(
+          decoder->control_at(depth)->br_merge()->arity);
+    }
+    Label cont_false;
+    LiftoffRegister obj_reg = SubtypeCheck(decoder, obj, rtt, &cont_false);
     __ PushRegister(rtt.type.is_bottom()
                         ? kWasmBottom
                         : ValueType::Ref(rtt.type.heap_type(), kNonNullable),
...
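
The "constant-time subtyping check" that SubtypeCheck emits (steps 3 and 4 above) can be summarized in plain C++ as follows; this is a hedged sketch with illustrative types, not V8 code. The key point is that only a single candidate RTT is loaded, at the index given by the target RTT's static depth, so the emitted code never loops over the supertype chain.

#include <cstdint>

// Illustrative stand-in for a Map/RTT with its list of supertype RTTs.
struct RttSketch {
  const RttSketch* const* supertypes;  // candidate RTTs, indexed by depth
  uint32_t supertypes_length;
};

// Mirrors the emitted check: exact-match fast path, then length check,
// then a single load-and-compare at the target depth.
bool SubtypeCheckSketch(const RttSketch* object_rtt,
                        const RttSketch* target_rtt, uint32_t target_depth) {
  if (object_rtt == target_rtt) return true;  // exact match
  if (object_rtt->supertypes_length <= target_depth) return false;
  return object_rtt->supertypes[target_depth] == target_rtt;
}

RefTest, RefCast, and BrOnCast then differ only in what they do with the no_match label: materialize 0/1 as the result, trap via kThrowWasmTrapIllegalCast, or skip the branch.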
@@ -86,6 +86,7 @@ struct WasmModule;
   V(RecordWrite)              \
   V(ToNumber)                 \
   V(WasmAllocateArrayWithRtt) \
+  V(WasmAllocateRtt)          \
   V(WasmAllocateStructWithRtt)
 
 // Sorted, disjoint and non-overlapping memory regions. A region is of the
...
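
The list above is an X-macro: every V(Name) entry is expanded wherever the list is instantiated, so registering the new WasmAllocateRtt stub is a one-line change. A hedged sketch of the pattern with illustrative macro names (the real list and its expansion sites live in the V8 headers):

// Illustrative only; names with the _SKETCH suffix are not V8 identifiers.
#define WASM_RUNTIME_STUB_LIST_SKETCH(V) \
  V(WasmAllocateArrayWithRtt)            \
  V(WasmAllocateRtt)                     \
  V(WasmAllocateStructWithRtt)

// One expansion site: an enum of stub IDs (cf. WasmCode::kWasmAllocateRtt
// used in the Liftoff code above).
#define DEF_ENUM(Name) k##Name,
enum RuntimeStubIdSketch { WASM_RUNTIME_STUB_LIST_SKETCH(DEF_ENUM) kCount };
#undef DEF_ENUM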
@@ -745,8 +745,9 @@ WASM_COMPILED_EXEC_TEST(WasmPackedArrayS) {
   tester.CheckResult(kF, static_cast<int16_t>(expected_outputs[3]), 3);
 }
 
-TEST(NewDefault) {
-  WasmGCTester tester;
+WASM_COMPILED_EXEC_TEST(NewDefault) {
+  WasmGCTester tester(execution_tier);
+  FLAG_experimental_liftoff_extern_ref = true;
   const byte struct_type = tester.DefineStruct(
       {F(wasm::kWasmI32, true), F(wasm::kWasmF64, true), F(optref(0), true)});
   const byte array_type = tester.DefineArray(wasm::kWasmI32, true);
...
@@ -873,8 +874,9 @@ TEST(BasicRTT) {
   tester.CheckResult(kRefCast, 43);
 }
 
-TEST(AnyRefRtt) {
-  WasmGCTester tester;
+WASM_COMPILED_EXEC_TEST(AnyRefRtt) {
+  WasmGCTester tester(execution_tier);
+  FLAG_experimental_liftoff_extern_ref = true;
   ValueType any_rtt_0_type = ValueType::Rtt(HeapType::kAny, 0);
   FunctionSig sig_any_canon(1, 0, &any_rtt_0_type);
...
@@ -947,8 +949,10 @@ TEST(AnyRefRtt) {
   tester.CheckResult(kCheckAnyAgainstAny, 1);
 }
 
-TEST(ArrayNewMap) {
-  WasmGCTester tester;
+WASM_COMPILED_EXEC_TEST(ArrayNewMap) {
+  WasmGCTester tester(execution_tier);
+  FLAG_experimental_liftoff_extern_ref = true;
   const byte type_index = tester.DefineArray(kWasmI32, true);
   ValueType array_type = ValueType::Ref(type_index, kNonNullable);
...
@@ -1065,8 +1069,9 @@ TEST(CallRef) {
   tester.CheckResult(caller, 47, 5);
 }
 
-TEST(RefTestCastNull) {
-  WasmGCTester tester;
+WASM_COMPILED_EXEC_TEST(RefTestCastNull) {
+  WasmGCTester tester(execution_tier);
+  FLAG_experimental_liftoff_extern_ref = true;
   byte type_index = tester.DefineStruct({F(wasm::kWasmI32, true)});
   const byte kRefTestNull = tester.DefineFunction(
...
@@ -1266,8 +1271,9 @@ TEST(CastsBenchmark) {
   tester.CheckResult(Main, (kListLength * (kListLength - 1) / 2) * kLoops);
 }
 
-TEST(GlobalInitReferencingGlobal) {
-  WasmGCTester tester;
+WASM_COMPILED_EXEC_TEST(GlobalInitReferencingGlobal) {
+  WasmGCTester tester(execution_tier);
+  FLAG_experimental_liftoff_extern_ref = true;
   const byte from = tester.AddGlobal(kWasmI32, false, WasmInitExpr(42));
   const byte to =
       tester.AddGlobal(kWasmI32, false, WasmInitExpr::GlobalGet(from));
...
@@ -1280,8 +1286,9 @@ TEST(GlobalInitReferencingGlobal) {
   tester.CheckResult(func, 42);
 }
 
-TEST(IndirectNullSetManually) {
-  WasmGCTester tester;
+WASM_COMPILED_EXEC_TEST(IndirectNullSetManually) {
+  WasmGCTester tester(execution_tier);
+  FLAG_experimental_liftoff_extern_ref = true;
   byte sig_index = tester.DefineSignature(tester.sigs.i_i());
   tester.DefineTable(ValueType::Ref(sig_index, kNullable), 1, 1);
   byte func_index = tester.DefineFunction(
...
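
The tests are converted from TEST to WASM_COMPILED_EXEC_TEST so each body runs against both compilers and exercises the new Liftoff paths; the execution_tier parameter is forwarded into WasmGCTester. A hedged sketch of the idea behind that macro (illustrative, not the exact V8 definition; TEST is the cctest macro seen above):

// Illustrative sketch; TestExecutionTierSketch and the macro name are
// assumptions, not the actual V8 identifiers.
enum class TestExecutionTierSketch { kLiftoff, kTurbofan };

#define WASM_COMPILED_EXEC_TEST_SKETCH(name)                   \
  void RunWasm_##name(TestExecutionTierSketch execution_tier); \
  TEST(RunWasmTurbofan_##name) {                               \
    RunWasm_##name(TestExecutionTierSketch::kTurbofan);        \
  }                                                            \
  TEST(RunWasmLiftoff_##name) {                                \
    RunWasm_##name(TestExecutionTierSketch::kLiftoff);         \
  }                                                            \
  void RunWasm_##name(TestExecutionTierSketch execution_tier)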