Commit c2e9357c authored by Jakob Kummerow, committed by Commit Bot

Reland "[wasm-gc] Initial Liftoff support"

This is a reland of bee5992a.
Fixes a TSan race report by replacing a FlagScope in tests with
direct assignment to the flag in question.

Original change's description:
> [wasm-gc] Initial Liftoff support
>
> This CL implements Liftoff support for struct.get/set,
> struct.new_with_rtt, rtt.canon, and ref.is_null, which
> is enough to make the first testcase pass.
>
> Bug: v8:7748
> Change-Id: Id09e9872d2126127192c852b3cb6d57ff9417582
> Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2584951
> Commit-Queue: Jakob Kummerow <jkummerow@chromium.org>
> Reviewed-by: Clemens Backes <clemensb@chromium.org>
> Cr-Commit-Position: refs/heads/master@{#71744}

Bug: v8:7748
Change-Id: I17de6803c23a88209102385010dfdf9b88e25ace
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2593254
Commit-Queue: Jakob Kummerow <jkummerow@chromium.org>
Reviewed-by: Clemens Backes <clemensb@chromium.org>
Cr-Commit-Position: refs/heads/master@{#71762}
parent 4df69aca
...@@ -236,8 +236,7 @@ builtin WasmAllocateRtt(implicit context: Context)( ...@@ -236,8 +236,7 @@ builtin WasmAllocateRtt(implicit context: Context)(
tail runtime::WasmAllocateRtt(context, typeIndex, parent); tail runtime::WasmAllocateRtt(context, typeIndex, parent);
} }
builtin WasmAllocateStructWithRtt(implicit context: Context)(rtt: Map): builtin WasmAllocateStructWithRtt(rtt: Map): HeapObject {
HeapObject {
const instanceSize: intptr = const instanceSize: intptr =
unsafe::TimesTaggedSize(Convert<intptr>(rtt.instance_size_in_words)); unsafe::TimesTaggedSize(Convert<intptr>(rtt.instance_size_in_words));
const result: HeapObject = unsafe::Allocate(instanceSize); const result: HeapObject = unsafe::Allocate(instanceSize);
......
...@@ -5625,9 +5625,7 @@ Node* WasmGraphBuilder::TableFill(uint32_t table_index, Node* start, ...@@ -5625,9 +5625,7 @@ Node* WasmGraphBuilder::TableFill(uint32_t table_index, Node* start,
Node* WasmGraphBuilder::StructNewWithRtt(uint32_t struct_index, Node* WasmGraphBuilder::StructNewWithRtt(uint32_t struct_index,
const wasm::StructType* type, const wasm::StructType* type,
Node* rtt, Vector<Node*> fields) { Node* rtt, Vector<Node*> fields) {
Node* s = CALL_BUILTIN( Node* s = CALL_BUILTIN(WasmAllocateStructWithRtt, rtt);
WasmAllocateStructWithRtt, rtt,
LOAD_INSTANCE_FIELD(NativeContext, MachineType::TaggedPointer()));
for (uint32_t i = 0; i < type->field_count(); i++) { for (uint32_t i = 0; i < type->field_count(); i++) {
gasm_->StoreStructField(s, type, i, fields[i]); gasm_->StoreStructField(s, type, i, fields[i]);
} }
......
...@@ -303,6 +303,7 @@ inline void Store(LiftoffAssembler* assm, LiftoffRegister src, MemOperand dst, ...@@ -303,6 +303,7 @@ inline void Store(LiftoffAssembler* assm, LiftoffRegister src, MemOperand dst,
case ValueType::kI32: case ValueType::kI32:
case ValueType::kOptRef: case ValueType::kOptRef:
case ValueType::kRef: case ValueType::kRef:
case ValueType::kRtt:
assm->str(src.gp(), dst); assm->str(src.gp(), dst);
break; break;
case ValueType::kI64: case ValueType::kI64:
...@@ -336,6 +337,7 @@ inline void Load(LiftoffAssembler* assm, LiftoffRegister dst, MemOperand src, ...@@ -336,6 +337,7 @@ inline void Load(LiftoffAssembler* assm, LiftoffRegister dst, MemOperand src,
case ValueType::kI32: case ValueType::kI32:
case ValueType::kOptRef: case ValueType::kOptRef:
case ValueType::kRef: case ValueType::kRef:
case ValueType::kRtt:
assm->ldr(dst.gp(), src); assm->ldr(dst.gp(), src);
break; break;
case ValueType::kI64: case ValueType::kI64:
...@@ -2155,10 +2157,14 @@ void LiftoffAssembler::emit_cond_jump(LiftoffCondition liftoff_cond, ...@@ -2155,10 +2157,14 @@ void LiftoffAssembler::emit_cond_jump(LiftoffCondition liftoff_cond,
Label* label, ValueType type, Label* label, ValueType type,
Register lhs, Register rhs) { Register lhs, Register rhs) {
Condition cond = liftoff::ToCondition(liftoff_cond); Condition cond = liftoff::ToCondition(liftoff_cond);
DCHECK_EQ(type, kWasmI32);
if (rhs == no_reg) { if (rhs == no_reg) {
DCHECK_EQ(type, kWasmI32);
cmp(lhs, Operand(0)); cmp(lhs, Operand(0));
} else { } else {
DCHECK(type == kWasmI32 ||
(type.is_reference_type() &&
(liftoff_cond == kEqual || liftoff_cond == kUnequal)));
cmp(lhs, rhs); cmp(lhs, rhs);
} }
b(label, cond); b(label, cond);
......
...@@ -79,6 +79,7 @@ inline CPURegister GetRegFromType(const LiftoffRegister& reg, ValueType type) { ...@@ -79,6 +79,7 @@ inline CPURegister GetRegFromType(const LiftoffRegister& reg, ValueType type) {
case ValueType::kI64: case ValueType::kI64:
case ValueType::kRef: case ValueType::kRef:
case ValueType::kOptRef: case ValueType::kOptRef:
case ValueType::kRtt:
return reg.gp().X(); return reg.gp().X();
case ValueType::kF32: case ValueType::kF32:
return reg.fp().S(); return reg.fp().S();
...@@ -1452,6 +1453,12 @@ void LiftoffAssembler::emit_cond_jump(LiftoffCondition liftoff_cond, ...@@ -1452,6 +1453,12 @@ void LiftoffAssembler::emit_cond_jump(LiftoffCondition liftoff_cond,
Cmp(lhs.W(), wzr); Cmp(lhs.W(), wzr);
} }
break; break;
case ValueType::kRef:
case ValueType::kOptRef:
case ValueType::kRtt:
DCHECK(rhs.is_valid());
DCHECK(liftoff_cond == kEqual || liftoff_cond == kUnequal);
V8_FALLTHROUGH;
case ValueType::kI64: case ValueType::kI64:
if (rhs.is_valid()) { if (rhs.is_valid()) {
Cmp(lhs.X(), rhs.X()); Cmp(lhs.X(), rhs.X());
......
...@@ -1121,6 +1121,7 @@ void LiftoffAssembler::Spill(int offset, LiftoffRegister reg, ValueType type) { ...@@ -1121,6 +1121,7 @@ void LiftoffAssembler::Spill(int offset, LiftoffRegister reg, ValueType type) {
case ValueType::kI32: case ValueType::kI32:
case ValueType::kOptRef: case ValueType::kOptRef:
case ValueType::kRef: case ValueType::kRef:
case ValueType::kRtt:
mov(dst, reg.gp()); mov(dst, reg.gp());
break; break;
case ValueType::kI64: case ValueType::kI64:
...@@ -2372,6 +2373,11 @@ void LiftoffAssembler::emit_cond_jump(LiftoffCondition liftoff_cond, ...@@ -2372,6 +2373,11 @@ void LiftoffAssembler::emit_cond_jump(LiftoffCondition liftoff_cond,
Condition cond = liftoff::ToCondition(liftoff_cond); Condition cond = liftoff::ToCondition(liftoff_cond);
if (rhs != no_reg) { if (rhs != no_reg) {
switch (type.kind()) { switch (type.kind()) {
case ValueType::kRef:
case ValueType::kOptRef:
case ValueType::kRtt:
DCHECK(liftoff_cond == kEqual || liftoff_cond == kUnequal);
V8_FALLTHROUGH;
case ValueType::kI32: case ValueType::kI32:
cmp(lhs, rhs); cmp(lhs, rhs);
break; break;
......
...@@ -730,6 +730,15 @@ class LiftoffAssembler : public TurboAssembler { ...@@ -730,6 +730,15 @@ class LiftoffAssembler : public TurboAssembler {
} }
} }
// Sets {dst} to the boolean result of comparing {lhs} and {rhs} with
// {condition}, using a pointer-width comparison: dispatches to the i64
// variant on 64-bit targets and to the i32 variant otherwise.
inline void emit_ptrsize_set_cond(LiftoffCondition condition, Register dst,
                                  LiftoffRegister lhs, LiftoffRegister rhs) {
  if (kSystemPointerSize != 8) {
    // 32-bit pointers: compare the gp halves directly.
    emit_i32_set_cond(condition, dst, lhs.gp(), rhs.gp());
  } else {
    emit_i64_set_cond(condition, dst, lhs, rhs);
  }
}
inline void emit_ptrsize_zeroextend_i32(Register dst, Register src) { inline void emit_ptrsize_zeroextend_i32(Register dst, Register src) {
if (kSystemPointerSize == 8) { if (kSystemPointerSize == 8) {
emit_type_conversion(kExprI64UConvertI32, LiftoffRegister(dst), emit_type_conversion(kExprI64UConvertI32, LiftoffRegister(dst),
......
...@@ -652,16 +652,9 @@ class LiftoffCompiler { ...@@ -652,16 +652,9 @@ class LiftoffCompiler {
++param_idx) { ++param_idx) {
ValueType type = decoder->local_type(param_idx); ValueType type = decoder->local_type(param_idx);
if (type.is_reference_type()) { if (type.is_reference_type()) {
Register isolate_root = __ GetUnusedRegister(kGpReg, {}).gp(); LiftoffRegister result = __ GetUnusedRegister(kGpReg, {});
// We can re-use the isolate_root register as result register. LoadNullValue(result.gp(), {});
Register result = isolate_root; __ Spill(__ cache_state()->stack_state.back().offset(), result, type);
LOAD_INSTANCE_FIELD(isolate_root, IsolateRoot, kSystemPointerSize);
__ LoadTaggedPointer(
result, isolate_root, no_reg,
IsolateData::root_slot_offset(RootIndex::kNullValue), {});
__ Spill(__ cache_state()->stack_state.back().offset(),
LiftoffRegister(result), type);
} }
} }
} }
...@@ -1282,9 +1275,22 @@ class LiftoffCompiler { ...@@ -1282,9 +1275,22 @@ class LiftoffCompiler {
__ emit_type_conversion(kExprI64UConvertI32, dst, c_call_dst, __ emit_type_conversion(kExprI64UConvertI32, dst, c_call_dst,
nullptr); nullptr);
}); });
case kExprRefIsNull: case kExprRefIsNull: {
unsupported(decoder, kRefTypes, "ref_is_null"); if (!FLAG_experimental_liftoff_extern_ref) {
unsupported(decoder, kRefTypes, "ref_is_null");
return;
}
LiftoffRegList pinned;
LiftoffRegister ref = pinned.set(__ PopToRegister());
LiftoffRegister null = __ GetUnusedRegister(kGpReg, pinned);
LoadNullValue(null.gp(), pinned);
// Prefer to overwrite one of the input registers with the result
// of the comparison.
LiftoffRegister dst = __ GetUnusedRegister(kGpReg, {ref, null}, {});
__ emit_ptrsize_set_cond(kEqual, dst.gp(), ref, null);
__ PushRegister(kWasmI32, dst);
return; return;
}
default: default:
UNREACHABLE(); UNREACHABLE();
} }
...@@ -1696,15 +1702,9 @@ class LiftoffCompiler { ...@@ -1696,15 +1702,9 @@ class LiftoffCompiler {
unsupported(decoder, kRefTypes, "ref_null"); unsupported(decoder, kRefTypes, "ref_null");
return; return;
} }
Register isolate_root = __ GetUnusedRegister(kGpReg, {}).gp(); LiftoffRegister null = __ GetUnusedRegister(kGpReg, {});
// We can re-use the isolate_root register as result register. LoadNullValue(null.gp(), {});
Register result = isolate_root; __ PushRegister(type, null);
LOAD_INSTANCE_FIELD(isolate_root, IsolateRoot, kSystemPointerSize);
__ LoadTaggedPointer(result, isolate_root, no_reg,
IsolateData::root_slot_offset(RootIndex::kNullValue),
{});
__ PushRegister(type, LiftoffRegister(result));
} }
void RefFunc(FullDecoder* decoder, uint32_t function_index, Value* result) { void RefFunc(FullDecoder* decoder, uint32_t function_index, Value* result) {
...@@ -3837,26 +3837,67 @@ class LiftoffCompiler { ...@@ -3837,26 +3837,67 @@ class LiftoffCompiler {
void StructNewWithRtt(FullDecoder* decoder, void StructNewWithRtt(FullDecoder* decoder,
const StructIndexImmediate<validate>& imm, const StructIndexImmediate<validate>& imm,
const Value& rtt, const Value args[], Value* result) { const Value& rtt, const Value args[], Value* result) {
// TODO(7748): Implement. ValueType struct_value_type = ValueType::Ref(imm.index, kNonNullable);
unsupported(decoder, kGC, "struct.new_with_rtt"); WasmCode::RuntimeStubId target = WasmCode::kWasmAllocateStructWithRtt;
compiler::CallDescriptor* call_descriptor =
GetBuiltinCallDescriptor<WasmAllocateStructWithRttDescriptor>(
compilation_zone_);
ValueType sig_reps[] = {struct_value_type, rtt.type};
FunctionSig sig(1, 1, sig_reps);
LiftoffAssembler::VarState rtt_value =
__ cache_state()->stack_state.end()[-1];
__ PrepareBuiltinCall(&sig, call_descriptor, {rtt_value});
__ CallRuntimeStub(target);
DefineSafepoint();
// Drop the RTT.
__ cache_state()->stack_state.pop_back(1);
LiftoffRegister obj(kReturnRegister0);
LiftoffRegList pinned = LiftoffRegList::ForRegs(obj);
for (uint32_t i = imm.struct_type->field_count(); i > 0;) {
i--;
int offset = StructFieldOffset(imm.struct_type, i);
LiftoffRegister value = pinned.set(__ PopToRegister(pinned));
ValueType field_type = imm.struct_type->field(i);
StoreObjectField(obj.gp(), offset, value, pinned, field_type);
pinned.clear(value);
}
__ PushRegister(struct_value_type, obj);
} }
void StructNewDefault(FullDecoder* decoder, void StructNewDefault(FullDecoder* decoder,
const StructIndexImmediate<validate>& imm, const StructIndexImmediate<validate>& imm,
const Value& rtt, Value* result) { const Value& rtt, Value* result) {
// TODO(7748): Implement. // TODO(7748): Implement.
unsupported(decoder, kGC, "struct.new_default_with_rtt"); unsupported(decoder, kGC, "struct.new_default_with_rtt");
} }
void StructGet(FullDecoder* decoder, const Value& struct_obj, void StructGet(FullDecoder* decoder, const Value& struct_obj,
const FieldIndexImmediate<validate>& field, bool is_signed, const FieldIndexImmediate<validate>& field, bool is_signed,
Value* result) { Value* result) {
// TODO(7748): Implement. const StructType* struct_type = field.struct_index.struct_type;
unsupported(decoder, kGC, "struct.get"); ValueType field_type = struct_type->field(field.index);
int offset = StructFieldOffset(struct_type, field.index);
LiftoffRegList pinned;
LiftoffRegister obj = pinned.set(__ PopToRegister(pinned));
MaybeEmitNullCheck(decoder, obj.gp(), pinned, struct_obj.type);
LiftoffRegister value =
pinned.set(__ GetUnusedRegister(reg_class_for(field_type), pinned));
LoadObjectField(value, obj.gp(), offset, field_type, is_signed, pinned);
__ PushRegister(field_type, value);
} }
void StructSet(FullDecoder* decoder, const Value& struct_obj, void StructSet(FullDecoder* decoder, const Value& struct_obj,
const FieldIndexImmediate<validate>& field, const FieldIndexImmediate<validate>& field,
const Value& field_value) { const Value& field_value) {
// TODO(7748): Implement. const StructType* struct_type = field.struct_index.struct_type;
unsupported(decoder, kGC, "struct.set"); ValueType field_type = struct_type->field(field.index);
int offset = StructFieldOffset(struct_type, field.index);
LiftoffRegList pinned;
LiftoffRegister value = pinned.set(__ PopToRegister(pinned));
LiftoffRegister obj = pinned.set(__ PopToRegister(pinned));
MaybeEmitNullCheck(decoder, obj.gp(), pinned, struct_obj.type);
StoreObjectField(obj.gp(), offset, value, pinned, field_type);
} }
void ArrayNewWithRtt(FullDecoder* decoder, void ArrayNewWithRtt(FullDecoder* decoder,
...@@ -3904,8 +3945,41 @@ class LiftoffCompiler { ...@@ -3904,8 +3945,41 @@ class LiftoffCompiler {
void RttCanon(FullDecoder* decoder, const HeapTypeImmediate<validate>& imm, void RttCanon(FullDecoder* decoder, const HeapTypeImmediate<validate>& imm,
Value* result) { Value* result) {
// TODO(7748): Implement. LiftoffRegister rtt = __ GetUnusedRegister(kGpReg, {});
unsupported(decoder, kGC, "rtt.canon"); RootIndex index;
switch (imm.type.representation()) {
case wasm::HeapType::kEq:
index = RootIndex::kWasmRttEqrefMap;
break;
case wasm::HeapType::kExtern:
index = RootIndex::kWasmRttExternrefMap;
break;
case wasm::HeapType::kFunc:
index = RootIndex::kWasmRttFuncrefMap;
break;
case wasm::HeapType::kI31:
index = RootIndex::kWasmRttI31refMap;
break;
case wasm::HeapType::kAny:
index = RootIndex::kWasmRttAnyrefMap;
break;
case wasm::HeapType::kBottom:
UNREACHABLE();
default:
// User-defined type.
LOAD_TAGGED_PTR_INSTANCE_FIELD(rtt.gp(), ManagedObjectMaps);
__ LoadTaggedPointer(
rtt.gp(), rtt.gp(), no_reg,
wasm::ObjectAccess::ElementOffsetInTaggedFixedArray(
imm.type.ref_index()),
{});
__ PushRegister(ValueType::Rtt(imm.type, 1), rtt);
return;
}
LOAD_INSTANCE_FIELD(rtt.gp(), IsolateRoot, kSystemPointerSize);
__ LoadTaggedPointer(rtt.gp(), rtt.gp(), no_reg,
IsolateData::root_slot_offset(index), {});
__ PushRegister(ValueType::Rtt(imm.type, 1), rtt);
} }
void RttSub(FullDecoder* decoder, const HeapTypeImmediate<validate>& imm, void RttSub(FullDecoder* decoder, const HeapTypeImmediate<validate>& imm,
const Value& parent, Value* result) { const Value& parent, Value* result) {
...@@ -4141,6 +4215,51 @@ class LiftoffCompiler { ...@@ -4141,6 +4215,51 @@ class LiftoffCompiler {
__ FinishCall(imm.sig, call_descriptor); __ FinishCall(imm.sig, call_descriptor);
} }
// Loads the null-value root object into {null}: first reads the isolate
// root pointer from the instance, then loads the tagged null value from
// its root slot. {pinned} is forwarded to the tagged load (registers that
// must stay live — presumably excluded from scratch use; confirm in
// LoadTaggedPointer).
void LoadNullValue(Register null, LiftoffRegList pinned) {
// {null} doubles as the scratch register for the isolate root.
LOAD_INSTANCE_FIELD(null, IsolateRoot, kSystemPointerSize);
__ LoadTaggedPointer(null, null, no_reg,
IsolateData::root_slot_offset(RootIndex::kNullValue),
pinned);
}
// Emits a runtime null check for {object} if (and only if) {type} is
// nullable: compares {object} against the null value and jumps to an
// out-of-line NullDereference trap on equality. Non-nullable types are
// statically known non-null, so no code is emitted for them.
void MaybeEmitNullCheck(FullDecoder* decoder, Register object,
LiftoffRegList pinned, ValueType type) {
if (!type.is_nullable()) return;
// Trap label is materialized out of line so the hot path stays straight.
Label* trap_label = AddOutOfLineTrap(
decoder->position(), WasmCode::kThrowWasmTrapNullDereference);
LiftoffRegister null = __ GetUnusedRegister(kGpReg, pinned);
LoadNullValue(null.gp(), pinned);
__ emit_cond_jump(LiftoffCondition::kEqual, trap_label, type, object,
null.gp());
}
// Returns the tagged (heap-object) offset of field {field_index} inside a
// WasmStruct, i.e. the struct header size plus the field's offset within
// the payload, adjusted for the heap-object tag.
int StructFieldOffset(const StructType* struct_type, int field_index) {
  const int untagged_offset =
      WasmStruct::kHeaderSize + struct_type->field_offset(field_index);
  return wasm::ObjectAccess::ToTagged(untagged_offset);
}
// Loads a struct field at {offset} from the object in {src} into {dst}.
// Reference-typed fields use a tagged-pointer load; all other (primitive)
// fields use a plain load whose width/extension is derived from {type}
// and {is_signed} (i8/i16 fields load sign- or zero-extended).
void LoadObjectField(LiftoffRegister dst, Register src, int offset,
ValueType type, bool is_signed, LiftoffRegList pinned) {
if (type.is_reference_type()) {
__ LoadTaggedPointer(dst.gp(), src, no_reg, offset, pinned);
} else {
// Primitive type.
LoadType load_type = LoadType::ForValueType(type, is_signed);
__ Load(dst, src, no_reg, offset, load_type, pinned);
}
}
// Stores {value} into the struct field at {offset} of the object in {obj}.
// Reference-typed fields go through the tagged-pointer store (which,
// presumably, also handles the GC write barrier — confirm in
// StoreTaggedPointer); primitive fields use a plain store whose width is
// derived from {type}.
void StoreObjectField(Register obj, int offset, LiftoffRegister value,
LiftoffRegList pinned, ValueType type) {
if (type.is_reference_type()) {
__ StoreTaggedPointer(obj, offset, value, pinned);
} else {
// Primitive type.
StoreType store_type = StoreType::ForValueType(type);
__ Store(obj, no_reg, offset, value, store_type, pinned);
}
}
static constexpr WasmOpcode kNoOutstandingOp = kExprUnreachable; static constexpr WasmOpcode kNoOutstandingOp = kExprUnreachable;
LiftoffAssembler asm_; LiftoffAssembler asm_;
......
...@@ -66,6 +66,7 @@ static inline constexpr RegClass reg_class_for(ValueType::Kind kind) { ...@@ -66,6 +66,7 @@ static inline constexpr RegClass reg_class_for(ValueType::Kind kind) {
return kNeedS128RegPair ? kFpRegPair : kFpReg; return kNeedS128RegPair ? kFpRegPair : kFpReg;
case ValueType::kRef: case ValueType::kRef:
case ValueType::kOptRef: case ValueType::kOptRef:
case ValueType::kRtt:
return kGpReg; return kGpReg;
default: default:
return kNoReg; // unsupported type return kNoReg; // unsupported type
......
...@@ -841,6 +841,7 @@ void LiftoffAssembler::Spill(int offset, LiftoffRegister reg, ValueType type) { ...@@ -841,6 +841,7 @@ void LiftoffAssembler::Spill(int offset, LiftoffRegister reg, ValueType type) {
case ValueType::kI64: case ValueType::kI64:
case ValueType::kOptRef: case ValueType::kOptRef:
case ValueType::kRef: case ValueType::kRef:
case ValueType::kRtt:
movq(dst, reg.gp()); movq(dst, reg.gp());
break; break;
case ValueType::kF32: case ValueType::kF32:
...@@ -2043,6 +2044,11 @@ void LiftoffAssembler::emit_cond_jump(LiftoffCondition liftoff_cond, ...@@ -2043,6 +2044,11 @@ void LiftoffAssembler::emit_cond_jump(LiftoffCondition liftoff_cond,
case ValueType::kI32: case ValueType::kI32:
cmpl(lhs, rhs); cmpl(lhs, rhs);
break; break;
case ValueType::kRef:
case ValueType::kOptRef:
case ValueType::kRtt:
DCHECK(liftoff_cond == kEqual || liftoff_cond == kUnequal);
V8_FALLTHROUGH;
case ValueType::kI64: case ValueType::kI64:
cmpq(lhs, rhs); cmpq(lhs, rhs);
break; break;
......
...@@ -558,7 +558,7 @@ class LoadType { ...@@ -558,7 +558,7 @@ class LoadType {
constexpr ValueType value_type() const { return kValueType[val_]; } constexpr ValueType value_type() const { return kValueType[val_]; }
constexpr MachineType mem_type() const { return kMemType[val_]; } constexpr MachineType mem_type() const { return kMemType[val_]; }
static LoadType ForValueType(ValueType type) { static LoadType ForValueType(ValueType type, bool is_signed = false) {
switch (type.kind()) { switch (type.kind()) {
case ValueType::kI32: case ValueType::kI32:
return kI32Load; return kI32Load;
...@@ -570,6 +570,10 @@ class LoadType { ...@@ -570,6 +570,10 @@ class LoadType {
return kF64Load; return kF64Load;
case ValueType::kS128: case ValueType::kS128:
return kS128Load; return kS128Load;
case ValueType::kI8:
return is_signed ? kI32Load8S : kI32Load8U;
case ValueType::kI16:
return is_signed ? kI32Load16S : kI32Load16U;
default: default:
UNREACHABLE(); UNREACHABLE();
} }
...@@ -642,6 +646,10 @@ class StoreType { ...@@ -642,6 +646,10 @@ class StoreType {
return kF64Store; return kF64Store;
case ValueType::kS128: case ValueType::kS128:
return kS128Store; return kS128Store;
case ValueType::kI8:
return kI32Store8;
case ValueType::kI16:
return kI32Store16;
default: default:
UNREACHABLE(); UNREACHABLE();
} }
......
...@@ -84,7 +84,8 @@ struct WasmModule; ...@@ -84,7 +84,8 @@ struct WasmModule;
V(I32PairToBigInt) \ V(I32PairToBigInt) \
V(I64ToBigInt) \ V(I64ToBigInt) \
V(RecordWrite) \ V(RecordWrite) \
V(ToNumber) V(ToNumber) \
V(WasmAllocateStructWithRtt)
// Sorted, disjoint and non-overlapping memory regions. A region is of the // Sorted, disjoint and non-overlapping memory regions. A region is of the
// form [start, end). So there's no [start, end), [end, other_end), // form [start, end). So there's no [start, end), [end, other_end),
......
...@@ -30,11 +30,18 @@ using F = std::pair<ValueType, bool>; ...@@ -30,11 +30,18 @@ using F = std::pair<ValueType, bool>;
class WasmGCTester { class WasmGCTester {
public: public:
WasmGCTester() explicit WasmGCTester(
TestExecutionTier execution_tier = TestExecutionTier::kTurbofan)
: flag_gc(&v8::internal::FLAG_experimental_wasm_gc, true), : flag_gc(&v8::internal::FLAG_experimental_wasm_gc, true),
flag_reftypes(&v8::internal::FLAG_experimental_wasm_reftypes, true), flag_reftypes(&v8::internal::FLAG_experimental_wasm_reftypes, true),
flag_typedfuns(&v8::internal::FLAG_experimental_wasm_typed_funcref, flag_typedfuns(&v8::internal::FLAG_experimental_wasm_typed_funcref,
true), true),
flag_liftoff(
&v8::internal::FLAG_liftoff,
execution_tier == TestExecutionTier::kTurbofan ? false : true),
flag_liftoff_only(
&v8::internal::FLAG_liftoff_only,
execution_tier == TestExecutionTier::kLiftoff ? true : false),
zone(&allocator, ZONE_NAME), zone(&allocator, ZONE_NAME),
builder_(&zone), builder_(&zone),
isolate_(CcTest::InitIsolateOnce()), isolate_(CcTest::InitIsolateOnce()),
...@@ -173,6 +180,8 @@ class WasmGCTester { ...@@ -173,6 +180,8 @@ class WasmGCTester {
const FlagScope<bool> flag_gc; const FlagScope<bool> flag_gc;
const FlagScope<bool> flag_reftypes; const FlagScope<bool> flag_reftypes;
const FlagScope<bool> flag_typedfuns; const FlagScope<bool> flag_typedfuns;
const FlagScope<bool> flag_liftoff;
const FlagScope<bool> flag_liftoff_only;
v8::internal::AccountingAllocator allocator; v8::internal::AccountingAllocator allocator;
Zone zone; Zone zone;
...@@ -191,9 +200,10 @@ ValueType optref(uint32_t type_index) { ...@@ -191,9 +200,10 @@ ValueType optref(uint32_t type_index) {
return ValueType::Ref(type_index, kNullable); return ValueType::Ref(type_index, kNullable);
} }
// TODO(7748): Use WASM_EXEC_TEST once interpreter and liftoff are supported. WASM_COMPILED_EXEC_TEST(WasmBasicStruct) {
TEST(WasmBasicStruct) { WasmGCTester tester(execution_tier);
WasmGCTester tester; FLAG_experimental_liftoff_extern_ref = true;
const byte type_index = const byte type_index =
tester.DefineStruct({F(kWasmI32, true), F(kWasmI32, true)}); tester.DefineStruct({F(kWasmI32, true), F(kWasmI32, true)});
const byte empty_struct_index = tester.DefineStruct({}); const byte empty_struct_index = tester.DefineStruct({});
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment