Commit 476b527b authored by Andreas Haas, committed by Commit Bot

[wasm][liftoff] Implement table.fill

This CL implements table.fill in Liftoff. For that I also changed the
TurboFan implementation to call the runtime through a builtin instead of
calling the runtime function directly. This has the advantage that we
don't have to convert the parameters to Smi in the generated code.

R=thibaudm@chromium.org

Bug: v8:7581
Change-Id: I26793509f8d44582d6a46d25d0c2fc933068f4fa
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2735314
Reviewed-by: Thibaud Michaud <thibaudm@chromium.org>
Commit-Queue: Andreas Haas <ahaas@chromium.org>
Cr-Commit-Position: refs/heads/master@{#73214}
parent fc7ca518
......@@ -11,6 +11,8 @@ extern runtime WasmTableInit(
Context, WasmInstanceObject, Object, Object, Smi, Smi, Smi): JSAny;
extern runtime WasmTableCopy(
Context, WasmInstanceObject, Object, Object, Smi, Smi, Smi): JSAny;
extern runtime WasmTableFill(
Context, WasmInstanceObject, Smi, Smi, Object, Smi): JSAny;
extern runtime WasmFunctionTableGet(
Context, WasmInstanceObject, Smi, Smi): JSAny;
extern runtime WasmFunctionTableSet(
......@@ -113,6 +115,22 @@ builtin WasmTableCopy(
}
}
// Implements the wasm `table.fill` instruction: writes `value` into `count`
// consecutive slots of the table identified by `table`, starting at index
// `startRaw`. Called from generated code, so `start`/`count` arrive as raw
// uint32 values and are converted to Smis here rather than in generated code.
builtin WasmTableFill(
    table: Smi, startRaw: uint32, countRaw: uint32, value: Object): JSAny {
  try {
    const instance: WasmInstanceObject = LoadInstanceFromFrame();
    // Convert<PositiveSmi> jumps to TableOutOfBounds when the uint32 value
    // does not fit into a positive Smi; in-range bounds checks against the
    // actual table size happen in the runtime function.
    const start: Smi =
        Convert<PositiveSmi>(startRaw) otherwise TableOutOfBounds;
    const count: Smi =
        Convert<PositiveSmi>(countRaw) otherwise TableOutOfBounds;
    // Tail-call the runtime; it performs the fill and throws on OOB.
    tail runtime::WasmTableFill(
        LoadContextFromInstance(instance), instance, table, start, value,
        count);
  } label TableOutOfBounds deferred {
    tail ThrowWasmTrapTableOutOfBounds();
  }
}
builtin WasmTableGet(tableIndex: intptr, index: int32): Object {
const instance: WasmInstanceObject = LoadInstanceFromFrame();
const entryIndex: intptr = ChangeInt32ToIntPtr(index);
......
......@@ -5536,13 +5536,10 @@ Node* WasmGraphBuilder::TableSize(uint32_t table_index) {
// Lowers wasm `table.fill` to a call of the WasmTableFill builtin stub.
// The builtin converts `start`/`count` to Smis and tail-calls the runtime,
// so no Smi conversions have to be emitted in generated code here.
// NOTE(review): the pre-commit BuildCallToRuntime implementation (dead diff
// residue that preceded this return) has been removed; only the builtin
// call remains.
Node* WasmGraphBuilder::TableFill(uint32_t table_index, Node* start,
                                  Node* value, Node* count) {
  // Argument order must match the WasmTableFill builtin signature:
  // (table, start, count, value).
  return gasm_->CallRuntimeStub(
      wasm::WasmCode::kWasmTableFill,
      graph()->NewNode(mcgraph()->common()->NumberConstant(table_index)), start,
      count, value);
}
Node* WasmGraphBuilder::StructNewWithRtt(uint32_t struct_index,
......
......@@ -517,15 +517,14 @@ RUNTIME_FUNCTION(Runtime_WasmTableGrow) {
RUNTIME_FUNCTION(Runtime_WasmTableFill) {
ClearThreadInWasmScope flag_scope(isolate);
HandleScope scope(isolate);
DCHECK_EQ(4, args.length());
auto instance =
Handle<WasmInstanceObject>(GetWasmInstanceOnStackTop(isolate), isolate);
CONVERT_UINT32_ARG_CHECKED(table_index, 0);
CONVERT_UINT32_ARG_CHECKED(start, 1);
CONVERT_ARG_CHECKED(Object, value_raw, 2);
DCHECK_EQ(5, args.length());
CONVERT_ARG_HANDLE_CHECKED(WasmInstanceObject, instance, 0);
CONVERT_UINT32_ARG_CHECKED(table_index, 1);
CONVERT_UINT32_ARG_CHECKED(start, 2);
CONVERT_ARG_CHECKED(Object, value_raw, 3);
// TODO(wasm): Manually box because parameters are not visited yet.
Handle<Object> value(value_raw, isolate);
CONVERT_UINT32_ARG_CHECKED(count, 3);
CONVERT_UINT32_ARG_CHECKED(count, 4);
Handle<WasmTableObject> table(
WasmTableObject::cast(instance->tables().get(table_index)), isolate);
......
......@@ -580,29 +580,29 @@ namespace internal {
F(TypedArraySet, 2, 1) \
F(TypedArraySortFast, 1, 1)
// Wasm runtime intrinsics: F(name, number of arguments, number of returns).
// WasmTableFill takes 5 arguments (instance, table index, start, value,
// count) now that the instance is passed explicitly by the builtin.
#define FOR_EACH_INTRINSIC_WASM(F, I) \
  F(ThrowWasmError, 1, 1)             \
  F(ThrowWasmStackOverflow, 0, 1)     \
  F(WasmI32AtomicWait, 4, 1)          \
  F(WasmI64AtomicWait, 5, 1)          \
  F(WasmAtomicNotify, 3, 1)           \
  F(WasmMemoryGrow, 2, 1)             \
  F(WasmStackGuard, 0, 1)             \
  F(WasmThrow, 2, 1)                  \
  F(WasmReThrow, 1, 1)                \
  F(WasmThrowJSTypeError, 0, 1)       \
  F(WasmRefFunc, 1, 1)                \
  F(WasmFunctionTableGet, 3, 1)       \
  F(WasmFunctionTableSet, 4, 1)       \
  F(WasmTableInit, 6, 1)              \
  F(WasmTableCopy, 6, 1)              \
  F(WasmTableGrow, 3, 1)              \
  F(WasmTableFill, 5, 1)              \
  F(WasmIsValidRefValue, 3, 1)        \
  F(WasmCompileLazy, 2, 1)            \
  F(WasmCompileWrapper, 2, 1)         \
  F(WasmTriggerTierUp, 1, 1)          \
  F(WasmDebugBreak, 0, 1)             \
  F(WasmAllocateRtt, 2, 1)
#define FOR_EACH_INTRINSIC_WEAKREF(F, I) \
......
......@@ -89,10 +89,11 @@ constexpr LoadType::LoadTypeValue kPointerLoadType =
constexpr ValueKind kPointerValueType = kSystemPointerSize == 8 ? kI64 : kI32;
// Width of a tagged (compressed or full) pointer slot in Liftoff's value-kind
// terms: 32-bit when pointers are 32 bits wide or compressed, 64-bit
// otherwise. A Smi occupies a tagged slot, so it shares the representation.
// NOTE(review): the duplicate pre-commit kSmiValueType definitions inside the
// #if/#else (diff residue that caused a redefinition) have been removed.
#if V8_TARGET_ARCH_32_BIT || defined(V8_COMPRESS_POINTERS)
constexpr ValueKind kTaggedValueType = kI32;
#else
constexpr ValueKind kTaggedValueType = kI64;
#endif
constexpr ValueKind kSmiValueType = kTaggedValueType;
#if V8_TARGET_ARCH_ARM64
// On ARM64, the Assembler keeps track of pointers to Labels to resolve
......@@ -4400,8 +4401,35 @@ class LiftoffCompiler {
}
void TableFill(FullDecoder* decoder, const TableIndexImmediate<validate>& imm,
const Value& start, const Value& value, const Value& count) {
unsupported(decoder, kRefTypes, "table.fill");
const Value&, const Value&, const Value&) {
LiftoffRegList pinned;
LiftoffRegister table_index_reg =
pinned.set(__ GetUnusedRegister(kGpReg, pinned));
LoadSmi(table_index_reg, imm.index);
LiftoffAssembler::VarState table_index(kPointerValueType, table_index_reg,
0);
LiftoffAssembler::VarState count = __ cache_state()->stack_state.end()[-1];
LiftoffAssembler::VarState value = __ cache_state()->stack_state.end()[-2];
LiftoffAssembler::VarState start = __ cache_state()->stack_state.end()[-3];
WasmCode::RuntimeStubId target = WasmCode::kWasmTableFill;
compiler::CallDescriptor* call_descriptor =
GetBuiltinCallDescriptor<WasmTableFillDescriptor>(compilation_zone_);
ValueKind sig_reps[] = {kSmiValueType, kI32, kI32, kTaggedValueType};
ValueKindSig sig(0, 4, sig_reps);
__ PrepareBuiltinCall(&sig, call_descriptor,
{table_index, start, count, value});
__ CallRuntimeStub(target);
DefineSafepoint();
// Pop parameters from the value stack.
__ cache_state()->stack_state.pop_back(3);
RegisterDebugSideTableEntry(decoder, DebugSideTableBuilder::kDidSpill);
}
void StructNew(FullDecoder* decoder,
......
......@@ -67,6 +67,7 @@ struct WasmModule;
V(WasmMemoryGrow) \
V(WasmTableInit) \
V(WasmTableCopy) \
V(WasmTableFill) \
V(WasmTableGet) \
V(WasmTableSet) \
V(WasmStackGuard) \
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment