Commit 0c1b4c25 authored by Ben L. Titzer, committed by Commit Bot

[wasm] Move indirect function tables into the WasmContext

This CL changes the WASM implementation to access indirect function
tables through the WasmContext, whereas previously indirect function
tables and their sizes were constants that were inlined into compiled
code, requiring code patching. This is a necessary step for sharing
code between instances and, eventually, isolates.
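
For orientation, the following is a condensed sketch of the data layout this
CL introduces; it mirrors the IndirectFunctionTableEntry and WasmContext
definitions further down in this diff (the Address alias is simplified):

#include <cstdint>

using Address = uint8_t*;  // simplified stand-in for V8's Address type

struct WasmContext;

// One slot of the per-instance indirect dispatch table.
struct IndirectFunctionTableEntry {
  int32_t sig_id;        // canonical signature id; -1 marks an empty slot
  WasmContext* context;  // context of the instance that owns the target
  Address target;        // first instruction of the callee
};

struct WasmContext {
  // ... linear-memory and globals fields elided ...
  IndirectFunctionTableEntry* table;
  uint32_t table_size;
};

// With this layout, a call_indirect compiles to, in effect:
//   if (index >= ctx->table_size)                 trap(kTrapFuncInvalid);
//   if (ctx->table[index].sig_id != expected_sig) trap(kTrapFuncSigMismatch);
//   call ctx->table[index].target, passing ctx->table[index].context,
// with no per-instance constants baked into the machine code.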

R=clemensh@chromium.org,mstarzinger@chromium.org

Bug: v8:7424
Change-Id: Ida4138ed92729730dfbc0a81a84d8484b233d808
Reviewed-on: https://chromium-review.googlesource.com/895683
Commit-Queue: Ben Titzer <titzer@chromium.org>
Reviewed-by: Clemens Hammacher <clemensh@chromium.org>
Cr-Commit-Position: refs/heads/master@{#51427}
parent fb0144f6
......@@ -2529,8 +2529,12 @@ Node* WasmGraphBuilder::BuildCCall(MachineSignature* sig, Node* function,
Node* WasmGraphBuilder::BuildWasmCall(wasm::FunctionSig* sig, Node** args,
Node*** rets,
wasm::WasmCodePosition position) {
DCHECK_NOT_NULL(wasm_context_);
wasm::WasmCodePosition position,
Node* wasm_context) {
if (wasm_context == nullptr) {
DCHECK_NOT_NULL(wasm_context_);
wasm_context = wasm_context_.get();
}
SetNeedsStackCheck();
const size_t params = sig->parameter_count();
const size_t extra = 3; // wasm_context, effect, and control.
......@@ -2541,7 +2545,7 @@ Node* WasmGraphBuilder::BuildWasmCall(wasm::FunctionSig* sig, Node** args,
// Make room for the wasm_context parameter at index 1, just after code.
memmove(&args[2], &args[1], params * sizeof(Node*));
args[1] = wasm_context_.get();
args[1] = wasm_context;
// Add effect and control inputs.
args[params + 2] = *effect_;
......@@ -2602,13 +2606,15 @@ Node* WasmGraphBuilder::CallIndirect(uint32_t sig_index, Node** args,
uint32_t table_index = 0;
wasm::FunctionSig* sig = env_->module->signatures[sig_index];
EnsureFunctionTableNodes();
Node* table = nullptr;
Node* table_size = nullptr;
GetFunctionTableNodes(table_index, &table, &table_size);
MachineOperatorBuilder* machine = jsgraph()->machine();
Node* key = args[0];
// Bounds check against the table size.
Node* size = function_tables_[table_index].size;
Node* in_bounds = graph()->NewNode(machine->Uint32LessThan(), key, size);
Node* in_bounds =
graph()->NewNode(machine->Uint32LessThan(), key, table_size);
TrapIfFalse(wasm::kTrapFuncInvalid, in_bounds, position);
// Mask the key to prevent SSCA.
......@@ -2617,36 +2623,72 @@ Node* WasmGraphBuilder::CallIndirect(uint32_t sig_index, Node** args,
Node* neg_key =
graph()->NewNode(machine->Word32Xor(), key, Int32Constant(-1));
Node* masked_diff = graph()->NewNode(
machine->Word32And(), graph()->NewNode(machine->Int32Sub(), key, size),
neg_key);
machine->Word32And(),
graph()->NewNode(machine->Int32Sub(), key, table_size), neg_key);
Node* mask =
graph()->NewNode(machine->Word32Sar(), masked_diff, Int32Constant(31));
key = graph()->NewNode(machine->Word32And(), key, mask);
}
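// Sketch (not part of this CL): the masking above is a branchless Spectre
// mitigation. The mask is all ones exactly when the key is in bounds (key <
// table_size with the key's sign bit clear), so a misspeculated access
// collapses to index 0, which the bounds check has already validated. A
// standalone version of the same arithmetic:

#include <cassert>
#include <cstdint>

uint32_t MaskKey(uint32_t key, uint32_t size) {
  int32_t diff = static_cast<int32_t>(key - size);  // sign set iff key < size
  int32_t neg_key = static_cast<int32_t>(~key);     // same as key ^ -1
  int32_t mask = (diff & neg_key) >> 31;            // 0 or -1 (arithmetic shift)
  return key & static_cast<uint32_t>(mask);
}

int main() {
  assert(MaskKey(3, 10) == 3);            // in bounds: key unchanged
  assert(MaskKey(10, 10) == 0);           // out of bounds: clamped to 0
  assert(MaskKey(0x80000003u, 10) == 0);  // key sign bit set: clamped to 0
}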
Node* table_address = function_tables_[table_index].table_addr;
Node* table = graph()->NewNode(
jsgraph()->machine()->Load(MachineType::AnyTagged()), table_address,
jsgraph()->IntPtrConstant(0), *effect_, *control_);
// Load signature from the table and check.
// The table is a FixedArray; signatures are encoded as SMIs.
// [sig1, code1, sig2, code2, sig3, code3, ...]
static_assert(compiler::kFunctionTableEntrySize == 2, "consistency");
static_assert(compiler::kFunctionTableSignatureOffset == 0, "consistency");
static_assert(compiler::kFunctionTableCodeOffset == 1, "consistency");
int32_t canonical_sig_num = env_->module->signature_ids[sig_index];
if (WASM_CONTEXT_TABLES) {
// The table entries are {IndirectFunctionTableEntry} structs.
Node* scaled_key =
graph()->NewNode(machine->Int32Mul(), key,
Int32Constant(sizeof(IndirectFunctionTableEntry)));
const Operator* add = nullptr;
if (machine->Is64()) {
scaled_key = graph()->NewNode(machine->ChangeInt32ToInt64(), scaled_key);
add = machine->Int64Add();
} else {
add = machine->Int32Add();
}
Node* entry_address = graph()->NewNode(add, table, scaled_key);
Node* loaded_sig = graph()->NewNode(
machine->Load(MachineType::Int32()), entry_address,
Int32Constant(offsetof(IndirectFunctionTableEntry, sig_id)), *effect_,
*control_);
Node* sig_match = graph()->NewNode(machine->WordEqual(), loaded_sig,
Int32Constant(canonical_sig_num));
TrapIfFalse(wasm::kTrapFuncSigMismatch, sig_match, position);
Node* target = graph()->NewNode(
machine->Load(MachineType::Pointer()), entry_address,
Int32Constant(offsetof(IndirectFunctionTableEntry, target)), *effect_,
*control_);
Node* loaded_context = graph()->NewNode(
machine->Load(MachineType::Pointer()), entry_address,
Int32Constant(offsetof(IndirectFunctionTableEntry, context)), *effect_,
*control_);
args[0] = target;
return BuildWasmCall(sig, args, rets, position, loaded_context);
}
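// Sketch (not part of this CL): on a typical LP64 target the entry layout
// {int32_t sig_id; WasmContext* context; Address target} pads sig_id to 8
// bytes, so sizeof(IndirectFunctionTableEntry) == 24 and the field offsets
// are 0 / 8 / 16. The node sequence above then amounts to:
//   entry = table + key * 24;    sig  = *(int32_t*)(entry + 0);
//   ctx = *(void**)(entry + 8);  code = *(void**)(entry + 16);
// A mirror struct makes the offsets visible:

#include <cstddef>
#include <cstdint>
#include <cstdio>

struct EntryMirror {  // hypothetical mirror, for illustration only
  int32_t sig_id;
  void* context;
  void* target;
};

int main() {
  printf("sizeof=%zu sig_id=%zu context=%zu target=%zu\n", sizeof(EntryMirror),
         offsetof(EntryMirror, sig_id), offsetof(EntryMirror, context),
         offsetof(EntryMirror, target));  // 24 0 8 16 on LP64
}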
// The table entries are elements of a fixed array.
ElementAccess access = AccessBuilder::ForFixedArrayElement();
const int fixed_offset = access.header_size - access.tag();
Node* key_offset = graph()->NewNode(machine->Word32Shl(), key,
Int32Constant(kPointerSizeLog2 + 1));
Node* load_sig =
Node* loaded_sig =
graph()->NewNode(machine->Load(MachineType::AnyTagged()), table,
graph()->NewNode(machine->Int32Add(), key_offset,
Int32Constant(fixed_offset)),
*effect_, *control_);
int32_t canonical_sig_num = env_->module->signature_ids[sig_index];
CHECK_GE(canonical_sig_num, 0);
Node* sig_match = graph()->NewNode(machine->WordEqual(), load_sig,
Node* sig_match = graph()->NewNode(machine->WordEqual(), loaded_sig,
jsgraph()->SmiConstant(canonical_sig_num));
TrapIfFalse(wasm::kTrapFuncSigMismatch, sig_match, position);
// Load code object from the table. It is held by a Foreign.
......@@ -2655,15 +2697,7 @@ Node* WasmGraphBuilder::CallIndirect(uint32_t sig_index, Node** args,
graph()->NewNode(machine->Int32Add(), key_offset,
Uint32Constant(fixed_offset + kPointerSize)),
*effect_, *control_);
if (FLAG_wasm_jit_to_native) {
Node* address = graph()->NewNode(
machine->Load(MachineType::Pointer()), entry,
Int32Constant(Foreign::kForeignAddressOffset - kHeapObjectTag),
*effect_, *control_);
args[0] = address;
} else {
args[0] = entry;
}
return BuildWasmCall(sig, args, rets, position);
}
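// Sketch (not part of this CL) of the legacy FixedArray offset math used in
// the fallback path above: each entry is a [sig, code] pair of tagged words,
// so entry {key}'s signature lives key * 2 * kPointerSize bytes past the
// header, which is exactly the `key << (kPointerSizeLog2 + 1)` shift:

#include <cstdint>

constexpr uint32_t kPtrSize = 8;      // assuming a 64-bit build
constexpr uint32_t kPtrSizeLog2 = 3;  // log2(kPtrSize)

uint32_t SigByteOffset(uint32_t key, uint32_t fixed_offset) {
  return (key << (kPtrSizeLog2 + 1)) + fixed_offset;  // sig slot of entry key
}

uint32_t CodeByteOffset(uint32_t key, uint32_t fixed_offset) {
  return SigByteOffset(key, fixed_offset) + kPtrSize;  // code slot, next word
}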
......@@ -3615,20 +3649,44 @@ Node* WasmGraphBuilder::CurrentMemoryPages() {
jsgraph()->Int32Constant(WhichPowerOf2(wasm::kWasmPageSize)));
}
void WasmGraphBuilder::EnsureFunctionTableNodes() {
if (function_tables_.size() > 0) return;
size_t tables_size = env_->function_tables.size();
for (size_t i = 0; i < tables_size; ++i) {
wasm::GlobalHandleAddress function_handle_address =
env_->function_tables[i];
Node* table_addr = jsgraph()->RelocatableIntPtrConstant(
reinterpret_cast<intptr_t>(function_handle_address),
RelocInfo::WASM_GLOBAL_HANDLE);
uint32_t table_size = env_->module->function_tables[i].initial_size;
Node* size = jsgraph()->RelocatableInt32Constant(
static_cast<uint32_t>(table_size),
RelocInfo::WASM_FUNCTION_TABLE_SIZE_REFERENCE);
function_tables_.push_back({table_addr, size});
void WasmGraphBuilder::GetFunctionTableNodes(uint32_t table_index, Node** table,
Node** table_size) {
if (WASM_CONTEXT_TABLES) {
// The table address and size are stored in the WasmContext.
// Don't bother caching them, since they are only used in indirect calls,
// which would cause them to be spilled on the stack anyway.
*table = graph()->NewNode(
jsgraph()->machine()->Load(MachineType::UintPtr()), wasm_context_.get(),
jsgraph()->Int32Constant(
static_cast<int32_t>(offsetof(WasmContext, table))),
*effect_, *control_);
*table_size = graph()->NewNode(
jsgraph()->machine()->Load(MachineType::Uint32()), wasm_context_.get(),
jsgraph()->Int32Constant(
static_cast<int32_t>(offsetof(WasmContext, table_size))),
*effect_, *control_);
} else {
// The function table nodes are relocatable constants.
if (function_tables_.size() == 0) {
size_t tables_size = env_->function_tables.size();
for (size_t i = 0; i < tables_size; ++i) {
wasm::GlobalHandleAddress function_handle_address =
env_->function_tables[i];
Node* table_addr = jsgraph()->RelocatableIntPtrConstant(
reinterpret_cast<intptr_t>(function_handle_address),
RelocInfo::WASM_GLOBAL_HANDLE);
uint32_t table_size = env_->module->function_tables[i].initial_size;
Node* size = jsgraph()->RelocatableInt32Constant(
static_cast<uint32_t>(table_size),
RelocInfo::WASM_FUNCTION_TABLE_SIZE_REFERENCE);
function_tables_.push_back({table_addr, size});
}
}
*table_size = function_tables_[table_index].size;
*table =
graph()->NewNode(jsgraph()->machine()->Load(MachineType::AnyTagged()),
function_tables_[table_index].table_addr,
jsgraph()->IntPtrConstant(0), *effect_, *control_);
}
}
......
......@@ -372,7 +372,8 @@ class WasmGraphBuilder {
Node* ToJS(Node* node, wasm::ValueType type);
Node* FromJS(Node* node, Node* js_context, wasm::ValueType type);
Node* Invert(Node* node);
void EnsureFunctionTableNodes();
void GetFunctionTableNodes(uint32_t table_index, Node** table,
Node** table_size);
//-----------------------------------------------------------------------
// Operations that concern the linear memory.
......@@ -506,7 +507,8 @@ class WasmGraphBuilder {
template <typename... Args>
Node* BuildCCall(MachineSignature* sig, Node* function, Args... args);
Node* BuildWasmCall(wasm::FunctionSig* sig, Node** args, Node*** rets,
wasm::WasmCodePosition position);
wasm::WasmCodePosition position,
Node* wasm_context = nullptr);
Node* BuildF32CopySign(Node* left, Node* right);
Node* BuildF64CopySign(Node* left, Node* right);
......
......@@ -218,7 +218,7 @@ void LiftoffAssembler::CallRuntime(Zone* zone, Runtime::FunctionId fid) {
}
void LiftoffAssembler::CallIndirect(wasm::FunctionSig* sig,
compiler::CallDescriptor* call_descriptor,
compiler::CallDescriptor* call_desc,
Register target) {
BAILOUT("CallIndirect");
}
......
......@@ -218,7 +218,7 @@ void LiftoffAssembler::CallRuntime(Zone* zone, Runtime::FunctionId fid) {
}
void LiftoffAssembler::CallIndirect(wasm::FunctionSig* sig,
compiler::CallDescriptor* call_descriptor,
compiler::CallDescriptor* call_desc,
Register target) {
BAILOUT("CallIndirect");
}
......
......@@ -467,7 +467,8 @@ void LiftoffAssembler::SpillAllRegisters() {
void LiftoffAssembler::PrepareCall(wasm::FunctionSig* sig,
compiler::CallDescriptor* call_descriptor,
uint32_t* max_used_spill_slot,
Register* target) {
Register* target,
LiftoffRegister* explicit_context) {
uint32_t num_params = static_cast<uint32_t>(sig->parameter_count());
// Input 0 is the call target.
constexpr size_t kInputShift = 1;
......@@ -483,13 +484,23 @@ void LiftoffAssembler::PrepareCall(wasm::FunctionSig* sig,
}
StackTransferRecipe stack_transfers(this);
LiftoffRegList param_regs;
// Move the explicit context (if any) into the correct context register.
compiler::LinkageLocation context_loc =
call_descriptor->GetInputLocation(kInputShift);
DCHECK(context_loc.IsRegister() && !context_loc.IsAnyRegister());
LiftoffRegister context_reg(Register::from_code(context_loc.AsRegister()));
param_regs.set(context_reg);
if (explicit_context && *explicit_context != context_reg) {
stack_transfers.MoveRegister(context_reg, *explicit_context, kWasmIntPtr);
}
// Now move all parameter values into the right slot for the call.
// Don't pop values yet, such that the stack height is still correct when
// executing the {stack_transfers}.
// Process parameters backwards, such that pushes of caller frame slots are
// in the correct order.
LiftoffRegList param_regs;
uint32_t param_base = cache_state_.stack_height() - num_params;
uint32_t call_desc_input_idx =
static_cast<uint32_t>(call_descriptor->InputCount());
......@@ -527,12 +538,6 @@ void LiftoffAssembler::PrepareCall(wasm::FunctionSig* sig,
// {call_desc_input_idx} should point after the context parameter now.
DCHECK_EQ(call_desc_input_idx, kInputShift + 1);
compiler::LinkageLocation context_loc =
call_descriptor->GetInputLocation(kInputShift);
DCHECK(context_loc.IsRegister() && !context_loc.IsAnyRegister());
Register context_reg = Register::from_code(context_loc.AsRegister());
param_regs.set(LiftoffRegister(context_reg));
// If the target register overlaps with a parameter register, then move the
// target to another free register, or spill to the stack.
if (target && param_regs.has(LiftoffRegister(*target))) {
......@@ -563,8 +568,10 @@ void LiftoffAssembler::PrepareCall(wasm::FunctionSig* sig,
// Reset register use counters.
cache_state_.reset_used_registers();
// Fill the wasm context into the right register.
FillContextInto(context_reg);
// Reload the context from the stack.
if (!explicit_context) {
FillContextInto(context_reg.gp());
}
}
void LiftoffAssembler::FinishCall(wasm::FunctionSig* sig,
......
......@@ -310,7 +310,8 @@ class LiftoffAssembler : public TurboAssembler {
// TODO(clemensh): Remove {max_used_spill_slot} once we support arbitrary
// stack sizes.
void PrepareCall(wasm::FunctionSig*, compiler::CallDescriptor*,
uint32_t* max_used_spill_slot, Register* target = nullptr);
uint32_t* max_used_spill_slot, Register* target = nullptr,
LiftoffRegister* explicit_context = nullptr);
// Process return values of the call.
void FinishCall(wasm::FunctionSig*, compiler::CallDescriptor*);
......
......@@ -1141,12 +1141,14 @@ class LiftoffCompiler {
void CallIndirect(Decoder* decoder, const Value& index_val,
const CallIndirectOperand<validate>& operand,
const Value args[], Value returns[]) {
if (operand.sig->return_count() > 1)
if (operand.sig->return_count() > 1) {
return unsupported(decoder, "multi-return");
}
if (operand.sig->return_count() == 1 &&
!CheckSupportedType(decoder, kTypes_ilfd, operand.sig->GetReturn(0),
"return"))
"return")) {
return;
}
// Assume only one table for now.
uint32_t table_index = 0;
......@@ -1169,63 +1171,106 @@ class LiftoffCompiler {
pinned.set(__ GetUnusedRegister(kGpReg, pinned));
LiftoffRegister scratch = pinned.set(__ GetUnusedRegister(kGpReg, pinned));
// Bounds check against the table size.
{
uint32_t table_size =
env_->module->function_tables[table_index].initial_size;
Label* trap_label = AddOutOfLineTrap(decoder->position(),
Builtins::kThrowWasmTrapFuncInvalid);
LiftoffRegister* explicit_context = nullptr;
__ LoadConstant(tmp_const, WasmValue(table_size),
RelocInfo::WASM_FUNCTION_TABLE_SIZE_REFERENCE);
__ emit_cond_jump(kUnsignedGreaterEqual, trap_label, kWasmI32, index.gp(),
tmp_const.gp());
}
// Bounds check against the table size.
Label* invalid_func_label = AddOutOfLineTrap(
decoder->position(), Builtins::kThrowWasmTrapFuncInvalid);
wasm::GlobalHandleAddress function_table_handle_address =
env_->function_tables[table_index];
__ LoadConstant(table, WasmPtrValue(function_table_handle_address),
RelocInfo::WASM_GLOBAL_HANDLE);
static constexpr LoadType kPointerLoadType =
kPointerSize == 8 ? LoadType::kI64Load : LoadType::kI32Load;
__ Load(table, table.gp(), no_reg, 0, kPointerLoadType, pinned);
// Load signature from the table and check.
// The table is a FixedArray; signatures are encoded as SMIs.
// [sig1, code1, sig2, code2, sig3, code3, ...]
static_assert(compiler::kFunctionTableEntrySize == 2, "consistency");
static_assert(compiler::kFunctionTableSignatureOffset == 0, "consistency");
static_assert(compiler::kFunctionTableCodeOffset == 1, "consistency");
constexpr int kFixedArrayOffset = FixedArray::kHeaderSize - kHeapObjectTag;
__ LoadConstant(tmp_const, WasmValue(kPointerSizeLog2 + 1));
// Shift index such that it's the offset of the signature in the FixedArray.
__ emit_i32_shl(index.gp(), index.gp(), tmp_const.gp(), pinned);
// Load the signature.
__ Load(scratch, table.gp(), index.gp(), kFixedArrayOffset,
kPointerLoadType, pinned);
static constexpr int kFixedArrayOffset =
FixedArray::kHeaderSize - kHeapObjectTag;
uint32_t canonical_sig_num = env_->module->signature_ids[operand.sig_index];
DCHECK_GE(canonical_sig_num, 0);
DCHECK_GE(kMaxInt, canonical_sig_num);
__ LoadConstant(tmp_const, WasmPtrValue(Smi::FromInt(canonical_sig_num)));
if (WASM_CONTEXT_TABLES) {
// Compare against table size stored in {wasm_context->table_size}.
__ LoadFromContext(tmp_const.gp(), offsetof(WasmContext, table_size),
sizeof(uint32_t));
__ emit_cond_jump(kUnsignedGreaterEqual, invalid_func_label, kWasmI32,
index.gp(), tmp_const.gp());
// Load the table from {wasm_context->table}
__ LoadFromContext(table.gp(), offsetof(WasmContext, table),
kPointerSize);
// Load the signature from {wasm_context->table[$index].sig_id}
// == wasm_context.table + $index * #sizeof(IndirectFunctionTableEntry)
// + #offsetof(sig_id)
__ LoadConstant(
tmp_const,
WasmValue(static_cast<uint32_t>(sizeof(IndirectFunctionTableEntry))));
__ emit_i32_mul(index.gp(), index.gp(), tmp_const.gp());
__ Load(scratch, table.gp(), index.gp(),
offsetof(IndirectFunctionTableEntry, sig_id), LoadType::kI32Load,
pinned);
__ LoadConstant(tmp_const, WasmValue(canonical_sig_num));
Label* trap_label = AddOutOfLineTrap(
decoder->position(), Builtins::kThrowWasmTrapFuncSigMismatch);
__ emit_cond_jump(kUnequal, trap_label, LiftoffAssembler::kWasmIntPtr,
scratch.gp(), tmp_const.gp());
Label* sig_mismatch_label = AddOutOfLineTrap(
decoder->position(), Builtins::kThrowWasmTrapFuncSigMismatch);
__ emit_cond_jump(kUnequal, sig_mismatch_label,
LiftoffAssembler::kWasmIntPtr, scratch.gp(),
tmp_const.gp());
// Load code object.
__ Load(scratch, table.gp(), index.gp(), kFixedArrayOffset + kPointerSize,
kPointerLoadType, pinned);
if (FLAG_wasm_jit_to_native) {
// The table holds a Foreign pointing to the instruction start.
__ Load(scratch, scratch.gp(), no_reg,
Foreign::kForeignAddressOffset - kHeapObjectTag, kPointerLoadType,
// Load the target address from {wasm_context->table[$index].target}
__ Load(scratch, table.gp(), index.gp(),
offsetof(IndirectFunctionTableEntry, target), kPointerLoadType,
pinned);
// Load the context from {wasm_context->table[$index].context}
// TODO(wasm): directly allocate the correct context register to avoid
// any potential moves.
__ Load(tmp_const, table.gp(), index.gp(),
offsetof(IndirectFunctionTableEntry, context), kPointerLoadType,
pinned);
explicit_context = &tmp_const;
} else {
// Compare against table size, which is a patchable constant.
uint32_t table_size =
env_->module->function_tables[table_index].initial_size;
__ LoadConstant(tmp_const, WasmValue(table_size),
RelocInfo::WASM_FUNCTION_TABLE_SIZE_REFERENCE);
__ emit_cond_jump(kUnsignedGreaterEqual, invalid_func_label, kWasmI32,
index.gp(), tmp_const.gp());
wasm::GlobalHandleAddress function_table_handle_address =
env_->function_tables[table_index];
__ LoadConstant(table, WasmPtrValue(function_table_handle_address),
RelocInfo::WASM_GLOBAL_HANDLE);
__ Load(table, table.gp(), no_reg, 0, kPointerLoadType, pinned);
// Load signature from the table and check.
// The table is a FixedArray; signatures are encoded as SMIs.
// [sig1, code1, sig2, code2, sig3, code3, ...]
static_assert(compiler::kFunctionTableEntrySize == 2, "consistency");
static_assert(compiler::kFunctionTableSignatureOffset == 0,
"consistency");
static_assert(compiler::kFunctionTableCodeOffset == 1, "consistency");
__ LoadConstant(tmp_const, WasmValue(kPointerSizeLog2 + 1));
// Shift index such that it's the offset of the signature in the
// FixedArray.
__ emit_i32_shl(index.gp(), index.gp(), tmp_const.gp(), pinned);
// Load the signature.
__ Load(scratch, table.gp(), index.gp(), kFixedArrayOffset,
kPointerLoadType, pinned);
__ LoadConstant(tmp_const, WasmPtrValue(Smi::FromInt(canonical_sig_num)));
Label* sig_mismatch_label = AddOutOfLineTrap(
decoder->position(), Builtins::kThrowWasmTrapFuncSigMismatch);
__ emit_cond_jump(kUnequal, sig_mismatch_label,
LiftoffAssembler::kWasmIntPtr, scratch.gp(),
tmp_const.gp());
// Load code object.
__ Load(scratch, table.gp(), index.gp(), kFixedArrayOffset + kPointerSize,
kPointerLoadType, pinned);
// Move the pointer from the Code object to the instruction start.
__ LoadConstant(tmp_const,
WasmPtrValue(Code::kHeaderSize - kHeapObjectTag));
......@@ -1242,7 +1287,8 @@ class LiftoffCompiler {
uint32_t max_used_spill_slot = 0;
Register target = scratch.gp();
__ PrepareCall(operand.sig, call_descriptor, &max_used_spill_slot, &target);
__ PrepareCall(operand.sig, call_descriptor, &max_used_spill_slot, &target,
explicit_context);
__ CallIndirect(operand.sig, call_descriptor, target);
if (max_used_spill_slot >
__ num_locals() + LiftoffAssembler::kMaxValueStackHeight) {
......
This diff is collapsed.
......@@ -950,7 +950,6 @@ std::unique_ptr<NativeModule> NativeModule::Clone() {
UNREACHABLE();
}
}
ret->specialization_data_ = specialization_data_;
return ret;
}
......@@ -1021,14 +1020,14 @@ intptr_t WasmCodeManager::remaining_uncommitted() const {
NativeModuleModificationScope::NativeModuleModificationScope(
NativeModule* native_module)
: native_module_(native_module) {
if (native_module_) {
if (native_module_ && (native_module_->modification_scope_depth_++) == 0) {
bool success = native_module_->SetExecutable(false);
CHECK(success);
}
}
NativeModuleModificationScope::~NativeModuleModificationScope() {
if (native_module_) {
if (native_module_ && (native_module_->modification_scope_depth_--) == 1) {
bool success = native_module_->SetExecutable(true);
CHECK(success);
}
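// Sketch (not part of this CL) of the depth-counting pattern above: only the
// outermost scope flips the module between writable and executable, so nested
// scopes become no-ops instead of prematurely re-protecting the code. Names
// below are hypothetical:

#include <cassert>

struct FakeModule {
  int scope_depth = 0;
  bool executable = true;
  void SetExecutable(bool e) { executable = e; }
};

class ModificationScope {
 public:
  explicit ModificationScope(FakeModule* m) : m_(m) {
    if (m_ && m_->scope_depth++ == 0) m_->SetExecutable(false);  // outermost
  }
  ~ModificationScope() {
    if (m_ && --m_->scope_depth == 0) m_->SetExecutable(true);   // last out
  }
 private:
  FakeModule* m_;
};

int main() {
  FakeModule m;
  {
    ModificationScope outer(&m);
    assert(!m.executable);
    { ModificationScope inner(&m); }  // no-op: depth goes 2 -> 1
    assert(!m.executable);            // still writable for the outer scope
  }
  assert(m.executable);               // outermost exit re-protects the code
}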
......
......@@ -245,19 +245,7 @@ class V8_EXPORT_PRIVATE NativeModule final {
WasmCompiledModule* compiled_module() const;
void SetCompiledModule(Handle<WasmCompiledModule>);
// Shorthand accessors to the specialization data content.
std::vector<wasm::GlobalHandleAddress>& function_tables() {
return specialization_data_.function_tables;
}
std::vector<wasm::GlobalHandleAddress>& empty_function_tables() {
return specialization_data_.empty_function_tables;
}
uint32_t num_imported_functions() const { return num_imported_functions_; }
size_t num_function_tables() const {
return specialization_data_.empty_function_tables.size();
}
size_t committed_memory() const { return committed_memory_; }
const size_t instance_id = 0;
......@@ -267,6 +255,7 @@ class V8_EXPORT_PRIVATE NativeModule final {
friend class WasmCodeManager;
friend class NativeModuleSerializer;
friend class NativeModuleDeserializer;
friend class NativeModuleModificationScope;
struct WasmCodeUniquePtrComparer {
bool operator()(const std::unique_ptr<WasmCode>& a,
......@@ -325,14 +314,7 @@ class V8_EXPORT_PRIVATE NativeModule final {
size_t committed_memory_ = 0;
bool can_request_more_memory_;
bool is_executable_ = false;
// Specialization data that needs to be serialized and cloned.
// Keeping it grouped together because it makes cloning of all these
// elements a 1 line copy.
struct {
std::vector<wasm::GlobalHandleAddress> function_tables;
std::vector<wasm::GlobalHandleAddress> empty_function_tables;
} specialization_data_;
int modification_scope_depth_ = 0;
};
class V8_EXPORT_PRIVATE WasmCodeManager final {
......
......@@ -681,7 +681,9 @@ wasm::WasmInterpreter* WasmDebugInfo::SetupForTesting(
auto interp_handle =
Managed<wasm::InterpreterHandle>::Allocate(isolate, isolate, *debug_info);
debug_info->set(kInterpreterHandleIndex, *interp_handle);
return interp_handle->get()->interpreter();
auto ret = interp_handle->get()->interpreter();
ret->SetCallIndirectTestMode();
return ret;
}
bool WasmDebugInfo::IsWasmDebugInfo(Object* object) {
......
......@@ -968,6 +968,9 @@ class CodeMap {
// This handle is set and reset by the SetInstanceObject() /
// ClearInstanceObject() method, which is used by the HeapObjectsScope.
Handle<WasmInstanceObject> instance_;
// TODO(wasm): Remove this testing wart. It is needed because interpreter
// entry stubs are not generated when testing the interpreter in cctests.
bool call_indirect_through_module_ = false;
public:
CodeMap(Isolate* isolate, const WasmModule* module,
......@@ -986,6 +989,12 @@ class CodeMap {
}
}
bool call_indirect_through_module() { return call_indirect_through_module_; }
void set_call_indirect_through_module(bool val) {
call_indirect_through_module_ = val;
}
void SetInstanceObject(Handle<WasmInstanceObject> instance) {
DCHECK(instance_.is_null());
instance_ = instance;
......@@ -2553,7 +2562,8 @@ class ThreadImpl {
}
if (code->kind() == wasm::WasmCode::kWasmToJsWrapper) {
return CallExternalJSFunction(isolate, WasmCodeWrapper(code), signature);
} else if (code->kind() == wasm::WasmCode::kWasmToWasmWrapper) {
} else if (code->kind() == wasm::WasmCode::kWasmToWasmWrapper ||
code->kind() == wasm::WasmCode::kInterpreterStub) {
return CallExternalWasmFunction(isolate, WasmCodeWrapper(code),
signature);
}
......@@ -2582,23 +2592,8 @@ class ThreadImpl {
ExternalCallResult CallIndirectFunction(uint32_t table_index,
uint32_t entry_index,
uint32_t sig_index) {
bool no_func_tables = !codemap()->has_instance();
if (FLAG_wasm_jit_to_native) {
no_func_tables = no_func_tables || codemap()
->instance()
->compiled_module()
->GetNativeModule()
->function_tables()
.empty();
} else {
no_func_tables =
no_func_tables ||
!codemap()->instance()->compiled_module()->has_function_tables();
}
if (no_func_tables) {
// No instance. Rely on the information stored in the WasmModule.
// TODO(wasm): This is only needed for testing. Refactor testing to use
// the same paths as production.
if (codemap()->call_indirect_through_module()) {
// Rely on the information stored in the WasmModule.
InterpreterCode* code =
codemap()->GetIndirectCode(table_index, entry_index);
if (!code) return {ExternalCallResult::INVALID_FUNC};
......@@ -2632,7 +2627,7 @@ class ThreadImpl {
DCHECK_EQ(canonical_sig_index,
module()->signature_map.Find(module()->signatures[sig_index]));
if (!FLAG_wasm_jit_to_native) {
if (!WASM_CONTEXT_TABLES) {
// Check signature.
FixedArray* fun_tables = compiled_module->function_tables();
if (table_index >= static_cast<uint32_t>(fun_tables->length())) {
......@@ -2659,33 +2654,23 @@ class ThreadImpl {
target_gc = Code::cast(fun_table->get(
compiler::FunctionTableCodeOffset(static_cast<int>(entry_index))));
} else {
// Check signature.
std::vector<GlobalHandleAddress>& fun_tables =
compiled_module->GetNativeModule()->function_tables();
if (table_index >= fun_tables.size()) {
// The function table is stored in the wasm context.
// TODO(wasm): the wasm interpreter currently supports only one table.
CHECK_EQ(0, table_index);
// Bounds check against table size.
if (entry_index >= wasm_context_->table_size) {
return {ExternalCallResult::INVALID_FUNC};
}
// Reconstitute the global handle to the function table, from the
// address stored in the respective table of tables.
FixedArray* fun_table =
*reinterpret_cast<FixedArray**>(fun_tables[table_index]);
// Function tables store <smi, code> pairs.
int num_funcs_in_table =
fun_table->length() / compiler::kFunctionTableEntrySize;
if (entry_index >= static_cast<uint32_t>(num_funcs_in_table)) {
return {ExternalCallResult::INVALID_FUNC};
}
int found_sig = Smi::ToInt(fun_table->get(
compiler::FunctionTableSigOffset(static_cast<int>(entry_index))));
if (static_cast<uint32_t>(found_sig) != canonical_sig_index) {
// Signature check.
int32_t entry_sig = wasm_context_->table[entry_index].sig_id;
if (entry_sig != static_cast<int32_t>(canonical_sig_index)) {
return {ExternalCallResult::SIGNATURE_MISMATCH};
}
// Load the target address (first instruction of code).
Address first_instr = wasm_context_->table[entry_index].target;
// TODO(titzer): load the wasm context instead of relying on the
// target code being specialized to the target instance.
// Get code object.
Address first_instr =
Foreign::cast(fun_table->get(compiler::FunctionTableCodeOffset(
static_cast<int>(entry_index))))
->foreign_address();
target =
isolate->wasm_engine()->code_manager()->GetCodeFromStartAddress(
first_instr);
......@@ -2978,6 +2963,10 @@ void WasmInterpreter::SetFunctionCodeForTesting(const WasmFunction* function,
internals_->codemap_.SetFunctionCode(function, start, end);
}
void WasmInterpreter::SetCallIndirectTestMode() {
internals_->codemap_.set_call_indirect_through_module(true);
}
ControlTransferMap WasmInterpreter::ComputeControlTransfersForTesting(
Zone* zone, const WasmModule* module, const byte* start, const byte* end) {
// Create some dummy structures, to avoid special-casing the implementation
......
......@@ -215,6 +215,7 @@ class V8_EXPORT_PRIVATE WasmInterpreter {
// Manually adds code to the interpreter for the given function.
void SetFunctionCodeForTesting(const WasmFunction* function,
const byte* start, const byte* end);
void SetCallIndirectTestMode();
// Computes the control transfers for the given bytecode. Used internally in
// the interpreter, but exposed for testing.
......
......@@ -156,7 +156,7 @@ WasmFunction* GetWasmFunctionForExport(Isolate* isolate,
Handle<Object> GetOrCreateIndirectCallWrapper(
Isolate* isolate, Handle<WasmInstanceObject> owning_instance,
WasmCodeWrapper wasm_code, uint32_t index, FunctionSig* sig) {
WasmCodeWrapper wasm_code, uint32_t func_index, FunctionSig* sig) {
Address new_context_address =
reinterpret_cast<Address>(owning_instance->wasm_context()->get());
if (!wasm_code.IsCodeObject()) {
......@@ -172,6 +172,8 @@ Handle<Object> GetOrCreateIndirectCallWrapper(
wasm::WasmCode* exported_wrapper =
native_module->GetExportedWrapper(wasm_code.GetWasmCode()->index());
if (exported_wrapper == nullptr) {
wasm::NativeModuleModificationScope native_modification_scope(
native_module);
Handle<Code> new_wrapper = compiler::CompileWasmToWasmWrapper(
isolate, wasm_code, sig, new_context_address);
exported_wrapper = native_module->AddExportedWrapper(
......@@ -180,10 +182,11 @@ Handle<Object> GetOrCreateIndirectCallWrapper(
Address target = exported_wrapper->instructions().start();
return isolate->factory()->NewForeign(target, TENURED);
}
CodeSpaceMemoryModificationScope gc_modification_scope(isolate->heap());
Handle<Code> code = compiler::CompileWasmToWasmWrapper(
isolate, wasm_code, sig, new_context_address);
AttachWasmFunctionInfo(isolate, code, owning_instance,
static_cast<int>(index));
static_cast<int>(func_index));
return code;
}
......
......@@ -275,7 +275,7 @@ WasmFunction* GetWasmFunctionForExport(Isolate* isolate, Handle<Object> target);
Handle<Object> GetOrCreateIndirectCallWrapper(
Isolate* isolate, Handle<WasmInstanceObject> owning_instance,
WasmCodeWrapper wasm_code, uint32_t index, FunctionSig* sig);
WasmCodeWrapper wasm_code, uint32_t func_index, FunctionSig* sig);
void UnpackAndRegisterProtectedInstructionsGC(Isolate* isolate,
Handle<FixedArray> code_table);
......
This diff is collapsed.
......@@ -38,6 +38,8 @@ class WasmCompiledModule;
class WasmDebugInfo;
class WasmInstanceObject;
#define WASM_CONTEXT_TABLES FLAG_wasm_jit_to_native
#define DECL_OOL_QUERY(type) static bool Is##type(Object* object);
#define DECL_OOL_CAST(type) static type* cast(Object* object);
......@@ -55,6 +57,15 @@ class WasmInstanceObject;
static const int k##name##Offset = \
kSize + (k##name##Index - kFieldCount) * kPointerSize;
// An entry in an indirect dispatch table.
struct IndirectFunctionTableEntry {
int32_t sig_id = 0;
WasmContext* context = nullptr;
Address target = nullptr;
MOVE_ONLY_WITH_DEFAULT_CONSTRUCTORS(IndirectFunctionTableEntry)
};
// Wasm context used to store the mem_size and mem_start address of the linear
// memory. These variables can be accessed at C++ level at graph build time
// (e.g., initialized during instance building / changed at runtime by
......@@ -67,14 +78,27 @@ struct WasmContext {
uint32_t mem_size = 0; // TODO(titzer): uintptr_t?
uint32_t mem_mask = 0; // TODO(titzer): uintptr_t?
byte* globals_start = nullptr;
// TODO(wasm): pad these entries to a power of two.
IndirectFunctionTableEntry* table = nullptr;
uint32_t table_size = 0;
inline void SetRawMemory(void* mem_start, size_t mem_size) {
void SetRawMemory(void* mem_start, size_t mem_size) {
DCHECK_LE(mem_size, wasm::kV8MaxWasmMemoryPages * wasm::kWasmPageSize);
this->mem_start = static_cast<byte*>(mem_start);
this->mem_size = static_cast<uint32_t>(mem_size);
this->mem_mask = base::bits::RoundUpToPowerOfTwo32(this->mem_size) - 1;
DCHECK_LE(mem_size, this->mem_mask + 1);
}
~WasmContext() {
if (table) free(table);
mem_start = nullptr;
mem_size = 0;
mem_mask = 0;
globals_start = nullptr;
table = nullptr;
table_size = 0;
}
};
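// Sketch (not part of this CL): how a context's table might be installed,
// following the calloc / sig_id = -1 initialization the testing code later in
// this diff uses; InstallTable is a hypothetical helper. Since ~WasmContext
// above releases the table with free(), it must come from the C allocator
// rather than new[].

#include <cstdlib>

void InstallTable(WasmContext* ctx, uint32_t size) {
  ctx->table = static_cast<IndirectFunctionTableEntry*>(
      calloc(size, sizeof(IndirectFunctionTableEntry)));
  ctx->table_size = size;
  for (uint32_t i = 0; i < size; i++) {
    ctx->table[i].sig_id = -1;  // no call_indirect signature can match -1
  }
}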
// Representation of a WebAssembly.Module JavaScript-level object.
......@@ -137,9 +161,13 @@ class WasmTableObject : public JSObject {
static void Set(Isolate* isolate, Handle<WasmTableObject> table,
int32_t index, Handle<JSFunction> function);
static void UpdateDispatchTables(Handle<WasmTableObject> table, int index,
wasm::FunctionSig* sig,
Handle<Object> code_or_foreign);
static void UpdateDispatchTables(Isolate* isolate,
Handle<WasmTableObject> table,
int table_index, wasm::FunctionSig* sig,
Handle<WasmInstanceObject> from_instance,
WasmCodeWrapper wasm_code, int func_index);
static void ClearDispatchTables(Handle<WasmTableObject> table, int index);
};
// Representation of a WebAssembly.Memory JavaScript-level object.
......@@ -481,9 +509,7 @@ class WasmCompiledModule : public FixedArray {
MACRO(SMALL_CONST_NUMBER, uint32_t, num_imported_functions) \
MACRO(CONST_OBJECT, FixedArray, code_table) \
MACRO(OBJECT, FixedArray, function_tables) \
MACRO(OBJECT, FixedArray, signature_tables) \
MACRO(CONST_OBJECT, FixedArray, empty_function_tables) \
MACRO(CONST_OBJECT, FixedArray, empty_signature_tables)
MACRO(CONST_OBJECT, FixedArray, empty_function_tables)
// TODO(mtrofin): this is unnecessary when we stop needing
// FLAG_wasm_jit_to_native, because we have instance_id on NativeModule.
......
......@@ -209,12 +209,7 @@ NativeModuleSerializer::NativeModuleSerializer(Isolate* isolate,
size_t NativeModuleSerializer::MeasureHeader() const {
return sizeof(uint32_t) + // total wasm fct count
sizeof(
uint32_t) + // imported fcts - i.e. index of first wasm function
sizeof(uint32_t) + // table count
native_module_->specialization_data_.function_tables.size()
// function table, containing pointers
* sizeof(GlobalHandleAddress);
sizeof(uint32_t); // imported fcts - i.e. index of first wasm function
}
void NativeModuleSerializer::BufferHeader() {
......@@ -224,13 +219,6 @@ void NativeModuleSerializer::BufferHeader() {
Writer writer(remaining_);
writer.Write(native_module_->FunctionCount());
writer.Write(native_module_->num_imported_functions());
writer.Write(static_cast<uint32_t>(
native_module_->specialization_data_.function_tables.size()));
for (size_t i = 0,
e = native_module_->specialization_data_.function_tables.size();
i < e; ++i) {
writer.Write(native_module_->specialization_data_.function_tables[i]);
}
}
size_t NativeModuleSerializer::GetCodeHeaderSize() {
......@@ -554,16 +542,6 @@ bool NativeModuleDeserializer::ReadHeader() {
bool ok = functions == native_module_->FunctionCount() &&
imports == native_module_->num_imported_functions();
if (!ok) return false;
size_t table_count = reader.Read<uint32_t>();
std::vector<GlobalHandleAddress> funcs(table_count);
for (size_t i = 0; i < table_count; ++i) {
funcs[i] = reader.Read<GlobalHandleAddress>();
}
native_module_->function_tables() = funcs;
// resize, so that from here on the native module can be
// asked about num_function_tables().
native_module_->empty_function_tables().resize(table_count);
unread_ = unread_ + (start_size - reader.current_buffer().size());
return true;
......
......@@ -159,11 +159,22 @@ void TestingModuleBuilder::AddIndirectFunctionTable(
table_size * compiler::kFunctionTableEntrySize);
function_tables_.push_back(
isolate_->global_handles()->Create(func_table).address());
if (WASM_CONTEXT_TABLES) {
WasmContext* wasm_context = instance_object()->wasm_context()->get();
wasm_context->table = reinterpret_cast<IndirectFunctionTableEntry*>(
calloc(table_size, sizeof(IndirectFunctionTableEntry)));
wasm_context->table_size = table_size;
for (uint32_t i = 0; i < table_size; i++) {
wasm_context->table[i].sig_id = -1;
}
}
}
void TestingModuleBuilder::PopulateIndirectFunctionTable() {
if (interpret()) return;
// Initialize the fixed arrays in instance->function_tables.
WasmContext* wasm_context = instance_object()->wasm_context()->get();
for (uint32_t i = 0; i < function_tables_.size(); i++) {
WasmIndirectFunctionTable& table = test_module_.function_tables[i];
Handle<FixedArray> function_table(
......@@ -171,17 +182,16 @@ void TestingModuleBuilder::PopulateIndirectFunctionTable() {
int table_size = static_cast<int>(table.values.size());
for (int j = 0; j < table_size; j++) {
WasmFunction& function = test_module_.functions[table.values[j]];
function_table->set(
compiler::FunctionTableSigOffset(j),
Smi::FromInt(test_module_.signature_map.Find(function.sig)));
if (FLAG_wasm_jit_to_native) {
Handle<Foreign> foreign_holder = isolate_->factory()->NewForeign(
native_module_->GetCode(function.func_index)
->instructions()
.start(),
TENURED);
function_table->set(compiler::FunctionTableCodeOffset(j),
*foreign_holder);
int sig_id = test_module_.signature_map.Find(function.sig);
function_table->set(compiler::FunctionTableSigOffset(j),
Smi::FromInt(sig_id));
if (WASM_CONTEXT_TABLES) {
auto start = native_module_->GetCode(function.func_index)
->instructions()
.start();
wasm_context->table[j].context = wasm_context;
wasm_context->table[j].sig_id = sig_id;
wasm_context->table[j].target = start;
} else {
function_table->set(compiler::FunctionTableCodeOffset(j),
*function_code_[function.func_index]);
......
......@@ -33,7 +33,7 @@ function AddFunctions(builder) {
function js_div(a, b) { return (a / b) | 0; }
(function ExportedTableTest() {
print("ExportedTableTest...");
print(arguments.callee.name);
let builder = new WasmModuleBuilder();
......@@ -102,9 +102,9 @@ function js_div(a, b) { return (a / b) | 0; }
})();
(function ImportedTableTest() {
(function ImportedTableTest1() {
let kTableSize = 10;
print("ImportedTableTest...");
print(arguments.callee.name);
var builder = new WasmModuleBuilder();
let d = builder.addImport("q", "js_div", kSig_i_ii);
......@@ -172,9 +172,9 @@ function js_div(a, b) { return (a / b) | 0; }
}
})();
(function ImportedTableTest() {
(function ImportedTableTest2() {
let kTableSize = 10;
print("ManualTableTest...");
print(arguments.callee.name);
var builder = new WasmModuleBuilder();
......@@ -240,7 +240,7 @@ function js_div(a, b) { return (a / b) | 0; }
(function CumulativeTest() {
print("CumulativeTest...");
print(arguments.callee.name);
let kTableSize = 10;
let table = new WebAssembly.Table(
......@@ -251,7 +251,7 @@ function js_div(a, b) { return (a / b) | 0; }
builder.addImportedTable("x", "table", kTableSize, kTableSize);
let g = builder.addImportedGlobal("x", "base", kWasmI32);
let sig_index = builder.addType(kSig_i_v);
builder.addFunction("g", sig_index)
let f = builder.addFunction("f", sig_index)
.addBody([
kExprGetGlobal, g
]);
......@@ -260,7 +260,7 @@ function js_div(a, b) { return (a / b) | 0; }
kExprGetLocal, 0,
kExprCallIndirect, sig_index, kTableZero]) // --
.exportAs("main");
builder.addFunctionTableInit(g, true, [g]);
builder.addFunctionTableInit(g, true, [f.index]);
let module = new WebAssembly.Module(builder.toBuffer());
......@@ -283,7 +283,7 @@ function js_div(a, b) { return (a / b) | 0; }
})();
(function TwoWayTest() {
print("TwoWayTest...");
print(arguments.callee.name);
let kTableSize = 3;
// Module {m1} defines the table and exports it.
......@@ -342,7 +342,7 @@ function js_div(a, b) { return (a / b) | 0; }
})();
(function MismatchedTableSize() {
print("MismatchedTableSize...");
print(arguments.callee.name);
let kTableSize = 5;
for (var expsize = 1; expsize < 4; expsize++) {
......@@ -374,7 +374,7 @@ function js_div(a, b) { return (a / b) | 0; }
})();
(function TableGrowBoundsCheck() {
print("TableGrowBoundsCheck");
print(arguments.callee.name);
var kMaxSize = 30, kInitSize = 5;
let table = new WebAssembly.Table({element: "anyfunc",
initial: kInitSize, maximum: kMaxSize});
......@@ -398,7 +398,7 @@ function js_div(a, b) { return (a / b) | 0; }
})();
(function CumulativeGrowTest() {
print("CumulativeGrowTest...");
print(arguments.callee.name);
let table = new WebAssembly.Table({
element: "anyfunc", initial: 10, maximum: 30});
var builder = new WasmModuleBuilder();
......@@ -460,7 +460,7 @@ function js_div(a, b) { return (a / b) | 0; }
(function TestImportTooLarge() {
print("TestImportTooLarge...");
print(arguments.callee.name);
let builder = new WasmModuleBuilder();
builder.addImportedTable("t", "t", 1, 2);
......@@ -478,7 +478,7 @@ function js_div(a, b) { return (a / b) | 0; }
})();
(function TableImportLargerThanCompiled() {
print("TableImportLargerThanCompiled...");
print(arguments.callee.name);
var kMaxSize = 30, kInitSize = 5;
var builder = new WasmModuleBuilder();
builder.addImportedTable("x", "table", 1, 35);
......@@ -492,7 +492,7 @@ function js_div(a, b) { return (a / b) | 0; }
})();
(function ModulesShareTableAndGrow() {
print("ModulesShareTableAndGrow...");
print(arguments.callee.name);
let module1 = (() => {
let builder = new WasmModuleBuilder();
builder.addImportedTable("x", "table", 1, 35);
......@@ -525,7 +525,7 @@ function js_div(a, b) { return (a / b) | 0; }
(function MultipleElementSegments() {
let kTableSize = 10;
print("MultipleElementSegments...");
print(arguments.callee.name);
let mul = (a, b) => a * b;
let add = (a, b) => a + b;
......@@ -603,7 +603,8 @@ function js_div(a, b) { return (a / b) | 0; }
})();
(function IndirectCallIntoOtherInstance() {
print("IndirectCallIntoOtherInstance...");
print(arguments.callee.name);
var mem_1 = new WebAssembly.Memory({initial: 1});
var mem_2 = new WebAssembly.Memory({initial: 1});
var view_1 = new Int32Array(mem_1.buffer);
......@@ -644,7 +645,7 @@ function js_div(a, b) { return (a / b) | 0; }
(function ImportedFreestandingTable() {
print("ImportedFreestandingTable...");
print(arguments.callee.name);
function forceGc() {
gc();
......@@ -709,7 +710,8 @@ function js_div(a, b) { return (a / b) | 0; }
// Remove this test when v8:7232 is addressed comprehensively.
(function TablesAreImmutableInWasmCallstacks() {
print('TablesAreImmutableInWasmCallstacks...');
print(arguments.callee.name);
let table = new WebAssembly.Table({initial:2, element:'anyfunc'});
let builder = new WasmModuleBuilder();
......@@ -743,3 +745,93 @@ function js_div(a, b) { return (a / b) | 0; }
table.set(0, null);
assertEquals(null, table.get(0));
})();
(function ImportedWasmFunctionPutIntoTable() {
print(arguments.callee.name);
let wasm_mul = (() => {
let builder = new WasmModuleBuilder();
builder.addFunction("mul", kSig_i_ii)
.addBody(
[kExprGetLocal, 0,
kExprGetLocal, 1,
kExprI32Mul])
.exportFunc();
return builder.instantiate().exports.mul;
})();
let builder = new WasmModuleBuilder();
let j = builder.addImport("q", "js_div", kSig_i_ii);
let w = builder.addImport("q", "wasm_mul", kSig_i_ii);
builder.addFunction("main", kSig_i_ii)
.addBody([
kExprI32Const, 33, // --
kExprGetLocal, 0, // --
kExprGetLocal, 1, // --
kExprCallIndirect, 0, kTableZero]) // --
.exportAs("main");
builder.setFunctionTableBounds(10, 10);
let g = builder.addImportedGlobal("q", "base", kWasmI32);
builder.addFunctionTableInit(g, true, [j, w]);
let module = new WebAssembly.Module(builder.toBuffer());
for (var i = 0; i < 5; i++) {
let instance = new WebAssembly.Instance(module, {q: {base: i, js_div: js_div, wasm_mul: wasm_mul}});
let j = i + 1;
assertThrows(() => {instance.exports.main(j, i-1)});
assertEquals((33/j)|0, instance.exports.main(j, i+0));
assertEquals((33*j)|0, instance.exports.main(j, i+1));
assertThrows(() => {instance.exports.main(j, i+2)});
}
})();
(function ImportedWasmFunctionPutIntoImportedTable() {
print(arguments.callee.name);
let kTableSize = 10;
let wasm_mul = (() => {
let builder = new WasmModuleBuilder();
builder.addFunction("mul", kSig_i_ii)
.addBody(
[kExprGetLocal, 0,
kExprGetLocal, 1,
kExprI32Mul])
.exportFunc();
return builder.instantiate().exports.mul;
})();
let table = new WebAssembly.Table({element: "anyfunc",
initial: kTableSize,
maximum: kTableSize});
let builder = new WasmModuleBuilder();
let j = builder.addImport("q", "js_div", kSig_i_ii);
let w = builder.addImport("q", "wasm_mul", kSig_i_ii);
builder.addImportedTable("q", "table", kTableSize, kTableSize);
builder.addFunction("main", kSig_i_ii)
.addBody([
kExprI32Const, 44, // --
kExprGetLocal, 0, // --
kExprGetLocal, 1, // --
kExprCallIndirect, 0, kTableZero]) // --
.exportAs("main");
let g = builder.addImportedGlobal("q", "base", kWasmI32);
builder.addFunctionTableInit(g, true, [j, w]);
let module = new WebAssembly.Module(builder.toBuffer());
for (var i = 0; i < 5; i++) {
let instance = new WebAssembly.Instance(module, {q: {base: i, js_div: js_div, wasm_mul: wasm_mul, table: table}});
let j = i + 1;
assertEquals((44/j)|0, instance.exports.main(j, i+0));
assertEquals((44*j)|0, instance.exports.main(j, i+1));
assertThrows(() => {instance.exports.main(j, i+2)});
}
})();