Commit c71fd20c authored by Mircea Trofin, committed by Commit Bot

[wasm] Data structures for JIT-ing wasm to native memory.

This CL introduces the structures for JIT-ing wasm on the native heap.
They are described in detail at go/wasm-on-native-heap-stage-1

Briefly:
- WasmCodeManager manages memory for modules and offers an interior
pointer lookup (i.e. PC -> WasmCode)
- WasmCode represents code, including reloc info. It holds wasm-specific
data, like the function index, and runtime information, like trap handler
info.
- NativeModule manages memory for one module.

Tests cover the allocation and lookup aspects; existing regression tests
cover the JITed code. A separate CL will enable JITing using the new data
structures (a usage sketch follows below).
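
A minimal usage sketch, mirroring the new unittests in this CL; isolate,
buffer and size below are placeholders, and the CodeDesc setup is simplified:

  // Reserve one page for a small module with one function and no imports.
  WasmCodeManager manager(isolate, base::OS::AllocatePageSize());
  std::unique_ptr<NativeModule> native_module = manager.NewNativeModule(
      base::OS::AllocatePageSize(), 1 /* num_functions */,
      0 /* num_imported_functions */, false /* can_request_more */);

  // Copy some generated instructions into the module's executable memory.
  CodeDesc desc;
  memset(&desc, 0, sizeof(CodeDesc));
  desc.buffer = buffer;  // placeholder: bytes produced by an assembler
  desc.instr_size = static_cast<int>(size);
  WasmCode* code = native_module->AddCode(
      desc, 0 /* frame_slots */, 0 /* index */,
      0 /* safepoint_table_offset */, {}, false /* is_liftoff */);

  // Interior pointer lookup: any PC inside the copied instructions resolves
  // back to the owning WasmCode.
  CHECK_EQ(code, manager.LookupCode(code->instructions().start()));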

Bug: v8:6876
Change-Id: I1731238409001fe97c97eafb7a12fd3922da6a42
Reviewed-on: https://chromium-review.googlesource.com/767581
Commit-Queue: Mircea Trofin <mtrofin@chromium.org>
Reviewed-by: Michael Starzinger <mstarzinger@chromium.org>
Reviewed-by: Ben Titzer <titzer@chromium.org>
Cr-Commit-Position: refs/heads/master@{#49501}
parent 8c68bc83
@@ -312,6 +312,18 @@ void RelocInfo::set_global_handle(Isolate* isolate, Address address,
set_embedded_address(isolate, address, icache_flush_mode);
}
Address RelocInfo::wasm_call_address() const {
DCHECK_EQ(rmode_, WASM_CALL);
return Assembler::target_address_at(pc_, constant_pool_);
}
void RelocInfo::set_wasm_call_address(Isolate* isolate, Address address,
ICacheFlushMode icache_flush_mode) {
DCHECK_EQ(rmode_, WASM_CALL);
Assembler::set_target_address_at(isolate, pc_, constant_pool_, address,
icache_flush_mode);
}
Address RelocInfo::global_handle() const {
DCHECK_EQ(rmode_, WASM_GLOBAL_HANDLE);
return embedded_address();
@@ -337,7 +349,7 @@ void RelocInfo::update_wasm_function_table_size_reference(
void RelocInfo::set_target_address(Isolate* isolate, Address target,
WriteBarrierMode write_barrier_mode,
ICacheFlushMode icache_flush_mode) {
DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_));
DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_) || IsWasmCall(rmode_));
Assembler::set_target_address_at(isolate, pc_, host_, target,
icache_flush_mode);
if (write_barrier_mode == UPDATE_WRITE_BARRIER && host() != nullptr &&
@@ -644,6 +656,8 @@ const char* RelocInfo::RelocModeName(RelocInfo::Mode rmode) {
return "wasm function table size reference";
case WASM_GLOBAL_HANDLE:
return "global handle";
case WASM_CALL:
return "internal wasm call";
case NUMBER_OF_MODES:
case PC_JUMP:
UNREACHABLE();
@@ -725,6 +739,7 @@ void RelocInfo::Verify(Isolate* isolate) {
case WASM_CONTEXT_REFERENCE:
case WASM_FUNCTION_TABLE_SIZE_REFERENCE:
case WASM_GLOBAL_HANDLE:
case WASM_CALL:
case NONE32:
case NONE64:
break;
@@ -366,6 +366,7 @@ class RelocInfo {
WASM_CONTEXT_REFERENCE,
WASM_FUNCTION_TABLE_SIZE_REFERENCE,
WASM_GLOBAL_HANDLE,
WASM_CALL,
RUNTIME_ENTRY,
COMMENT,
@@ -421,6 +422,7 @@
static inline bool IsRuntimeEntry(Mode mode) {
return mode == RUNTIME_ENTRY;
}
static inline bool IsWasmCall(Mode mode) { return mode == WASM_CALL; }
// Is the relocation mode affected by GC?
static inline bool IsGCRelocMode(Mode mode) {
return mode <= LAST_GCED_ENUM;
@@ -498,6 +500,7 @@ class RelocInfo {
Address wasm_context_reference() const;
uint32_t wasm_function_table_size_reference() const;
Address global_handle() const;
Address wasm_call_address() const;
void set_wasm_context_reference(
Isolate* isolate, Address address,
@@ -513,6 +516,9 @@ class RelocInfo {
void set_global_handle(
Isolate* isolate, Address address,
ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED);
void set_wasm_call_address(
Isolate*, Address,
ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED);
// this relocation applies to;
// can only be called if IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_)
@@ -476,6 +476,11 @@ DEFINE_BOOL(wasm_disable_structured_cloning, false,
"disable wasm structured cloning")
DEFINE_INT(wasm_num_compilation_tasks, 10,
"number of parallel compilation tasks for wasm")
DEFINE_BOOL(wasm_trace_native_heap, false, "trace wasm native heap events")
DEFINE_BOOL(wasm_jit_to_native, false,
"JIT wasm code to native (not JS GC) memory")
DEFINE_BOOL(wasm_trace_serialization, false,
"trace serialization/deserialization")
DEFINE_BOOL(wasm_async_compilation, true,
"enable actual asynchronous compilation for WebAssembly.compile")
DEFINE_BOOL(wasm_stream_compilation, false,
@@ -1456,8 +1456,8 @@
'wasm/streaming-decoder.h',
'wasm/wasm-api.cc',
'wasm/wasm-api.h',
'wasm/wasm-code-specialization.h',
'wasm/wasm-code-specialization.cc',
'wasm/wasm-code-specialization.h',
'wasm/wasm-debug.cc',
'wasm/wasm-external-refs.cc',
'wasm/wasm-external-refs.h',
@@ -4,10 +4,68 @@
#include "src/wasm/wasm-heap.h"
#include "src/assembler-inl.h"
#include "src/base/atomic-utils.h"
#include "src/base/macros.h"
#include "src/base/platform/platform.h"
#include "src/code-stubs.h"
#include "src/codegen.h"
#include "src/disassembler.h"
#include "src/globals.h"
#include "src/macro-assembler.h"
#include "src/objects-inl.h"
#include "src/wasm/wasm-module.h"
#include "src/wasm/wasm-objects-inl.h"
#include "src/wasm/wasm-objects.h"
#define TRACE_HEAP(...) \
do { \
if (FLAG_wasm_trace_native_heap) PrintF(__VA_ARGS__); \
} while (false)
namespace v8 {
namespace internal {
namespace wasm {
namespace {
size_t native_module_ids = 0;
#if V8_TARGET_ARCH_X64
#define __ masm->
constexpr bool kModuleCanAllocateMoreMemory = false;
void GenerateJumpTrampoline(MacroAssembler* masm, Address target) {
__ movq(kScratchRegister, reinterpret_cast<uint64_t>(target));
__ jmp(kScratchRegister);
}
#undef __
#else
const bool kModuleCanAllocateMoreMemory = true;
#endif
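// Helper for NativeModule::Clone: walk the code-target relocations of
// {new_code} (a fresh copy of {original_code}) and repoint each call using
// the old->new address map in {reverse_lookup}. On x64 this redirects calls
// from the source module's trampolines/stubs to the clone's; on other
// architectures the original target is kept.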
void PatchTrampolineAndStubCalls(
const WasmCode* original_code, const WasmCode* new_code,
const std::unordered_map<Address, Address, AddressHasher>& reverse_lookup) {
RelocIterator orig_it(
original_code->instructions(), original_code->reloc_info(),
original_code->constant_pool(), RelocInfo::kCodeTargetMask);
for (RelocIterator it(new_code->instructions(), new_code->reloc_info(),
new_code->constant_pool(), RelocInfo::kCodeTargetMask);
!it.done(); it.next(), orig_it.next()) {
Address old_target = orig_it.rinfo()->target_address();
#if V8_TARGET_ARCH_X64
auto found = reverse_lookup.find(old_target);
DCHECK(found != reverse_lookup.end());
Address new_target = found->second;
#else
Address new_target = old_target;
#endif
it.rinfo()->set_target_address(nullptr, new_target, SKIP_WRITE_BARRIER,
SKIP_ICACHE_FLUSH);
}
}
} // namespace
DisjointAllocationPool::DisjointAllocationPool(Address start, Address end) {
ranges_.push_back({start, end});
}
@@ -96,6 +154,732 @@ DisjointAllocationPool DisjointAllocationPool::Extract(size_t size,
return ret;
}
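// A short usage sketch of the pool (addresses are illustrative; Address is a
// byte pointer, so the casts are only for the example):
//
//   DisjointAllocationPool free_space(reinterpret_cast<Address>(0x1000),
//                                     reinterpret_cast<Address>(0x3000));
//   // free_space == {[0x1000, 0x3000)}
//   DisjointAllocationPool chunk = free_space.Allocate(0x800);
//   // chunk == {[0x1000, 0x1800)}, free_space == {[0x1800, 0x3000)}
//   free_space.Merge(std::move(chunk));
//   // Adjacent ranges coalesce: free_space == {[0x1000, 0x3000)} again.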
Address WasmCode::constant_pool() const {
if (FLAG_enable_embedded_constant_pool) {
if (constant_pool_offset_ < instructions().size()) {
return instructions().start() + constant_pool_offset_;
}
}
return nullptr;
}
size_t WasmCode::trap_handler_index() const {
CHECK(HasTrapHandlerIndex());
return static_cast<size_t>(trap_handler_index_);
}
void WasmCode::set_trap_handler_index(size_t value) {
trap_handler_index_ = value;
}
bool WasmCode::HasTrapHandlerIndex() const { return trap_handler_index_ >= 0; }
void WasmCode::ResetTrapHandlerIndex() { trap_handler_index_ = -1; }
// TODO(mtrofin): rework the dependency on isolate and code in
// Disassembler::Decode.
void WasmCode::Disassemble(Isolate* isolate, const char* name,
std::ostream& os) const {
os << name << std::endl;
Disassembler::Decode(isolate, &os, instructions().start(),
instructions().end(), nullptr);
}
void WasmCode::Print(Isolate* isolate) const {
OFStream os(stdout);
Disassemble(isolate, "", os);
}
WasmCode::~WasmCode() {
// Depending on finalizer order, the WasmCompiledModule finalizer may be
// called first, in which case we release the trap handler data here. If the
// InstanceFinalizer is called first, the handlers will be cleared in Reset,
// as if the NativeModule might later be used again (which would be the case
// if the WasmCompiledModule were still held by a WasmModuleObject).
if (HasTrapHandlerIndex()) {
CHECK_LT(trap_handler_index(),
static_cast<size_t>(std::numeric_limits<int>::max()));
trap_handler::ReleaseHandlerData(static_cast<int>(trap_handler_index()));
}
}
NativeModule::NativeModule(uint32_t num_functions, uint32_t num_imports,
bool can_request_more, VirtualMemory* mem,
WasmCodeManager* code_manager)
: instance_id(native_module_ids++),
code_table_(num_functions),
num_imported_functions_(num_imports),
free_memory_(reinterpret_cast<Address>(mem->address()),
reinterpret_cast<Address>(mem->end())),
wasm_code_manager_(code_manager),
can_request_more_memory_(can_request_more) {
VirtualMemory my_mem;
owned_memory_.push_back(my_mem);
owned_memory_.back().TakeControl(mem);
owned_code_.reserve(num_functions);
}
void NativeModule::ResizeCodeTableForTest(size_t last_index) {
size_t new_size = last_index + 1;
if (new_size > FunctionCount()) {
Isolate* isolate = compiled_module()->GetIsolate();
code_table_.resize(new_size);
int grow_by = static_cast<int>(new_size) -
compiled_module()->source_positions()->length();
compiled_module()->set_source_positions(
isolate->factory()->CopyFixedArrayAndGrow(
compiled_module()->source_positions(), grow_by, TENURED));
compiled_module()->set_handler_table(
isolate->factory()->CopyFixedArrayAndGrow(
compiled_module()->handler_table(), grow_by, TENURED));
}
}
WasmCode* NativeModule::GetCode(uint32_t index) const {
return code_table_[index];
}
uint32_t NativeModule::FunctionCount() const {
DCHECK_LE(code_table_.size(), std::numeric_limits<uint32_t>::max());
return static_cast<uint32_t>(code_table_.size());
}
WasmCode* NativeModule::AddOwnedCode(
Vector<const byte> orig_instructions,
std::unique_ptr<const byte[]>&& reloc_info, size_t reloc_size,
Maybe<uint32_t> index, WasmCode::Kind kind, size_t constant_pool_offset,
uint32_t stack_slots, size_t safepoint_table_offset,
std::shared_ptr<ProtectedInstructions> protected_instructions,
bool is_liftoff) {
// both allocation and insertion in owned_code_ happen in the same critical
// section, thus ensuring owned_code_'s elements are rarely if ever moved.
base::LockGuard<base::Mutex> lock(&allocation_mutex_);
Address executable_buffer = AllocateForCode(orig_instructions.size());
if (executable_buffer == nullptr) return nullptr;
memcpy(executable_buffer, orig_instructions.start(),
orig_instructions.size());
std::unique_ptr<WasmCode> code(new WasmCode(
{executable_buffer, orig_instructions.size()}, std::move(reloc_info),
reloc_size, this, index, kind, constant_pool_offset, stack_slots,
safepoint_table_offset, protected_instructions, is_liftoff));
WasmCode* ret = code.get();
// TODO(mtrofin): We allocate in increasing address order, and
// even if we end up with segmented memory, we may end up only with a few
// large moves - if, for example, a new segment is below the current ones.
auto insert_before = std::upper_bound(owned_code_.begin(), owned_code_.end(),
code, owned_code_comparer_);
owned_code_.insert(insert_before, std::move(code));
return ret;
}
WasmCode* NativeModule::AddCodeCopy(Handle<Code> code, WasmCode::Kind kind,
uint32_t index) {
WasmCode* ret = AddAnonymousCode(code, kind);
SetCodeTable(index, ret);
ret->index_ = Just(index);
compiled_module()->ptr_to_source_positions()->set(
static_cast<int>(index), code->source_position_table());
compiled_module()->ptr_to_handler_table()->set(static_cast<int>(index),
code->handler_table());
return ret;
}
WasmCode* NativeModule::AddInterpreterWrapper(Handle<Code> code,
uint32_t index) {
WasmCode* ret = AddAnonymousCode(code, WasmCode::InterpreterStub);
ret->index_ = Just(index);
return ret;
}
WasmCode* NativeModule::SetLazyBuiltin(Handle<Code> code) {
DCHECK_NULL(lazy_builtin_);
lazy_builtin_ = AddAnonymousCode(code, WasmCode::LazyStub);
for (uint32_t i = num_imported_functions(), e = FunctionCount(); i < e; ++i) {
SetCodeTable(i, lazy_builtin_);
}
return lazy_builtin_;
}
WasmCompiledModule* NativeModule::compiled_module() const {
return *compiled_module_;
}
void NativeModule::SetCompiledModule(
Handle<WasmCompiledModule> compiled_module) {
DCHECK(compiled_module_.is_null());
compiled_module_ = compiled_module;
}
WasmCode* NativeModule::AddAnonymousCode(Handle<Code> code,
WasmCode::Kind kind) {
std::unique_ptr<byte[]> reloc_info;
if (code->relocation_size() > 0) {
reloc_info.reset(new byte[code->relocation_size()]);
memcpy(reloc_info.get(), code->relocation_start(), code->relocation_size());
}
WasmCode* ret = AddOwnedCode(
{code->instruction_start(),
static_cast<size_t>(code->instruction_size())},
std::move(reloc_info), static_cast<size_t>(code->relocation_size()),
Nothing<uint32_t>(), kind, code->constant_pool_offset(),
(code->is_turbofanned() ? code->stack_slots() : 0),
(code->is_turbofanned() ? code->safepoint_table_offset() : 0), {});
if (ret == nullptr) return nullptr;
intptr_t delta = ret->instructions().start() - code->instruction_start();
int mask = RelocInfo::kApplyMask | RelocInfo::kCodeTargetMask |
RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT);
RelocIterator orig_it(*code, mask);
for (RelocIterator it(ret->instructions(), ret->reloc_info(),
ret->constant_pool(), mask);
!it.done(); it.next(), orig_it.next()) {
if (RelocInfo::IsCodeTarget(it.rinfo()->rmode())) {
Code* call_target =
Code::GetCodeFromTargetAddress(orig_it.rinfo()->target_address());
it.rinfo()->set_target_address(nullptr,
GetLocalAddressFor(handle(call_target)),
SKIP_WRITE_BARRIER, SKIP_ICACHE_FLUSH);
} else {
if (RelocInfo::IsEmbeddedObject(it.rinfo()->rmode())) {
DCHECK(Heap::IsImmovable(it.rinfo()->target_object()));
} else {
it.rinfo()->apply(delta);
}
}
}
return ret;
}
WasmCode* NativeModule::AddCode(
const CodeDesc& desc, uint32_t frame_slots, uint32_t index,
size_t safepoint_table_offset,
std::shared_ptr<ProtectedInstructions> protected_instructions,
bool is_liftoff) {
std::unique_ptr<byte[]> reloc_info;
if (desc.reloc_size) {
reloc_info.reset(new byte[desc.reloc_size]);
memcpy(reloc_info.get(), desc.buffer + desc.buffer_size - desc.reloc_size,
desc.reloc_size);
}
TurboAssembler* origin = reinterpret_cast<TurboAssembler*>(desc.origin);
WasmCode* ret = AddOwnedCode(
{desc.buffer, static_cast<size_t>(desc.instr_size)},
std::move(reloc_info), static_cast<size_t>(desc.reloc_size), Just(index),
WasmCode::Function, desc.instr_size - desc.constant_pool_size,
frame_slots, safepoint_table_offset, protected_instructions, is_liftoff);
if (ret == nullptr) return nullptr;
SetCodeTable(index, ret);
// TODO(mtrofin): this is a copy and paste from Code::CopyFrom.
int mode_mask = RelocInfo::kCodeTargetMask |
RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT) |
RelocInfo::ModeMask(RelocInfo::RUNTIME_ENTRY) |
RelocInfo::kApplyMask;
// Needed to find target_object and runtime_entry on X64
AllowDeferredHandleDereference embedding_raw_address;
for (RelocIterator it(ret->instructions(), ret->reloc_info(),
ret->constant_pool(), mode_mask);
!it.done(); it.next()) {
RelocInfo::Mode mode = it.rinfo()->rmode();
if (mode == RelocInfo::EMBEDDED_OBJECT) {
Handle<HeapObject> p = it.rinfo()->target_object_handle(origin);
DCHECK_EQ(*p, p->GetIsolate()->heap()->undefined_value());
it.rinfo()->set_target_object(*p, SKIP_WRITE_BARRIER, SKIP_ICACHE_FLUSH);
} else if (RelocInfo::IsCodeTarget(mode)) {
// rewrite code handles to direct pointers to the first instruction in the
// code object
Handle<Object> p = it.rinfo()->target_object_handle(origin);
Code* code = Code::cast(*p);
it.rinfo()->set_target_address(nullptr, GetLocalAddressFor(handle(code)),
SKIP_WRITE_BARRIER, SKIP_ICACHE_FLUSH);
} else if (RelocInfo::IsRuntimeEntry(mode)) {
Address p = it.rinfo()->target_runtime_entry(origin);
it.rinfo()->set_target_runtime_entry(
origin->isolate(), p, SKIP_WRITE_BARRIER, SKIP_ICACHE_FLUSH);
} else {
intptr_t delta = ret->instructions().start() - desc.buffer;
it.rinfo()->apply(delta);
}
}
return ret;
}
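// On x64, calls from JITed wasm code to immovable GC-heap Code objects are
// routed through a per-module trampoline (an absolute jump emitted by
// GenerateJumpTrampoline); movable stubs are copied into the module instead
// (see GetLocalAddressFor). On other architectures the Code object's
// instruction start is used directly.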
#if V8_TARGET_ARCH_X64
Address NativeModule::CreateTrampolineTo(Handle<Code> code) {
MacroAssembler masm(code->GetIsolate(), nullptr, 0, CodeObjectRequired::kNo);
Address dest = code->instruction_start();
GenerateJumpTrampoline(&masm, dest);
CodeDesc code_desc;
masm.GetCode(nullptr, &code_desc);
WasmCode* wasm_code = AddOwnedCode(
{code_desc.buffer, static_cast<size_t>(code_desc.instr_size)}, nullptr, 0,
Nothing<uint32_t>(), WasmCode::Trampoline, 0, 0, 0, {});
if (wasm_code == nullptr) return nullptr;
Address ret = wasm_code->instructions().start();
trampolines_.emplace(std::make_pair(dest, ret));
return ret;
}
#else
Address NativeModule::CreateTrampolineTo(Handle<Code> code) {
Address ret = code->instruction_start();
trampolines_.insert(std::make_pair(ret, ret));
return ret;
}
#endif
Address NativeModule::GetLocalAddressFor(Handle<Code> code) {
if (!Heap::IsImmovable(*code)) {
DCHECK(code->kind() == Code::STUB &&
CodeStub::MajorKeyFromKey(code->stub_key()) == CodeStub::DoubleToI);
uint32_t key = code->stub_key();
auto copy = stubs_.find(key);
if (copy == stubs_.end()) {
WasmCode* ret = AddAnonymousCode(code, WasmCode::CopiedStub);
copy = stubs_.emplace(std::make_pair(key, ret)).first;
}
return copy->second->instructions().start();
} else {
Address index = code->instruction_start();
auto trampoline_iter = trampolines_.find(index);
if (trampoline_iter == trampolines_.end()) {
return CreateTrampolineTo(code);
} else {
return trampoline_iter->second;
}
}
}
WasmCode* NativeModule::GetExportedWrapper(uint32_t index) {
auto found = exported_wasm_to_wasm_wrappers_.find(index);
if (found != exported_wasm_to_wasm_wrappers_.end()) {
return found->second;
}
return nullptr;
}
WasmCode* NativeModule::AddExportedWrapper(Handle<Code> code, uint32_t index) {
WasmCode* ret = AddAnonymousCode(code, WasmCode::WasmToWasmWrapper);
ret->index_ = Just(index);
exported_wasm_to_wasm_wrappers_.insert(std::make_pair(index, ret));
return ret;
}
void NativeModule::LinkAll() {
for (uint32_t index = 0; index < code_table_.size(); ++index) {
Link(index);
}
}
void NativeModule::Link(uint32_t index) {
WasmCode* code = code_table_[index];
// skip imports
if (!code) return;
int mode_mask = RelocInfo::ModeMask(RelocInfo::WASM_CALL);
for (RelocIterator it(code->instructions(), code->reloc_info(),
code->constant_pool(), mode_mask);
!it.done(); it.next()) {
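// Until linking, the WASM_CALL target slot holds the callee's function
// index; replace it with the callee's instruction start in this module.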
uint32_t index =
*(reinterpret_cast<uint32_t*>(it.rinfo()->target_address_address()));
const WasmCode* target = GetCode(index);
if (target == nullptr) continue;
Address target_addr = target->instructions().start();
DCHECK_NOT_NULL(target);
it.rinfo()->set_wasm_call_address(nullptr, target_addr,
ICacheFlushMode::SKIP_ICACHE_FLUSH);
}
}
Address NativeModule::AllocateForCode(size_t size) {
// This happens under a lock held by the caller (allocation_mutex_).
size = RoundUp(size, kCodeAlignment);
DisjointAllocationPool mem = free_memory_.Allocate(size);
if (mem.IsEmpty()) {
if (!can_request_more_memory_) return nullptr;
Address hint = owned_memory_.empty()
? nullptr
: reinterpret_cast<Address>(owned_memory_.back().end());
VirtualMemory empty_mem;
owned_memory_.push_back(empty_mem);
VirtualMemory& new_mem = owned_memory_.back();
wasm_code_manager_->TryAllocate(size, &new_mem, hint);
if (!new_mem.IsReserved()) return nullptr;
DisjointAllocationPool mem_pool(
reinterpret_cast<Address>(new_mem.address()),
reinterpret_cast<Address>(new_mem.end()));
wasm_code_manager_->AssignRanges(new_mem.address(), new_mem.end(), this);
free_memory_.Merge(std::move(mem_pool));
mem = free_memory_.Allocate(size);
if (mem.IsEmpty()) return nullptr;
}
Address ret = mem.ranges().front().first;
Address end = ret + size;
Address commit_start = RoundUp(ret, base::OS::AllocatePageSize());
Address commit_end = RoundUp(end, base::OS::AllocatePageSize());
// {commit_start} will be either ret or the start of the next page.
// {commit_end} will be the start of the page after the one in which
// the allocation ends.
// We start from an aligned start, and we know we allocated vmem in
// page multiples.
// We just need to commit what's not committed. The page in which we
// start is already committed (or we start at the beginning of a page).
// The end needs to be committed all through the end of the page.
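// Example with 4 KiB pages: ret == 0x1E00 and size == 0x500 give
// end == 0x2300, commit_start == 0x2000 and commit_end == 0x3000, so we
// commit the single page [0x2000, 0x3000). If instead the allocation fits in
// the already-committed page, commit_start == commit_end and nothing is
// committed.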
if (commit_start < commit_end) {
#if V8_OS_WIN
// On Windows, we cannot commit a range that straddles different
// reservations of virtual memory. Because we bump-allocate, and because, if
// we need more memory, we append that memory at the end of the
// owned_memory_ list, we traverse that list in reverse order to find the
// reservation(s) that guide how to chunk the region to commit.
for (auto it = owned_memory_.crbegin(), rend = owned_memory_.crend();
it != rend && commit_start < commit_end; ++it) {
if (commit_end > it->end() || it->address() >= commit_end) continue;
Address start =
std::max(commit_start, reinterpret_cast<Address>(it->address()));
size_t commit_size = static_cast<size_t>(commit_end - start);
DCHECK(IsAligned(commit_size, base::OS::AllocatePageSize()));
if (!wasm_code_manager_->Commit(start, commit_size)) {
return nullptr;
}
committed_memory_ += commit_size;
commit_end = start;
}
#else
size_t commit_size = static_cast<size_t>(commit_end - commit_start);
DCHECK(IsAligned(commit_size, base::OS::AllocatePageSize()));
if (!wasm_code_manager_->Commit(commit_start, commit_size)) {
return nullptr;
}
committed_memory_ += commit_size;
#endif
}
DCHECK(IsAligned(reinterpret_cast<intptr_t>(ret), kCodeAlignment));
allocated_memory_.Merge(std::move(mem));
TRACE_HEAP("ID: %zu. Code alloc: %p,+%zu\n", instance_id,
reinterpret_cast<void*>(ret), size);
return ret;
}
WasmCode* NativeModule::Lookup(Address pc) {
if (owned_code_.empty()) return nullptr;
// Make a fake WasmCode temp, to look into owned_code_
std::unique_ptr<WasmCode> temp(new WasmCode(pc));
auto iter = std::upper_bound(owned_code_.begin(), owned_code_.end(), temp,
owned_code_comparer_);
if (iter == owned_code_.begin()) return nullptr;
--iter;
WasmCode* candidate = (*iter).get();
DCHECK_NOT_NULL(candidate);
if (candidate->instructions().start() <= pc &&
pc < candidate->instructions().start() +
candidate->instructions().size()) {
return candidate;
}
return nullptr;
}
WasmCode* NativeModule::CloneLazyBuiltinInto(uint32_t index) {
DCHECK_NOT_NULL(lazy_builtin());
WasmCode* ret = CloneCode(lazy_builtin());
SetCodeTable(index, ret);
ret->index_ = Just(index);
return ret;
}
bool NativeModule::CloneTrampolinesAndStubs(const NativeModule* other) {
for (auto& pair : other->trampolines_) {
Address key = pair.first;
Address local =
GetLocalAddressFor(handle(Code::GetCodeFromTargetAddress(key)));
if (local == nullptr) return false;
trampolines_.emplace(std::make_pair(key, local));
}
for (auto& pair : other->stubs_) {
uint32_t key = pair.first;
WasmCode* clone = CloneCode(pair.second);
if (!clone) return false;
stubs_.emplace(std::make_pair(key, clone));
}
return true;
}
WasmCode* NativeModule::CloneCode(const WasmCode* original_code) {
std::unique_ptr<byte[]> reloc_info;
if (original_code->reloc_info().size() > 0) {
reloc_info.reset(new byte[original_code->reloc_info().size()]);
memcpy(reloc_info.get(), original_code->reloc_info().start(),
original_code->reloc_info().size());
}
WasmCode* ret = AddOwnedCode(
original_code->instructions(), std::move(reloc_info),
original_code->reloc_info().size(), original_code->index_,
original_code->kind(), original_code->constant_pool_offset_,
original_code->stack_slots(), original_code->safepoint_table_offset_,
original_code->protected_instructions_);
if (ret == nullptr) return nullptr;
if (!ret->IsAnonymous()) {
SetCodeTable(ret->index(), ret);
}
intptr_t delta =
ret->instructions().start() - original_code->instructions().start();
for (RelocIterator it(ret->instructions(), ret->reloc_info(),
ret->constant_pool(), RelocInfo::kApplyMask);
!it.done(); it.next()) {
it.rinfo()->apply(delta);
}
return ret;
}
void NativeModule::SetCodeTable(uint32_t index, wasm::WasmCode* code) {
code_table_[index] = code;
}
NativeModule::~NativeModule() {
TRACE_HEAP("Deleting native module: %p\n", reinterpret_cast<void*>(this));
wasm_code_manager_->FreeNativeModuleMemories(this);
}
WasmCodeManager::WasmCodeManager(v8::Isolate* isolate, size_t max_committed)
: isolate_(isolate) {
DCHECK_LE(max_committed, kMaxWasmCodeMemory);
remaining_uncommitted_.SetValue(max_committed);
}
bool WasmCodeManager::Commit(Address start, size_t size) {
DCHECK(
IsAligned(reinterpret_cast<size_t>(start), base::OS::AllocatePageSize()));
DCHECK(IsAligned(size, base::OS::AllocatePageSize()));
if (size > std::numeric_limits<intptr_t>::max()) return false;
// reserve the size.
intptr_t new_value = remaining_uncommitted_.Decrement(size);
if (new_value < 0) {
remaining_uncommitted_.Increment(size);
return false;
}
// TODO(v8:7105) Enable W^X instead of setting W|X permissions below.
bool ret = base::OS::CommitRegion(start, size) &&
base::OS::SetPermissions(
start, size, base::OS::MemoryPermission::kReadWriteExecute);
if (!ret) {
// Highly unlikely.
remaining_uncommitted_.Increment(size);
return false;
}
// This API assumes main thread
isolate_->AdjustAmountOfExternalAllocatedMemory(size);
if (WouldGCHelp()) {
// This API does not assume main thread, and would schedule
// a GC if called from a different thread, instead of synchronously
// doing one.
isolate_->MemoryPressureNotification(MemoryPressureLevel::kCritical);
}
return ret;
}
bool WasmCodeManager::WouldGCHelp() const {
// If all we have is one module, or none, no GC would help.
// GC would help only if there are other native modules remaining
// that could be collected.
if (active_ <= 1) return false;
// We have an expectation on the largest size a native function
// may have.
constexpr size_t kMaxNativeFunction = 32 * MB;
intptr_t remaining = remaining_uncommitted_.Value();
DCHECK_GE(remaining, 0);
return static_cast<size_t>(remaining) < kMaxNativeFunction;
}
void WasmCodeManager::AssignRanges(void* start, void* end,
NativeModule* native_module) {
lookup_map_.insert(std::make_pair(
reinterpret_cast<Address>(start),
std::make_pair(reinterpret_cast<Address>(end), native_module)));
}
void WasmCodeManager::TryAllocate(size_t size, VirtualMemory* ret, void* hint) {
DCHECK_GT(size, 0);
size = RoundUp(size, base::OS::AllocatePageSize());
if (hint == nullptr) hint = base::OS::GetRandomMmapAddr();
if (!AlignedAllocVirtualMemory(
size, static_cast<size_t>(base::OS::AllocatePageSize()), hint, ret)) {
DCHECK(!ret->IsReserved());
}
TRACE_HEAP("VMem alloc: %p:%p (%zu)\n", ret->address(), ret->end(),
ret->size());
}
size_t WasmCodeManager::GetAllocationChunk(const WasmModule& module) {
// TODO(mtrofin): this should pick up its 'maximal code range size'
// from something embedder-provided
if (kRequiresCodeRange) return kMaxWasmCodeMemory;
DCHECK(kModuleCanAllocateMoreMemory);
size_t ret = base::OS::AllocatePageSize();
// A ballpark guesstimate of the native code size inflation factor.
constexpr size_t kMultiplier = 4;
for (auto& function : module.functions) {
ret += kMultiplier * function.code.length();
}
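// For example, a module whose function bodies total 100 KB of wasm bytecode
// reserves roughly one page plus 400 KB.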
return ret;
}
std::unique_ptr<NativeModule> WasmCodeManager::NewNativeModule(
const WasmModule& module) {
size_t code_size = GetAllocationChunk(module);
return NewNativeModule(
code_size, static_cast<uint32_t>(module.functions.size()),
module.num_imported_functions, kModuleCanAllocateMoreMemory);
}
std::unique_ptr<NativeModule> WasmCodeManager::NewNativeModule(
size_t size_estimate, uint32_t num_functions,
uint32_t num_imported_functions, bool can_request_more) {
VirtualMemory mem;
TryAllocate(size_estimate, &mem);
if (mem.IsReserved()) {
void* start = mem.address();
size_t size = mem.size();
void* end = mem.end();
std::unique_ptr<NativeModule> ret(new NativeModule(
num_functions, num_imported_functions, can_request_more, &mem, this));
TRACE_HEAP("New Module: ID:%zu. Mem: %p,+%zu\n", ret->instance_id, start,
size);
AssignRanges(start, end, ret.get());
++active_;
return ret;
}
return nullptr;
}
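// Cloning happens in stages: reserve a new module of the same initial size,
// clone the lazy builtin, trampolines and stubs, build an old->new address
// map for them, and then use PatchTrampolineAndStubCalls to repoint calls in
// the cloned stubs, the cloned lazy builtin and each cloned function.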
std::unique_ptr<NativeModule> NativeModule::Clone() {
std::unique_ptr<NativeModule> ret = wasm_code_manager_->NewNativeModule(
owned_memory_.front().size(), FunctionCount(), num_imported_functions(),
can_request_more_memory_);
TRACE_HEAP("%zu cloned from %zu\n", ret->instance_id, instance_id);
if (!ret) return ret;
if (lazy_builtin() != nullptr) {
ret->lazy_builtin_ = ret->CloneCode(lazy_builtin());
}
if (!ret->CloneTrampolinesAndStubs(this)) return nullptr;
std::unordered_map<Address, Address, AddressHasher> reverse_lookup;
for (auto& pair : trampolines_) {
Address old_dest = pair.second;
auto local = ret->trampolines_.find(pair.first);
DCHECK(local != ret->trampolines_.end());
Address new_dest = local->second;
reverse_lookup.emplace(old_dest, new_dest);
}
for (auto& pair : stubs_) {
Address old_dest = pair.second->instructions().start();
auto local = ret->stubs_.find(pair.first);
DCHECK(local != ret->stubs_.end());
Address new_dest = local->second->instructions().start();
reverse_lookup.emplace(old_dest, new_dest);
}
for (auto& pair : ret->stubs_) {
WasmCode* new_stub = pair.second;
WasmCode* old_stub = stubs_.find(pair.first)->second;
PatchTrampolineAndStubCalls(old_stub, new_stub, reverse_lookup);
}
if (lazy_builtin_ != nullptr) {
PatchTrampolineAndStubCalls(lazy_builtin_, ret->lazy_builtin_,
reverse_lookup);
}
for (uint32_t i = num_imported_functions(), e = FunctionCount(); i < e; ++i) {
const WasmCode* original_code = GetCode(i);
switch (original_code->kind()) {
case WasmCode::LazyStub: {
if (original_code->IsAnonymous()) {
ret->SetCodeTable(i, ret->lazy_builtin());
} else {
if (!ret->CloneLazyBuiltinInto(i)) return nullptr;
}
} break;
case WasmCode::Function: {
WasmCode* new_code = ret->CloneCode(original_code);
if (new_code == nullptr) return nullptr;
PatchTrampolineAndStubCalls(original_code, new_code, reverse_lookup);
} break;
default:
UNREACHABLE();
}
}
ret->specialization_data_ = specialization_data_;
return ret;
}
void WasmCodeManager::FreeNativeModuleMemories(NativeModule* native_module) {
DCHECK_GE(active_, 1);
--active_;
TRACE_HEAP("Freeing %zu\n", native_module->instance_id);
for (auto& vmem : native_module->owned_memory_) {
lookup_map_.erase(reinterpret_cast<Address>(vmem.address()));
Free(&vmem);
DCHECK(!vmem.IsReserved());
}
// No need to tell the GC anything if we're destroying the heap, which we
// currently indicate by isolate_ being null.
if (isolate_ == nullptr) return;
size_t freed_mem = native_module->committed_memory_;
DCHECK(IsAligned(freed_mem, base::OS::AllocatePageSize()));
remaining_uncommitted_.Increment(freed_mem);
isolate_->AdjustAmountOfExternalAllocatedMemory(
-static_cast<int64_t>(freed_mem));
}
// TODO(wasm): We can make this more efficient if needed. For
// example, we can preface the first instruction with a pointer to
// the WasmCode. In the meantime, we have a separate API so we can
// easily identify those places where we know we have the first
// instruction PC.
WasmCode* WasmCodeManager::GetCodeFromStartAddress(Address pc) const {
return LookupCode(pc);
}
WasmCode* WasmCodeManager::LookupCode(Address pc) const {
if (lookup_map_.empty()) return nullptr;
auto iter = lookup_map_.upper_bound(pc);
if (iter == lookup_map_.begin()) return nullptr;
--iter;
Address range_start = iter->first;
Address range_end = iter->second.first;
NativeModule* candidate = iter->second.second;
DCHECK_NOT_NULL(candidate);
if (range_start <= pc && pc < range_end) {
return candidate->Lookup(pc);
}
return nullptr;
}
void WasmCodeManager::Free(VirtualMemory* mem) {
DCHECK(mem->IsReserved());
void* start = mem->address();
void* end = mem->end();
size_t size = mem->size();
mem->Release();
TRACE_HEAP("VMem Release: %p:%p (%zu)\n", start, end, size);
}
intptr_t WasmCodeManager::remaining_uncommitted() const {
return remaining_uncommitted_.Value();
}
} // namespace wasm
} // namespace internal
} // namespace v8
#undef TRACE_HEAP
@@ -5,15 +5,37 @@
#ifndef V8_WASM_HEAP_H_
#define V8_WASM_HEAP_H_
#include <functional>
#include <list>
#include <map>
#include <unordered_map>
#include <unordered_set>
#include "src/base/macros.h"
#include "src/handles.h"
#include "src/trap-handler/trap-handler.h"
#include "src/vector.h"
namespace v8 {
class Isolate;
namespace internal {
struct CodeDesc;
class Code;
class WasmCompiledModule;
namespace wasm {
using GlobalHandleAddress = Address;
class NativeModule;
struct WasmModule;
struct AddressHasher {
size_t operator()(const Address& addr) const {
return std::hash<intptr_t>()(reinterpret_cast<intptr_t>(addr));
}
};
// Sorted, disjoint and non-overlapping memory ranges. A range is of the
// form [start, end). So there's no [start, end), [end, other_end),
// because that should have been reduced to [start, other_end).
@@ -60,6 +82,309 @@ class V8_EXPORT_PRIVATE DisjointAllocationPool final {
DISALLOW_COPY_AND_ASSIGN(DisjointAllocationPool)
};
using ProtectedInstructions =
std::vector<trap_handler::ProtectedInstructionData>;
class V8_EXPORT_PRIVATE WasmCode final {
public:
enum Kind {
Function,
WasmToWasmWrapper,
WasmToJsWrapper,
LazyStub,
InterpreterStub,
CopiedStub,
Trampoline
};
Vector<byte> instructions() const { return instructions_; }
Vector<const byte> reloc_info() const {
return {reloc_info_.get(), reloc_size_};
}
uint32_t index() const { return index_.ToChecked(); }
// Anonymous functions are functions that don't carry an index, like
// trampolines.
bool IsAnonymous() const { return index_.IsNothing(); }
Kind kind() const { return kind_; }
NativeModule* owner() const { return owner_; }
Address constant_pool() const;
size_t constant_pool_offset() const { return constant_pool_offset_; }
size_t safepoint_table_offset() const { return safepoint_table_offset_; }
uint32_t stack_slots() const { return stack_slots_; }
bool is_liftoff() const { return is_liftoff_; }
size_t trap_handler_index() const;
void set_trap_handler_index(size_t);
bool HasTrapHandlerIndex() const;
void ResetTrapHandlerIndex();
const ProtectedInstructions& protected_instructions() const {
return *protected_instructions_.get();
}
void Disassemble(Isolate* isolate, const char* name, std::ostream& os) const;
void Print(Isolate* isolate) const;
~WasmCode();
private:
friend class NativeModule;
friend class NativeModuleDeserializer;
// A constructor used just for implementing Lookup.
WasmCode(Address pc) : instructions_(pc, 0), index_(Nothing<uint32_t>()) {}
WasmCode(Vector<byte> instructions,
std::unique_ptr<const byte[]>&& reloc_info, size_t reloc_size,
NativeModule* owner, Maybe<uint32_t> index, Kind kind,
size_t constant_pool_offset, uint32_t stack_slots,
size_t safepoint_table_offset,
std::shared_ptr<ProtectedInstructions> protected_instructions,
bool is_liftoff = false)
: instructions_(instructions),
reloc_info_(std::move(reloc_info)),
reloc_size_(reloc_size),
owner_(owner),
index_(index),
kind_(kind),
constant_pool_offset_(constant_pool_offset),
stack_slots_(stack_slots),
safepoint_table_offset_(safepoint_table_offset),
protected_instructions_(protected_instructions),
is_liftoff_(is_liftoff) {}
WasmCode(const WasmCode&) = delete;
WasmCode& operator=(const WasmCode&) = delete;
Vector<byte> instructions_;
std::unique_ptr<const byte[]> reloc_info_;
size_t reloc_size_ = 0;
NativeModule* owner_ = nullptr;
Maybe<uint32_t> index_;
Kind kind_;
size_t constant_pool_offset_ = 0;
uint32_t stack_slots_ = 0;
// we care about safepoint data for wasm-to-js functions,
// since there may be stack/register tagged values for large number
// conversions.
size_t safepoint_table_offset_ = 0;
intptr_t trap_handler_index_ = -1;
std::shared_ptr<ProtectedInstructions> protected_instructions_;
bool is_liftoff_;
};
class WasmCodeManager;
// Note that we currently need to add code on the main thread, because we may
// trigger a GC if we believe there's a chance the GC would reclaim native
// modules. The code is otherwise ready for concurrency; we just need to be
// careful about this GC consideration. See WouldGCHelp and
// WasmCodeManager::Commit.
class V8_EXPORT_PRIVATE NativeModule final {
public:
std::unique_ptr<NativeModule> Clone();
WasmCode* AddCode(const CodeDesc& desc, uint32_t frame_count, uint32_t index,
size_t safepoint_table_offset,
std::shared_ptr<ProtectedInstructions>,
bool is_liftoff = false);
// A way to copy over JS-allocated code. This is because we compile
// certain wrappers using a different pipeline.
WasmCode* AddCodeCopy(Handle<Code> code, WasmCode::Kind kind, uint32_t index);
// Add an interpreter wrapper. For the same reason as AddCodeCopy, we
// currently compile these using a different pipeline and we can't get a
// CodeDesc here. When adding interpreter wrappers, we do not insert them in
// the code_table; however, we let them self-identify as the {index} function.
WasmCode* AddInterpreterWrapper(Handle<Code> code, uint32_t index);
// When starting lazy compilation, provide the WasmLazyCompile builtin by
// calling SetLazyBuiltin. It will initialize the code table with it, and the
// lazy_builtin_ field. The latter is used when creating entries for exported
// functions and indirect callable functions, so that they may be identified
// by the runtime.
WasmCode* SetLazyBuiltin(Handle<Code> code);
// ExportedWrappers are WasmToWasmWrappers for functions placed on import
// tables. We construct them as-needed.
WasmCode* GetExportedWrapper(uint32_t index);
WasmCode* AddExportedWrapper(Handle<Code> code, uint32_t index);
// FunctionCount is WasmModule::functions.size().
uint32_t FunctionCount() const;
WasmCode* GetCode(uint32_t index) const;
WasmCode* lazy_builtin() const { return lazy_builtin_; }
// We special-case lazy cloning because we currently rely on making copies
// of the lazy builtin, to be able to identify, in the runtime, which function
// the lazy builtin is a placeholder of. If we used trampolines, we would call
// the runtime function from a common pc. We could then figure out who the
// caller was if the trampolines called, rather than jumped to, the common
// builtin. The logic for seeking through frames would change, though.
// TODO(mtrofin): perhaps we can do exactly that - either before or after
// this change.
WasmCode* CloneLazyBuiltinInto(uint32_t);
// For cctests, where we build both WasmModule and the runtime objects
// on the fly, and bypass the instance builder pipeline.
void ResizeCodeTableForTest(size_t);
void LinkAll();
void Link(uint32_t index);
// TODO(mtrofin): needed until we sort out exception handlers and
// source positions, which are still on the GC-heap.
WasmCompiledModule* compiled_module() const;
void SetCompiledModule(Handle<WasmCompiledModule>);
// Shorthand accessors to the specialization data content.
std::vector<wasm::GlobalHandleAddress>& function_tables() {
return specialization_data_.function_tables;
}
std::vector<wasm::GlobalHandleAddress>& signature_tables() {
return specialization_data_.signature_tables;
}
std::vector<wasm::GlobalHandleAddress>& empty_function_tables() {
return specialization_data_.empty_function_tables;
}
std::vector<wasm::GlobalHandleAddress>& empty_signature_tables() {
return specialization_data_.empty_signature_tables;
}
uint32_t num_imported_functions() const { return num_imported_functions_; }
size_t num_function_tables() const {
return specialization_data_.empty_function_tables.size();
}
size_t committed_memory() const { return committed_memory_; }
const size_t instance_id = 0;
~NativeModule();
private:
friend class WasmCodeManager;
friend class NativeModuleSerializer;
friend class NativeModuleDeserializer;
struct WasmCodeUniquePtrComparer {
bool operator()(const std::unique_ptr<WasmCode>& a,
const std::unique_ptr<WasmCode>& b) {
DCHECK(a);
DCHECK(b);
return a->instructions().start() < b->instructions().start();
}
};
static base::AtomicNumber<uint32_t> next_id_;
NativeModule(const NativeModule&) = delete;
NativeModule& operator=(const NativeModule&) = delete;
NativeModule(uint32_t num_functions, uint32_t num_imports,
bool can_request_more, VirtualMemory* vmem,
WasmCodeManager* code_manager);
WasmCode* AddAnonymousCode(Handle<Code>, WasmCode::Kind kind);
Address AllocateForCode(size_t size);
// Primitive for adding code to the native module. All code added to a native
// module is owned by that module. Various callers get to decide on how the
// code is obtained (a CodeDesc or, for the time being, a Code*), the kind,
// whether it has an index or is anonymous, etc.
WasmCode* AddOwnedCode(Vector<const byte> orig_instructions,
std::unique_ptr<const byte[]>&& reloc_info,
size_t reloc_size, Maybe<uint32_t> index,
WasmCode::Kind kind, size_t constant_pool_offset,
uint32_t stack_slots, size_t safepoint_table_offset,
std::shared_ptr<ProtectedInstructions>,
bool is_liftoff = false);
void SetCodeTable(uint32_t, wasm::WasmCode*);
WasmCode* CloneCode(const WasmCode*);
bool CloneTrampolinesAndStubs(const NativeModule* other);
WasmCode* Lookup(Address);
Address GetLocalAddressFor(Handle<Code>);
Address CreateTrampolineTo(Handle<Code>);
std::vector<std::unique_ptr<WasmCode>> owned_code_;
std::unordered_map<uint32_t, WasmCode*> exported_wasm_to_wasm_wrappers_;
WasmCodeUniquePtrComparer owned_code_comparer_;
std::vector<WasmCode*> code_table_;
uint32_t num_imported_functions_;
std::unordered_map<Address, Address, AddressHasher> trampolines_;
std::unordered_map<uint32_t, WasmCode*> stubs_;
DisjointAllocationPool free_memory_;
DisjointAllocationPool allocated_memory_;
std::list<VirtualMemory> owned_memory_;
WasmCodeManager* wasm_code_manager_;
wasm::WasmCode* lazy_builtin_ = nullptr;
base::Mutex allocation_mutex_;
Handle<WasmCompiledModule> compiled_module_;
size_t committed_memory_ = 0;
bool can_request_more_memory_;
// Specialization data that needs to be serialized and cloned.
// Kept grouped together because that makes cloning all these elements a
// one-line copy.
struct {
std::vector<wasm::GlobalHandleAddress> function_tables;
std::vector<wasm::GlobalHandleAddress> signature_tables;
std::vector<wasm::GlobalHandleAddress> empty_function_tables;
std::vector<wasm::GlobalHandleAddress> empty_signature_tables;
} specialization_data_;
};
class V8_EXPORT_PRIVATE WasmCodeManager final {
public:
// The only reason we depend on Isolate is to report native memory used
// and held by a GC-ed object. We'll need to mitigate that when we
// start sharing wasm heaps.
WasmCodeManager(v8::Isolate*, size_t max_committed);
// Create a new NativeModule. The caller is responsible for its
// lifetime. The native module will be given some memory for code,
// which will be page size aligned. The size of the initial memory
// is determined with a heuristic based on the total size of wasm
// code. The native module may later request more memory.
std::unique_ptr<NativeModule> NewNativeModule(const WasmModule&);
std::unique_ptr<NativeModule> NewNativeModule(size_t memory_estimate,
uint32_t num_functions,
uint32_t num_imported_functions,
bool can_request_more);
WasmCode* LookupCode(Address pc) const;
WasmCode* GetCodeFromStartAddress(Address pc) const;
intptr_t remaining_uncommitted() const;
private:
static const size_t kMaxWasmCodeMemory = 256 * MB;
friend class NativeModule;
WasmCodeManager(const WasmCodeManager&) = delete;
WasmCodeManager& operator=(const WasmCodeManager&) = delete;
void TryAllocate(size_t size, VirtualMemory*, void* hint = nullptr);
bool Commit(Address, size_t);
// Currently, we uncommit a whole module, so all we need to do is account
// for the freed memory size. We do that in FreeNativeModuleMemories;
// there is no separate Uncommit.
void FreeNativeModuleMemories(NativeModule*);
void Free(VirtualMemory* mem);
void AssignRanges(void* start, void* end, NativeModule*);
size_t GetAllocationChunk(const WasmModule& module);
bool WouldGCHelp() const;
std::map<Address, std::pair<Address, NativeModule*>> lookup_map_;
// count of NativeModules not yet collected. Helps determine if it's
// worth requesting a GC on memory pressure.
size_t active_ = 0;
base::AtomicNumber<intptr_t> remaining_uncommitted_;
v8::Isolate* isolate_;
};
} // namespace wasm
} // namespace internal
} // namespace v8
@@ -381,22 +381,28 @@ class WasmCompiledModule : public FixedArray {
// for deserialization, and if they are serializable.
// By default, instance values go to WasmInstanceObject, however, if
// we embed the generated code with a value, then we track that value here.
#define CORE_WCM_PROPERTY_TABLE(MACRO) \
MACRO(WASM_OBJECT, WasmSharedModuleData, shared) \
MACRO(OBJECT, Context, native_context) \
#define CORE_WCM_PROPERTY_TABLE(MACRO) \
MACRO(WASM_OBJECT, WasmSharedModuleData, shared) \
MACRO(OBJECT, Context, native_context) \
MACRO(CONST_OBJECT, FixedArray, export_wrappers) \
MACRO(OBJECT, FixedArray, weak_exported_functions) \
MACRO(WASM_OBJECT, WasmCompiledModule, next_instance) \
MACRO(WASM_OBJECT, WasmCompiledModule, prev_instance) \
MACRO(WEAK_LINK, WasmInstanceObject, owning_instance) \
MACRO(WEAK_LINK, WasmModuleObject, wasm_module) \
MACRO(OBJECT, FixedArray, handler_table) \
MACRO(OBJECT, FixedArray, source_positions) \
MACRO(OBJECT, Foreign, native_module) \
MACRO(OBJECT, FixedArray, lazy_compile_data)
#define GC_WCM_PROPERTY_TABLE(MACRO) \
MACRO(SMALL_CONST_NUMBER, uint32_t, num_imported_functions) \
MACRO(CONST_OBJECT, FixedArray, code_table) \
MACRO(CONST_OBJECT, FixedArray, export_wrappers) \
MACRO(OBJECT, FixedArray, weak_exported_functions) \
MACRO(OBJECT, FixedArray, function_tables) \
MACRO(OBJECT, FixedArray, signature_tables) \
MACRO(CONST_OBJECT, FixedArray, empty_function_tables) \
MACRO(CONST_OBJECT, FixedArray, empty_signature_tables) \
MACRO(SMALL_CONST_NUMBER, uint32_t, initial_pages) \
MACRO(WASM_OBJECT, WasmCompiledModule, next_instance) \
MACRO(WASM_OBJECT, WasmCompiledModule, prev_instance) \
MACRO(WEAK_LINK, JSObject, owning_instance) \
MACRO(WEAK_LINK, WasmModuleObject, wasm_module)
MACRO(SMALL_CONST_NUMBER, uint32_t, initial_pages)
#if DEBUG
#define DEBUG_ONLY_TABLE(MACRO) MACRO(SMALL_CONST_NUMBER, uint32_t, instance_id)
@@ -409,6 +415,7 @@ class WasmCompiledModule : public FixedArray {
#define WCM_PROPERTY_TABLE(MACRO) \
CORE_WCM_PROPERTY_TABLE(MACRO) \
GC_WCM_PROPERTY_TABLE(MACRO) \
DEBUG_ONLY_TABLE(MACRO)
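// Each MACRO(TYPE, CppType, name) row expands into typed accessors on
// WasmCompiledModule; for example, the source_positions and handler_table
// rows above are what wasm-heap.cc uses as compiled_module()->handler_table(),
// ->set_source_positions(...) and ->ptr_to_source_positions().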
private:
@@ -151,6 +151,241 @@ TEST_F(DisjointAllocationPoolTest, MergingSkipLargerSrcWithGap) {
CheckLooksLike(a, {{10, 15}, {20, 35}, {36, 40}});
}
class WasmCodeManagerTest : public TestWithIsolate {
public:
using NativeModulePtr = std::unique_ptr<NativeModule>;
enum ModuleStyle : int { Fixed = 0, Growable = 1 };
const std::vector<ModuleStyle> styles() const {
return std::vector<ModuleStyle>({Fixed, Growable});
}
// We pretend all our modules have 10 functions and no imports, just so
// we can size up the code_table.
NativeModulePtr AllocFixedModule(WasmCodeManager* manager, size_t size) {
return manager->NewNativeModule(size, 10, 0, false);
}
NativeModulePtr AllocGrowableModule(WasmCodeManager* manager, size_t size) {
return manager->NewNativeModule(size, 10, 0, true);
}
NativeModulePtr AllocModule(WasmCodeManager* manager, size_t size,
ModuleStyle style) {
switch (style) {
case Fixed:
return AllocFixedModule(manager, size);
case Growable:
return AllocGrowableModule(manager, size);
default:
UNREACHABLE();
}
}
WasmCode* AddCode(NativeModule* native_module, uint32_t index, size_t size) {
CodeDesc desc;
memset(reinterpret_cast<void*>(&desc), 0, sizeof(CodeDesc));
std::unique_ptr<byte[]> exec_buff(new byte[size]);
desc.buffer = exec_buff.get();
desc.instr_size = static_cast<int>(size);
return native_module->AddCode(desc, 0, index, 0, {}, false);
}
size_t page() const { return base::OS::AllocatePageSize(); }
v8::Isolate* v8_isolate() const {
return reinterpret_cast<v8::Isolate*>(isolate());
}
};
TEST_F(WasmCodeManagerTest, EmptyCase) {
for (auto style : styles()) {
WasmCodeManager manager(v8_isolate(), 0 * page());
CHECK_EQ(0, manager.remaining_uncommitted());
NativeModulePtr native_module = AllocModule(&manager, 1 * page(), style);
CHECK(native_module);
WasmCode* code = AddCode(native_module.get(), 0, 10);
CHECK_NULL(code);
CHECK_EQ(0, manager.remaining_uncommitted());
native_module.reset();
CHECK_EQ(0, manager.remaining_uncommitted());
}
}
TEST_F(WasmCodeManagerTest, AllocateAndGoOverLimit) {
for (auto style : styles()) {
WasmCodeManager manager(v8_isolate(), 1 * page());
CHECK_EQ(1 * page(), manager.remaining_uncommitted());
NativeModulePtr native_module = AllocModule(&manager, 1 * page(), style);
CHECK(native_module);
CHECK_EQ(1 * page(), manager.remaining_uncommitted());
uint32_t index = 0;
WasmCode* code = AddCode(native_module.get(), index++, 1 * kCodeAlignment);
CHECK_NOT_NULL(code);
CHECK_EQ(0, manager.remaining_uncommitted());
code = AddCode(native_module.get(), index++, 3 * kCodeAlignment);
CHECK_NOT_NULL(code);
CHECK_EQ(0, manager.remaining_uncommitted());
code = AddCode(native_module.get(), index++, page() - 4 * kCodeAlignment);
CHECK_NOT_NULL(code);
CHECK_EQ(0, manager.remaining_uncommitted());
code = AddCode(native_module.get(), index++, 1 * kCodeAlignment);
CHECK_NULL(code);
CHECK_EQ(0, manager.remaining_uncommitted());
native_module.reset();
CHECK_EQ(1 * page(), manager.remaining_uncommitted());
}
}
TEST_F(WasmCodeManagerTest, TotalLimitIrrespectiveOfModuleCount) {
for (auto style : styles()) {
WasmCodeManager manager(v8_isolate(), 1 * page());
NativeModulePtr nm1 = AllocModule(&manager, 1 * page(), style);
NativeModulePtr nm2 = AllocModule(&manager, 1 * page(), style);
CHECK(nm1);
CHECK(nm2);
WasmCode* code = AddCode(nm1.get(), 0, 1 * page());
CHECK_NOT_NULL(code);
code = AddCode(nm2.get(), 0, 1 * page());
CHECK_NULL(code);
}
}
TEST_F(WasmCodeManagerTest, DifferentHeapsApplyLimitsIndependently) {
for (auto style : styles()) {
WasmCodeManager manager1(v8_isolate(), 1 * page());
WasmCodeManager manager2(v8_isolate(), 2 * page());
NativeModulePtr nm1 = AllocModule(&manager1, 1 * page(), style);
NativeModulePtr nm2 = AllocModule(&manager2, 1 * page(), style);
CHECK(nm1);
CHECK(nm2);
WasmCode* code = AddCode(nm1.get(), 0, 1 * page());
CHECK_NOT_NULL(code);
CHECK_EQ(0, manager1.remaining_uncommitted());
code = AddCode(nm2.get(), 0, 1 * page());
CHECK_NOT_NULL(code);
}
}
TEST_F(WasmCodeManagerTest, GrowingVsFixedModule) {
for (auto style : styles()) {
WasmCodeManager manager(v8_isolate(), 3 * page());
NativeModulePtr nm = AllocModule(&manager, 1 * page(), style);
WasmCode* code = AddCode(nm.get(), 0, 1 * page() + kCodeAlignment);
if (style == Fixed) {
CHECK_NULL(code);
CHECK_EQ(manager.remaining_uncommitted(), 3 * page());
} else {
CHECK_NOT_NULL(code);
CHECK_EQ(manager.remaining_uncommitted(), 1 * page());
}
}
}
TEST_F(WasmCodeManagerTest, CommitIncrements) {
for (auto style : styles()) {
WasmCodeManager manager(v8_isolate(), 10 * page());
NativeModulePtr nm = AllocModule(&manager, 3 * page(), style);
WasmCode* code = AddCode(nm.get(), 0, kCodeAlignment);
CHECK_NOT_NULL(code);
CHECK_EQ(manager.remaining_uncommitted(), 9 * page());
code = AddCode(nm.get(), 1, 2 * page());
CHECK_NOT_NULL(code);
CHECK_EQ(manager.remaining_uncommitted(), 7 * page());
code = AddCode(nm.get(), 2, page() - kCodeAlignment);
CHECK_NOT_NULL(code);
CHECK_EQ(manager.remaining_uncommitted(), 7 * page());
}
}
TEST_F(WasmCodeManagerTest, Lookup) {
for (auto style : styles()) {
WasmCodeManager manager(v8_isolate(), 2 * page());
NativeModulePtr nm1 = AllocModule(&manager, 1 * page(), style);
NativeModulePtr nm2 = AllocModule(&manager, 1 * page(), style);
WasmCode* code1_0 = AddCode(nm1.get(), 0, kCodeAlignment);
CHECK_EQ(nm1.get(), code1_0->owner());
WasmCode* code1_1 = AddCode(nm1.get(), 1, kCodeAlignment);
WasmCode* code2_0 = AddCode(nm2.get(), 0, kCodeAlignment);
WasmCode* code2_1 = AddCode(nm2.get(), 1, kCodeAlignment);
CHECK_EQ(nm2.get(), code2_1->owner());
CHECK_EQ(0, code1_0->index());
CHECK_EQ(1, code1_1->index());
CHECK_EQ(0, code2_0->index());
CHECK_EQ(1, code2_1->index());
// we know the manager object is allocated here, so we shouldn't
// find any WasmCode* associated with that ptr.
WasmCode* not_found =
manager.LookupCode(reinterpret_cast<Address>(&manager));
CHECK_NULL(not_found);
WasmCode* found = manager.LookupCode(code1_0->instructions().start());
CHECK_EQ(found, code1_0);
found = manager.LookupCode(code2_1->instructions().start() +
(code2_1->instructions().size() / 2));
CHECK_EQ(found, code2_1);
found = manager.LookupCode(code2_1->instructions().start() +
code2_1->instructions().size() - 1);
CHECK_EQ(found, code2_1);
found = manager.LookupCode(code2_1->instructions().start() +
code2_1->instructions().size());
CHECK_NULL(found);
Address mid_code1_1 =
code1_1->instructions().start() + (code1_1->instructions().size() / 2);
CHECK_EQ(code1_1, manager.LookupCode(mid_code1_1));
nm1.reset();
CHECK_NULL(manager.LookupCode(mid_code1_1));
}
}
TEST_F(WasmCodeManagerTest, MultiManagerLookup) {
for (auto style : styles()) {
WasmCodeManager manager1(v8_isolate(), 2 * page());
WasmCodeManager manager2(v8_isolate(), 2 * page());
NativeModulePtr nm1 = AllocModule(&manager1, 1 * page(), style);
NativeModulePtr nm2 = AllocModule(&manager2, 1 * page(), style);
WasmCode* code1_0 = AddCode(nm1.get(), 0, kCodeAlignment);
CHECK_EQ(nm1.get(), code1_0->owner());
WasmCode* code1_1 = AddCode(nm1.get(), 1, kCodeAlignment);
WasmCode* code2_0 = AddCode(nm2.get(), 0, kCodeAlignment);
WasmCode* code2_1 = AddCode(nm2.get(), 1, kCodeAlignment);
CHECK_EQ(nm2.get(), code2_1->owner());
CHECK_EQ(0, code1_0->index());
CHECK_EQ(1, code1_1->index());
CHECK_EQ(0, code2_0->index());
CHECK_EQ(1, code2_1->index());
CHECK_EQ(code1_0, manager1.LookupCode(code1_0->instructions().start()));
CHECK_NULL(manager2.LookupCode(code1_0->instructions().start()));
}
}
TEST_F(WasmCodeManagerTest, LookupWorksAfterRewrite) {
for (auto style : styles()) {
WasmCodeManager manager(v8_isolate(), 2 * page());
NativeModulePtr nm1 = AllocModule(&manager, 1 * page(), style);
WasmCode* code0 = AddCode(nm1.get(), 0, kCodeAlignment);
WasmCode* code1 = AddCode(nm1.get(), 1, kCodeAlignment);
CHECK_EQ(0, code0->index());
CHECK_EQ(1, code1->index());
CHECK_EQ(code1, manager.LookupCode(code1->instructions().start()));
WasmCode* code1_1 = AddCode(nm1.get(), 1, kCodeAlignment);
CHECK_EQ(1, code1_1->index());
CHECK_EQ(code1, manager.LookupCode(code1->instructions().start()));
CHECK_EQ(code1_1, manager.LookupCode(code1_1->instructions().start()));
}
}
} // namespace wasm_heap_unittest
} // namespace wasm
} // namespace internal