Commit a3218aa7 authored by Clemens Hammacher, committed by Commit Bot

[wasm] Add support to allocate in a specific code region

For multiple code spaces with multiple jump tables, we need to allocate
the space for the jump tables within specific regions (the new code
spaces) so they are within reachable distance.
This CL adds support for that, and uses it for the first jump table.

R=mstarzinger@chromium.org

Bug: v8:9477
Change-Id: Ibdf05c9500c2dfdb2c5f5f920b4422339aaab810
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/1733069
Commit-Queue: Clemens Hammacher <clemensh@chromium.org>
Reviewed-by: Michael Starzinger <mstarzinger@chromium.org>
Cr-Commit-Position: refs/heads/master@{#63074}
parent ad73bbe9
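The constraint behind this change: a jump table is targeted by direct (near) jumps, and direct branches encode only a limited displacement (for example, an ARM64 B instruction reaches roughly +/-128 MiB), so a jump table allocated outside the new code space could be unreachable from it. The sketch below is illustrative only; the constant and helper are not part of this CL:

// Illustrative: why jump tables must live inside the code space they serve.
// kMaxNearJumpDistance is an assumed example value (ARM64 direct branch);
// the real limits are per-architecture.
#include <cstddef>
#include <cstdint>

constexpr size_t kMaxNearJumpDistance = size_t{128} * 1024 * 1024;

bool IsNearJumpReachable(uintptr_t from, uintptr_t to) {
  size_t distance = from > to ? from - to : to - from;
  return distance <= kMaxNearJumpDistance;
}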
@@ -45,6 +45,13 @@ class AddressRegion {
     return contains(region.address_, region.size_);
   }
 
+  base::AddressRegion GetOverlap(AddressRegion region) const {
+    Address overlap_start = std::max(begin(), region.begin());
+    Address overlap_end =
+        std::max(overlap_start, std::min(end(), region.end()));
+    return {overlap_start, overlap_end - overlap_start};
+  }
+
   bool operator==(AddressRegion other) const {
     return address_ == other.address_ && size_ == other.size_;
   }
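The new {GetOverlap} helper is what makes the region-constrained allocation below possible. A minimal standalone sketch of its semantics (the {Region} struct and the addresses are illustrative, not V8's actual base::AddressRegion):

#include <algorithm>
#include <cassert>
#include <cstddef>
#include <cstdint>

struct Region {
  uintptr_t address;
  size_t size;
  uintptr_t begin() const { return address; }
  uintptr_t end() const { return address + size; }

  // Clamping overlap_end to overlap_start yields an empty region (size 0)
  // when the two regions are disjoint, instead of underflowing.
  Region GetOverlap(Region other) const {
    uintptr_t overlap_start = std::max(begin(), other.begin());
    uintptr_t overlap_end =
        std::max(overlap_start, std::min(end(), other.end()));
    return {overlap_start, overlap_end - overlap_start};
  }
};

int main() {
  Region a{0x1000, 0x100};  // [0x1000, 0x1100)
  Region b{0x1080, 0x100};  // [0x1080, 0x1180)
  Region c{0x2000, 0x100};  // disjoint from a
  assert(a.GetOverlap(b).begin() == 0x1080 && a.GetOverlap(b).size == 0x80);
  assert(a.GetOverlap(c).size == 0);  // empty overlap, no underflow
}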
@@ -88,13 +88,30 @@ base::AddressRegion DisjointAllocationPool::Merge(base::AddressRegion region) {
 }
 
 base::AddressRegion DisjointAllocationPool::Allocate(size_t size) {
+  return AllocateInRegion(size,
+                          {kNullAddress, std::numeric_limits<size_t>::max()});
+}
+
+base::AddressRegion DisjointAllocationPool::AllocateInRegion(
+    size_t size, base::AddressRegion region) {
   for (auto it = regions_.begin(), end = regions_.end(); it != end; ++it) {
-    if (size > it->size()) continue;
-    base::AddressRegion ret{it->begin(), size};
+    base::AddressRegion overlap = it->GetOverlap(region);
+    if (size > overlap.size()) continue;
+    base::AddressRegion ret{overlap.begin(), size};
     if (size == it->size()) {
+      // We use the full region --> erase the region from {regions_}.
       regions_.erase(it);
-    } else {
+    } else if (ret.begin() == it->begin()) {
+      // We return a region at the start --> shrink remaining region from front.
       *it = base::AddressRegion{it->begin() + size, it->size() - size};
+    } else if (ret.end() == it->end()) {
+      // We return a region at the end --> shrink remaining region.
+      *it = base::AddressRegion{it->begin(), it->size() - size};
+    } else {
+      // We return something in the middle --> split the remaining region.
+      regions_.insert(
+          it, base::AddressRegion{it->begin(), ret.begin() - it->begin()});
+      *it = base::AddressRegion{ret.end(), it->end() - ret.end()};
     }
     return ret;
   }
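The four cases above (use the whole free region, shrink it from the front, shrink it from the back, or split it in the middle) keep the pool's free list disjoint. A simplified standalone sketch of the same case analysis, not the real DisjointAllocationPool:

#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <iostream>
#include <list>

struct Region {
  uintptr_t begin;
  size_t size;
  uintptr_t end() const { return begin + size; }
};

// Allocates {size} bytes from {free_list}, constrained to {within}.
Region AllocateIn(std::list<Region>& free_list, size_t size, Region within) {
  for (auto it = free_list.begin(); it != free_list.end(); ++it) {
    uintptr_t lo = std::max(it->begin, within.begin);
    uintptr_t hi = std::max(lo, std::min(it->end(), within.end()));
    if (hi - lo < size) continue;  // no usable overlap with {within}
    Region ret{lo, size};
    if (size == it->size) {
      free_list.erase(it);  // case 1: the full free region is used
    } else if (ret.begin == it->begin) {
      *it = {it->begin + size, it->size - size};  // case 2: shrink front
    } else if (ret.end() == it->end()) {
      *it = {it->begin, it->size - size};  // case 3: shrink back
    } else {
      // case 4: allocation in the middle; split the free region in two.
      free_list.insert(it, {it->begin, ret.begin - it->begin});
      *it = {ret.end(), it->end() - ret.end()};
    }
    return ret;
  }
  return {0, 0};  // empty region signals failure
}

int main() {
  std::list<Region> free_list{{0x1000, 0x1000}};  // one free region
  // Constrain the allocation to the middle of the free region:
  Region r = AllocateIn(free_list, 0x100, {0x1400, 0x100});
  std::cout << std::hex << r.begin << "\n";  // 0x1400; free list is now split
}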
@@ -460,16 +477,26 @@ base::SmallVector<base::AddressRegion, 1> SplitRangeByReservationsIfNeeded(
 Vector<byte> WasmCodeAllocator::AllocateForCode(NativeModule* native_module,
                                                 size_t size) {
+  return AllocateForCodeInRegion(
+      native_module, size, {kNullAddress, std::numeric_limits<size_t>::max()});
+}
+
+Vector<byte> WasmCodeAllocator::AllocateForCodeInRegion(
+    NativeModule* native_module, size_t size, base::AddressRegion region) {
   base::MutexGuard lock(&mutex_);
   DCHECK_EQ(code_manager_, native_module->engine()->code_manager());
   DCHECK_LT(0, size);
   v8::PageAllocator* page_allocator = GetPlatformPageAllocator();
+  // This happens under a lock assumed by the caller.
   size = RoundUp<kCodeAlignment>(size);
-  base::AddressRegion code_space = free_code_space_.Allocate(size);
+  base::AddressRegion code_space =
+      free_code_space_.AllocateInRegion(size, region);
   if (code_space.is_empty()) {
-    if (!can_request_more_memory_) {
-      V8::FatalProcessOutOfMemory(nullptr, "wasm code reservation");
+    const bool in_specific_region =
+        region.size() < std::numeric_limits<size_t>::max();
+    if (!can_request_more_memory_ || in_specific_region) {
+      auto error = in_specific_region ? "wasm code reservation in region"
+                                      : "wasm code reservation";
+      V8::FatalProcessOutOfMemory(nullptr, error);
       UNREACHABLE();
     }
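Note the sentinel convention: the unconstrained {AllocateForCode} path passes {kNullAddress, std::numeric_limits<size_t>::max()}, a region covering the whole address space, so a caller-specified region is detectable purely by its size. A simplified sketch of that convention (the {Region} struct is illustrative):

#include <cstddef>
#include <cstdint>
#include <limits>

struct Region {
  uintptr_t begin;
  size_t size;
};

// A maximal-size region means "allocate anywhere"; anything smaller is a
// caller-imposed constraint, for which growing the code space somewhere
// else would be wrong, hence the immediate OOM above.
constexpr Region kAnywhere{0, std::numeric_limits<size_t>::max()};

bool InSpecificRegion(Region region) {
  return region.size < std::numeric_limits<size_t>::max();
}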
@@ -665,8 +692,9 @@ void NativeModule::ReserveCodeTableForTesting(uint32_t max_functions) {
   CHECK_EQ(1, code_space_data_.size());
 
   // Re-allocate jump table.
-  code_space_data_[0].jump_table = CreateEmptyJumpTable(
-      JumpTableAssembler::SizeForNumberOfSlots(max_functions));
+  code_space_data_[0].jump_table = CreateEmptyJumpTableInRegion(
+      JumpTableAssembler::SizeForNumberOfSlots(max_functions),
+      code_space_data_[0].region);
   main_jump_table_ = code_space_data_[0].jump_table;
 }
@@ -700,8 +728,10 @@ void NativeModule::UseLazyStub(uint32_t func_index) {
   if (!lazy_compile_table_) {
     uint32_t num_slots = module_->num_declared_functions;
     WasmCodeRefScope code_ref_scope;
-    lazy_compile_table_ = CreateEmptyJumpTable(
-        JumpTableAssembler::SizeForNumberOfLazyFunctions(num_slots));
+    DCHECK_EQ(1, code_space_data_.size());
+    lazy_compile_table_ = CreateEmptyJumpTableInRegion(
+        JumpTableAssembler::SizeForNumberOfLazyFunctions(num_slots),
+        code_space_data_[0].region);
     JumpTableAssembler::GenerateLazyCompileTable(
         lazy_compile_table_->instruction_start(), num_slots,
         module_->num_imported_functions,
@@ -725,9 +755,10 @@ void NativeModule::SetRuntimeStubs(Isolate* isolate) {
   DCHECK_EQ(kNullAddress, runtime_stub_entries_[0]);  // Only called once.
 #ifdef V8_EMBEDDED_BUILTINS
   WasmCodeRefScope code_ref_scope;
-  WasmCode* jump_table =
-      CreateEmptyJumpTable(JumpTableAssembler::SizeForNumberOfStubSlots(
-          WasmCode::kRuntimeStubCount));
+  DCHECK_EQ(1, code_space_data_.size());
+  WasmCode* jump_table = CreateEmptyJumpTableInRegion(
+      JumpTableAssembler::SizeForNumberOfStubSlots(WasmCode::kRuntimeStubCount),
+      code_space_data_[0].region);
   Address base = jump_table->instruction_start();
   EmbeddedData embedded_data = EmbeddedData::FromBlob();
 #define RUNTIME_STUB(Name) Builtins::k##Name,
@@ -1065,11 +1096,13 @@ bool NativeModule::HasCode(uint32_t index) const {
   return code_table_[index - module_->num_imported_functions] != nullptr;
 }
 
-WasmCode* NativeModule::CreateEmptyJumpTable(uint32_t jump_table_size) {
+WasmCode* NativeModule::CreateEmptyJumpTableInRegion(
+    uint32_t jump_table_size, base::AddressRegion region) {
   // Only call this if we really need a jump table.
   DCHECK_LT(0, jump_table_size);
   Vector<uint8_t> code_space =
-      code_allocator_.AllocateForCode(this, jump_table_size);
+      code_allocator_.AllocateForCodeInRegion(this, jump_table_size, region);
+  DCHECK(!code_space.empty());
   ZapCode(reinterpret_cast<Address>(code_space.begin()), code_space.size());
   std::unique_ptr<WasmCode> code{new WasmCode{
       this,  // native_module
@@ -1122,8 +1155,8 @@ void NativeModule::AddCodeSpace(base::AddressRegion region) {
       has_functions && is_first_code_space && !implicit_alloc_disabled;
 
   if (needs_jump_table) {
-    jump_table = CreateEmptyJumpTable(
-        JumpTableAssembler::SizeForNumberOfSlots(num_wasm_functions));
+    jump_table = CreateEmptyJumpTableInRegion(
+        JumpTableAssembler::SizeForNumberOfSlots(num_wasm_functions), region);
     CHECK(region.contains(jump_table->instruction_start()));
   }
@@ -60,6 +60,10 @@ class V8_EXPORT_PRIVATE DisjointAllocationPool final {
   // failure.
   base::AddressRegion Allocate(size_t size);
 
+  // Allocate a contiguous region of size {size} within {region}. Return an
+  // empty region on failure.
+  base::AddressRegion AllocateInRegion(size_t size, base::AddressRegion);
+
   bool IsEmpty() const { return regions_.empty(); }
 
   const std::list<base::AddressRegion>& regions() const { return regions_; }
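A hypothetical caller, to show the contract at this layer (failure yields an empty region rather than a crash; {pool}, {size}, and {region} are placeholders):

// Hypothetical usage of DisjointAllocationPool::AllocateInRegion.
base::AddressRegion result = pool.AllocateInRegion(size, region);
if (result.is_empty()) {
  // No free block of {size} bytes lies within {region}; the caller decides
  // how to recover, e.g. by reserving a new code space.
}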
@@ -294,6 +298,11 @@ class WasmCodeAllocator {
   // Allocate code space. Returns a valid buffer or fails with OOM (crash).
   Vector<byte> AllocateForCode(NativeModule*, size_t size);
 
+  // Allocate code space within a specific region. Returns a valid buffer or
+  // fails with OOM (crash).
+  Vector<byte> AllocateForCodeInRegion(NativeModule*, size_t size,
+                                       base::AddressRegion);
+
   // Sets permissions of all owned code space to executable, or read-write (if
   // {executable} is false). Returns true on success.
   V8_EXPORT_PRIVATE bool SetExecutable(bool executable);
@@ -517,7 +526,8 @@ class V8_EXPORT_PRIVATE NativeModule final {
   WasmCode* AddAndPublishAnonymousCode(Handle<Code>, WasmCode::Kind kind,
                                        const char* name = nullptr);
 
-  WasmCode* CreateEmptyJumpTable(uint32_t jump_table_size);
+  WasmCode* CreateEmptyJumpTableInRegion(uint32_t jump_table_size,
+                                         base::AddressRegion);
 
   // Called by the {WasmCodeAllocator} to register a new code space.
   void AddCodeSpace(base::AddressRegion);