Commit c2835df6 authored by Clemens Hammacher, committed by Commit Bot

[wasm] Remove trap handler fallback

The trap handler fallback is flaky, and was never enabled since it
never worked reliably. This CL removes
a) the --wasm-trap-handler-fallback flag,
b) the distinction between soft and hard address space limit,
c) methods to check whether memory has guard regions (it will always
  have them on 64 bit architectures),
d) associated runtime functions,
e) the trap handler fallback tests,
f) recompilation logic for the fallback.

R=titzer@chromium.org

Bug: v8:8746
Change-Id: I7f4682b8cd5470906dd8579ff1fdc9b1a3c0f0e7
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/1570023
Reviewed-by: Ben Titzer <titzer@chromium.org>
Commit-Queue: Clemens Hammacher <clemensh@chromium.org>
Cr-Commit-Position: refs/heads/master@{#60904}
parent dd29683f
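
Background for the change, not part of the CL: the two bounds-checking strategies at play can be sketched as follows. This is an illustrative C++ sketch with made-up names, not V8 code. Trap-handler-based code emits no explicit check and relies on a fault in the guard region being translated into a wasm trap; the removed fallback recompiled modules to code with explicit checks whenever a memory without full guard regions had to be used.

// Illustrative sketch only (made-up names, not V8 code): the two
// bounds-checking strategies this CL is about.
#include <cstdint>
#include <cstring>

// Fallback style: every memory access carries an explicit bounds check.
int32_t LoadWithExplicitBoundsCheck(const uint8_t* mem, size_t mem_size,
                                    uint32_t index, bool* trapped) {
  if (static_cast<size_t>(index) + sizeof(int32_t) > mem_size) {
    *trapped = true;  // would become a wasm trap
    return 0;
  }
  int32_t value;
  std::memcpy(&value, mem + index, sizeof(value));
  return value;
}

// Trap-handler style: no check emitted. Any 32-bit index (plus any constant
// offset) lands inside the huge reserved-but-inaccessible guard region, the
// access faults, and a signal handler converts the fault into a wasm trap.
int32_t LoadRelyingOnGuardRegion(const uint8_t* mem, uint32_t index) {
  int32_t value;
  std::memcpy(&value, mem + index, sizeof(value));
  return value;
}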
@@ -669,8 +669,6 @@ DEFINE_IMPLICATION(future, wasm_shared_code)
 DEFINE_BOOL(wasm_trap_handler, true,
             "use signal handlers to catch out of bounds memory access in wasm"
             " (currently Linux x86_64 only)")
-DEFINE_BOOL(wasm_trap_handler_fallback, false,
-            "Use bounds checks if guarded memory is not available")
 DEFINE_BOOL(wasm_fuzzer_gen_test, false,
             "Generate a test case when running a wasm fuzzer")
 DEFINE_IMPLICATION(wasm_fuzzer_gen_test, single_threaded)
@@ -1250,13 +1250,5 @@ RUNTIME_FUNCTION(Runtime_FreezeWasmLazyCompilation) {
   return ReadOnlyRoots(isolate).undefined_value();
 }
-RUNTIME_FUNCTION(Runtime_WasmMemoryHasFullGuardRegion) {
-  DCHECK_EQ(1, args.length());
-  DisallowHeapAllocation no_gc;
-  CONVERT_ARG_CHECKED(WasmMemoryObject, memory, 0);
-  return isolate->heap()->ToBoolean(memory->has_full_guard_region(isolate));
-}
 }  // namespace internal
 }  // namespace v8
@@ -516,7 +516,6 @@ namespace internal {
   F(WasmGetNumberOfInstances, 1, 1) \
   F(WasmNumInterpretedCalls, 1, 1) \
   F(WasmTraceMemory, 1, 1) \
-  F(WasmMemoryHasFullGuardRegion, 1, 1) \
   F(SetWasmThreadsEnabled, 1, 1)
 #define FOR_EACH_INTRINSIC_TYPEDARRAY(F, I) \
@@ -1054,14 +1054,6 @@ std::shared_ptr<NativeModule> CompileToNativeModule(
   return native_module;
 }
-void CompileNativeModuleWithExplicitBoundsChecks(Isolate* isolate,
-                                                 ErrorThrower* thrower,
-                                                 const WasmModule* wasm_module,
-                                                 NativeModule* native_module) {
-  native_module->DisableTrapHandler();
-  CompileNativeModule(isolate, thrower, wasm_module, native_module);
-}
 AsyncCompileJob::AsyncCompileJob(
     Isolate* isolate, const WasmFeatures& enabled,
     std::unique_ptr<byte[]> bytes_copy, size_t length, Handle<Context> context,
@@ -42,11 +42,6 @@ std::shared_ptr<NativeModule> CompileToNativeModule(
     std::shared_ptr<const WasmModule> module, const ModuleWireBytes& wire_bytes,
     Handle<FixedArray>* export_wrappers_out);
-void CompileNativeModuleWithExplicitBoundsChecks(Isolate* isolate,
-                                                 ErrorThrower* thrower,
-                                                 const WasmModule* wasm_module,
-                                                 NativeModule* native_module);
 V8_EXPORT_PRIVATE
 void CompileJsToWasmWrappers(Isolate* isolate, const WasmModule* module,
                              Handle<FixedArray> export_wrappers);
@@ -85,11 +85,6 @@ class InstanceBuilder {
   Handle<WasmExportedFunction> start_function_;
   std::vector<SanitizedImport> sanitized_imports_;
-  UseTrapHandler use_trap_handler() const {
-    return module_object_->native_module()->use_trap_handler() ? kUseTrapHandler
-                                                                : kNoTrapHandler;
-  }
   // Helper routines to print out errors with imports.
 #define ERROR_THROWER_WITH_MESSAGE(TYPE) \
   void Report##TYPE(const char* error, uint32_t index, \
@@ -246,12 +241,11 @@ MaybeHandle<WasmInstanceObject> InstanceBuilder::Build() {
   // Record build time into correct bucket, then build instance.
   TimedHistogramScope wasm_instantiate_module_time_scope(SELECT_WASM_COUNTER(
       isolate_->counters(), module_->origin, wasm_instantiate, module_time));
+  NativeModule* native_module = module_object_->native_module();
   //--------------------------------------------------------------------------
   // Allocate the memory array buffer.
   //--------------------------------------------------------------------------
-  // We allocate the memory buffer before cloning or reusing the compiled module
-  // so we will know whether we need to recompile with bounds checks.
   uint32_t initial_pages = module_->initial_pages;
   auto initial_pages_counter = SELECT_WASM_COUNTER(
       isolate_->counters(), module_->origin, wasm, min_mem_pages_count);
@@ -272,10 +266,11 @@ MaybeHandle<WasmInstanceObject> InstanceBuilder::Build() {
     Handle<JSArrayBuffer> memory = memory_.ToHandleChecked();
     memory->set_is_detachable(false);
-    DCHECK_IMPLIES(use_trap_handler(), module_->origin == kAsmJsOrigin ||
-                                           memory->is_wasm_memory() ||
-                                           memory->backing_store() == nullptr);
-  } else if (initial_pages > 0 || use_trap_handler()) {
+    DCHECK_IMPLIES(native_module->use_trap_handler(),
+                   module_->origin == kAsmJsOrigin ||
+                       memory->is_wasm_memory() ||
+                       memory->backing_store() == nullptr);
+  } else if (initial_pages > 0 || native_module->use_trap_handler()) {
     // We need to unconditionally create a guard region if using trap handlers,
     // even when the size is zero to prevent null-dereference issues
     // (e.g. https://crbug.com/769637).
@@ -288,39 +283,9 @@ MaybeHandle<WasmInstanceObject> InstanceBuilder::Build() {
     }
   }
-  //--------------------------------------------------------------------------
-  // Recompile module if using trap handlers but could not get guarded memory
-  //--------------------------------------------------------------------------
-  if (module_->origin == kWasmOrigin && use_trap_handler()) {
-    // Make sure the memory has suitable guard regions.
-    WasmMemoryTracker* const memory_tracker =
-        isolate_->wasm_engine()->memory_tracker();
-    if (!memory_tracker->HasFullGuardRegions(
-            memory_.ToHandleChecked()->backing_store())) {
-      if (!FLAG_wasm_trap_handler_fallback) {
-        thrower_->LinkError(
-            "Provided memory is lacking guard regions but fallback was "
-            "disabled.");
-        return {};
-      }
-      TRACE("Recompiling module without bounds checks\n");
-      ErrorThrower thrower(isolate_, "recompile");
-      auto native_module = module_object_->native_module();
-      CompileNativeModuleWithExplicitBoundsChecks(isolate_, &thrower, module_,
-                                                  native_module);
-      if (thrower.error()) {
-        return {};
-      }
-      DCHECK(!native_module->use_trap_handler());
-    }
-  }
   //--------------------------------------------------------------------------
   // Create the WebAssembly.Instance object.
   //--------------------------------------------------------------------------
-  NativeModule* native_module = module_object_->native_module();
   TRACE("New module instantiation for %p\n", native_module);
   Handle<WasmInstanceObject> instance =
       WasmInstanceObject::New(isolate_, module_object_);
@@ -1057,20 +1057,6 @@ uint32_t NativeModule::GetFunctionIndexFromJumpTableSlot(
   return module_->num_imported_functions + slot_idx;
 }
-void NativeModule::DisableTrapHandler() {
-  // Switch {use_trap_handler_} from true to false.
-  DCHECK(use_trap_handler_);
-  use_trap_handler_ = kNoTrapHandler;
-  // Clear the code table (just to increase the chances to hit an error if we
-  // forget to re-add all code).
-  uint32_t num_wasm_functions = module_->num_declared_functions;
-  memset(code_table_.get(), 0, num_wasm_functions * sizeof(WasmCode*));
-  // TODO(clemensh): Actually free the owned code, such that the memory can be
-  // recycled.
-}
 const char* NativeModule::GetRuntimeStubName(Address runtime_stub_entry) const {
 #define RETURN_NAME(Name) \
   if (runtime_stub_entries_[WasmCode::k##Name] == runtime_stub_entry) { \
@@ -1148,10 +1134,7 @@ VirtualMemory WasmCodeManager::TryAllocate(size_t size, void* hint) {
   v8::PageAllocator* page_allocator = GetPlatformPageAllocator();
   DCHECK_GT(size, 0);
   size = RoundUp(size, page_allocator->AllocatePageSize());
-  if (!memory_tracker_->ReserveAddressSpace(size,
-                                            WasmMemoryTracker::kHardLimit)) {
-    return {};
-  }
+  if (!memory_tracker_->ReserveAddressSpace(size)) return {};
   if (hint == nullptr) hint = page_allocator->GetRandomMmapAddr();
   VirtualMemory mem(page_allocator, size, hint,
@@ -342,14 +342,6 @@ class V8_EXPORT_PRIVATE NativeModule final {
     return jump_table_->contains(address);
   }
-  // Transition this module from code relying on trap handlers (i.e. without
-  // explicit memory bounds checks) to code that does not require trap handlers
-  // (i.e. code with explicit bounds checks).
-  // This method must only be called if {use_trap_handler()} is true (it will be
-  // false afterwards). All code in this {NativeModule} needs to be re-added
-  // after calling this method.
-  void DisableTrapHandler();
   // Returns the target to call for the given function (returns a jump table
   // slot within {jump_table_}).
   Address GetCallTargetForFunction(uint32_t func_index) const;
@@ -27,16 +27,6 @@ void AddAllocationStatusSample(Isolate* isolate,
                                static_cast<int>(status));
 }
-size_t GetAllocationLength(uint32_t size, bool require_full_guard_regions) {
-  if (require_full_guard_regions) {
-    return RoundUp(kWasmMaxHeapOffset + kNegativeGuardSize, CommitPageSize());
-  } else {
-    return RoundUp(
-        base::bits::RoundUpToPowerOfTwo32(static_cast<uint32_t>(size)),
-        kWasmPageSize);
-  }
-}
 bool RunWithGCAndRetry(const std::function<bool()>& fn, Heap* heap,
                        bool* did_retry) {
   // Try up to three times; getting rid of dead JSArrayBuffer allocations might
@@ -60,15 +50,13 @@ void* TryAllocateBackingStore(WasmMemoryTracker* memory_tracker, Heap* heap,
                               size_t* allocation_length) {
   using AllocationStatus = WasmMemoryTracker::AllocationStatus;
 #if V8_TARGET_ARCH_64_BIT
-  bool require_full_guard_regions = true;
+  constexpr bool kRequireFullGuardRegions = true;
 #else
-  bool require_full_guard_regions = false;
+  constexpr bool kRequireFullGuardRegions = false;
 #endif
   // Let the WasmMemoryTracker know we are going to reserve a bunch of
   // address space.
-  // TODO(7881): do not use static_cast<uint32_t>() here
-  uint32_t reservation_size =
-      static_cast<uint32_t>((max_size > size) ? max_size : size);
+  size_t reservation_size = std::max(max_size, size);
   bool did_retry = false;
   auto reserve_memory_space = [&] {
@@ -79,40 +67,32 @@ void* TryAllocateBackingStore(WasmMemoryTracker* memory_tracker, Heap* heap,
     // To protect against 32-bit integer overflow issues, we also
     // protect the 2GiB before the valid part of the memory buffer.
-    *allocation_length =
-        GetAllocationLength(reservation_size, require_full_guard_regions);
+    *allocation_length =
+        kRequireFullGuardRegions
+            ? RoundUp(kWasmMaxHeapOffset + kNegativeGuardSize, CommitPageSize())
+            : RoundUp(base::bits::RoundUpToPowerOfTwo(reservation_size),
+                      kWasmPageSize);
     DCHECK_GE(*allocation_length, size);
     DCHECK_GE(*allocation_length, kWasmPageSize);
-    auto limit = require_full_guard_regions ? WasmMemoryTracker::kSoftLimit
-                                            : WasmMemoryTracker::kHardLimit;
-    return memory_tracker->ReserveAddressSpace(*allocation_length, limit);
+    return memory_tracker->ReserveAddressSpace(*allocation_length);
   };
   if (!RunWithGCAndRetry(reserve_memory_space, heap, &did_retry)) {
     // Reset reservation_size to initial size so that at least the initial size
     // can be allocated if maximum size reservation is not possible.
-    reservation_size = static_cast<uint32_t>(size);
-    // If we fail to allocate guard regions and the fallback is enabled, then
-    // retry without full guard regions.
-    bool fail = true;
-    if (require_full_guard_regions && FLAG_wasm_trap_handler_fallback) {
-      require_full_guard_regions = false;
-      fail = !RunWithGCAndRetry(reserve_memory_space, heap, &did_retry);
-    }
-    if (fail) {
-      // We are over the address space limit. Fail.
-      //
-      // When running under the correctness fuzzer (i.e.
-      // --abort-on-stack-or-string-length-overflow is preset), we crash
-      // instead so it is not incorrectly reported as a correctness
-      // violation. See https://crbug.com/828293#c4
-      if (FLAG_abort_on_stack_or_string_length_overflow) {
-        FATAL("could not allocate wasm memory");
-      }
-      AddAllocationStatusSample(
-          heap->isolate(), AllocationStatus::kAddressSpaceLimitReachedFailure);
-      return nullptr;
-    }
+    reservation_size = size;
+    // We are over the address space limit. Fail.
+    //
+    // When running under the correctness fuzzer (i.e.
+    // --abort-on-stack-or-string-length-overflow is preset), we crash
+    // instead so it is not incorrectly reported as a correctness
+    // violation. See https://crbug.com/828293#c4
+    if (FLAG_abort_on_stack_or_string_length_overflow) {
+      FATAL("could not allocate wasm memory");
+    }
+    AddAllocationStatusSample(
+        heap->isolate(), AllocationStatus::kAddressSpaceLimitReachedFailure);
+    return nullptr;
   }
   // The Reserve makes the whole region inaccessible by default.
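
The RoundUp above is where the "fast memory" footprint comes from. A back-of-the-envelope sketch follows; the two constants are assumptions consistent with this CL's test change (10 GiB per memory, and the 1 TiB + 4 GiB limit defined just below), not values copied from V8 headers.

// Sketch of the per-memory reservation arithmetic under stated assumptions:
// kNegativeGuardSize = 2 GiB (the "2GiB before the buffer" mentioned above)
// and kWasmMaxHeapOffset = 8 GiB (4 GiB addressable memory + 4 GiB maximum
// constant offset).
#include <cstdint>
#include <cstdio>

int main() {
  constexpr uint64_t GiB = 1024ull * 1024 * 1024;
  constexpr uint64_t kNegativeGuardSize = 2 * GiB;   // assumed
  constexpr uint64_t kWasmMaxHeapOffset = 8 * GiB;   // assumed
  // With full guard regions the reservation is independent of the declared
  // memory size: it must cover every encodable index + offset.
  const uint64_t per_memory = kNegativeGuardSize + kWasmMaxHeapOffset;
  constexpr uint64_t kAddressSpaceLimit = 0x10100000000ull;  // 1 TiB + 4 GiB
  printf("reservation per memory: %llu GiB\n",
         static_cast<unsigned long long>(per_memory / GiB));  // 10 GiB
  printf("fast memories before hitting the limit: %llu\n",
         static_cast<unsigned long long>(kAddressSpaceLimit / per_memory));
  return 0;
}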
@@ -130,7 +110,7 @@ void* TryAllocateBackingStore(WasmMemoryTracker* memory_tracker, Heap* heap,
   }
   byte* memory = reinterpret_cast<byte*>(*allocation_base);
-  if (require_full_guard_regions) {
+  if (kRequireFullGuardRegions) {
     memory += kNegativeGuardSize;
   }
@@ -157,14 +137,11 @@ void* TryAllocateBackingStore(WasmMemoryTracker* memory_tracker, Heap* heap,
 #if V8_TARGET_ARCH_MIPS64
 // MIPS64 has a user space of 2^40 bytes on most processors,
 // address space limits needs to be smaller.
-constexpr size_t kAddressSpaceSoftLimit = 0x2100000000L;  // 132 GiB
-constexpr size_t kAddressSpaceHardLimit = 0x4000000000L;  // 256 GiB
+constexpr size_t kAddressSpaceLimit = 0x4000000000L;  // 256 GiB
 #elif V8_TARGET_ARCH_64_BIT
-constexpr size_t kAddressSpaceSoftLimit = 0x6000000000L;   // 384 GiB
-constexpr size_t kAddressSpaceHardLimit = 0x10100000000L;  // 1 TiB + 4 GiB
+constexpr size_t kAddressSpaceLimit = 0x10100000000L;  // 1 TiB + 4 GiB
 #else
-constexpr size_t kAddressSpaceSoftLimit = 0x90000000;  // 2 GiB + 256 MiB
-constexpr size_t kAddressSpaceHardLimit = 0xC0000000;  // 3 GiB
+constexpr size_t kAddressSpaceLimit = 0xC0000000;  // 3 GiB
 #endif
 }  // namespace
@@ -192,10 +169,8 @@ void WasmMemoryTracker::FreeBackingStoreForTesting(base::AddressRegion memory,
                                  reinterpret_cast<void*>(memory.begin()), memory.size()));
 }
-bool WasmMemoryTracker::ReserveAddressSpace(size_t num_bytes,
-                                            ReservationLimit limit) {
-  size_t reservation_limit =
-      limit == kSoftLimit ? kAddressSpaceSoftLimit : kAddressSpaceHardLimit;
+bool WasmMemoryTracker::ReserveAddressSpace(size_t num_bytes) {
+  size_t reservation_limit = kAddressSpaceLimit;
   while (true) {
     size_t old_count = reserved_address_space_.load();
     if (old_count > reservation_limit) return false;
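
The while (true) loop above is a standard lock-free accumulate-with-cap. A self-contained sketch of the same pattern, assuming (as the .load()/retry loop in the diff suggests) that reserved_address_space_ is a std::atomic<size_t>:

// Standalone sketch of the reservation-counter pattern visible above;
// not the V8 implementation.
#include <atomic>
#include <cstddef>

constexpr size_t kAddressSpaceLimit = 0x10100000000ull;  // 1 TiB + 4 GiB (64-bit)
std::atomic<size_t> reserved_address_space_{0};

bool ReserveAddressSpace(size_t num_bytes) {
  while (true) {
    size_t old_count = reserved_address_space_.load();
    // Reject if the limit is already exceeded, or would be; written as a
    // subtraction to avoid overflow in old_count + num_bytes.
    if (old_count > kAddressSpaceLimit) return false;
    if (kAddressSpaceLimit - old_count < num_bytes) return false;
    // compare_exchange_weak may fail spuriously or on contention;
    // the loop simply retries with the refreshed old_count.
    if (reserved_address_space_.compare_exchange_weak(
            old_count, old_count + num_bytes)) {
      return true;
    }
  }
}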
@@ -269,21 +244,6 @@ bool WasmMemoryTracker::IsWasmSharedMemory(const void* buffer_start) {
   return (result != allocations_.end() && result->second.is_shared);
 }
-bool WasmMemoryTracker::HasFullGuardRegions(const void* buffer_start) {
-  base::MutexGuard scope_lock(&mutex_);
-  const auto allocation = allocations_.find(buffer_start);
-  if (allocation == allocations_.end()) {
-    return false;
-  }
-  Address start = reinterpret_cast<Address>(buffer_start);
-  Address limit =
-      reinterpret_cast<Address>(allocation->second.allocation_base) +
-      allocation->second.allocation_length;
-  return start + kWasmMaxHeapOffset < limit;
-}
 void WasmMemoryTracker::MarkWasmMemoryNotGrowable(
     Handle<JSArrayBuffer> buffer) {
   base::MutexGuard scope_lock(&mutex_);
@@ -31,10 +31,7 @@ class WasmMemoryTracker {
   // ReserveAddressSpace attempts to increase the reserved address space counter
   // by {num_bytes}. Returns true if successful (meaning it is okay to go ahead
   // and reserve {num_bytes} bytes), false otherwise.
-  // Use {kSoftLimit} if you can implement a fallback which needs less reserved
-  // memory.
-  enum ReservationLimit { kSoftLimit, kHardLimit };
-  bool ReserveAddressSpace(size_t num_bytes, ReservationLimit limit);
+  bool ReserveAddressSpace(size_t num_bytes);
   void RegisterAllocation(Isolate* isolate, void* allocation_base,
                           size_t allocation_length, void* buffer_start,
@@ -105,10 +102,6 @@ class WasmMemoryTracker {
   bool IsWasmSharedMemory(const void* buffer_start);
-  // Returns whether the given buffer is a Wasm memory with guard regions large
-  // enough to safely use trap handlers.
-  bool HasFullGuardRegions(const void* buffer_start);
   // Returns a pointer to a Wasm buffer's allocation data, or nullptr if the
   // buffer is not tracked.
   V8_EXPORT_PRIVATE const AllocationData* FindAllocationData(
@@ -1092,16 +1092,7 @@ MaybeHandle<JSArrayBuffer> MemoryGrowBuffer(Isolate* isolate,
   if (!wasm::NewArrayBuffer(isolate, new_size).ToHandle(&new_buffer)) {
     return {};
   }
-  wasm::WasmMemoryTracker* const memory_tracker =
-      isolate->wasm_engine()->memory_tracker();
-  // If the old buffer had full guard regions, we can only safely use the new
-  // buffer if it also has full guard regions. Otherwise, we'd have to
-  // recompile all the instances using this memory to insert bounds checks.
   void* old_mem_start = old_buffer->backing_store();
-  if (memory_tracker->HasFullGuardRegions(old_mem_start) &&
-      !memory_tracker->HasFullGuardRegions(new_buffer->backing_store())) {
-    return {};
-  }
   size_t old_size = old_buffer->byte_length();
   if (old_size == 0) return new_buffer;
   memcpy(new_buffer->backing_store(), old_mem_start, old_size);
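
With HasFullGuardRegions gone, MemoryGrowBuffer no longer compares the guard-region status of old and new buffers; growing is simply allocate-new, copy, replace-old. A minimal sketch of that shape, with hypothetical names, not the V8 implementation:

// Illustrative grow-by-reallocation, roughly the shape of the path above.
#include <cstdint>
#include <cstdlib>
#include <cstring>

uint8_t* GrowMemory(uint8_t* old_mem, size_t old_size, size_t new_size) {
  // Allocate the new, larger buffer first; on failure the old buffer stays
  // valid, matching the empty MaybeHandle<> failure path above. calloc
  // zero-fills, matching wasm's requirement that new pages read as zero.
  uint8_t* new_mem = static_cast<uint8_t*>(calloc(new_size, 1));
  if (new_mem == nullptr) return nullptr;
  if (old_size > 0) memcpy(new_mem, old_mem, old_size);  // copy old contents
  free(old_mem);  // the real code detaches the old JSArrayBuffer instead
  return new_mem;
}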
@@ -1192,30 +1183,6 @@ MaybeHandle<WasmMemoryObject> WasmMemoryObject::New(Isolate* isolate,
   return New(isolate, buffer, maximum);
 }
-bool WasmMemoryObject::has_full_guard_region(Isolate* isolate) {
-  const wasm::WasmMemoryTracker::AllocationData* allocation =
-      isolate->wasm_engine()->memory_tracker()->FindAllocationData(
-          array_buffer()->backing_store());
-  CHECK_NOT_NULL(allocation);
-  Address allocation_base =
-      reinterpret_cast<Address>(allocation->allocation_base);
-  Address buffer_start = reinterpret_cast<Address>(allocation->buffer_start);
-  // Return whether the allocation covers every possible Wasm heap index.
-  //
-  // We always have the following relationship:
-  // allocation_base <= buffer_start <= buffer_start + memory_size <=
-  //     allocation_base + allocation_length
-  // (in other words, the buffer fits within the allocation)
-  //
-  // The space between buffer_start + memory_size and allocation_base +
-  // allocation_length is the guard region. Here we make sure the guard region
-  // is large enough for any Wasm heap offset.
-  return buffer_start + wasm::kWasmMaxHeapOffset <=
-         allocation_base + allocation->allocation_length;
-}
 void WasmMemoryObject::AddInstance(Isolate* isolate,
                                    Handle<WasmMemoryObject> memory,
                                    Handle<WasmInstanceObject> instance) {
@@ -335,10 +335,6 @@ class WasmMemoryObject : public JSObject {
                           Handle<WasmInstanceObject> object);
   inline bool has_maximum_pages();
-  // Return whether the underlying backing store has guard regions large enough
-  // to be used with trap handlers.
-  bool has_full_guard_region(Isolate* isolate);
   V8_EXPORT_PRIVATE static Handle<WasmMemoryObject> New(
       Isolate* isolate, MaybeHandle<JSArrayBuffer> buffer, uint32_t maximum);
@@ -235,9 +235,6 @@
   # BUG(v8:8169)
   'external-backing-store-gc': [SKIP],
-  # BUG(v8:8746)
-  'wasm/trap-handler-fallback': [PASS, FAIL],
 }],  # ALWAYS
 ['novfp3 == True', {
@@ -178,7 +178,7 @@ function testAddressSpaceLimit() {
   // 1TiB + 4 GiB, see wasm-memory.h
   const kMaxAddressSpace = 1 * 1024 * 1024 * 1024 * 1024
                          + 4 * 1024 * 1024 * 1024;
-  const kAddressSpacePerMemory = 8 * 1024 * 1024 * 1024;
+  const kAddressSpacePerMemory = 10 * 1024 * 1024 * 1024;
   let last_memory;
   try {
@@ -193,10 +193,7 @@ function testAddressSpaceLimit() {
     assertTrue(e instanceof RangeError);
     return;
   }
-  // If we get here it's because our fallback behavior is working. We may not
-  // be using the fallback, in which case we would have thrown a RangeError in
-  // the previous block.
-  assertTrue(!%WasmMemoryHasFullGuardRegion(last_memory));
+  assertUnreachable("should have reached the address space limit");
 }
 if(%IsWasmTrapHandlerEnabled()) {
deleted file: test/mjsunit/wasm/trap-handler-fallback.js
// Copyright 2018 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// Flags: --allow-natives-syntax --wasm-trap-handler-fallback
load("test/mjsunit/wasm/wasm-module-builder.js");
// Make sure we can get at least one guard region if the trap handler is enabled.
(function CanGetGuardRegionTest() {
print("CanGetGuardRegionTest()");
const memory = new WebAssembly.Memory({initial: 1});
if (%IsWasmTrapHandlerEnabled()) {
assertTrue(%WasmMemoryHasFullGuardRegion(memory));
}
})();
// This test verifies that when we have too many outstanding memories to get
// another fast memory, we fall back on bounds checking rather than failing.
(function TrapHandlerFallbackTest() {
print("TrapHandlerFallbackTest()");
let builder = new WasmModuleBuilder();
builder.addImportedMemory("mod", "imported_mem", 1);
builder.addFunction("load", kSig_i_i)
.addBody([kExprGetLocal, 0, kExprI32LoadMem, 0, 0])
.exportFunc();
let memory;
let instance;
let instances = [];
let fallback_occurred = false;
// Create 135 instances. V8 limits wasm to slightly more than 1 TiB of address
// space per isolate (see kAddressSpaceLimit in wasm-memory.cc), which allows
// up to 128 fast memories. As long as we create more than that, we should
// trigger the fallback behavior.
for (var i = 0; i < 135 && !fallback_occurred; i++) {
memory = new WebAssembly.Memory({initial: 1});
instance = builder.instantiate({mod: {imported_mem: memory}});
instances.push(instance);
assertTraps(kTrapMemOutOfBounds, () => instance.exports.load(1 << 20));
fallback_occurred = !%WasmMemoryHasFullGuardRegion(memory);
}
assertTrue(fallback_occurred);
})();
(function TrapHandlerFallbackTestZeroInitialMemory() {
print("TrapHandlerFallbackTestZeroInitialMemory()");
let builder = new WasmModuleBuilder();
builder.addImportedMemory("mod", "imported_mem", 0);
builder.addFunction("load", kSig_i_i)
.addBody([kExprGetLocal, 0, kExprI32LoadMem, 0, 0])
.exportFunc();
let memory;
let instance;
let instances = [];
let fallback_occurred = false;
// Create 135 instances. V8 limits wasm to slightly more than 1 TiB of address
// space per isolate (see kAddressSpaceLimit in wasm-memory.cc), which allows
// up to 128 fast memories. As long as we create more than that, we should
// trigger the fallback behavior.
for (var i = 0; i < 135 && !fallback_occurred; i++) {
memory = new WebAssembly.Memory({initial: 1});
instance = builder.instantiate({mod: {imported_mem: memory}});
instances.push(instance);
assertTraps(kTrapMemOutOfBounds, () => instance.exports.load(1 << 20));
fallback_occurred = !%WasmMemoryHasFullGuardRegion(memory);
}
assertTrue(fallback_occurred);
})();
(function TrapHandlerFallbackTestGrowFromZero() {
print("TrapHandlerFallbackTestGrowFromZero()");
// Create a zero-length memory to make sure the empty backing store is created.
const zero_memory = new WebAssembly.Memory({initial: 0});
// Create enough memories to overflow the address space limit
let memories = []
for (var i = 0; i < 135; i++) {
memories.push(new WebAssembly.Memory({initial: 1}));
}
// Create a memory for the module. We'll grow this later.
let memory = new WebAssembly.Memory({initial: 0});
let builder = new WasmModuleBuilder();
builder.addImportedMemory("mod", "imported_mem", 0);
builder.addFunction("load", kSig_i_i)
.addBody([kExprGetLocal, 0, kExprI32LoadMem, 0, 0])
.exportFunc();
instance = builder.instantiate({mod: {imported_mem: memory}});
assertTraps(kTrapMemOutOfBounds, () => instance.exports.load(1 << 20));
try {
memory.grow(1);
} catch(e) {
if (typeof e == typeof new RangeError) {
return;
}
throw e;
}
assertTraps(kTrapMemOutOfBounds, () => instance.exports.load(1 << 20));
})();
// Like TrapHandlerFallbackTest, but allows the module to be reused, so we only
// have to recompile once.
(function TrapHandlerFallbackTestReuseModule() {
print("TrapHandlerFallbackTestReuseModule()");
let builder = new WasmModuleBuilder();
builder.addImportedMemory("mod", "imported_mem", 1);
builder.addFunction("load", kSig_i_i)
.addBody([kExprGetLocal, 0, kExprI32LoadMem, 0, 0])
.exportFunc();
let memory;
let instance;
let instances = [];
let fallback_occurred = false;
// Create 135 instances. V8 limits wasm to slightly more than 1 TiB of address
// space per isolate (see kAddressSpaceLimit in wasm-memory.cc), which allows
// up to 128 fast memories. As long as we create more than that, we should
// trigger the fallback behavior.
const module = builder.toModule();
for (var i = 0; i < 135 && !fallback_occurred; i++) {
memory = new WebAssembly.Memory({initial: 1});
instance = new WebAssembly.Instance(module, {mod: {imported_mem: memory}});
instances.push(instance);
assertTraps(kTrapMemOutOfBounds, () => instance.exports.load(1 << 20));
fallback_occurred = !%WasmMemoryHasFullGuardRegion(memory);
}
assertTrue(fallback_occurred);
})();
// Make sure that a bounds checked instance still works when calling an
// imported unchecked function.
(function CallIndirectImportTest() {
print("CallIndirectImportTest()");
// Create an unchecked instance that calls a function through an indirect
// table.
const instance_a = (() => {
const builder = new WasmModuleBuilder();
builder.addMemory(1, 1, false);
builder.addFunction("read_mem", kSig_i_i)
.addBody([
kExprGetLocal, 0,
kExprI32LoadMem, 0, 0
]).exportAs("read_mem");
return builder.instantiate();
})();
// Create new memories until we get one that is unguarded
let memories = [];
let memory;
for (var i = 0; i < 135; i++) {
memory = new WebAssembly.Memory({initial: 1});
memories.push(memory);
if (!%WasmMemoryHasFullGuardRegion(memory)) {
break;
}
}
assertFalse(%WasmMemoryHasFullGuardRegion(memory));
// create a module that imports a function through a table
const instance_b = (() => {
const builder = new WasmModuleBuilder();
builder.addFunction("main", kSig_i_i)
.addBody([
kExprGetLocal, 0,
kExprI32Const, 0,
kExprCallIndirect, 0, kTableZero
]).exportAs("main");
builder.addImportedTable("env", "table", 1, 1);
const module = new WebAssembly.Module(builder.toBuffer());
const table = new WebAssembly.Table({
element: "anyfunc",
initial: 1, maximum: 1
});
// Hook the new instance's export into the old instance's table.
table.set(0, instance_a.exports.read_mem);
return new WebAssembly.Instance(module, {'env': { 'table': table }});
})();
// Make sure we get an out of bounds still.
assertTraps(kTrapMemOutOfBounds, () => instance_b.exports.main(100000));
})();