Commit eaed31c5 authored by eholk's avatar eholk Committed by Commit bot

[wasm] Add guard regions to end of WebAssembly.Memory buffers

With this change, WebAssembly.Memory objects have backing stores allocated as an
8GB region where everything beyond the size of the Wasm heap is inaccessible.

GrowMemory is now implemented by changing the protection on the guard regions to
make the new portions of the heap accessible.

Guard pages are not enabled by default, but this change adds a flag and a test
variant to make sure we get test coverage on them.

BUG= https://bugs.chromium.org/p/v8/issues/detail?id=5277

Review-Url: https://codereview.chromium.org/2396433008
Cr-Commit-Position: refs/heads/master@{#41089}
parent 932a865e
......@@ -49,21 +49,19 @@
namespace v8 {
namespace base {
#ifdef __arm__
bool OS::ArmUsingHardFloat() {
// GCC versions 4.6 and above define __ARM_PCS or __ARM_PCS_VFP to specify
// the Floating Point ABI used (PCS stands for Procedure Call Standard).
// We use these as well as a couple of other defines to statically determine
// what FP ABI used.
// GCC versions 4.4 and below don't support hard-fp.
// GCC versions 4.5 may support hard-fp without defining __ARM_PCS or
// __ARM_PCS_VFP.
#define GCC_VERSION (__GNUC__ * 10000 \
+ __GNUC_MINOR__ * 100 \
+ __GNUC_PATCHLEVEL__)
// GCC versions 4.6 and above define __ARM_PCS or __ARM_PCS_VFP to specify
// the Floating Point ABI used (PCS stands for Procedure Call Standard).
// We use these as well as a couple of other defines to statically determine
// what FP ABI used.
// GCC versions 4.4 and below don't support hard-fp.
// GCC versions 4.5 may support hard-fp without defining __ARM_PCS or
// __ARM_PCS_VFP.
#define GCC_VERSION \
(__GNUC__ * 10000 + __GNUC_MINOR__ * 100 + __GNUC_PATCHLEVEL__)
#if GCC_VERSION >= 40600 && !defined(__clang__)
#if defined(__ARM_PCS_VFP)
return true;
......@@ -81,7 +79,8 @@ bool OS::ArmUsingHardFloat() {
!defined(__VFP_FP__)
return false;
#else
#error "Your version of compiler does not report the FP ABI compiled for." \
#error \
"Your version of compiler does not report the FP ABI compiled for." \
"Please report it on this issue" \
"http://code.google.com/p/v8/issues/detail?id=2140"
......@@ -92,17 +91,15 @@ bool OS::ArmUsingHardFloat() {
#endif // def __arm__
// Returns the abbreviated local timezone name in effect at |time|
// (milliseconds since the epoch), or "" when |time| is NaN or the zone
// cannot be determined.
const char* OS::LocalTimezone(double time, TimezoneCache* cache) {
  if (std::isnan(time)) return "";
  time_t tv = static_cast<time_t>(std::floor(time / msPerSecond));
  struct tm tm;
  // localtime_r is the thread-safe variant of localtime.
  struct tm* t = localtime_r(&tv, &tm);
  if (!t || !t->tm_zone) return "";
  return t->tm_zone;
}
double OS::LocalTimeOffset(TimezoneCache* cache) {
time_t tv = time(NULL);
struct tm tm;
......@@ -112,9 +109,7 @@ double OS::LocalTimeOffset(TimezoneCache* cache) {
(t->tm_isdst > 0 ? 3600 * msPerSecond : 0));
}
void* OS::Allocate(const size_t requested,
size_t* allocated,
void* OS::Allocate(const size_t requested, size_t* allocated,
bool is_executable) {
const size_t msize = RoundUp(requested, AllocateAlignment());
int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
......@@ -125,7 +120,6 @@ void* OS::Allocate(const size_t requested,
return mbase;
}
std::vector<OS::SharedLibraryAddress> OS::GetSharedLibraryAddresses() {
std::vector<SharedLibraryAddress> result;
// This function assumes that the layout of the file is as follows:
......@@ -169,8 +163,8 @@ std::vector<OS::SharedLibraryAddress> OS::GetSharedLibraryAddresses() {
lib_name[strlen(lib_name) - 1] = '\0';
} else {
// No library name found, just record the raw address range.
snprintf(lib_name, kLibNameLen,
"%08" V8PRIxPTR "-%08" V8PRIxPTR, start, end);
snprintf(lib_name, kLibNameLen, "%08" V8PRIxPTR "-%08" V8PRIxPTR, start,
end);
}
result.push_back(SharedLibraryAddress(lib_name, start, end));
} else {
......@@ -187,7 +181,6 @@ std::vector<OS::SharedLibraryAddress> OS::GetSharedLibraryAddresses() {
return result;
}
void OS::SignalCodeMovingGC() {
// Support for ll_prof.py.
//
......@@ -203,38 +196,30 @@ void OS::SignalCodeMovingGC() {
OS::PrintError("Failed to open %s\n", OS::GetGCFakeMMapFile());
OS::Abort();
}
void* addr = mmap(OS::GetRandomMmapAddr(), size,
PROT_READ | PROT_EXEC,
void* addr = mmap(OS::GetRandomMmapAddr(), size, PROT_READ | PROT_EXEC,
MAP_PRIVATE, fileno(f), 0);
DCHECK_NE(MAP_FAILED, addr);
OS::Free(addr, size);
fclose(f);
}
// Constants used for mmap.
static const int kMmapFd = -1;
static const int kMmapFdOffset = 0;
// Default-constructed VirtualMemory owns no reservation.
VirtualMemory::VirtualMemory() : address_(NULL), size_(0) {}

// Reserves (but does not commit) a |size|-byte region at an OS-chosen
// address; address_ is NULL when the reservation failed.
VirtualMemory::VirtualMemory(size_t size)
    : address_(ReserveRegion(size)), size_(size) {}
VirtualMemory::VirtualMemory(size_t size, size_t alignment)
: address_(NULL), size_(0) {
DCHECK((alignment % OS::AllocateAlignment()) == 0);
size_t request_size = RoundUp(size + alignment,
static_cast<intptr_t>(OS::AllocateAlignment()));
void* reservation = mmap(OS::GetRandomMmapAddr(),
request_size,
PROT_NONE,
MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE,
kMmapFd,
kMmapFdOffset);
size_t request_size =
RoundUp(size + alignment, static_cast<intptr_t>(OS::AllocateAlignment()));
void* reservation =
mmap(OS::GetRandomMmapAddr(), request_size, PROT_NONE,
MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, kMmapFd, kMmapFdOffset);
if (reservation == MAP_FAILED) return;
uint8_t* base = static_cast<uint8_t*>(reservation);
......@@ -266,7 +251,6 @@ VirtualMemory::VirtualMemory(size_t size, size_t alignment)
#endif
}
VirtualMemory::~VirtualMemory() {
if (IsReserved()) {
bool result = ReleaseRegion(address(), size());
......@@ -275,44 +259,33 @@ VirtualMemory::~VirtualMemory() {
}
}
// A VirtualMemory is "reserved" iff it holds a non-NULL base address.
bool VirtualMemory::IsReserved() { return address_ != NULL; }
// Forgets the reservation without releasing it: the caller takes over
// ownership of the underlying region.
void VirtualMemory::Reset() {
  address_ = NULL;
  size_ = 0;
}
// Commits |size| bytes at |address| (which must lie inside this
// reservation), making them readable/writable (+exec if |is_executable|).
bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) {
  CHECK(InVM(address, size));
  return CommitRegion(address, size, is_executable);
}
// Decommits |size| bytes at |address|, returning the pages to the reserved
// (inaccessible, unbacked) state.
bool VirtualMemory::Uncommit(void* address, size_t size) {
  CHECK(InVM(address, size));
  return UncommitRegion(address, size);
}
// Turns one commit-page at |address| into a guard page (any access faults).
// Always reports success.
bool VirtualMemory::Guard(void* address) {
  CHECK(InVM(address, OS::CommitPageSize()));
  OS::Guard(address, OS::CommitPageSize());
  return true;
}
void* VirtualMemory::ReserveRegion(size_t size) {
void* result = mmap(OS::GetRandomMmapAddr(),
size,
PROT_NONE,
MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE,
kMmapFd,
kMmapFdOffset);
void* result =
mmap(OS::GetRandomMmapAddr(), size, PROT_NONE,
MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, kMmapFd, kMmapFdOffset);
if (result == MAP_FAILED) return NULL;
......@@ -322,14 +295,10 @@ void* VirtualMemory::ReserveRegion(size_t size) {
return result;
}
bool VirtualMemory::CommitRegion(void* base, size_t size, bool is_executable) {
int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
if (MAP_FAILED == mmap(base,
size,
prot,
MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED,
kMmapFd,
if (MAP_FAILED == mmap(base, size, prot,
MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, kMmapFd,
kMmapFdOffset)) {
return false;
}
......@@ -337,13 +306,9 @@ bool VirtualMemory::CommitRegion(void* base, size_t size, bool is_executable) {
return true;
}
bool VirtualMemory::UncommitRegion(void* base, size_t size) {
return mmap(base,
size,
PROT_NONE,
MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE | MAP_FIXED,
kMmapFd,
return mmap(base, size, PROT_NONE,
MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE | MAP_FIXED, kMmapFd,
kMmapFdOffset) != MAP_FAILED;
}
......@@ -363,10 +328,7 @@ bool VirtualMemory::ReleaseRegion(void* base, size_t size) {
return munmap(base, size) == 0;
}
// mmap'd PROT_NONE reservations consume no physical backing until touched,
// so commit is effectively lazy on this platform.
bool VirtualMemory::HasLazyCommits() { return true; }
} // namespace base
} // namespace v8
......@@ -99,6 +99,20 @@ intptr_t OS::CommitPageSize() {
return page_size;
}
// Allocates |requested| bytes of non-executable memory and marks the whole
// region inaccessible (guarded). Callers later re-enable access to a prefix
// with OS::Unprotect(). Returns nullptr on failure.
void* OS::AllocateGuarded(const size_t requested) {
  size_t allocated = 0;
  const bool is_executable = false;
  void* mbase = OS::Allocate(requested, &allocated, is_executable);
  // Check for allocation failure first; the previous ordering could call
  // OS::Free on a null pointer before the nullptr check ran.
  if (mbase == nullptr) {
    return nullptr;
  }
  if (allocated != requested) {
    // Allocate() rounded the size up; we cannot guarantee guard semantics
    // for the extra tail, so release the block and fail.
    OS::Free(mbase, allocated);
    return nullptr;
  }
  OS::Guard(mbase, requested);
  return mbase;
}
void OS::Free(void* address, const size_t size) {
// TODO(1240712): munmap has a return value which is ignored here.
......@@ -129,6 +143,15 @@ void OS::Guard(void* address, const size_t size) {
#endif
}
// Make a region of memory readable and writable.
// Used to open up previously guarded pages (e.g. when growing a wasm heap).
// NOTE(review): the mprotect/VirtualProtect return value is ignored here, so
// a failure silently leaves the region inaccessible -- confirm callers can
// tolerate that.
void OS::Unprotect(void* address, const size_t size) {
#if V8_OS_CYGWIN
  DWORD oldprotect;
  VirtualProtect(address, size, PAGE_READWRITE, &oldprotect);
#else
  mprotect(address, size, PROT_READ | PROT_WRITE);
#endif
}
static LazyInstance<RandomNumberGenerator>::type
platform_random_number_generator = LAZY_INSTANCE_INITIALIZER;
......
......@@ -797,6 +797,9 @@ void* OS::Allocate(const size_t requested,
return mbase;
}
// Reserves (deliberately without committing) |requested| bytes with no
// access rights; pages only become usable after a later MEM_COMMIT (see
// OS::Unprotect below). Returns nullptr on failure.
void* OS::AllocateGuarded(const size_t requested) {
  return VirtualAlloc(nullptr, requested, MEM_RESERVE, PAGE_NOACCESS);
}
void OS::Free(void* address, const size_t size) {
// TODO(1240712): VirtualFree has a return value which is ignored here.
......@@ -821,6 +824,10 @@ void OS::Guard(void* address, const size_t size) {
VirtualProtect(address, size, PAGE_NOACCESS, &oldprotect);
}
// Commits the pages in [address, address+size) as read/write, making a
// previously reserved-only (guarded) region accessible.
void OS::Unprotect(void* address, const size_t size) {
  LPVOID result = VirtualAlloc(address, size, MEM_COMMIT, PAGE_READWRITE);
  // NOTE(review): GetLastError() is only specified to be meaningful after a
  // failing call; asserting it is 0 after a successful VirtualAlloc may fire
  // spuriously -- confirm the intended invariant.
  DCHECK_IMPLIES(result != nullptr, GetLastError() == 0);
}
void OS::Sleep(TimeDelta interval) {
::Sleep(static_cast<DWORD>(interval.InMilliseconds()));
......
......@@ -178,6 +178,11 @@ class V8_BASE_EXPORT OS {
bool is_executable);
static void Free(void* address, const size_t size);
// Allocates a region of memory that is inaccessible. On Windows this reserves
// but does not commit the memory. On Linux, it is equivalent to a call to
// Allocate() followed by Guard().
static void* AllocateGuarded(const size_t requested);
// This is the granularity at which the ProtectCode(...) call can set page
// permissions.
static intptr_t CommitPageSize();
......@@ -188,6 +193,9 @@ class V8_BASE_EXPORT OS {
// Assign memory as a guard page so that access will cause an exception.
static void Guard(void* address, const size_t size);
// Make a region of memory readable and writable.
static void Unprotect(void* address, const size_t size);
// Generate a random address to be used for hinting mmap().
static void* GetRandomMmapAddr();
......
......@@ -2964,6 +2964,7 @@ Node* WasmGraphBuilder::LoadMem(wasm::LocalType type, MachineType memtype,
if (aligned ||
jsgraph()->machine()->UnalignedLoadSupported(memtype, alignment)) {
if (FLAG_wasm_trap_handler) {
DCHECK(FLAG_wasm_guard_pages);
Node* context = HeapConstant(module_->instance->context);
Node* position_node = jsgraph()->Int32Constant(position);
load = graph()->NewNode(jsgraph()->machine()->ProtectedLoad(memtype),
......
......@@ -529,7 +529,11 @@ DEFINE_BOOL(wasm_no_stack_checks, false,
DEFINE_BOOL(wasm_trap_handler, false,
            "use signal handlers to catch out of bounds memory access in wasm"
            " (experimental, currently Linux x86_64 only)")
DEFINE_BOOL(wasm_guard_pages, false,
            "add guard pages to the end of WebAssembly memory"
            " (experimental, no effect on 32-bit)")
// Trap handlers rely on the inaccessible guard region, so enabling the
// former implies the latter.
DEFINE_IMPLICATION(wasm_trap_handler, wasm_guard_pages)
// Profiler flags.
DEFINE_INT(frame_count, 1, "number of stack frames inspected by the profiler")
......
......@@ -6978,6 +6978,7 @@ bool JSArrayBuffer::is_external() { return IsExternal::decode(bit_field()); }
// Marks the backing store as externally managed (not owned by the array
// buffer allocator). Guard-region buffers manage their memory through a
// weak-handle finalizer instead, so the two flags must not both be set.
void JSArrayBuffer::set_is_external(bool value) {
  DCHECK(!value || !has_guard_region());
  set_bit_field(IsExternal::update(bit_field(), value));
}
......@@ -7007,6 +7008,13 @@ void JSArrayBuffer::set_is_shared(bool value) {
set_bit_field(IsShared::update(bit_field(), value));
}
// True when the backing store was allocated with an inaccessible guard
// region trailing the usable bytes (used for wasm memory).
bool JSArrayBuffer::has_guard_region() {
  return HasGuardRegion::decode(bit_field());
}
// Records whether the backing store carries a trailing guard region.
void JSArrayBuffer::set_has_guard_region(bool value) {
  set_bit_field(HasGuardRegion::update(bit_field(), value));
}
Object* JSArrayBufferView::byte_offset() const {
if (WasNeutered()) return Smi::kZero;
......
......@@ -11007,6 +11007,9 @@ class JSArrayBuffer: public JSObject {
inline bool is_shared();
inline void set_is_shared(bool value);
inline bool has_guard_region();
inline void set_has_guard_region(bool value);
DECLARE_CAST(JSArrayBuffer)
void Neuter();
......@@ -11046,6 +11049,7 @@ class JSArrayBuffer: public JSObject {
class IsNeuterable : public BitField<bool, 2, 1> {};
class WasNeutered : public BitField<bool, 3, 1> {};
class IsShared : public BitField<bool, 4, 1> {};
class HasGuardRegion : public BitField<bool, 5, 1> {};
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(JSArrayBuffer);
......
......@@ -364,11 +364,10 @@ void WebAssemblyMemory(const v8::FunctionCallbackInfo<v8::Value>& args) {
}
}
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
i::Handle<i::JSArrayBuffer> buffer =
i_isolate->factory()->NewJSArrayBuffer(i::SharedFlag::kNotShared);
size_t size = static_cast<size_t>(i::wasm::WasmModule::kPageSize) *
static_cast<size_t>(initial);
i::JSArrayBuffer::SetupAllocatingData(buffer, i_isolate, size);
i::Handle<i::JSArrayBuffer> buffer =
i::wasm::NewArrayBuffer(i_isolate, size, i::FLAG_wasm_guard_pages);
i::Handle<i::JSObject> memory_obj = i::WasmMemoryObject::New(
i_isolate, buffer, has_maximum.FromJust() ? maximum : -1);
......
......@@ -70,28 +70,65 @@ void ReplaceReferenceInCode(Handle<Code> code, Handle<Object> old_ref,
}
}
Handle<JSArrayBuffer> NewArrayBuffer(Isolate* isolate, size_t size) {
if (size > (WasmModule::kV8MaxPages * WasmModule::kPageSize)) {
// TODO(titzer): lift restriction on maximum memory allocated here.
return Handle<JSArrayBuffer>::null();
}
void* memory = isolate->array_buffer_allocator()->Allocate(size);
// Weak-handle finalizer for guard-region wasm memory buffers: releases the
// backing store, updates the embedder's external-memory accounting, and
// destroys the global handle that parameterized this callback.
static void MemoryFinalizer(const v8::WeakCallbackInfo<void>& data) {
  JSArrayBuffer** p = reinterpret_cast<JSArrayBuffer**>(data.GetParameter());
  JSArrayBuffer* buffer = *p;
  void* memory = buffer->backing_store();
  // Free the full reserved range, not just byte_length(): the allocation
  // always spans the maximal heap offset (rounded to commit-page size).
  base::OS::Free(memory,
                 RoundUp(kWasmMaxHeapOffset, base::OS::CommitPageSize()));
  // The external allocation was reported when the buffer was created;
  // subtract it back out now that the memory is gone.
  data.GetIsolate()->AdjustAmountOfExternalAllocatedMemory(
      -buffer->byte_length()->Number());
  GlobalHandles::Destroy(reinterpret_cast<Object**>(p));
}
#if V8_TARGET_ARCH_64_BIT
const bool kGuardRegionsSupported = true;
#else
const bool kGuardRegionsSupported = false;
#endif
// Guard regions are used only when the flag is set AND the target
// architecture supports them (64-bit only; see kGuardRegionsSupported).
bool EnableGuardRegions() {
  return FLAG_wasm_guard_pages && kGuardRegionsSupported;
}
// Allocates the backing store for a wasm memory of |size| bytes.
//
// When |enable_guard_regions| is set (and supported on this architecture),
// the allocation spans the full addressable wasm heap range with everything
// beyond |size| inaccessible; |is_external| is set to true and the caller
// must arrange for the memory to be freed (see MemoryFinalizer). Otherwise
// the embedder's array buffer allocator is used and |is_external| stays
// false. Returns nullptr on allocation failure.
void* TryAllocateBackingStore(Isolate* isolate, size_t size,
                              bool enable_guard_regions, bool& is_external) {
  is_external = false;
  // TODO(eholk): Right now enable_guard_regions has no effect on 32-bit
  // systems. It may be safer to fail instead, given that other code might do
  // things that would be unsafe if they expected guard pages where there
  // weren't any.
  if (enable_guard_regions && kGuardRegionsSupported) {
    // TODO(eholk): On Windows we want to make sure we don't commit the guard
    // pages yet.

    // We always allocate the largest possible offset into the heap, so the
    // addressable memory after the guard page can be made inaccessible.
    const size_t alloc_size =
        RoundUp(kWasmMaxHeapOffset, base::OS::CommitPageSize());
    DCHECK_EQ(0u, size % base::OS::CommitPageSize());

    // AllocateGuarded makes the whole region inaccessible by default.
    void* memory = base::OS::AllocateGuarded(alloc_size);
    if (memory == nullptr) {
      return nullptr;
    }

    // Make the part we care about accessible.
    base::OS::Unprotect(memory, size);

    // The embedder does not see this allocation, so report it explicitly
    // for GC heuristics.
    reinterpret_cast<v8::Isolate*>(isolate)
        ->AdjustAmountOfExternalAllocatedMemory(size);

    is_external = true;
    return memory;
  } else {
    void* memory = isolate->array_buffer_allocator()->Allocate(size);
    return memory;
  }
}
void RelocateMemoryReferencesInCode(Handle<FixedArray> code_table,
......@@ -608,6 +645,51 @@ Vector<const uint8_t> GetFunctionBytes(
} // namespace
// Allocates a JSArrayBuffer suitable as a wasm memory of |size| bytes.
// When |enable_guard_regions| is requested (and supported), the backing
// store is guard-protected and externally owned, with a weak finalizer
// arranged to free it. Returns a null handle for over-size requests or on
// allocation failure.
Handle<JSArrayBuffer> wasm::NewArrayBuffer(Isolate* isolate, size_t size,
                                           bool enable_guard_regions) {
  if (size > (WasmModule::kV8MaxPages * WasmModule::kPageSize)) {
    // TODO(titzer): lift restriction on maximum memory allocated here.
    return Handle<JSArrayBuffer>::null();
  }

  // Silently downgrade the request on architectures without guard-region
  // support (see kGuardRegionsSupported).
  enable_guard_regions = enable_guard_regions && kGuardRegionsSupported;

  bool is_external;  // Set by TryAllocateBackingStore
  void* memory =
      TryAllocateBackingStore(isolate, size, enable_guard_regions, is_external);

  if (memory == nullptr) {
    return Handle<JSArrayBuffer>::null();
  }

#if DEBUG
  // Double check the API allocator actually zero-initialized the memory.
  const byte* bytes = reinterpret_cast<const byte*>(memory);
  for (size_t i = 0; i < size; ++i) {
    DCHECK_EQ(0, bytes[i]);
  }
#endif

  Handle<JSArrayBuffer> buffer = isolate->factory()->NewJSArrayBuffer();
  JSArrayBuffer::Setup(buffer, isolate, is_external, memory,
                       static_cast<int>(size));
  // Wasm memories must stay alive for the lifetime of the instance.
  buffer->set_is_neuterable(false);
  buffer->set_has_guard_region(enable_guard_regions);

  if (is_external) {
    // We mark the buffer as external if we allocated it here with guard
    // pages. That means we need to arrange for it to be freed.

    // TODO(eholk): Finalizers may not run when the main thread is shutting
    // down, which means we may leak memory here.
    Handle<Object> global_handle = isolate->global_handles()->Create(*buffer);
    GlobalHandles::MakeWeak(global_handle.location(), global_handle.location(),
                            &MemoryFinalizer, v8::WeakCallbackType::kFinalizer);
  }
  return buffer;
}
const char* wasm::SectionName(WasmSectionCode code) {
switch (code) {
case kUnknownSectionCode:
......@@ -1059,8 +1141,9 @@ class WasmInstanceBuilder {
MaybeHandle<JSArrayBuffer> old_globals;
uint32_t globals_size = module_->globals_size;
if (globals_size > 0) {
const bool enable_guard_regions = false;
Handle<JSArrayBuffer> global_buffer =
NewArrayBuffer(isolate_, globals_size);
NewArrayBuffer(isolate_, globals_size, enable_guard_regions);
globals_ = global_buffer;
if (globals_.is_null()) {
thrower_->RangeError("Out of memory: wasm globals");
......@@ -1109,6 +1192,9 @@ class WasmInstanceBuilder {
if (!memory_.is_null()) {
// Set externally passed ArrayBuffer non neuterable.
memory_->set_is_neuterable(false);
DCHECK_IMPLIES(EnableGuardRegions(), module_->origin == kAsmJsOrigin ||
memory_->has_guard_region());
} else if (min_mem_pages > 0) {
memory_ = AllocateMemory(min_mem_pages);
if (memory_.is_null()) return nothing; // failed to allocate memory
......@@ -1581,8 +1667,9 @@ class WasmInstanceBuilder {
thrower_->RangeError("Out of memory: wasm memory too large");
return Handle<JSArrayBuffer>::null();
}
Handle<JSArrayBuffer> mem_buffer =
NewArrayBuffer(isolate_, min_mem_pages * WasmModule::kPageSize);
const bool enable_guard_regions = EnableGuardRegions();
Handle<JSArrayBuffer> mem_buffer = NewArrayBuffer(
isolate_, min_mem_pages * WasmModule::kPageSize, enable_guard_regions);
if (mem_buffer.is_null()) {
thrower_->RangeError("Out of memory: wasm memory");
......@@ -2097,16 +2184,41 @@ int32_t wasm::GrowInstanceMemory(Isolate* isolate,
WasmModule::kV8MaxPages * WasmModule::kPageSize < new_size) {
return -1;
}
Handle<JSArrayBuffer> buffer = NewArrayBuffer(isolate, new_size);
Handle<JSArrayBuffer> buffer;
if (!old_buffer.is_null() && old_buffer->has_guard_region()) {
// We don't move the backing store, we simply change the protection to make
// more of it accessible.
base::OS::Unprotect(old_buffer->backing_store(), new_size);
reinterpret_cast<v8::Isolate*>(isolate)
->AdjustAmountOfExternalAllocatedMemory(pages * WasmModule::kPageSize);
Handle<Object> new_size_object =
isolate->factory()->NewNumberFromSize(new_size);
old_buffer->set_byte_length(*new_size_object);
SetInstanceMemory(instance, *old_buffer);
Handle<FixedArray> code_table =
instance->get_compiled_module()->code_table();
RelocateMemoryReferencesInCode(code_table, old_mem_start, old_mem_start,
old_size, new_size);
buffer = old_buffer;
} else {
const bool enable_guard_regions = false;
buffer = NewArrayBuffer(isolate, new_size, enable_guard_regions);
if (buffer.is_null()) return -1;
Address new_mem_start = static_cast<Address>(buffer->backing_store());
if (old_size != 0) {
memcpy(new_mem_start, old_mem_start, old_size);
}
SetInstanceMemory(instance, *buffer);
Handle<FixedArray> code_table = instance->get_compiled_module()->code_table();
Handle<FixedArray> code_table =
instance->get_compiled_module()->code_table();
RelocateMemoryReferencesInCode(code_table, old_mem_start, new_mem_start,
old_size, new_size);
}
SetInstanceMemory(instance, *buffer);
if (instance->has_memory_object()) {
instance->get_memory_object()->set_buffer(*buffer);
}
......
......@@ -41,6 +41,11 @@ const uint32_t kWasmVersion = 0x0d;
const uint8_t kWasmFunctionTypeForm = 0x60;
const uint8_t kWasmAnyFunctionTypeForm = 0x70;
const uint64_t kWasmMaxHeapOffset =
static_cast<uint64_t>(
std::numeric_limits<uint32_t>::max()) // maximum base value
+ std::numeric_limits<uint32_t>::max(); // maximum index value
enum WasmSectionCode {
kUnknownSectionCode = 0, // code for unknown sections
kTypeSectionCode = 1, // Function signature declarations
......@@ -426,6 +431,9 @@ int32_t GetInstanceMemorySize(Isolate* isolate,
int32_t GrowInstanceMemory(Isolate* isolate,
Handle<WasmInstanceObject> instance, uint32_t pages);
Handle<JSArrayBuffer> NewArrayBuffer(Isolate* isolate, size_t size,
bool enable_guard_regions);
void UpdateDispatchTables(Isolate* isolate, Handle<FixedArray> dispatch_tables,
int index, Handle<JSFunction> js_function);
......
......@@ -402,4 +402,29 @@
'*': [SKIP],
}], # variant == asm_wasm
##############################################################################
['variant == wasm_traps', {
'test-accessors/*': [SKIP],
'test-api-interceptors/*': [SKIP],
'test-api/*': [SKIP],
'test-bignum-dtoa/*': [SKIP],
'test-cpu-profiler/*': [SKIP],
'test-debug/*': [SKIP],
'test-global-handles/*': [SKIP],
'test-heap-profiler/*': [SKIP],
'test-heap/*': [SKIP],
'test-inobject-slack-tracking/*': [SKIP],
'test-lockers/*': [SKIP],
'test-run-machops/*': [SKIP],
'test-serialize/*': [SKIP],
'test-strings/*': [SKIP],
'test-field-type-tracking/*': [SKIP],
'test-parsing/*': [SKIP],
'test-page-promotion/*': [SKIP],
'test-decls/*': [SKIP],
'test-log/*': [SKIP],
'test-gap-resolver/*': [SKIP],
'test-dtoa/*': [SKIP],
}], # variant == wasm_traps
]
......@@ -44,6 +44,15 @@
}], # 'gc_stress == True'
##############################################################################
['variant == asm_wasm', {
'*': [SKIP],
}], # variant == asm_wasm
##############################################################################
['variant == wasm_traps', {
'*': [SKIP],
}], # variant == wasm_traps
['arch == s390 or arch == s390x', {
# Stack manipulations in LiveEdit is not implemented for this arch.
......
......@@ -4,4 +4,14 @@
[
##############################################################################
['variant == asm_wasm', {
'*': [SKIP],
}], # variant == asm_wasm
##############################################################################
['variant == wasm_traps', {
'*': [SKIP],
}], # variant == wasm_traps
]
......@@ -41,4 +41,8 @@
'*': [SKIP],
}], # variant == asm_wasm
['variant == wasm_traps', {
'*': [SKIP],
}], # variant == wasm_traps
]
......@@ -690,4 +690,22 @@
'whitespaces': [SKIP],
}], # variant == asm_wasm
['variant == wasm_traps', {
# Skip stuff uninteresting for wasm traps
'bugs/*': [SKIP],
'compiler/*': [SKIP],
'es6/*': [SKIP],
'es7/*': [SKIP],
'es8/*': [SKIP],
'harmony/*': [SKIP],
'ignition/*': [SKIP],
'lithium/*': [SKIP],
'third_party/*': [SKIP],
'tools/*': [SKIP],
'apply': [SKIP],
'math-*': [SKIP],
'unicode-test': [SKIP],
'whitespaces': [SKIP],
}], # variant == wasm_traps
]
......@@ -999,4 +999,8 @@
'*': [SKIP],
}], # variant == asm_wasm
['variant == wasm_traps', {
'*': [SKIP],
}], # variant == wasm_traps
]
......@@ -592,6 +592,9 @@
'*': [SKIP],
}], # variant == asm_wasm
['variant == wasm_traps', {
'*': [SKIP],
}], # variant == wasm_traps
# Module-related tests
# https://bugs.chromium.org/p/v8/issues/detail?id=1569
......
......@@ -15,4 +15,8 @@
'*': [SKIP],
}], # variant == asm_wasm
['variant == wasm_traps', {
'*': [SKIP],
}], # variant == wasm_traps
]
......@@ -134,5 +134,10 @@
'*': [SKIP],
}], # variant == asm_wasm
##############################################################################
['variant == wasm_traps', {
'*': [SKIP],
}], # variant == wasm_traps
##############################################################################
]
......@@ -111,6 +111,7 @@ MORE_VARIANTS = [
"stress",
"turbofan_opt",
"asm_wasm",
"wasm_traps",
]
EXHAUSTIVE_VARIANTS = VARIANTS + MORE_VARIANTS
......
......@@ -14,6 +14,7 @@ ALL_VARIANT_FLAGS = {
"ignition_turbofan": [["--ignition-staging", "--turbo"]],
"preparser": [["--min-preparse-length=0"]],
"asm_wasm": [["--validate-asm"]],
"wasm_traps": [["--wasm_guard_pages", "--invoke-weak-callbacks"]],
}
# FAST_VARIANTS implies no --always-opt.
......@@ -27,8 +28,9 @@ FAST_VARIANT_FLAGS = {
"ignition_turbofan": [["--ignition-staging", "--turbo"]],
"preparser": [["--min-preparse-length=0"]],
"asm_wasm": [["--validate-asm"]],
"wasm_traps": [["--wasm_guard_pages", "--invoke-weak-callbacks"]],
}
ALL_VARIANTS = set(["default", "stress", "turbofan", "turbofan_opt",
"nocrankshaft", "ignition", "ignition_staging",
"ignition_turbofan", "preparser", "asm_wasm"])
"ignition_turbofan", "preparser", "asm_wasm", "wasm_traps"])
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment