Commit 8860c5f1 authored by Clemens Backes, committed by Commit Bot

Reland "[wasm] Move kMaxWasmCodeSpaceSize to wasm directory"

This is a reland of c2ea2047

Original change's description:
> [wasm] Move kMaxWasmCodeSpaceSize to wasm directory
> 
> This limit is wasm-internal, and does not need to be exposed via
> src/common/globals.h.
> This CL moves it into the {WasmCodeAllocator}.
> 
> Drive-by: Minor simplification in jump table stress test.
> 
> R=ecmziegler@chromium.org
> 
> Change-Id: Iff8c4657697ae98123d840a022c5b21c4948fcdf
> Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2375189
> Reviewed-by: Emanuel Ziegler <ecmziegler@chromium.org>
> Commit-Queue: Clemens Backes <clemensb@chromium.org>
> Cr-Commit-Position: refs/heads/master@{#69558}

Change-Id: I6e0432d14d23978dea599233e620e84d8255caf9
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2375388
Reviewed-by: Emanuel Ziegler <ecmziegler@chromium.org>
Commit-Queue: Clemens Backes <clemensb@chromium.org>
Cr-Commit-Position: refs/heads/master@{#69560}
parent dc3f30ef
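The change itself is small: retire the namespace-scope constant kMaxWasmCodeSpaceSize from src/common/globals.h and re-home it as the class-scoped WasmCodeAllocator::kMaxCodeSpaceSize. A minimal standalone sketch of that pattern (simplified: __aarch64__ stands in for V8's V8_TARGET_ARCH_ARM64 macro and the MB helper is redefined locally; this is not the actual V8 header):

#include <cstddef>

constexpr size_t MB = 1024 * 1024;  // stand-in for V8's {MB} constant

class WasmCodeAllocator {
 public:
#if defined(__aarch64__)  // stand-in for V8_TARGET_ARCH_ARM64
  // ARM64 only supports direct calls within a 128 MB range.
  static constexpr size_t kMaxCodeSpaceSize = 128 * MB;
#else
  // Larger limit elsewhere; still small enough that in-space offsets fit in a
  // 32-bit signed integer.
  static constexpr size_t kMaxCodeSpaceSize = 1024 * MB;
#endif
};

// Callers outside the wasm code manager now qualify the name instead of
// picking it up from a globally included header:
static_assert(WasmCodeAllocator::kMaxCodeSpaceSize % MB == 0,
              "limit is a whole number of MB");

Scoping the limit to the allocator keeps globals.h free of wasm-specific tuning knobs, which is the stated motivation of the CL.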
@@ -186,18 +186,9 @@ constexpr int kElidedFrameSlots = 0;
 #endif
 constexpr int kDoubleSizeLog2 = 3;
 constexpr size_t kMaxWasmCodeMB = 2048;
 constexpr size_t kMaxWasmCodeMemory = kMaxWasmCodeMB * MB;
-#if V8_TARGET_ARCH_ARM64
-// ARM64 only supports direct calls within a 128 MB range.
-constexpr size_t kMaxWasmCodeSpaceSize = 128 * MB;
-#else
-// Use 1024 MB limit for code spaces on other platforms. This is smaller than
-// the total allowed code space (kMaxWasmCodeMemory) to avoid unnecessarily big
-// reservations, and to ensure that distances within a code space fit within a
-// 32-bit signed integer.
-constexpr size_t kMaxWasmCodeSpaceSize = 1024 * MB;
-#endif
 #if V8_HOST_ARCH_64_BIT
 constexpr int kSystemPointerSizeLog2 = 3;
...
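The removed comment justifies the 1024 MB figure by requiring that distances within one code space fit in a 32-bit signed integer. As a quick standalone check (an illustrative sketch, not part of the CL): 1024 MB is 2^30 bytes, so any in-space offset is at most 2^30 - 1, which is below INT32_MAX = 2^31 - 1.

#include <cstddef>
#include <cstdint>
#include <limits>

constexpr size_t MB = 1024 * 1024;
constexpr size_t kLimit = 1024 * MB;  // 2^30 bytes

// The largest possible offset between two addresses inside one code space is
// kLimit - 1, which must be representable as a signed 32-bit displacement.
static_assert(kLimit - 1 <= size_t{std::numeric_limits<int32_t>::max()},
              "in-space offsets fit in int32_t");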
@@ -495,6 +495,9 @@ void WasmCodeAllocator::OptionalLock::Lock(WasmCodeAllocator* allocator) {
   allocator->mutex_.Lock();
 }
 
+// static
+constexpr size_t WasmCodeAllocator::kMaxCodeSpaceSize;
+
 WasmCodeAllocator::WasmCodeAllocator(WasmCodeManager* code_manager,
                                      VirtualMemory code_space,
                                      std::shared_ptr<Counters> async_counters)
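The out-of-line definition marked "// static" is a pre-C++17 requirement rather than a stylistic choice: a static constexpr data member that is ODR-used, e.g. bound to the const-reference parameters of std::min in ReservationSize below, needs a namespace-scope definition until C++17 makes such members implicitly inline. A hedged sketch of the pattern (hypothetical names, not the V8 sources):

#include <algorithm>
#include <cstddef>

struct Allocator {
  static constexpr size_t kMaxCodeSpaceSize = size_t{1024} * 1024 * 1024;
};

// Required in C++14 because std::min below binds the member to a const
// reference (an ODR-use); redundant but still legal from C++17 on.
constexpr size_t Allocator::kMaxCodeSpaceSize;

size_t ClampReservation(size_t reserve_size) {
  return std::min(Allocator::kMaxCodeSpaceSize, reserve_size);
}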
@@ -598,7 +601,7 @@ size_t ReservationSize(size_t code_size_estimate, int num_declared_functions,
                total_reserved / 4));
 
   // Limit by the maximum supported code space size.
-  return std::min(kMaxWasmCodeSpaceSize, reserve_size);
+  return std::min(WasmCodeAllocator::kMaxCodeSpaceSize, reserve_size);
 }
 
 }  // namespace
@@ -1419,7 +1422,7 @@ NativeModule::JumpTablesRef NativeModule::FindJumpTablesForRegion(
     size_t max_distance = std::max(
         code_region.end() > table_start ? code_region.end() - table_start : 0,
         table_end > code_region.begin() ? table_end - code_region.begin() : 0);
-    return max_distance < kMaxWasmCodeSpaceSize;
+    return max_distance < WasmCodeAllocator::kMaxCodeSpaceSize;
   };
 
   // Fast path: Try to use {main_jump_table_} and {main_far_jump_table_}.
...
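The max_distance expression above takes the worst-case distance between any address in the candidate code region and any slot of the jump tables; the tables are only reusable if that distance stays below the per-code-space limit, so near calls can still reach them. A worked example with made-up addresses (illustration only, not V8 code):

#include <algorithm>
#include <cstddef>
#include <cstdint>

int main() {
  // Hypothetical layout: jump tables near 256 MB, code region near 288 MB.
  const uintptr_t table_start  = 0x1000'0000;  // 256 MB
  const uintptr_t table_end    = 0x1000'8000;  // jump table + far jump table
  const uintptr_t region_begin = 0x1200'0000;  // 288 MB
  const uintptr_t region_end   = 0x1280'0000;  // 296 MB

  // Worst-case distance between any code address and any table slot.
  const uintptr_t max_distance =
      std::max(region_end > table_start ? region_end - table_start : 0,
               table_end > region_begin ? table_end - region_begin : 0);

  const uintptr_t kLimit = uintptr_t{128} * 1024 * 1024;  // ARM64 near-call reach
  // max_distance is 0x2800000 (40 MB) here, so the tables are reachable.
  return max_distance < kLimit ? 0 : 1;
}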
@@ -366,6 +366,17 @@ const char* GetWasmCodeKindAsString(WasmCode::Kind);
 // Manages the code reservations and allocations of a single {NativeModule}.
 class WasmCodeAllocator {
  public:
+#if V8_TARGET_ARCH_ARM64
+  // ARM64 only supports direct calls within a 128 MB range.
+  static constexpr size_t kMaxCodeSpaceSize = 128 * MB;
+#else
+  // Use 1024 MB limit for code spaces on other platforms. This is smaller than
+  // the total allowed code space (kMaxWasmCodeMemory) to avoid unnecessarily
+  // big reservations, and to ensure that distances within a code space fit
+  // within a 32-bit signed integer.
+  static constexpr size_t kMaxCodeSpaceSize = 1024 * MB;
+#endif
+
   // {OptionalLock} is passed between {WasmCodeAllocator} and {NativeModule} to
   // indicate that the lock on the {WasmCodeAllocator} is already taken. It's
   // optional to allow to also call methods without holding the lock.
...
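The 128 MB figure in the new header comment matches the reach of ARM64's direct branch instructions: B and BL encode a signed 26-bit immediate counted in 4-byte instruction units, i.e. roughly +/- 2^25 * 4 bytes = +/- 128 MB. This is a general architecture fact, stated here as a sanity-check sketch rather than part of the CL:

#include <cstddef>

constexpr size_t MB = 1024 * 1024;
// B/BL: signed imm26, scaled by the 4-byte instruction size.
constexpr size_t kArm64DirectBranchReach = (size_t{1} << 25) * 4;
static_assert(kArm64DirectBranchReach == 128 * MB,
              "128 MB matches the ARM64 direct-call reach");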
@@ -36,12 +36,20 @@ constexpr uint32_t kJumpTableSize =
 constexpr size_t kThunkBufferSize = 4 * KB;
 
 #if V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_X64
+// We need the branches (from CompileJumpTableThunk) to be within near-call
+// range of the jump table slots. The address hint to AllocateAssemblerBuffer
+// is not reliable enough to guarantee that we can always achieve this with
+// separate allocations, so we generate all code in a single
+// kMaxCodeMemory-sized chunk.
+constexpr size_t kAssemblerBufferSize = WasmCodeAllocator::kMaxCodeSpaceSize;
 constexpr uint32_t kAvailableBufferSlots =
-    (kMaxWasmCodeSpaceSize - kJumpTableSize) / kThunkBufferSize;
+    (WasmCodeAllocator::kMaxCodeSpaceSize - kJumpTableSize) / kThunkBufferSize;
 constexpr uint32_t kBufferSlotStartOffset =
     RoundUp<kThunkBufferSize>(kJumpTableSize);
 #else
+constexpr size_t kAssemblerBufferSize = kJumpTableSize;
 constexpr uint32_t kAvailableBufferSlots = 0;
+constexpr uint32_t kBufferSlotStartOffset = 0;
 #endif
 
 Address AllocateJumpTableThunk(
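In the test, the single kAssemblerBufferSize chunk is laid out as the jump table at offset 0 followed by fixed-size thunk slots: kBufferSlotStartOffset rounds the jump-table size up to a kThunkBufferSize boundary, and kAvailableBufferSlots is how many such slots fit in the remainder. A standalone sketch with made-up sizes (RoundUp spelled out by hand; these are not the real test constants):

#include <cstddef>
#include <cstdint>

// Illustrative, simplified numbers.
constexpr size_t KB = 1024;
constexpr size_t MB = 1024 * KB;
constexpr uint32_t kJumpTableSize = 10 * KB;       // assumed size for the example
constexpr size_t kThunkBufferSize = 4 * KB;
constexpr size_t kAssemblerBufferSize = 128 * MB;  // one code-space-sized chunk

// Thunk slots start at the first kThunkBufferSize boundary past the jump
// table (mirrors RoundUp<kThunkBufferSize>(kJumpTableSize))...
constexpr uint32_t kBufferSlotStartOffset =
    ((kJumpTableSize + kThunkBufferSize - 1) / kThunkBufferSize) *
    kThunkBufferSize;                              // 12 KB here
// ...and the rest of the chunk is carved into fixed-size thunk slots.
constexpr uint32_t kAvailableBufferSlots =
    (kAssemblerBufferSize - kJumpTableSize) / kThunkBufferSize;

static_assert(kBufferSlotStartOffset == 12 * KB, "rounded up to a slot boundary");
static_assert(kAvailableBufferSlots == 32765, "(128 MB - 10 KB) / 4 KB, rounded down");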
@@ -219,19 +227,9 @@ TEST(JumpTablePatchingStress) {
   constexpr int kNumberOfRunnerThreads = 5;
   constexpr int kNumberOfPatcherThreads = 3;
 
-#if V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_X64
-  // We need the branches (from CompileJumpTableThunk) to be within near-call
-  // range of the jump table slots. The address hint to AllocateAssemblerBuffer
-  // is not reliable enough to guarantee that we can always achieve this with
-  // separate allocations, so for Arm64 we generate all code in a single
-  // kMaxMasmCodeMemory-sized chunk.
-  STATIC_ASSERT(kMaxWasmCodeSpaceSize >= kJumpTableSize);
-  auto buffer = AllocateAssemblerBuffer(kMaxWasmCodeSpaceSize);
+  STATIC_ASSERT(kAssemblerBufferSize >= kJumpTableSize);
+  auto buffer = AllocateAssemblerBuffer(kAssemblerBufferSize);
   byte* thunk_slot_buffer = buffer->start() + kBufferSlotStartOffset;
-#else
-  auto buffer = AllocateAssemblerBuffer(kJumpTableSize);
-  byte* thunk_slot_buffer = nullptr;
-#endif
 
   std::bitset<kAvailableBufferSlots> used_thunk_slots;
   buffer->MakeWritableAndExecutable();
...
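The stress test hands each generated thunk a dedicated kThunkBufferSize slot out of the shared buffer and tracks occupancy in the {used_thunk_slots} bitset. A simplified sketch of that claim-a-free-slot pattern (illustrative helper, not the test's actual code):

#include <bitset>
#include <cstddef>

constexpr size_t kSlots = 8;            // stands in for kAvailableBufferSlots
constexpr size_t kSlotSize = 4 * 1024;  // stands in for kThunkBufferSize

// Returns the byte offset of a freshly claimed slot, or size_t(-1) if none is
// free; the caller marks a slot free again once its thunk is retired.
size_t ClaimThunkSlot(std::bitset<kSlots>& used) {
  for (size_t i = 0; i < kSlots; ++i) {
    if (!used.test(i)) {
      used.set(i);
      return i * kSlotSize;
    }
  }
  return static_cast<size_t>(-1);
}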