Commit e1b0086a authored by Clemens Hammacher, committed by Commit Bot

[wasm] Increase maximum code space on arm64

With the far jump table, we need to distinguish the maximum size of a
single code space from the maximum total code size per module. On
arm64 the two differ: we now support 1 GB of code per module, but each
individual code space is still limited to 128 MB, the range of a
direct call.
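
For readers outside the V8 tree, a minimal standalone sketch of the
relationship this commit establishes. It mirrors the constants changed in
src/common/globals.h below; the MB constant and the __aarch64__ check are
stand-ins for V8-internal definitions, not the V8 source itself:

    // Sketch only: the per-space vs. per-module split introduced here.
    #include <cstddef>

    constexpr size_t MB = 1024 * 1024;  // stand-in for V8's MB constant

    // Total code size per module: now 1 GB on all targets.
    constexpr size_t kMaxWasmCodeMB = 1024;
    constexpr size_t kMaxWasmCodeMemory = kMaxWasmCodeMB * MB;

    #if defined(__aarch64__)  // stand-in for V8_TARGET_ARCH_ARM64
    // A single code space must stay within the 128 MB direct-call range.
    constexpr size_t kMaxWasmCodeSpaceSize = 128 * MB;
    #else
    constexpr size_t kMaxWasmCodeSpaceSize = kMaxWasmCodeMemory;
    #endif

    static_assert(kMaxWasmCodeSpaceSize <= kMaxWasmCodeMemory,
                  "one code space can never exceed the per-module total");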

Bug: v8:9477

R=mstarzinger@chromium.org

Change-Id: I7b9aaec56a1d9d1f70573b6b895216d5b3f38346
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/1815253
Commit-Queue: Clemens Backes [né Hammacher] <clemensh@chromium.org>
Reviewed-by: Michael Starzinger <mstarzinger@chromium.org>
Cr-Commit-Position: refs/heads/master@{#63961}
parent 368241d0
......@@ -166,13 +166,14 @@ constexpr int kElidedFrameSlots = 0;
 #endif
 constexpr int kDoubleSizeLog2 = 3;
+constexpr size_t kMaxWasmCodeMB = 1024;
+constexpr size_t kMaxWasmCodeMemory = kMaxWasmCodeMB * MB;
 #if V8_TARGET_ARCH_ARM64
 // ARM64 only supports direct calls within a 128 MB range.
-constexpr size_t kMaxWasmCodeMB = 128;
+constexpr size_t kMaxWasmCodeSpaceSize = 128 * MB;
 #else
-constexpr size_t kMaxWasmCodeMB = 1024;
+constexpr size_t kMaxWasmCodeSpaceSize = kMaxWasmCodeMemory;
 #endif
-constexpr size_t kMaxWasmCodeMemory = kMaxWasmCodeMB * MB;
 #if V8_HOST_ARCH_64_BIT
 constexpr int kSystemPointerSizeLog2 = 3;
......
......@@ -525,10 +525,13 @@ size_t ReservationSize(size_t code_size_estimate, int num_declared_functions,
   // a) needed size + overhead (this is the minimum needed)
   // b) 2 * overhead (to not waste too much space by overhead)
   // c) 1/4 of current total reservation size (to grow exponentially)
-  return base::bits::RoundUpToPowerOfTwo(
+  size_t reserve_size = base::bits::RoundUpToPowerOfTwo(
       std::max(std::max(RoundUp<kCodeAlignment>(code_size_estimate) + overhead,
                         2 * overhead),
                total_reserved / 4));
+  // Limit by the maximum supported code space size.
+  return std::min(kMaxWasmCodeSpaceSize, reserve_size);
 }
 }  // namespace
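
A standalone sketch of the computation above, with the new clamp. The
RoundUpToPowerOfTwo helper is re-implemented here for illustration, and
the code-alignment rounding of the real function is simplified away:

    #include <algorithm>
    #include <cstddef>

    constexpr size_t MB = 1024 * 1024;
    constexpr size_t kMaxWasmCodeSpaceSize = 128 * MB;  // arm64 value

    // Simplified stand-in for base::bits::RoundUpToPowerOfTwo.
    size_t RoundUpToPowerOfTwo(size_t value) {
      size_t result = 1;
      while (result < value) result <<= 1;
      return result;
    }

    // Mirrors the max-of-three growth rule, then applies the new clamp.
    size_t ReservationSizeSketch(size_t code_size_estimate, size_t overhead,
                                 size_t total_reserved) {
      size_t reserve_size = RoundUpToPowerOfTwo(
          std::max({code_size_estimate + overhead, 2 * overhead,
                    total_reserved / 4}));
      // New in this commit: one reservation may not exceed what a single
      // code space supports.
      return std::min(kMaxWasmCodeSpaceSize, reserve_size);
    }

For example, once total_reserved reaches 768 MB, total_reserved / 4 is
192 MB, which rounds up to 256 MB; the clamp caps the reservation at
128 MB on arm64, so calls within one reservation stay in near-call range.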
......@@ -1563,7 +1566,7 @@ std::shared_ptr<NativeModule> WasmCodeManager::NewNativeModule(
   size_t code_vmem_size =
       can_request_more ? ReservationSize(code_size_estimate,
                                          module->num_declared_functions, 0)
-                       : kMaxWasmCodeMemory;
+                       : kMaxWasmCodeSpaceSize;
   // The '--wasm-max-code-space-reservation' testing flag can be used to reduce
   // the maximum size of the initial code space reservation (in MB).
......
......@@ -36,7 +36,7 @@ constexpr size_t kThunkBufferSize = AssemblerBase::kMinimalBufferSize;
 #if V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_X64
 constexpr uint32_t kAvailableBufferSlots =
-    (kMaxWasmCodeMemory - kJumpTableSize) / kThunkBufferSize;
+    (kMaxWasmCodeSpaceSize - kJumpTableSize) / kThunkBufferSize;
 constexpr uint32_t kBufferSlotStartOffset =
     RoundUp<kThunkBufferSize>(kJumpTableSize);
 #else
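
The test carves a single code region into a jump table followed by
fixed-size thunk slots. A hedged sketch of that layout; the values of
kJumpTableSize and kThunkBufferSize here are illustrative assumptions,
not the test's actual sizes:

    #include <cstddef>
    #include <cstdint>

    constexpr size_t MB = 1024 * 1024;
    constexpr size_t kMaxWasmCodeSpaceSize = 128 * MB;
    constexpr size_t kThunkBufferSize = 4 * 1024;  // assumed buffer size
    constexpr size_t kJumpTableSize = 10 * 1024;   // assumed table size

    // Thunks start at the first kThunkBufferSize boundary past the table.
    constexpr size_t kBufferSlotStartOffset =
        ((kJumpTableSize + kThunkBufferSize - 1) / kThunkBufferSize) *
        kThunkBufferSize;

    // The rest of the single code space is divided into thunk slots, so
    // every thunk stays within near-call range of the jump table.
    constexpr uint32_t kAvailableBufferSlots =
        (kMaxWasmCodeSpaceSize - kJumpTableSize) / kThunkBufferSize;

    static_assert(kBufferSlotStartOffset >= kJumpTableSize,
                  "thunk slots must not overlap the jump table");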
......@@ -49,7 +49,7 @@ Address AllocateJumpTableThunk(
     std::vector<std::unique_ptr<TestingAssemblerBuffer>>* thunk_buffers) {
 #if V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_X64
   // To guarantee that the branch range lies within the near-call range,
-  // generate the thunk in the same (kMaxWasmCodeMemory-sized) buffer as the
+  // generate the thunk in the same (kMaxWasmCodeSpaceSize-sized) buffer as the
   // jump_target itself.
   //
   // Allocate a slot that we haven't already used. This is necessary because
......@@ -181,8 +181,9 @@ class JumpTablePatcher : public v8::base::Thread {
     // Then, repeatedly patch the jump table to jump to one of the two thunks.
     constexpr int kNumberOfPatchIterations = 64;
     for (int i = 0; i < kNumberOfPatchIterations; ++i) {
-      TRACE("  patcher %p patch slot " V8PRIxPTR_FMT " to thunk #%d\n", this,
-            slot_address, i % 2);
+      TRACE("  patcher %p patch slot " V8PRIxPTR_FMT
+            " to thunk #%d (" V8PRIxPTR_FMT ")\n",
+            this, slot_address, i % 2, thunks_[i % 2]);
       base::MutexGuard jump_table_guard(jump_table_mutex_);
       JumpTableAssembler::PatchJumpTableSlot(
           slot_start_ + JumpTableAssembler::JumpSlotIndexToOffset(slot_index_),
......@@ -220,11 +221,8 @@ TEST(JumpTablePatchingStress) {
   // is not reliable enough to guarantee that we can always achieve this with
   // separate allocations, so for Arm64 we generate all code in a single
   // kMaxMasmCodeMemory-sized chunk.
-  //
-  // TODO(wasm): Currently {kMaxWasmCodeMemory} limits code sufficiently, so
-  // that the jump table only supports {near_call} distances.
-  STATIC_ASSERT(kMaxWasmCodeMemory >= kJumpTableSize);
-  auto buffer = AllocateAssemblerBuffer(kMaxWasmCodeMemory);
+  STATIC_ASSERT(kMaxWasmCodeSpaceSize >= kJumpTableSize);
+  auto buffer = AllocateAssemblerBuffer(kMaxWasmCodeSpaceSize);
   byte* thunk_slot_buffer = buffer->start() + kBufferSlotStartOffset;
 #else
   auto buffer = AllocateAssemblerBuffer(kJumpTableSize);
......
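
Why a single kMaxWasmCodeSpaceSize-sized allocation is enough on arm64:
any two addresses inside one 128 MB buffer are mutually within the
128 MB near-call range. A sketch of that argument; the range check is
illustrative, not V8's actual reachability test:

    #include <cassert>
    #include <cstdint>

    constexpr intptr_t kMaxWasmCodeSpaceSize = 128 * 1024 * 1024;

    // Illustrative near-call reachability check (arm64: +-128 MB).
    bool WithinNearCallRange(uintptr_t from, uintptr_t to) {
      intptr_t distance =
          static_cast<intptr_t>(to) - static_cast<intptr_t>(from);
      return distance >= -kMaxWasmCodeSpaceSize &&
             distance < kMaxWasmCodeSpaceSize;
    }

    int main() {
      uintptr_t buffer_start = 0x10000000;  // assumed allocation base
      uintptr_t last_slot = buffer_start + kMaxWasmCodeSpaceSize - 4;
      // Jump table slots and thunks in the same buffer reach each other,
      // even from opposite ends of the allocation.
      assert(WithinNearCallRange(buffer_start, last_slot));
      assert(WithinNearCallRange(last_slot, buffer_start));
      return 0;
    }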