Commit 36243360 authored by Jacob Bramley, committed by Commit Bot

Make cctest/test-jump-table-assembler/JumpTablePatchingStress reliable on Arm64.

The test was flaky because it assumed that AllocateAssemblerBuffer
would eventually return an address within near-call range of the jump
table. Rarely, this did not happen within the retry limit, and so the
test would crash.

This fix allocates a single, kMaxWasmCodeMemory-sized buffer for the
test, and generates call sequences within that buffer.

BUG=v8:8245

Change-Id: I4b44d897c6cbda15a18ab992fa57805de3b2db29
Reviewed-on: https://chromium-review.googlesource.com/c/1347484
Reviewed-by: Michael Starzinger <mstarzinger@chromium.org>
Commit-Queue: Jacob Bramley <jacob.bramley@arm.com>
Cr-Commit-Position: refs/heads/master@{#57898}
parent 86a5d0c1
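
For context, the "near-call range" in the message above is a property of the
Arm64 branch-and-link encoding: bl stores a signed 26-bit offset counted in
4-byte instructions, so a near call can only reach targets within roughly
+/-128 MiB of the call site. A minimal sketch of such a range check follows
(standalone names, not verbatim V8 code; V8 expresses the 26-bit test with
its is_int26 helper):

#include <cstdint>

// Arm64 `bl` stores a signed 26-bit offset measured in 4-byte instructions,
// giving a reach of +/-2^25 instructions, i.e. +/-128 MiB from the call site.
constexpr int64_t kInstrSize = 4;

bool IsNearCallOffset(int64_t offset_in_instructions) {
  // The offset must fit in 26 signed bits.
  return offset_in_instructions >= -(int64_t{1} << 25) &&
         offset_in_instructions < (int64_t{1} << 25);
}

bool InNearCallRange(uintptr_t from, uintptr_t to) {
  int64_t byte_offset = static_cast<int64_t>(to) - static_cast<int64_t>(from);
  return IsNearCallOffset(byte_offset / kInstrSize);
}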
@@ -213,6 +213,8 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
     return rmode != RelocInfo::EXTERNAL_REFERENCE;
   }
 
+  static bool IsNearCallOffset(int64_t offset);
+
   // Activation support.
   void EnterFrame(StackFrame::Type type);
   void EnterFrame(StackFrame::Type type, bool load_constant_pool_pointer_reg) {
@@ -1270,7 +1272,6 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
   void LoadStorePairMacro(const CPURegister& rt, const CPURegister& rt2,
                           const MemOperand& addr, LoadStorePairOp op);
 
-  static bool IsNearCallOffset(int64_t offset);
   void JumpHelper(int64_t offset, RelocInfo::Mode rmode, Condition cond = al);
 
   void CallRecordWriteStub(Register object, Register address,
......
@@ -2,6 +2,8 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
+#include <bitset>
+
 #include "src/assembler-inl.h"
 #include "src/macro-assembler-inl.h"
 #include "src/simulator.h"
@@ -30,25 +32,50 @@ namespace {
 
 static volatile int global_stop_bit = 0;
 
-Address GenerateJumpTableThunk(Address jump_target) {
+constexpr int kJumpTableSlotCount = 128;
+constexpr uint32_t kJumpTableSize =
+    JumpTableAssembler::SizeForNumberOfSlots(kJumpTableSlotCount);
+
+#if V8_TARGET_ARCH_ARM64
+constexpr uint32_t kAvailableBufferSlots =
+    (kMaxWasmCodeMemory - kJumpTableSize) / AssemblerBase::kMinimalBufferSize;
+constexpr uint32_t kBufferSlotStartOffset =
+    RoundUp<AssemblerBase::kMinimalBufferSize>(kJumpTableSize);
+#else
+constexpr uint32_t kAvailableBufferSlots = 0;
+#endif
+
+Address GenerateJumpTableThunk(Address jump_target, byte* thunk_slot_buffer,
+                               std::bitset<kAvailableBufferSlots>* used_slots) {
   size_t allocated;
-  byte* buffer;
 #if V8_TARGET_ARCH_ARM64
-  // TODO(wasm): Currently {kMaxWasmCodeMemory} limits code sufficiently, so
-  // that the jump table only supports {near_call} distances.
-  const uintptr_t kThunkAddrMask = (1 << WhichPowerOf2(kMaxWasmCodeMemory)) - 1;
-  const int kArbitrarilyChosenRetryCount = 10;  // Retry to avoid flakes.
-  for (int retry = 0; retry < kArbitrarilyChosenRetryCount; ++retry) {
-    Address random_addr = reinterpret_cast<Address>(GetRandomMmapAddr());
-    void* address = reinterpret_cast<void*>((jump_target & ~kThunkAddrMask) |
-                                            (random_addr & kThunkAddrMask));
-    buffer = AllocateAssemblerBuffer(
-        &allocated, AssemblerBase::kMinimalBufferSize, address);
-    Address bufferptr = reinterpret_cast<uintptr_t>(buffer);
-    if ((bufferptr & ~kThunkAddrMask) == (jump_target & ~kThunkAddrMask)) break;
-  }
+  // To guarantee that the branch range lies within the near-call range,
+  // generate the thunk in the same (kMaxWasmCodeMemory-sized) buffer as the
+  // jump_target itself.
+  //
+  // Allocate a slot that we haven't already used. This is necessary because
+  // each test iteration expects to generate two unique addresses and we leave
+  // each slot executable (and not writable).
+  base::RandomNumberGenerator* rng =
+      CcTest::i_isolate()->random_number_generator();
+  // Ensure a chance of completion without too much thrashing.
+  DCHECK(used_slots->count() < (used_slots->size() / 2));
+  int buffer_index;
+  do {
+    buffer_index = rng->NextInt(kAvailableBufferSlots);
+  } while (used_slots->test(buffer_index));
+  used_slots->set(buffer_index);
+  byte* buffer =
+      thunk_slot_buffer + buffer_index * AssemblerBase::kMinimalBufferSize;
+
+  DCHECK(TurboAssembler::IsNearCallOffset(
+      (reinterpret_cast<byte*>(jump_target) - buffer) / kInstrSize));
+
+  allocated = AssemblerBase::kMinimalBufferSize;
 #else
-  buffer = AllocateAssemblerBuffer(
+  USE(thunk_slot_buffer);
+  USE(used_slots);
+  byte* buffer = AllocateAssemblerBuffer(
       &allocated, AssemblerBase::kMinimalBufferSize, GetRandomMmapAddr());
 #endif
 
   MacroAssembler masm(nullptr, AssemblerOptions{}, buffer,
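
The slot allocation in the new GenerateJumpTableThunk is rejection sampling
over a bitset: draw random slot indices until an unused one turns up, with
the DCHECK keeping the set less than half full so the expected number of
draws stays under two. A self-contained sketch of the same pattern, using
std::mt19937 in place of V8's base::RandomNumberGenerator:

#include <bitset>
#include <cassert>
#include <random>

// Pick a random slot that has not been used yet, then mark it used. Keeping
// the set under half full bounds the expected number of draws below two.
template <size_t N>
int PickUnusedSlot(std::bitset<N>* used, std::mt19937* rng) {
  assert(used->count() < N / 2);  // Ensure sampling terminates quickly.
  std::uniform_int_distribution<int> dist(0, static_cast<int>(N) - 1);
  int index;
  do {
    index = dist(*rng);
  } while (used->test(index));
  used->set(index);
  return index;
}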
@@ -151,13 +178,26 @@ class JumpTablePatcher : public v8::base::Thread {
 // forth between two thunks. If there is a race then chances are high that
 // one of the runners is currently executing the jump-table slot.
 TEST(JumpTablePatchingStress) {
-  constexpr int kJumpTableSlotCount = 128;
   constexpr int kNumberOfRunnerThreads = 5;
 
   size_t allocated;
-  byte* buffer = AllocateAssemblerBuffer(
-      &allocated,
-      JumpTableAssembler::SizeForNumberOfSlots(kJumpTableSlotCount));
+#if V8_TARGET_ARCH_ARM64
+  // We need the branches (from GenerateJumpTableThunk) to be within near-call
+  // range of the jump table slots. The address hint to AllocateAssemblerBuffer
+  // is not reliable enough to guarantee that we can always achieve this with
+  // separate allocations, so for Arm64 we generate all code in a single
+  // kMaxWasmCodeMemory-sized chunk.
+  //
+  // TODO(wasm): Currently {kMaxWasmCodeMemory} limits code sufficiently, so
+  // that the jump table only supports {near_call} distances.
+  STATIC_ASSERT(kMaxWasmCodeMemory >= kJumpTableSize);
+  byte* buffer = AllocateAssemblerBuffer(&allocated, kMaxWasmCodeMemory);
+  byte* thunk_slot_buffer = buffer + kBufferSlotStartOffset;
+#else
+  byte* buffer = AllocateAssemblerBuffer(&allocated, kJumpTableSize);
+  byte* thunk_slot_buffer = nullptr;
+#endif
+
+  std::bitset<kAvailableBufferSlots> used_thunk_slots;
+
   MakeAssemblerBufferWritableAndExecutable(buffer, allocated);
 
   // Iterate through jump-table slots to hammer at different alignments within
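
With everything in one allocation, the buffer layout is: jump table at
offset 0, then thunk slots starting at the first kMinimalBufferSize-aligned
offset past the table. A sketch of the layout arithmetic with hypothetical
stand-in values (the real constants live in the V8 tree; these numbers are
for illustration only):

#include <cstdint>
#include <cstdio>

// Hypothetical stand-ins for V8's constants, chosen only to demonstrate the
// layout arithmetic; they do not come from the V8 source.
constexpr uint32_t kMaxWasmCodeMemory = 128 * 1024 * 1024;  // assumption
constexpr uint32_t kMinimalBufferSize = 4 * 1024;           // assumption
constexpr uint32_t kJumpTableSize = 128 * 8;                // assumption

constexpr uint32_t RoundUpTo(uint32_t x, uint32_t multiple) {
  return ((x + multiple - 1) / multiple) * multiple;
}

// First thunk slot begins at the first aligned offset past the jump table.
constexpr uint32_t kBufferSlotStartOffset =
    RoundUpTo(kJumpTableSize, kMinimalBufferSize);
// Same formula as the patch: remaining space divided into fixed-size slots.
constexpr uint32_t kAvailableBufferSlots =
    (kMaxWasmCodeMemory - kJumpTableSize) / kMinimalBufferSize;

int main() {
  // Thunk slot i starts at kBufferSlotStartOffset + i * kMinimalBufferSize.
  std::printf("first slot offset: %u, available slots: %u\n",
              kBufferSlotStartOffset, kAvailableBufferSlots);
}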
@@ -166,8 +206,10 @@ TEST(JumpTablePatchingStress) {
   for (int slot = 0; slot < kJumpTableSlotCount; ++slot) {
     TRACE("Hammering on jump table slot #%d ...\n", slot);
     uint32_t slot_offset = JumpTableAssembler::SlotIndexToOffset(slot);
-    Address thunk1 = GenerateJumpTableThunk(slot_start + slot_offset);
-    Address thunk2 = GenerateJumpTableThunk(slot_start + slot_offset);
+    Address thunk1 = GenerateJumpTableThunk(
+        slot_start + slot_offset, thunk_slot_buffer, &used_thunk_slots);
+    Address thunk2 = GenerateJumpTableThunk(
+        slot_start + slot_offset, thunk_slot_buffer, &used_thunk_slots);
     TRACE("  generated thunk1: " V8PRIxPTR_FMT "\n", thunk1);
     TRACE("  generated thunk2: " V8PRIxPTR_FMT "\n", thunk2);
     JumpTableAssembler::PatchJumpTableSlot(slot_start, slot, thunk1,
......
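
The test comment above describes the overall shape: one patcher thread flips
a jump-table slot back and forth between two thunks while several runner
threads execute through the slot. As a rough analogy only (an atomic function
pointer stands in for V8's instruction patching), the stress pattern is:

#include <atomic>
#include <thread>
#include <vector>

std::atomic<void (*)()> slot{nullptr};
std::atomic<bool> stop{false};

void ThunkA() {}
void ThunkB() {}

int main() {
  slot.store(&ThunkA);
  std::vector<std::thread> runners;
  for (int i = 0; i < 5; ++i) {
    // Runners keep calling through the slot, like the jump-table runners
    // keep executing whichever thunk is currently patched in.
    runners.emplace_back([] {
      while (!stop.load(std::memory_order_relaxed)) slot.load()();
    });
  }
  // The "patcher" flips the slot back and forth between the two thunks.
  for (int i = 0; i < 100000; ++i) {
    slot.store((i & 1) ? &ThunkB : &ThunkA);
  }
  stop.store(true);
  for (auto& t : runners) t.join();
}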