Commit adba0512 authored by Clemens Backes, committed by V8 LUCI CQ

[wasm] Remove low-level test-only functions

This removes the low-level {SwitchMemoryPermissionsToWritable()} and
{SwitchMemoryPermissionsToExecutable()} functions. They are only used in
tests and can be replaced by {CodeSpaceWriteScope} objects that we also
use in production.

R=jkummerow@chromium.org

Change-Id: I7ba702c836f3ac2dd7c7a81d6362040b28e8bef4
Cq-Include-Trybots: luci.v8.try:v8_mac_arm64_rel_ng
Cq-Include-Trybots: luci.v8.try:v8_mac_arm64_dbg_ng
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3024150
Commit-Queue: Clemens Backes <clemensb@chromium.org>
Reviewed-by: Jakob Kummerow <jkummerow@chromium.org>
Cr-Commit-Position: refs/heads/master@{#75739}
parent a1147408
......@@ -32,13 +32,18 @@ CodeSpaceWriteScope::~CodeSpaceWriteScope() {
#if defined(V8_OS_MACOSX) && defined(V8_HOST_ARCH_ARM64)
// Ignoring this warning is considered better than relying on
// __builtin_available.
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wunguarded-availability-new"
void CodeSpaceWriteScope::SetWritable() const {
SwitchMemoryPermissionsToWritable();
pthread_jit_write_protect_np(0);
}
void CodeSpaceWriteScope::SetExecutable() const {
SwitchMemoryPermissionsToExecutable();
pthread_jit_write_protect_np(1);
}
#pragma clang diagnostic pop
#else // Not Mac-on-arm64.
......
......@@ -69,32 +69,6 @@ class V8_NODISCARD CodeSpaceWriteScope final {
};
} // namespace wasm
#if defined(V8_OS_MACOSX) && defined(V8_HOST_ARCH_ARM64)
// Low-level API for switching MAP_JIT pages between writable and executable.
// TODO(wasm): Access to these functions is only needed in tests. Remove?
// Ignoring this warning is considered better than relying on
// __builtin_available.
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wunguarded-availability-new"
inline void SwitchMemoryPermissionsToWritable() {
pthread_jit_write_protect_np(0);
}
inline void SwitchMemoryPermissionsToExecutable() {
pthread_jit_write_protect_np(1);
}
#pragma clang diagnostic pop
#else // Not Mac-on-arm64.
// Nothing to do, we map code memory with rwx permissions.
inline void SwitchMemoryPermissionsToWritable() {}
inline void SwitchMemoryPermissionsToExecutable() {}
#endif // defined(V8_OS_MACOSX) && defined(V8_HOST_ARCH_ARM64)
} // namespace internal
} // namespace v8
......
......@@ -189,15 +189,23 @@ TEST(TestFlushICacheOfWritableAndExecutable) {
CHECK(SetPermissions(GetPlatformPageAllocator(), buffer->start(),
buffer->size(), v8::PageAllocator::kReadWriteExecute));
SwitchMemoryPermissionsToWritable();
FloodWithInc(isolate, buffer.get());
FlushInstructionCache(buffer->start(), buffer->size());
SwitchMemoryPermissionsToExecutable();
{
#if defined(V8_OS_MACOSX) && defined(V8_HOST_ARCH_ARM64)
// Make sure to switch memory to writable on M1 hardware.
wasm::CodeSpaceWriteScope code_space_write_scope(nullptr);
#endif
FloodWithInc(isolate, buffer.get());
FlushInstructionCache(buffer->start(), buffer->size());
}
CHECK_EQ(23 + kNumInstr, f.Call(23)); // Call into generated code.
SwitchMemoryPermissionsToWritable();
FloodWithNop(isolate, buffer.get());
FlushInstructionCache(buffer->start(), buffer->size());
SwitchMemoryPermissionsToExecutable();
{
#if defined(V8_OS_MACOSX) && defined(V8_HOST_ARCH_ARM64)
// Make sure to switch memory to writable on M1 hardware.
wasm::CodeSpaceWriteScope code_space_write_scope(nullptr);
#endif
FloodWithNop(isolate, buffer.get());
FlushInstructionCache(buffer->start(), buffer->size());
}
CHECK_EQ(23, f.Call(23)); // Call into generated code.
}
}
......
......@@ -159,8 +159,8 @@ void CompileJumpTableThunk(Address thunk, Address jump_target) {
FlushInstructionCache(thunk, kThunkBufferSize);
#if defined(V8_OS_MACOSX) && defined(V8_HOST_ARCH_ARM64)
// MacOS on arm64 refuses {mprotect} calls to toggle permissions of RWX
// memory. Simply do nothing here, and rely on
// {SwitchMemoryPermissionsToExecutable} in the JumpTableRunner.
// memory. Simply do nothing here, as the space will by default be executable
// and non-writable for the JumpTableRunner.
#else
CHECK(SetPermissions(GetPlatformPageAllocator(), thunk, kThunkBufferSize,
v8::PageAllocator::kReadExecute));
......@@ -176,7 +176,6 @@ class JumpTableRunner : public v8::base::Thread {
void Run() override {
TRACE("Runner #%d is starting ...\n", runner_id_);
SwitchMemoryPermissionsToExecutable();
GeneratedCode<void>::FromAddress(CcTest::i_isolate(), slot_address_).Call();
TRACE("Runner #%d is stopping ...\n", runner_id_);
USE(runner_id_);
......@@ -199,7 +198,10 @@ class JumpTablePatcher : public v8::base::Thread {
void Run() override {
TRACE("Patcher %p is starting ...\n", this);
SwitchMemoryPermissionsToWritable();
#if defined(V8_OS_MACOSX) && defined(V8_HOST_ARCH_ARM64)
// Make sure to switch memory to writable on M1 hardware.
CodeSpaceWriteScope code_space_write_scope(nullptr);
#endif
Address slot_address =
slot_start_ + JumpTableAssembler::JumpSlotIndexToOffset(slot_index_);
// First, emit code to the two thunks.
......@@ -250,7 +252,6 @@ TEST(JumpTablePatchingStress) {
std::bitset<kAvailableBufferSlots> used_thunk_slots;
buffer->MakeWritableAndExecutable();
SwitchMemoryPermissionsToWritable();
// Iterate through jump-table slots to hammer at different alignments within
// the jump-table, thereby increasing stress for variable-length ISAs.
......@@ -259,22 +260,29 @@ TEST(JumpTablePatchingStress) {
TRACE("Hammering on jump table slot #%d ...\n", slot);
uint32_t slot_offset = JumpTableAssembler::JumpSlotIndexToOffset(slot);
std::vector<std::unique_ptr<TestingAssemblerBuffer>> thunk_buffers;
// Patch the jump table slot to jump to itself. This will later be patched
// by the patchers.
Address slot_addr =
slot_start + JumpTableAssembler::JumpSlotIndexToOffset(slot);
JumpTableAssembler::PatchJumpTableSlot(slot_addr, kNullAddress, slot_addr);
// For each patcher, generate two thunks where this patcher can emit code
// which finally jumps back to {slot} in the jump table.
std::vector<Address> patcher_thunks;
for (int i = 0; i < 2 * kNumberOfPatcherThreads; ++i) {
Address thunk =
AllocateJumpTableThunk(slot_start + slot_offset, thunk_slot_buffer,
&used_thunk_slots, &thunk_buffers);
ZapCode(thunk, kThunkBufferSize);
patcher_thunks.push_back(thunk);
TRACE(" generated jump thunk: " V8PRIxPTR_FMT "\n",
patcher_thunks.back());
{
#if defined(V8_OS_MACOSX) && defined(V8_HOST_ARCH_ARM64)
// Make sure to switch memory to writable on M1 hardware.
CodeSpaceWriteScope code_space_write_scope(nullptr);
#endif
// Patch the jump table slot to jump to itself. This will later be patched
// by the patchers.
Address slot_addr =
slot_start + JumpTableAssembler::JumpSlotIndexToOffset(slot);
JumpTableAssembler::PatchJumpTableSlot(slot_addr, kNullAddress,
slot_addr);
// For each patcher, generate two thunks where this patcher can emit code
// which finally jumps back to {slot} in the jump table.
for (int i = 0; i < 2 * kNumberOfPatcherThreads; ++i) {
Address thunk =
AllocateJumpTableThunk(slot_start + slot_offset, thunk_slot_buffer,
&used_thunk_slots, &thunk_buffers);
ZapCode(thunk, kThunkBufferSize);
patcher_thunks.push_back(thunk);
TRACE(" generated jump thunk: " V8PRIxPTR_FMT "\n",
patcher_thunks.back());
}
}
// Start multiple runner threads that execute the jump table slot
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment