Commit 7579b1e3 authored by Michael Starzinger, committed by Commit Bot

[wasm] Support concurrent patching of jump table.

This adds initial support for concurrently patching jump table slots. It
is needed once different Isolates share code (for the --wasm-shared-code
feature). We need to ensure that instructions holding the target address
within a jump table slot do not cross cache-line boundaries. To do this,
the jump table has been split into consecutive lines.

Note that this also adds a stress test for multiple threads hammering at
a single slot concurrently. The test is currently limited to the ia32
and x64 architectures, but will be extended to cover others. It
triggers tearing of the target address on almost every run and hence
serves to prevent regressions.

R=clemensh@chromium.org
TEST=cctest/test-jump-table-assembler
BUG=v8:8018

Change-Id: Ife56bbb61ffcae5d8906ca7b8c604b195603707c
Reviewed-on: https://chromium-review.googlesource.com/1163664
Commit-Queue: Michael Starzinger <mstarzinger@chromium.org>
Reviewed-by: Clemens Hammacher <clemensh@chromium.org>
Cr-Commit-Position: refs/heads/master@{#54942}
parent 125b8a45
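
For intuition about the cache-line constraint described above, here is a minimal standalone sketch (not V8 code) of the line-splitting arithmetic this change introduces, using the x64 constants from the diff below. The static_asserts check that no slot ever straddles a cache line:

#include <cstdint>

// x64 constants from this change: a 64-byte cache line holds three 18-byte
// slots plus 10 bytes of padding; slot 3 then starts on the next line.
constexpr uint32_t kJumpTableLineSize = 64;
constexpr uint32_t kJumpTableSlotSize = 18;
constexpr uint32_t kJumpTableSlotsPerLine =
    kJumpTableLineSize / kJumpTableSlotSize;  // == 3

constexpr uint32_t SlotIndexToOffset(uint32_t slot_index) {
  return (slot_index / kJumpTableSlotsPerLine) * kJumpTableLineSize +
         (slot_index % kJumpTableSlotsPerLine) * kJumpTableSlotSize;
}

// A slot must not straddle a cache-line boundary: its first and last byte
// have to fall on the same line.
constexpr bool SlotStaysOnOneLine(uint32_t slot_index) {
  uint32_t begin = SlotIndexToOffset(slot_index);
  uint32_t end = begin + kJumpTableSlotSize - 1;
  return begin / kJumpTableLineSize == end / kJumpTableLineSize;
}

static_assert(SlotIndexToOffset(2) == 36, "third slot of line 0");
static_assert(SlotIndexToOffset(3) == 64, "fourth slot starts line 1");
static_assert(SlotStaysOnOneLine(0) && SlotStaysOnOneLine(2) &&
                  SlotStaysOnOneLine(3) && SlotStaysOnOneLine(1000),
              "slots never cross cache-line boundaries");

int main() { return 0; }  // compiles with -std=c++14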
@@ -4344,6 +4344,16 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
     return function_index;
   }
 
+  Node* BuildLoadJumpTableOffsetFromExportedFunctionData(Node* function_data) {
+    Node* jump_table_offset_smi = SetEffect(graph()->NewNode(
+        jsgraph()->machine()->Load(MachineType::AnyTagged()), function_data,
+        jsgraph()->Int32Constant(
+            WasmExportedFunctionData::kJumpTableOffsetOffset - kHeapObjectTag),
+        Effect(), Control()));
+    Node* jump_table_offset = BuildChangeSmiToInt32(jump_table_offset_smi);
+    return jump_table_offset;
+  }
+
   void BuildJSToWasmWrapper(bool is_import) {
     const int wasm_count = static_cast<int>(sig_->parameter_count());
@@ -4392,28 +4402,22 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
     // Set the ThreadInWasm flag before we do the actual call.
     BuildModifyThreadInWasmFlag(true);
 
-    // Load function index from {WasmExportedFunctionData}.
-    Node* function_index =
-        BuildLoadFunctionIndexFromExportedFunctionData(function_data);
-
     if (is_import) {
       // Call to an imported function.
+      // Load function index from {WasmExportedFunctionData}.
+      Node* function_index =
+          BuildLoadFunctionIndexFromExportedFunctionData(function_data);
       BuildImportWasmCall(sig_, args, &rets, wasm::kNoCodePosition,
                           function_index);
     } else {
       // Call to a wasm function defined in this module.
-      // The call target is the jump table slot for that function. This is
-      // {jump_table + (func_index - num_imports) * kJumpTableSlotSize}
-      // == {jump_table_adjusted + func_index * kJumpTableSlotSize}.
-      Node* jump_table_adjusted =
-          LOAD_INSTANCE_FIELD(JumpTableAdjustedStart, MachineType::Pointer());
-      Node* jump_table_offset = graph()->NewNode(
-          mcgraph()->machine()->IntMul(), Uint32ToUintptr(function_index),
-          mcgraph()->IntPtrConstant(
-              wasm::JumpTableAssembler::kJumpTableSlotSize));
-      Node* jump_table_slot =
-          graph()->NewNode(mcgraph()->machine()->IntAdd(), jump_table_adjusted,
-                           jump_table_offset);
+      // The call target is the jump table slot for that function.
+      Node* jump_table_start =
+          LOAD_INSTANCE_FIELD(JumpTableStart, MachineType::Pointer());
+      Node* jump_table_offset =
+          BuildLoadJumpTableOffsetFromExportedFunctionData(function_data);
+      Node* jump_table_slot = graph()->NewNode(
+          mcgraph()->machine()->IntAdd(), jump_table_start, jump_table_offset);
       args[0] = jump_table_slot;
       BuildWasmCall(sig_, args, &rets, wasm::kNoCodePosition, nullptr,
......
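
Schematically, the hunk above replaces a scaled-index computation with a cached offset; the names below are ours, not V8's (a before/after sketch, assuming the x64 constants):

#include <cstdint>

constexpr uint32_t kSlotSize = 18;  // x64 slot size used by both schemes

// Old scheme: scale the function index at call time. This is only correct
// while slot offsets are exactly {index * kSlotSize}, i.e. densely packed.
uintptr_t OldCallTarget(uintptr_t jump_table_adjusted_start,
                        uint32_t func_index) {
  return jump_table_adjusted_start + func_index * kSlotSize;
}

// New scheme: the line-aware offset is computed once when the export is
// created and cached in {WasmExportedFunctionData}; the wrapper just adds it.
uintptr_t NewCallTarget(uintptr_t jump_table_start,
                        uint32_t cached_jump_table_offset) {
  return jump_table_start + cached_jump_table_offset;
}

With three 18-byte slots per 64-byte line, slot 3 lives at offset 64 rather than 54, so the scaled-index formula no longer holds once the table is split into lines.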
@@ -1664,6 +1664,7 @@ void WasmExportedFunctionData::WasmExportedFunctionDataVerify(
   CHECK(wrapper_code()->kind() == Code::JS_TO_WASM_FUNCTION ||
         wrapper_code()->kind() == Code::C_WASM_ENTRY);
   VerifyObjectField(isolate, kInstanceOffset);
+  VerifySmiField(kJumpTableOffsetOffset);
   VerifySmiField(kFunctionIndexOffset);
 }
......
@@ -12,8 +12,75 @@ namespace v8 {
 namespace internal {
 namespace wasm {
 
+// The jump table is the central dispatch point for all (direct and indirect)
+// invocations in WebAssembly. It holds one slot per function in a module, with
+// each slot containing a dispatch to the currently published {WasmCode} that
+// corresponds to the function.
+//
+// Note that the table is split into lines of fixed size, with lines laid out
+// consecutively within the executable memory of the {NativeModule}. The slots
+// in turn are consecutive within a line, but do not cross line boundaries.
+//
+//   +- L1 -------------------+ +- L2 -------------------+ +- L3 ...
+//   | S1 | S2 | ... | Sn | x | | S1 | S2 | ... | Sn | x | | S1  ...
+//   +------------------------+ +------------------------+ +---- ...
+//
+// The above illustrates jump table lines {Li} containing slots {Si}, with
+// each line containing {n} slots and some padding {x} for alignment purposes.
 class JumpTableAssembler : public TurboAssembler {
  public:
+  // Translate an offset into the continuous jump table to a jump table index.
+  static uint32_t SlotOffsetToIndex(uint32_t slot_offset) {
+    uint32_t line_index = slot_offset / kJumpTableLineSize;
+    uint32_t line_offset = slot_offset % kJumpTableLineSize;
+    DCHECK_EQ(0, line_offset % kJumpTableSlotSize);
+    return line_index * kJumpTableSlotsPerLine +
+           line_offset / kJumpTableSlotSize;
+  }
+
+  // Translate a jump table index to an offset into the continuous jump table.
+  static uint32_t SlotIndexToOffset(uint32_t slot_index) {
+    uint32_t line_index = slot_index / kJumpTableSlotsPerLine;
+    uint32_t line_offset =
+        (slot_index % kJumpTableSlotsPerLine) * kJumpTableSlotSize;
+    return line_index * kJumpTableLineSize + line_offset;
+  }
+
+  // Determine the size of a jump table containing the given number of slots.
+  static constexpr uint32_t SizeForNumberOfSlots(uint32_t slot_count) {
+    // TODO(wasm): Once the {RoundUp} utility handles non-powers of two values,
+    // use: {RoundUp<kJumpTableSlotsPerLine>(slot_count) * kJumpTableLineSize}
+    return ((slot_count + kJumpTableSlotsPerLine - 1) /
+            kJumpTableSlotsPerLine) *
+           kJumpTableLineSize;
+  }
+
+  static void EmitLazyCompileJumpSlot(Address base, uint32_t slot_index,
+                                      uint32_t func_index,
+                                      Address lazy_compile_target,
+                                      WasmCode::FlushICache flush_i_cache) {
+    Address slot = base + SlotIndexToOffset(slot_index);
+    JumpTableAssembler jtasm(slot);
+    jtasm.EmitLazyCompileJumpSlot(func_index, lazy_compile_target);
+    jtasm.NopBytes(kJumpTableSlotSize - jtasm.pc_offset());
+    if (flush_i_cache) {
+      Assembler::FlushICache(slot, kJumpTableSlotSize);
+    }
+  }
+
+  static void PatchJumpTableSlot(Address base, uint32_t slot_index,
+                                 Address new_target,
+                                 WasmCode::FlushICache flush_i_cache) {
+    Address slot = base + SlotIndexToOffset(slot_index);
+    JumpTableAssembler jtasm(slot);
+    jtasm.EmitJumpSlot(new_target);
+    jtasm.NopBytes(kJumpTableSlotSize - jtasm.pc_offset());
+    if (flush_i_cache) {
+      Assembler::FlushICache(slot, kJumpTableSlotSize);
+    }
+  }
+
+ private:
   // {JumpTableAssembler} is never used during snapshot generation, and its code
   // must be independent of the code range of any isolate anyway. So just use
   // this default {Options} for each {JumpTableAssembler}.
@@ -27,72 +94,47 @@ class JumpTableAssembler : public TurboAssembler {
                        reinterpret_cast<void*>(slot_addr), size,
                        CodeObjectRequired::kNo) {}
 
-  // To allow concurrent patching of the jump table entries we need to ensure
-  // that slots do not cross cache-line boundaries. Hence translation between
-  // slot offsets and index is encapsulated in the following methods.
-  static uint32_t SlotOffsetToIndex(uint32_t slot_offset) {
-    DCHECK_EQ(0, slot_offset % kJumpTableSlotSize);
-    return slot_offset / kJumpTableSlotSize;
-  }
-  static uint32_t SlotIndexToOffset(uint32_t slot_index) {
-    return slot_index * kJumpTableSlotSize;
-  }
-
-  // Determine the size of a jump table containing the given number of slots.
-  static size_t SizeForNumberOfSlots(uint32_t slot_count) {
-    return slot_count * kJumpTableSlotSize;
-  }
-
+  // To allow concurrent patching of the jump table entries, we need to ensure
+  // that the instruction containing the call target does not cross cache-line
+  // boundaries. The jump table line size has been chosen to satisfy this.
 #if V8_TARGET_ARCH_X64
+  static constexpr int kJumpTableLineSize = 64;
   static constexpr int kJumpTableSlotSize = 18;
 #elif V8_TARGET_ARCH_IA32
+  static constexpr int kJumpTableLineSize = 64;
   static constexpr int kJumpTableSlotSize = 10;
 #elif V8_TARGET_ARCH_ARM
+  static constexpr int kJumpTableLineSize = 5 * kInstrSize;
   static constexpr int kJumpTableSlotSize = 5 * kInstrSize;
 #elif V8_TARGET_ARCH_ARM64
+  static constexpr int kJumpTableLineSize = 3 * kInstructionSize;
   static constexpr int kJumpTableSlotSize = 3 * kInstructionSize;
 #elif V8_TARGET_ARCH_S390X
+  static constexpr int kJumpTableLineSize = 20;
   static constexpr int kJumpTableSlotSize = 20;
 #elif V8_TARGET_ARCH_S390
+  static constexpr int kJumpTableLineSize = 14;
   static constexpr int kJumpTableSlotSize = 14;
 #elif V8_TARGET_ARCH_PPC64
+  static constexpr int kJumpTableLineSize = 48;
   static constexpr int kJumpTableSlotSize = 48;
 #elif V8_TARGET_ARCH_PPC
+  static constexpr int kJumpTableLineSize = 24;
   static constexpr int kJumpTableSlotSize = 24;
 #elif V8_TARGET_ARCH_MIPS
+  static constexpr int kJumpTableLineSize = 6 * kInstrSize;
   static constexpr int kJumpTableSlotSize = 6 * kInstrSize;
 #elif V8_TARGET_ARCH_MIPS64
+  static constexpr int kJumpTableLineSize = 8 * kInstrSize;
   static constexpr int kJumpTableSlotSize = 8 * kInstrSize;
 #else
+  static constexpr int kJumpTableLineSize = 1;
   static constexpr int kJumpTableSlotSize = 1;
 #endif
 
-  static void EmitLazyCompileJumpSlot(Address base, uint32_t slot_index,
-                                      uint32_t func_index,
-                                      Address lazy_compile_target,
-                                      WasmCode::FlushICache flush_i_cache) {
-    Address slot = base + SlotIndexToOffset(slot_index);
-    JumpTableAssembler jtasm(slot);
-    jtasm.EmitLazyCompileJumpSlot(func_index, lazy_compile_target);
-    jtasm.NopBytes(kJumpTableSlotSize - jtasm.pc_offset());
-    if (flush_i_cache) {
-      Assembler::FlushICache(slot, kJumpTableSlotSize);
-    }
-  }
-
-  static void PatchJumpTableSlot(Address base, uint32_t slot_index,
-                                 Address new_target,
-                                 WasmCode::FlushICache flush_i_cache) {
-    Address slot = base + SlotIndexToOffset(slot_index);
-    JumpTableAssembler jtasm(slot);
-    jtasm.EmitJumpSlot(new_target);
-    jtasm.NopBytes(kJumpTableSlotSize - jtasm.pc_offset());
-    if (flush_i_cache) {
-      Assembler::FlushICache(slot, kJumpTableSlotSize);
-    }
-  }
+  static constexpr int kJumpTableSlotsPerLine =
+      kJumpTableLineSize / kJumpTableSlotSize;
 
- private:
   void EmitLazyCompileJumpSlot(uint32_t func_index,
                                Address lazy_compile_target);
......
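
A quick property check one could compile against the two translation helpers above, reimplemented standalone with the x64 constants (an illustrative sketch, not part of the change): the two functions are inverses for every valid slot index, even though every line ends in padding bytes.

#include <cassert>
#include <cstdint>

constexpr uint32_t kLine = 64, kSlot = 18, kPerLine = kLine / kSlot;

uint32_t SlotIndexToOffset(uint32_t slot_index) {
  return (slot_index / kPerLine) * kLine + (slot_index % kPerLine) * kSlot;
}

uint32_t SlotOffsetToIndex(uint32_t slot_offset) {
  uint32_t line_index = slot_offset / kLine;
  uint32_t line_offset = slot_offset % kLine;
  assert(line_offset % kSlot == 0);  // mirrors the DCHECK in the header
  return line_index * kPerLine + line_offset / kSlot;
}

int main() {
  // Round-trip over many slot indices: padding never breaks the mapping.
  for (uint32_t i = 0; i < 10000; ++i) {
    assert(SlotOffsetToIndex(SlotIndexToOffset(i)) == i);
  }
  return 0;
}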
@@ -292,6 +292,11 @@ class V8_EXPORT_PRIVATE NativeModule final {
     return jump_table_ ? jump_table_->instruction_start() : kNullAddress;
   }
 
+  ptrdiff_t jump_table_offset(uint32_t func_index) const {
+    DCHECK_GE(func_index, num_imported_functions());
+    return GetCallTargetForFunction(func_index) - jump_table_start();
+  }
+
   bool is_jump_table_slot(Address address) const {
     return jump_table_->contains(address);
   }
......
@@ -727,6 +727,7 @@ Handle<JSFunction> WasmDebugInfo::GetCWasmEntry(
           WASM_EXPORTED_FUNCTION_DATA_TYPE, TENURED));
   function_data->set_wrapper_code(*new_entry_code);
   function_data->set_instance(debug_info->wasm_instance());
+  function_data->set_jump_table_offset(-1);
   function_data->set_function_index(-1);
   Handle<String> name = isolate->factory()->InternalizeOneByteString(
       STATIC_CHAR_VECTOR("c-wasm-entry"));
......
@@ -154,8 +154,8 @@ PRIMITIVE_ACCESSORS(WasmInstanceObject, indirect_function_table_sig_ids,
                     uint32_t*, kIndirectFunctionTableSigIdsOffset)
 PRIMITIVE_ACCESSORS(WasmInstanceObject, indirect_function_table_targets,
                     Address*, kIndirectFunctionTableTargetsOffset)
-PRIMITIVE_ACCESSORS(WasmInstanceObject, jump_table_adjusted_start, Address,
-                    kJumpTableAdjustedStartOffset)
+PRIMITIVE_ACCESSORS(WasmInstanceObject, jump_table_start, Address,
+                    kJumpTableStartOffset)
 ACCESSORS(WasmInstanceObject, module_object, WasmModuleObject,
           kModuleObjectOffset)
@@ -205,6 +205,8 @@ ImportedFunctionEntry::ImportedFunctionEntry(
 ACCESSORS(WasmExportedFunctionData, wrapper_code, Code, kWrapperCodeOffset)
 ACCESSORS(WasmExportedFunctionData, instance, WasmInstanceObject,
           kInstanceOffset)
+SMI_ACCESSORS(WasmExportedFunctionData, jump_table_offset,
+              kJumpTableOffsetOffset)
 SMI_ACCESSORS(WasmExportedFunctionData, function_index, kFunctionIndexOffset)
 
 // WasmDebugInfo
......
@@ -1278,10 +1278,8 @@ Handle<WasmInstanceObject> WasmInstanceObject::New(
   instance->set_module_object(*module_object);
   instance->set_undefined_value(ReadOnlyRoots(isolate).undefined_value());
   instance->set_null_value(ReadOnlyRoots(isolate).null_value());
-  instance->set_jump_table_adjusted_start(
-      module_object->native_module()->jump_table_start() -
-      wasm::JumpTableAssembler::kJumpTableSlotSize *
-          module->num_imported_functions);
+  instance->set_jump_table_start(
+      module_object->native_module()->jump_table_start());
 
   // Insert the new instance into the modules weak list of instances.
   // TODO(mstarzinger): Allow to reuse holes in the {WeakArrayList} below.
@@ -1366,11 +1364,21 @@ Handle<WasmExportedFunction> WasmExportedFunction::New(
     MaybeHandle<String> maybe_name, int func_index, int arity,
     Handle<Code> export_wrapper) {
   DCHECK_EQ(Code::JS_TO_WASM_FUNCTION, export_wrapper->kind());
+  int num_imported_functions = instance->module()->num_imported_functions;
+  int jump_table_offset = -1;
+  if (func_index >= num_imported_functions) {
+    ptrdiff_t jump_table_diff =
+        instance->module_object()->native_module()->jump_table_offset(
+            func_index);
+    DCHECK(jump_table_diff >= 0 && jump_table_diff <= INT_MAX);
+    jump_table_offset = static_cast<int>(jump_table_diff);
+  }
   Handle<WasmExportedFunctionData> function_data =
       Handle<WasmExportedFunctionData>::cast(isolate->factory()->NewStruct(
           WASM_EXPORTED_FUNCTION_DATA_TYPE, TENURED));
   function_data->set_wrapper_code(*export_wrapper);
   function_data->set_instance(*instance);
+  function_data->set_jump_table_offset(jump_table_offset);
   function_data->set_function_index(func_index);
   Handle<String> name;
   if (!maybe_name.ToHandle(&name)) {
......
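
The hunk above caches the offset at export-creation time: imported functions have no jump-table slot and store -1, while defined functions store the line-aware offset of their slot. A standalone sketch of that rule, with hypothetical names ({ComputeJumpTableOffset} is ours, not V8's):

#include <cassert>
#include <cstdint>

constexpr uint32_t kLine = 64, kSlot = 18, kPerLine = kLine / kSlot;  // x64

constexpr uint32_t SlotIndexToOffset(uint32_t slot_index) {
  return (slot_index / kPerLine) * kLine + (slot_index % kPerLine) * kSlot;
}

int ComputeJumpTableOffset(int func_index, int num_imported_functions) {
  if (func_index < num_imported_functions) return -1;  // imports have no slot
  return static_cast<int>(
      SlotIndexToOffset(func_index - num_imported_functions));
}

int main() {
  assert(ComputeJumpTableOffset(0, 2) == -1);  // import
  assert(ComputeJumpTableOffset(2, 2) == 0);   // first defined function
  assert(ComputeJumpTableOffset(5, 2) == 64);  // slot 3 starts a new line
  return 0;
}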
@@ -400,7 +400,7 @@ class WasmInstanceObject : public JSObject {
   DECL_PRIMITIVE_ACCESSORS(indirect_function_table_size, uint32_t)
   DECL_PRIMITIVE_ACCESSORS(indirect_function_table_sig_ids, uint32_t*)
   DECL_PRIMITIVE_ACCESSORS(indirect_function_table_targets, Address*)
-  DECL_PRIMITIVE_ACCESSORS(jump_table_adjusted_start, Address)
+  DECL_PRIMITIVE_ACCESSORS(jump_table_start, Address)
 
   // Dispatched behavior.
   DECL_PRINTER(WasmInstanceObject)
@@ -435,7 +435,7 @@ class WasmInstanceObject : public JSObject {
   V(kImportedMutableGlobalsOffset, kPointerSize)         /* untagged */ \
   V(kIndirectFunctionTableSigIdsOffset, kPointerSize)    /* untagged */ \
   V(kIndirectFunctionTableTargetsOffset, kPointerSize)   /* untagged */ \
-  V(kJumpTableAdjustedStartOffset, kPointerSize)         /* untagged */ \
+  V(kJumpTableStartOffset, kPointerSize)                 /* untagged */ \
   V(kIndirectFunctionTableSizeOffset, kUInt32Size)       /* untagged */ \
   V(k64BitArchPaddingOffset, kPointerSize - kUInt32Size) /* padding */ \
   V(kSize, 0)
@@ -495,6 +495,7 @@ class WasmExportedFunctionData : public Struct {
  public:
   DECL_ACCESSORS(wrapper_code, Code);
   DECL_ACCESSORS(instance, WasmInstanceObject)
+  DECL_INT_ACCESSORS(jump_table_offset);
   DECL_INT_ACCESSORS(function_index);
 
   DECL_CAST(WasmExportedFunctionData)
@@ -504,10 +505,11 @@ class WasmExportedFunctionData : public Struct {
   DECL_VERIFIER(WasmExportedFunctionData)
 
   // Layout description.
-#define WASM_EXPORTED_FUNCTION_DATA_FIELDS(V) \
-  V(kWrapperCodeOffset, kPointerSize)         \
-  V(kInstanceOffset, kPointerSize)            \
-  V(kFunctionIndexOffset, kPointerSize) /* Smi */ \
+#define WASM_EXPORTED_FUNCTION_DATA_FIELDS(V)       \
+  V(kWrapperCodeOffset, kPointerSize)               \
+  V(kInstanceOffset, kPointerSize)                  \
+  V(kJumpTableOffsetOffset, kPointerSize) /* Smi */ \
+  V(kFunctionIndexOffset, kPointerSize)   /* Smi */ \
   V(kSize, 0)
 
   DEFINE_FIELD_OFFSET_CONSTANTS(HeapObject::kHeaderSize,
......
@@ -239,6 +239,7 @@ v8_source_set("cctest_sources") {
     "types-fuzz.h",
     "unicode-helpers.h",
     "wasm/test-c-wasm-entry.cc",
+    "wasm/test-jump-table-assembler.cc",
    "wasm/test-run-wasm-64.cc",
    "wasm/test-run-wasm-asmjs.cc",
    "wasm/test-run-wasm-atomics.cc",
......
// Copyright 2018 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include <list>

#include "src/assembler-inl.h"
#include "src/simulator.h"
#include "src/wasm/jump-table-assembler.h"
#include "test/cctest/cctest.h"
#include "test/common/assembler-tester.h"

namespace v8 {
namespace internal {
namespace wasm {

#if 0
#define TRACE(...) PrintF(__VA_ARGS__)
#else
#define TRACE(...)
#endif

#define __ masm.

// TODO(v8:7424,v8:8018): Extend this test to all architectures.
#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_IA32

namespace {

static volatile int global_stop_bit = 0;

Address GenerateJumpTableThunk(Address jump_target) {
  size_t allocated;
  byte* buffer = AllocateAssemblerBuffer(
      &allocated, AssemblerBase::kMinimalBufferSize, GetRandomMmapAddr());
  MacroAssembler masm(nullptr, AssemblerOptions{}, buffer,
                      static_cast<int>(allocated), CodeObjectRequired::kNo);

  Label exit;
  Register scratch = kReturnRegister0;
  Address stop_bit_address = reinterpret_cast<Address>(&global_stop_bit);
#if V8_TARGET_ARCH_X64
  __ Move(scratch, stop_bit_address, RelocInfo::NONE);
  __ testl(MemOperand(scratch, 0), Immediate(1));
  __ j(not_zero, &exit);
  __ Jump(jump_target, RelocInfo::NONE);
#elif V8_TARGET_ARCH_IA32
  __ Move(scratch, Immediate(stop_bit_address, RelocInfo::NONE));
  __ test(MemOperand(scratch, 0), Immediate(1));
  __ j(not_zero, &exit);
  __ jmp(jump_target, RelocInfo::NONE);
#else
#error Unsupported architecture
#endif
  __ bind(&exit);
  __ Ret();

  CodeDesc desc;
  masm.GetCode(nullptr, &desc);
  MakeAssemblerBufferExecutable(buffer, allocated);
  return reinterpret_cast<Address>(buffer);
}

class JumpTableRunner : public v8::base::Thread {
 public:
  JumpTableRunner(Address slot_address, int runner_id)
      : Thread(Options("JumpTableRunner")),
        slot_address_(slot_address),
        runner_id_(runner_id) {}

  void Run() override {
    TRACE("Runner #%d is starting ...\n", runner_id_);
    GeneratedCode<void>::FromAddress(nullptr, slot_address_).Call();
    TRACE("Runner #%d is stopping ...\n", runner_id_);
    USE(runner_id_);
  }

 private:
  Address slot_address_;
  int runner_id_;
};

class JumpTablePatcher : public v8::base::Thread {
 public:
  JumpTablePatcher(Address slot_start, uint32_t slot_index, Address thunk1,
                   Address thunk2)
      : Thread(Options("JumpTablePatcher")),
        slot_start_(slot_start),
        slot_index_(slot_index),
        thunks_{thunk1, thunk2} {}

  void Run() override {
    TRACE("Patcher is starting ...\n");
    constexpr int kNumberOfPatchIterations = 64;
    for (int i = 0; i < kNumberOfPatchIterations; ++i) {
      TRACE("  patch slot " V8PRIxPTR_FMT " to thunk #%d\n",
            slot_start_ + JumpTableAssembler::SlotIndexToOffset(slot_index_),
            i % 2);
      JumpTableAssembler::PatchJumpTableSlot(
          slot_start_, slot_index_, thunks_[i % 2], WasmCode::kFlushICache);
    }
    TRACE("Patcher is stopping ...\n");
  }

 private:
  Address slot_start_;
  uint32_t slot_index_;
  Address thunks_[2];
};

}  // namespace

// This test is intended to stress concurrent patching of jump-table slots. It
// uses the following setup:
//   1) Picks a particular slot of the jump-table. Slots are iterated over to
//      ensure multiple entries (at different offset alignments) are tested.
//   2) Starts multiple runners that spin through the above slot. The runners
//      use thunk code that will jump to the same jump-table slot repeatedly
//      until the {global_stop_bit} indicates a test-end condition.
//   3) Starts a patcher that repeatedly patches the jump-table slot back and
//      forth between two thunks. If there is a race then chances are high
//      that one of the runners is currently executing the jump-table slot.
TEST(JumpTablePatchingStress) {
  constexpr int kJumpTableSlotCount = 128;
  constexpr int kNumberOfRunnerThreads = 5;

  size_t allocated;
  byte* buffer = AllocateAssemblerBuffer(
      &allocated,
      JumpTableAssembler::SizeForNumberOfSlots(kJumpTableSlotCount));

  // Iterate through jump-table slots to hammer at different alignments within
  // the jump-table, thereby increasing stress for variable-length ISAs.
  Address slot_start = reinterpret_cast<Address>(buffer);
  for (int slot = 0; slot < kJumpTableSlotCount; ++slot) {
    TRACE("Hammering on jump table slot #%d ...\n", slot);
    uint32_t slot_offset = JumpTableAssembler::SlotIndexToOffset(slot);
    Address thunk1 = GenerateJumpTableThunk(slot_start + slot_offset);
    Address thunk2 = GenerateJumpTableThunk(slot_start + slot_offset);
    TRACE("  generated thunk1: " V8PRIxPTR_FMT "\n", thunk1);
    TRACE("  generated thunk2: " V8PRIxPTR_FMT "\n", thunk2);
    JumpTableAssembler::PatchJumpTableSlot(slot_start, slot, thunk1,
                                           WasmCode::kFlushICache);

    // Start multiple runner threads and a patcher thread that hammer on the
    // same jump-table slot concurrently.
    std::list<JumpTableRunner> runners;
    for (int runner = 0; runner < kNumberOfRunnerThreads; ++runner) {
      runners.emplace_back(slot_start + slot_offset, runner);
    }
    JumpTablePatcher patcher(slot_start, slot, thunk1, thunk2);
    global_stop_bit = 0;  // Signal runners to keep going.
    for (auto& runner : runners) runner.Start();
    patcher.Start();
    patcher.Join();
    global_stop_bit = -1;  // Signal runners to stop.
    for (auto& runner : runners) runner.Join();
  }
}

#endif  // V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_IA32

#undef __
#undef TRACE

}  // namespace wasm
}  // namespace internal
}  // namespace v8
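
The cctest above provokes tearing on real instruction bytes. The underlying phenomenon can also be shown portably: a value that is patched in two parts is sometimes observed half-updated. A minimal sketch (the halves are std::atomic with relaxed ordering, so the race itself is well-defined; what tears is the pair, build with -pthread):

#include <atomic>
#include <cstdint>
#include <cstdio>
#include <thread>

// Two halves of a "jump target", updated separately: a stand-in for target
// bytes that span more than one atomically fetched/stored unit.
std::atomic<uint64_t> lo{0}, hi{0};
std::atomic<bool> stop{false};

int main() {
  std::thread patcher([] {
    for (uint64_t i = 1; i <= 5000000; ++i) {
      lo.store(i, std::memory_order_relaxed);
      hi.store(i, std::memory_order_relaxed);  // the pair is not atomic
    }
    stop.store(true, std::memory_order_relaxed);
  });
  uint64_t torn = 0;
  while (!stop.load(std::memory_order_relaxed)) {
    uint64_t a = lo.load(std::memory_order_relaxed);
    uint64_t b = hi.load(std::memory_order_relaxed);
    if (a != b) ++torn;  // the reader caught the value mid-patch
  }
  patcher.join();
  std::printf("torn observations: %llu\n",
              static_cast<unsigned long long>(torn));
  return 0;
}

On most machines this prints a non-zero count on every run, which is roughly the behavior the jump-table line discipline must rule out for code bytes.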
@@ -12,11 +12,12 @@ namespace internal {
 static inline uint8_t* AllocateAssemblerBuffer(
     size_t* allocated,
-    size_t requested = v8::internal::AssemblerBase::kMinimalBufferSize) {
+    size_t requested = v8::internal::AssemblerBase::kMinimalBufferSize,
+    void* address = nullptr) {
   size_t page_size = v8::internal::AllocatePageSize();
   size_t alloc_size = RoundUp(requested, page_size);
   void* result = v8::internal::AllocatePages(
-      nullptr, alloc_size, page_size, v8::PageAllocator::kReadWriteExecute);
+      address, alloc_size, page_size, v8::PageAllocator::kReadWriteExecute);
   CHECK(result);
   *allocated = alloc_size;
   return static_cast<uint8_t*>(result);
@@ -157,7 +157,7 @@ class WasmCodeManagerTest : public TestWithContext,
  public:
   static constexpr uint32_t kNumFunctions = 10;
   static constexpr uint32_t kJumpTableSize = RoundUp<kCodeAlignment>(
-      kNumFunctions * JumpTableAssembler::kJumpTableSlotSize);
+      JumpTableAssembler::SizeForNumberOfSlots(kNumFunctions));
 
   using NativeModulePtr = std::unique_ptr<NativeModule>;
......