Commit a18be72c authored by Miran.Karic's avatar Miran.Karic Committed by Commit bot

Add v8_os_page_size flag for cross compilation

When generating a snapshot on a machine whose page size differs from
the target machine's, we can run into problems because the v8 page area
size changes. This is because v8 places page guards whose size depends
on the OS page size; if the target has a larger OS page, the v8 page
area is smaller and may not fit the contents.

The solution proposed here is adding a flag, v8_os_page_size, that
would, if used, override local os page size and use the one specified
during snapshot generation.

BUG=

Review-Url: https://codereview.chromium.org/2488403003
Cr-Commit-Position: refs/heads/master@{#40997}
parent 4a660091
......@@ -61,6 +61,9 @@ declare_args() {
# Switches off inlining in V8.
v8_no_inline = false
# Override the OS page size (in KB) when generating the snapshot;
# "0" means use the build machine's OS page size.
v8_os_page_size = "0"
# Similar to vfp but on MIPS.
v8_can_use_fpu_instructions = true
......@@ -679,6 +682,13 @@ action("run_mksnapshot") {
]
}
if (v8_os_page_size != "0") {
args += [
"--v8_os_page_size",
v8_os_page_size,
]
}
if (v8_use_external_startup_data) {
outputs += [ "$root_out_dir/snapshot_blob.bin" ]
args += [
......
......@@ -163,6 +163,14 @@ endif
ifeq ($(goma), on)
GYPFLAGS += -Duse_goma=1
endif
# v8_os_page_size: override the OS page size (in KB) used when generating the
# snapshot; when 0 or not specified, the build machine's OS page size is used.
ifdef v8_os_page_size
ifneq ($(v8_os_page_size), 0)
ifneq ($(snapshot), off)
GYPFLAGS += -Dv8_os_page_size=$(v8_os_page_size)
endif
endif
endif
# arm specific flags.
# arm_version=<number | "default">
ifneq ($(strip $(arm_version)),)
......
......@@ -24,9 +24,8 @@ namespace internal {
// Allocates one executable MemoryChunk big enough to hold the entire
// deoptimization entry table.
//
// NOTE: the diff rendering had fused the pre- and post-change argument
// lists into a single 6-argument call; this is the post-change form.
static MemoryChunk* AllocateCodeChunk(MemoryAllocator* allocator) {
  // Use MemoryAllocator::GetCommitPageSize() instead of
  // base::OS::CommitPageSize() so the --v8_os_page_size override (used for
  // cross-compiled snapshot generation) is respected.
  return allocator->AllocateChunk(Deoptimizer::GetMaxDeoptTableSize(),
                                  MemoryAllocator::GetCommitPageSize(),
                                  EXECUTABLE, NULL);
}
......@@ -88,7 +87,7 @@ static const int kDeoptTableMaxEpilogueCodeSize = 2 * KB;
// Returns the maximum size, in bytes, needed for the deoptimization table:
// all entries plus the epilogue code, rounded up to whole commit pages.
//
// NOTE: the diff rendering had fused the pre- and post-change declarations
// of commit_page_size (a redeclaration error); this keeps the post-change
// line only.
size_t Deoptimizer::GetMaxDeoptTableSize() {
  // Total size of all deopt table entries.
  int entries_size =
      Deoptimizer::kMaxNumberOfEntries * Deoptimizer::table_entry_size_;
  // Honor the --v8_os_page_size override (cross-compiled snapshots) rather
  // than asking the OS directly.
  int commit_page_size = static_cast<int>(MemoryAllocator::GetCommitPageSize());
  // Round up to whole pages and reserve one extra page for the epilogue.
  int page_count = ((kDeoptTableMaxEpilogueCodeSize + entries_size - 1) /
                    commit_page_size) + 1;
  return static_cast<size_t>(commit_page_size * page_count);
}
......
......@@ -767,6 +767,9 @@ DEFINE_BOOL(memory_reducer, true, "use memory reducer")
DEFINE_INT(heap_growing_percent, 0,
"specifies heap growing factor as (1 + heap_growing_percent/100)")
// spaces.cc
DEFINE_INT(v8_os_page_size, 0, "override OS page size (in KBytes)")
// execution.cc, messages.cc
DEFINE_BOOL(clear_exceptions_on_js_entry, false,
"clear pending exceptions when entering JavaScript")
......@@ -1019,7 +1022,6 @@ DEFINE_BOOL(collect_heap_spill_statistics, false,
"(requires heap_stats)")
DEFINE_BOOL(trace_live_bytes, false,
"trace incrementing and resetting of live bytes")
DEFINE_BOOL(trace_isolates, false, "trace isolate state changes")
// Regexp
......
......@@ -107,7 +107,7 @@ bool CodeRange::SetUp(size_t requested) {
}
const size_t reserved_area =
kReservedCodeRangePages * base::OS::CommitPageSize();
kReservedCodeRangePages * MemoryAllocator::GetCommitPageSize();
if (requested < (kMaximalCodeRangeSize - reserved_area))
requested += reserved_area;
......@@ -548,9 +548,9 @@ bool MemoryChunk::CommitArea(size_t requested) {
IsFlagSet(IS_EXECUTABLE) ? MemoryAllocator::CodePageGuardSize() : 0;
size_t header_size = area_start() - address() - guard_size;
size_t commit_size =
RoundUp(header_size + requested, base::OS::CommitPageSize());
RoundUp(header_size + requested, MemoryAllocator::GetCommitPageSize());
size_t committed_size = RoundUp(header_size + (area_end() - area_start()),
base::OS::CommitPageSize());
MemoryAllocator::GetCommitPageSize());
if (commit_size > committed_size) {
// Commit size should be less or equal than the reserved size.
......@@ -618,8 +618,8 @@ void MemoryChunk::Unlink() {
}
void MemoryAllocator::ShrinkChunk(MemoryChunk* chunk, size_t bytes_to_shrink) {
DCHECK_GE(bytes_to_shrink, static_cast<size_t>(base::OS::CommitPageSize()));
DCHECK_EQ(0u, bytes_to_shrink % base::OS::CommitPageSize());
DCHECK_GE(bytes_to_shrink, static_cast<size_t>(GetCommitPageSize()));
DCHECK_EQ(0u, bytes_to_shrink % GetCommitPageSize());
Address free_start = chunk->area_end_ - bytes_to_shrink;
// Don't adjust the size of the page. The area is just uncomitted but not
// released.
......@@ -629,7 +629,7 @@ void MemoryAllocator::ShrinkChunk(MemoryChunk* chunk, size_t bytes_to_shrink) {
if (chunk->reservation_.IsReserved())
chunk->reservation_.Guard(chunk->area_end_);
else
base::OS::Guard(chunk->area_end_, base::OS::CommitPageSize());
base::OS::Guard(chunk->area_end_, GetCommitPageSize());
}
}
......@@ -678,7 +678,7 @@ MemoryChunk* MemoryAllocator::AllocateChunk(size_t reserve_area_size,
if (executable == EXECUTABLE) {
chunk_size = RoundUp(CodePageAreaStartOffset() + reserve_area_size,
base::OS::CommitPageSize()) +
GetCommitPageSize()) +
CodePageGuardSize();
// Check executable memory limit.
......@@ -690,7 +690,7 @@ MemoryChunk* MemoryAllocator::AllocateChunk(size_t reserve_area_size,
// Size of header (not executable) plus area (executable).
size_t commit_size = RoundUp(CodePageGuardStartOffset() + commit_area_size,
base::OS::CommitPageSize());
GetCommitPageSize());
// Allocate executable memory either from code range or from the
// OS.
#ifdef V8_TARGET_ARCH_MIPS64
......@@ -726,10 +726,10 @@ MemoryChunk* MemoryAllocator::AllocateChunk(size_t reserve_area_size,
area_end = area_start + commit_area_size;
} else {
chunk_size = RoundUp(MemoryChunk::kObjectStartOffset + reserve_area_size,
base::OS::CommitPageSize());
GetCommitPageSize());
size_t commit_size =
RoundUp(MemoryChunk::kObjectStartOffset + commit_area_size,
base::OS::CommitPageSize());
GetCommitPageSize());
base =
AllocateAlignedMemory(chunk_size, commit_size, MemoryChunk::kAlignment,
executable, &reservation);
......@@ -814,7 +814,7 @@ size_t Page::ShrinkToHighWaterMark() {
size_t unused = RoundDown(
static_cast<size_t>(area_end() - filler->address() - FreeSpace::kSize),
base::OS::CommitPageSize());
MemoryAllocator::GetCommitPageSize());
if (unused > 0) {
if (FLAG_trace_gc_verbose) {
PrintIsolate(heap()->isolate(), "Shrinking page %p: end %p -> %p\n",
......@@ -1012,11 +1012,11 @@ void MemoryAllocator::ReportStatistics() {
// Offset of the leading guard page within a code page.
//
// NOTE: the diff rendering had fused the pre- and post-change return
// statements; this keeps the post-change one (GetCommitPageSize honors
// the --v8_os_page_size override).
size_t MemoryAllocator::CodePageGuardStartOffset() {
  // We are guarding code pages: the first OS page after the header
  // will be protected as non-writable.
  return RoundUp(Page::kObjectStartOffset, GetCommitPageSize());
}
// Size of one code-page guard region: exactly one commit page.
//
// NOTE: the diff rendering had fused the pre- and post-change return
// statements; this keeps the post-change one. The cast is to size_t
// (the return type) rather than int, avoiding a narrowing round-trip
// on 64-bit targets.
size_t MemoryAllocator::CodePageGuardSize() {
  return static_cast<size_t>(GetCommitPageSize());
}
size_t MemoryAllocator::CodePageAreaStartOffset() {
......@@ -1028,7 +1028,16 @@ size_t MemoryAllocator::CodePageAreaStartOffset() {
// Offset of the end of the usable code area within a code page.
//
// NOTE: the diff rendering had fused the pre- and post-change return
// statements; this keeps the post-change one, with the cast widened to
// size_t to match the surrounding arithmetic and return type.
size_t MemoryAllocator::CodePageAreaEndOffset() {
  // We are guarding code pages: the last OS page will be protected as
  // non-writable.
  return Page::kPageSize - static_cast<size_t>(GetCommitPageSize());
}
// Returns the commit page size v8 uses for page layout computations.
// Normally this is the OS page size, but the --v8_os_page_size flag (in KB)
// can override it, e.g. when generating a snapshot for a target machine
// whose page size differs from the build machine's.
intptr_t MemoryAllocator::GetCommitPageSize() {
  if (FLAG_v8_os_page_size == 0) return base::OS::CommitPageSize();
  // The override must be a power of two so page-alignment math stays valid.
  DCHECK(base::bits::IsPowerOfTwo32(FLAG_v8_os_page_size));
  return FLAG_v8_os_page_size * KB;
}
......@@ -2893,7 +2902,7 @@ Address LargePage::GetAddressToShrink() {
return 0;
}
size_t used_size = RoundUp((object->address() - address()) + object->Size(),
base::OS::CommitPageSize());
MemoryAllocator::GetCommitPageSize());
if (used_size < CommittedPhysicalMemory()) {
return address() + used_size;
}
......
......@@ -1090,7 +1090,7 @@ class SkipList {
// ----------------------------------------------------------------------------
// A space acquires chunks of memory from the operating system. The memory
// allocator allocated and deallocates pages for the paged heap spaces and large
// allocator allocates and deallocates pages for the paged heap spaces and large
// pages for large object space.
class MemoryAllocator {
public:
......@@ -1209,6 +1209,8 @@ class MemoryAllocator {
: Page::kAllocatableMemory;
}
static intptr_t GetCommitPageSize();
explicit MemoryAllocator(Isolate* isolate);
// Initializes its internal bookkeeping structures.
......
......@@ -37,6 +37,7 @@
'v8_enable_inspector%': 0,
'mksnapshot_exec': '<(PRODUCT_DIR)/<(EXECUTABLE_PREFIX)mksnapshot<(EXECUTABLE_SUFFIX)',
'mkpeephole_exec': '<(PRODUCT_DIR)/<(EXECUTABLE_PREFIX)mkpeephole<(EXECUTABLE_SUFFIX)',
'v8_os_page_size%': 0,
},
'includes': ['../gypfiles/toolchain.gypi', '../gypfiles/features.gypi', 'inspector/inspector.gypi'],
'targets': [
......@@ -306,6 +307,9 @@
['v8_vector_stores!=0', {
'mksnapshot_flags': ['--vector-stores'],
}],
['v8_os_page_size!=0', {
'mksnapshot_flags': ['--v8_os_page_size', '<(v8_os_page_size)'],
}],
],
},
'conditions': [
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment