Commit 16cf6c06 authored by Michael Lippautz, committed by Commit Bot

heap: Avoid allocating a code range when running in jitless mode

Jitless mode never allocates executable memory, so we can avoid reserving
virtual address space for the code range in such configurations.
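
For context, a minimal embedder-side sketch (not part of this change) of running V8 in jitless mode; the flag and API calls are the standard public embedding API, while the program itself is illustrative:

#include <libplatform/libplatform.h>
#include <v8.h>

int main() {
  // --jitless is a real V8 flag; it disables runtime code generation.
  v8::V8::SetFlagsFromString("--jitless");
  std::unique_ptr<v8::Platform> platform = v8::platform::NewDefaultPlatform();
  v8::V8::InitializePlatform(platform.get());
  v8::V8::Initialize();

  v8::Isolate::CreateParams create_params;
  create_params.array_buffer_allocator =
      v8::ArrayBuffer::Allocator::NewDefaultAllocator();
  // With this commit, the isolate below no longer reserves a code range,
  // because Isolate::RequiresCodeRange() is false in jitless mode.
  v8::Isolate* isolate = v8::Isolate::New(create_params);

  isolate->Dispose();
  delete create_params.array_buffer_allocator;
  v8::V8::Dispose();
  v8::V8::ShutdownPlatform();
  return 0;
}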

Bug: v8:10033
Change-Id: Ie6a943084e3bade85848e3219cb4d8779ed34830
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/1981505
Commit-Queue: Michael Lippautz <mlippautz@chromium.org>
Reviewed-by: Ulan Degenbaev <ulan@chromium.org>
Reviewed-by: Jakob Gruber <jgruber@chromium.org>
Cr-Commit-Position: refs/heads/master@{#66388}
parent c4e944cb
@@ -1014,7 +1014,7 @@ void ResourceConstraints::ConfigureDefaultsFromHeapSize(
     set_initial_young_generation_size_in_bytes(young_generation);
     set_initial_old_generation_size_in_bytes(old_generation);
   }
-  if (i::kRequiresCodeRange) {
+  if (i::kPlatformRequiresCodeRange) {
     set_code_range_size_in_bytes(
         i::Min(i::kMaximalCodeRangeSize, maximum_heap_size_in_bytes));
   }
@@ -1029,7 +1029,7 @@ void ResourceConstraints::ConfigureDefaults(uint64_t physical_memory,
   set_max_young_generation_size_in_bytes(young_generation);
   set_max_old_generation_size_in_bytes(old_generation);
 
-  if (virtual_memory_limit > 0 && i::kRequiresCodeRange) {
+  if (virtual_memory_limit > 0 && i::kPlatformRequiresCodeRange) {
     set_code_range_size_in_bytes(
         i::Min(i::kMaximalCodeRangeSize,
                static_cast<size_t>(virtual_memory_limit / 8)));
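
As an orientation aid, a hedged sketch of how the ResourceConstraints API touched above is typically used by an embedder; the helper name NewIsolateWithHeapBudget and the byte values are illustrative, not from this change:

#include <v8.h>

v8::Isolate* NewIsolateWithHeapBudget(v8::ArrayBuffer::Allocator* allocator) {
  v8::ResourceConstraints constraints;
  // Derives generation sizes from the budget and, on platforms where
  // i::kPlatformRequiresCodeRange holds, a code_range_size capped at
  // i::kMaximalCodeRangeSize.
  constraints.ConfigureDefaultsFromHeapSize(
      /*initial_heap_size_in_bytes=*/16u << 20,
      /*maximum_heap_size_in_bytes=*/512u << 20);

  v8::Isolate::CreateParams params;
  params.constraints = constraints;
  params.array_buffer_allocator = allocator;
  return v8::Isolate::New(params);
}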
......
@@ -191,7 +191,7 @@ constexpr size_t kMaxWasmCodeSpaceSize = kMaxWasmCodeMemory;
 constexpr int kSystemPointerSizeLog2 = 3;
 constexpr intptr_t kIntptrSignBit =
     static_cast<intptr_t>(uintptr_t{0x8000000000000000});
-constexpr bool kRequiresCodeRange = true;
+constexpr bool kPlatformRequiresCodeRange = true;
 #if V8_HOST_ARCH_PPC && V8_TARGET_ARCH_PPC && V8_OS_LINUX
 constexpr size_t kMaximalCodeRangeSize = 512 * MB;
 constexpr size_t kMinExpectedOSPageSize = 64 * KB;  // OS page on PPC Linux
@@ -213,17 +213,17 @@ constexpr size_t kReservedCodeRangePages = 0;
 constexpr int kSystemPointerSizeLog2 = 2;
 constexpr intptr_t kIntptrSignBit = 0x80000000;
 #if V8_HOST_ARCH_PPC && V8_TARGET_ARCH_PPC && V8_OS_LINUX
-constexpr bool kRequiresCodeRange = false;
+constexpr bool kPlatformRequiresCodeRange = false;
 constexpr size_t kMaximalCodeRangeSize = 0 * MB;
 constexpr size_t kMinimumCodeRangeSize = 0 * MB;
 constexpr size_t kMinExpectedOSPageSize = 64 * KB;  // OS page on PPC Linux
 #elif V8_TARGET_ARCH_MIPS
-constexpr bool kRequiresCodeRange = false;
+constexpr bool kPlatformRequiresCodeRange = false;
 constexpr size_t kMaximalCodeRangeSize = 2048LL * MB;
 constexpr size_t kMinimumCodeRangeSize = 0 * MB;
 constexpr size_t kMinExpectedOSPageSize = 4 * KB;  // OS page.
 #else
-constexpr bool kRequiresCodeRange = false;
+constexpr bool kPlatformRequiresCodeRange = false;
 constexpr size_t kMaximalCodeRangeSize = 0 * MB;
 constexpr size_t kMinimumCodeRangeSize = 0 * MB;
 constexpr size_t kMinExpectedOSPageSize = 4 * KB;  // OS page.
......
@@ -2856,6 +2856,7 @@ Isolate::Isolate(std::unique_ptr<i::IsolateAllocator> isolate_allocator)
       builtins_(this),
       rail_mode_(PERFORMANCE_ANIMATION),
       code_event_dispatcher_(new CodeEventDispatcher()),
+      jitless_(FLAG_jitless),
 #if V8_SFI_HAS_UNIQUE_ID
       next_unique_sfi_id_(0),
 #endif
@@ -2937,7 +2938,7 @@ void Isolate::Deinit() {
 #if defined(V8_OS_WIN64)
   if (win64_unwindinfo::CanRegisterUnwindInfoForNonABICompliantCodeRange() &&
-      heap()->memory_allocator()) {
+      heap()->memory_allocator() && RequiresCodeRange()) {
     const base::AddressRegion& code_range =
         heap()->memory_allocator()->code_range();
     void* start = reinterpret_cast<void*>(code_range.begin());
@@ -4494,6 +4495,10 @@ void Isolate::AddCodeRange(Address begin, size_t length_in_bytes) {
       MemoryRange{reinterpret_cast<void*>(begin), length_in_bytes});
 }
 
+bool Isolate::RequiresCodeRange() const {
+  return kPlatformRequiresCodeRange && !jitless_;
+}
+
 // |chunk| is either a Page or an executable LargePage.
 void Isolate::RemoveCodeMemoryChunk(MemoryChunk* chunk) {
   // We only keep track of individual code pages/allocations if we are on arm32,
......
@@ -1522,6 +1522,8 @@ class V8_EXPORT_PRIVATE Isolate final : private HiddenFactory {
   void RemoveCodeMemoryChunk(MemoryChunk* chunk);
   void AddCodeRange(Address begin, size_t length_in_bytes);
 
+  bool RequiresCodeRange() const;
+
  private:
   explicit Isolate(std::unique_ptr<IsolateAllocator> isolate_allocator);
   ~Isolate();
@@ -1774,6 +1776,8 @@ class V8_EXPORT_PRIVATE Isolate final : private HiddenFactory {
   bool force_slow_path_ = false;
 
+  bool jitless_ = false;
+
   int next_optimization_id_ = 0;
 
 #if V8_SFI_HAS_UNIQUE_ID
......
@@ -413,7 +413,6 @@ bool Heap::HasBeenSetUp() {
   return new_space_ != nullptr;
 }
 
-
 GarbageCollector Heap::SelectGarbageCollector(AllocationSpace space,
                                               const char** reason) {
   // Is global GC requested?
......
@@ -156,7 +156,7 @@ void MemoryAllocator::InitializeCodePageAllocator(
   code_page_allocator_ = page_allocator;
 
   if (requested == 0) {
-    if (!kRequiresCodeRange) return;
+    if (!isolate_->RequiresCodeRange()) return;
     // When a target requires the code range feature, we put all code objects
     // in a kMaximalCodeRangeSize range of virtual address space, so that
     // they can call each other with near calls.
@@ -173,7 +173,7 @@ void MemoryAllocator::InitializeCodePageAllocator(
     // alignments is not supported (requires re-implementation).
     DCHECK_LE(kMinExpectedOSPageSize, page_allocator->AllocatePageSize());
   }
-  DCHECK(!kRequiresCodeRange || requested <= kMaximalCodeRangeSize);
+  DCHECK(!isolate_->RequiresCodeRange() || requested <= kMaximalCodeRangeSize);
 
   Address hint =
       RoundDown(code_range_address_hint.Pointer()->GetAddressHint(requested),
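
The comment in the first hunk above carries the key rationale: one contiguous reservation keeps every code object within near-call distance of every other. A standalone sketch of that constraint follows (illustrative, not V8 code; CanUseNearCall is a hypothetical helper; x64 near calls encode a signed 32-bit displacement):

#include <cstdint>
#include <limits>

// True if a call site can reach |target| with a rel32 near call. Any two
// addresses inside a single code range whose size is well under 2 GB (e.g.
// kMaximalCodeRangeSize) satisfy this, which is why the range is reserved
// as one contiguous block of virtual address space.
bool CanUseNearCall(uintptr_t call_site, uintptr_t target) {
  int64_t distance =
      static_cast<int64_t>(target) - static_cast<int64_t>(call_site);
  return distance >= std::numeric_limits<int32_t>::min() &&
         distance <= std::numeric_limits<int32_t>::max();
}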
......
@@ -494,6 +494,7 @@
   # Tests that generate code at runtime.
   'codegen-tester/*': [SKIP],
   'serializer-tester/*': [SKIP],
+  'test-accessor-assembler/*': [SKIP],
   'test-assembler-*': [SKIP],
   'test-basic-block-profiler/*': [SKIP],
@@ -502,7 +503,10 @@
   'test-code-generator/*': [SKIP],
   'test-code-pages/*': [SKIP],
   'test-code-stub-assembler/*': [SKIP],
+  'test-debug-helper/GetObjectProperties': [SKIP],
   'test-disasm-x64/DisasmX64': [SKIP],
+  'test-js-context-specialization/*': [SKIP],
+  'test-macro-assembler-x64/EmbeddedObj': [SKIP],
   'test-multiple-return/*': [SKIP],
   'test-regexp/MacroAssemblernativeAtStart': [SKIP],
   'test-regexp/MacroAssemblerNativeBackReferenceLATIN1': [SKIP],
@@ -533,8 +537,11 @@
   'test-run-tail-calls/*': [SKIP],
   'test-run-unwinding-info/*': [SKIP],
   'test-run-variables/*': [SKIP],
+  'test-serialize/*': [SKIP],
   'test-torque/*': [SKIP],
-  'test-macro-assembler-x64/EmbeddedObj': [SKIP],
+  'test-unwinder/PCIsInV8_InCodeOrEmbeddedRange': [SKIP],
+  'test-unwinder/PCIsInV8_LargeCodeObject': [SKIP],
+  'test-unwinder-code-pages/PCIsInV8_LargeCodeObject_CodePagesAPI': [SKIP],
 
   # Field representation tracking is disabled in jitless mode.
   'test-field-type-tracking/*': [SKIP],
......
@@ -6926,6 +6926,13 @@ UNINITIALIZED_TEST(HeapLimit) {
   isolate->Dispose();
 }
 
+TEST(NoCodeRangeInJitlessMode) {
+  if (!FLAG_jitless) return;
+  CcTest::InitializeVM();
+  CHECK(
+      CcTest::i_isolate()->heap()->memory_allocator()->code_range().is_empty());
+}
+
 }  // namespace heap
 }  // namespace internal
 }  // namespace v8
......
@@ -21,10 +21,8 @@ namespace test_code_pages {
 // 2 - Have code pages. ARM32 only
 // 3 - Nothing - This feature does not work on other platforms.
 #if defined(V8_TARGET_ARCH_ARM)
-static const bool kHaveCodeRange = false;
 static const bool kHaveCodePages = true;
 #else
-static const bool kHaveCodeRange = kRequiresCodeRange;
 static const bool kHaveCodePages = false;
 #endif  // defined(V8_TARGET_ARCH_ARM)
@@ -86,11 +84,10 @@ bool PagesContainsAddress(std::vector<MemoryRange>* pages,
 }  // namespace
 
 TEST(CodeRangeCorrectContents) {
-  if (!kHaveCodeRange) return;
-
   LocalContext env;
   v8::Isolate* isolate = env->GetIsolate();
   Isolate* i_isolate = reinterpret_cast<Isolate*>(isolate);
+  if (!i_isolate->RequiresCodeRange()) return;
 
   std::vector<MemoryRange>* pages = i_isolate->GetCodePages();
@@ -128,12 +125,12 @@ TEST(CodePagesCorrectContents) {
 }
 
 TEST(OptimizedCodeWithCodeRange) {
-  if (!kHaveCodeRange) return;
   FLAG_allow_natives_syntax = true;
   LocalContext env;
   v8::Isolate* isolate = env->GetIsolate();
   Isolate* i_isolate = reinterpret_cast<Isolate*>(isolate);
+  if (!i_isolate->RequiresCodeRange()) return;
   HandleScope scope(i_isolate);
 
   std::string foo_str = getFooCode(1);
@@ -255,7 +252,6 @@ TEST(OptimizedCodeWithCodePages) {
 }
 
 TEST(LargeCodeObject) {
-  if (!kHaveCodeRange && !kHaveCodePages) return;
   // We don't want incremental marking to start which could cause the code to
   // not be collected on the CollectGarbage() call.
   ManualGCScope manual_gc_scope;
@@ -263,6 +259,7 @@ TEST(LargeCodeObject) {
   LocalContext env;
   v8::Isolate* isolate = env->GetIsolate();
   Isolate* i_isolate = reinterpret_cast<Isolate*>(isolate);
+  if (!i_isolate->RequiresCodeRange() && !kHaveCodePages) return;
 
   // Create a big function that ends up in CODE_LO_SPACE.
   const int instruction_size = Page::kPageSize + 1;
@@ -290,7 +287,7 @@ TEST(LargeCodeObject) {
   std::vector<MemoryRange>* pages = i_isolate->GetCodePages();
 
-  if (kHaveCodeRange) {
+  if (i_isolate->RequiresCodeRange()) {
     CHECK(PagesContainsAddress(pages, foo_code->address()));
   } else {
     CHECK(PagesHasExactPage(pages, foo_code->address()));
@@ -371,7 +368,6 @@ class SamplingThread : public base::Thread {
 };
 
 TEST(LargeCodeObjectWithSignalHandler) {
-  if (!kHaveCodeRange && !kHaveCodePages) return;
   // We don't want incremental marking to start which could cause the code to
   // not be collected on the CollectGarbage() call.
   ManualGCScope manual_gc_scope;
@@ -379,6 +375,7 @@ TEST(LargeCodeObjectWithSignalHandler) {
   LocalContext env;
   v8::Isolate* isolate = env->GetIsolate();
   Isolate* i_isolate = reinterpret_cast<Isolate*>(isolate);
+  if (!i_isolate->RequiresCodeRange() && !kHaveCodePages) return;
 
   // Create a big function that ends up in CODE_LO_SPACE.
   const int instruction_size = Page::kPageSize + 1;
@@ -421,7 +418,7 @@ TEST(LargeCodeObjectWithSignalHandler) {
     // Check that the page was added.
     std::vector<MemoryRange> pages =
         SamplingThread::DoSynchronousSample(isolate);
-    if (kHaveCodeRange) {
+    if (i_isolate->RequiresCodeRange()) {
       CHECK(PagesContainsAddress(&pages, foo_code->address()));
     } else {
       CHECK(PagesHasExactPage(&pages, foo_code->address()));
@@ -447,7 +444,6 @@ TEST(LargeCodeObjectWithSignalHandler) {
 }
 
 TEST(Sorted) {
-  if (!kHaveCodeRange && !kHaveCodePages) return;
   // We don't want incremental marking to start which could cause the code to
   // not be collected on the CollectGarbage() call.
   ManualGCScope manual_gc_scope;
@@ -455,6 +451,7 @@ TEST(Sorted) {
   LocalContext env;
   v8::Isolate* isolate = env->GetIsolate();
   Isolate* i_isolate = reinterpret_cast<Isolate*>(isolate);
+  if (!i_isolate->RequiresCodeRange() && !kHaveCodePages) return;
 
   // Create a big function that ends up in CODE_LO_SPACE.
   const int instruction_size = Page::kPageSize + 1;
@@ -507,7 +504,7 @@ TEST(Sorted) {
     // Check that the pages were added.
     std::vector<MemoryRange> pages =
         SamplingThread::DoSynchronousSample(isolate);
-    if (kHaveCodeRange) {
+    if (i_isolate->RequiresCodeRange()) {
       CHECK_EQ(pages.size(), initial_num_pages);
     } else {
       CHECK_EQ(pages.size(), initial_num_pages + 3);
@@ -528,7 +525,7 @@ TEST(Sorted) {
     std::vector<MemoryRange> pages =
         SamplingThread::DoSynchronousSample(isolate);
-    if (kHaveCodeRange) {
+    if (i_isolate->RequiresCodeRange()) {
      CHECK_EQ(pages.size(), initial_num_pages);
    } else {
      CHECK_EQ(pages.size(), initial_num_pages + 2);
......
@@ -20,7 +20,8 @@ namespace {
 // This needs to be large enough to create a new nosnap Isolate, but smaller
 // than kMaximalCodeRangeSize so we can recover from the OOM.
 constexpr int kInstructionSize = 100 * MB;
-STATIC_ASSERT(kInstructionSize < kMaximalCodeRangeSize || !kRequiresCodeRange);
+STATIC_ASSERT(kInstructionSize < kMaximalCodeRangeSize ||
+              !kPlatformRequiresCodeRange);
 
 size_t NearHeapLimitCallback(void* raw_bool, size_t current_heap_limit,
                              size_t initial_heap_limit) {
......