Commit 23883b85 authored by Michael Starzinger, committed by Commit Bot

[deoptimizer] Turn deopt entries into immovable Code objects.

This turns the deoptimization entries from free-floating memory chunks
that were not considered part of the heap into true {Code} objects. By
marking them as immovable we get the same guarantees without the need
for side-stepping heap API methods.

R=jarin@chromium.org
BUG=v8:6792

Change-Id: I88e1795e52fb586f7ca960d08cd6d9d082f4df9b
Reviewed-on: https://chromium-review.googlesource.com/756851
Reviewed-by: Hannes Payer <hpayer@chromium.org>
Reviewed-by: Jaroslav Sevcik <jarin@chromium.org>
Commit-Queue: Michael Starzinger <mstarzinger@chromium.org>
Cr-Commit-Position: refs/heads/master@{#49257}
parent a9c908eb
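Context for the diff below: the deopt table is one contiguous run of fixed-size entry stubs, so entry id maps to instruction_start() + id * table_entry_size_, and that address stays valid only because the backing {Code} object can never move. What follows is a minimal, standalone sketch of that addressing scheme; FakeCode, kEntrySize, and kMaxEntries are illustrative stand-ins, not V8's actual types or constants.

// Minimal sketch of a fixed-stride entry table. Illustrative only.
#include <cassert>
#include <cstddef>
#include <cstdint>

constexpr int kEntrySize = 8;       // stand-in for table_entry_size_
constexpr int kMaxEntries = 16384;  // stand-in for kMaxNumberOfEntries

// Stand-in for an immovable Code object: its buffer address never changes.
struct FakeCode {
  uint8_t instructions[kMaxEntries * kEntrySize];
  uint8_t* instruction_start() { return instructions; }
};

// Forward mapping, analogous to GetDeoptimizationEntry: id -> address.
uint8_t* EntryAddress(FakeCode* code, int id) {
  assert(id >= 0 && id < kMaxEntries);
  return code->instruction_start() + id * kEntrySize;
}

// Reverse mapping, analogous to GetDeoptimizationId: address -> id,
// with -1 playing the role of kNotDeoptimizationEntry.
int EntryId(FakeCode* code, uint8_t* addr) {
  uint8_t* start = code->instruction_start();
  if (addr < start || addr >= start + kMaxEntries * kEntrySize) return -1;
  ptrdiff_t offset = addr - start;
  if (offset % kEntrySize != 0) return -1;  // not on an entry boundary
  return static_cast<int>(offset / kEntrySize);
}

int main() {
  static FakeCode code;  // static storage mimics a never-moving allocation
  uint8_t* entry = EntryAddress(&code, 42);
  assert(EntryId(&code, entry) == 42);
  return 0;
}

The round-trip in main() is the invariant the real code relies on: as long as instruction_start() is stable, both mappings are pure pointer arithmetic and need no per-entry bookkeeping.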
@@ -1968,6 +1968,7 @@ Handle<Code> Pipeline::GenerateCodeForTesting(CompilationInfo* info) {
   PipelineImpl pipeline(&data);
   Linkage linkage(Linkage::ComputeIncoming(data.instruction_zone(), info));
+  Deoptimizer::EnsureCodeForMaxDeoptimizationEntries(info->isolate());
   if (!pipeline.CreateGraph()) return Handle<Code>::null();
   if (!pipeline.OptimizeGraph(&linkage)) return Handle<Code>::null();
...
@@ -23,32 +23,23 @@
 namespace v8 {
 namespace internal {
 
-static MemoryChunk* AllocateCodeChunk(MemoryAllocator* allocator) {
-  MemoryChunk* chunk = allocator->AllocateChunk(
-      Deoptimizer::GetMaxDeoptTableSize(), MemoryAllocator::GetCommitPageSize(),
-      EXECUTABLE, nullptr);
-  if (FLAG_write_protect_code_memory) {
-    // TODO(hpayer): Ensure code memory chunk allocation gives us rx by default.
-    chunk->SetReadAndWritable();
-    chunk->SetReadAndExecutable();
-  }
-  return chunk;
-}
-
-DeoptimizerData::DeoptimizerData(MemoryAllocator* allocator)
-    : allocator_(allocator), current_(nullptr) {
+DeoptimizerData::DeoptimizerData(Heap* heap) : heap_(heap), current_(nullptr) {
   for (int i = 0; i <= Deoptimizer::kLastBailoutType; ++i) {
-    deopt_entry_code_entries_[i] = -1;
-    deopt_entry_code_[i] = AllocateCodeChunk(allocator);
+    deopt_entry_code_[i] = nullptr;
   }
+  Code** start = &deopt_entry_code_[0];
+  Code** end = &deopt_entry_code_[Deoptimizer::kLastBailoutType + 1];
+  heap_->RegisterStrongRoots(reinterpret_cast<Object**>(start),
+                             reinterpret_cast<Object**>(end));
 }
 
 DeoptimizerData::~DeoptimizerData() {
   for (int i = 0; i <= Deoptimizer::kLastBailoutType; ++i) {
-    allocator_->Free<MemoryAllocator::kFull>(deopt_entry_code_[i]);
     deopt_entry_code_[i] = nullptr;
   }
+  Code** start = &deopt_entry_code_[0];
+  heap_->UnregisterStrongRoots(reinterpret_cast<Object**>(start));
 }
@@ -85,20 +76,6 @@ Deoptimizer* Deoptimizer::New(JSFunction* function,
 }
 
-// No larger than 2K on all platforms
-static const int kDeoptTableMaxEpilogueCodeSize = 2 * KB;
-
-size_t Deoptimizer::GetMaxDeoptTableSize() {
-  int entries_size =
-      Deoptimizer::kMaxNumberOfEntries * Deoptimizer::table_entry_size_;
-  int commit_page_size = static_cast<int>(MemoryAllocator::GetCommitPageSize());
-  int page_count = ((kDeoptTableMaxEpilogueCodeSize + entries_size - 1) /
-                    commit_page_size) + 1;
-  return static_cast<size_t>(commit_page_size * page_count);
-}
-
 Deoptimizer* Deoptimizer::Grab(Isolate* isolate) {
   Deoptimizer* result = isolate->deoptimizer_data()->current_;
   CHECK_NOT_NULL(result);
@@ -521,22 +498,15 @@ void Deoptimizer::DeleteFrameDescriptions() {
 #endif  // DEBUG
 }
 
-Address Deoptimizer::GetDeoptimizationEntry(Isolate* isolate,
-                                            int id,
-                                            BailoutType type,
-                                            GetEntryMode mode) {
+Address Deoptimizer::GetDeoptimizationEntry(Isolate* isolate, int id,
+                                            BailoutType type) {
   CHECK_GE(id, 0);
   if (id >= kMaxNumberOfEntries) return nullptr;
-  if (mode == ENSURE_ENTRY_CODE) {
-    EnsureCodeForDeoptimizationEntry(isolate, type, id);
-  } else {
-    CHECK_EQ(mode, CALCULATE_ENTRY_ADDRESS);
-  }
   DeoptimizerData* data = isolate->deoptimizer_data();
   CHECK_LE(type, kLastBailoutType);
-  MemoryChunk* base = data->deopt_entry_code_[type];
-  return base->area_start() + (id * table_entry_size_);
+  CHECK_NOT_NULL(data->deopt_entry_code_[type]);
+  Code* code = data->deopt_entry_code_[type];
+  return code->instruction_start() + (id * table_entry_size_);
 }
@@ -544,8 +514,10 @@ int Deoptimizer::GetDeoptimizationId(Isolate* isolate,
                                      Address addr,
                                      BailoutType type) {
   DeoptimizerData* data = isolate->deoptimizer_data();
-  MemoryChunk* base = data->deopt_entry_code_[type];
-  Address start = base->area_start();
+  CHECK_LE(type, kLastBailoutType);
+  Code* code = data->deopt_entry_code_[type];
+  if (code == nullptr) return kNotDeoptimizationEntry;
+  Address start = code->instruction_start();
   if (addr < start ||
       addr >= start + (kMaxNumberOfEntries * table_entry_size_)) {
     return kNotDeoptimizationEntry;
@@ -1806,51 +1778,33 @@ unsigned Deoptimizer::ComputeIncomingArgumentSize(SharedFunctionInfo* shared) {
 }
 
 void Deoptimizer::EnsureCodeForDeoptimizationEntry(Isolate* isolate,
-                                                   BailoutType type,
-                                                   int max_entry_id) {
-  // We cannot run this if the serializer is enabled because this will
-  // cause us to emit relocation information for the external
-  // references. This is fine because the deoptimizer's code section
-  // isn't meant to be serialized at all.
+                                                   BailoutType type) {
   CHECK(type == EAGER || type == SOFT || type == LAZY);
   DeoptimizerData* data = isolate->deoptimizer_data();
-  int entry_count = data->deopt_entry_code_entries_[type];
-  if (max_entry_id < entry_count) return;
-  entry_count = Max(entry_count, Deoptimizer::kMinNumberOfEntries);
-  while (max_entry_id >= entry_count) entry_count *= 2;
-  CHECK_LE(entry_count, Deoptimizer::kMaxNumberOfEntries);
+  if (data->deopt_entry_code_[type] != nullptr) return;
 
   MacroAssembler masm(isolate, nullptr, 16 * KB, CodeObjectRequired::kYes);
   masm.set_emit_debug_code(false);
-  GenerateDeoptimizationEntries(&masm, entry_count, type);
+  GenerateDeoptimizationEntries(&masm, kMaxNumberOfEntries, type);
   CodeDesc desc;
   masm.GetCode(isolate, &desc);
   DCHECK(!RelocInfo::RequiresRelocation(isolate, desc));
 
-  MemoryChunk* chunk = data->deopt_entry_code_[type];
-
-  // TODO(mstarzinger,6792): This code-space modification section should be
-  // moved into {Heap} eventually and a safe wrapper be provided.
-  CodePageMemoryModificationScope modification_scope(
-      chunk, CodePageMemoryModificationScope::READ_WRITE);
-  CHECK(static_cast<int>(Deoptimizer::GetMaxDeoptTableSize()) >=
-        desc.instr_size);
-  if (!chunk->CommitArea(desc.instr_size)) {
-    V8::FatalProcessOutOfMemory(
-        "Deoptimizer::EnsureCodeForDeoptimizationEntry");
-  }
-  CopyBytes(chunk->area_start(), desc.buffer,
-            static_cast<size_t>(desc.instr_size));
-  Assembler::FlushICache(isolate, chunk->area_start(), desc.instr_size);
-
-  data->deopt_entry_code_entries_[type] = entry_count;
+  // Allocate the code as immovable since the entry addresses will be used
+  // directly and there is no support for relocating them.
+  Handle<Code> code = isolate->factory()->NewCode(
+      desc, Code::STUB, Handle<Object>(), MaybeHandle<HandlerTable>(),
+      MaybeHandle<ByteArray>(), MaybeHandle<DeoptimizationData>(), true);
+  CHECK(isolate->heap()->IsImmovable(*code));
+
+  CHECK_NULL(data->deopt_entry_code_[type]);
+  data->deopt_entry_code_[type] = *code;
 }
 
 void Deoptimizer::EnsureCodeForMaxDeoptimizationEntries(Isolate* isolate) {
-  EnsureCodeForDeoptimizationEntry(isolate, EAGER, kMaxNumberOfEntries - 1);
-  EnsureCodeForDeoptimizationEntry(isolate, LAZY, kMaxNumberOfEntries - 1);
-  EnsureCodeForDeoptimizationEntry(isolate, SOFT, kMaxNumberOfEntries - 1);
+  EnsureCodeForDeoptimizationEntry(isolate, EAGER);
+  EnsureCodeForDeoptimizationEntry(isolate, LAZY);
+  EnsureCodeForDeoptimizationEntry(isolate, SOFT);
 }
 
 FrameDescription::FrameDescription(uint32_t frame_size, int parameter_count)
...
@@ -405,18 +405,8 @@ class Deoptimizer : public Malloced {
   static void ComputeOutputFrames(Deoptimizer* deoptimizer);
 
-  enum GetEntryMode {
-    CALCULATE_ENTRY_ADDRESS,
-    ENSURE_ENTRY_CODE
-  };
-
-  static Address GetDeoptimizationEntry(
-      Isolate* isolate,
-      int id,
-      BailoutType type,
-      GetEntryMode mode = ENSURE_ENTRY_CODE);
+  static Address GetDeoptimizationEntry(Isolate* isolate, int id,
+                                        BailoutType type);
   static int GetDeoptimizationId(Isolate* isolate,
                                  Address addr,
                                  BailoutType type);
@@ -459,11 +449,8 @@ class Deoptimizer : public Malloced {
     int count_;
   };
 
-  static size_t GetMaxDeoptTableSize();
-
   static void EnsureCodeForDeoptimizationEntry(Isolate* isolate,
-                                               BailoutType type,
-                                               int max_entry_id);
+                                               BailoutType type);
   static void EnsureCodeForMaxDeoptimizationEntries(Isolate* isolate);
 
   Isolate* isolate() const { return isolate_; }
@@ -770,13 +757,12 @@ class FrameDescription {
 class DeoptimizerData {
  public:
-  explicit DeoptimizerData(MemoryAllocator* allocator);
+  explicit DeoptimizerData(Heap* heap);
   ~DeoptimizerData();
 
  private:
-  MemoryAllocator* allocator_;
-  int deopt_entry_code_entries_[Deoptimizer::kLastBailoutType + 1];
-  MemoryChunk* deopt_entry_code_[Deoptimizer::kLastBailoutType + 1];
+  Heap* heap_;
+  Code* deopt_entry_code_[Deoptimizer::kLastBailoutType + 1];
 
   Deoptimizer* current_;
...
@@ -630,10 +630,7 @@ CodePageMemoryModificationScope::CodePageMemoryModificationScope(
       scope_active_(FLAG_write_protect_code_memory &&
                     chunk_->IsFlagSet(MemoryChunk::IS_EXECUTABLE)) {
   if (scope_active_) {
-    // TODO(hpayer): owner() can only be null if we use the MemoryChunk outside
-    // of spaces. We actually should not do that and we should untangle this.
-    DCHECK(chunk_->owner() == nullptr ||
-           chunk_->owner()->identity() == CODE_SPACE ||
+    DCHECK(chunk_->owner()->identity() == CODE_SPACE ||
            (chunk_->owner()->identity() == LO_SPACE &&
             chunk_->IsFlagSet(MemoryChunk::IS_EXECUTABLE)));
     if (mode == READ_WRITE_EXECUTABLE) {
...
@@ -528,10 +528,7 @@ void MemoryChunk::InitializationMemoryFence() {
 
 void MemoryChunk::SetReadAndExecutable() {
   DCHECK(IsFlagSet(MemoryChunk::IS_EXECUTABLE));
-  // TODO(hpayer): owner() can only be null if we use the MemoryChunk outside
-  // of spaces. We actually should not do that and we should untangle this.
-  DCHECK(owner() == nullptr || owner()->identity() == CODE_SPACE ||
-         owner()->identity() == LO_SPACE);
+  DCHECK(owner()->identity() == CODE_SPACE || owner()->identity() == LO_SPACE);
   // Decrementing the write_unprotect_counter_ and changing the page
   // protection mode has to be atomic.
   base::LockGuard<base::Mutex> guard(page_protection_change_mutex_);
@@ -554,10 +551,7 @@ void MemoryChunk::SetReadAndExecutable() {
 
 void MemoryChunk::SetReadAndWritable() {
   DCHECK(IsFlagSet(MemoryChunk::IS_EXECUTABLE));
-  // TODO(hpayer): owner() can only be null if we use the MemoryChunk outside
-  // of spaces. We actually should not do that and we should untangle this.
-  DCHECK(owner() == nullptr || owner()->identity() == CODE_SPACE ||
-         owner()->identity() == LO_SPACE);
+  DCHECK(owner()->identity() == CODE_SPACE || owner()->identity() == LO_SPACE);
   // Incrementing the write_unprotect_counter_ and changing the page
   // protection mode has to be atomic.
   base::LockGuard<base::Mutex> guard(page_protection_change_mutex_);
@@ -574,10 +568,7 @@ void MemoryChunk::SetReadAndWritable() {
 
 void MemoryChunk::SetReadWriteAndExecutable() {
   DCHECK(IsFlagSet(MemoryChunk::IS_EXECUTABLE));
-  // TODO(hpayer): owner() can only be null if we use the MemoryChunk outside
-  // of spaces. We actually should not do that and we should untangle this.
-  DCHECK(owner() == nullptr || owner()->identity() == CODE_SPACE ||
-         owner()->identity() == LO_SPACE);
+  DCHECK(owner()->identity() == CODE_SPACE || owner()->identity() == LO_SPACE);
   // Incrementing the write_unprotect_counter_ and changing the page
   // protection mode has to be atomic.
   base::LockGuard<base::Mutex> guard(page_protection_change_mutex_);
...
@@ -2823,7 +2823,7 @@ bool Isolate::Init(StartupDeserializer* des) {
   INTERFACE_DESCRIPTOR_LIST(INTERFACE_DESCRIPTOR)
 #undef INTERFACE_DESCRIPTOR
 
-  deoptimizer_data_ = new DeoptimizerData(heap()->memory_allocator());
+  deoptimizer_data_ = new DeoptimizerData(heap());
   const bool create_heap_objects = (des == nullptr);
 
   if (setup_delegate_ == nullptr) {
...