Commit 0fc9527a authored by danno@chromium.org's avatar danno@chromium.org

Decouple allocation and creation of deopt tables

This makes it possible to calculate the future address of a deopt entry before it is possible to generate the deopt table.

Review URL: https://codereview.chromium.org/11275145

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@12877 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
parent 85ac311a
...@@ -6501,6 +6501,7 @@ void Testing::PrepareStressRun(int run) { ...@@ -6501,6 +6501,7 @@ void Testing::PrepareStressRun(int run) {
void Testing::DeoptimizeAll() { void Testing::DeoptimizeAll() {
i::HandleScope scope;
internal::Deoptimizer::DeoptimizeAll(); internal::Deoptimizer::DeoptimizeAll();
} }
......
...@@ -41,8 +41,11 @@ namespace v8 { ...@@ -41,8 +41,11 @@ namespace v8 {
namespace internal { namespace internal {
DeoptimizerData::DeoptimizerData() { DeoptimizerData::DeoptimizerData() {
eager_deoptimization_entry_code_ = NULL; eager_deoptimization_entry_code_entries_ = -1;
lazy_deoptimization_entry_code_ = NULL; lazy_deoptimization_entry_code_entries_ = -1;
size_t deopt_table_size = Deoptimizer::GetMaxDeoptTableSize();
eager_deoptimization_entry_code_ = new VirtualMemory(deopt_table_size);
lazy_deoptimization_entry_code_ = new VirtualMemory(deopt_table_size);
current_ = NULL; current_ = NULL;
deoptimizing_code_list_ = NULL; deoptimizing_code_list_ = NULL;
#ifdef ENABLE_DEBUGGER_SUPPORT #ifdef ENABLE_DEBUGGER_SUPPORT
...@@ -52,16 +55,11 @@ DeoptimizerData::DeoptimizerData() { ...@@ -52,16 +55,11 @@ DeoptimizerData::DeoptimizerData() {
DeoptimizerData::~DeoptimizerData() { DeoptimizerData::~DeoptimizerData() {
if (eager_deoptimization_entry_code_ != NULL) { delete eager_deoptimization_entry_code_;
Isolate::Current()->memory_allocator()->Free(
eager_deoptimization_entry_code_);
eager_deoptimization_entry_code_ = NULL; eager_deoptimization_entry_code_ = NULL;
} delete lazy_deoptimization_entry_code_;
if (lazy_deoptimization_entry_code_ != NULL) {
Isolate::Current()->memory_allocator()->Free(
lazy_deoptimization_entry_code_);
lazy_deoptimization_entry_code_ = NULL; lazy_deoptimization_entry_code_ = NULL;
}
DeoptimizingCodeListNode* current = deoptimizing_code_list_; DeoptimizingCodeListNode* current = deoptimizing_code_list_;
while (current != NULL) { while (current != NULL) {
DeoptimizingCodeListNode* prev = current; DeoptimizingCodeListNode* prev = current;
...@@ -103,6 +101,19 @@ Deoptimizer* Deoptimizer::New(JSFunction* function, ...@@ -103,6 +101,19 @@ Deoptimizer* Deoptimizer::New(JSFunction* function,
} }
// No larger than 2K on all platforms
static const int kDeoptTableMaxEpilogueCodeSize = 2 * KB;
size_t Deoptimizer::GetMaxDeoptTableSize() {
size_t entries_size =
Deoptimizer::kMaxNumberOfEntries * Deoptimizer::table_entry_size_;
int page_count = ((kDeoptTableMaxEpilogueCodeSize + entries_size - 1) /
OS::CommitPageSize()) + 1;
return OS::CommitPageSize() * page_count;
}
Deoptimizer* Deoptimizer::Grab(Isolate* isolate) { Deoptimizer* Deoptimizer::Grab(Isolate* isolate) {
ASSERT(isolate == Isolate::Current()); ASSERT(isolate == Isolate::Current());
Deoptimizer* result = isolate->deoptimizer_data()->current_; Deoptimizer* result = isolate->deoptimizer_data()->current_;
...@@ -461,44 +472,45 @@ void Deoptimizer::DeleteFrameDescriptions() { ...@@ -461,44 +472,45 @@ void Deoptimizer::DeleteFrameDescriptions() {
} }
Address Deoptimizer::GetDeoptimizationEntry(int id, BailoutType type) { Address Deoptimizer::GetDeoptimizationEntry(int id,
BailoutType type,
GetEntryMode mode) {
ASSERT(id >= 0); ASSERT(id >= 0);
if (id >= kNumberOfEntries) return NULL; if (id >= kMaxNumberOfEntries) return NULL;
MemoryChunk* base = NULL; VirtualMemory* base = NULL;
if (mode == ENSURE_ENTRY_CODE) {
EnsureCodeForDeoptimizationEntry(type, id);
} else {
ASSERT(mode == CALCULATE_ENTRY_ADDRESS);
}
DeoptimizerData* data = Isolate::Current()->deoptimizer_data(); DeoptimizerData* data = Isolate::Current()->deoptimizer_data();
if (type == EAGER) { if (type == EAGER) {
if (data->eager_deoptimization_entry_code_ == NULL) {
data->eager_deoptimization_entry_code_ = CreateCode(type);
}
base = data->eager_deoptimization_entry_code_; base = data->eager_deoptimization_entry_code_;
} else { } else {
if (data->lazy_deoptimization_entry_code_ == NULL) {
data->lazy_deoptimization_entry_code_ = CreateCode(type);
}
base = data->lazy_deoptimization_entry_code_; base = data->lazy_deoptimization_entry_code_;
} }
return return
static_cast<Address>(base->area_start()) + (id * table_entry_size_); static_cast<Address>(base->address()) + (id * table_entry_size_);
} }
int Deoptimizer::GetDeoptimizationId(Address addr, BailoutType type) { int Deoptimizer::GetDeoptimizationId(Address addr, BailoutType type) {
MemoryChunk* base = NULL; VirtualMemory* base = NULL;
DeoptimizerData* data = Isolate::Current()->deoptimizer_data(); DeoptimizerData* data = Isolate::Current()->deoptimizer_data();
if (type == EAGER) { if (type == EAGER) {
base = data->eager_deoptimization_entry_code_; base = data->eager_deoptimization_entry_code_;
} else { } else {
base = data->lazy_deoptimization_entry_code_; base = data->lazy_deoptimization_entry_code_;
} }
Address base_casted = reinterpret_cast<Address>(base->address());
if (base == NULL || if (base == NULL ||
addr < base->area_start() || addr < base->address() ||
addr >= base->area_start() + addr >= base_casted + (kMaxNumberOfEntries * table_entry_size_)) {
(kNumberOfEntries * table_entry_size_)) {
return kNotDeoptimizationEntry; return kNotDeoptimizationEntry;
} }
ASSERT_EQ(0, ASSERT_EQ(0,
static_cast<int>(addr - base->area_start()) % table_entry_size_); static_cast<int>(addr - base_casted) % table_entry_size_);
return static_cast<int>(addr - base->area_start()) / table_entry_size_; return static_cast<int>(addr - base_casted) / table_entry_size_;
} }
...@@ -1384,31 +1396,44 @@ void Deoptimizer::AddDoubleValue(intptr_t slot_address, double value) { ...@@ -1384,31 +1396,44 @@ void Deoptimizer::AddDoubleValue(intptr_t slot_address, double value) {
} }
MemoryChunk* Deoptimizer::CreateCode(BailoutType type) { void Deoptimizer::EnsureCodeForDeoptimizationEntry(BailoutType type,
int max_entry_id) {
// We cannot run this if the serializer is enabled because this will // We cannot run this if the serializer is enabled because this will
// cause us to emit relocation information for the external // cause us to emit relocation information for the external
// references. This is fine because the deoptimizer's code section // references. This is fine because the deoptimizer's code section
// isn't meant to be serialized at all. // isn't meant to be serialized at all.
ASSERT(!Serializer::enabled()); ASSERT(!Serializer::enabled());
ASSERT(type == EAGER || type == LAZY);
DeoptimizerData* data = Isolate::Current()->deoptimizer_data();
int entry_count = (type == EAGER)
? data->eager_deoptimization_entry_code_entries_
: data->lazy_deoptimization_entry_code_entries_;
if (max_entry_id < entry_count) return;
entry_count = Min(Max(entry_count * 2, Deoptimizer::kMinNumberOfEntries),
Deoptimizer::kMaxNumberOfEntries);
MacroAssembler masm(Isolate::Current(), NULL, 16 * KB); MacroAssembler masm(Isolate::Current(), NULL, 16 * KB);
masm.set_emit_debug_code(false); masm.set_emit_debug_code(false);
GenerateDeoptimizationEntries(&masm, kNumberOfEntries, type); GenerateDeoptimizationEntries(&masm, entry_count, type);
CodeDesc desc; CodeDesc desc;
masm.GetCode(&desc); masm.GetCode(&desc);
ASSERT(desc.reloc_size == 0); ASSERT(desc.reloc_size == 0);
MemoryChunk* chunk = VirtualMemory* memory = type == EAGER
Isolate::Current()->memory_allocator()->AllocateChunk(desc.instr_size, ? data->eager_deoptimization_entry_code_
EXECUTABLE, : data->lazy_deoptimization_entry_code_;
NULL); size_t table_size = Deoptimizer::GetMaxDeoptTableSize();
ASSERT(chunk->area_size() >= desc.instr_size); ASSERT(static_cast<int>(table_size) >= desc.instr_size);
if (chunk == NULL) { memory->Commit(memory->address(), table_size, true);
V8::FatalProcessOutOfMemory("Not enough memory for deoptimization table"); memcpy(memory->address(), desc.buffer, desc.instr_size);
CPU::FlushICache(memory->address(), desc.instr_size);
if (type == EAGER) {
data->eager_deoptimization_entry_code_entries_ = entry_count;
} else {
data->lazy_deoptimization_entry_code_entries_ = entry_count;
} }
memcpy(chunk->area_start(), desc.buffer, desc.instr_size);
CPU::FlushICache(chunk->area_start(), desc.instr_size);
return chunk;
} }
......
...@@ -100,8 +100,10 @@ class DeoptimizerData { ...@@ -100,8 +100,10 @@ class DeoptimizerData {
#endif #endif
private: private:
MemoryChunk* eager_deoptimization_entry_code_; int eager_deoptimization_entry_code_entries_;
MemoryChunk* lazy_deoptimization_entry_code_; int lazy_deoptimization_entry_code_entries_;
VirtualMemory* eager_deoptimization_entry_code_;
VirtualMemory* lazy_deoptimization_entry_code_;
Deoptimizer* current_; Deoptimizer* current_;
#ifdef ENABLE_DEBUGGER_SUPPORT #ifdef ENABLE_DEBUGGER_SUPPORT
...@@ -226,7 +228,17 @@ class Deoptimizer : public Malloced { ...@@ -226,7 +228,17 @@ class Deoptimizer : public Malloced {
static void ComputeOutputFrames(Deoptimizer* deoptimizer); static void ComputeOutputFrames(Deoptimizer* deoptimizer);
static Address GetDeoptimizationEntry(int id, BailoutType type);
enum GetEntryMode {
CALCULATE_ENTRY_ADDRESS,
ENSURE_ENTRY_CODE
};
static Address GetDeoptimizationEntry(
int id,
BailoutType type,
GetEntryMode mode = ENSURE_ENTRY_CODE);
static int GetDeoptimizationId(Address addr, BailoutType type); static int GetDeoptimizationId(Address addr, BailoutType type);
static int GetOutputInfo(DeoptimizationOutputData* data, static int GetOutputInfo(DeoptimizationOutputData* data,
BailoutId node_id, BailoutId node_id,
...@@ -283,8 +295,11 @@ class Deoptimizer : public Malloced { ...@@ -283,8 +295,11 @@ class Deoptimizer : public Malloced {
int ConvertJSFrameIndexToFrameIndex(int jsframe_index); int ConvertJSFrameIndexToFrameIndex(int jsframe_index);
static size_t GetMaxDeoptTableSize();
private: private:
static const int kNumberOfEntries = 16384; static const int kMinNumberOfEntries = 64;
static const int kMaxNumberOfEntries = 16384;
Deoptimizer(Isolate* isolate, Deoptimizer(Isolate* isolate,
JSFunction* function, JSFunction* function,
...@@ -327,7 +342,8 @@ class Deoptimizer : public Malloced { ...@@ -327,7 +342,8 @@ class Deoptimizer : public Malloced {
void AddArgumentsObjectValue(intptr_t value); void AddArgumentsObjectValue(intptr_t value);
void AddDoubleValue(intptr_t slot_address, double value); void AddDoubleValue(intptr_t slot_address, double value);
static MemoryChunk* CreateCode(BailoutType type); static void EnsureCodeForDeoptimizationEntry(BailoutType type,
int max_entry_id);
static void GenerateDeoptimizationEntries( static void GenerateDeoptimizationEntries(
MacroAssembler* masm, int count, BailoutType type); MacroAssembler* masm, int count, BailoutType type);
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment