Commit c31454ac authored by danno@chromium.org

Use MemoryChunk-based allocation for deoptimization entry code

This is done by first committing the deoptimization entry code with a minimal
area size (OS::CommitPageSize) and later using CommitArea to adjust the size.
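
In outline, the new pattern looks like the sketch below (a simplified illustration assembled from the diff, not verbatim V8 code; error handling is elided and `desc` stands in for the generated entry-table code):

    // Reserve the whole deopt table up front, but commit only one page.
    size_t deopt_table_size = Deoptimizer::GetMaxDeoptTableSize();
    MemoryChunk* chunk =
        Isolate::Current()->memory_allocator()->AllocateChunk(
            deopt_table_size,
            OS::CommitPageSize(),  // minimal initial commit
            EXECUTABLE,
            NULL);

    // Later, once the generated table's real size is known, commit enough
    // of the reservation to hold it and copy the code in.
    chunk->CommitArea(desc.instr_size);
    memcpy(chunk->area_start(), desc.buffer, desc.instr_size);
    CPU::FlushICache(chunk->area_start(), desc.instr_size);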

Committed: http://code.google.com/p/v8/source/detail?r=13494

Review URL: https://chromiumcodereview.appspot.com/11566011
Patch from Haitao Feng <haitao.feng@intel.com>.

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@13532 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
parent 905548d2
@@ -44,8 +44,18 @@ DeoptimizerData::DeoptimizerData() {
   eager_deoptimization_entry_code_entries_ = -1;
   lazy_deoptimization_entry_code_entries_ = -1;
   size_t deopt_table_size = Deoptimizer::GetMaxDeoptTableSize();
-  eager_deoptimization_entry_code_ = new VirtualMemory(deopt_table_size);
-  lazy_deoptimization_entry_code_ = new VirtualMemory(deopt_table_size);
+  MemoryAllocator* allocator = Isolate::Current()->memory_allocator();
+  size_t initial_commit_size = OS::CommitPageSize();
+  eager_deoptimization_entry_code_ =
+      allocator->AllocateChunk(deopt_table_size,
+                               initial_commit_size,
+                               EXECUTABLE,
+                               NULL);
+  lazy_deoptimization_entry_code_ =
+      allocator->AllocateChunk(deopt_table_size,
+                               initial_commit_size,
+                               EXECUTABLE,
+                               NULL);
   current_ = NULL;
   deoptimizing_code_list_ = NULL;
 #ifdef ENABLE_DEBUGGER_SUPPORT
@@ -55,9 +65,11 @@ DeoptimizerData::DeoptimizerData() {
 DeoptimizerData::~DeoptimizerData() {
-  delete eager_deoptimization_entry_code_;
+  Isolate::Current()->memory_allocator()->Free(
+      eager_deoptimization_entry_code_);
   eager_deoptimization_entry_code_ = NULL;
-  delete lazy_deoptimization_entry_code_;
+  Isolate::Current()->memory_allocator()->Free(
+      lazy_deoptimization_entry_code_);
   lazy_deoptimization_entry_code_ = NULL;
 
   DeoptimizingCodeListNode* current = deoptimizing_code_list_;
@@ -619,7 +631,7 @@ Address Deoptimizer::GetDeoptimizationEntry(int id,
                                             GetEntryMode mode) {
   ASSERT(id >= 0);
   if (id >= kMaxNumberOfEntries) return NULL;
-  VirtualMemory* base = NULL;
+  MemoryChunk* base = NULL;
   if (mode == ENSURE_ENTRY_CODE) {
     EnsureCodeForDeoptimizationEntry(type, id);
   } else {
@@ -631,28 +643,27 @@ Address Deoptimizer::GetDeoptimizationEntry(int id,
   } else {
     base = data->lazy_deoptimization_entry_code_;
   }
-  return
-      static_cast<Address>(base->address()) + (id * table_entry_size_);
+  return base->area_start() + (id * table_entry_size_);
 }
 
 
 int Deoptimizer::GetDeoptimizationId(Address addr, BailoutType type) {
-  VirtualMemory* base = NULL;
+  MemoryChunk* base = NULL;
   DeoptimizerData* data = Isolate::Current()->deoptimizer_data();
   if (type == EAGER) {
     base = data->eager_deoptimization_entry_code_;
   } else {
     base = data->lazy_deoptimization_entry_code_;
   }
-  Address base_casted = reinterpret_cast<Address>(base->address());
+  Address start = base->area_start();
   if (base == NULL ||
-      addr < base->address() ||
-      addr >= base_casted + (kMaxNumberOfEntries * table_entry_size_)) {
+      addr < start ||
+      addr >= start + (kMaxNumberOfEntries * table_entry_size_)) {
     return kNotDeoptimizationEntry;
   }
   ASSERT_EQ(0,
-            static_cast<int>(addr - base_casted) % table_entry_size_);
-  return static_cast<int>(addr - base_casted) / table_entry_size_;
+            static_cast<int>(addr - start) % table_entry_size_);
+  return static_cast<int>(addr - start) / table_entry_size_;
 }
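
For intuition, the entry-table arithmetic above runs in both directions: GetDeoptimizationEntry maps an id to an address, and GetDeoptimizationId inverts it. A tiny standalone sketch with made-up sizes (in V8, table_entry_size_ is an architecture-specific constant):

    // Standalone sketch of the entry-table arithmetic; all sizes assumed.
    #include <cassert>
    #include <cstdint>

    int main() {
      const uintptr_t table_entry_size = 12;  // assumed size of one entry stub
      const uintptr_t max_entries = 16384;    // stands in for kMaxNumberOfEntries
      const uintptr_t start = 0x10000;        // stands in for base->area_start()

      // GetDeoptimizationEntry direction: id -> address.
      uintptr_t addr = start + 7 * table_entry_size;

      // GetDeoptimizationId direction: address -> id.
      assert(addr >= start && addr < start + max_entries * table_entry_size);
      assert((addr - start) % table_entry_size == 0);  // entries are contiguous stubs
      assert((addr - start) / table_entry_size == 7);
      return 0;
    }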
@@ -1571,14 +1582,14 @@ void Deoptimizer::EnsureCodeForDeoptimizationEntry(BailoutType type,
   CodeDesc desc;
   masm.GetCode(&desc);
 
-  VirtualMemory* memory = type == EAGER
+  MemoryChunk* chunk = type == EAGER
       ? data->eager_deoptimization_entry_code_
       : data->lazy_deoptimization_entry_code_;
-  size_t table_size = Deoptimizer::GetMaxDeoptTableSize();
-  ASSERT(static_cast<int>(table_size) >= desc.instr_size);
-  memory->Commit(memory->address(), table_size, true);
-  memcpy(memory->address(), desc.buffer, desc.instr_size);
-  CPU::FlushICache(memory->address(), desc.instr_size);
+  ASSERT(static_cast<int>(Deoptimizer::GetMaxDeoptTableSize()) >=
+         desc.instr_size);
+  chunk->CommitArea(desc.instr_size);
+  memcpy(chunk->area_start(), desc.buffer, desc.instr_size);
+  CPU::FlushICache(chunk->area_start(), desc.instr_size);
 
   if (type == EAGER) {
     data->eager_deoptimization_entry_code_entries_ = entry_count;
......
@@ -113,8 +113,8 @@ class DeoptimizerData {
  private:
   int eager_deoptimization_entry_code_entries_;
   int lazy_deoptimization_entry_code_entries_;
-  VirtualMemory* eager_deoptimization_entry_code_;
-  VirtualMemory* lazy_deoptimization_entry_code_;
+  MemoryChunk* eager_deoptimization_entry_code_;
+  MemoryChunk* lazy_deoptimization_entry_code_;
   Deoptimizer* current_;
 
 #ifdef ENABLE_DEBUGGER_SUPPORT
......
@@ -1311,6 +1311,7 @@ class Isolate {
   friend class StackGuard;
   friend class ThreadId;
   friend class TestMemoryAllocatorScope;
+  friend class TestCodeRangeScope;
   friend class v8::Isolate;
   friend class v8::Locker;
   friend class v8::Unlocker;
......
This diff is collapsed.
@@ -645,6 +645,7 @@ class MemoryChunk {
   int area_size() {
     return static_cast<int>(area_end() - area_start());
   }
+  bool CommitArea(size_t requested);
 
   // Approximate amount of physical memory committed for this chunk.
   size_t CommittedPhysicalMemory() {
@@ -887,8 +888,11 @@ class CodeRange {
   // Allocates a chunk of memory from the large-object portion of
   // the code range. On platforms with no separate code range, should
   // not be called.
-  MUST_USE_RESULT Address AllocateRawMemory(const size_t requested,
+  MUST_USE_RESULT Address AllocateRawMemory(const size_t requested_size,
+                                            const size_t commit_size,
                                             size_t* allocated);
+  bool CommitRawMemory(Address start, size_t length);
+  bool UncommitRawMemory(Address start, size_t length);
   void FreeRawMemory(Address buf, size_t length);
 
  private:
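
A rough usage sketch of the widened CodeRange interface (method signatures are taken from the diff above; the sizes and the caller's surroundings are illustrative assumptions, not code from this commit):

    // Hypothetical caller: reserve a large block but commit it lazily.
    size_t allocated = 0;
    Address base = code_range->AllocateRawMemory(512 * KB,  // reserve
                                                 4 * KB,    // commit now
                                                 &allocated);
    if (base != NULL) {
      // Commit a further page of the reservation on demand...
      code_range->CommitRawMemory(base + 4 * KB, 4 * KB);
      // ...hand it back when no longer needed...
      code_range->UncommitRawMemory(base + 4 * KB, 4 * KB);
      // ...and finally release the whole block.
      code_range->FreeRawMemory(base, allocated);
    }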
@@ -1036,14 +1040,19 @@ class MemoryAllocator {
   void ReportStatistics();
 #endif
 
-  MemoryChunk* AllocateChunk(intptr_t body_size,
+  // Returns a MemoryChunk in which the memory region from commit_area_size to
+  // reserve_area_size of the chunk area is reserved but not committed; it
+  // can be committed later by calling MemoryChunk::CommitArea.
+  MemoryChunk* AllocateChunk(intptr_t reserve_area_size,
+                             intptr_t commit_area_size,
                              Executability executable,
                              Space* space);
 
   Address ReserveAlignedMemory(size_t requested,
                                size_t alignment,
                                VirtualMemory* controller);
-  Address AllocateAlignedMemory(size_t requested,
+  Address AllocateAlignedMemory(size_t reserve_size,
+                                size_t commit_size,
                                 size_t alignment,
                                 Executability executable,
                                 VirtualMemory* controller);
@@ -1093,9 +1102,10 @@ class MemoryAllocator {
     return CodePageAreaEndOffset() - CodePageAreaStartOffset();
   }
 
-  MUST_USE_RESULT static bool CommitCodePage(VirtualMemory* vm,
-                                             Address start,
-                                             size_t size);
+  MUST_USE_RESULT static bool CommitExecutableMemory(VirtualMemory* vm,
+                                                     Address start,
+                                                     size_t commit_size,
+                                                     size_t reserved_size);
 
  private:
 
   Isolate* isolate_;
......
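
The tests below pin down the intended AllocateChunk/CommitArea contract; in condensed form it is roughly as follows (a sketch with illustrative sizes, not the test code itself; `allocator` stands in for a set-up MemoryAllocator):

    // Condensed sketch of the AllocateChunk/CommitArea contract.
    MemoryChunk* chunk = allocator->AllocateChunk(1 * MB,                // reserve
                                                  OS::CommitPageSize(),  // commit
                                                  NOT_EXECUTABLE,
                                                  NULL);
    Address start = chunk->area_start();

    chunk->CommitArea(256 * KB);          // grow the committed area in place
    CHECK(chunk->area_start() == start);  // the area start never moves
    CHECK(static_cast<size_t>(chunk->area_size()) == 256 * KB);

    chunk->CommitArea(64 * KB);           // shrinking uncommits the tail
    CHECK(static_cast<size_t>(chunk->area_size()) == 64 * KB);

    allocator->Free(chunk);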
@@ -204,7 +204,9 @@ TEST(CodeRange) {
         (Page::kMaxNonCodeHeapObjectSize << (Pseudorandom() % 3)) +
         Pseudorandom() % 5000 + 1;
     size_t allocated = 0;
-    Address base = code_range->AllocateRawMemory(requested, &allocated);
+    Address base = code_range->AllocateRawMemory(requested,
+                                                 requested,
+                                                 &allocated);
     CHECK(base != NULL);
     blocks.Add(Block(base, static_cast<int>(allocated)));
     current_allocated += static_cast<int>(allocated);
......
@@ -121,9 +121,148 @@ class TestMemoryAllocatorScope {
   DISALLOW_COPY_AND_ASSIGN(TestMemoryAllocatorScope);
 };
 
+
+// Temporarily sets a given code range in an isolate.
+class TestCodeRangeScope {
+ public:
+  TestCodeRangeScope(Isolate* isolate, CodeRange* code_range)
+      : isolate_(isolate),
+        old_code_range_(isolate->code_range_) {
+    isolate->code_range_ = code_range;
+  }
+
+  ~TestCodeRangeScope() {
+    isolate_->code_range_ = old_code_range_;
+  }
+
+ private:
+  Isolate* isolate_;
+  CodeRange* old_code_range_;
+
+  DISALLOW_COPY_AND_ASSIGN(TestCodeRangeScope);
+};
+
 } }  // namespace v8::internal
 
+
+static void VerifyMemoryChunk(Isolate* isolate,
+                              Heap* heap,
+                              CodeRange* code_range,
+                              size_t reserve_area_size,
+                              size_t commit_area_size,
+                              size_t second_commit_area_size,
+                              Executability executable) {
+  MemoryAllocator* memory_allocator = new MemoryAllocator(isolate);
+  CHECK(memory_allocator->SetUp(heap->MaxReserved(),
+                                heap->MaxExecutableSize()));
+  TestMemoryAllocatorScope test_allocator_scope(isolate, memory_allocator);
+  TestCodeRangeScope test_code_range_scope(isolate, code_range);
+
+  size_t header_size = (executable == EXECUTABLE)
+                       ? MemoryAllocator::CodePageGuardStartOffset()
+                       : MemoryChunk::kObjectStartOffset;
+  size_t guard_size = (executable == EXECUTABLE)
+                      ? MemoryAllocator::CodePageGuardSize()
+                      : 0;
+
+  MemoryChunk* memory_chunk = memory_allocator->AllocateChunk(reserve_area_size,
+                                                              commit_area_size,
+                                                              executable,
+                                                              NULL);
+  size_t alignment = code_range->exists() ?
+                     MemoryChunk::kAlignment : OS::CommitPageSize();
+  size_t reserved_size = (executable == EXECUTABLE)
+      ? RoundUp(header_size + guard_size + reserve_area_size + guard_size,
+                alignment)
+      : RoundUp(header_size + reserve_area_size, OS::CommitPageSize());
+  CHECK(memory_chunk->size() == reserved_size);
+  CHECK(memory_chunk->area_start() < memory_chunk->address() +
+                                     memory_chunk->size());
+  CHECK(memory_chunk->area_end() <= memory_chunk->address() +
+                                    memory_chunk->size());
+  CHECK(static_cast<size_t>(memory_chunk->area_size()) == commit_area_size);
+
+  Address area_start = memory_chunk->area_start();
+
+  memory_chunk->CommitArea(second_commit_area_size);
+  CHECK(area_start == memory_chunk->area_start());
+  CHECK(memory_chunk->area_start() < memory_chunk->address() +
+                                     memory_chunk->size());
+  CHECK(memory_chunk->area_end() <= memory_chunk->address() +
+                                    memory_chunk->size());
+  CHECK(static_cast<size_t>(memory_chunk->area_size()) ==
+        second_commit_area_size);
+
+  memory_allocator->Free(memory_chunk);
+  memory_allocator->TearDown();
+  delete memory_allocator;
+}
+
+
+static unsigned int Pseudorandom() {
+  static uint32_t lo = 2345;
+  lo = 18273 * (lo & 0xFFFFF) + (lo >> 16);
+  return lo & 0xFFFFF;
+}
+
+
+TEST(MemoryChunk) {
+  OS::SetUp();
+  Isolate* isolate = Isolate::Current();
+  isolate->InitializeLoggingAndCounters();
+  Heap* heap = isolate->heap();
+  CHECK(heap->ConfigureHeapDefault());
+
+  size_t reserve_area_size = 1 * MB;
+  size_t initial_commit_area_size, second_commit_area_size;
+
+  for (int i = 0; i < 100; i++) {
+    initial_commit_area_size = Pseudorandom();
+    second_commit_area_size = Pseudorandom();
+
+    // With CodeRange.
+    CodeRange* code_range = new CodeRange(isolate);
+    const int code_range_size = 32 * MB;
+    if (!code_range->SetUp(code_range_size)) return;
+    VerifyMemoryChunk(isolate,
+                      heap,
+                      code_range,
+                      reserve_area_size,
+                      initial_commit_area_size,
+                      second_commit_area_size,
+                      EXECUTABLE);
+    VerifyMemoryChunk(isolate,
+                      heap,
+                      code_range,
+                      reserve_area_size,
+                      initial_commit_area_size,
+                      second_commit_area_size,
+                      NOT_EXECUTABLE);
+    delete code_range;
+
+    // Without CodeRange.
+    code_range = NULL;
+    VerifyMemoryChunk(isolate,
+                      heap,
+                      code_range,
+                      reserve_area_size,
+                      initial_commit_area_size,
+                      second_commit_area_size,
+                      EXECUTABLE);
+    VerifyMemoryChunk(isolate,
+                      heap,
+                      code_range,
+                      reserve_area_size,
+                      initial_commit_area_size,
+                      second_commit_area_size,
+                      NOT_EXECUTABLE);
+  }
+}
+
+
 TEST(MemoryAllocator) {
   OS::SetUp();
   Isolate* isolate = Isolate::Current();
......