Commit b114df1d authored by mlippautz, committed by Commit bot

[heap] Bundle platform-specific constants for CodeRange

- Remove null checks: the CodeRange object now always exists (it may be
  invalid, but it is there)
- Account for the reserved area (Win64) in SetUp so the caller doesn't need to
  be aware of it (sketched below)

R=ulan@chromium.org

Review-Url: https://codereview.chromium.org/1991253002
Cr-Commit-Position: refs/heads/master@{#36377}
parent 6254a4e3
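
The second bullet can be sketched as a small standalone program. Everything below (the constants, their values, and the AdjustedRequest helper) is a hypothetical stand-in for the V8 originals, meant only to illustrate the arithmetic SetUp now performs on behalf of the caller:

#include <cstddef>
#include <cstdio>

constexpr size_t MB = 1024 * 1024;
constexpr size_t kMaximalCodeRangeSize = 256 * MB;  // e.g. x64
constexpr size_t kReservedCodeRangePages = 1;       // e.g. Win64
constexpr size_t kCommitPageSize = 4096;            // illustrative page size

// Mirrors the new SetUp behavior: the caller requests only the space it
// needs for code, and SetUp itself grows the request by the reserved area.
size_t AdjustedRequest(size_t requested) {
  const size_t reserved_area = kReservedCodeRangePages * kCommitPageSize;
  if (requested < kMaximalCodeRangeSize - reserved_area)
    requested += reserved_area;
  return requested;
}

int main() {
  // A caller asking for 32 MB no longer adds the reserved pages itself.
  std::printf("%zu\n", AdjustedRequest(32 * MB));  // 32 MB + one page
  return 0;
}
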
......@@ -1418,8 +1418,7 @@ Handle<Code> Factory::NewCode(const CodeDesc& desc,
int obj_size = Code::SizeFor(body_size);
Handle<Code> code = NewCodeRaw(obj_size, immovable);
-DCHECK(isolate()->heap()->memory_allocator()->code_range() == NULL ||
-!isolate()->heap()->memory_allocator()->code_range()->valid() ||
+DCHECK(!isolate()->heap()->memory_allocator()->code_range()->valid() ||
isolate()->heap()->memory_allocator()->code_range()->contains(
code->address()) ||
obj_size <= isolate()->heap()->code_space()->AreaSize());
......
......@@ -150,8 +150,10 @@ const bool kRequiresCodeRange = true;
// encoded immediate, the addresses have to be in range of 256MB aligned
// region. Used only for large object space.
const size_t kMaximalCodeRangeSize = 256 * MB;
+const size_t kCodeRangeAreaAlignment = 256 * MB;
#else
const size_t kMaximalCodeRangeSize = 512 * MB;
+const size_t kCodeRangeAreaAlignment = 4 * KB;  // OS page.
#endif
#if V8_OS_WIN
const size_t kMinimumCodeRangeSize = 4 * MB;
......@@ -170,11 +172,13 @@ const bool kRequiresCodeRange = true;
const size_t kMaximalCodeRangeSize = 256 * MB;
const size_t kMinimumCodeRangeSize = 3 * MB;
const size_t kReservedCodeRangePages = 0;
+const size_t kCodeRangeAreaAlignment = 4 * KB;  // OS page.
#else
const bool kRequiresCodeRange = false;
const size_t kMaximalCodeRangeSize = 0 * MB;
const size_t kMinimumCodeRangeSize = 0 * MB;
const size_t kReservedCodeRangePages = 0;
+const size_t kCodeRangeAreaAlignment = 4 * KB;  // OS page.
#endif
#endif
......
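
The kCodeRangeAreaAlignment constant added above is what lets the reservation site later in this patch drop its per-architecture #ifdef: each platform declares its alignment requirement once, and SetUp takes the stricter of that constant and the OS allocation granularity. A minimal sketch of the selection, where OsAllocateAlignment is a hypothetical stand-in for base::OS::AllocateAlignment():

#include <algorithm>
#include <cstddef>
#include <cstdio>

constexpr size_t KB = 1024;
constexpr size_t MB = 1024 * KB;

// Hypothetical stand-in: 64 KB allocation granularity as on Windows; most
// POSIX systems would return a single 4 KB page here.
size_t OsAllocateAlignment() { return 64 * KB; }

size_t ReservationAlignment(size_t code_range_area_alignment) {
  return std::max(code_range_area_alignment, OsAllocateAlignment());
}

int main() {
  // MIPS64 passes 256 MB so 28-bit j/jal targets stay in one region ...
  std::printf("%zu\n", ReservationAlignment(256 * MB));  // 256 MB wins
  // ... all other platforms pass one OS page, and the OS granularity wins.
  std::printf("%zu\n", ReservationAlignment(4 * KB));    // 64 KB wins
  return 0;
}
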
......@@ -3329,8 +3329,7 @@ AllocationResult Heap::AllocateCode(int object_size, bool immovable) {
result->set_map_no_write_barrier(code_map());
Code* code = Code::cast(result);
DCHECK(IsAligned(bit_cast<intptr_t>(code->address()), kCodeAlignment));
-DCHECK(memory_allocator()->code_range() == NULL ||
-!memory_allocator()->code_range()->valid() ||
+DCHECK(!memory_allocator()->code_range()->valid() ||
memory_allocator()->code_range()->contains(code->address()) ||
object_size <= code_space()->AreaSize());
code->set_gc_metadata(Smi::FromInt(0));
......@@ -3356,8 +3355,7 @@ AllocationResult Heap::CopyCode(Code* code) {
// Relocate the copy.
DCHECK(IsAligned(bit_cast<intptr_t>(new_code->address()), kCodeAlignment));
-DCHECK(memory_allocator()->code_range() == NULL ||
-!memory_allocator()->code_range()->valid() ||
+DCHECK(!memory_allocator()->code_range()->valid() ||
memory_allocator()->code_range()->contains(code->address()) ||
obj_size <= code_space()->AreaSize());
new_code->Relocate(new_addr - old_addr);
......@@ -3426,8 +3424,7 @@ AllocationResult Heap::CopyCode(Code* code, Vector<byte> reloc_info) {
// Relocate the copy.
DCHECK(IsAligned(bit_cast<intptr_t>(new_code->address()), kCodeAlignment));
-DCHECK(memory_allocator()->code_range() == NULL ||
-!memory_allocator()->code_range()->valid() ||
+DCHECK(!memory_allocator()->code_range()->valid() ||
memory_allocator()->code_range()->contains(code->address()) ||
new_obj_size <= code_space()->AreaSize());
......
......@@ -119,15 +119,16 @@ bool CodeRange::SetUp(size_t requested) {
requested = kMinimumCodeRangeSize;
}
+const size_t reserved_area =
+kReservedCodeRangePages * base::OS::CommitPageSize();
+if (requested < (kMaximalCodeRangeSize - reserved_area))
+requested += reserved_area;
DCHECK(!kRequiresCodeRange || requested <= kMaximalCodeRangeSize);
-#ifdef V8_TARGET_ARCH_MIPS64
-// To use pseudo-relative jumps such as j/jal instructions which have 28-bit
-// encoded immediate, the addresses have to be in range of 256Mb aligned
-// region.
-code_range_ = new base::VirtualMemory(requested, kMaximalCodeRangeSize);
-#else
-code_range_ = new base::VirtualMemory(requested);
-#endif
+code_range_ = new base::VirtualMemory(
+requested, Max(kCodeRangeAreaAlignment,
+static_cast<size_t>(base::OS::AllocateAlignment())));
CHECK(code_range_ != NULL);
if (!code_range_->IsReserved()) {
delete code_range_;
......@@ -141,18 +142,16 @@ bool CodeRange::SetUp(size_t requested) {
// On some platforms, specifically Win64, we need to reserve some pages at
// the beginning of an executable space.
-if (kReservedCodeRangePages) {
-if (!code_range_->Commit(
-base, kReservedCodeRangePages * base::OS::CommitPageSize(), true)) {
+if (reserved_area > 0) {
+if (!code_range_->Commit(base, reserved_area, true)) {
delete code_range_;
code_range_ = NULL;
return false;
}
-base += kReservedCodeRangePages * base::OS::CommitPageSize();
+base += reserved_area;
}
Address aligned_base = RoundUp(base, MemoryChunk::kAlignment);
-size_t size = code_range_->size() - (aligned_base - base) -
-kReservedCodeRangePages * base::OS::CommitPageSize();
+size_t size = code_range_->size() - (aligned_base - base) - reserved_area;
allocation_list_.Add(FreeBlock(aligned_base, size));
current_allocation_block_index_ = 0;
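
With the repeated kReservedCodeRangePages * CommitPageSize() expression collapsed into the single reserved_area variable, the free-list bookkeeping at the end of SetUp is easy to state in isolation. A simplified sketch, with illustrative names that only approximate V8's RoundUp and FreeBlock:

#include <cstddef>
#include <cstdint>
#include <cstdio>

struct FreeBlock {
  uintptr_t start;
  size_t size;
};

uintptr_t RoundUpTo(uintptr_t value, size_t alignment) {
  return (value + alignment - 1) & ~(static_cast<uintptr_t>(alignment) - 1);
}

// The first usable block starts at the chunk-aligned address past the
// committed reserved area; its size excludes both the alignment slack and
// the reserved area itself, matching the arithmetic in the hunk above.
FreeBlock InitialBlock(uintptr_t vm_start, size_t vm_size,
                       size_t reserved_area, size_t chunk_alignment) {
  uintptr_t base = vm_start + reserved_area;
  uintptr_t aligned_base = RoundUpTo(base, chunk_alignment);
  size_t size = vm_size - (aligned_base - base) - reserved_area;
  return {aligned_base, size};
}

int main() {
  FreeBlock b = InitialBlock(0x100000, 32 * 1024 * 1024, 4096, 1 << 20);
  std::printf("start=%#zx size=%zu\n", static_cast<size_t>(b.start), b.size);
  return 0;
}
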
......@@ -415,8 +414,8 @@ void MemoryAllocator::FreeMemory(base::VirtualMemory* reservation,
// Code which is part of the code-range does not have its own VirtualMemory.
DCHECK(code_range() == NULL ||
!code_range()->contains(static_cast<Address>(reservation->address())));
-DCHECK(executable == NOT_EXECUTABLE || code_range() == NULL ||
-!code_range()->valid() || reservation->size() <= Page::kPageSize);
+DCHECK(executable == NOT_EXECUTABLE || !code_range()->valid() ||
+reservation->size() <= Page::kPageSize);
reservation->Release();
}
......@@ -430,8 +429,7 @@ void MemoryAllocator::FreeMemory(Address base, size_t size,
DCHECK(executable == EXECUTABLE);
code_range()->FreeRawMemory(base, size);
} else {
-DCHECK(executable == NOT_EXECUTABLE || code_range() == NULL ||
-!code_range()->valid());
+DCHECK(executable == NOT_EXECUTABLE || !code_range()->valid());
bool result = base::VirtualMemory::ReleaseRegion(base, size);
USE(result);
DCHECK(result);
......@@ -561,8 +559,7 @@ bool MemoryChunk::CommitArea(size_t requested) {
}
} else {
CodeRange* code_range = heap_->memory_allocator()->code_range();
-DCHECK(code_range != NULL && code_range->valid() &&
-IsFlagSet(IS_EXECUTABLE));
+DCHECK(code_range->valid() && IsFlagSet(IS_EXECUTABLE));
if (!code_range->CommitRawMemory(start, length)) return false;
}
......@@ -578,8 +575,7 @@ bool MemoryChunk::CommitArea(size_t requested) {
if (!reservation_.Uncommit(start, length)) return false;
} else {
CodeRange* code_range = heap_->memory_allocator()->code_range();
-DCHECK(code_range != NULL && code_range->valid() &&
-IsFlagSet(IS_EXECUTABLE));
+DCHECK(code_range->valid() && IsFlagSet(IS_EXECUTABLE));
if (!code_range->UncommitRawMemory(start, length)) return false;
}
}
......@@ -673,10 +669,9 @@ MemoryChunk* MemoryAllocator::AllocateChunk(intptr_t reserve_area_size,
#ifdef V8_TARGET_ARCH_MIPS64
// Use code range only for large object space on mips64 to keep address
// range within 256-MB memory region.
-if (code_range() != NULL && code_range()->valid() &&
-reserve_area_size > CodePageAreaSize()) {
+if (code_range()->valid() && reserve_area_size > CodePageAreaSize()) {
#else
-if (code_range() != NULL && code_range()->valid()) {
+if (code_range()->valid()) {
#endif
base =
code_range()->AllocateRawMemory(chunk_size, commit_size, &chunk_size);
......
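
The two variants above are instances of the first commit-message bullet: a CodeRange object now always exists, so call sites test valid() rather than a null pointer. A reduced sketch of that invariant, using a hypothetical stand-in class rather than V8's real CodeRange:

#include <cstddef>
#include <cstdio>
#include <cstdlib>

// Always constructed; only SetUp makes it valid.
class CodeRange {
 public:
  bool SetUp(size_t requested) {
    base_ = std::malloc(requested);  // stand-in for a VM reservation
    return base_ != nullptr;
  }
  bool valid() const { return base_ != nullptr; }
  ~CodeRange() { std::free(base_); }

 private:
  void* base_ = nullptr;
};

int main() {
  CodeRange range;  // exists from the start: no null pointer to test
  std::printf("before SetUp: valid=%d\n", range.valid());  // 0
  range.SetUp(4096);
  std::printf("after SetUp:  valid=%d\n", range.valid());  // 1
  return 0;
}
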
......@@ -1154,15 +1154,6 @@ class CodeRange {
void FreeRawMemory(Address buf, size_t length);
private:
-// Frees the range of virtual memory, and frees the data structures used to
-// manage it.
-void TearDown();
-Isolate* isolate_;
-// The reserved range of virtual memory that all code objects are put in.
-base::VirtualMemory* code_range_;
// Plain old data class, just a struct plus a constructor.
class FreeBlock {
public:
FreeBlock() : start(0), size(0) {}
......@@ -1181,6 +1172,26 @@ class CodeRange {
size_t size;
};
+// Frees the range of virtual memory, and frees the data structures used to
+// manage it.
+void TearDown();
+// Finds a block on the allocation list that contains at least the
+// requested amount of memory. If none is found, sorts and merges
+// the existing free memory blocks, and searches again.
+// If none can be found, returns false.
+bool GetNextAllocationBlock(size_t requested);
+// Compares the start addresses of two free blocks.
+static int CompareFreeBlockAddress(const FreeBlock* left,
+const FreeBlock* right);
+bool ReserveBlock(const size_t requested_size, FreeBlock* block);
+void ReleaseBlock(const FreeBlock* block);
+Isolate* isolate_;
+// The reserved range of virtual memory that all code objects are put in.
+base::VirtualMemory* code_range_;
// The global mutex guards free_list_ and allocation_list_ as GC threads may
// access both lists concurrently to the main thread.
base::Mutex code_range_mutex_;
......@@ -1195,17 +1206,6 @@ class CodeRange {
List<FreeBlock> allocation_list_;
int current_allocation_block_index_;
-// Finds a block on the allocation list that contains at least the
-// requested amount of memory. If none is found, sorts and merges
-// the existing free memory blocks, and searches again.
-// If none can be found, returns false.
-bool GetNextAllocationBlock(size_t requested);
-// Compares the start addresses of two free blocks.
-static int CompareFreeBlockAddress(const FreeBlock* left,
-const FreeBlock* right);
-bool ReserveBlock(const size_t requested_size, FreeBlock* block);
-void ReleaseBlock(const FreeBlock* block);
DISALLOW_COPY_AND_ASSIGN(CodeRange);
};
......
......@@ -211,8 +211,7 @@ TEST(CodeRange) {
const size_t code_range_size = 32*MB;
CcTest::InitializeVM();
CodeRange code_range(reinterpret_cast<Isolate*>(CcTest::isolate()));
-code_range.SetUp(code_range_size +
-kReservedCodeRangePages * v8::base::OS::CommitPageSize());
+code_range.SetUp(code_range_size);
size_t current_allocated = 0;
size_t total_allocated = 0;
List< ::Block> blocks(1000);
......
......@@ -203,36 +203,31 @@ static void VerifyMemoryChunk(Isolate* isolate,
TEST(Regress3540) {
Isolate* isolate = CcTest::i_isolate();
Heap* heap = isolate->heap();
-const int pageSize = Page::kPageSize;
MemoryAllocator* memory_allocator = new MemoryAllocator(isolate);
CHECK(memory_allocator->SetUp(heap->MaxReserved(), heap->MaxExecutableSize(),
0));
TestMemoryAllocatorScope test_allocator_scope(isolate, memory_allocator);
CodeRange* code_range = new CodeRange(isolate);
-const size_t code_range_size = 4 * pageSize;
-if (!code_range->SetUp(
-code_range_size +
-RoundUp(v8::base::OS::CommitPageSize() * kReservedCodeRangePages,
-MemoryChunk::kAlignment) +
-v8::internal::MemoryAllocator::CodePageAreaSize())) {
+const size_t code_range_size = 4 * Page::kPageSize;
+if (!code_range->SetUp(code_range_size)) {
return;
}
Address address;
size_t size;
-size_t request_size = code_range_size - 2 * pageSize;
+size_t request_size = code_range_size - 2 * Page::kPageSize;
address = code_range->AllocateRawMemory(
request_size, request_size - (2 * MemoryAllocator::CodePageGuardSize()),
&size);
-CHECK(address != NULL);
+CHECK_NOT_NULL(address);
Address null_address;
size_t null_size;
-request_size = code_range_size - pageSize;
+request_size = code_range_size - Page::kPageSize;
null_address = code_range->AllocateRawMemory(
request_size, request_size - (2 * MemoryAllocator::CodePageGuardSize()),
&null_size);
-CHECK(null_address == NULL);
+CHECK_NULL(null_address);
code_range->FreeRawMemory(address, size);
delete code_range;
......@@ -281,8 +276,8 @@ TEST(MemoryChunk) {
NOT_EXECUTABLE);
delete code_range;
-// Without CodeRange.
-code_range = NULL;
+// Without a valid CodeRange, i.e., omitting SetUp.
+code_range = new CodeRange(isolate);
VerifyMemoryChunk(isolate,
heap,
code_range,
......@@ -298,6 +293,7 @@ TEST(MemoryChunk) {
initial_commit_area_size,
second_commit_area_size,
NOT_EXECUTABLE);
+delete code_range;
}
}
......