Commit e4c90dc9 authored by ricow@chromium.org

GC Cleanup + Set max old generation size to 700MB on ia32 and max executable size to 128 MB (on ia32)
Review URL: http://codereview.chromium.org/7993003

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@9406 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
parent 74142296
@@ -81,8 +81,8 @@ Heap::Heap()
       reserved_semispace_size_(8 * Max(LUMP_OF_MEMORY, Page::kPageSize)),
       max_semispace_size_(8 * Max(LUMP_OF_MEMORY, Page::kPageSize)),
       initial_semispace_size_(Max(LUMP_OF_MEMORY, Page::kPageSize)),
-      max_old_generation_size_(1400ul * LUMP_OF_MEMORY),
-      max_executable_size_(256l * LUMP_OF_MEMORY),
+      max_old_generation_size_(700ul * LUMP_OF_MEMORY),
+      max_executable_size_(128l * LUMP_OF_MEMORY),
       // Variables set based on semispace_size_ and old_generation_size_ in
       // ConfigureHeap (survived_since_last_expansion_, external_allocation_limit_)
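For scale: the commit title puts the new defaults at 700 MB of old generation and 128 MB of executable space on ia32, which implies LUMP_OF_MEMORY is 1 MB on that architecture. The snippet below only illustrates that arithmetic; the constant names are hypothetical and it is not code from this change.

// Illustration only: what the new ia32 defaults work out to in bytes,
// assuming LUMP_OF_MEMORY == 1 MB on ia32 as the commit title implies.
#include <cstdio>

int main() {
  const long long kMB = 1024LL * 1024LL;
  const long long kLumpOfMemoryIa32 = 1 * kMB;  // assumed ia32 value

  const long long kMaxOldGenerationSize = 700 * kLumpOfMemoryIa32;  // 700 MB
  const long long kMaxExecutableSize = 128 * kLumpOfMemoryIa32;     // 128 MB

  std::printf("max old generation: %lld bytes\n", kMaxOldGenerationSize);
  std::printf("max executable:     %lld bytes\n", kMaxExecutableSize);
  return 0;
}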
@@ -251,14 +251,13 @@ void Page::set_prev_page(Page* page) {
 // not contain slow case logic (eg, move to the next page or try free list
 // allocation) so it can be used by all the allocation functions and for all
 // the paged spaces.
-HeapObject* PagedSpace::AllocateLinearly(AllocationInfo* alloc_info,
-                                         int size_in_bytes) {
-  Address current_top = alloc_info->top;
+HeapObject* PagedSpace::AllocateLinearly(int size_in_bytes) {
+  Address current_top = allocation_info_.top;
   Address new_top = current_top + size_in_bytes;
-  if (new_top > alloc_info->limit) return NULL;
-  alloc_info->top = new_top;
-  ASSERT(alloc_info->VerifyPagedAllocation());
+  if (new_top > allocation_info_.limit) return NULL;
+  allocation_info_.top = new_top;
+  ASSERT(allocation_info_.VerifyPagedAllocation());
   ASSERT(current_top != NULL);
   return HeapObject::FromAddress(current_top);
 }
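The comment in the hunk above describes a pure fast path: bump the top pointer inside the current linear region and bail out (so a caller can take the slow path) when the limit is hit. Below is a minimal standalone sketch of that bump-pointer scheme in the refactored shape, where the space owns a single allocation_info_; ToySpace and SetRegion are hypothetical stand-ins, not V8's classes.

// Minimal bump-pointer allocation sketch (not V8 code): a space owns one
// allocation window [top, limit) and the fast path only moves `top`.
#include <cstddef>

typedef char* Address;

struct AllocationInfo {
  Address top;    // next free byte
  Address limit;  // first byte past the usable region
};

class ToySpace {
 public:
  ToySpace() {
    allocation_info_.top = NULL;
    allocation_info_.limit = NULL;
  }

  // Point the linear allocation window at a fresh region.
  void SetRegion(Address start, Address end) {
    allocation_info_.top = start;
    allocation_info_.limit = end;
  }

  // Fast path only: bump the top pointer, or return NULL so the caller can
  // fall back to slow-case logic (next page, free list, expansion).
  Address AllocateLinearly(size_t size_in_bytes) {
    Address current_top = allocation_info_.top;
    if (current_top == NULL) return NULL;  // no region installed yet
    Address new_top = current_top + size_in_bytes;
    if (new_top > allocation_info_.limit) return NULL;
    allocation_info_.top = new_top;
    return current_top;
  }

 private:
  AllocationInfo allocation_info_;
};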
@@ -268,7 +267,7 @@ HeapObject* PagedSpace::AllocateLinearly(AllocationInfo* alloc_info,
 MaybeObject* PagedSpace::AllocateRaw(int size_in_bytes) {
   ASSERT(HasBeenSetup());
   ASSERT_OBJECT_SIZE(size_in_bytes);
-  HeapObject* object = AllocateLinearly(&allocation_info_, size_in_bytes);
+  HeapObject* object = AllocateLinearly(size_in_bytes);
   if (object != NULL) {
     if (identity() == CODE_SPACE) {
       SkipList::Update(object->address(), size_in_bytes);
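AllocateRaw is the layer that stitches the fast and slow cases together: it tries the linear path first and only falls into the space-dependent slow path (SlowAllocateRaw in V8) when that returns NULL. Here is a hedged sketch of the same two-tier pattern, reusing the hypothetical ToySpace above; the chunk size and the malloc-based slow case are made up for illustration.

// Two-tier allocation sketch (not V8 code): fast bump-pointer attempt first,
// then a slow path that installs a fresh chunk and retries once. In V8 the
// slow path would instead consult the free list or expand the paged space.
#include <cstdlib>

static const size_t kToyChunkSize = 64 * 1024;  // hypothetical chunk size

Address AllocateRaw(ToySpace* space, size_t size_in_bytes) {
  // Fast case: linear allocation inside the current region.
  Address result = space->AllocateLinearly(size_in_bytes);
  if (result != NULL) return result;

  // Slow case: grab a new chunk, point the space at it, and retry once.
  // (Any space left in the old chunk is simply abandoned in this toy.)
  if (size_in_bytes > kToyChunkSize) return NULL;
  Address chunk = static_cast<Address>(std::malloc(kToyChunkSize));
  if (chunk == NULL) return NULL;
  space->SetRegion(chunk, chunk + kToyChunkSize);
  return space->AllocateLinearly(size_in_bytes);
}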
@@ -766,15 +766,6 @@ void PagedSpace::Shrink() {
 }
 
-bool PagedSpace::EnsureCapacity(int capacity) {
-  while (Capacity() < capacity) {
-    // Expand the space until it has the required capacity or expansion fails.
-    if (!Expand()) return false;
-  }
-  return true;
-}
-
 #ifdef DEBUG
 void PagedSpace::Print() { }
 #endif
@@ -1505,9 +1505,6 @@ class PagedSpace : public Space {
   // Releases half of unused pages.
   void Shrink();
 
-  // Ensures that the capacity is at least 'capacity'. Returns false on failure.
-  bool EnsureCapacity(int capacity);
-
   // The dummy page that anchors the linked list of pages.
   Page* anchor() { return &anchor_; }
@@ -1626,8 +1623,7 @@ class PagedSpace : public Space {
   // Generic fast case allocation function that tries linear allocation at the
   // address denoted by top in allocation_info_.
-  inline HeapObject* AllocateLinearly(AllocationInfo* alloc_info,
-                                      int size_in_bytes);
+  inline HeapObject* AllocateLinearly(int size_in_bytes);
 
   // Slow path of AllocateRaw. This function is space-dependent.
   MUST_USE_RESULT virtual HeapObject* SlowAllocateRaw(int size_in_bytes);
@@ -38,6 +38,9 @@ regress/regress-1119: FAIL
 # NewGC: http://code.google.com/p/v8/issues/detail?id=1701
 array-join: SKIP
 
+# NewGC: BUG(1719) slow to collect arrays over several contexts.
+regress/regress-524: SKIP
+
 ##############################################################################
 # Too slow in debug mode with --stress-opt
 compiler/regress-stacktrace-methods: PASS, SKIP if $mode == debug