Commit ae0a7ec9 authored by ulan@chromium.org

Use correct size of promoted space for setting promotion and allocation limits.

Review URL: https://chromiumcodereview.appspot.com/10376008

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@11513 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
parent 11d24334
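The change swaps the committed-memory metric (Size(), which still counts garbage sitting on lazily swept pages) for the live-object metric (SizeOfObjects()) everywhere the old-generation promotion and allocation limits are computed or checked. Because the limits are now anchored to live object size, the LowerOldGenLimits() correction that the lazy sweeper used to apply is no longer needed and is deleted along with its call site in PagedSpace::AdvanceSweeper. Below is a minimal sketch of the heuristic, assuming simplified space bookkeeping and an illustrative growth formula; the Space struct, the free-standing PromotedSpaceSizeOfObjects, and OldGenLimit are stand-ins, not the V8 implementation.

// Minimal sketch (not V8 code): how the promotion/allocation limits relate to
// the promoted-space size. The real formulas live in Heap::OldGenPromotionLimit
// and Heap::OldGenAllocationLimit and are not reproduced here.
#include <algorithm>
#include <cstdint>
#include <vector>

struct Space {
  int64_t committed_size;  // analogous to Space::Size(): includes unswept garbage
  int64_t object_size;     // analogous to Space::SizeOfObjects(): live objects only
};

// After this commit the limits are derived from the sum of SizeOfObjects()
// over the old spaces, not from the sum of Size().
int64_t PromotedSpaceSizeOfObjects(const std::vector<Space>& old_spaces) {
  int64_t total = 0;
  for (const Space& space : old_spaces) total += space.object_size;
  return total;
}

// Illustrative growth rule (assumed): let the old generation grow by a fraction
// of its size at the last full GC, but by no less than a fixed minimum. The
// diff shows a divisor of 3 (10 under --stress-compaction) and a minimum of
// 8 * max(Page::kPageSize, MB).
int64_t OldGenLimit(int64_t old_gen_size_at_last_gc, int divisor, int64_t minimum) {
  return old_gen_size_at_last_gc +
         std::max(old_gen_size_at_last_gc / divisor, minimum);
}

With a rule of this shape, basing old_gen_size_at_last_gc on SizeOfObjects() keeps both limits tied to live data, so garbage discovered later during lazy sweeping cannot leave them set too high; that is why the downward adjustment is removed in the hunks below.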
@@ -805,7 +805,7 @@ bool Heap::PerformGarbageCollection(GarbageCollector collector,
   UpdateSurvivalRateTrend(start_new_space_size);
-  size_of_old_gen_at_last_old_space_gc_ = PromotedSpaceSize();
+  size_of_old_gen_at_last_old_space_gc_ = PromotedSpaceSizeOfObjects();
   if (high_survival_rate_during_scavenges &&
       IsStableOrIncreasingSurvivalTrend()) {
......@@ -5810,16 +5810,6 @@ void Heap::RecordStats(HeapStats* stats, bool take_snapshot) {
}
intptr_t Heap::PromotedSpaceSize() {
return old_pointer_space_->Size()
+ old_data_space_->Size()
+ code_space_->Size()
+ map_space_->Size()
+ cell_space_->Size()
+ lo_space_->Size();
}
intptr_t Heap::PromotedSpaceSizeOfObjects() {
return old_pointer_space_->SizeOfObjects()
+ old_data_space_->SizeOfObjects()
......
@@ -1342,7 +1342,7 @@ class Heap {
       PretenureFlag pretenure);
   inline intptr_t PromotedTotalSize() {
-    return PromotedSpaceSize() + PromotedExternalMemorySize();
+    return PromotedSpaceSizeOfObjects() + PromotedExternalMemorySize();
   }
   // True if we have reached the allocation limit in the old generation that
@@ -1363,19 +1363,6 @@ class Heap {
   static const intptr_t kMinimumAllocationLimit =
       8 * (Page::kPageSize > MB ? Page::kPageSize : MB);
-  // When we sweep lazily we initially guess that there is no garbage on the
-  // heap and set the limits for the next GC accordingly. As we sweep we find
-  // out that some of the pages contained garbage and we have to adjust
-  // downwards the size of the heap. This means the limits that control the
-  // timing of the next GC also need to be adjusted downwards.
-  void LowerOldGenLimits(intptr_t adjustment) {
-    size_of_old_gen_at_last_old_space_gc_ -= adjustment;
-    old_gen_promotion_limit_ =
-        OldGenPromotionLimit(size_of_old_gen_at_last_old_space_gc_);
-    old_gen_allocation_limit_ =
-        OldGenAllocationLimit(size_of_old_gen_at_last_old_space_gc_);
-  }
   intptr_t OldGenPromotionLimit(intptr_t old_gen_size) {
     const int divisor = FLAG_stress_compaction ? 10 : 3;
     intptr_t limit =
@@ -1468,7 +1455,7 @@ class Heap {
     intptr_t adjusted_allocation_limit =
         old_gen_allocation_limit_ - new_space_.Capacity() / 5;
-    if (PromotedSpaceSize() >= adjusted_allocation_limit) return true;
+    if (PromotedSpaceSizeOfObjects() >= adjusted_allocation_limit) return true;
     return false;
   }
@@ -1506,7 +1493,6 @@ class Heap {
   GCTracer* tracer() { return tracer_; }
   // Returns the size of objects residing in non new spaces.
-  intptr_t PromotedSpaceSize();
   intptr_t PromotedSpaceSizeOfObjects();
   double total_regexp_code_generated() { return total_regexp_code_generated_; }
...
@@ -100,7 +100,7 @@ void IncrementalMarking::BlackToGreyAndUnshift(HeapObject* obj,
   int64_t old_bytes_rescanned = bytes_rescanned_;
   bytes_rescanned_ = old_bytes_rescanned + obj_size;
   if ((bytes_rescanned_ >> 20) != (old_bytes_rescanned >> 20)) {
-    if (bytes_rescanned_ > 2 * heap_->PromotedSpaceSize()) {
+    if (bytes_rescanned_ > 2 * heap_->PromotedSpaceSizeOfObjects()) {
       // If we have queued twice the heap size for rescanning then we are
       // going around in circles, scanning the same objects again and again
       // as the program mutates the heap faster than we can incrementally
...
@@ -951,7 +951,7 @@ void IncrementalMarking::ResetStepCounters() {
 int64_t IncrementalMarking::SpaceLeftInOldSpace() {
-  return heap_->MaxOldGenerationSize() - heap_->PromotedSpaceSize();
+  return heap_->MaxOldGenerationSize() - heap_->PromotedSpaceSizeOfObjects();
 }
 } }  // namespace v8::internal
@@ -3829,7 +3829,7 @@ void MarkCompactCollector::SweepSpace(PagedSpace* space, SweeperType sweeper) {
   bool lazy_sweeping_active = false;
   bool unused_page_present = false;
-  intptr_t old_space_size = heap()->PromotedSpaceSize();
+  intptr_t old_space_size = heap()->PromotedSpaceSizeOfObjects();
   intptr_t space_left =
       Min(heap()->OldGenPromotionLimit(old_space_size),
           heap()->OldGenAllocationLimit(old_space_size)) - old_space_size;
...
@@ -2295,8 +2295,6 @@ bool PagedSpace::AdvanceSweeper(intptr_t bytes_to_sweep) {
     first_unswept_page_ = p;
   }
-  heap()->LowerOldGenLimits(freed_bytes);
   heap()->FreeQueuedChunks();
   return IsSweepingComplete();
...