Commit a0dadc8b authored by hpayer@chromium.org

Avoid scan-on-scavenge pages during full collection.

BUG=
R=bmeurer@chromium.org

Review URL: https://codereview.chromium.org/372193002

git-svn-id: https://v8.googlecode.com/svn/branches/bleeding_edge@22267 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
parent c393b9a5
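
In outline, the mechanism is: the heap prologue tells the store buffer which collector is running, the store buffer may then grow past its regular limit for the duration of a full (mark-compact) collection so that no page has to be flipped to scan-on-scavenge mid-GC, and the epilogue shrinks it back. A minimal standalone sketch of that policy, with simplified names and scaffolding (not the literal V8 code):

#include <cstddef>

enum GarbageCollector { SCAVENGER, MARK_COMPACTOR };

// Toy model of the overflow policy; limits are slot counts.
class StoreBufferModel {
 public:
  StoreBufferModel(size_t regular, size_t reserved)
      : regular_limit_(regular), reserved_limit_(reserved) {}

  void GCPrologue(bool allow_overflow) { allow_overflow_ = allow_overflow; }

  // While a full GC runs, the buffer may fill all the way to the reserved
  // limit instead of exempting pages (making them scan-on-scavenge).
  size_t EffectiveLimit() const {
    return allow_overflow_ ? reserved_limit_ : regular_limit_;
  }

  void GCEpilogue() { allow_overflow_ = false; }  // shrink-back elided here

 private:
  size_t regular_limit_;
  size_t reserved_limit_;
  bool allow_overflow_ = false;
};

// Heap-side wiring, mirroring the patch: overflow only for mark-compact.
void GarbageCollectionPrologue(StoreBufferModel* sb, GarbageCollector c) {
  sb->GCPrologue(c == MARK_COMPACTOR);
}
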
@@ -404,7 +404,7 @@ void Heap::ReportStatisticsAfterGC() {
}
-void Heap::GarbageCollectionPrologue() {
+void Heap::GarbageCollectionPrologue(GarbageCollector collector) {
{ AllowHeapAllocation for_the_first_part_of_prologue;
ClearJSFunctionResultCaches();
gc_count_++;
@@ -435,7 +435,7 @@ void Heap::GarbageCollectionPrologue() {
ReportStatisticsBeforeGC();
#endif // DEBUG
-store_buffer()->GCPrologue();
+store_buffer()->GCPrologue(collector == MARK_COMPACTOR);
if (isolate()->concurrent_osr_enabled()) {
isolate()->optimizing_compiler_thread()->AgeBufferedOsrJobs();
@@ -833,7 +833,7 @@ bool Heap::CollectGarbage(GarbageCollector collector,
{ GCTracer tracer(this, gc_reason, collector_reason);
ASSERT(AllowHeapAllocation::IsAllowed());
DisallowHeapAllocation no_allocation_during_gc;
-GarbageCollectionPrologue();
+GarbageCollectionPrologue(collector);
// The GC count was incremented in the prologue. Tell the tracer about
// it.
tracer.set_gc_count(gc_count_);
......
@@ -1698,7 +1698,7 @@ class Heap {
// Code that should be run before and after each GC. Includes some
// reporting/verification activities when compiled with DEBUG set.
-void GarbageCollectionPrologue();
+void GarbageCollectionPrologue(GarbageCollector collector);
void GarbageCollectionEpilogue();
// Pretenuring decisions are made based on feedback collected during new
......
@@ -22,10 +22,13 @@ StoreBuffer::StoreBuffer(Heap* heap)
old_start_(NULL),
old_limit_(NULL),
old_top_(NULL),
+old_regular_limit_(NULL),
old_reserved_limit_(NULL),
old_virtual_memory_(NULL),
+old_store_buffer_length_(0),
old_buffer_is_sorted_(false),
old_buffer_is_filtered_(false),
during_gc_(false),
+allow_overflow_(false),
store_buffer_rebuilding_enabled_(false),
callback_(NULL),
may_move_store_buffer_entries_(true),
@@ -44,8 +47,14 @@ void StoreBuffer::SetUp() {
reinterpret_cast<Address*>(RoundUp(start_as_int, kStoreBufferSize * 2));
limit_ = start_ + (kStoreBufferSize / kPointerSize);
+// We set the maximum store buffer size to the maximum size of a semi-space.
+// The store buffer may reach this limit during a full garbage collection.
+// Note that half of the semi-space should be good enough, since half of the
+// memory in the semi-space is not object pointers.
+old_store_buffer_length_ = heap_->MaxSemiSpaceSize() / sizeof(Address);
old_virtual_memory_ =
-new base::VirtualMemory(kOldStoreBufferLength * kPointerSize);
+new base::VirtualMemory(old_store_buffer_length_ * kPointerSize);
old_top_ = old_start_ =
reinterpret_cast<Address*>(old_virtual_memory_->address());
// Don't know the alignment requirements of the OS, but it is certainly not
@@ -54,9 +63,12 @@
int initial_length =
static_cast<int>(base::OS::CommitPageSize() / kPointerSize);
ASSERT(initial_length > 0);
-ASSERT(initial_length <= kOldStoreBufferLength);
+ASSERT(initial_length <= kOldRegularStoreBufferLength);
+ASSERT(initial_length <= old_store_buffer_length_);
+ASSERT(kOldRegularStoreBufferLength <= old_store_buffer_length_);
old_limit_ = old_start_ + initial_length;
-old_reserved_limit_ = old_start_ + kOldStoreBufferLength;
+old_regular_limit_ = old_start_ + kOldRegularStoreBufferLength;
+old_reserved_limit_ = old_start_ + old_store_buffer_length_;
CHECK(old_virtual_memory_->Commit(
reinterpret_cast<void*>(old_start_),
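
The reserved size is one buffer slot per pointer-sized word of the semi-space, so even in the worst case every old-to-new pointer recorded while evacuating new space fits without exempting pages. For intuition, here is the arithmetic under assumed values (a 16 MB maximum semi-space on a 64-bit target; both numbers are assumptions for illustration, not taken from this patch):

#include <cstdio>
#include <cstddef>

int main() {
  const size_t kMaxSemiSpaceSize = 16u * 1024 * 1024;  // assumed: 16 MB
  const size_t kPointerSize = sizeof(void*);           // assumed: 8 bytes
  // One slot per word of the semi-space. As the patch's comment notes, half
  // of this would already suffice, since not every word is an object pointer.
  const size_t slots = kMaxSemiSpaceSize / kPointerSize;
  std::printf("%zu slots = %zu MB of reserved store buffer\n", slots,
              slots * kPointerSize / (1024 * 1024));
  return 0;
}

With these numbers: 2,097,152 slots, i.e. 16 MB of reserved (but initially mostly uncommitted) address space.
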
@@ -93,8 +105,13 @@ void StoreBuffer::TearDown() {
delete old_virtual_memory_;
delete[] hash_set_1_;
delete[] hash_set_2_;
-old_start_ = old_top_ = old_limit_ = old_reserved_limit_ = NULL;
-start_ = limit_ = NULL;
+old_start_ = NULL;
+old_top_ = NULL;
+old_limit_ = NULL;
+old_reserved_limit_ = NULL;
+old_regular_limit_ = NULL;
+start_ = NULL;
+limit_ = NULL;
heap_->public_set_store_buffer_top(start_);
}
@@ -128,9 +145,35 @@ bool StoreBuffer::SpaceAvailable(intptr_t space_needed) {
}
+template<StoreBuffer::ExemptPopularPagesMode mode>
+void StoreBuffer::IterativelyExemptPopularPages(intptr_t space_needed) {
+// Sample 1 entry in 97 and filter out the pages where we estimate that more
+// than 1 in 8 pointers are to new space.
+static const int kSampleFinenesses = 5;
+static const struct Samples {
+int prime_sample_step;
+int threshold;
+} samples[kSampleFinenesses] = {
+{ 97, ((Page::kPageSize / kPointerSize) / 97) / 8 },
+{ 23, ((Page::kPageSize / kPointerSize) / 23) / 16 },
+{ 7, ((Page::kPageSize / kPointerSize) / 7) / 32 },
+{ 3, ((Page::kPageSize / kPointerSize) / 3) / 256 },
+{ 1, 0}
+};
+for (int i = 0; i < kSampleFinenesses; i++) {
+ExemptPopularPages(samples[i].prime_sample_step, samples[i].threshold);
+// As a last resort we mark all pages as being exempt from the store buffer.
+ASSERT(i != (kSampleFinenesses - 1) || old_top_ == old_start_);
+if (mode == ENSURE_SPACE && SpaceAvailable(space_needed)) return;
+else if (mode == SHRINK_TO_REGULAR_SIZE && old_top_ < old_limit_) return;
+}
+}
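
To make the table concrete: sampling every prime_sample_step-th entry means a single page can contribute at most about (Page::kPageSize / kPointerSize) / prime_sample_step sampled entries, so the thresholds correspond to roughly 1/8, 1/16, 1/32, and 1/256 of a page's slots pointing to new space; the final { 1, 0 } row exempts every page that still has entries, which is why the ASSERT can insist the buffer is empty after the last pass. Evaluating the table under assumed values (1 MB pages, 8-byte pointers; both are illustrative assumptions, not values from this patch):

#include <cstdio>

int main() {
  const int kPageSize = 1 << 20;  // assumed: 1 MB pages
  const int kPointerSize = 8;     // assumed: 64-bit target
  const struct { int step, divisor; } samples[5] = {
      {97, 8}, {23, 16}, {7, 32}, {3, 256}, {1, 0}};
  for (const auto& s : samples) {
    const int per_page = (kPageSize / kPointerSize) / s.step;
    const int threshold = s.divisor ? per_page / s.divisor : 0;
    std::printf("step %2d: <=%6d samples/page, exempt beyond %3d hits\n",
                s.step, per_page, threshold);
  }
  return 0;
}
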
void StoreBuffer::EnsureSpace(intptr_t space_needed) {
while (old_limit_ - old_top_ < space_needed &&
-old_limit_ < old_reserved_limit_) {
+((!allow_overflow_ && old_limit_ < old_regular_limit_) ||
+(allow_overflow_ && old_limit_ < old_reserved_limit_))) {
size_t grow = old_limit_ - old_start_; // Double size.
CHECK(old_virtual_memory_->Commit(reinterpret_cast<void*>(old_limit_),
grow * kPointerSize,
@@ -162,26 +205,8 @@ void StoreBuffer::EnsureSpace(intptr_t space_needed) {
if (SpaceAvailable(space_needed)) return;
-// Sample 1 entry in 97 and filter out the pages where we estimate that more
-// than 1 in 8 pointers are to new space.
-static const int kSampleFinenesses = 5;
-static const struct Samples {
-int prime_sample_step;
-int threshold;
-} samples[kSampleFinenesses] = {
-{ 97, ((Page::kPageSize / kPointerSize) / 97) / 8 },
-{ 23, ((Page::kPageSize / kPointerSize) / 23) / 16 },
-{ 7, ((Page::kPageSize / kPointerSize) / 7) / 32 },
-{ 3, ((Page::kPageSize / kPointerSize) / 3) / 256 },
-{ 1, 0}
-};
-for (int i = 0; i < kSampleFinenesses; i++) {
-ExemptPopularPages(samples[i].prime_sample_step, samples[i].threshold);
-// As a last resort we mark all pages as being exempt from the store buffer.
-ASSERT(i != (kSampleFinenesses - 1) || old_top_ == old_start_);
-if (SpaceAvailable(space_needed)) return;
-}
-UNREACHABLE();
+IterativelyExemptPopularPages<ENSURE_SPACE>(space_needed);
+ASSERT(SpaceAvailable(space_needed));
}
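
EnsureSpace keeps its doubling growth strategy; what the patch changes is the bound being doubled toward: the regular limit normally, the reserved limit while allow_overflow_ is set. If even that limit cannot satisfy the request, the new code falls through to IterativelyExemptPopularPages<ENSURE_SPACE> and frees buffer space by exempting pages instead. A simplified model of the growth loop (slot counts instead of addresses; the clamp to the limit is a simplification of this sketch, and committed must start positive):

#include <cstddef>

size_t GrowCommitted(size_t committed, size_t used, size_t needed,
                     size_t regular_limit, size_t reserved_limit,
                     bool allow_overflow) {
  // The patched loop condition: grow only while below the applicable limit.
  const size_t limit = allow_overflow ? reserved_limit : regular_limit;
  while (committed - used < needed && committed < limit) {
    size_t grow = committed;  // "Double size.", as in the patch
    if (grow > limit - committed) grow = limit - committed;  // sketch-only
    committed += grow;  // the real code commits this region via VirtualMemory
  }
  return committed;
}
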
@@ -328,9 +353,9 @@ void StoreBuffer::ClearFilteringHashSets() {
}
-void StoreBuffer::GCPrologue() {
+void StoreBuffer::GCPrologue(bool allow_overflow) {
ClearFilteringHashSets();
during_gc_ = true;
+allow_overflow_ = allow_overflow;
}
@@ -366,7 +391,13 @@ void StoreBuffer::Verify() {
void StoreBuffer::GCEpilogue() {
during_gc_ = false;
+if (allow_overflow_ && old_limit_ > old_regular_limit_) {
+IterativelyExemptPopularPages<SHRINK_TO_REGULAR_SIZE>(0);
+ASSERT(old_limit_ < old_regular_limit_);
+old_virtual_memory_->Uncommit(old_limit_, old_regular_limit_ - old_limit_);
+}
+allow_overflow_ = false;
#ifdef VERIFY_HEAP
if (FLAG_verify_heap) {
Verify();
......
@@ -19,11 +19,6 @@ class StoreBuffer;
typedef void (*ObjectSlotCallback)(HeapObject** from, HeapObject* to);
-typedef void (StoreBuffer::*RegionCallback)(Address start,
-Address end,
-ObjectSlotCallback slot_callback,
-bool clear_maps);
// Used to implement the write barrier by collecting addresses of pointers
// between spaces.
class StoreBuffer {
@@ -68,13 +63,13 @@ class StoreBuffer {
static const int kStoreBufferOverflowBit = 1 << (14 + kPointerSizeLog2);
static const int kStoreBufferSize = kStoreBufferOverflowBit;
static const int kStoreBufferLength = kStoreBufferSize / sizeof(Address);
-static const int kOldStoreBufferLength = kStoreBufferLength * 16;
+static const int kOldRegularStoreBufferLength = kStoreBufferLength * 16;
static const int kHashSetLengthLog2 = 12;
static const int kHashSetLength = 1 << kHashSetLengthLog2;
void Compact();
-void GCPrologue();
+void GCPrologue(bool allow_overflow);
void GCEpilogue();
Object*** Limit() { return reinterpret_cast<Object***>(old_limit_); }
@@ -118,12 +113,27 @@ class StoreBuffer {
Address* old_start_;
Address* old_limit_;
Address* old_top_;
+// The regular limit specifies how big the store buffer may become during
+// mutator execution or while scavenging.
+Address* old_regular_limit_;
+// The reserved limit is bigger than the regular limit. It should be the size
+// of a semi-space to avoid new scan-on-scavenge during new space evacuation
+// after sweeping in a full garbage collection.
Address* old_reserved_limit_;
base::VirtualMemory* old_virtual_memory_;
+int old_store_buffer_length_;
bool old_buffer_is_sorted_;
bool old_buffer_is_filtered_;
bool during_gc_;
+// If allow_overflow_ is set, we allow the store buffer to grow up to
+// old_reserved_limit_. But we will shrink the store buffer in the epilogue
+// to stay within old_regular_limit_.
+bool allow_overflow_;
// The garbage collector iterates over many pointers to new space that are not
// handled by the store buffer. This flag indicates whether the pointers
// found by the callbacks should be added to the store buffer or not.
@@ -146,6 +156,14 @@
void Uniq();
void ExemptPopularPages(int prime_sample_step, int threshold);
+enum ExemptPopularPagesMode {
+ENSURE_SPACE,
+SHRINK_TO_REGULAR_SIZE
+};
+template <ExemptPopularPagesMode mode>
+void IterativelyExemptPopularPages(intptr_t space_needed);
// Set the map field of the object to NULL if it contains a map.
inline void ClearDeadObject(HeapObject *object);
@@ -156,17 +174,6 @@
ObjectSlotCallback slot_callback,
bool clear_maps);
-// For each region of pointers on a page in use from an old space call
-// visit_pointer_region callback.
-// If either visit_pointer_region or callback can cause an allocation
-// in old space and changes in allocation watermark then
-// can_preallocate_during_iteration should be set to true.
-void IteratePointersOnPage(
-PagedSpace* space,
-Page* page,
-RegionCallback region_callback,
-ObjectSlotCallback slot_callback);
void IteratePointersInStoreBuffer(ObjectSlotCallback slot_callback,
bool clear_maps);
......