Commit 0b38ffac authored by hpayer@chromium.org

Revert "Precisely sweeping of scan-on-scavenge pages."

BUG=
R=jarin@chromium.org

Review URL: https://codereview.chromium.org/387483002

git-svn-id: https://v8.googlecode.com/svn/branches/bleeding_edge@22319 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
parent ce916ff9
@@ -408,7 +408,7 @@ void Heap::ReportStatisticsAfterGC() {
 }


-void Heap::GarbageCollectionPrologue(GarbageCollector collector) {
+void Heap::GarbageCollectionPrologue() {
   { AllowHeapAllocation for_the_first_part_of_prologue;
     ClearJSFunctionResultCaches();
     gc_count_++;
@@ -439,7 +439,7 @@ void Heap::GarbageCollectionPrologue(GarbageCollector collector) {
   ReportStatisticsBeforeGC();
 #endif  // DEBUG

-  store_buffer()->GCPrologue(collector == MARK_COMPACTOR);
+  store_buffer()->GCPrologue();

   if (isolate()->concurrent_osr_enabled()) {
     isolate()->optimizing_compiler_thread()->AgeBufferedOsrJobs();
@@ -837,7 +837,7 @@ bool Heap::CollectGarbage(GarbageCollector collector,
   { GCTracer tracer(this, gc_reason, collector_reason);
     ASSERT(AllowHeapAllocation::IsAllowed());
     DisallowHeapAllocation no_allocation_during_gc;
-    GarbageCollectionPrologue(collector);
+    GarbageCollectionPrologue();
     // The GC count was incremented in the prologue. Tell the tracer about
     // it.
     tracer.set_gc_count(gc_count_);
...
@@ -1698,7 +1698,7 @@ class Heap {

   // Code that should be run before and after each GC. Includes some
   // reporting/verification activities when compiled with DEBUG set.
-  void GarbageCollectionPrologue(GarbageCollector collector);
+  void GarbageCollectionPrologue();
   void GarbageCollectionEpilogue();

   // Pretenuring decisions are made based on feedback collected during new
...
@@ -4161,23 +4161,12 @@ void MarkCompactCollector::SweepSpace(PagedSpace* space, SweeperType sweeper) {
           pages_swept++;
           parallel_sweeping_active = true;
         } else {
-          if (p->scan_on_scavenge()) {
-            SweepPrecisely<SWEEP_ONLY, IGNORE_SKIP_LIST, IGNORE_FREE_SPACE>(
-                space, p, NULL);
-            pages_swept++;
-            if (FLAG_gc_verbose) {
-              PrintF("Sweeping 0x%" V8PRIxPTR
-                     " scan on scavenge page precisely.\n",
-                     reinterpret_cast<intptr_t>(p));
-            }
-          } else {
-            if (FLAG_gc_verbose) {
-              PrintF("Sweeping 0x%" V8PRIxPTR " conservatively in parallel.\n",
-                     reinterpret_cast<intptr_t>(p));
-            }
-            p->set_parallel_sweeping(MemoryChunk::PARALLEL_SWEEPING_PENDING);
-            space->IncreaseUnsweptFreeBytes(p);
-          }
+          if (FLAG_gc_verbose) {
+            PrintF("Sweeping 0x%" V8PRIxPTR " conservatively in parallel.\n",
+                   reinterpret_cast<intptr_t>(p));
+          }
+          p->set_parallel_sweeping(MemoryChunk::PARALLEL_SWEEPING_PENDING);
+          space->IncreaseUnsweptFreeBytes(p);
         }
         space->set_end_of_unswept_pages(p);
         break;
...
@@ -1480,22 +1480,6 @@ int HeapObject::Size() {
 }


-bool HeapObject::ContainsPointers() {
-  InstanceType type = map()->instance_type();
-  if (type <= LAST_NAME_TYPE) {
-    if (type == SYMBOL_TYPE) {
-      return true;
-    }
-    ASSERT(type < FIRST_NONSTRING_TYPE);
-    // There are four string representations: sequential strings, external
-    // strings, cons strings, and sliced strings.
-    // Only the latter two contain non-map-word pointers to heap objects.
-    return ((type & kIsIndirectStringMask) == kIsIndirectStringTag);
-  }
-  return (type > LAST_DATA_TYPE);
-}
-
-
 void HeapObject::IteratePointers(ObjectVisitor* v, int start, int end) {
   v->VisitPointers(reinterpret_cast<Object**>(FIELD_ADDR(this, start)),
                    reinterpret_cast<Object**>(FIELD_ADDR(this, end)));
...
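Aside: the deleted HeapObject::ContainsPointers() leans on the string representation tag bits mentioned in its comment. Below is a minimal standalone sketch of that check, using illustrative tag values rather than the real objects.h constants:

#include <cstdio>

// Illustrative encoding only (the real masks and tags live in src/objects.h):
// the low representation bit marks "indirect" strings (cons, sliced), which
// hold pointers to other strings, while "direct" strings (sequential,
// external) hold raw characters or an external resource instead.
const int kSeqStringTag = 0x0;
const int kConsStringTag = 0x1;
const int kExternalStringTag = 0x2;
const int kSlicedStringTag = 0x3;
const int kIsIndirectStringMask = 0x1;
const int kIsIndirectStringTag = 0x1;

static bool IsIndirectString(int representation_tag) {
  // Cons (0b01) and sliced (0b11) strings both have the low bit set.
  return (representation_tag & kIsIndirectStringMask) == kIsIndirectStringTag;
}

int main() {
  const int tags[] = { kSeqStringTag, kConsStringTag,
                       kExternalStringTag, kSlicedStringTag };
  const char* names[] = { "sequential", "cons", "external", "sliced" };
  for (int i = 0; i < 4; i++) {
    std::printf("%-10s string: heap pointers beyond the map word? %s\n",
                names[i], IsIndirectString(tags[i]) ? "yes" : "no");
  }
  return 0;
}

Only cons and sliced strings report "yes" here, which is the property the removed function relied on when deciding whether a string object needed to be scanned for new-space pointers.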
@@ -717,7 +717,6 @@ enum InstanceType {
   FIXED_UINT8_CLAMPED_ARRAY_TYPE,  // LAST_FIXED_TYPED_ARRAY_TYPE

   FIXED_DOUBLE_ARRAY_TYPE,
-  CONSTANT_POOL_ARRAY_TYPE,
   FILLER_TYPE,  // LAST_DATA_TYPE

   // Structs.
@@ -744,6 +743,7 @@ enum InstanceType {
   BREAK_POINT_INFO_TYPE,

   FIXED_ARRAY_TYPE,
+  CONSTANT_POOL_ARRAY_TYPE,
   SHARED_FUNCTION_INFO_TYPE,

   // All the following types are subtypes of JSReceiver, which corresponds to
@@ -1719,10 +1719,6 @@ class HeapObject: public Object {
   // Returns the heap object's size in bytes
   inline int Size();

-  // Returns true if this heap object contains only references to other
-  // heap objects.
-  inline bool ContainsPointers();
-
   // Given a heap object's map pointer, returns the heap size in bytes
   // Useful when the map pointer field is used for other purposes.
   // GC internal.
...
@@ -18,9 +18,6 @@ namespace internal {
 // HeapObjectIterator

 HeapObjectIterator::HeapObjectIterator(PagedSpace* space) {
-  // Check that we actually can iterate this space.
-  ASSERT(space->is_iterable());
-
   // You can't actually iterate over the anchor page. It is not a real page,
   // just an anchor for the double linked page list. Initialize as if we have
   // reached the end of the anchor page, then the first iteration will move on
@@ -35,9 +32,6 @@ HeapObjectIterator::HeapObjectIterator(PagedSpace* space) {

 HeapObjectIterator::HeapObjectIterator(PagedSpace* space,
                                        HeapObjectCallback size_func) {
-  // Check that we actually can iterate this space.
-  ASSERT(space->is_iterable());
-
   // You can't actually iterate over the anchor page. It is not a real page,
   // just an anchor for the double linked page list. Initialize the current
   // address and end as NULL, then the first iteration will move on
@@ -72,6 +66,9 @@ void HeapObjectIterator::Initialize(PagedSpace* space,
                                     Address cur, Address end,
                                     HeapObjectIterator::PageMode mode,
                                     HeapObjectCallback size_f) {
+  // Check that we actually can iterate this space.
+  ASSERT(space->is_iterable());
+
   space_ = space;
   cur_addr_ = cur;
   cur_end_ = end;
...
@@ -11,7 +11,6 @@
 #include "src/base/atomicops.h"
 #include "src/counters.h"
 #include "src/store-buffer-inl.h"
-#include "src/utils.h"

 namespace v8 {
 namespace internal {
@@ -23,13 +22,10 @@ StoreBuffer::StoreBuffer(Heap* heap)
       old_start_(NULL),
       old_limit_(NULL),
       old_top_(NULL),
-      old_regular_limit_(NULL),
       old_reserved_limit_(NULL),
-      old_virtual_memory_(NULL),
-      old_store_buffer_length_(0),
       old_buffer_is_sorted_(false),
       old_buffer_is_filtered_(false),
-      allow_overflow_(false),
+      during_gc_(false),
       store_buffer_rebuilding_enabled_(false),
       callback_(NULL),
       may_move_store_buffer_entries_(true),
@@ -48,16 +44,8 @@ void StoreBuffer::SetUp() {
       reinterpret_cast<Address*>(RoundUp(start_as_int, kStoreBufferSize * 2));
   limit_ = start_ + (kStoreBufferSize / kPointerSize);

-  // We set the maximum store buffer size to the maximum size of a semi-space.
-  // The store buffer may reach this limit during a full garbage collection.
-  // Note that half of the semi-space should be good enough since half of the
-  // memory in the semi-space are not object pointers.
-  old_store_buffer_length_ =
-      Max(static_cast<int>(heap_->MaxSemiSpaceSize() / sizeof(Address)),
-          kOldRegularStoreBufferLength);
-
   old_virtual_memory_ =
-      new base::VirtualMemory(old_store_buffer_length_ * kPointerSize);
+      new base::VirtualMemory(kOldStoreBufferLength * kPointerSize);
   old_top_ = old_start_ =
       reinterpret_cast<Address*>(old_virtual_memory_->address());

   // Don't know the alignment requirements of the OS, but it is certainly not
@@ -66,12 +54,9 @@ void StoreBuffer::SetUp() {
   int initial_length =
       static_cast<int>(base::OS::CommitPageSize() / kPointerSize);
   ASSERT(initial_length > 0);
-  ASSERT(initial_length <= kOldRegularStoreBufferLength);
-  ASSERT(initial_length <= old_store_buffer_length_);
-  ASSERT(kOldRegularStoreBufferLength <= old_store_buffer_length_);
+  ASSERT(initial_length <= kOldStoreBufferLength);
   old_limit_ = old_start_ + initial_length;
-  old_regular_limit_ = old_start_ + kOldRegularStoreBufferLength;
-  old_reserved_limit_ = old_start_ + old_store_buffer_length_;
+  old_reserved_limit_ = old_start_ + kOldStoreBufferLength;

   CHECK(old_virtual_memory_->Commit(
             reinterpret_cast<void*>(old_start_),
@@ -108,13 +93,8 @@ void StoreBuffer::TearDown() {
   delete old_virtual_memory_;
   delete[] hash_set_1_;
   delete[] hash_set_2_;
-  old_start_ = NULL;
-  old_top_ = NULL;
-  old_limit_ = NULL;
-  old_reserved_limit_ = NULL;
-  old_regular_limit_ = NULL;
-  start_ = NULL;
-  limit_ = NULL;
+  old_start_ = old_top_ = old_limit_ = old_reserved_limit_ = NULL;
+  start_ = limit_ = NULL;
   heap_->public_set_store_buffer_top(start_);
 }
@@ -148,35 +128,9 @@ bool StoreBuffer::SpaceAvailable(intptr_t space_needed) {
 }


-template<StoreBuffer::ExemptPopularPagesMode mode>
-void StoreBuffer::IterativelyExemptPopularPages(intptr_t space_needed) {
-  // Sample 1 entry in 97 and filter out the pages where we estimate that more
-  // than 1 in 8 pointers are to new space.
-  static const int kSampleFinenesses = 5;
-  static const struct Samples {
-    int prime_sample_step;
-    int threshold;
-  } samples[kSampleFinenesses] = {
-    { 97, ((Page::kPageSize / kPointerSize) / 97) / 8 },
-    { 23, ((Page::kPageSize / kPointerSize) / 23) / 16 },
-    { 7, ((Page::kPageSize / kPointerSize) / 7) / 32 },
-    { 3, ((Page::kPageSize / kPointerSize) / 3) / 256 },
-    { 1, 0}
-  };
-  for (int i = 0; i < kSampleFinenesses; i++) {
-    ExemptPopularPages(samples[i].prime_sample_step, samples[i].threshold);
-    // As a last resort we mark all pages as being exempt from the store buffer.
-    ASSERT(i != (kSampleFinenesses - 1) || old_top_ == old_start_);
-    if (mode == ENSURE_SPACE && SpaceAvailable(space_needed)) return;
-    else if (mode == SHRINK_TO_REGULAR_SIZE && old_top_ < old_limit_) return;
-  }
-}
-
-
 void StoreBuffer::EnsureSpace(intptr_t space_needed) {
   while (old_limit_ - old_top_ < space_needed &&
-         ((!allow_overflow_ && old_limit_ < old_regular_limit_) ||
-          (allow_overflow_ && old_limit_ < old_reserved_limit_))) {
+         old_limit_ < old_reserved_limit_) {
     size_t grow = old_limit_ - old_start_;  // Double size.
     CHECK(old_virtual_memory_->Commit(reinterpret_cast<void*>(old_limit_),
                                       grow * kPointerSize,
@@ -208,8 +162,26 @@ void StoreBuffer::EnsureSpace(intptr_t space_needed) {
   if (SpaceAvailable(space_needed)) return;

-  IterativelyExemptPopularPages<ENSURE_SPACE>(space_needed);
-  ASSERT(SpaceAvailable(space_needed));
+  // Sample 1 entry in 97 and filter out the pages where we estimate that more
+  // than 1 in 8 pointers are to new space.
+  static const int kSampleFinenesses = 5;
+  static const struct Samples {
+    int prime_sample_step;
+    int threshold;
+  } samples[kSampleFinenesses] = {
+    { 97, ((Page::kPageSize / kPointerSize) / 97) / 8 },
+    { 23, ((Page::kPageSize / kPointerSize) / 23) / 16 },
+    { 7, ((Page::kPageSize / kPointerSize) / 7) / 32 },
+    { 3, ((Page::kPageSize / kPointerSize) / 3) / 256 },
+    { 1, 0}
+  };
+  for (int i = 0; i < kSampleFinenesses; i++) {
+    ExemptPopularPages(samples[i].prime_sample_step, samples[i].threshold);
+    // As a last resort we mark all pages as being exempt from the store buffer.
+    ASSERT(i != (kSampleFinenesses - 1) || old_top_ == old_start_);
+    if (SpaceAvailable(space_needed)) return;
+  }
+  UNREACHABLE();
 }
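Aside: the sampling loop restored above estimates per-page pointer density without scanning every store buffer slot. Below is a minimal sketch of that idea under simplified assumptions (64-bit pointers, 1 MB pages, a plain vector standing in for the store buffer); the real ExemptPopularPages additionally marks each exempted page scan-on-scavenge and drops its entries from the buffer.

#include <cstdint>
#include <cstdio>
#include <map>
#include <vector>

// Simplified stand-ins for the real constants.
const int kPointerSize = 8;      // assumed 64-bit build
const int kPageSize = 1 << 20;   // assumed 1 MB pages
const uintptr_t kPageMask = ~static_cast<uintptr_t>(kPageSize - 1);

// Count sampled entries per page; a page whose sampled count exceeds the
// threshold is estimated to hold "too many" new-space pointers and would be
// exempted from the store buffer (i.e. switched to scan-on-scavenge).
std::vector<uintptr_t> FindPopularPages(const std::vector<uintptr_t>& slots,
                                        int prime_sample_step, int threshold) {
  std::map<uintptr_t, int> counts;
  for (size_t i = 0; i < slots.size(); i += prime_sample_step) {
    counts[slots[i] & kPageMask]++;
  }
  std::vector<uintptr_t> popular;
  for (std::map<uintptr_t, int>::const_iterator it = counts.begin();
       it != counts.end(); ++it) {
    if (it->second > threshold) popular.push_back(it->first);
  }
  return popular;
}

int main() {
  // First round of the samples table above: 1-in-97 sampling, 1-in-8 density.
  const int step = 97;
  const int threshold = ((kPageSize / kPointerSize) / step) / 8;  // 168 here

  // Fake store buffer: many slots on page 0, only a few on page 1.
  std::vector<uintptr_t> slots;
  for (int i = 0; i < 40000; i++) slots.push_back(0 * kPageSize + i * 8);
  for (int i = 0; i < 2000; i++) slots.push_back(1 * kPageSize + i * 8);

  std::vector<uintptr_t> popular = FindPopularPages(slots, step, threshold);
  std::printf("threshold: more than %d sampled hits per page\n", threshold);
  std::printf("pages estimated above 1/8 new-space pointers: %zu\n",
              popular.size());
  return 0;
}

With these assumed sizes, sampling every 97th slot of a full page's worth of pointers yields roughly 1351 samples, so exceeding the resulting threshold of 168 hits on one page suggests that more than an eighth of its pointers target new space; the later rounds in the table sample more densely with progressively lower thresholds until, as a last resort, every remaining page is exempted.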
@@ -356,9 +328,9 @@ void StoreBuffer::ClearFilteringHashSets() {
 }


-void StoreBuffer::GCPrologue(bool allow_overflow) {
+void StoreBuffer::GCPrologue() {
   ClearFilteringHashSets();
-  allow_overflow_ = allow_overflow;
+  during_gc_ = true;
 }
@@ -394,13 +366,7 @@ void StoreBuffer::Verify() {


 void StoreBuffer::GCEpilogue() {
-  if (allow_overflow_ && old_limit_ > old_regular_limit_) {
-    IterativelyExemptPopularPages<SHRINK_TO_REGULAR_SIZE>(0);
-    ASSERT(old_limit_ < old_regular_limit_);
-    old_virtual_memory_->Uncommit(old_limit_, old_regular_limit_ - old_limit_);
-  }
-  allow_overflow_ = false;
+  during_gc_ = false;
 #ifdef VERIFY_HEAP
   if (FLAG_verify_heap) {
     Verify();
@@ -522,22 +488,25 @@ void StoreBuffer::IteratePointersToNewSpace(ObjectSlotCallback slot_callback,
         FindPointersToNewSpaceInRegion(start, end, slot_callback, clear_maps);
       } else {
         Page* page = reinterpret_cast<Page*>(chunk);
-        ASSERT(page->owner() == heap_->map_space() ||
-               page->owner() == heap_->old_pointer_space());
-        CHECK(!page->WasSweptConservatively());
-        HeapObjectIterator iterator(page, NULL);
-        for (HeapObject* heap_object = iterator.Next();
-             heap_object != NULL;
-             heap_object = iterator.Next()) {
-          // We iterate over objects that contain pointers only.
-          if (heap_object->ContainsPointers()) {
-            FindPointersToNewSpaceInRegion(
-                heap_object->address() + HeapObject::kHeaderSize,
-                heap_object->address() + heap_object->Size(),
-                slot_callback,
-                clear_maps);
-          }
-        }
+        PagedSpace* owner = reinterpret_cast<PagedSpace*>(page->owner());
+        Address start = page->area_start();
+        Address end = page->area_end();
+        if (owner == heap_->map_space()) {
+          ASSERT(page->WasSweptPrecisely());
+          HeapObjectIterator iterator(page, NULL);
+          for (HeapObject* heap_object = iterator.Next(); heap_object != NULL;
+               heap_object = iterator.Next()) {
+            // We skip free space objects.
+            if (!heap_object->IsFiller()) {
+              FindPointersToNewSpaceInRegion(
+                  heap_object->address() + HeapObject::kHeaderSize,
+                  heap_object->address() + heap_object->Size(), slot_callback,
+                  clear_maps);
+            }
+          }
+        } else {
+          FindPointersToNewSpaceInRegion(
+              start, end, slot_callback, clear_maps);
+        }
       }
     }
   }
...
@@ -19,6 +19,11 @@ class StoreBuffer;

 typedef void (*ObjectSlotCallback)(HeapObject** from, HeapObject* to);

+typedef void (StoreBuffer::*RegionCallback)(Address start,
+                                            Address end,
+                                            ObjectSlotCallback slot_callback,
+                                            bool clear_maps);
+
 // Used to implement the write barrier by collecting addresses of pointers
 // between spaces.
 class StoreBuffer {
@@ -63,13 +68,13 @@ class StoreBuffer {
   static const int kStoreBufferOverflowBit = 1 << (14 + kPointerSizeLog2);
   static const int kStoreBufferSize = kStoreBufferOverflowBit;
   static const int kStoreBufferLength = kStoreBufferSize / sizeof(Address);
-  static const int kOldRegularStoreBufferLength = kStoreBufferLength * 16;
+  static const int kOldStoreBufferLength = kStoreBufferLength * 16;
   static const int kHashSetLengthLog2 = 12;
   static const int kHashSetLength = 1 << kHashSetLengthLog2;

   void Compact();

-  void GCPrologue(bool allow_overflow);
+  void GCPrologue();
   void GCEpilogue();

   Object*** Limit() { return reinterpret_cast<Object***>(old_limit_); }
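Aside: only the constant's name changes in the hunk above; the sizes stay the same. A small sketch of the arithmetic, assuming a 64-bit build (kPointerSizeLog2 == 3; on a 32-bit target it would be 2):

#include <cstdio>

typedef char* Address;

int main() {
  const int kPointerSizeLog2 = 3;                  // assumed 64-bit build
  const int kPointerSize = 1 << kPointerSizeLog2;  // 8 bytes

  const int kStoreBufferOverflowBit = 1 << (14 + kPointerSizeLog2);
  const int kStoreBufferSize = kStoreBufferOverflowBit;
  const int kStoreBufferLength =
      static_cast<int>(kStoreBufferSize / sizeof(Address));
  const int kOldStoreBufferLength = kStoreBufferLength * 16;

  std::printf("new store buffer: %d bytes, %d slots\n",
              kStoreBufferSize, kStoreBufferLength);
  std::printf("old store buffer: %d slots, %d bytes reserved\n",
              kOldStoreBufferLength, kOldStoreBufferLength * kPointerSize);
  return 0;
}

Under that assumption the store buffer proper is 128 KB (16384 slots) and the old store buffer tops out at 262144 slots, a 2 MB reservation, which is the single limit that old_reserved_limit_ once again points at.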
@@ -113,27 +118,12 @@ class StoreBuffer {
   Address* old_start_;
   Address* old_limit_;
   Address* old_top_;
-
-  // The regular limit specifies how big the store buffer may become during
-  // mutator execution or while scavenging.
-  Address* old_regular_limit_;
-
-  // The reserved limit is bigger then the regular limit. It should be the size
-  // of a semi-space to avoid new scan-on-scavenge during new space evacuation
-  // after sweeping in a full garbage collection.
   Address* old_reserved_limit_;
-
   base::VirtualMemory* old_virtual_memory_;
-  int old_store_buffer_length_;

   bool old_buffer_is_sorted_;
   bool old_buffer_is_filtered_;
-
-  // If allow_overflow_ is set, we allow the store buffer to grow until
-  // old_reserved_limit_. But we will shrink the store buffer in the epilogue to
-  // stay within the old_regular_limit_.
-  bool allow_overflow_;
+  bool during_gc_;

   // The garbage collector iterates over many pointers to new space that are not
   // handled by the store buffer. This flag indicates whether the pointers
   // found by the callbacks should be added to the store buffer or not.
@@ -156,14 +146,6 @@ class StoreBuffer {
   void Uniq();
   void ExemptPopularPages(int prime_sample_step, int threshold);

-  enum ExemptPopularPagesMode {
-    ENSURE_SPACE,
-    SHRINK_TO_REGULAR_SIZE
-  };
-
-  template <ExemptPopularPagesMode mode>
-  void IterativelyExemptPopularPages(intptr_t space_needed);
-
   // Set the map field of the object to NULL if contains a map.
   inline void ClearDeadObject(HeapObject *object);
@@ -174,6 +156,17 @@ class StoreBuffer {
                                       ObjectSlotCallback slot_callback,
                                       bool clear_maps);

+  // For each region of pointers on a page in use from an old space call
+  // visit_pointer_region callback.
+  // If either visit_pointer_region or callback can cause an allocation
+  // in old space and changes in allocation watermark then
+  // can_preallocate_during_iteration should be set to true.
+  void IteratePointersOnPage(
+      PagedSpace* space,
+      Page* page,
+      RegionCallback region_callback,
+      ObjectSlotCallback slot_callback);
+
   void IteratePointersInStoreBuffer(ObjectSlotCallback slot_callback,
                                     bool clear_maps);
...
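Aside: RegionCallback (re-added near the top of this header) is a pointer-to-member-function type, so a caller such as the restored IteratePointersOnPage dispatches through it with the ->* operator. A minimal sketch of that C++ pattern with a hypothetical stand-in class, not the real StoreBuffer:

#include <cstdio>

typedef char* Address;
class HeapObject;
typedef void (*ObjectSlotCallback)(HeapObject** from, HeapObject* to);

class MiniStoreBuffer {
 public:
  // Same shape as the RegionCallback typedef above.
  typedef void (MiniStoreBuffer::*RegionCallback)(
      Address start, Address end, ObjectSlotCallback slot_callback,
      bool clear_maps);

  void FindPointersToNewSpaceInRegion(Address start, Address end,
                                      ObjectSlotCallback slot_callback,
                                      bool clear_maps) {
    (void)slot_callback;  // unused in this sketch
    std::printf("scanning region [%p, %p), clear_maps=%d\n",
                static_cast<void*>(start), static_cast<void*>(end),
                clear_maps ? 1 : 0);
  }

  // Dispatch through the member-function pointer, as the region_callback
  // parameter of IteratePointersOnPage would be used.
  void IterateRegion(RegionCallback region_callback, Address start,
                     Address end, ObjectSlotCallback slot_callback) {
    (this->*region_callback)(start, end, slot_callback, false);
  }
};

static void NoOpSlotCallback(HeapObject** /*from*/, HeapObject* /*to*/) {}

int main() {
  MiniStoreBuffer buffer;
  char region[64];
  buffer.IterateRegion(&MiniStoreBuffer::FindPointersToNewSpaceInRegion,
                       region, region + sizeof(region), NoOpSlotCallback);
  return 0;
}

Passing &MiniStoreBuffer::FindPointersToNewSpaceInRegion as the region_callback argument mirrors how a per-space scanning routine would be selected once and then invoked for each page.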