Commit f8b9e6dd authored by antonm@chromium.org

Force mark-sweep instead of compaction if the size of the map space is too big to...

Force mark-sweep instead of compaction if the size of the map space is too big to allow forward pointer encoding.
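For context, the limit comes from how relocation works during compaction: a map's forwarding address is packed into its map word, and only a fixed number of bits are reserved for the map-space page index, so only a bounded number of map-space pages can be relocated. The standalone sketch below (not V8 code; the constants are illustrative stand-ins for MapWord::kMapPageIndexBits, Page::kPageSize and Page::kObjectAreaSize, whose real values live in the V8 sources) shows the kind of check the collector now performs before choosing a compacting collection.

// Minimal sketch of the "is the map space still compactable?" check.
// All constants here are assumed, illustrative values.
#include <cstdio>

namespace {

const int kMapPageIndexBits = 10;           // assumed: bits of the map word used for the page index
const int kMaxMapPageIndex = (1 << kMapPageIndexBits) - 1;
const int kPageSize = 8 * 1024;             // assumed page size
const int kPageObjectAreaSize = kPageSize;  // simplification: whole page usable for objects

// Mirrors the idea behind MapSpace::MapPointersEncodable(): forwarding
// addresses for maps can only be encoded while the map space fits into
// the pages addressable with kMapPageIndexBits.
bool MapPointersEncodable(int map_space_capacity_in_bytes) {
  int pages = map_space_capacity_in_bytes / kPageObjectAreaSize;
  return pages <= kMaxMapPageIndex;
}

}  // namespace

int main() {
  int small_space = kMaxMapPageIndex * kPageObjectAreaSize;        // still encodable: compact
  int big_space = (kMaxMapPageIndex + 2) * kPageObjectAreaSize;    // too big: fall back to mark-sweep
  std::printf("small space compactable: %d\n", MapPointersEncodable(small_space));
  std::printf("big space compactable:   %d\n", MapPointersEncodable(big_space));
  return 0;
}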

Review URL: http://codereview.chromium.org/507025

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@3497 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
parent 12866185
@@ -198,6 +198,9 @@ DEFINE_bool(cleanup_caches_in_maps_at_gc, true,
 DEFINE_bool(canonicalize_object_literal_maps, true,
             "Canonicalize maps for object literals.")
+DEFINE_bool(use_big_map_space, true,
+            "Use big map space, but don't compact if it grew too big.")
 // mksnapshot.cc
 DEFINE_bool(h, false, "print this message")
 DEFINE_bool(new_snapshot, true, "use new snapshot implementation")
......
@@ -3437,7 +3437,10 @@ bool Heap::Setup(bool create_heap_objects) {
   if (!code_space_->Setup(NULL, 0)) return false;
   // Initialize map space.
-  map_space_ = new MapSpace(kMaxMapSpaceSize, MAP_SPACE);
+  map_space_ = new MapSpace(FLAG_use_big_map_space
+                            ? max_old_generation_size_
+                            : (MapSpace::kMaxMapPageIndex + 1) * Page::kPageSize,
+                            MAP_SPACE);
   if (map_space_ == NULL) return false;
   if (!map_space_->Setup(NULL, 0)) return false;
......
@@ -890,11 +890,6 @@ class Heap : public AllStatic {
   static int linear_allocation_scope_depth_;
   static bool context_disposed_pending_;
-  // The number of MapSpace pages is limited by the way we pack
-  // Map pointers during GC.
-  static const int kMaxMapSpaceSize =
-      (1 << (MapWord::kMapPageIndexBits)) * Page::kPageSize;
 #if defined(V8_TARGET_ARCH_X64)
   static const int kMaxObjectSizeInNewSpace = 512*KB;
 #else
......
@@ -116,6 +116,8 @@ void MarkCompactCollector::Prepare(GCTracer* tracer) {
   compact_on_next_gc_ = false;
   if (FLAG_never_compact) compacting_collection_ = false;
+  if (!Heap::map_space()->MapPointersEncodable())
+      compacting_collection_ = false;
   if (FLAG_collect_maps) CreateBackPointers();
 #ifdef DEBUG
......
@@ -1735,7 +1735,8 @@ void FixedSizeFreeList::Free(Address start) {
     Memory::Address_at(start + i) = kZapValue;
   }
 #endif
-  ASSERT(!FLAG_always_compact);  // We only use the freelists with mark-sweep.
+  // We only use the freelists with mark-sweep.
+  ASSERT(!MarkCompactCollector::IsCompacting());
   FreeListNode* node = FreeListNode::FromAddress(start);
   node->set_size(object_size_);
   node->set_next(head_);
......
@@ -993,6 +993,9 @@ class PagedSpace : public Space {
   HeapObject* SlowMCAllocateRaw(int size_in_bytes);
 #ifdef DEBUG
+  // Returns the number of total pages in this space.
+  int CountTotalPages();
   void DoPrintRSet(const char* space_name);
 #endif
  private:
@@ -1002,11 +1005,6 @@ class PagedSpace : public Space {
   // Returns a pointer to the page of the relocation pointer.
   Page* MCRelocationTopPage() { return TopPageOf(mc_forwarding_info_); }
-#ifdef DEBUG
-  // Returns the number of total pages in this space.
-  int CountTotalPages();
-#endif
   friend class PageIterator;
 };
@@ -1740,6 +1738,17 @@ class MapSpace : public FixedSpace {
   // Constants.
   static const int kMaxMapPageIndex = (1 << MapWord::kMapPageIndexBits) - 1;
+  // Are map pointers encodable into map word?
+  bool MapPointersEncodable() {
+    if (!FLAG_use_big_map_space) {
+      ASSERT(CountTotalPages() <= kMaxMapPageIndex);
+      return true;
+    }
+    int n_of_pages = Capacity() / Page::kObjectAreaSize;
+    ASSERT(n_of_pages == CountTotalPages());
+    return n_of_pages <= kMaxMapPageIndex;
+  }
  protected:
 #ifdef DEBUG
   virtual void VerifyObject(HeapObject* obj);
......