Create a new paged heap space for global property cells. The new
space is similar to the map space in that it holds fixed-size objects.
A common superclass (FixedSpace) for spaces with fixed-size objects is
shared by the map space and the cell space.

Allocate all cells in the cell space and handle it during all GCs.
Modify the free-list node representation (so that the size is not at a
fixed offset in every node) to allow two-pointer free-list nodes.
Clean up some code in the mark-compact collector.
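The two-pointer node constraint is why the filler maps are renamed below
(one_pointer_filler_map / two_pointer_filler_map): a dead cell-sized block
has no spare word for a size field, so the map pointer itself has to encode
the size. A minimal sketch of that layout (illustrative only, not code from
this patch):

```cpp
// Free-list nodes in a fixed-size space. One- and two-word nodes have
// no room for an explicit size field, so the filler map encodes the
// size; the second word of a two-word node is free to hold the link.
struct OneWordFreeNode {
  void* map;   // one_pointer_filler_map: this single word is dead
};
struct TwoWordFreeNode {
  void* map;   // two_pointer_filler_map: these two words are dead
  void* next;  // free-list link lives in the second word
};
```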

Review URL: http://codereview.chromium.org/155211

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@2411 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
......@@ -257,14 +257,16 @@ typedef bool (*WeakSlotCallback)(Object** pointer);
// NOTE: SpaceIterator depends on AllocationSpace enumeration values being
// consecutive.
enum AllocationSpace {
NEW_SPACE, // Semispaces collected with copying collector.
OLD_POINTER_SPACE, // Must be first of the paged spaces - see PagedSpaces.
OLD_DATA_SPACE, // May not have pointers to new space.
CODE_SPACE, // Also one of the old spaces. Marked executable.
MAP_SPACE, // Only map objects.
LO_SPACE, // Large objects.
NEW_SPACE, // Semispaces collected with copying collector.
OLD_POINTER_SPACE, // May contain pointers to new space.
OLD_DATA_SPACE, // Must not have pointers to new space.
CODE_SPACE, // No pointers to new space, marked executable.
MAP_SPACE, // Only and all map objects.
CELL_SPACE, // Only and all cell objects.
LO_SPACE, // Promoted large objects.
FIRST_SPACE = NEW_SPACE,
LAST_SPACE = LO_SPACE // <= 5 (see kSpaceBits and kLOSpacePointer)
LAST_SPACE = LO_SPACE
};
const int kSpaceTagSize = 3;
const int kSpaceTagMask = (1 << kSpaceTagSize) - 1;
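As a sanity check on the tag width, the seven enumerators (NEW_SPACE
through LO_SPACE, now including CELL_SPACE) still fit in the 3-bit tag.
A standalone illustration of the assertion the patch relies on:

```cpp
#include <cassert>

int main() {
  const int kSpaceTagSize = 3;
  const int kSpaceTagMask = (1 << kSpaceTagSize) - 1;  // 0x7
  const int LAST_SPACE = 6;  // LO_SPACE, after CELL_SPACE is added
  assert((LAST_SPACE & ~kSpaceTagMask) == 0);          // 6 fits in 3 bits
  return 0;
}
```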
......
......@@ -82,6 +82,8 @@ Object* Heap::AllocateRaw(int size_in_bytes,
result = code_space_->AllocateRaw(size_in_bytes);
} else if (LO_SPACE == space) {
result = lo_space_->AllocateRaw(size_in_bytes);
} else if (CELL_SPACE == space) {
result = cell_space_->AllocateRaw(size_in_bytes);
} else {
ASSERT(MAP_SPACE == space);
result = map_space_->AllocateRaw(size_in_bytes);
......@@ -107,12 +109,23 @@ Object* Heap::NumberFromUint32(uint32_t value) {
}
Object* Heap::AllocateRawMap(int size_in_bytes) {
Object* Heap::AllocateRawMap() {
#ifdef DEBUG
Counters::objs_since_last_full.Increment();
Counters::objs_since_last_young.Increment();
#endif
Object* result = map_space_->AllocateRaw(size_in_bytes);
Object* result = map_space_->AllocateRaw(Map::kSize);
if (result->IsFailure()) old_gen_exhausted_ = true;
return result;
}
Object* Heap::AllocateRawCell() {
#ifdef DEBUG
Counters::objs_since_last_full.Increment();
Counters::objs_since_last_young.Increment();
#endif
Object* result = cell_space_->AllocateRaw(JSGlobalPropertyCell::kSize);
if (result->IsFailure()) old_gen_exhausted_ = true;
return result;
}
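For context, a caller of AllocateRawCell would look roughly like this (a
sketch of the allocation pattern, assuming a global_property_cell_map
root and the JSGlobalPropertyCell accessors; not necessarily the exact
code in heap.cc):

```cpp
// Allocate a property cell and initialize its map and value.
Object* Heap::AllocateJSGlobalPropertyCell(Object* value) {
  Object* result = AllocateRawCell();
  if (result->IsFailure()) return result;  // propagate the failure object
  HeapObject::cast(result)->set_map(global_property_cell_map());
  JSGlobalPropertyCell::cast(result)->set_value(value);
  return result;
}
```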
......
......@@ -105,8 +105,8 @@ namespace internal {
V(Map, boilerplate_function_map, BoilerplateFunctionMap) \
V(Map, shared_function_info_map, SharedFunctionInfoMap) \
V(Map, proxy_map, ProxyMap) \
V(Map, one_word_filler_map, OneWordFillerMap) \
V(Map, two_word_filler_map, TwoWordFillerMap) \
V(Map, one_pointer_filler_map, OnePointerFillerMap) \
V(Map, two_pointer_filler_map, TwoPointerFillerMap) \
V(Object, nan_value, NanValue) \
V(Object, undefined_value, UndefinedValue) \
V(Object, minus_zero_value, MinusZeroValue) \
......@@ -263,6 +263,7 @@ class Heap : public AllStatic {
static OldSpace* old_data_space() { return old_data_space_; }
static OldSpace* code_space() { return code_space_; }
static MapSpace* map_space() { return map_space_; }
static CellSpace* cell_space() { return cell_space_; }
static LargeObjectSpace* lo_space() { return lo_space_; }
static bool always_allocate() { return always_allocate_scope_depth_ != 0; }
......@@ -852,6 +853,7 @@ class Heap : public AllStatic {
static OldSpace* old_data_space_;
static OldSpace* code_space_;
static MapSpace* map_space_;
static CellSpace* cell_space_;
static LargeObjectSpace* lo_space_;
static HeapState gc_state_;
......@@ -975,7 +977,10 @@ class Heap : public AllStatic {
// to Heap::AllocateRaw(size_in_bytes, MAP_SPACE), except that (a) it doesn't
// have to test the allocation space argument and (b) can reduce code size
// (since both AllocateRaw and AllocateRawMap are inlined).
static inline Object* AllocateRawMap(int size_in_bytes);
static inline Object* AllocateRawMap();
// Allocate an uninitialized object in the global property cell space.
static inline Object* AllocateRawCell();
// Initializes a JSObject based on its map.
static void InitializeJSObjectFromMap(JSObject* obj,
......
......@@ -293,6 +293,7 @@ class MarkCompactCollector: public AllStatic {
static void DeallocateOldDataBlock(Address start, int size_in_bytes);
static void DeallocateCodeBlock(Address start, int size_in_bytes);
static void DeallocateMapBlock(Address start, int size_in_bytes);
static void DeallocateCellBlock(Address start, int size_in_bytes);
// If we are not compacting the heap, we simply sweep the spaces except
// for the large object space, clearing mark bits and adding unmarked
......@@ -352,8 +353,11 @@ class MarkCompactCollector: public AllStatic {
static int RelocateOldPointerObject(HeapObject* obj);
static int RelocateOldDataObject(HeapObject* obj);
// Relocate a property cell object.
static int RelocateCellObject(HeapObject* obj);
// Helper function.
static inline int RelocateOldNonCodeObject(HeapObject* obj, OldSpace* space);
static inline int RelocateOldNonCodeObject(HeapObject* obj, PagedSpace* space);
// Relocates an object in the code space.
static int RelocateCodeObject(HeapObject* obj);
......@@ -393,6 +397,9 @@ class MarkCompactCollector: public AllStatic {
// Number of live objects in Heap::map_space_.
static int live_map_objects_;
// Number of live objects in Heap::cell_space_.
static int live_cell_objects_;
// Number of live objects in Heap::lo_space_.
static int live_lo_objects_;
......
......@@ -768,6 +768,8 @@ int Failure::value() const {
Failure* Failure::RetryAfterGC(int requested_bytes) {
// Assert that the space encoding fits in the three bits allotted for it.
ASSERT((LAST_SPACE & ~kSpaceTagMask) == 0);
int requested = requested_bytes >> kObjectAlignmentBits;
int value = (requested << kSpaceTagSize) | NEW_SPACE;
ASSERT(value >> kSpaceTagSize == requested);
......
......@@ -928,12 +928,14 @@ class Smi: public Object {
// Failure is used for reporting out of memory situations and
// propagating exceptions through the runtime system. Failure objects
// are transient and cannot occur as part of the objects graph.
// are transient and cannot occur as part of the object graph.
//
// Failures are a single word, encoded as follows:
// +-------------------------+---+--+--+
// |rrrrrrrrrrrrrrrrrrrrrrrrr|sss|tt|11|
// +-------------------------+---+--+--+
//  31                      7 6 4 32 10
//
// The low two bits, 0-1, are the failure tag, 11. The next two bits,
// 2-3, are a failure type tag 'tt' with possible values:
......@@ -944,18 +946,13 @@ class Smi: public Object {
//
// The next three bits, 4-6, are an allocation space tag 'sss'. The
// allocation space tag is 000 for all failure types except
// RETRY_AFTER_GC. For RETRY_AFTER_GC, the possible values are
// (the encoding is found in globals.h):
// 000 NEW_SPACE
// 001 OLD_SPACE
// 010 CODE_SPACE
// 011 MAP_SPACE
// 100 LO_SPACE
// RETRY_AFTER_GC. For RETRY_AFTER_GC, the possible values are the
// allocation spaces (the encoding is found in globals.h).
//
// The remaining bits is the number of words requested by the
// allocation request that failed, and is zeroed except for
// RETRY_AFTER_GC failures. The 25 bits (on a 32 bit platform) gives
// a representable range of 2^27 bytes (128MB).
// The remaining bits are the size of the allocation request in units
// of the pointer size, and are zeroed except for RETRY_AFTER_GC
// failures. The 25 size bits (on a 32-bit platform) give a
// representable range of 2^27 bytes (128MB).
// Failure type tag info.
const int kFailureTypeTagSize = 2;
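To make the layout concrete, here is a hypothetical hand-built
RETRY_AFTER_GC failure word (RETRY_AFTER_GC is assumed to have type tag
0; the constants mirror the ones in this file but are recomputed
locally):

```cpp
#include <cassert>
#include <cstdint>

int main() {
  const int kFailureTagSize = 2;       // low '11' bits
  const uint32_t kFailureTag = 3;
  const int kFailureTypeTagSize = 2;   // 'tt' field
  const uint32_t kRetryAfterGC = 0;    // assumed type-tag value
  const int kSpaceTagSize = 3;         // 'sss' field
  const int kObjectAlignmentBits = 2;  // bytes -> words on 32-bit
  const uint32_t CELL_SPACE = 5;

  // Request 64 bytes in CELL_SPACE: 16 words of payload.
  uint32_t requested = 64u >> kObjectAlignmentBits;
  uint32_t value = (requested << kSpaceTagSize) | CELL_SPACE;
  uint32_t failure =
      (((value << kFailureTypeTagSize) | kRetryAfterGC) << kFailureTagSize)
      | kFailureTag;                   // == 0x853

  // Stripping the three tag fields recovers the requested word count.
  assert((failure >> (kFailureTagSize + kFailureTypeTagSize + kSpaceTagSize))
         == requested);
  return 0;
}
```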
......@@ -1085,14 +1082,6 @@ class MapWord BASE_EMBEDDED {
inline Address ToEncodedAddress();
private:
// HeapObject calls the private constructor and directly reads the value.
friend class HeapObject;
explicit MapWord(uintptr_t value) : value_(value) {}
uintptr_t value_;
// Bits used by the marking phase of the garbage collector.
//
// The first word of a heap object is normally a map pointer. The last two
......@@ -1134,6 +1123,14 @@ class MapWord BASE_EMBEDDED {
// 0xFFE00000
static const uint32_t kForwardingOffsetMask =
~(kMapPageIndexMask | kMapPageOffsetMask);
private:
// HeapObject calls the private constructor and directly reads the value.
friend class HeapObject;
explicit MapWord(uintptr_t value) : value_(value) {}
uintptr_t value_;
};
......
......@@ -42,47 +42,44 @@
namespace v8 {
namespace internal {
// Encoding: a RelativeAddress must be able to fit in a pointer:
// it is encoded as an Address with (from MS to LS bits):
// 27 bits identifying a word in the space, in one of three formats:
// - MAP and OLD spaces: 16 bits of page number, 11 bits of word offset in page
// - NEW space: 27 bits of word offset
// - LO space: 27 bits of page number
// 3 bits to encode the AllocationSpace (special values for code in LO space)
// 2 bits identifying this as a HeapObject
// 32-bit encoding: a RelativeAddress must be able to fit in a
// pointer: it is encoded as an Address with (from LS to MS bits):
// - 2 bits identifying this as a HeapObject.
// - 4 bits to encode the AllocationSpace (including special values for
// code and fixed arrays in LO space)
// - 26 bits identifying a word in the space, in one of three formats:
// - paged spaces: 15 bits of page number, 11 bits of word offset in page
// - NEW space: 26 bits of word offset
// - LO space: 26 bits of page number
const int kSpaceShift = kHeapObjectTagSize;
const int kSpaceBits = kSpaceTagSize;
const int kSpaceMask = kSpaceTagMask;
// These values are used instead of space numbers when serializing/
// deserializing. They indicate an object that is in large object space, but
// should be treated specially.
// Make the pages executable on platforms that support it:
const int kLOSpaceExecutable = LAST_SPACE + 1;
// Reserve space for write barrier bits (for objects that can contain
// references to new space):
const int kLOSpacePointer = LAST_SPACE + 2;
const int kSpaceBits = 4;
const int kSpaceMask = (1 << kSpaceBits) - 1;
const int kOffsetShift = kSpaceShift + kSpaceBits;
const int kOffsetBits = 11;
const int kOffsetMask = (1 << kOffsetBits) - 1;
const int kPageBits = 32 - (kOffsetBits + kSpaceBits + kHeapObjectTagSize);
const int kPageShift = kOffsetShift + kOffsetBits;
const int kPageBits = 32 - (kOffsetBits + kSpaceBits + kHeapObjectTagSize);
const int kPageMask = (1 << kPageBits) - 1;
const int kPageAndOffsetShift = kOffsetShift;
const int kPageAndOffsetBits = kPageBits + kOffsetBits;
const int kPageAndOffsetMask = (1 << kPageAndOffsetBits) - 1;
// These values are special allocation space tags used for
// serialization.
// Mark the pages executable on platforms that support it.
const int kLargeCode = LAST_SPACE + 1;
// Allocate extra remembered-set bits.
const int kLargeFixedArray = LAST_SPACE + 2;
static inline AllocationSpace GetSpace(Address addr) {
const intptr_t encoded = reinterpret_cast<intptr_t>(addr);
int space_number = (static_cast<int>(encoded >> kSpaceShift) & kSpaceMask);
if (space_number == kLOSpaceExecutable) space_number = LO_SPACE;
else if (space_number == kLOSpacePointer) space_number = LO_SPACE;
if (space_number > LAST_SPACE) space_number = LO_SPACE;
return static_cast<AllocationSpace>(space_number);
}
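A hypothetical round trip of this encoding (kHeapObjectTag is assumed to
be the two-bit '01' HeapObject tag; the other constants are recomputed
from the definitions above):

```cpp
#include <cassert>
#include <cstdint>

int main() {
  const uint32_t kHeapObjectTag = 1;                  // assumed '01' tag
  const int kHeapObjectTagSize = 2;
  const int kSpaceShift = kHeapObjectTagSize;         // 2
  const int kSpaceBits = 4;
  const int kSpaceMask = (1 << kSpaceBits) - 1;
  const int kOffsetShift = kSpaceShift + kSpaceBits;  // 6
  const int kOffsetBits = 11;
  const int kPageShift = kOffsetShift + kOffsetBits;  // 17
  const int MAP_SPACE = 4, LO_SPACE = 6, LAST_SPACE = 6;

  // Encode page 3, word offset 5, in the map space.
  uint32_t encoded = (3u << kPageShift) | (5u << kOffsetShift) |
                     (MAP_SPACE << kSpaceShift) | kHeapObjectTag;

  // Decode the space the way GetSpace() does: pseudo-space tags above
  // LAST_SPACE (kLargeCode, kLargeFixedArray) fold back into LO_SPACE.
  int space = (encoded >> kSpaceShift) & kSpaceMask;
  if (space > LAST_SPACE) space = LO_SPACE;
  assert(space == MAP_SPACE);
  return 0;
}
```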
......@@ -91,7 +88,7 @@ static inline bool IsLargeExecutableObject(Address addr) {
const intptr_t encoded = reinterpret_cast<intptr_t>(addr);
const int space_number =
(static_cast<int>(encoded >> kSpaceShift) & kSpaceMask);
return (space_number == kLOSpaceExecutable);
return (space_number == kLargeCode);
}
......@@ -99,7 +96,7 @@ static inline bool IsLargeFixedArray(Address addr) {
const intptr_t encoded = reinterpret_cast<intptr_t>(addr);
const int space_number =
(static_cast<int>(encoded >> kSpaceShift) & kSpaceMask);
return (space_number == kLOSpacePointer);
return (space_number == kLargeFixedArray);
}
......@@ -147,6 +144,9 @@ class RelativeAddress {
int page_index,
int page_offset)
: space_(space), page_index_(page_index), page_offset_(page_offset) {
// Assert that the space encoding (plus the two pseudo-spaces for
// special large objects) fits in the available bits.
ASSERT(((LAST_SPACE + 2) & ~kSpaceMask) == 0);
ASSERT(space <= LAST_SPACE && space >= 0);
}
......@@ -154,8 +154,7 @@ class RelativeAddress {
Address Encode() const;
AllocationSpace space() const {
if (space_ == kLOSpaceExecutable) return LO_SPACE;
if (space_ == kLOSpacePointer) return LO_SPACE;
if (space_ > LAST_SPACE) return LO_SPACE;
return static_cast<AllocationSpace>(space_);
}
int page_index() const { return page_index_; }
......@@ -165,7 +164,8 @@ class RelativeAddress {
return space_ == CODE_SPACE ||
space_ == OLD_POINTER_SPACE ||
space_ == OLD_DATA_SPACE ||
space_ == MAP_SPACE;
space_ == MAP_SPACE ||
space_ == CELL_SPACE;
}
void next_address(int offset) { page_offset_ += offset; }
......@@ -180,11 +180,11 @@ class RelativeAddress {
void set_to_large_code_object() {
ASSERT(space_ == LO_SPACE);
space_ = kLOSpaceExecutable;
space_ = kLargeCode;
}
void set_to_large_fixed_array() {
ASSERT(space_ == LO_SPACE);
space_ = kLOSpacePointer;
space_ = kLargeFixedArray;
}
......@@ -201,6 +201,7 @@ Address RelativeAddress::Encode() const {
int result = 0;
switch (space_) {
case MAP_SPACE:
case CELL_SPACE:
case OLD_POINTER_SPACE:
case OLD_DATA_SPACE:
case CODE_SPACE:
......@@ -216,8 +217,8 @@ Address RelativeAddress::Encode() const {
result = word_offset << kPageAndOffsetShift;
break;
case LO_SPACE:
case kLOSpaceExecutable:
case kLOSpacePointer:
case kLargeCode:
case kLargeFixedArray:
ASSERT_EQ(0, page_offset_);
ASSERT_EQ(0, page_index_ & ~kPageAndOffsetMask);
result = page_index_ << kPageAndOffsetShift;
......@@ -235,6 +236,7 @@ void RelativeAddress::Verify() {
ASSERT(page_offset_ >= 0 && page_index_ >= 0);
switch (space_) {
case MAP_SPACE:
case CELL_SPACE:
case OLD_POINTER_SPACE:
case OLD_DATA_SPACE:
case CODE_SPACE:
......@@ -245,8 +247,8 @@ void RelativeAddress::Verify() {
ASSERT(page_index_ == 0);
break;
case LO_SPACE:
case kLOSpaceExecutable:
case kLOSpacePointer:
case kLargeCode:
case kLargeFixedArray:
ASSERT(page_offset_ == 0);
break;
}
......@@ -291,6 +293,7 @@ class SimulatedHeapSpace {
void SimulatedHeapSpace::InitEmptyHeap(AllocationSpace space) {
switch (space) {
case MAP_SPACE:
case CELL_SPACE:
case OLD_POINTER_SPACE:
case OLD_DATA_SPACE:
case CODE_SPACE:
......@@ -307,12 +310,15 @@ void SimulatedHeapSpace::InitEmptyHeap(AllocationSpace space) {
void SimulatedHeapSpace::InitCurrentHeap(AllocationSpace space) {
switch (space) {
case MAP_SPACE:
case CELL_SPACE:
case OLD_POINTER_SPACE:
case OLD_DATA_SPACE:
case CODE_SPACE: {
PagedSpace* ps;
if (space == MAP_SPACE) {
ps = Heap::map_space();
} else if (space == CELL_SPACE) {
ps = Heap::cell_space();
} else if (space == OLD_POINTER_SPACE) {
ps = Heap::old_pointer_space();
} else if (space == OLD_DATA_SPACE) {
......@@ -1121,6 +1127,8 @@ void Serializer::PutHeader() {
writer_->PutInt(Heap::code_space()->Size() + Heap::new_space()->Size());
writer_->PutC('|');
writer_->PutInt(Heap::map_space()->Size());
writer_->PutC('|');
writer_->PutInt(Heap::cell_space()->Size());
writer_->PutC(']');
// Write global handles.
writer_->PutC('G');
......@@ -1303,6 +1311,7 @@ static const int kInitArraySize = 32;
Deserializer::Deserializer(const byte* str, int len)
: reader_(str, len),
map_pages_(kInitArraySize),
cell_pages_(kInitArraySize),
old_pointer_pages_(kInitArraySize),
old_data_pages_(kInitArraySize),
code_pages_(kInitArraySize),
......@@ -1475,6 +1484,8 @@ void Deserializer::GetHeader() {
InitPagedSpace(Heap::code_space(), reader_.GetInt(), &code_pages_);
reader_.ExpectC('|');
InitPagedSpace(Heap::map_space(), reader_.GetInt(), &map_pages_);
reader_.ExpectC('|');
InitPagedSpace(Heap::cell_space(), reader_.GetInt(), &cell_pages_);
reader_.ExpectC(']');
// Create placeholders for global handles, to be filled in during
// IterateRoots.
......@@ -1607,6 +1618,9 @@ Object* Deserializer::Resolve(Address encoded) {
case MAP_SPACE:
return ResolvePaged(PageIndex(encoded), PageOffset(encoded),
Heap::map_space(), &map_pages_);
case CELL_SPACE:
return ResolvePaged(PageIndex(encoded), PageOffset(encoded),
Heap::cell_space(), &cell_pages_);
case OLD_POINTER_SPACE:
return ResolvePaged(PageIndex(encoded), PageOffset(encoded),
Heap::old_pointer_space(), &old_pointer_pages_);
......
......@@ -320,10 +320,11 @@ class Deserializer: public ObjectVisitor {
bool has_log_; // The file has log information.
// Resolve caches the following:
List<Page*> map_pages_; // All pages in the map space.
List<Page*> map_pages_; // All pages in the map space.
List<Page*> cell_pages_; // All pages in the cell space.
List<Page*> old_pointer_pages_; // All pages in the old pointer space.
List<Page*> old_data_pages_; // All pages in the old data space.
List<Page*> code_pages_;
List<Page*> old_data_pages_; // All pages in the old data space.
List<Page*> code_pages_; // All pages in the code space.
List<Object*> large_objects_; // All known large objects.
// A list of global handles at deserialization time.
List<Object**> global_handles_;
......
......@@ -1315,8 +1315,7 @@ class OldSpaceFreeList BASE_EMBEDDED {
private:
// The size range of blocks, in bytes. (Smaller allocations are allowed, but
// will always result in waste.)
static const int kMinBlockSize =
POINTER_SIZE_ALIGN(ByteArray::kHeaderSize) + kPointerSize;
static const int kMinBlockSize = 2 * kPointerSize;
static const int kMaxBlockSize = Page::kMaxHeapObjectSize;
// The identity of the owning space, for building allocation Failure
......@@ -1391,9 +1390,9 @@ class OldSpaceFreeList BASE_EMBEDDED {
// The free list for the map space.
class MapSpaceFreeList BASE_EMBEDDED {
class FixedSizeFreeList BASE_EMBEDDED {
public:
explicit MapSpaceFreeList(AllocationSpace owner);
FixedSizeFreeList(AllocationSpace owner, int object_size);
// Clear the free list.
void Reset();
......@@ -1402,12 +1401,12 @@ class MapSpaceFreeList BASE_EMBEDDED {
int available() { return available_; }
// Place a node on the free list. The block starting at 'start' (assumed to
// have size Map::kSize) is placed on the free list. Bookkeeping
// have size object_size_) is placed on the free list. Bookkeeping
// information will be written to the block, i.e., its contents will be
// destroyed. The start address should be word aligned.
void Free(Address start);
// Allocate a map-sized block from the free list. The block is uninitialized.
// Allocate a fixed-sized block from the free list. The block is uninitialized.
// A failure is returned if no block is available.
Object* Allocate();
......@@ -1422,7 +1421,10 @@ class MapSpaceFreeList BASE_EMBEDDED {
// objects.
AllocationSpace owner_;
DISALLOW_COPY_AND_ASSIGN(MapSpaceFreeList);
// The size of the objects in this space.
int object_size_;
DISALLOW_COPY_AND_ASSIGN(FixedSizeFreeList);
};
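The allocation discipline is easiest to see in a toy version (a sketch
of the idea only; the real list stores filler maps in dead blocks and
returns a Failure rather than null when empty):

```cpp
#include <cassert>

// Toy fixed-size free list: all blocks share one size, known to the
// list, so a free node needs only a 'next' link, not a size field.
class ToyFixedSizeFreeList {
 public:
  explicit ToyFixedSizeFreeList(int object_size)
      : head_(0), object_size_(object_size) {}
  void Free(void* start) {      // push a dead block onto the list
    Node* node = static_cast<Node*>(start);
    node->next = head_;
    head_ = node;
  }
  void* Allocate() {            // pop a block; null when the list is empty
    if (head_ == 0) return 0;
    Node* result = head_;
    head_ = result->next;
    return result;
  }
 private:
  struct Node { Node* next; };
  Node* head_;
  int object_size_;
};

int main() {
  void* block[2];                          // one two-pointer "cell"
  ToyFixedSizeFreeList list(sizeof(block));
  list.Free(block);
  assert(list.Allocate() == static_cast<void*>(block));  // LIFO reuse
  assert(list.Allocate() == 0);
  return 0;
}
```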
......@@ -1460,12 +1462,6 @@ class OldSpace : public PagedSpace {
// clears the free list.
virtual void PrepareForMarkCompact(bool will_compact);
// Adjust the top of relocation pointer to point to the end of the object
// given by 'address' and 'size_in_bytes'. Move it to the next page if
// necessary, ensure that it points to the address, then increment it by the
// size.
void MCAdjustRelocationEnd(Address address, int size_in_bytes);
// Updates the allocation pointer to the relocation top after a mark-compact
// collection.
virtual void MCCommitRelocationInfo();
......@@ -1492,39 +1488,40 @@ class OldSpace : public PagedSpace {
// The space's free list.
OldSpaceFreeList free_list_;
// During relocation, we keep a pointer to the most recently relocated
// object in order to know when to move to the next page.
Address mc_end_of_relocation_;
public:
TRACK_MEMORY("OldSpace")
};
// -----------------------------------------------------------------------------
// Old space for all map objects
// Old space for objects of a fixed size
class MapSpace : public PagedSpace {
class FixedSpace : public PagedSpace {
public:
// Creates a map space object with a maximum capacity.
explicit MapSpace(int max_capacity, AllocationSpace id)
: PagedSpace(max_capacity, id, NOT_EXECUTABLE), free_list_(id) { }
FixedSpace(int max_capacity,
AllocationSpace id,
int object_size_in_bytes,
const char* name)
: PagedSpace(max_capacity, id, NOT_EXECUTABLE),
object_size_in_bytes_(object_size_in_bytes),
name_(name),
free_list_(id, object_size_in_bytes),
page_extra_(Page::kObjectAreaSize % object_size_in_bytes) { }
// The top of allocation in a page in this space. Undefined if page is unused.
virtual Address PageAllocationTop(Page* page) {
return page == TopPageOf(allocation_info_) ? top()
: page->ObjectAreaEnd() - kPageExtra;
: page->ObjectAreaEnd() - page_extra_;
}
// Give a map-sized block of memory to the space's free list.
int object_size_in_bytes() { return object_size_in_bytes_; }
// Give a fixed-sized block of memory to the space's free list.
void Free(Address start) {
free_list_.Free(start);
accounting_stats_.DeallocateBytes(Map::kSize);
}
// Given an index, returns the page address.
Address PageAddress(int page_index) { return page_addresses_[page_index]; }
// Prepares for a mark-compact GC.
virtual void PrepareForMarkCompact(bool will_compact);
......@@ -1536,18 +1533,16 @@ class MapSpace : public PagedSpace {
// Verify integrity of this space.
virtual void Verify();
// Implemented by subclasses to verify an actual object in the space.
virtual void VerifyObject(HeapObject* obj) = 0;
// Reports statistic info of the space
void ReportStatistics();
// Dump the remembered sets in the space to stdout.
void PrintRSet();
#endif
// Constants.
static const int kMapPageIndexBits = 10;
static const int kMaxMapPageIndex = (1 << kMapPageIndexBits) - 1;
static const int kPageExtra = Page::kObjectAreaSize % Map::kSize;
protected:
// Virtual function in the superclass. Slow path of AllocateRaw.
HeapObject* SlowAllocateRaw(int size_in_bytes);
......@@ -1557,9 +1552,44 @@ class MapSpace : public PagedSpace {
HeapObject* AllocateInNextPage(Page* current_page, int size_in_bytes);
private:
// The size of objects in this space.
int object_size_in_bytes_;
// The name of this space.
const char* name_;
// The space's free list.
MapSpaceFreeList free_list_;
FixedSizeFreeList free_list_;
// Bytes of each page that cannot be allocated.
int page_extra_;
};
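page_extra_ is just the page's object area size modulo the object size,
i.e. the unusable tail of each page. A worked example with made-up
numbers (only the modulo relationship comes from the constructor above):

```cpp
#include <cstdio>

int main() {
  const int kObjectAreaSize = 8140;  // hypothetical usable bytes per page
  const int kCellSize = 8;           // hypothetical two-word cell (32-bit)
  // 8140 = 1017 * 8 + 4, so the last 4 bytes of each page go unused.
  printf("page_extra = %d\n", kObjectAreaSize % kCellSize);  // prints 4
  return 0;
}
```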
// -----------------------------------------------------------------------------
// Old space for all map objects
class MapSpace : public FixedSpace {
public:
// Creates a map space object with a maximum capacity.
MapSpace(int max_capacity, AllocationSpace id)
: FixedSpace(max_capacity, id, Map::kSize, "map") {}
// Prepares for a mark-compact GC.
virtual void PrepareForMarkCompact(bool will_compact);
// Given an index, returns the page address.
Address PageAddress(int page_index) { return page_addresses_[page_index]; }
// Constants.
static const int kMaxMapPageIndex = (1 << MapWord::kMapPageIndexBits) - 1;
protected:
#ifdef DEBUG
virtual void VerifyObject(HeapObject* obj);
#endif
private:
// An array of page start addresses in the map space.
Address page_addresses_[kMaxMapPageIndex + 1];
......@@ -1568,6 +1598,25 @@ class MapSpace : public PagedSpace {
};
// -----------------------------------------------------------------------------
// Old space for all global object property cell objects
class CellSpace : public FixedSpace {
public:
// Creates a property cell space object with a maximum capacity.
CellSpace(int max_capacity, AllocationSpace id)
: FixedSpace(max_capacity, id, JSGlobalPropertyCell::kSize, "cell") {}
protected:
#ifdef DEBUG
virtual void VerifyObject(HeapObject* obj);
#endif
public:
TRACK_MEMORY("MapSpace")
};
// -----------------------------------------------------------------------------
// Large objects ( > Page::kMaxHeapObjectSize ) are allocated and managed by
// the large object space. A large object is allocated from OS heap with
......