Create a new paged heap space for global property cells. The new
space is similar to the map space in that it holds fixed-size objects.
A common superclass for spaces with fixed-size objects is introduced
and used for both the map space and the new cell space.

Allocate all global property cells in the cell space and handle the
new space during all GCs. Modify the free-list node representation
(so that the size is no longer at a fixed offset in every node) to
allow two-pointer free-list nodes. Clean up some code in the
mark-compact collector.
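
For orientation, a minimal sketch of the space hierarchy this change
introduces, condensed from the declarations in the diff below
(constructor bodies and most members elided):

class FixedSpace : public PagedSpace {
 public:
  // The fixed object size and the per-page waste live here; the free
  // list hands out blocks of exactly object_size_in_bytes.
  FixedSpace(int max_capacity, AllocationSpace id,
             int object_size_in_bytes, const char* name);
  int object_size_in_bytes();
};

class MapSpace : public FixedSpace {
 public:
  MapSpace(int max_capacity, AllocationSpace id)
      : FixedSpace(max_capacity, id, Map::kSize, "map") {}
};

class CellSpace : public FixedSpace {
 public:
  CellSpace(int max_capacity, AllocationSpace id)
      : FixedSpace(max_capacity, id, JSGlobalPropertyCell::kSize, "cell") {}
};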

Review URL: http://codereview.chromium.org/155211

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@2411 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
parent 68671854
......@@ -257,14 +257,16 @@ typedef bool (*WeakSlotCallback)(Object** pointer);
// NOTE: SpaceIterator depends on AllocationSpace enumeration values being
// consecutive.
enum AllocationSpace {
NEW_SPACE, // Semispaces collected with copying collector.
OLD_POINTER_SPACE, // Must be first of the paged spaces - see PagedSpaces.
OLD_DATA_SPACE, // May not have pointers to new space.
CODE_SPACE, // Also one of the old spaces. Marked executable.
MAP_SPACE, // Only map objects.
LO_SPACE, // Large objects.
NEW_SPACE, // Semispaces collected with copying collector.
OLD_POINTER_SPACE, // May contain pointers to new space.
OLD_DATA_SPACE, // Must not have pointers to new space.
CODE_SPACE, // No pointers to new space, marked executable.
MAP_SPACE, // Only and all map objects.
CELL_SPACE, // Only and all cell objects.
LO_SPACE, // Promoted large objects.
FIRST_SPACE = NEW_SPACE,
LAST_SPACE = LO_SPACE // <= 5 (see kSpaceBits and kLOSpacePointer)
LAST_SPACE = LO_SPACE
};
const int kSpaceTagSize = 3;
const int kSpaceTagMask = (1 << kSpaceTagSize) - 1;
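
The new space still fits in the existing three-bit space tag; the
assertion in Failure::RetryAfterGC further down depends on this:

// With CELL_SPACE added the enum runs NEW=0, OLD_POINTER=1,
// OLD_DATA=2, CODE=3, MAP=4, CELL=5, LO=6.  LAST_SPACE == 6 is less
// than (1 << kSpaceTagSize) == 8, so
// (LAST_SPACE & ~kSpaceTagMask) == 0 still holds.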
......
......@@ -82,6 +82,8 @@ Object* Heap::AllocateRaw(int size_in_bytes,
result = code_space_->AllocateRaw(size_in_bytes);
} else if (LO_SPACE == space) {
result = lo_space_->AllocateRaw(size_in_bytes);
} else if (CELL_SPACE == space) {
result = cell_space_->AllocateRaw(size_in_bytes);
} else {
ASSERT(MAP_SPACE == space);
result = map_space_->AllocateRaw(size_in_bytes);
......@@ -107,12 +109,23 @@ Object* Heap::NumberFromUint32(uint32_t value) {
}
Object* Heap::AllocateRawMap(int size_in_bytes) {
Object* Heap::AllocateRawMap() {
#ifdef DEBUG
Counters::objs_since_last_full.Increment();
Counters::objs_since_last_young.Increment();
#endif
Object* result = map_space_->AllocateRaw(size_in_bytes);
Object* result = map_space_->AllocateRaw(Map::kSize);
if (result->IsFailure()) old_gen_exhausted_ = true;
return result;
}
Object* Heap::AllocateRawCell() {
#ifdef DEBUG
Counters::objs_since_last_full.Increment();
Counters::objs_since_last_young.Increment();
#endif
Object* result = cell_space_->AllocateRaw(JSGlobalPropertyCell::kSize);
if (result->IsFailure()) old_gen_exhausted_ = true;
return result;
}
......
......@@ -53,6 +53,7 @@ OldSpace* Heap::old_pointer_space_ = NULL;
OldSpace* Heap::old_data_space_ = NULL;
OldSpace* Heap::code_space_ = NULL;
MapSpace* Heap::map_space_ = NULL;
CellSpace* Heap::cell_space_ = NULL;
LargeObjectSpace* Heap::lo_space_ = NULL;
static const int kMinimumPromotionLimit = 2*MB;
......@@ -110,7 +111,8 @@ int Heap::Capacity() {
old_pointer_space_->Capacity() +
old_data_space_->Capacity() +
code_space_->Capacity() +
map_space_->Capacity();
map_space_->Capacity() +
cell_space_->Capacity();
}
......@@ -121,7 +123,8 @@ int Heap::Available() {
old_pointer_space_->Available() +
old_data_space_->Available() +
code_space_->Available() +
map_space_->Available();
map_space_->Available() +
cell_space_->Available();
}
......@@ -130,6 +133,7 @@ bool Heap::HasBeenSetup() {
old_data_space_ != NULL &&
code_space_ != NULL &&
map_space_ != NULL &&
cell_space_ != NULL &&
lo_space_ != NULL;
}
......@@ -359,6 +363,8 @@ bool Heap::CollectGarbage(int requested_size, AllocationSpace space) {
return code_space_->Available() >= requested_size;
case MAP_SPACE:
return map_space_->Available() >= requested_size;
case CELL_SPACE:
return cell_space_->Available() >= requested_size;
case LO_SPACE:
return lo_space_->Available() >= requested_size;
}
......@@ -653,7 +659,7 @@ void Heap::Scavenge() {
// Copy objects reachable from weak pointers.
GlobalHandles::IterateWeakRoots(&scavenge_visitor);
#if V8_HOST_ARCH_64_BIT
#ifdef V8_HOST_ARCH_64_BIT
// TODO(X64): Make this go away again. We currently disable RSets for
// 64-bit-mode.
HeapObjectIterator old_pointer_iterator(old_pointer_space_);
......@@ -661,6 +667,12 @@ void Heap::Scavenge() {
HeapObject* heap_object = old_pointer_iterator.next();
heap_object->Iterate(&scavenge_visitor);
}
HeapObjectIterator cell_iterator(cell_space_);
while (cell_iterator.has_next()) {
cell_iterator.next()->Iterate(&scavenge_visitor);
}
HeapObjectIterator map_iterator(map_space_);
while (map_iterator.has_next()) {
HeapObject* heap_object = map_iterator.next();
......@@ -673,13 +685,14 @@ void Heap::Scavenge() {
heap_object->Iterate(&scavenge_visitor);
}
}
#else // V8_HOST_ARCH_64_BIT
#else // !defined(V8_HOST_ARCH_64_BIT)
// Copy objects reachable from the old generation. By definition,
// there are no intergenerational pointers in code or data spaces.
IterateRSet(old_pointer_space_, &ScavengePointer);
IterateRSet(cell_space_, &ScavengePointer);
IterateRSet(map_space_, &ScavengePointer);
lo_space_->IterateRSet(&ScavengePointer);
#endif // V8_HOST_ARCH_64_BIT
#endif
do {
ASSERT(new_space_front <= new_space_.top());
......@@ -827,6 +840,9 @@ void Heap::RebuildRSets() {
old_pointer_space_->ClearRSet();
RebuildRSets(old_pointer_space_);
cell_space_->ClearRSet();
RebuildRSets(cell_space_);
Heap::lo_space_->ClearRSet();
RebuildRSets(lo_space_);
}
......@@ -995,7 +1011,7 @@ void Heap::ScavengePointer(HeapObject** p) {
Object* Heap::AllocatePartialMap(InstanceType instance_type,
int instance_size) {
Object* result = AllocateRawMap(Map::kSize);
Object* result = AllocateRawMap();
if (result->IsFailure()) return result;
// Map::cast cannot be used due to uninitialized map field.
......@@ -1009,7 +1025,7 @@ Object* Heap::AllocatePartialMap(InstanceType instance_type,
Object* Heap::AllocateMap(InstanceType instance_type, int instance_size) {
Object* result = AllocateRawMap(Map::kSize);
Object* result = AllocateRawMap();
if (result->IsFailure()) return result;
Map* map = reinterpret_cast<Map*>(result);
......@@ -1055,7 +1071,6 @@ const Heap::StructTable Heap::struct_table[] = {
bool Heap::CreateInitialMaps() {
Object* obj = AllocatePartialMap(MAP_TYPE, Map::kSize);
if (obj->IsFailure()) return false;
// Map::cast cannot be used due to uninitialized map field.
Map* new_meta_map = reinterpret_cast<Map*>(obj);
set_meta_map(new_meta_map);
......@@ -1069,11 +1084,6 @@ bool Heap::CreateInitialMaps() {
if (obj->IsFailure()) return false;
set_oddball_map(Map::cast(obj));
obj = AllocatePartialMap(JS_GLOBAL_PROPERTY_CELL_TYPE,
JSGlobalPropertyCell::kSize);
if (obj->IsFailure()) return false;
set_global_property_cell_map(Map::cast(obj));
// Allocate the empty array
obj = AllocateEmptyFixedArray();
if (obj->IsFailure()) return false;
......@@ -1083,11 +1093,10 @@ bool Heap::CreateInitialMaps() {
if (obj->IsFailure()) return false;
set_null_value(obj);
// Allocate the empty descriptor array. AllocateMap can now be used.
// Allocate the empty descriptor array.
obj = AllocateEmptyFixedArray();
if (obj->IsFailure()) return false;
// There is a check against empty_descriptor_array() in cast().
set_empty_descriptor_array(reinterpret_cast<DescriptorArray*>(obj));
set_empty_descriptor_array(DescriptorArray::cast(obj));
// Fix the instance_descriptors for the existing maps.
meta_map()->set_instance_descriptors(empty_descriptor_array());
......@@ -1099,22 +1108,16 @@ bool Heap::CreateInitialMaps() {
oddball_map()->set_instance_descriptors(empty_descriptor_array());
oddball_map()->set_code_cache(empty_fixed_array());
global_property_cell_map()->set_instance_descriptors(
empty_descriptor_array());
global_property_cell_map()->set_code_cache(empty_fixed_array());
// Fix prototype object for existing maps.
meta_map()->set_prototype(null_value());
meta_map()->set_constructor(null_value());
fixed_array_map()->set_prototype(null_value());
fixed_array_map()->set_constructor(null_value());
oddball_map()->set_prototype(null_value());
oddball_map()->set_constructor(null_value());
global_property_cell_map()->set_prototype(null_value());
global_property_cell_map()->set_constructor(null_value());
obj = AllocateMap(HEAP_NUMBER_TYPE, HeapNumber::kSize);
if (obj->IsFailure()) return false;
set_heap_number_map(Map::cast(obj));
......@@ -1168,13 +1171,18 @@ bool Heap::CreateInitialMaps() {
if (obj->IsFailure()) return false;
set_code_map(Map::cast(obj));
obj = AllocateMap(JS_GLOBAL_PROPERTY_CELL_TYPE,
JSGlobalPropertyCell::kSize);
if (obj->IsFailure()) return false;
set_global_property_cell_map(Map::cast(obj));
obj = AllocateMap(FILLER_TYPE, kPointerSize);
if (obj->IsFailure()) return false;
set_one_word_filler_map(Map::cast(obj));
set_one_pointer_filler_map(Map::cast(obj));
obj = AllocateMap(FILLER_TYPE, 2 * kPointerSize);
if (obj->IsFailure()) return false;
set_two_word_filler_map(Map::cast(obj));
set_two_pointer_filler_map(Map::cast(obj));
for (unsigned i = 0; i < ARRAY_SIZE(struct_table); i++) {
const StructTable& entry = struct_table[i];
......@@ -1242,9 +1250,7 @@ Object* Heap::AllocateHeapNumber(double value) {
Object* Heap::AllocateJSGlobalPropertyCell(Object* value) {
Object* result = AllocateRaw(JSGlobalPropertyCell::kSize,
OLD_POINTER_SPACE,
OLD_POINTER_SPACE);
Object* result = AllocateRawCell();
if (result->IsFailure()) return result;
HeapObject::cast(result)->set_map(global_property_cell_map());
JSGlobalPropertyCell::cast(result)->set_value(value);
......@@ -1821,7 +1827,7 @@ void Heap::CreateFillerObjectAt(Address addr, int size) {
if (size == 0) return;
HeapObject* filler = HeapObject::FromAddress(addr);
if (size == kPointerSize) {
filler->set_map(Heap::one_word_filler_map());
filler->set_map(Heap::one_pointer_filler_map());
} else {
filler->set_map(Heap::byte_array_map());
ByteArray::cast(filler)->set_length(ByteArray::LengthFor(size));
......@@ -2697,6 +2703,8 @@ void Heap::ReportHeapStatistics(const char* title) {
code_space_->ReportStatistics();
PrintF("Map space : ");
map_space_->ReportStatistics();
PrintF("Cell space : ");
cell_space_->ReportStatistics();
PrintF("Large object space : ");
lo_space_->ReportStatistics();
PrintF(">>>>>> ========================================= >>>>>>\n");
......@@ -2717,6 +2725,7 @@ bool Heap::Contains(Address addr) {
old_data_space_->Contains(addr) ||
code_space_->Contains(addr) ||
map_space_->Contains(addr) ||
cell_space_->Contains(addr) ||
lo_space_->SlowContains(addr));
}
......@@ -2741,6 +2750,8 @@ bool Heap::InSpace(Address addr, AllocationSpace space) {
return code_space_->Contains(addr);
case MAP_SPACE:
return map_space_->Contains(addr);
case CELL_SPACE:
return cell_space_->Contains(addr);
case LO_SPACE:
return lo_space_->SlowContains(addr);
}
......@@ -2853,7 +2864,9 @@ int Heap::IterateRSetRange(Address object_start,
void Heap::IterateRSet(PagedSpace* space, ObjectSlotCallback copy_object_func) {
ASSERT(Page::is_rset_in_use());
ASSERT(space == old_pointer_space_ || space == map_space_);
ASSERT(space == old_pointer_space_ ||
space == cell_space_ ||
space == map_space_);
static void* paged_rset_histogram = StatsTable::CreateHistogram(
"V8.RSetPaged",
......@@ -2964,6 +2977,7 @@ int Heap::PromotedSpaceSize() {
+ old_data_space_->Size()
+ code_space_->Size()
+ map_space_->Size()
+ cell_space_->Size()
+ lo_space_->Size();
}
......@@ -3041,6 +3055,13 @@ bool Heap::Setup(bool create_heap_objects) {
// enough to hold at least a page will cause it to allocate.
if (!map_space_->Setup(NULL, 0)) return false;
// Initialize global property cell space.
cell_space_ = new CellSpace(old_generation_size_, CELL_SPACE);
if (cell_space_ == NULL) return false;
// Setting up a paged space without giving it a virtual memory range big
// enough to hold at least a page will cause it to allocate.
if (!cell_space_->Setup(NULL, 0)) return false;
// The large object code space may contain code or data. We set the memory
// to be non-executable here for safety, but this means we need to enable it
// explicitly when allocating large code objects.
......@@ -3093,6 +3114,12 @@ void Heap::TearDown() {
map_space_ = NULL;
}
if (cell_space_ != NULL) {
cell_space_->TearDown();
delete cell_space_;
cell_space_ = NULL;
}
if (lo_space_ != NULL) {
lo_space_->TearDown();
delete lo_space_;
......@@ -3104,11 +3131,9 @@ void Heap::TearDown() {
void Heap::Shrink() {
// Try to shrink map, old, and code spaces.
map_space_->Shrink();
old_pointer_space_->Shrink();
old_data_space_->Shrink();
code_space_->Shrink();
// Try to shrink all paged spaces.
PagedSpaces spaces;
while (PagedSpace* space = spaces.next()) space->Shrink();
}
......@@ -3116,24 +3141,16 @@ void Heap::Shrink() {
void Heap::Protect() {
if (HasBeenSetup()) {
new_space_.Protect();
map_space_->Protect();
old_pointer_space_->Protect();
old_data_space_->Protect();
code_space_->Protect();
lo_space_->Protect();
AllSpaces spaces;
while (Space* space = spaces.next()) space->Protect();
}
}
void Heap::Unprotect() {
if (HasBeenSetup()) {
new_space_.Unprotect();
map_space_->Unprotect();
old_pointer_space_->Unprotect();
old_data_space_->Unprotect();
code_space_->Unprotect();
lo_space_->Unprotect();
AllSpaces spaces;
while (Space* space = spaces.next()) space->Unprotect();
}
}
......@@ -3171,6 +3188,8 @@ Space* AllSpaces::next() {
return Heap::code_space();
case MAP_SPACE:
return Heap::map_space();
case CELL_SPACE:
return Heap::cell_space();
case LO_SPACE:
return Heap::lo_space();
default:
......@@ -3189,6 +3208,8 @@ PagedSpace* PagedSpaces::next() {
return Heap::code_space();
case MAP_SPACE:
return Heap::map_space();
case CELL_SPACE:
return Heap::cell_space();
default:
return NULL;
}
......@@ -3262,6 +3283,9 @@ ObjectIterator* SpaceIterator::CreateIterator() {
case MAP_SPACE:
iterator_ = new HeapObjectIterator(Heap::map_space());
break;
case CELL_SPACE:
iterator_ = new HeapObjectIterator(Heap::cell_space());
break;
case LO_SPACE:
iterator_ = new LargeObjectIterator(Heap::lo_space());
break;
......
......@@ -105,8 +105,8 @@ namespace internal {
V(Map, boilerplate_function_map, BoilerplateFunctionMap) \
V(Map, shared_function_info_map, SharedFunctionInfoMap) \
V(Map, proxy_map, ProxyMap) \
V(Map, one_word_filler_map, OneWordFillerMap) \
V(Map, two_word_filler_map, TwoWordFillerMap) \
V(Map, one_pointer_filler_map, OnePointerFillerMap) \
V(Map, two_pointer_filler_map, TwoPointerFillerMap) \
V(Object, nan_value, NanValue) \
V(Object, undefined_value, UndefinedValue) \
V(Object, minus_zero_value, MinusZeroValue) \
......@@ -263,6 +263,7 @@ class Heap : public AllStatic {
static OldSpace* old_data_space() { return old_data_space_; }
static OldSpace* code_space() { return code_space_; }
static MapSpace* map_space() { return map_space_; }
static CellSpace* cell_space() { return cell_space_; }
static LargeObjectSpace* lo_space() { return lo_space_; }
static bool always_allocate() { return always_allocate_scope_depth_ != 0; }
......@@ -852,6 +853,7 @@ class Heap : public AllStatic {
static OldSpace* old_data_space_;
static OldSpace* code_space_;
static MapSpace* map_space_;
static CellSpace* cell_space_;
static LargeObjectSpace* lo_space_;
static HeapState gc_state_;
......@@ -975,7 +977,10 @@ class Heap : public AllStatic {
// to Heap::AllocateRaw(size_in_bytes, MAP_SPACE), except that (a) it doesn't
// have to test the allocation space argument and (b) can reduce code size
// (since both AllocateRaw and AllocateRawMap are inlined).
static inline Object* AllocateRawMap(int size_in_bytes);
static inline Object* AllocateRawMap();
// Allocate an uninitialized object in the global property cell space.
static inline Object* AllocateRawCell();
// Initializes a JSObject based on its map.
static void InitializeJSObjectFromMap(JSObject* obj,
......
......@@ -56,6 +56,7 @@ int MarkCompactCollector::live_old_data_objects_ = 0;
int MarkCompactCollector::live_old_pointer_objects_ = 0;
int MarkCompactCollector::live_code_objects_ = 0;
int MarkCompactCollector::live_map_objects_ = 0;
int MarkCompactCollector::live_cell_objects_ = 0;
int MarkCompactCollector::live_lo_objects_ = 0;
#endif
......@@ -155,6 +156,7 @@ void MarkCompactCollector::Prepare(GCTracer* tracer) {
live_old_data_objects_ = 0;
live_code_objects_ = 0;
live_map_objects_ = 0;
live_cell_objects_ = 0;
live_lo_objects_ = 0;
#endif
}
......@@ -710,6 +712,10 @@ void MarkCompactCollector::RefillMarkingStack() {
ScanOverflowedObjects(&map_it);
if (marking_stack.is_full()) return;
HeapObjectIterator cell_it(Heap::cell_space(), &OverflowObjectSize);
ScanOverflowedObjects(&cell_it);
if (marking_stack.is_full()) return;
LargeObjectIterator lo_it(Heap::lo_space(), &OverflowObjectSize);
ScanOverflowedObjects(&lo_it);
if (marking_stack.is_full()) return;
......@@ -809,6 +815,9 @@ void MarkCompactCollector::UpdateLiveObjectCount(HeapObject* obj) {
} else if (Heap::map_space()->Contains(obj)) {
ASSERT(obj->IsMap());
live_map_objects_++;
} else if (Heap::cell_space()->Contains(obj)) {
ASSERT(obj->IsJSGlobalPropertyCell());
live_cell_objects_++;
} else if (Heap::old_pointer_space()->Contains(obj)) {
live_old_pointer_objects_++;
} else if (Heap::old_data_space()->Contains(obj)) {
......@@ -968,27 +977,32 @@ inline Object* MCAllocateFromNewSpace(HeapObject* object, int object_size) {
// Allocation functions for the paged spaces call the space's MCAllocateRaw.
inline Object* MCAllocateFromOldPointerSpace(HeapObject* object,
inline Object* MCAllocateFromOldPointerSpace(HeapObject* ignore,
int object_size) {
return Heap::old_pointer_space()->MCAllocateRaw(object_size);
}
inline Object* MCAllocateFromOldDataSpace(HeapObject* object, int object_size) {
inline Object* MCAllocateFromOldDataSpace(HeapObject* ignore, int object_size) {
return Heap::old_data_space()->MCAllocateRaw(object_size);
}
inline Object* MCAllocateFromCodeSpace(HeapObject* object, int object_size) {
inline Object* MCAllocateFromCodeSpace(HeapObject* ignore, int object_size) {
return Heap::code_space()->MCAllocateRaw(object_size);
}
inline Object* MCAllocateFromMapSpace(HeapObject* object, int object_size) {
inline Object* MCAllocateFromMapSpace(HeapObject* ignore, int object_size) {
return Heap::map_space()->MCAllocateRaw(object_size);
}
inline Object* MCAllocateFromCellSpace(HeapObject* ignore, int object_size) {
return Heap::cell_space()->MCAllocateRaw(object_size);
}
// The forwarding address is encoded at the same offset as the current
// to-space object, but in from space.
inline void EncodeForwardingAddressInNewSpace(HeapObject* old_object,
......@@ -1147,7 +1161,7 @@ static void SweepSpace(NewSpace* space) {
ByteArray::cast(object)->set_length(ByteArray::LengthFor(size));
} else {
ASSERT(size == kPointerSize);
object->set_map(Heap::raw_unchecked_one_word_filler_map());
object->set_map(Heap::raw_unchecked_one_pointer_filler_map());
}
ASSERT(object->Size() == size);
}
......@@ -1197,8 +1211,8 @@ static void SweepSpace(PagedSpace* space, DeallocateFunction dealloc) {
// loop.
}
// If the last region was not live we need to from free_start to the
// allocation top in the page.
// If the last region was not live we need to deallocate from
// free_start to the allocation top in the page.
if (!is_previous_alive) {
int free_size = p->AllocationTop() - free_start;
if (free_size > 0) {
......@@ -1242,6 +1256,21 @@ void MarkCompactCollector::DeallocateMapBlock(Address start,
}
void MarkCompactCollector::DeallocateCellBlock(Address start,
int size_in_bytes) {
// Free-list elements in cell space are assumed to have a fixed size.
// We break the free block into chunks and add them to the free list
// individually.
int size = Heap::cell_space()->object_size_in_bytes();
ASSERT(size_in_bytes % size == 0);
Heap::ClearRSetRange(start, size_in_bytes);
Address end = start + size_in_bytes;
for (Address a = start; a < end; a += size) {
Heap::cell_space()->Free(a);
}
}
void MarkCompactCollector::EncodeForwardingAddresses() {
ASSERT(state_ == ENCODE_FORWARDING_ADDRESSES);
// Objects in the active semispace of the young generation may be
......@@ -1262,6 +1291,11 @@ void MarkCompactCollector::EncodeForwardingAddresses() {
LogNonLiveCodeObject>(
Heap::code_space());
EncodeForwardingAddressesInPagedSpace<MCAllocateFromCellSpace,
IgnoreNonLiveObject>(
Heap::cell_space());
// Compute new space next to last after the old and code spaces have been
// compacted. Objects in new space can be promoted to old or code space.
EncodeForwardingAddressesInNewSpace();
......@@ -1280,6 +1314,7 @@ void MarkCompactCollector::EncodeForwardingAddresses() {
Heap::old_data_space()->MCWriteRelocationInfoToPage();
Heap::code_space()->MCWriteRelocationInfoToPage();
Heap::map_space()->MCWriteRelocationInfoToPage();
Heap::cell_space()->MCWriteRelocationInfoToPage();
}
......@@ -1294,6 +1329,7 @@ void MarkCompactCollector::SweepSpaces() {
SweepSpace(Heap::old_pointer_space(), &DeallocateOldPointerBlock);
SweepSpace(Heap::old_data_space(), &DeallocateOldDataBlock);
SweepSpace(Heap::code_space(), &DeallocateCodeBlock);
SweepSpace(Heap::cell_space(), &DeallocateCellBlock);
SweepSpace(Heap::new_space());
SweepSpace(Heap::map_space(), &DeallocateMapBlock);
}
......@@ -1372,15 +1408,16 @@ class UpdatingVisitor: public ObjectVisitor {
ASSERT(!Heap::InFromSpace(obj));
if (Heap::new_space()->Contains(obj)) {
Address f_addr = Heap::new_space()->FromSpaceLow() +
Heap::new_space()->ToSpaceOffsetForAddress(old_addr);
new_addr = Memory::Address_at(f_addr);
Address forwarding_pointer_addr =
Heap::new_space()->FromSpaceLow() +
Heap::new_space()->ToSpaceOffsetForAddress(old_addr);
new_addr = Memory::Address_at(forwarding_pointer_addr);
#ifdef DEBUG
ASSERT(Heap::old_pointer_space()->Contains(new_addr) ||
Heap::old_data_space()->Contains(new_addr) ||
Heap::code_space()->Contains(new_addr) ||
Heap::new_space()->FromSpaceContains(new_addr));
Heap::new_space()->FromSpaceContains(new_addr) ||
Heap::lo_space()->Contains(HeapObject::FromAddress(new_addr)));
if (Heap::new_space()->FromSpaceContains(new_addr)) {
ASSERT(Heap::new_space()->FromSpaceOffsetForAddress(new_addr) <=
......@@ -1393,32 +1430,19 @@ class UpdatingVisitor: public ObjectVisitor {
return;
} else {
ASSERT(Heap::old_pointer_space()->Contains(obj) ||
Heap::old_data_space()->Contains(obj) ||
Heap::code_space()->Contains(obj) ||
Heap::map_space()->Contains(obj));
new_addr = MarkCompactCollector::GetForwardingAddressInOldSpace(obj);
ASSERT(Heap::old_pointer_space()->Contains(new_addr) ||
Heap::old_data_space()->Contains(new_addr) ||
Heap::code_space()->Contains(new_addr) ||
Heap::map_space()->Contains(new_addr));
#ifdef DEBUG
if (Heap::old_pointer_space()->Contains(obj)) {
ASSERT(Heap::old_pointer_space()->MCSpaceOffsetForAddress(new_addr) <=
Heap::old_pointer_space()->MCSpaceOffsetForAddress(old_addr));
} else if (Heap::old_data_space()->Contains(obj)) {
ASSERT(Heap::old_data_space()->MCSpaceOffsetForAddress(new_addr) <=
Heap::old_data_space()->MCSpaceOffsetForAddress(old_addr));
} else if (Heap::code_space()->Contains(obj)) {
ASSERT(Heap::code_space()->MCSpaceOffsetForAddress(new_addr) <=
Heap::code_space()->MCSpaceOffsetForAddress(old_addr));
} else {
ASSERT(Heap::map_space()->MCSpaceOffsetForAddress(new_addr) <=
Heap::map_space()->MCSpaceOffsetForAddress(old_addr));
PagedSpaces spaces;
PagedSpace* original_space = spaces.next();
while (original_space != NULL) {
if (original_space->Contains(obj)) break;
original_space = spaces.next();
}
ASSERT(original_space != NULL);
#endif
new_addr = MarkCompactCollector::GetForwardingAddressInOldSpace(obj);
ASSERT(original_space->Contains(new_addr));
ASSERT(original_space->MCSpaceOffsetForAddress(new_addr) <=
original_space->MCSpaceOffsetForAddress(old_addr));
}
*p = HeapObject::FromAddress(new_addr);
......@@ -1450,6 +1474,8 @@ void MarkCompactCollector::UpdatePointers() {
&UpdatePointersInOldObject);
int live_codes = IterateLiveObjects(Heap::code_space(),
&UpdatePointersInOldObject);
int live_cells = IterateLiveObjects(Heap::cell_space(),
&UpdatePointersInOldObject);
int live_news = IterateLiveObjects(Heap::new_space(),
&UpdatePointersInNewObject);
......@@ -1461,15 +1487,14 @@ void MarkCompactCollector::UpdatePointers() {
USE(live_pointer_olds);
USE(live_data_olds);
USE(live_codes);
USE(live_cells);
USE(live_news);
#ifdef DEBUG
ASSERT(live_maps == live_map_objects_);
ASSERT(live_data_olds == live_old_data_objects_);
ASSERT(live_pointer_olds == live_old_pointer_objects_);
ASSERT(live_codes == live_code_objects_);
ASSERT(live_cells == live_cell_objects_);
ASSERT(live_news == live_young_objects_);
#endif
}
......@@ -1590,30 +1615,31 @@ void MarkCompactCollector::RelocateObjects() {
int live_data_olds = IterateLiveObjects(Heap::old_data_space(),
&RelocateOldDataObject);
int live_codes = IterateLiveObjects(Heap::code_space(), &RelocateCodeObject);
int live_cells = IterateLiveObjects(Heap::cell_space(), &RelocateCellObject);
int live_news = IterateLiveObjects(Heap::new_space(), &RelocateNewObject);
USE(live_maps);
USE(live_data_olds);
USE(live_pointer_olds);
USE(live_codes);
USE(live_cells);
USE(live_news);
#ifdef DEBUG
ASSERT(live_maps == live_map_objects_);
ASSERT(live_data_olds == live_old_data_objects_);
ASSERT(live_pointer_olds == live_old_pointer_objects_);
ASSERT(live_codes == live_code_objects_);
ASSERT(live_cells == live_cell_objects_);
ASSERT(live_news == live_young_objects_);
#endif
// Notify code object in LO to convert IC target to address
// This must happen after lo_space_->Compact
LargeObjectIterator it(Heap::lo_space());
while (it.has_next()) { ConvertCodeICTargetToAddress(it.next()); }
// Flips from and to spaces
// Flip from and to spaces
Heap::new_space()->Flip();
// Sets age_mark to bottom in to space
// Set age_mark to bottom in to space
Address mark = Heap::new_space()->bottom();
Heap::new_space()->set_age_mark(mark);
......@@ -1637,7 +1663,7 @@ int MarkCompactCollector::ConvertCodeICTargetToAddress(HeapObject* obj) {
int MarkCompactCollector::RelocateMapObject(HeapObject* obj) {
// decode map pointer (forwarded address)
// Recover map pointer.
MapWord encoding = obj->map_word();
Address map_addr = encoding.DecodeMapAddress(Heap::map_space());
ASSERT(Heap::map_space()->Contains(HeapObject::FromAddress(map_addr)));
......@@ -1645,10 +1671,10 @@ int MarkCompactCollector::RelocateMapObject(HeapObject* obj) {
// Get forwarding address before resetting map pointer
Address new_addr = GetForwardingAddressInOldSpace(obj);
// recover map pointer
// Reset map pointer. The meta map object may not be copied yet so
// Map::cast does not yet work.
obj->set_map(reinterpret_cast<Map*>(HeapObject::FromAddress(map_addr)));
// The meta map object may not be copied yet.
Address old_addr = obj->address();
if (new_addr != old_addr) {
......@@ -1665,23 +1691,23 @@ int MarkCompactCollector::RelocateMapObject(HeapObject* obj) {
}
static inline int RelocateOldObject(HeapObject* obj,
OldSpace* space,
Address new_addr,
Address map_addr) {
// recover map pointer
obj->set_map(reinterpret_cast<Map*>(HeapObject::FromAddress(map_addr)));
static inline int RestoreMap(HeapObject* obj,
PagedSpace* space,
Address new_addr,
Address map_addr) {
// This must be a non-map object, and the function relies on the
// assumption that the Map space is compacted before the other paged
// spaces (see RelocateObjects).
// Reset map pointer.
obj->set_map(Map::cast(HeapObject::FromAddress(map_addr)));
// This is a non-map object, it relies on the assumption that the Map space
// is compacted before the Old space (see RelocateObjects).
int obj_size = obj->Size();
ASSERT_OBJECT_SIZE(obj_size);
ASSERT(space->MCSpaceOffsetForAddress(new_addr) <=
space->MCSpaceOffsetForAddress(obj->address()));
space->MCAdjustRelocationEnd(new_addr, obj_size);
#ifdef DEBUG
if (FLAG_gc_verbose) {
PrintF("relocate %p -> %p\n", obj->address(), new_addr);
......@@ -1693,21 +1719,22 @@ static inline int RelocateOldObject(HeapObject* obj,
int MarkCompactCollector::RelocateOldNonCodeObject(HeapObject* obj,
OldSpace* space) {
// decode map pointer (forwarded address)
PagedSpace* space) {
// Recover map pointer.
MapWord encoding = obj->map_word();
Address map_addr = encoding.DecodeMapAddress(Heap::map_space());
ASSERT(Heap::map_space()->Contains(map_addr));
// Get forwarding address before resetting map pointer
// Get forwarding address before resetting map pointer.
Address new_addr = GetForwardingAddressInOldSpace(obj);
int obj_size = RelocateOldObject(obj, space, new_addr, map_addr);
// Reset the map pointer.
int obj_size = RestoreMap(obj, space, new_addr, map_addr);
Address old_addr = obj->address();
if (new_addr != old_addr) {
memmove(new_addr, old_addr, obj_size); // copy contents
memmove(new_addr, old_addr, obj_size); // Copy contents
}
ASSERT(!HeapObject::FromAddress(new_addr)->IsCode());
......@@ -1726,8 +1753,13 @@ int MarkCompactCollector::RelocateOldDataObject(HeapObject* obj) {
}
int MarkCompactCollector::RelocateCellObject(HeapObject* obj) {
return RelocateOldNonCodeObject(obj, Heap::cell_space());
}
int MarkCompactCollector::RelocateCodeObject(HeapObject* obj) {
// decode map pointer (forwarded address)
// Recover map pointer.
MapWord encoding = obj->map_word();
Address map_addr = encoding.DecodeMapAddress(Heap::map_space());
ASSERT(Heap::map_space()->Contains(HeapObject::FromAddress(map_addr)));
......@@ -1735,23 +1767,23 @@ int MarkCompactCollector::RelocateCodeObject(HeapObject* obj) {
// Get forwarding address before resetting map pointer
Address new_addr = GetForwardingAddressInOldSpace(obj);
int obj_size = RelocateOldObject(obj, Heap::code_space(), new_addr, map_addr);
// Reset the map pointer.
int obj_size = RestoreMap(obj, Heap::code_space(), new_addr, map_addr);
// convert inline cache target to address using old address
// Convert inline cache target to address using old address.
if (obj->IsCode()) {
// convert target to address first related to old_address
Code::cast(obj)->ConvertICTargetsFromObjectToAddress();
}
Address old_addr = obj->address();
if (new_addr != old_addr) {
memmove(new_addr, old_addr, obj_size); // copy contents
memmove(new_addr, old_addr, obj_size); // Copy contents.
}
HeapObject* copied_to = HeapObject::FromAddress(new_addr);
if (copied_to->IsCode()) {
// may also update inline cache target.
// May also update inline cache target.
Code::cast(copied_to)->Relocate(new_addr - old_addr);
// Notify the logger that compiled code has moved.
LOG(CodeMoveEvent(old_addr, new_addr));
......@@ -1771,15 +1803,15 @@ int MarkCompactCollector::RelocateNewObject(HeapObject* obj) {
Address new_addr =
Memory::Address_at(Heap::new_space()->FromSpaceLow() + offset);
#ifdef DEBUG
if (Heap::new_space()->FromSpaceContains(new_addr)) {
ASSERT(Heap::new_space()->FromSpaceOffsetForAddress(new_addr) <=
Heap::new_space()->ToSpaceOffsetForAddress(old_addr));
} else {
OldSpace* target_space = Heap::TargetSpace(obj);
ASSERT(target_space == Heap::old_pointer_space() ||
target_space == Heap::old_data_space());
target_space->MCAdjustRelocationEnd(new_addr, obj_size);
ASSERT(Heap::TargetSpace(obj) == Heap::old_pointer_space() ||
Heap::TargetSpace(obj) == Heap::old_data_space());
}
#endif
// New and old addresses cannot overlap.
memcpy(reinterpret_cast<void*>(new_addr),
......
......@@ -293,6 +293,7 @@ class MarkCompactCollector: public AllStatic {
static void DeallocateOldDataBlock(Address start, int size_in_bytes);
static void DeallocateCodeBlock(Address start, int size_in_bytes);
static void DeallocateMapBlock(Address start, int size_in_bytes);
static void DeallocateCellBlock(Address start, int size_in_bytes);
// If we are not compacting the heap, we simply sweep the spaces except
// for the large object space, clearing mark bits and adding unmarked
......@@ -352,8 +353,11 @@ class MarkCompactCollector: public AllStatic {
static int RelocateOldPointerObject(HeapObject* obj);
static int RelocateOldDataObject(HeapObject* obj);
// Relocate a property cell object.
static int RelocateCellObject(HeapObject* obj);
// Helper function.
static inline int RelocateOldNonCodeObject(HeapObject* obj, OldSpace* space);
static inline int RelocateOldNonCodeObject(HeapObject* obj, PagedSpace* space);
// Relocates an object in the code space.
static int RelocateCodeObject(HeapObject* obj);
......@@ -393,6 +397,9 @@ class MarkCompactCollector: public AllStatic {
// Number of live objects in Heap::map_space_.
static int live_map_objects_;
// Number of live objects in Heap::cell_space_.
static int live_cell_objects_;
// Number of live objects in Heap::lo_space_.
static int live_lo_objects_;
......
......@@ -768,6 +768,8 @@ int Failure::value() const {
Failure* Failure::RetryAfterGC(int requested_bytes) {
// Assert that the space encoding fits in the three bits allotted for it.
ASSERT((LAST_SPACE & ~kSpaceTagMask) == 0);
int requested = requested_bytes >> kObjectAlignmentBits;
int value = (requested << kSpaceTagSize) | NEW_SPACE;
ASSERT(value >> kSpaceTagSize == requested);
......
......@@ -928,12 +928,14 @@ class Smi: public Object {
// Failure is used for reporting out of memory situations and
// propagating exceptions through the runtime system. Failure objects
// are transient and cannot occur as part of the objects graph.
// are transient and cannot occur as part of the object graph.
//
// Failures are a single word, encoded as follows:
// +-------------------------+---+--+--+
// |rrrrrrrrrrrrrrrrrrrrrrrrr|sss|tt|11|
// +-------------------------+---+--+--+
// (bit positions: 'r' occupies bits 31..7, 's' bits 6..4, 't' bits 3..2,
// and the failure tag bits 1..0)
//
// The low two bits, 0-1, are the failure tag, 11. The next two bits,
// 2-3, are a failure type tag 'tt' with possible values:
......@@ -944,18 +946,13 @@ class Smi: public Object {
//
// The next three bits, 4-6, are an allocation space tag 'sss'. The
// allocation space tag is 000 for all failure types except
// RETRY_AFTER_GC. For RETRY_AFTER_GC, the possible values are
// (the encoding is found in globals.h):
// 000 NEW_SPACE
// 001 OLD_SPACE
// 010 CODE_SPACE
// 011 MAP_SPACE
// 100 LO_SPACE
// RETRY_AFTER_GC. For RETRY_AFTER_GC, the possible values are the
// allocation spaces (the encoding is found in globals.h).
//
// The remaining bits is the number of words requested by the
// allocation request that failed, and is zeroed except for
// RETRY_AFTER_GC failures. The 25 bits (on a 32 bit platform) gives
// a representable range of 2^27 bytes (128MB).
// The remaining bits are the size of the allocation request in units
// of the pointer size, and are zeroed except for RETRY_AFTER_GC
// failures. The 25 bits (on a 32-bit platform) give a representable
// range of 2^27 bytes (128MB).
// Failure type tag info.
const int kFailureTypeTagSize = 2;
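
A worked example of the size/space packing used by
Failure::RetryAfterGC above; the request size and space are
hypothetical, and kObjectAlignmentBits is assumed to be 2 (32-bit
pointers), so the numbers are illustrative only:

// Hypothetical request: 40 bytes from OLD_DATA_SPACE (enum value 2).
//   requested = 40 >> kObjectAlignmentBits       // 40 >> 2 == 10 words
//   value     = (requested << kSpaceTagSize) | OLD_DATA_SPACE
//             = (10 << 3) | 2 == 82
// The failure word then appends the RETRY_AFTER_GC type tag 'tt' and
// the low 0b11 failure tag beneath 'value', as drawn in the diagram
// above.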
......@@ -1085,14 +1082,6 @@ class MapWord BASE_EMBEDDED {
inline Address ToEncodedAddress();
private:
// HeapObject calls the private constructor and directly reads the value.
friend class HeapObject;
explicit MapWord(uintptr_t value) : value_(value) {}
uintptr_t value_;
// Bits used by the marking phase of the garbage collector.
//
// The first word of a heap object is normally a map pointer. The last two
......@@ -1134,6 +1123,14 @@ class MapWord BASE_EMBEDDED {
// 0xFFE00000
static const uint32_t kForwardingOffsetMask =
~(kMapPageIndexMask | kMapPageOffsetMask);
private:
// HeapObject calls the private constructor and directly reads the value.
friend class HeapObject;
explicit MapWord(uintptr_t value) : value_(value) {}
uintptr_t value_;
};
......
......@@ -42,47 +42,44 @@
namespace v8 {
namespace internal {
// Encoding: a RelativeAddress must be able to fit in a pointer:
// it is encoded as an Address with (from MS to LS bits):
// 27 bits identifying a word in the space, in one of three formats:
// - MAP and OLD spaces: 16 bits of page number, 11 bits of word offset in page
// - NEW space: 27 bits of word offset
// - LO space: 27 bits of page number
// 3 bits to encode the AllocationSpace (special values for code in LO space)
// 2 bits identifying this as a HeapObject
// 32-bit encoding: a RelativeAddress must be able to fit in a
// pointer: it is encoded as an Address with (from LS to MS bits):
// - 2 bits identifying this as a HeapObject.
// - 4 bits to encode the AllocationSpace (including special values for
// code and fixed arrays in LO space)
// - 26 bits identifying a word in the space, in one of three formats:
// - paged spaces: 15 bits of page number, 11 bits of word offset in page
// - NEW space: 26 bits of word offset
// - LO space: 26 bits of page number
const int kSpaceShift = kHeapObjectTagSize;
const int kSpaceBits = kSpaceTagSize;
const int kSpaceMask = kSpaceTagMask;
// These value are used instead of space numbers when serializing/
// deserializing. They indicate an object that is in large object space, but
// should be treated specially.
// Make the pages executable on platforms that support it:
const int kLOSpaceExecutable = LAST_SPACE + 1;
// Reserve space for write barrier bits (for objects that can contain
// references to new space):
const int kLOSpacePointer = LAST_SPACE + 2;
const int kSpaceBits = 4;
const int kSpaceMask = (1 << kSpaceBits) - 1;
const int kOffsetShift = kSpaceShift + kSpaceBits;
const int kOffsetBits = 11;
const int kOffsetMask = (1 << kOffsetBits) - 1;
const int kPageBits = 32 - (kOffsetBits + kSpaceBits + kHeapObjectTagSize);
const int kPageShift = kOffsetShift + kOffsetBits;
const int kPageBits = 32 - (kOffsetBits + kSpaceBits + kHeapObjectTagSize);
const int kPageMask = (1 << kPageBits) - 1;
const int kPageAndOffsetShift = kOffsetShift;
const int kPageAndOffsetBits = kPageBits + kOffsetBits;
const int kPageAndOffsetMask = (1 << kPageAndOffsetBits) - 1;
// These values are special allocation space tags used for
// serialization.
// Mark the pages executable on platforms that support it.
const int kLargeCode = LAST_SPACE + 1;
// Allocate extra remembered-set bits.
const int kLargeFixedArray = LAST_SPACE + 2;
static inline AllocationSpace GetSpace(Address addr) {
const intptr_t encoded = reinterpret_cast<intptr_t>(addr);
int space_number = (static_cast<int>(encoded >> kSpaceShift) & kSpaceMask);
if (space_number == kLOSpaceExecutable) space_number = LO_SPACE;
else if (space_number == kLOSpacePointer) space_number = LO_SPACE;
if (space_number > LAST_SPACE) space_number = LO_SPACE;
return static_cast<AllocationSpace>(space_number);
}
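
For concreteness, a sketch of how a paged-space word is packed under
the constants above; the page and offset values are hypothetical, and
the usual two HeapObject tag bits are assumed:

// kSpaceShift == 2, kOffsetShift == 6, kPageShift == 17 (from above).
// Hypothetical: page 3, word offset 5, in CELL_SPACE (enum value 5):
//   encoded = (3 << kPageShift) | (5 << kOffsetShift)
//           | (CELL_SPACE << kSpaceShift) | kHeapObjectTag
// GetSpace() recovers CELL_SPACE by shifting right kSpaceShift and
// masking with kSpaceMask; any tag above LAST_SPACE (kLargeCode,
// kLargeFixedArray) collapses back to LO_SPACE.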
......@@ -91,7 +88,7 @@ static inline bool IsLargeExecutableObject(Address addr) {
const intptr_t encoded = reinterpret_cast<intptr_t>(addr);
const int space_number =
(static_cast<int>(encoded >> kSpaceShift) & kSpaceMask);
return (space_number == kLOSpaceExecutable);
return (space_number == kLargeCode);
}
......@@ -99,7 +96,7 @@ static inline bool IsLargeFixedArray(Address addr) {
const intptr_t encoded = reinterpret_cast<intptr_t>(addr);
const int space_number =
(static_cast<int>(encoded >> kSpaceShift) & kSpaceMask);
return (space_number == kLOSpacePointer);
return (space_number == kLargeFixedArray);
}
......@@ -147,6 +144,9 @@ class RelativeAddress {
int page_index,
int page_offset)
: space_(space), page_index_(page_index), page_offset_(page_offset) {
// Assert that the space encoding (plus the two pseudo-spaces for
// special large objects) fits in the available bits.
ASSERT(((LAST_SPACE + 2) & ~kSpaceMask) == 0);
ASSERT(space <= LAST_SPACE && space >= 0);
}
......@@ -154,8 +154,7 @@ class RelativeAddress {
Address Encode() const;
AllocationSpace space() const {
if (space_ == kLOSpaceExecutable) return LO_SPACE;
if (space_ == kLOSpacePointer) return LO_SPACE;
if (space_ > LAST_SPACE) return LO_SPACE;
return static_cast<AllocationSpace>(space_);
}
int page_index() const { return page_index_; }
......@@ -165,7 +164,8 @@ class RelativeAddress {
return space_ == CODE_SPACE ||
space_ == OLD_POINTER_SPACE ||
space_ == OLD_DATA_SPACE ||
space_ == MAP_SPACE;
space_ == MAP_SPACE ||
space_ == CELL_SPACE;
}
void next_address(int offset) { page_offset_ += offset; }
......@@ -180,11 +180,11 @@ class RelativeAddress {
void set_to_large_code_object() {
ASSERT(space_ == LO_SPACE);
space_ = kLOSpaceExecutable;
space_ = kLargeCode;
}
void set_to_large_fixed_array() {
ASSERT(space_ == LO_SPACE);
space_ = kLOSpacePointer;
space_ = kLargeFixedArray;
}
......@@ -201,6 +201,7 @@ Address RelativeAddress::Encode() const {
int result = 0;
switch (space_) {
case MAP_SPACE:
case CELL_SPACE:
case OLD_POINTER_SPACE:
case OLD_DATA_SPACE:
case CODE_SPACE:
......@@ -216,8 +217,8 @@ Address RelativeAddress::Encode() const {
result = word_offset << kPageAndOffsetShift;
break;
case LO_SPACE:
case kLOSpaceExecutable:
case kLOSpacePointer:
case kLargeCode:
case kLargeFixedArray:
ASSERT_EQ(0, page_offset_);
ASSERT_EQ(0, page_index_ & ~kPageAndOffsetMask);
result = page_index_ << kPageAndOffsetShift;
......@@ -235,6 +236,7 @@ void RelativeAddress::Verify() {
ASSERT(page_offset_ >= 0 && page_index_ >= 0);
switch (space_) {
case MAP_SPACE:
case CELL_SPACE:
case OLD_POINTER_SPACE:
case OLD_DATA_SPACE:
case CODE_SPACE:
......@@ -245,8 +247,8 @@ void RelativeAddress::Verify() {
ASSERT(page_index_ == 0);
break;
case LO_SPACE:
case kLOSpaceExecutable:
case kLOSpacePointer:
case kLargeCode:
case kLargeFixedArray:
ASSERT(page_offset_ == 0);
break;
}
......@@ -291,6 +293,7 @@ class SimulatedHeapSpace {
void SimulatedHeapSpace::InitEmptyHeap(AllocationSpace space) {
switch (space) {
case MAP_SPACE:
case CELL_SPACE:
case OLD_POINTER_SPACE:
case OLD_DATA_SPACE:
case CODE_SPACE:
......@@ -307,12 +310,15 @@ void SimulatedHeapSpace::InitEmptyHeap(AllocationSpace space) {
void SimulatedHeapSpace::InitCurrentHeap(AllocationSpace space) {
switch (space) {
case MAP_SPACE:
case CELL_SPACE:
case OLD_POINTER_SPACE:
case OLD_DATA_SPACE:
case CODE_SPACE: {
PagedSpace* ps;
if (space == MAP_SPACE) {
ps = Heap::map_space();
} else if (space == CELL_SPACE) {
ps = Heap::cell_space();
} else if (space == OLD_POINTER_SPACE) {
ps = Heap::old_pointer_space();
} else if (space == OLD_DATA_SPACE) {
......@@ -1121,6 +1127,8 @@ void Serializer::PutHeader() {
writer_->PutInt(Heap::code_space()->Size() + Heap::new_space()->Size());
writer_->PutC('|');
writer_->PutInt(Heap::map_space()->Size());
writer_->PutC('|');
writer_->PutInt(Heap::cell_space()->Size());
writer_->PutC(']');
// Write global handles.
writer_->PutC('G');
......@@ -1303,6 +1311,7 @@ static const int kInitArraySize = 32;
Deserializer::Deserializer(const byte* str, int len)
: reader_(str, len),
map_pages_(kInitArraySize),
cell_pages_(kInitArraySize),
old_pointer_pages_(kInitArraySize),
old_data_pages_(kInitArraySize),
code_pages_(kInitArraySize),
......@@ -1475,6 +1484,8 @@ void Deserializer::GetHeader() {
InitPagedSpace(Heap::code_space(), reader_.GetInt(), &code_pages_);
reader_.ExpectC('|');
InitPagedSpace(Heap::map_space(), reader_.GetInt(), &map_pages_);
reader_.ExpectC('|');
InitPagedSpace(Heap::cell_space(), reader_.GetInt(), &cell_pages_);
reader_.ExpectC(']');
// Create placeholders for global handles later to be filled during
// IterateRoots.
......@@ -1607,6 +1618,9 @@ Object* Deserializer::Resolve(Address encoded) {
case MAP_SPACE:
return ResolvePaged(PageIndex(encoded), PageOffset(encoded),
Heap::map_space(), &map_pages_);
case CELL_SPACE:
return ResolvePaged(PageIndex(encoded), PageOffset(encoded),
Heap::cell_space(), &cell_pages_);
case OLD_POINTER_SPACE:
return ResolvePaged(PageIndex(encoded), PageOffset(encoded),
Heap::old_pointer_space(), &old_pointer_pages_);
......
......@@ -320,10 +320,11 @@ class Deserializer: public ObjectVisitor {
bool has_log_; // The file has log information.
// Resolve caches the following:
List<Page*> map_pages_; // All pages in the map space.
List<Page*> map_pages_; // All pages in the map space.
List<Page*> cell_pages_; // All pages in the cell space.
List<Page*> old_pointer_pages_; // All pages in the old pointer space.
List<Page*> old_data_pages_; // All pages in the old data space.
List<Page*> code_pages_;
List<Page*> old_data_pages_; // All pages in the old data space.
List<Page*> code_pages_; // All pages in the code space.
List<Object*> large_objects_; // All known large objects.
// A list of global handles at deserialization time.
List<Object**> global_handles_;
......
......@@ -1269,9 +1269,9 @@ void FreeListNode::set_size(int size_in_bytes) {
set_map(Heap::raw_unchecked_byte_array_map());
ByteArray::cast(this)->set_length(ByteArray::LengthFor(size_in_bytes));
} else if (size_in_bytes == kPointerSize) {
set_map(Heap::raw_unchecked_one_word_filler_map());
set_map(Heap::raw_unchecked_one_pointer_filler_map());
} else if (size_in_bytes == 2 * kPointerSize) {
set_map(Heap::raw_unchecked_two_word_filler_map());
set_map(Heap::raw_unchecked_two_pointer_filler_map());
} else {
UNREACHABLE();
}
......@@ -1280,16 +1280,26 @@ void FreeListNode::set_size(int size_in_bytes) {
Address FreeListNode::next() {
ASSERT(map() == Heap::raw_unchecked_byte_array_map());
ASSERT(Size() >= kNextOffset + kPointerSize);
return Memory::Address_at(address() + kNextOffset);
ASSERT(map() == Heap::raw_unchecked_byte_array_map() ||
map() == Heap::raw_unchecked_two_pointer_filler_map());
if (map() == Heap::raw_unchecked_byte_array_map()) {
ASSERT(Size() >= kNextOffset + kPointerSize);
return Memory::Address_at(address() + kNextOffset);
} else {
return Memory::Address_at(address() + kPointerSize);
}
}
void FreeListNode::set_next(Address next) {
ASSERT(map() == Heap::raw_unchecked_byte_array_map());
ASSERT(Size() >= kNextOffset + kPointerSize);
Memory::Address_at(address() + kNextOffset) = next;
ASSERT(map() == Heap::raw_unchecked_byte_array_map() ||
map() == Heap::raw_unchecked_two_pointer_filler_map());
if (map() == Heap::raw_unchecked_byte_array_map()) {
ASSERT(Size() >= kNextOffset + kPointerSize);
Memory::Address_at(address() + kNextOffset) = next;
} else {
Memory::Address_at(address() + kPointerSize) = next;
}
}
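
The two node layouts that next() and set_next() now distinguish, in
word offsets (the byte-array header is the map word plus a length
word, so kNextOffset is the third word):

// Byte-array free node (size >= 3 words):
//   word 0: map == byte_array_map
//   word 1: length (encodes the block size)
//   word 2: next pointer                     // kNextOffset
//
// Two-pointer free node (size == 2 words):
//   word 0: map == two_pointer_filler_map    // size implied by the map
//   word 1: next pointer                     // address() + kPointerSize
//
// Cell-space free nodes are two words (a JSGlobalPropertyCell is just
// map + value), which is why the size can no longer sit at a fixed
// offset in every node.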
......@@ -1445,42 +1455,42 @@ bool OldSpaceFreeList::Contains(FreeListNode* node) {
#endif
MapSpaceFreeList::MapSpaceFreeList(AllocationSpace owner) {
owner_ = owner;
FixedSizeFreeList::FixedSizeFreeList(AllocationSpace owner, int object_size)
: owner_(owner), object_size_(object_size) {
Reset();
}
void MapSpaceFreeList::Reset() {
void FixedSizeFreeList::Reset() {
available_ = 0;
head_ = NULL;
}
void MapSpaceFreeList::Free(Address start) {
void FixedSizeFreeList::Free(Address start) {
#ifdef DEBUG
for (int i = 0; i < Map::kSize; i += kPointerSize) {
for (int i = 0; i < object_size_; i += kPointerSize) {
Memory::Address_at(start + i) = kZapValue;
}
#endif
ASSERT(!FLAG_always_compact); // We only use the freelists with mark-sweep.
FreeListNode* node = FreeListNode::FromAddress(start);
node->set_size(Map::kSize);
node->set_size(object_size_);
node->set_next(head_);
head_ = node->address();
available_ += Map::kSize;
available_ += object_size_;
}
Object* MapSpaceFreeList::Allocate() {
Object* FixedSizeFreeList::Allocate() {
if (head_ == NULL) {
return Failure::RetryAfterGC(Map::kSize, owner_);
return Failure::RetryAfterGC(object_size_, owner_);
}
ASSERT(!FLAG_always_compact); // We only use the freelists with mark-sweep.
FreeListNode* node = FreeListNode::FromAddress(head_);
head_ = node->next();
available_ -= Map::kSize;
available_ -= object_size_;
return node;
}
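
With the element size now a constructor argument, the two fixed
spaces instantiate their free lists like this (taken from the
FixedSpace, MapSpace and CellSpace constructors later in the diff):

// map space:  free_list_(MAP_SPACE,  Map::kSize)
// cell space: free_list_(CELL_SPACE, JSGlobalPropertyCell::kSize)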
......@@ -1494,7 +1504,6 @@ void OldSpace::PrepareForMarkCompact(bool will_compact) {
// the space is considered 'available' and we will rediscover live data
// and waste during the collection.
MCResetRelocationInfo();
mc_end_of_relocation_ = bottom();
ASSERT(Available() == Capacity());
} else {
// During a non-compacting collection, everything below the linear
......@@ -1510,24 +1519,6 @@ void OldSpace::PrepareForMarkCompact(bool will_compact) {
}
void OldSpace::MCAdjustRelocationEnd(Address address, int size_in_bytes) {
ASSERT(Contains(address));
Address current_top = mc_end_of_relocation_;
Page* current_page = Page::FromAllocationTop(current_top);
// No more objects relocated to this page? Move to the next.
ASSERT(current_top <= current_page->mc_relocation_top);
if (current_top == current_page->mc_relocation_top) {
// The space should already be properly expanded.
Page* next_page = current_page->next_page();
CHECK(next_page->is_valid());
mc_end_of_relocation_ = next_page->ObjectAreaStart();
}
ASSERT(mc_end_of_relocation_ == address);
mc_end_of_relocation_ += size_in_bytes;
}
void OldSpace::MCCommitRelocationInfo() {
// Update fast allocation info.
allocation_info_.top = mc_forwarding_info_.top;
......@@ -1987,25 +1978,13 @@ void OldSpace::PrintRSet() { DoPrintRSet("old"); }
#endif
// -----------------------------------------------------------------------------
// MapSpace implementation
// FixedSpace implementation
void MapSpace::PrepareForMarkCompact(bool will_compact) {
void FixedSpace::PrepareForMarkCompact(bool will_compact) {
if (will_compact) {
// Reset relocation info.
MCResetRelocationInfo();
// Initialize map index entry.
int page_count = 0;
PageIterator it(this, PageIterator::ALL_PAGES);
while (it.has_next()) {
ASSERT_MAP_PAGE_INDEX(page_count);
Page* p = it.next();
ASSERT(p->mc_page_index == page_count);
page_addresses_[page_count++] = p->address();
}
// During a compacting collection, everything in the space is considered
// 'available' (set by the call to MCResetRelocationInfo) and we will
// rediscover live and wasted bytes during the collection.
......@@ -2023,7 +2002,7 @@ void MapSpace::PrepareForMarkCompact(bool will_compact) {
}
void MapSpace::MCCommitRelocationInfo() {
void FixedSpace::MCCommitRelocationInfo() {
// Update fast allocation info.
allocation_info_.top = mc_forwarding_info_.top;
allocation_info_.limit = mc_forwarding_info_.limit;
......@@ -2053,7 +2032,8 @@ void MapSpace::MCCommitRelocationInfo() {
// Slow case for normal allocation. Try in order: (1) allocate in the next
// page in the space, (2) allocate off the space's free list, (3) expand the
// space, (4) fail.
HeapObject* MapSpace::SlowAllocateRaw(int size_in_bytes) {
HeapObject* FixedSpace::SlowAllocateRaw(int size_in_bytes) {
ASSERT_EQ(object_size_in_bytes_, size_in_bytes);
// Linear allocation in this space has failed. If there is another page
// in the space, move to that page and allocate there. This allocation
// should succeed.
......@@ -2062,10 +2042,10 @@ HeapObject* MapSpace::SlowAllocateRaw(int size_in_bytes) {
return AllocateInNextPage(current_page, size_in_bytes);
}
// There is no next page in this space. Try free list allocation. The
// map space free list implicitly assumes that all free blocks are map
// sized.
if (size_in_bytes == Map::kSize) {
// There is no next page in this space. Try free list allocation.
// The fixed space free list implicitly assumes that all free blocks
// are of the fixed size.
if (size_in_bytes == object_size_in_bytes_) {
Object* result = free_list_.Allocate();
if (!result->IsFailure()) {
accounting_stats_.AllocateBytes(size_in_bytes);
......@@ -2094,11 +2074,12 @@ HeapObject* MapSpace::SlowAllocateRaw(int size_in_bytes) {
// Move to the next page (there is assumed to be one) and allocate there.
// The top of page block is always wasted, because it is too small to hold a
// map.
HeapObject* MapSpace::AllocateInNextPage(Page* current_page,
int size_in_bytes) {
HeapObject* FixedSpace::AllocateInNextPage(Page* current_page,
int size_in_bytes) {
ASSERT(current_page->next_page()->is_valid());
ASSERT(current_page->ObjectAreaEnd() - allocation_info_.top == kPageExtra);
accounting_stats_.WasteBytes(kPageExtra);
ASSERT(current_page->ObjectAreaEnd() - allocation_info_.top == page_extra_);
ASSERT_EQ(object_size_in_bytes_, size_in_bytes);
accounting_stats_.WasteBytes(page_extra_);
SetAllocationInfo(&allocation_info_, current_page->next_page());
return AllocateLinearly(&allocation_info_, size_in_bytes);
}
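
The per-page waste charged above is the remainder of the page's
object area divided by the fixed object size; hypothetical numbers
for illustration:

// page_extra_ = Page::kObjectAreaSize % object_size_in_bytes
// e.g. an (assumed) 8000-byte object area with 48-byte objects:
//   page_extra_ = 8000 % 48 == 32
// so 32 bytes at the top of every full page can never hold an object
// and are accounted as waste when allocation moves to the next page.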
......@@ -2107,7 +2088,7 @@ HeapObject* MapSpace::AllocateInNextPage(Page* current_page,
#ifdef DEBUG
// We do not assume that the PageIterator works, because it depends on the
// invariants we are checking during verification.
void MapSpace::Verify() {
void FixedSpace::Verify() {
// The allocation pointer should be valid, and it should be in a page in the
// space.
ASSERT(allocation_info_.VerifyPagedAllocation());
......@@ -2130,7 +2111,7 @@ void MapSpace::Verify() {
// The next page will be above the allocation top.
above_allocation_top = true;
} else {
ASSERT(top == current_page->ObjectAreaEnd() - kPageExtra);
ASSERT(top == current_page->ObjectAreaEnd() - page_extra_);
}
// It should be packed with objects from the bottom to the top.
......@@ -2144,8 +2125,8 @@ void MapSpace::Verify() {
ASSERT(map->IsMap());
ASSERT(Heap::map_space()->Contains(map));
// The object should be a map or a byte array.
ASSERT(object->IsMap() || object->IsByteArray());
// Verify the object in the space.
VerifyObject(object);
// The object itself should look OK.
object->Verify();
......@@ -2168,7 +2149,7 @@ void MapSpace::Verify() {
}
void MapSpace::ReportStatistics() {
void FixedSpace::ReportStatistics() {
int pct = Available() * 100 / Capacity();
PrintF(" capacity: %d, waste: %d, available: %d, %%%d\n",
Capacity(), Waste(), Available(), pct);
......@@ -2215,7 +2196,50 @@ void MapSpace::ReportStatistics() {
}
void MapSpace::PrintRSet() { DoPrintRSet("map"); }
void FixedSpace::PrintRSet() { DoPrintRSet(name_); }
#endif
// -----------------------------------------------------------------------------
// MapSpace implementation
void MapSpace::PrepareForMarkCompact(bool will_compact) {
// Call prepare of the super class.
FixedSpace::PrepareForMarkCompact(will_compact);
if (will_compact) {
// Initialize map index entry.
int page_count = 0;
PageIterator it(this, PageIterator::ALL_PAGES);
while (it.has_next()) {
ASSERT_MAP_PAGE_INDEX(page_count);
Page* p = it.next();
ASSERT(p->mc_page_index == page_count);
page_addresses_[page_count++] = p->address();
}
}
}
#ifdef DEBUG
void MapSpace::VerifyObject(HeapObject* object) {
// The object should be a map or a free-list node.
ASSERT(object->IsMap() || object->IsByteArray());
}
#endif
// -----------------------------------------------------------------------------
// GlobalPropertyCellSpace implementation
#ifdef DEBUG
void CellSpace::VerifyObject(HeapObject* object) {
// The object should be a global object property cell or a free-list node.
ASSERT(object->IsJSGlobalPropertyCell() ||
object->map() == Heap::two_pointer_filler_map());
}
#endif
......
......@@ -1315,8 +1315,7 @@ class OldSpaceFreeList BASE_EMBEDDED {
private:
// The size range of blocks, in bytes. (Smaller allocations are allowed, but
// will always result in waste.)
static const int kMinBlockSize =
POINTER_SIZE_ALIGN(ByteArray::kHeaderSize) + kPointerSize;
static const int kMinBlockSize = 2 * kPointerSize;
static const int kMaxBlockSize = Page::kMaxHeapObjectSize;
// The identity of the owning space, for building allocation Failure
......@@ -1391,9 +1390,9 @@ class OldSpaceFreeList BASE_EMBEDDED {
// The free list for the map space.
class MapSpaceFreeList BASE_EMBEDDED {
class FixedSizeFreeList BASE_EMBEDDED {
public:
explicit MapSpaceFreeList(AllocationSpace owner);
FixedSizeFreeList(AllocationSpace owner, int object_size);
// Clear the free list.
void Reset();
......@@ -1402,12 +1401,12 @@ class MapSpaceFreeList BASE_EMBEDDED {
int available() { return available_; }
// Place a node on the free list. The block starting at 'start' (assumed to
// have size Map::kSize) is placed on the free list. Bookkeeping
// have size object_size_) is placed on the free list. Bookkeeping
// information will be written to the block, i.e., its contents will be
// destroyed. The start address should be word aligned.
void Free(Address start);
// Allocate a map-sized block from the free list. The block is unitialized.
// Allocate a fixed-size block from the free list. The block is uninitialized.
// A failure is returned if no block is available.
Object* Allocate();
......@@ -1422,7 +1421,10 @@ class MapSpaceFreeList BASE_EMBEDDED {
// objects.
AllocationSpace owner_;
DISALLOW_COPY_AND_ASSIGN(MapSpaceFreeList);
// The size of the objects in this space.
int object_size_;
DISALLOW_COPY_AND_ASSIGN(FixedSizeFreeList);
};
......@@ -1460,12 +1462,6 @@ class OldSpace : public PagedSpace {
// clears the free list.
virtual void PrepareForMarkCompact(bool will_compact);
// Adjust the top of relocation pointer to point to the end of the object
// given by 'address' and 'size_in_bytes'. Move it to the next page if
// necessary, ensure that it points to the address, then increment it by the
// size.
void MCAdjustRelocationEnd(Address address, int size_in_bytes);
// Updates the allocation pointer to the relocation top after a mark-compact
// collection.
virtual void MCCommitRelocationInfo();
......@@ -1492,39 +1488,40 @@ class OldSpace : public PagedSpace {
// The space's free list.
OldSpaceFreeList free_list_;
// During relocation, we keep a pointer to the most recently relocated
// object in order to know when to move to the next page.
Address mc_end_of_relocation_;
public:
TRACK_MEMORY("OldSpace")
};
// -----------------------------------------------------------------------------
// Old space for all map objects
// Old space for objects of a fixed size
class MapSpace : public PagedSpace {
class FixedSpace : public PagedSpace {
public:
// Creates a map space object with a maximum capacity.
explicit MapSpace(int max_capacity, AllocationSpace id)
: PagedSpace(max_capacity, id, NOT_EXECUTABLE), free_list_(id) { }
FixedSpace(int max_capacity,
AllocationSpace id,
int object_size_in_bytes,
const char* name)
: PagedSpace(max_capacity, id, NOT_EXECUTABLE),
object_size_in_bytes_(object_size_in_bytes),
name_(name),
free_list_(id, object_size_in_bytes),
page_extra_(Page::kObjectAreaSize % object_size_in_bytes) { }
// The top of allocation in a page in this space. Undefined if page is unused.
virtual Address PageAllocationTop(Page* page) {
return page == TopPageOf(allocation_info_) ? top()
: page->ObjectAreaEnd() - kPageExtra;
: page->ObjectAreaEnd() - page_extra_;
}
// Give a map-sized block of memory to the space's free list.
int object_size_in_bytes() { return object_size_in_bytes_; }
// Give a fixed-size block of memory to the space's free list.
void Free(Address start) {
free_list_.Free(start);
accounting_stats_.DeallocateBytes(Map::kSize);
}
// Given an index, returns the page address.
Address PageAddress(int page_index) { return page_addresses_[page_index]; }
// Prepares for a mark-compact GC.
virtual void PrepareForMarkCompact(bool will_compact);
......@@ -1536,18 +1533,16 @@ class MapSpace : public PagedSpace {
// Verify integrity of this space.
virtual void Verify();
// Implement by subclasses to verify an actual object in the space.
virtual void VerifyObject(HeapObject* obj) = 0;
// Reports statistic info of the space
void ReportStatistics();
// Dump the remembered sets in the space to stdout.
void PrintRSet();
#endif
// Constants.
static const int kMapPageIndexBits = 10;
static const int kMaxMapPageIndex = (1 << kMapPageIndexBits) - 1;
static const int kPageExtra = Page::kObjectAreaSize % Map::kSize;
protected:
// Virtual function in the superclass. Slow path of AllocateRaw.
HeapObject* SlowAllocateRaw(int size_in_bytes);
......@@ -1557,9 +1552,44 @@ class MapSpace : public PagedSpace {
HeapObject* AllocateInNextPage(Page* current_page, int size_in_bytes);
private:
// The size of objects in this space.
int object_size_in_bytes_;
// The name of this space.
const char* name_;
// The space's free list.
MapSpaceFreeList free_list_;
FixedSizeFreeList free_list_;
// Bytes of each page that cannot be allocated.
int page_extra_;
};
// -----------------------------------------------------------------------------
// Old space for all map objects
class MapSpace : public FixedSpace {
public:
// Creates a map space object with a maximum capacity.
MapSpace(int max_capacity, AllocationSpace id)
: FixedSpace(max_capacity, id, Map::kSize, "map") {}
// Prepares for a mark-compact GC.
virtual void PrepareForMarkCompact(bool will_compact);
// Given an index, returns the page address.
Address PageAddress(int page_index) { return page_addresses_[page_index]; }
// Constants.
static const int kMaxMapPageIndex = (1 << MapWord::kMapPageIndexBits) - 1;
protected:
#ifdef DEBUG
virtual void VerifyObject(HeapObject* obj);
#endif
private:
// An array of page start address in a map space.
Address page_addresses_[kMaxMapPageIndex + 1];
......@@ -1568,6 +1598,25 @@ class MapSpace : public PagedSpace {
};
// -----------------------------------------------------------------------------
// Old space for all global object property cell objects
class CellSpace : public FixedSpace {
public:
// Creates a property cell space object with a maximum capacity.
CellSpace(int max_capacity, AllocationSpace id)
: FixedSpace(max_capacity, id, JSGlobalPropertyCell::kSize, "cell") {}
protected:
#ifdef DEBUG
virtual void VerifyObject(HeapObject* obj);
#endif
public:
TRACK_MEMORY("MapSpace")
};
// -----------------------------------------------------------------------------
// Large objects ( > Page::kMaxHeapObjectSize ) are allocated and managed by
// the large object space. A large object is allocated from OS heap with
......