Commit 612d9b82 authored by erik.corry@gmail.com

Add some interfaces to the GC that allow us to reserve space. This is needed
for partial snapshots.  After reserving space we can be sure that allocations
will happen linearly (no GCs and no free-list allocation).  This change also
contains the start of the partial snapshot support, which, however, is not yet
completed or tested.
Review URL: http://codereview.chromium.org/545026

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@3584 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
parent b63f5cf4
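In outline, the new interface is used like this: reserve space in a space up front (collecting garbage and retrying if the reservation fails), then allocate inside a LinearAllocationScope, during which allocation proceeds bump-pointer style. A minimal sketch, modelled on the LinearAllocation cctest added at the end of this change; the helper name and the 4 * KB figure are invented for illustration:

static void ReserveThenAllocateLinearly() {
  const int kBytesNeeded = 4 * KB;  // arbitrary example size
  if (!Heap::old_pointer_space()->ReserveSpace(kBytesNeeded)) {
    // Not enough room without touching the free lists: collect and retry.
    Heap::CollectGarbage(kBytesNeeded, OLD_POINTER_SPACE);
    CHECK(Heap::old_pointer_space()->ReserveSpace(kBytesNeeded));
  }
  LinearAllocationScope scope;
  // Until the scope ends, allocations of up to kBytesNeeded bytes in old
  // pointer space are linear: no GC and no free-list allocation.
  Object* array = Heap::AllocateFixedArray(4, TENURED);
  CHECK(!array->IsFailure());
}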
......@@ -1581,8 +1581,8 @@ void FastCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
false_label_ = &push_false;
break;
}
// Convert current context to test context: End pre-test code.
switch (expr->op()) {
case Token::IN: {
__ InvokeBuiltin(Builtins::IN, CALL_JS);
......
......@@ -306,7 +306,7 @@ void StackHandler::Cook(Code* code) {
void StackHandler::Uncook(Code* code) {
-  ASSERT(MarkCompactCollector::IsCompacting());
+  ASSERT(MarkCompactCollector::HasCompacted());
set_pc(code->instruction_start() + OffsetFrom(pc()));
ASSERT(code->contains(pc()));
}
......@@ -336,7 +336,7 @@ void StackFrame::CookFramesForThread(ThreadLocalTop* thread) {
void StackFrame::UncookFramesForThread(ThreadLocalTop* thread) {
// Only uncooking frames when the collector is compacting and thus moving code
// around.
-  ASSERT(MarkCompactCollector::IsCompacting());
+  ASSERT(MarkCompactCollector::HasCompacted());
ASSERT(thread->stack_is_cooked());
for (StackFrameIterator it(thread); !it.done(); it.Advance()) {
it.frame()->Uncook();
......
......@@ -269,7 +269,7 @@ class Heap : public AllStatic {
return reinterpret_cast<Address>(&always_allocate_scope_depth_);
}
static bool linear_allocation() {
return linear_allocation_scope_depth_ != 0;
}
static Address* NewSpaceAllocationTopAddress() {
......@@ -834,11 +834,15 @@ class Heap : public AllStatic {
> old_gen_promotion_limit_;
}
static intptr_t OldGenerationSpaceAvailable() {
return old_gen_allocation_limit_ -
(PromotedSpaceSize() + PromotedExternalMemorySize());
}
// True if we have reached the allocation limit in the old generation that
// should artificially cause a GC right now.
static bool OldGenerationAllocationLimitReached() {
-    return (PromotedSpaceSize() + PromotedExternalMemorySize())
-        > old_gen_allocation_limit_;
+    return OldGenerationSpaceAvailable() < 0;
}
// Can be called when the embedding application is idle.
......@@ -1052,9 +1056,9 @@ class Heap : public AllStatic {
// Helper function used by CopyObject to copy a source object to an
// allocated target object and update the forwarding pointer in the source
// object. Returns the target object.
-  static HeapObject* MigrateObject(HeapObject* source,
-                                   HeapObject* target,
-                                   int size);
+  static inline HeapObject* MigrateObject(HeapObject* source,
+                                          HeapObject* target,
+                                          int size);
// Helper function that governs the promotion policy from new space to
// old. If the object's old address lies below the new space's age
......
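To make the heap.h refactoring above concrete (numbers invented): if old_gen_allocation_limit_ is 140 MB and PromotedSpaceSize() + PromotedExternalMemorySize() adds up to 150 MB, then OldGenerationSpaceAvailable() returns -10 MB and OldGenerationAllocationLimitReached() is true, exactly as the old inline comparison was. Factoring out the headroom figure lets LargeObjectSpace::ReserveSpace in spaces.cc below reuse it directly.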
......@@ -92,7 +92,15 @@ class MarkCompactCollector: public AllStatic {
static bool HasCompacted() { return compacting_collection_; }
// True after the Prepare phase if the compaction is taking place.
-  static bool IsCompacting() { return compacting_collection_; }
+  static bool IsCompacting() {
+#ifdef DEBUG
+    // For the purposes of asserts we don't want this to keep returning true
+    // after the collection is completed.
+    return state_ != IDLE && compacting_collection_;
+#else
+    return compacting_collection_;
+#endif
+  }
// The count of the number of objects left marked at the end of the last
// completed full GC (expected to be zero).
......
......@@ -918,7 +918,8 @@ void Serializer::Synchronize(const char* tag) {
Serializer::Serializer(SnapshotByteSink* sink)
: sink_(sink),
current_root_index_(0),
-      external_reference_encoder_(NULL) {
+      external_reference_encoder_(NULL),
+      partial_(false) {
for (int i = 0; i <= LAST_SPACE; i++) {
fullness_[i] = 0;
}
......@@ -946,6 +947,16 @@ void Serializer::Serialize() {
}
void Serializer::SerializePartial(Object** object) {
partial_ = true;
external_reference_encoder_ = new ExternalReferenceEncoder();
this->VisitPointer(object);
delete external_reference_encoder_;
external_reference_encoder_ = NULL;
SerializationAddressMapper::Zap();
}
void Serializer::VisitPointers(Object** start, Object** end) {
for (Object** current = start; current < end; current++) {
if ((*current)->IsSmi()) {
......@@ -961,11 +972,29 @@ void Serializer::VisitPointers(Object** start, Object** end) {
}
int Serializer::RootIndex(HeapObject* heap_object) {
for (int i = 0; i < Heap::kRootListLength; i++) {
Object* root = Heap::roots_address()[i];
if (root == heap_object) return i;
}
return kInvalidRootIndex;
}
void Serializer::SerializeObject(
Object* o,
ReferenceRepresentation reference_representation) {
CHECK(o->IsHeapObject());
HeapObject* heap_object = HeapObject::cast(o);
if (partial_) {
int root_index = RootIndex(heap_object);
if (root_index != kInvalidRootIndex) {
sink_->Put(ROOT_SERIALIZATION, "RootSerialization");
sink_->PutInt(root_index, "root_index");
return;
}
// TODO(erikcorry): Handle symbols here.
}
if (SerializationAddressMapper::IsMapped(heap_object)) {
int space = SpaceOfAlreadySerializedObject(heap_object);
int address = SerializationAddressMapper::MappedTo(heap_object);
......
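Concretely, when the object handed to SerializePartial is itself a heap root (the undefined value, say), the serializer now emits just two items: the ROOT_SERIALIZATION tag (39, per the serialize.h change below) followed by the root's index in Heap's root list, and it does not walk the object's body at all. Anything that is not a root still goes through the ordinary back-reference/object serialization path, with symbols left as a TODO.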
......@@ -199,7 +199,8 @@ class SerDes: public ObjectVisitor {
SYNCHRONIZE = 36,
START_NEW_PAGE_SERIALIZATION = 37,
NATIVES_STRING_RESOURCE = 38,
-    // Free: 39-47.
+    ROOT_SERIALIZATION = 39,
+    // Free: 40-47.
BACKREF_SERIALIZATION = 48,
// One per space, must be kSpaceMask aligned.
// Free: 57-63.
......@@ -293,10 +294,17 @@ class SnapshotByteSink {
class Serializer : public SerDes {
public:
explicit Serializer(SnapshotByteSink* sink);
-  // Serialize the current state of the heap. This operation destroys the
-  // heap contents.
+  // Serialize the current state of the heap.
void Serialize();
// Serialize a single object and the objects reachable from it.
void SerializePartial(Object** obj);
void VisitPointers(Object** start, Object** end);
// You can call this after serialization to find out how much space was used
// in each space.
int CurrentAllocationAddress(int space) {
if (SpaceIsLarge(space)) space = LO_SPACE;
return fullness_[space];
}
static void Enable() {
if (!serialization_enabled_) {
......@@ -366,13 +374,11 @@ class Serializer : public SerDes {
// once the map has been used for the serialization address.
static int SpaceOfAlreadySerializedObject(HeapObject* object);
int Allocate(int space, int size, bool* new_page_started);
-  int CurrentAllocationAddress(int space) {
-    if (SpaceIsLarge(space)) space = LO_SPACE;
-    return fullness_[space];
-  }
int EncodeExternalReference(Address addr) {
return external_reference_encoder_->Encode(addr);
}
int RootIndex(HeapObject* heap_object);
static const int kInvalidRootIndex = -1;
// Keep track of the fullness of each space in order to generate
// relative addresses for back references. Large objects are
......@@ -382,6 +388,7 @@ class Serializer : public SerDes {
SnapshotByteSink* sink_;
int current_root_index_;
ExternalReferenceEncoder* external_reference_encoder_;
bool partial_;
static bool serialization_enabled_;
// Did we already make use of the fact that serialization was not enabled?
static bool too_late_to_enable_now_;
......
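For orientation, here is what driving the new public interface might look like. Serializer and SnapshotByteSink come from the header above; CountingSnapshotSink and PartialSnapshotSize are invented for this sketch, and it assumes that Put(int byte, const char* description) is the only pure virtual member of SnapshotByteSink, as the sink_->Put(...) calls in serialize.cc suggest. A real embedder would write the bytes somewhere useful instead of just counting them.

// Hypothetical sink for illustration: counts the bytes the serializer emits.
class CountingSnapshotSink : public SnapshotByteSink {
 public:
  CountingSnapshotSink() : position_(0) { }
  virtual void Put(int byte, const char* description) { position_++; }
  int position() { return position_; }
 private:
  int position_;
};

// Sketch: measure how many snapshot bytes a single object (and everything
// reachable from it) would take.
static int PartialSnapshotSize(Object** root) {
  CountingSnapshotSink sink;
  Serializer serializer(&sink);
  serializer.SerializePartial(root);
  // CurrentAllocationAddress(space) now reports how much room the snapshot
  // will need in each space when it is deserialized.
  return sink.position();
}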
......@@ -92,6 +92,7 @@ bool HeapObjectIterator::HasNextInNextPage() {
cur_addr_ = cur_page->ObjectAreaStart();
cur_limit_ = (cur_page == end_page_) ? end_addr_ : cur_page->AllocationTop();
if (cur_addr_ == end_addr_) return false;
ASSERT(cur_addr_ < cur_limit_);
#ifdef DEBUG
Verify();
......@@ -1822,6 +1823,53 @@ void OldSpace::MCCommitRelocationInfo() {
}
bool NewSpace::ReserveSpace(int bytes) {
// We can't reliably unpack a partial snapshot that needs more new space
// than the minimum NewSpace size.
ASSERT(bytes <= InitialCapacity());
Address limit = allocation_info_.limit;
Address top = allocation_info_.top;
return limit - top >= bytes;
}
bool PagedSpace::ReserveSpace(int bytes) {
Address limit = allocation_info_.limit;
Address top = allocation_info_.top;
if (limit - top >= bytes) return true;
// There wasn't enough space in the current page. Let's put the rest
// of the page on the free list and start a fresh page.
PutRestOfCurrentPageOnFreeList(TopPageOf(allocation_info_));
Page* reserved_page = TopPageOf(allocation_info_);
int bytes_left_to_reserve = bytes;
while (bytes_left_to_reserve > 0) {
if (!reserved_page->next_page()->is_valid()) {
if (Heap::OldGenerationAllocationLimitReached()) return false;
Expand(reserved_page);
}
bytes_left_to_reserve -= Page::kPageSize;
reserved_page = reserved_page->next_page();
if (!reserved_page->is_valid()) return false;
}
ASSERT(TopPageOf(allocation_info_)->next_page()->is_valid());
SetAllocationInfo(&allocation_info_,
TopPageOf(allocation_info_)->next_page());
return true;
}
// You have to call this last, since the implementation from PagedSpace
// doesn't know that memory was 'promised' to large object space.
bool LargeObjectSpace::ReserveSpace(int bytes) {
// We add a slack-factor of 2 in order to have space for the remembered
// set and a series of large-object allocations that are only just larger
// than the page size.
return Heap::OldGenerationSpaceAvailable() >= bytes * 2;
}
// Slow case for normal allocation. Try in order: (1) allocate in the next
// page in the space, (2) allocate off the space's free list, (3) expand the
// space, (4) fail.
......@@ -1865,19 +1913,37 @@ HeapObject* OldSpace::SlowAllocateRaw(int size_in_bytes) {
}
-// Add the block at the top of the page to the space's free list, set the
-// allocation info to the next page (assumed to be one), and allocate
-// linearly there.
-HeapObject* OldSpace::AllocateInNextPage(Page* current_page,
-                                         int size_in_bytes) {
-  ASSERT(current_page->next_page()->is_valid());
-  // Add the block at the top of this page to the free list.
void OldSpace::PutRestOfCurrentPageOnFreeList(Page* current_page) {
int free_size =
static_cast<int>(current_page->ObjectAreaEnd() - allocation_info_.top);
if (free_size > 0) {
int wasted_bytes = free_list_.Free(allocation_info_.top, free_size);
accounting_stats_.WasteBytes(wasted_bytes);
}
}
void FixedSpace::PutRestOfCurrentPageOnFreeList(Page* current_page) {
int free_size =
static_cast<int>(current_page->ObjectAreaEnd() - allocation_info_.top);
// In the fixed space free list all the free list items have the right size.
// We use up the rest of the page while preserving this invariant.
while (free_size >= object_size_in_bytes_) {
free_list_.Free(allocation_info_.top);
allocation_info_.top += object_size_in_bytes_;
free_size -= object_size_in_bytes_;
accounting_stats_.WasteBytes(object_size_in_bytes_);
}
}
// Add the block at the top of the page to the space's free list, set the
// allocation info to the next page (assumed to be one), and allocate
// linearly there.
HeapObject* OldSpace::AllocateInNextPage(Page* current_page,
int size_in_bytes) {
ASSERT(current_page->next_page()->is_valid());
PutRestOfCurrentPageOnFreeList(current_page);
SetAllocationInfo(&allocation_info_, current_page->next_page());
return AllocateLinearly(&allocation_info_, size_in_bytes);
}
......
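Two details of the spaces.cc implementation above are worth spelling out. PagedSpace::ReserveSpace counts in whole pages, so a request of 2 * Page::kPageSize + 1 bytes makes its loop run three times and therefore requires three valid pages beyond the current top page. And, per the comment on LargeObjectSpace::ReserveSpace, the large object space has to be asked last, since the paged-space reservations may use memory and leave less for large objects. A sketch of a caller respecting that ordering (the function name and sizes are invented):

// Sketch: reserve paged-space room first, large object space last.
static bool ReserveForPartialSnapshot(int paged_bytes, int large_object_bytes) {
  if (!Heap::old_pointer_space()->ReserveSpace(paged_bytes)) return false;
  if (!Heap::old_data_space()->ReserveSpace(paged_bytes)) return false;
  // Only now consult the large object space; it checks the remaining
  // old-generation headroom with a slack factor of two.
  return Heap::lo_space()->ReserveSpace(large_object_bytes);
}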
......@@ -305,6 +305,14 @@ class Space : public Malloced {
virtual void Print() = 0;
#endif
// After calling this we can allocate a certain number of bytes using only
// linear allocation (with a LinearAllocationScope and an AlwaysAllocateScope)
// without using freelists or causing a GC. This is used by partial
// snapshots. It returns true if space was reserved or false if a GC is
// needed. For paged spaces the space requested must include the space wasted
// at the end of each page when allocating linearly.
virtual bool ReserveSpace(int bytes) = 0;
private:
AllocationSpace id_;
Executability executable_;
......@@ -887,6 +895,10 @@ class PagedSpace : public Space {
// collection.
inline Object* MCAllocateRaw(int size_in_bytes);
virtual bool ReserveSpace(int bytes);
// Used by ReserveSpace.
virtual void PutRestOfCurrentPageOnFreeList(Page* current_page) = 0;
// ---------------------------------------------------------------------------
// Mark-compact collection support functions
......@@ -1115,13 +1127,18 @@ class SemiSpace : public Space {
return static_cast<int>(addr - low());
}
-  // If we don't have this here then SemiSpace will be abstract. However
-  // it should never be called.
+  // If we don't have these here then SemiSpace will be abstract. However
+  // they should never be called.
virtual int Size() {
UNREACHABLE();
return 0;
}
virtual bool ReserveSpace(int bytes) {
UNREACHABLE();
return false;
}
bool is_committed() { return committed_; }
bool Commit();
bool Uncommit();
......@@ -1345,6 +1362,8 @@ class NewSpace : public Space {
bool ToSpaceContains(Address a) { return to_space_.Contains(a); }
bool FromSpaceContains(Address a) { return from_space_.Contains(a); }
virtual bool ReserveSpace(int bytes);
#ifdef ENABLE_HEAP_PROTECTION
// Protect/unprotect the space by marking it read-only/writable.
virtual void Protect();
......@@ -1631,6 +1650,8 @@ class OldSpace : public PagedSpace {
// collection.
virtual void MCCommitRelocationInfo();
virtual void PutRestOfCurrentPageOnFreeList(Page* current_page);
#ifdef DEBUG
// Reports statistics for the space
void ReportStatistics();
......@@ -1692,6 +1713,8 @@ class FixedSpace : public PagedSpace {
// collection.
virtual void MCCommitRelocationInfo();
virtual void PutRestOfCurrentPageOnFreeList(Page* current_page);
#ifdef DEBUG
// Reports statistic info of the space
void ReportStatistics();
......@@ -1899,6 +1922,11 @@ class LargeObjectSpace : public Space {
// Checks whether the space is empty.
bool IsEmpty() { return first_chunk_ == NULL; }
// See the comments for ReserveSpace in the Space class. This has to be
// called after ReserveSpace has been called on the paged spaces, since they
// may use some memory, leaving less for large objects.
virtual bool ReserveSpace(int bytes);
#ifdef ENABLE_HEAP_PROTECTION
// Protect/unprotect the space by marking it read-only/writable.
void Protect();
......
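As a worked example of the "space wasted at the end of each page" requirement in the Space::ReserveSpace comment: if k objects of size s fit on a page, every full page wastes Page::kObjectAreaSize - k * s bytes when allocating linearly, so a reservation meant to cover m full pages of such objects must include m * (Page::kObjectAreaSize - k * s) bytes of slack on top of the object bytes themselves. The new LinearAllocation cctest below deals with the same issue from the other side, rounding its expected addresses up to the next page boundary whenever an allocation would not fit in the current page.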
......@@ -37,6 +37,8 @@
#include "scopeinfo.h"
#include "snapshot.h"
#include "cctest.h"
#include "spaces.h"
#include "objects.h"
using namespace v8::internal;
......@@ -277,6 +279,141 @@ DEPENDENT_TEST(DeserializeFromSecondSerializationAndRunScript2,
}
TEST(LinearAllocation) {
v8::V8::Initialize();
NewSpace* new_space = Heap::new_space();
PagedSpace* old_pointer_space = Heap::old_pointer_space();
PagedSpace* old_data_space = Heap::old_data_space();
PagedSpace* code_space = Heap::code_space();
PagedSpace* map_space = Heap::map_space();
PagedSpace* cell_space = Heap::cell_space();
int new_space_max = 512 * KB;
for (int size = 1000; size < 5 * MB; size *= 1.5) {
bool gc_performed = true;
while (gc_performed) {
gc_performed = false;
if (size < new_space_max) {
if (!new_space->ReserveSpace(size)) {
Heap::CollectGarbage(size, NEW_SPACE);
gc_performed = true;
CHECK(new_space->ReserveSpace(size));
}
}
if (!old_pointer_space->ReserveSpace(size)) {
Heap::CollectGarbage(size, OLD_POINTER_SPACE);
gc_performed = true;
CHECK(old_pointer_space->ReserveSpace(size));
}
if (!(old_data_space->ReserveSpace(size))) {
Heap::CollectGarbage(size, OLD_DATA_SPACE);
gc_performed = true;
CHECK(old_data_space->ReserveSpace(size));
}
if (!(code_space->ReserveSpace(size))) {
Heap::CollectGarbage(size, CODE_SPACE);
gc_performed = true;
CHECK(code_space->ReserveSpace(size));
}
if (!(map_space->ReserveSpace(size))) {
Heap::CollectGarbage(size, MAP_SPACE);
gc_performed = true;
CHECK(map_space->ReserveSpace(size));
}
if (!(cell_space->ReserveSpace(size))) {
Heap::CollectGarbage(size, CELL_SPACE);
gc_performed = true;
CHECK(cell_space->ReserveSpace(size));
}
}
LinearAllocationScope scope;
const int kSmallFixedArrayLength = 4;
const int kSmallFixedArraySize =
FixedArray::kHeaderSize + kSmallFixedArrayLength * kPointerSize;
const int kSmallStringLength = 16;
const int kSmallStringSize =
SeqAsciiString::kHeaderSize + kSmallStringLength;
const int kMapSize = Map::kSize;
if (size < new_space_max) {
Object* new_last = NULL;
for (int i = 0;
i + kSmallFixedArraySize <= size; i += kSmallFixedArraySize) {
Object* o = Heap::AllocateFixedArray(kSmallFixedArrayLength);
if (new_last != NULL) {
CHECK_EQ(reinterpret_cast<char*>(o),
reinterpret_cast<char*>(new_last) + kSmallFixedArraySize);
}
new_last = o;
}
}
Object* new_pointer = NULL;
for (int i = 0;
i + kSmallFixedArraySize <= size;
i += kSmallFixedArraySize) {
Object* o = Heap::AllocateFixedArray(kSmallFixedArrayLength, TENURED);
int old_page_fullness = i % Page::kPageSize;
int page_fullness = (i + kSmallFixedArraySize) % Page::kPageSize;
if (page_fullness < old_page_fullness ||
page_fullness > Page::kObjectAreaSize) {
i = RoundUp(i, Page::kPageSize);
new_pointer = NULL;
}
if (new_pointer != NULL) {
CHECK_EQ(reinterpret_cast<char*>(o),
reinterpret_cast<char*>(new_pointer) + kSmallFixedArraySize);
}
new_pointer = o;
}
new_pointer = NULL;
for (int i = 0; i + kSmallStringSize <= size; i += kSmallStringSize) {
Object* o = Heap::AllocateRawAsciiString(kSmallStringLength, TENURED);
int old_page_fullness = i % Page::kPageSize;
int page_fullness = (i + kSmallStringSize) % Page::kPageSize;
if (page_fullness < old_page_fullness ||
page_fullness > Page::kObjectAreaSize) {
i = RoundUp(i, Page::kPageSize);
new_pointer = NULL;
}
if (new_pointer != NULL) {
CHECK_EQ(reinterpret_cast<char*>(o),
reinterpret_cast<char*>(new_pointer) + kSmallStringSize);
}
new_pointer = o;
}
new_pointer = NULL;
for (int i = 0; i + kMapSize <= size; i += kMapSize) {
Object* o = Heap::AllocateMap(JS_OBJECT_TYPE, 42 * kPointerSize);
int old_page_fullness = i % Page::kPageSize;
int page_fullness = (i + kMapSize) % Page::kPageSize;
if (page_fullness < old_page_fullness ||
page_fullness > Page::kObjectAreaSize) {
i = RoundUp(i, Page::kPageSize);
new_pointer = NULL;
}
if (new_pointer != NULL) {
CHECK_EQ(reinterpret_cast<char*>(o),
reinterpret_cast<char*>(new_pointer) + kMapSize);
}
new_pointer = o;
}
if (size > Page::kObjectAreaSize) {
// Support for reserving space in large object space is not there yet,
// but using an always-allocate scope is fine for now.
AlwaysAllocateScope always;
int large_object_array_length =
(size - FixedArray::kHeaderSize) / kPointerSize;
new_pointer = Heap::AllocateFixedArray(large_object_array_length,
TENURED);
ASSERT(!new_pointer->IsFailure());
}
}
}
TEST(TestThatAlwaysSucceeds) {
}
......