Commit bc334df1 authored by erik.corry@gmail.com

* Improve the interface to the memory-reservation functionality.

* Add a test case that generates a serialization of a single flat string.

Review URL: http://codereview.chromium.org/542073

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@3606 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
parent 41475c17
--- a/src/heap.cc
+++ b/src/heap.cc
@@ -479,6 +479,65 @@ static void VerifySymbolTable() {
 }
 
 
+void Heap::ReserveSpace(
+    int new_space_size,
+    int pointer_space_size,
+    int data_space_size,
+    int code_space_size,
+    int map_space_size,
+    int cell_space_size,
+    int large_object_size) {
+  NewSpace* new_space = Heap::new_space();
+  PagedSpace* old_pointer_space = Heap::old_pointer_space();
+  PagedSpace* old_data_space = Heap::old_data_space();
+  PagedSpace* code_space = Heap::code_space();
+  PagedSpace* map_space = Heap::map_space();
+  PagedSpace* cell_space = Heap::cell_space();
+  LargeObjectSpace* lo_space = Heap::lo_space();
+  bool gc_performed = true;
+  while (gc_performed) {
+    gc_performed = false;
+    if (!new_space->ReserveSpace(new_space_size)) {
+      Heap::CollectGarbage(new_space_size, NEW_SPACE);
+      gc_performed = true;
+    }
+    if (!old_pointer_space->ReserveSpace(pointer_space_size)) {
+      Heap::CollectGarbage(pointer_space_size, OLD_POINTER_SPACE);
+      gc_performed = true;
+    }
+    if (!(old_data_space->ReserveSpace(data_space_size))) {
+      Heap::CollectGarbage(data_space_size, OLD_DATA_SPACE);
+      gc_performed = true;
+    }
+    if (!(code_space->ReserveSpace(code_space_size))) {
+      Heap::CollectGarbage(code_space_size, CODE_SPACE);
+      gc_performed = true;
+    }
+    if (!(map_space->ReserveSpace(map_space_size))) {
+      Heap::CollectGarbage(map_space_size, MAP_SPACE);
+      gc_performed = true;
+    }
+    if (!(cell_space->ReserveSpace(cell_space_size))) {
+      Heap::CollectGarbage(cell_space_size, CELL_SPACE);
+      gc_performed = true;
+    }
+    // We add a slack-factor of 2 in order to have space for the remembered
+    // set and a series of large-object allocations that are only just larger
+    // than the page size.
+    large_object_size *= 2;
+    // The ReserveSpace method on the large object space checks how much
+    // we can expand the old generation. This includes expansion caused by
+    // allocation in the other spaces.
+    large_object_size += cell_space_size + map_space_size + code_space_size +
+        data_space_size + pointer_space_size;
+    if (!(lo_space->ReserveSpace(large_object_size))) {
+      Heap::CollectGarbage(large_object_size, LO_SPACE);
+      gc_performed = true;
+    }
+  }
+}
+
+
 void Heap::EnsureFromSpaceIsCommitted() {
   if (new_space_.CommitFromSpaceIfNeeded()) return;
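A note on the arithmetic above: the factor of 2 and the paged-space sums are all folded into `large_object_size` before the final `lo_space` check, and because the padding is applied in place inside the retry loop, a second iteration pads the already-padded value again. The standalone sketch below keeps the raw request and the padded figure separate; the 512 KB numbers are hypothetical, not from the commit.

```cpp
// Illustration of the padding arithmetic in Heap::ReserveSpace.
// All sizes are hypothetical.
#include <cstdio>

int main() {
  const int KB = 1024;
  const int large_request = 512 * KB;    // Raw large-object reservation.
  const int per_paged_space = 512 * KB;  // Request for each paged space.
  // Factor of 2: slack for the remembered set and for large objects that
  // are only just bigger than a page.
  int padded = large_request * 2;
  // The five paged spaces (pointer, data, code, map, cell) expand the same
  // old generation, so their requests are added to the figure that the
  // large object space is asked to reserve.
  padded += 5 * per_paged_space;
  std::printf("lo_space reservation checked: %d KB\n", padded / KB);
  return 0;
}
```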
--- a/src/heap.h
+++ b/src/heap.h
@@ -810,6 +810,21 @@ class Heap : public AllStatic {
   // Commits from space if it is uncommitted.
   static void EnsureFromSpaceIsCommitted();
 
+  // Support for partial snapshots. After calling this we can allocate a
+  // certain number of bytes using only linear allocation (with a
+  // LinearAllocationScope and an AlwaysAllocateScope) without using freelists
+  // or causing a GC. It returns true if space was reserved or false if a GC is
+  // needed. For paged spaces the space requested must include the space wasted
+  // at the end of each page when allocating linearly.
+  static void ReserveSpace(
+      int new_space_size,
+      int pointer_space_size,
+      int data_space_size,
+      int code_space_size,
+      int map_space_size,
+      int cell_space_size,
+      int large_object_size);
+
   //
   // Support for the API.
   //
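The comment above defines the calling convention: reserve first, then allocate linearly. Below is a minimal sketch of that pattern, not part of the commit, assuming the V8-internal headers of this era and using only names that appear elsewhere in the patch (the test added further down follows the same sequence); `kChunk` is a hypothetical budget.

```cpp
// Sketch only: the reserve-then-allocate pattern the heap.h comment
// describes.  Assumes v8 internals (heap.h) are in scope.
static void AllocateWithoutGC() {
  const int kChunk = 64 * KB;  // Hypothetical per-space budget.
  // Collects garbage internally until all seven reservations succeed.
  Heap::ReserveSpace(kChunk,   // New space.
                     kChunk,   // Old pointer space.
                     kChunk,   // Old data space.
                     kChunk,   // Code space.
                     kChunk,   // Map space.
                     kChunk,   // Cell space.
                     kChunk);  // Large object space.
  // Inside these scopes allocation is linear (no freelists) and must not
  // trigger a GC.
  LinearAllocationScope linear_scope;
  AlwaysAllocateScope always_scope;
  Object* obj = Heap::AllocateFixedArray(4, TENURED);
  CHECK(!obj->IsFailure());
}
```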
--- a/src/serialize.cc
+++ b/src/serialize.cc
@@ -993,7 +993,8 @@ void Serializer::SerializeObject(
       sink_->PutInt(root_index, "root_index");
       return;
     }
-    // TODO(erikcorry): Handle symbols here.
+    // All the symbols that the snapshot needs should be in the root table.
+    ASSERT(!heap_object->IsSymbol());
   }
   if (SerializationAddressMapper::IsMapped(heap_object)) {
     int space = SpaceOfAlreadySerializedObject(heap_object);
--- a/src/spaces.cc
+++ b/src/spaces.cc
@@ -1863,10 +1863,7 @@ bool PagedSpace::ReserveSpace(int bytes) {
 
 // You have to call this last, since the implementation from PagedSpace
 // doesn't know that memory was 'promised' to large object space.
 bool LargeObjectSpace::ReserveSpace(int bytes) {
-  // We add a slack-factor of 2 in order to have space for the remembered
-  // set and a series of large-object allocations that are only just larger
-  // than the page size.
-  return Heap::OldGenerationSpaceAvailable() >= bytes * 2;
+  return Heap::OldGenerationSpaceAvailable() >= bytes;
 }
 
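The "call this last" constraint exists because OldGenerationSpaceAvailable() is a single shared budget: the paged-space reservations made earlier consume the same headroom that large objects need, which is why the doubling and the paged-space sums now live in the caller rather than here. A schematic illustration with hypothetical numbers, not V8 API calls:

```cpp
// Schematic only: why the raw large-object request is not enough on its own.
#include <cstdio>

int main() {
  const int KB = 1024;
  int headroom = 2048 * KB;           // Hypothetical old-generation headroom.
  int paged_requests = 5 * 256 * KB;  // Already promised to paged spaces.
  int large_request = 512 * KB;
  // The raw request fits...
  std::printf("raw:    %s\n", large_request <= headroom ? "ok" : "needs GC");
  // ...but with the promised paged-space expansion and the 2x slack counted,
  // the same headroom no longer does, so a GC is forced.
  int padded = large_request * 2 + paged_requests;
  std::printf("padded: %s\n", padded <= headroom ? "ok" : "needs GC");
  return 0;
}
```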
--- a/test/cctest/test-serialize.cc
+++ b/test/cctest/test-serialize.cc
@@ -279,53 +279,62 @@ DEPENDENT_TEST(DeserializeFromSecondSerializationAndRunScript2,
 }
 
 
+class FileByteSink : public SnapshotByteSink {
+ public:
+  explicit FileByteSink(const char* snapshot_file) {
+    fp_ = OS::FOpen(snapshot_file, "wb");
+    if (fp_ == NULL) {
+      PrintF("Unable to write to snapshot file \"%s\"\n", snapshot_file);
+      exit(1);
+    }
+  }
+  virtual ~FileByteSink() {
+    if (fp_ != NULL) {
+      fclose(fp_);
+    }
+  }
+  virtual void Put(int byte, const char* description) {
+    if (fp_ != NULL) {
+      fputc(byte, fp_);
+    }
+  }
+
+ private:
+  FILE* fp_;
+};
+
+
+TEST(PartialSerialization) {
+  Serializer::Enable();
+  v8::V8::Initialize();
+  v8::Persistent<v8::Context> env = v8::Context::New();
+  env->Enter();
+
+  v8::HandleScope handle_scope;
+  v8::Local<v8::String> foo = v8::String::New("foo");
+
+  FileByteSink file(FLAG_testing_serialization_file);
+  Serializer ser(&file);
+  i::Handle<i::String> internal_foo = v8::Utils::OpenHandle(*foo);
+  Object* raw_foo = *internal_foo;
+  ser.SerializePartial(&raw_foo);
+}
+
+
 TEST(LinearAllocation) {
   v8::V8::Initialize();
-  NewSpace* new_space = Heap::new_space();
-  PagedSpace* old_pointer_space = Heap::old_pointer_space();
-  PagedSpace* old_data_space = Heap::old_data_space();
-  PagedSpace* code_space = Heap::code_space();
-  PagedSpace* map_space = Heap::map_space();
-  PagedSpace* cell_space = Heap::cell_space();
   int new_space_max = 512 * KB;
   for (int size = 1000; size < 5 * MB; size += size >> 1) {
-    bool gc_performed = true;
-    while (gc_performed) {
-      gc_performed = false;
-      if (size < new_space_max) {
-        if (!new_space->ReserveSpace(size)) {
-          Heap::CollectGarbage(size, NEW_SPACE);
-          gc_performed = true;
-          CHECK(new_space->ReserveSpace(size));
-        }
-      }
-      if (!old_pointer_space->ReserveSpace(size)) {
-        Heap::CollectGarbage(size, OLD_POINTER_SPACE);
-        gc_performed = true;
-        CHECK(old_pointer_space->ReserveSpace(size));
-      }
-      if (!(old_data_space->ReserveSpace(size))) {
-        Heap::CollectGarbage(size, OLD_DATA_SPACE);
-        gc_performed = true;
-        CHECK(old_data_space->ReserveSpace(size));
-      }
-      if (!(code_space->ReserveSpace(size))) {
-        Heap::CollectGarbage(size, CODE_SPACE);
-        gc_performed = true;
-        CHECK(code_space->ReserveSpace(size));
-      }
-      if (!(map_space->ReserveSpace(size))) {
-        Heap::CollectGarbage(size, MAP_SPACE);
-        gc_performed = true;
-        CHECK(map_space->ReserveSpace(size));
-      }
-      if (!(cell_space->ReserveSpace(size))) {
-        Heap::CollectGarbage(size, CELL_SPACE);
-        gc_performed = true;
-        CHECK(cell_space->ReserveSpace(size));
-      }
-    }
-    LinearAllocationScope scope;
+    int new_space_size = (size < new_space_max) ? size : new_space_max;
+    Heap::ReserveSpace(
+        new_space_size,
+        size,  // Old pointer space.
+        size,  // Old data space.
+        size,  // Code space.
+        size,  // Map space.
+        size,  // Cell space.
+        size);  // Large object space.
+    LinearAllocationScope linear_allocation_scope;
     const int kSmallFixedArrayLength = 4;
     const int kSmallFixedArraySize =
         FixedArray::kHeaderSize + kSmallFixedArrayLength * kPointerSize;
@@ -334,70 +343,69 @@ TEST(LinearAllocation) {
         SeqAsciiString::kHeaderSize + kSmallStringLength;
     const int kMapSize = Map::kSize;
 
-    if (size < new_space_max) {
-      Object* new_last = NULL;
-      for (int i = 0;
-           i + kSmallFixedArraySize <= size; i += kSmallFixedArraySize) {
-        Object* o = Heap::AllocateFixedArray(kSmallFixedArrayLength);
-        if (new_last != NULL) {
-          CHECK_EQ(reinterpret_cast<char*>(o),
-                   reinterpret_cast<char*>(new_last) + kSmallFixedArraySize);
-        }
-        new_last = o;
-      }
-    }
+    Object* new_last = NULL;
+    for (int i = 0;
+         i + kSmallFixedArraySize <= new_space_size;
+         i += kSmallFixedArraySize) {
+      Object* obj = Heap::AllocateFixedArray(kSmallFixedArrayLength);
+      if (new_last != NULL) {
+        CHECK_EQ(reinterpret_cast<char*>(obj),
+                 reinterpret_cast<char*>(new_last) + kSmallFixedArraySize);
+      }
+      new_last = obj;
+    }
 
-    Object* new_pointer = NULL;
+    Object* pointer_last = NULL;
     for (int i = 0;
          i + kSmallFixedArraySize <= size;
          i += kSmallFixedArraySize) {
-      Object* o = Heap::AllocateFixedArray(kSmallFixedArrayLength, TENURED);
+      Object* obj = Heap::AllocateFixedArray(kSmallFixedArrayLength, TENURED);
       int old_page_fullness = i % Page::kPageSize;
       int page_fullness = (i + kSmallFixedArraySize) % Page::kPageSize;
       if (page_fullness < old_page_fullness ||
           page_fullness > Page::kObjectAreaSize) {
         i = RoundUp(i, Page::kPageSize);
-        new_pointer = NULL;
+        pointer_last = NULL;
       }
-      if (new_pointer != NULL) {
-        CHECK_EQ(reinterpret_cast<char*>(o),
-                 reinterpret_cast<char*>(new_pointer) + kSmallFixedArraySize);
+      if (pointer_last != NULL) {
+        CHECK_EQ(reinterpret_cast<char*>(obj),
+                 reinterpret_cast<char*>(pointer_last) + kSmallFixedArraySize);
       }
-      new_pointer = o;
+      pointer_last = obj;
     }
 
-    new_pointer = NULL;
+    Object* data_last = NULL;
     for (int i = 0; i + kSmallStringSize <= size; i += kSmallStringSize) {
-      Object* o = Heap::AllocateRawAsciiString(kSmallStringLength, TENURED);
+      Object* obj = Heap::AllocateRawAsciiString(kSmallStringLength, TENURED);
       int old_page_fullness = i % Page::kPageSize;
       int page_fullness = (i + kSmallStringSize) % Page::kPageSize;
       if (page_fullness < old_page_fullness ||
          page_fullness > Page::kObjectAreaSize) {
         i = RoundUp(i, Page::kPageSize);
-        new_pointer = NULL;
+        data_last = NULL;
       }
-      if (new_pointer != NULL) {
-        CHECK_EQ(reinterpret_cast<char*>(o),
-                 reinterpret_cast<char*>(new_pointer) + kSmallStringSize);
+      if (data_last != NULL) {
+        CHECK_EQ(reinterpret_cast<char*>(obj),
+                 reinterpret_cast<char*>(data_last) + kSmallStringSize);
       }
-      new_pointer = o;
+      data_last = obj;
     }
 
-    new_pointer = NULL;
+    Object* map_last = NULL;
     for (int i = 0; i + kMapSize <= size; i += kMapSize) {
-      Object* o = Heap::AllocateMap(JS_OBJECT_TYPE, 42 * kPointerSize);
+      Object* obj = Heap::AllocateMap(JS_OBJECT_TYPE, 42 * kPointerSize);
       int old_page_fullness = i % Page::kPageSize;
       int page_fullness = (i + kMapSize) % Page::kPageSize;
       if (page_fullness < old_page_fullness ||
          page_fullness > Page::kObjectAreaSize) {
         i = RoundUp(i, Page::kPageSize);
-        new_pointer = NULL;
+        map_last = NULL;
       }
-      if (new_pointer != NULL) {
-        CHECK_EQ(reinterpret_cast<char*>(o),
-                 reinterpret_cast<char*>(new_pointer) + kMapSize);
+      if (map_last != NULL) {
+        CHECK_EQ(reinterpret_cast<char*>(obj),
+                 reinterpret_cast<char*>(map_last) + kMapSize);
       }
-      new_pointer = o;
+      map_last = obj;
     }
 
     if (size > Page::kObjectAreaSize) {
@@ -406,9 +414,9 @@ TEST(LinearAllocation) {
       AlwaysAllocateScope always;
       int large_object_array_length =
           (size - FixedArray::kHeaderSize) / kPointerSize;
-      new_pointer = Heap::AllocateFixedArray(large_object_array_length,
+      Object* obj = Heap::AllocateFixedArray(large_object_array_length,
                                              TENURED);
-      ASSERT(!new_pointer->IsFailure());
+      CHECK(!obj->IsFailure());
     }
   }
 }