Commit fb2317b6 authored by erik.corry@gmail.com

New snapshot framework. Doesn't work on ARM yet (code targets are
different). Is able to deserialize the whole heap and run some stuff.
Not available as the primary snapshot system yet.

Review URL: http://codereview.chromium.org/335009

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@3142 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
parent 0695df6e
@@ -452,8 +452,8 @@ class V8EXPORT HandleScope {
   void* operator new(size_t size);
   void operator delete(void*, size_t);
 
-  // This Data class is accessible internally through a typedef in the
-  // ImplementationUtilities class.
+  // This Data class is accessible internally as HandleScopeData through a
+  // typedef in the ImplementationUtilities class.
   class V8EXPORT Data {
    public:
     int extensions;
...
@@ -437,6 +437,11 @@ class Assembler : public Malloced {
   INLINE(static Address target_address_at(Address pc));
   INLINE(static void set_target_address_at(Address pc, Address target));
 
+  // Here we are patching the address in the constant pool, not the actual call
+  // instruction.  The address in the constant pool is the same size as a
+  // pointer.
+  static const int kCallTargetSize = kPointerSize;
+
   // Size of an instruction.
   static const int kInstrSize = sizeof(Instr);
...
@@ -316,8 +316,11 @@ Genesis* Genesis::current_ = NULL;
 void Bootstrapper::Iterate(ObjectVisitor* v) {
   natives_cache.Iterate(v);
+  v->Synchronize("NativesCache");
   extensions_cache.Iterate(v);
+  v->Synchronize("Extensions");
   PendingFixups::Iterate(v);
+  v->Synchronize("PendingFixups");
 }
...
@@ -248,6 +248,7 @@ class Variable;
 class VariableProxy;
 class RelocInfo;
 class Deserializer;
+class GenericDeserializer;  // TODO(erikcorry): Get rid of this.
 class MessageLocation;
 class ObjectGroup;
 class TickSample;
@@ -272,7 +273,9 @@ enum AllocationSpace {
   LO_SPACE,             // Promoted large objects.
 
   FIRST_SPACE = NEW_SPACE,
-  LAST_SPACE = LO_SPACE
+  LAST_SPACE = LO_SPACE,
+  FIRST_PAGED_SPACE = OLD_POINTER_SPACE,
+  LAST_PAGED_SPACE = CELL_SPACE
 };
 const int kSpaceTagSize = 3;
 const int kSpaceTagMask = (1 << kSpaceTagSize) - 1;
...
@@ -114,6 +114,7 @@ int Heap::mc_count_ = 0;
 int Heap::gc_count_ = 0;
 
 int Heap::always_allocate_scope_depth_ = 0;
+int Heap::linear_allocation_scope_depth_ = 0;
 bool Heap::context_disposed_pending_ = false;
 
 #ifdef DEBUG
@@ -3243,60 +3244,53 @@ void Heap::IterateRSet(PagedSpace* space, ObjectSlotCallback copy_object_func) {
 }
 
-#ifdef DEBUG
-#define SYNCHRONIZE_TAG(tag) v->Synchronize(tag)
-#else
-#define SYNCHRONIZE_TAG(tag)
-#endif
-
 void Heap::IterateRoots(ObjectVisitor* v) {
   IterateStrongRoots(v);
   v->VisitPointer(reinterpret_cast<Object**>(&roots_[kSymbolTableRootIndex]));
-  SYNCHRONIZE_TAG("symbol_table");
+  v->Synchronize("symbol_table");
 }
 
 void Heap::IterateStrongRoots(ObjectVisitor* v) {
   v->VisitPointers(&roots_[0], &roots_[kStrongRootListLength]);
-  SYNCHRONIZE_TAG("strong_root_list");
+  v->Synchronize("strong_root_list");
 
   v->VisitPointer(bit_cast<Object**, String**>(&hidden_symbol_));
-  SYNCHRONIZE_TAG("symbol");
+  v->Synchronize("symbol");
 
   Bootstrapper::Iterate(v);
-  SYNCHRONIZE_TAG("bootstrapper");
+  v->Synchronize("bootstrapper");
   Top::Iterate(v);
-  SYNCHRONIZE_TAG("top");
+  v->Synchronize("top");
   Relocatable::Iterate(v);
-  SYNCHRONIZE_TAG("relocatable");
+  v->Synchronize("relocatable");
 
 #ifdef ENABLE_DEBUGGER_SUPPORT
   Debug::Iterate(v);
 #endif
-  SYNCHRONIZE_TAG("debug");
+  v->Synchronize("debug");
   CompilationCache::Iterate(v);
-  SYNCHRONIZE_TAG("compilationcache");
+  v->Synchronize("compilationcache");
 
   // Iterate over local handles in handle scopes.
   HandleScopeImplementer::Iterate(v);
-  SYNCHRONIZE_TAG("handlescope");
+  v->Synchronize("handlescope");
 
   // Iterate over the builtin code objects and code stubs in the heap. Note
   // that it is not strictly necessary to iterate over code objects on
   // scavenge collections.  We still do it here because this same function
   // is used by the mark-sweep collector and the deserializer.
   Builtins::IterateBuiltins(v);
-  SYNCHRONIZE_TAG("builtins");
+  v->Synchronize("builtins");
 
   // Iterate over global handles.
   GlobalHandles::IterateRoots(v);
-  SYNCHRONIZE_TAG("globalhandles");
+  v->Synchronize("globalhandles");
 
   // Iterate over pointers being held by inactive threads.
   ThreadManager::Iterate(v);
-  SYNCHRONIZE_TAG("threadmanager");
+  v->Synchronize("threadmanager");
 }
-#undef SYNCHRONIZE_TAG
 
 // Flag is set when the heap has been configured.  The heap can be repeatedly
...
@@ -38,6 +38,12 @@ namespace internal {
 // Defines all the roots in Heap.
 #define UNCONDITIONAL_STRONG_ROOT_LIST(V) \
+  /* Put the byte array map early.  We need it to be in place by the time */ \
+  /* the deserializer hits the next page, since it wants to put a byte */ \
+  /* array in the unused space at the end of the page. */ \
+  V(Map, byte_array_map, ByteArrayMap) \
+  V(Map, one_pointer_filler_map, OnePointerFillerMap) \
+  V(Map, two_pointer_filler_map, TwoPointerFillerMap) \
   /* Cluster the most popular ones in a few cache lines here at the top. */ \
   V(Smi, stack_limit, StackLimit) \
   V(Object, undefined_value, UndefinedValue) \
@@ -109,7 +115,6 @@ namespace internal {
     undetectable_medium_ascii_string_map, \
     UndetectableMediumAsciiStringMap) \
   V(Map, undetectable_long_ascii_string_map, UndetectableLongAsciiStringMap) \
-  V(Map, byte_array_map, ByteArrayMap) \
   V(Map, pixel_array_map, PixelArrayMap) \
   V(Map, external_byte_array_map, ExternalByteArrayMap) \
   V(Map, external_unsigned_byte_array_map, ExternalUnsignedByteArrayMap) \
@@ -126,8 +131,6 @@ namespace internal {
   V(Map, boilerplate_function_map, BoilerplateFunctionMap) \
   V(Map, shared_function_info_map, SharedFunctionInfoMap) \
   V(Map, proxy_map, ProxyMap) \
-  V(Map, one_pointer_filler_map, OnePointerFillerMap) \
-  V(Map, two_pointer_filler_map, TwoPointerFillerMap) \
   V(Object, nan_value, NanValue) \
   V(Object, minus_zero_value, MinusZeroValue) \
   V(String, empty_string, EmptyString) \
@@ -305,6 +308,9 @@ class Heap : public AllStatic {
   static Address always_allocate_scope_depth_address() {
     return reinterpret_cast<Address>(&always_allocate_scope_depth_);
   }
+  static bool linear_allocation() {
+    return linear_allocation_scope_depth_ != 0;
+  }
 
   static Address* NewSpaceAllocationTopAddress() {
     return new_space_.allocation_top_address();
@@ -750,7 +756,7 @@ class Heap : public AllStatic {
   static bool Contains(HeapObject* value);
 
   // Checks whether an address/object is in a space.
-  // Currently used by tests and heap verification only.
+  // Currently used by tests, serialization and heap verification only.
   static bool InSpace(Address addr, AllocationSpace space);
   static bool InSpace(HeapObject* value, AllocationSpace space);
@@ -921,6 +927,7 @@ class Heap : public AllStatic {
   static int survived_since_last_expansion_;
 
   static int always_allocate_scope_depth_;
+  static int linear_allocation_scope_depth_;
   static bool context_disposed_pending_;
 
   static const int kMaxMapSpaceSize = 8*MB;
@@ -1136,6 +1143,7 @@ class Heap : public AllStatic {
   friend class Factory;
   friend class DisallowAllocationFailure;
   friend class AlwaysAllocateScope;
+  friend class LinearAllocationScope;
 };
@@ -1157,6 +1165,19 @@ class AlwaysAllocateScope {
 };
 
+class LinearAllocationScope {
+ public:
+  LinearAllocationScope() {
+    Heap::linear_allocation_scope_depth_++;
+  }
+
+  ~LinearAllocationScope() {
+    Heap::linear_allocation_scope_depth_--;
+    ASSERT(Heap::linear_allocation_scope_depth_ >= 0);
+  }
+};
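The new scope is a plain RAII counter: while at least one LinearAllocationScope is alive, Heap::linear_allocation() returns true and the paged spaces (see spaces.cc below) refuse to fall back on free-list allocation. A minimal usage sketch, with a hypothetical call site standing in for the deserializer that this patch actually uses it for:

    {
      LinearAllocationScope linear_scope;  // depth goes 0 -> 1
      // Allocations here must extend linearly at the top of a page;
      // OldSpace::SlowAllocateRaw skips its free list because
      // Heap::linear_allocation() is now true.
    }
    // Scope destructed: depth back to 0, free-list allocation permitted again.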
 #ifdef DEBUG
 // Visitor class to verify interior pointers that do not have remembered set
 // bits.  All heap object pointers have to point into the heap to a location
...
@@ -439,6 +439,8 @@ class Assembler : public Malloced {
   inline static Address target_address_at(Address pc);
   inline static void set_target_address_at(Address pc, Address target);
 
+  static const int kCallTargetSize = kPointerSize;
+
   // Distance between the address of the code target in the call instruction
   // and the return address
   static const int kCallTargetAddressOffset = kPointerSize;
...
@@ -48,6 +48,7 @@ template <typename T, class P>
 class List {
  public:
+  List() { Initialize(0); }
   INLINE(explicit List(int capacity)) { Initialize(capacity); }
   INLINE(~List()) { DeleteData(data_); }
...
@@ -945,6 +945,25 @@ HeapObject* MapWord::ToForwardingAddress() {
 }
 
+bool MapWord::IsSerializationAddress() {
+  return HAS_SMI_TAG(reinterpret_cast<Object*>(value_));
+}
+
+
+MapWord MapWord::FromSerializationAddress(int raw) {
+  // When the map word is being used as a serialization address we Smi-encode
+  // the serialization address (which is always a smallish positive integer).
+  return MapWord(reinterpret_cast<uintptr_t>(Smi::FromInt(raw)));
+}
+
+
+int MapWord::ToSerializationAddress() {
+  // When the map word is being used as a serialization address we treat the
+  // map word as a Smi and get the small integer that it encodes.
+  return reinterpret_cast<Smi*>(value_)->value();
+}
+
+
 bool MapWord::IsMarked() {
   return (value_ & kMarkingMask) == 0;
 }
...
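These accessors lean on V8's Smi tagging to overload the map word: a Smi carries its integer shifted left by the tag size with a low bit of 0, while a real Map pointer is heap-object-tagged with a low bit of 1, so HAS_SMI_TAG cleanly distinguishes a serialization address from a map. A self-contained round-trip sketch, assuming the one-bit Smi tag of 32-bit V8 from this era:

    #include <cassert>
    #include <cstdint>

    int main() {
      const int kSmiTagSize = 1;             // Smi tag is 0, one bit wide
      int raw = 42;                          // smallish positive address
      // FromSerializationAddress: Smi-encode the address into the map word.
      uintptr_t map_word = static_cast<uintptr_t>(raw) << kSmiTagSize;
      assert((map_word & 1) == 0);           // IsSerializationAddress
      // ToSerializationAddress: recover the integer from the Smi.
      assert(static_cast<int>(map_word >> kSmiTagSize) == raw);
      return 0;
    }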
@@ -1082,6 +1082,15 @@ class MapWord BASE_EMBEDDED {
   // View this map word as a forwarding address.
   inline HeapObject* ToForwardingAddress();
 
+  // True if this map word is a serialization address.  This will only be the
+  // case during a destructive serialization of the heap.
+  inline bool IsSerializationAddress();
+
+  // Create a map word from a serialization address.
+  static inline MapWord FromSerializationAddress(int raw);
+
+  // View this map word as a serialization address.
+  inline int ToSerializationAddress();
+
   // Marking phase of full collection: the map word of live objects is
   // marked, and may be marked as overflowed (eg, the object is live, its
@@ -5105,6 +5114,8 @@ class ObjectVisitor BASE_EMBEDDED {
   // Intended for serialization/deserialization checking: insert, or
   // check for the presence of, a tag at this position in the stream.
   virtual void Synchronize(const char* tag) {}
+#else
+  inline void Synchronize(const char* tag) {}
 #endif
 };
...
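Synchronize is the hook the new tags feed into: in debug builds the serializer writes the tag into the byte stream and the deserializer reads it back and compares, so any drift between the two walks of the heap fails loudly at a named checkpoint instead of corrupting the heap silently. The override bodies are not shown in this page; a purely illustrative sketch of how such a pair could work, using the SYNCHRONIZE tag that serialize.h defines below:

    // Hypothetical sketch, not the committed implementation.
    void Serializer2::Synchronize(const char* tag) {
      sink_->Put(SYNCHRONIZE, tag);
      for (const char* p = tag; *p != '\0'; p++) sink_->Put(*p, "tagchar");
    }

    void Deserializer2::Synchronize(const char* tag) {
      int data = source_->Get();
      ASSERT(data == SYNCHRONIZE);  // both sides reached the same checkpoint
      for (const char* p = tag; *p != '\0'; p++) ASSERT(source_->Get() == *p);
    }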
@@ -262,7 +262,18 @@ class SnapshotReader {
 
 // A Deserializer reads a snapshot and reconstructs the Object graph it defines.
-class Deserializer: public ObjectVisitor {
+
+// TODO(erikcorry): Get rid of this superclass when we are using the new
+// snapshot code exclusively.
+class GenericDeserializer: public ObjectVisitor {
+ public:
+  virtual void GetLog() = 0;
+  virtual void Deserialize() = 0;
+};
+
+
+// TODO(erikcorry): Get rid of this class.
+class Deserializer: public GenericDeserializer {
  public:
   // Create a deserializer. The snapshot is held in str and has size len.
   Deserializer(const byte* str, int len);
@@ -339,6 +350,223 @@ class Deserializer: public ObjectVisitor {
 
   DISALLOW_COPY_AND_ASSIGN(Deserializer);
 };
+
+
+class SnapshotByteSource {
+ public:
+  SnapshotByteSource(const byte* array, int length)
+      : data_(array), length_(length), position_(0) { }
+
+  bool HasMore() { return position_ < length_; }
+
+  int Get() {
+    ASSERT(position_ < length_);
+    return data_[position_++];
+  }
+
+  int GetInt() {
+    // A little unwind to catch the really small ints.
+    int snapshot_byte = Get();
+    if ((snapshot_byte & 0x80) == 0) {
+      return snapshot_byte;
+    }
+    uintptr_t accumulator = (snapshot_byte & 0x7f) << 7;
+    while (true) {
+      snapshot_byte = Get();
+      if ((snapshot_byte & 0x80) == 0) {
+        return accumulator | snapshot_byte;
+      }
+      accumulator = (accumulator | (snapshot_byte & 0x7f)) << 7;
+    }
+    UNREACHABLE();
+    return accumulator;
+  }
+
+  bool AtEOF() {
+    return position_ == length_;
+  }
+
+ private:
+  const byte* data_;
+  int length_;
+  int position_;
+};
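GetInt decodes a big-endian base-128 variable-length integer: every byte contributes seven payload bits, a set high bit means "more bytes follow", and a clear high bit marks the last byte, so values up to 127 cost a single byte. The matching encoder, SnapshotByteSink::PutInt below, is only declared in this header; a compatible sketch under the assumption that it mirrors GetInt exactly:

    // Hypothetical encoder for GetInt's scheme: emit 7-bit groups, most
    // significant group first, high bit set on every byte but the last.
    void PutIntSketch(SnapshotByteSink* sink, uintptr_t integer) {
      int bytes = 1;
      for (uintptr_t tmp = integer; tmp > 0x7f; tmp >>= 7) bytes++;
      for (int shift = (bytes - 1) * 7; shift > 0; shift -= 7) {
        sink->Put(static_cast<int>((integer >> shift) & 0x7f) | 0x80, "IntPart");
      }
      sink->Put(static_cast<int>(integer & 0x7f), "IntLastPart");
    }

For example, 5 is emitted as the single byte 0x05, while 128 becomes 0x81 0x00, which the accumulator loop in GetInt folds back into 128.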
+
+
+// The SerDes class is a common superclass for Serializer2 and Deserializer2
+// which is used to store common constants and methods used by both.
+// TODO(erikcorry): This should inherit from ObjectVisitor.
+class SerDes: public GenericDeserializer {
+ protected:
+  enum DataType {
+    SMI_SERIALIZATION,
+    RAW_DATA_SERIALIZATION,
+    OBJECT_SERIALIZATION,
+    CODE_OBJECT_SERIALIZATION,
+    BACKREF_SERIALIZATION,
+    CODE_BACKREF_SERIALIZATION,
+    EXTERNAL_REFERENCE_SERIALIZATION,
+    SYNCHRONIZE
+  };
+  // Our Smi encoding is much more efficient for small positive integers
+  // than for negative numbers, so we add a bias before encoding and
+  // subtract it after decoding so that popular small negative Smis are
+  // efficiently encoded.
+  static const int kSmiBias = 16;
+  static const int kLargeData = LAST_SPACE;
+  static const int kLargeCode = kLargeData + 1;
+  static const int kLargeFixedArray = kLargeCode + 1;
+  static const int kNumberOfSpaces = kLargeFixedArray + 1;
+
+  static inline bool SpaceIsLarge(int space) { return space >= kLargeData; }
+  static inline bool SpaceIsPaged(int space) {
+    return space >= FIRST_PAGED_SPACE && space <= LAST_PAGED_SPACE;
+  }
+};
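The bias is an additive shift into the cheap end of the code space: a Smi value v is stored as the unsigned code v + kSmiBias, so the popular small negatives -16..-1 land in 0..15 and fit in one variable-length byte. A tiny sketch with hypothetical helper names (no such helpers exist in the patch; only the arithmetic is real):

    int EncodeSmiValue(int value) { return value + 16; }  // + kSmiBias
    int DecodeSmiValue(int code)  { return code - 16; }   // - kSmiBias
    // e.g. EncodeSmiValue(-3) == 13, a one-byte code; DecodeSmiValue(13) == -3.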
+
+
+// A Deserializer reads a snapshot and reconstructs the Object graph it defines.
+class Deserializer2: public SerDes {
+ public:
+  // Create a deserializer from a snapshot byte source.
+  explicit Deserializer2(SnapshotByteSource* source);
+  virtual ~Deserializer2() { }
+
+  // Deserialize the snapshot into an empty heap.
+  void Deserialize();
+  void GetLog() { }  // TODO(erikcorry): Get rid of this.
+#ifdef DEBUG
+  virtual void Synchronize(const char* tag);
+#endif
+
+ private:
+  virtual void VisitPointers(Object** start, Object** end);
+
+  virtual void VisitExternalReferences(Address* start, Address* end) {
+    UNREACHABLE();
+  }
+
+  virtual void VisitRuntimeEntry(RelocInfo* rinfo) {
+    UNREACHABLE();
+  }
+
+  int CurrentAllocationAddress(int space) {
+    // The three different kinds of large objects have different tags in the
+    // snapshot so the deserializer knows which kind of object to allocate,
+    // but they share a fullness_ entry.
+    if (SpaceIsLarge(space)) space = LO_SPACE;
+    return fullness_[space];
+  }
+
+  HeapObject* GetAddress(int space);
+  Address Allocate(int space, int size);
+  bool ReadObject(Object** write_back);
+
+  // Keep track of the pages in the paged spaces.
+  // (In large object space we are keeping track of individual objects
+  // rather than pages.)  In new space we just need the address of the
+  // first object and the others will flow from that.
+  List<Address> pages_[SerDes::kNumberOfSpaces];
+
+  SnapshotByteSource* source_;
+  ExternalReferenceDecoder* external_reference_decoder_;
+  // Keep track of the fullness of each space in order to generate
+  // relative addresses for back references.  Large objects are
+  // just numbered sequentially since relative addresses make no
+  // sense in large object space.
+  int fullness_[LAST_SPACE + 1];
+
+  DISALLOW_COPY_AND_ASSIGN(Deserializer2);
+};
+
+
+class SnapshotByteSink {
+ public:
+  virtual ~SnapshotByteSink() { }
+  virtual void Put(int byte, const char* description) = 0;
+  void PutInt(uintptr_t integer, const char* description);
+};
+
+
+class Serializer2 : public SerDes {
+ public:
+  explicit Serializer2(SnapshotByteSink* sink);
+  // Serialize the current state of the heap.  This operation destroys the
+  // heap contents.
+  void Serialize();
+  void VisitPointers(Object** start, Object** end);
+  void GetLog() { }        // TODO(erikcorry): Get rid of this.
+  void Deserialize() { }   // TODO(erikcorry): Get rid of this.
+#ifdef DEBUG
+  virtual void Synchronize(const char* tag);
+#endif
+
+ private:
+  enum ReferenceRepresentation {
+    TAGGED_REPRESENTATION,       // A tagged object reference.
+    CODE_TARGET_REPRESENTATION   // A reference to first instruction in target.
+  };
+
+  class ObjectSerializer : public ObjectVisitor {
+   public:
+    ObjectSerializer(Serializer2* serializer,
+                     Object* o,
+                     SnapshotByteSink* sink,
+                     ReferenceRepresentation representation)
+        : serializer_(serializer),
+          object_(HeapObject::cast(o)),
+          sink_(sink),
+          reference_representation_(representation),
+          bytes_processed_so_far_(0) { }
+    void Serialize();
+    void VisitPointers(Object** start, Object** end);
+    void VisitExternalReferences(Address* start, Address* end);
+    void VisitCodeTarget(RelocInfo* target);
+
+   private:
+    void OutputRawData(Address up_to);
+
+    Serializer2* serializer_;
+    HeapObject* object_;
+    SnapshotByteSink* sink_;
+    ReferenceRepresentation reference_representation_;
+    int bytes_processed_so_far_;
+  };
+
+  void SerializeObject(Object* o, ReferenceRepresentation representation);
+  void InitializeAllocators();
+
+  // This will return the space for an object.  If the object is in large
+  // object space it may return kLargeCode or kLargeFixedArray in order
+  // to indicate to the deserializer what kind of large object allocation
+  // to make.
+  static int SpaceOfObject(HeapObject* object);
+  // This just returns the space of the object.  It will return LO_SPACE
+  // for all large objects since you can't check the type of the object
+  // once the map has been used for the serialization address.
+  static int SpaceOfAlreadySerializedObject(HeapObject* object);
+  int Allocate(int space, int size);
+  int CurrentAllocationAddress(int space) {
+    if (SpaceIsLarge(space)) space = LO_SPACE;
+    return fullness_[space];
+  }
+  int EncodeExternalReference(Address addr) {
+    return external_reference_encoder_->Encode(addr);
+  }
+
+  // Keep track of the fullness of each space in order to generate
+  // relative addresses for back references.  Large objects are
+  // just numbered sequentially since relative addresses make no
+  // sense in large object space.
+  int fullness_[LAST_SPACE + 1];
+  SnapshotByteSink* sink_;
+  int current_root_index_;
+  ExternalReferenceEncoder* external_reference_encoder_;
+
+  friend class ObjectSerializer;
+  friend class Deserializer2;
+
+  DISALLOW_COPY_AND_ASSIGN(Serializer2);
+};
 
 } }  // namespace v8::internal
 
 #endif  // V8_SERIALIZE_H_
@@ -43,6 +43,13 @@ bool Snapshot::Deserialize(const byte* content, int len) {
 }
 
+bool Snapshot::Deserialize2(const byte* content, int len) {
+  SnapshotByteSource source(content, len);
+  Deserializer2 deserializer(&source);
+  return V8::Initialize(&deserializer);
+}
+
+
 bool Snapshot::Initialize(const char* snapshot_file) {
   if (snapshot_file) {
     int len;
@@ -58,6 +65,20 @@ bool Snapshot::Initialize(const char* snapshot_file) {
 }
 
+bool Snapshot::Initialize2(const char* snapshot_file) {
+  if (snapshot_file) {
+    int len;
+    byte* str = ReadBytes(snapshot_file, &len);
+    if (!str) return false;
+    Deserialize2(str, len);
+    DeleteArray(str);
+  } else if (size_ > 0) {
+    Deserialize2(data_, size_);
+  }
+  return true;
+}
+
+
 bool Snapshot::WriteToFile(const char* snapshot_file) {
   Serializer ser;
   ser.Serialize();
@@ -72,4 +93,38 @@ bool Snapshot::WriteToFile(const char* snapshot_file) {
 }
 
+class FileByteSink : public SnapshotByteSink {
+ public:
+  explicit FileByteSink(const char* snapshot_file) {
+    fp_ = fopen(snapshot_file, "wb");
+    if (fp_ == NULL) {
+      PrintF("Unable to write to snapshot file \"%s\"\n", snapshot_file);
+      exit(1);
+    }
+  }
+  virtual ~FileByteSink() {
+    if (fp_ != NULL) {
+      fclose(fp_);
+    }
+  }
+  virtual void Put(int byte, const char* description) {
+    if (fp_ != NULL) {
+      fputc(byte, fp_);
+    }
+  }
+
+ private:
+  FILE* fp_;
+};
+
+
+bool Snapshot::WriteToFile2(const char* snapshot_file) {
+  FileByteSink file(snapshot_file);
+  Serializer2 ser(&file);
+  ser.Serialize();
+  return true;
+}
+
 } }  // namespace v8::internal
@@ -37,6 +37,7 @@ class Snapshot {
   // NULL, use the internal snapshot instead. Returns false if no snapshot
   // could be found.
   static bool Initialize(const char* snapshot_file = NULL);
+  static bool Initialize2(const char* snapshot_file = NULL);
 
   // Returns whether or not the snapshot is enabled.
   static bool IsEnabled() { return size_ != 0; }
@@ -44,12 +45,14 @@ class Snapshot {
   // Write snapshot to the given file. Returns true if snapshot was written
   // successfully.
   static bool WriteToFile(const char* snapshot_file);
+  static bool WriteToFile2(const char* snapshot_file);
 
  private:
   static const byte data_[];
   static int size_;
 
   static bool Deserialize(const byte* content, int len);
+  static bool Deserialize2(const byte* content, int len);
 
   DISALLOW_IMPLICIT_CONSTRUCTORS(Snapshot);
 };
...
@@ -1527,7 +1527,9 @@ void FreeListNode::set_size(int size_in_bytes) {
   // correct size.
   if (size_in_bytes > ByteArray::kAlignedSize) {
     set_map(Heap::raw_unchecked_byte_array_map());
-    ByteArray::cast(this)->set_length(ByteArray::LengthFor(size_in_bytes));
+    // Can't use ByteArray::cast because it fails during deserialization.
+    ByteArray* this_as_byte_array = reinterpret_cast<ByteArray*>(this);
+    this_as_byte_array->set_length(ByteArray::LengthFor(size_in_bytes));
   } else if (size_in_bytes == kPointerSize) {
     set_map(Heap::raw_unchecked_one_pointer_filler_map());
   } else if (size_in_bytes == 2 * kPointerSize) {
@@ -1535,7 +1537,8 @@ void FreeListNode::set_size(int size_in_bytes) {
   } else {
     UNREACHABLE();
   }
-  ASSERT(Size() == size_in_bytes);
+  // We would like to ASSERT(Size() == size_in_bytes) but this would fail during
+  // deserialization because the byte array map is not done yet.
 }
@@ -1828,7 +1831,9 @@ HeapObject* OldSpace::SlowAllocateRaw(int size_in_bytes) {
     return AllocateInNextPage(current_page, size_in_bytes);
   }
 
-  // There is no next page in this space.  Try free list allocation.
+  // There is no next page in this space.  Try free list allocation unless that
+  // is currently forbidden.
+  if (!Heap::linear_allocation()) {
     int wasted_bytes;
     Object* result = free_list_.Allocate(size_in_bytes, &wasted_bytes);
     accounting_stats_.WasteBytes(wasted_bytes);
@@ -1836,6 +1841,7 @@ HeapObject* OldSpace::SlowAllocateRaw(int size_in_bytes) {
       accounting_stats_.AllocateBytes(size_in_bytes);
       return HeapObject::cast(result);
     }
+  }
 
   // Free list allocation failed and there is no next page.  Fail if we have
   // hit the old generation size limit that should cause a garbage
@@ -2230,10 +2236,10 @@ HeapObject* FixedSpace::SlowAllocateRaw(int size_in_bytes) {
     return AllocateInNextPage(current_page, size_in_bytes);
   }
 
-  // There is no next page in this space.  Try free list allocation.
-  // The fixed space free list implicitly assumes that all free blocks
-  // are of the fixed size.
-  if (size_in_bytes == object_size_in_bytes_) {
+  // There is no next page in this space.  Try free list allocation unless
+  // that is currently forbidden.  The fixed space free list implicitly assumes
+  // that all free blocks are of the fixed size.
+  if (!Heap::linear_allocation()) {
     Object* result = free_list_.Allocate();
     if (!result->IsFailure()) {
       accounting_stats_.AllocateBytes(size_in_bytes);
...
@@ -45,7 +45,7 @@ bool V8::has_been_setup_ = false;
 bool V8::has_been_disposed_ = false;
 bool V8::has_fatal_error_ = false;
 
-bool V8::Initialize(Deserializer *des) {
+bool V8::Initialize(GenericDeserializer *des) {
   bool create_heap_objects = des == NULL;
   if (has_been_disposed_ || has_fatal_error_) return false;
   if (IsRunning()) return true;
...
@@ -80,7 +80,7 @@ class V8 : public AllStatic {
   // created from scratch. If a non-null Deserializer is given, the
   // initial state is created by reading the deserialized data into an
   // empty heap.
-  static bool Initialize(Deserializer* des);
+  static bool Initialize(GenericDeserializer* des);
   static void TearDown();
   static bool IsRunning() { return is_running_; }
   // To be dead you have to have lived
...
@@ -459,6 +459,8 @@ class Assembler : public Malloced {
   static inline Address target_address_at(Address pc);
   static inline void set_target_address_at(Address pc, Address target);
   inline Handle<Object> code_target_object_handle_at(Address pc);
+  // Number of bytes taken up by the branch target in the code.
+  static const int kCallTargetSize = 4;  // Use 32-bit displacement.
   // Distance between the address of the code target in the call instruction
   // and the return address pushed on the stack.
   static const int kCallTargetAddressOffset = 4;  // Use 32-bit displacement.
...
@@ -47,6 +47,10 @@ test-serialize/DependentTestThatAlwaysFails: FAIL
 
 [ $arch == arm ]
 
+# New serialization doesn't work on ARM yet.
+test-serialize/Deserialize2: SKIP
+test-serialize/DeserializeAndRunScript2: SKIP
+
 # BUG(113): Test seems flaky on ARM.
 test-spaces/LargeObjectSpace: PASS || FAIL
...
@@ -185,6 +185,18 @@ static void Serialize() {
 }
 
+static void Serialize2() {
+  Serializer::Enable();
+  // We have to create one context.  One reason for this is so that the
+  // builtins can be loaded from v8natives.js and their addresses can be
+  // processed.  This will clear the pending fixups array, which would
+  // otherwise contain GC roots that would confuse the
+  // serialization/deserialization process.
+  v8::Persistent<v8::Context> env = v8::Context::New();
+  env.Dispose();
+  Snapshot::WriteToFile2(FLAG_testing_serialization_file);
+}
+
+
 // Test that the whole heap can be serialized when running from the
 // internal snapshot.
 // (Smoke test.)
@@ -203,6 +215,13 @@ TEST(Serialize) {
 }
 
+// Test that the whole heap can be serialized.
+TEST(Serialize2) {
+  v8::V8::Initialize();
+  Serialize2();
+}
+
+
 // Test that the heap isn't destroyed after a serialization.
 TEST(SerializeNondestructive) {
   if (Snapshot::IsEnabled()) return;
@@ -230,6 +249,11 @@ static void Deserialize() {
 }
 
+static void Deserialize2() {
+  CHECK(Snapshot::Initialize2(FLAG_testing_serialization_file));
+}
+
+
 static void SanityCheck() {
   v8::HandleScope scope;
 #ifdef DEBUG
@@ -251,6 +275,21 @@ DEPENDENT_TEST(Deserialize, Serialize) {
   SanityCheck();
 }
 
+DEPENDENT_TEST(Deserialize2, Serialize2) {
+  v8::HandleScope scope;
+  Deserialize2();
+  fflush(stdout);
+
+  v8::Persistent<v8::Context> env = v8::Context::New();
+  env->Enter();
+
+  SanityCheck();
+}
+
+
 DEPENDENT_TEST(DeserializeAndRunScript, Serialize) {
   v8::HandleScope scope;
@@ -263,6 +302,21 @@ DEPENDENT_TEST(DeserializeAndRunScript, Serialize) {
 }
 
+DEPENDENT_TEST(DeserializeAndRunScript2, Serialize2) {
+  v8::HandleScope scope;
+  Deserialize2();
+
+  v8::Persistent<v8::Context> env = v8::Context::New();
+  env->Enter();
+
+  const char* c_source = "\"1234\".length";
+  v8::Local<v8::String> source = v8::String::New(c_source);
+  v8::Local<v8::Script> script = v8::Script::Compile(source);
+  CHECK_EQ(4, script->Run()->Int32Value());
+}
+
+
 DEPENDENT_TEST(DeserializeNatives, Serialize) {
   v8::HandleScope scope;
@@ -288,9 +342,6 @@ DEPENDENT_TEST(DeserializeExtensions, Serialize) {
 }
 
-extern "C" void V8_Fatal(const char* file, int line, const char* format, ...);
-
 TEST(TestThatAlwaysSucceeds) {
 }
...