Commit b9ddcce3 authored by Peter Marshall, committed by Commit Bot

[snapshot] Remove the remaining uses of List in src/snapshot.

Bug: v8:6333
Cq-Include-Trybots: master.tryserver.chromium.linux:linux_chromium_rel_ng
Change-Id: I50d4357a7e7a0e8f2d04d5d34952cf524c012d5f
Reviewed-on: https://chromium-review.googlesource.com/625740
Commit-Queue: Peter Marshall <petermarshall@chromium.org>
Reviewed-by: Jakob Gruber <jgruber@chromium.org>
Cr-Commit-Position: refs/heads/master@{#47538}
parent 0cd2ea7c
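
The diff below is a mechanical migration from `v8::internal::List` to `std::vector`. As a quick orientation (not part of the commit itself), here is a minimal, self-contained sketch of the replacement patterns it applies, inferred from the hunks; the `byte` alias and `Reservation` struct are simplified local stand-ins, not V8's definitions:

```cpp
// Sketch of the List -> std::vector API mapping used throughout this CL.
// `byte` and `Reservation` are simplified stand-ins, not V8's types.
#include <cstdint>
#include <vector>

using byte = uint8_t;

struct Reservation {
  explicit Reservation(uint32_t size) : size_(size) {}
  void mark_as_last() { size_ |= 1u << 31; }
  uint32_t size_;
};

int main() {
  // List<T> list(capacity)   ->  std::vector<T> v; v.reserve(capacity);
  std::vector<Reservation> out;
  out.reserve(4);

  // list.Add(x)              ->  v.push_back(x)
  out.push_back(Reservation(16));

  // list.last()              ->  v.back()
  out.back().mark_as_last();

  // list.length() (an int)   ->  static_cast<int>(v.size())
  int count = static_cast<int>(out.size());

  // list.RemoveLast()        ->  v.back() followed by v.pop_back()
  Reservation last = out.back();
  out.pop_back();

  // list.is_empty()          ->  v.empty()
  bool empty_now = out.empty();

  // list.Rewind(0)           ->  v.clear()  (allocated capacity is retained)
  out.clear();

  // list.AddAll(Vector<byte>(data, n))  ->  v.insert(v.end(), data, data + n)
  std::vector<byte> sink;
  const byte raw[] = {1, 2, 3};
  sink.insert(sink.end(), raw, raw + sizeof(raw));

  return (count == 1 && empty_now && last.size_ != 0) ? 0 : 1;
}
```
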
@@ -708,7 +708,8 @@ StartupData SnapshotCreator::CreateBlob(
startup_serializer.SerializeStrongReferences();
// Serialize each context with a new partial serializer.
- i::List<i::SnapshotData*> context_snapshots(num_additional_contexts + 1);
+ std::vector<i::SnapshotData*> context_snapshots;
+ context_snapshots.reserve(num_additional_contexts + 1);
// TODO(6593): generalize rehashing, and remove this flag.
bool can_be_rehashed = true;
@@ -721,7 +722,7 @@ StartupData SnapshotCreator::CreateBlob(
data->default_embedder_fields_serializer_);
partial_serializer.Serialize(&default_context, false);
can_be_rehashed = can_be_rehashed && partial_serializer.can_be_rehashed();
- context_snapshots.Add(new i::SnapshotData(&partial_serializer));
+ context_snapshots.push_back(new i::SnapshotData(&partial_serializer));
}
for (int i = 0; i < num_additional_contexts; i++) {
@@ -729,7 +730,7 @@ StartupData SnapshotCreator::CreateBlob(
isolate, &startup_serializer, data->embedder_fields_serializers_[i]);
partial_serializer.Serialize(&contexts[i], true);
can_be_rehashed = can_be_rehashed && partial_serializer.can_be_rehashed();
- context_snapshots.Add(new i::SnapshotData(&partial_serializer));
+ context_snapshots.push_back(new i::SnapshotData(&partial_serializer));
}
startup_serializer.SerializeWeakReferencesAndDeferred();
@@ -743,10 +744,10 @@ StartupData SnapshotCreator::CreateBlob(
i::SnapshotData startup_snapshot(&startup_serializer);
StartupData result = i::Snapshot::CreateSnapshotBlob(
- &startup_snapshot, &context_snapshots, can_be_rehashed);
+ &startup_snapshot, context_snapshots, can_be_rehashed);
// Delete heap-allocated context snapshot instances.
- for (const auto& context_snapshot : context_snapshots) {
+ for (const auto context_snapshot : context_snapshots) {
delete context_snapshot;
}
data->created_ = true;
......
@@ -2767,7 +2767,7 @@ bool Isolate::Init(StartupDeserializer* des) {
if (create_heap_objects) {
// Terminate the partial snapshot cache so we can iterate.
- partial_snapshot_cache_.Add(heap_.undefined_value());
+ partial_snapshot_cache_.push_back(heap_.undefined_value());
}
InitializeThreadLocal();
......
@@ -1207,7 +1207,9 @@ class Isolate {
void AddDetachedContext(Handle<Context> context);
void CheckDetachedContextsAfterGC();
- List<Object*>* partial_snapshot_cache() { return &partial_snapshot_cache_; }
+ std::vector<Object*>* partial_snapshot_cache() {
+ return &partial_snapshot_cache_;
+ }
void set_array_buffer_allocator(v8::ArrayBuffer::Allocator* allocator) {
array_buffer_allocator_ = allocator;
@@ -1578,7 +1580,7 @@ class Isolate {
v8::Isolate::UseCounterCallback use_counter_callback_;
BasicBlockProfiler* basic_block_profiler_;
- List<Object*> partial_snapshot_cache_;
+ std::vector<Object*> partial_snapshot_cache_;
v8::ArrayBuffer::Allocator* array_buffer_allocator_;
......
@@ -162,7 +162,7 @@ void CodeSerializer::SerializeCodeStub(Code* code_stub, HowToCode how_to_code,
uint32_t stub_key = code_stub->stub_key();
DCHECK(CodeStub::MajorKeyFromKey(stub_key) != CodeStub::NoCache);
DCHECK(!CodeStub::GetCode(isolate(), stub_key).is_null());
- stub_keys_.Add(stub_key);
+ stub_keys_.push_back(stub_key);
SerializerReference reference =
reference_map()->AddAttachedReference(code_stub);
@@ -333,21 +333,21 @@ class Checksum {
DISALLOW_COPY_AND_ASSIGN(Checksum);
};
- SerializedCodeData::SerializedCodeData(const List<byte>* payload,
+ SerializedCodeData::SerializedCodeData(const std::vector<byte>* payload,
const CodeSerializer* cs) {
DisallowHeapAllocation no_gc;
- const List<uint32_t>* stub_keys = cs->stub_keys();
+ const std::vector<uint32_t>* stub_keys = cs->stub_keys();
- List<Reservation> reservations;
+ std::vector<Reservation> reservations;
cs->EncodeReservations(&reservations);
// Calculate sizes.
- int reservation_size = reservations.length() * kInt32Size;
- int num_stub_keys = stub_keys->length();
- int stub_keys_size = stub_keys->length() * kInt32Size;
+ int reservation_size = static_cast<int>(reservations.size()) * kInt32Size;
+ int num_stub_keys = static_cast<int>(stub_keys->size());
+ int stub_keys_size = num_stub_keys * kInt32Size;
int payload_offset = kHeaderSize + reservation_size + stub_keys_size;
int padded_payload_offset = POINTER_SIZE_ALIGN(payload_offset);
- int size = padded_payload_offset + payload->length();
+ int size = padded_payload_offset + static_cast<int>(payload->size());
// Allocate backing store and create result data.
AllocateData(size);
@@ -359,27 +359,28 @@ SerializedCodeData::SerializedCodeData(const List<byte>* payload,
SetHeaderValue(kCpuFeaturesOffset,
static_cast<uint32_t>(CpuFeatures::SupportedFeatures()));
SetHeaderValue(kFlagHashOffset, FlagList::Hash());
- SetHeaderValue(kNumReservationsOffset, reservations.length());
+ SetHeaderValue(kNumReservationsOffset, static_cast<int>(reservations.size()));
SetHeaderValue(kNumCodeStubKeysOffset, num_stub_keys);
- SetHeaderValue(kPayloadLengthOffset, payload->length());
+ SetHeaderValue(kPayloadLengthOffset, static_cast<int>(payload->size()));
// Zero out any padding in the header.
memset(data_ + kUnalignedHeaderSize, 0, kHeaderSize - kUnalignedHeaderSize);
// Copy reservation chunk sizes.
- CopyBytes(data_ + kHeaderSize, reinterpret_cast<byte*>(reservations.begin()),
+ CopyBytes(data_ + kHeaderSize,
+ reinterpret_cast<const byte*>(reservations.data()),
reservation_size);
// Copy code stub keys.
CopyBytes(data_ + kHeaderSize + reservation_size,
- reinterpret_cast<byte*>(stub_keys->begin()), stub_keys_size);
+ reinterpret_cast<const byte*>(stub_keys->data()), stub_keys_size);
// Zero out any padding before the payload.
memset(data_ + payload_offset, 0, padded_payload_offset - payload_offset);
// Copy serialized data.
- CopyBytes(data_ + padded_payload_offset, payload->begin(),
- static_cast<size_t>(payload->length()));
+ CopyBytes(data_ + padded_payload_offset, payload->data(),
+ static_cast<size_t>(payload->size()));
Checksum checksum(DataWithoutHeader());
SetHeaderValue(kChecksum1Offset, checksum.a());
......
@@ -22,7 +22,7 @@ class CodeSerializer : public Serializer {
MUST_USE_RESULT static MaybeHandle<SharedFunctionInfo> Deserialize(
Isolate* isolate, ScriptData* cached_data, Handle<String> source);
- const List<uint32_t>* stub_keys() const { return &stub_keys_; }
+ const std::vector<uint32_t>* stub_keys() const { return &stub_keys_; }
uint32_t source_hash() const { return source_hash_; }
@@ -51,7 +51,7 @@ class CodeSerializer : public Serializer {
DisallowHeapAllocation no_gc_;
uint32_t source_hash_;
- List<uint32_t> stub_keys_;
+ std::vector<uint32_t> stub_keys_;
DISALLOW_COPY_AND_ASSIGN(CodeSerializer);
};
@@ -122,7 +122,8 @@ class SerializedCodeData : public SerializedData {
SanityCheckResult* rejection_result);
// Used when producing.
- SerializedCodeData(const List<byte>* payload, const CodeSerializer* cs);
+ SerializedCodeData(const std::vector<byte>* payload,
+ const CodeSerializer* cs);
// Return ScriptData object and relinquish ownership over it to the caller.
ScriptData* GetScriptData();
......
@@ -29,12 +29,12 @@ namespace internal {
class NativesStore {
public:
~NativesStore() {
- for (int i = 0; i < native_names_.length(); i++) {
+ for (size_t i = 0; i < native_names_.size(); i++) {
native_names_[i].Dispose();
}
}
- int GetBuiltinsCount() { return native_ids_.length(); }
+ int GetBuiltinsCount() { return static_cast<int>(native_ids_.size()); }
int GetDebuggerCount() { return debugger_count_; }
Vector<const char> GetScriptSource(int index) {
@@ -44,20 +44,18 @@ class NativesStore {
Vector<const char> GetScriptName(int index) { return native_names_[index]; }
int GetIndex(const char* id) {
- for (int i = 0; i < native_ids_.length(); ++i) {
+ for (int i = 0; i < static_cast<int>(native_ids_.size()); ++i) {
int native_id_length = native_ids_[i].length();
if ((static_cast<int>(strlen(id)) == native_id_length) &&
(strncmp(id, native_ids_[i].start(), native_id_length) == 0)) {
return i;
}
}
- DCHECK(false);
- return -1;
+ UNREACHABLE();
}
Vector<const char> GetScriptsSource() {
- DCHECK(false); // Not implemented.
- return Vector<const char>();
+ UNREACHABLE(); // Not implemented.
}
static NativesStore* MakeFromScriptsSource(SnapshotByteSource* source) {
@@ -98,17 +96,15 @@ class NativesStore {
const byte* source;
int id_length = bytes->GetBlob(&id);
int source_length = bytes->GetBlob(&source);
- Vector<const char> id_vector(reinterpret_cast<const char*>(id), id_length);
- Vector<const char> source_vector(reinterpret_cast<const char*>(source),
+ native_ids_.emplace_back(reinterpret_cast<const char*>(id), id_length);
+ native_source_.emplace_back(reinterpret_cast<const char*>(source),
source_length);
- native_ids_.Add(id_vector);
- native_source_.Add(source_vector);
- native_names_.Add(NameFromId(id, id_length));
+ native_names_.push_back(NameFromId(id, id_length));
}
- List<Vector<const char> > native_ids_;
- List<Vector<const char> > native_names_;
- List<Vector<const char> > native_source_;
+ std::vector<Vector<const char>> native_ids_;
+ std::vector<Vector<const char>> native_names_;
+ std::vector<Vector<const char>> native_source_;
int debugger_count_;
DISALLOW_COPY_AND_ASSIGN(NativesStore);
......
@@ -101,7 +101,7 @@ void PartialSerializer::SerializeObject(HeapObject* obj, HowToCode how_to_code,
JSObject* jsobj = JSObject::cast(obj);
if (jsobj->GetEmbedderFieldCount() > 0) {
DCHECK_NOT_NULL(serialize_embedder_fields_.callback);
- embedder_field_holders_.Add(jsobj);
+ embedder_field_holders_.push_back(jsobj);
}
}
@@ -126,16 +126,16 @@ bool PartialSerializer::ShouldBeInThePartialSnapshotCache(HeapObject* o) {
}
void PartialSerializer::SerializeEmbedderFields() {
- int count = embedder_field_holders_.length();
- if (count == 0) return;
+ if (embedder_field_holders_.empty()) return;
DisallowHeapAllocation no_gc;
DisallowJavascriptExecution no_js(isolate());
DisallowCompilation no_compile(isolate());
DCHECK_NOT_NULL(serialize_embedder_fields_.callback);
sink_.Put(kEmbedderFieldsData, "embedder fields data");
- while (embedder_field_holders_.length() > 0) {
+ while (!embedder_field_holders_.empty()) {
HandleScope scope(isolate());
- Handle<JSObject> obj(embedder_field_holders_.RemoveLast(), isolate());
+ Handle<JSObject> obj(embedder_field_holders_.back(), isolate());
+ embedder_field_holders_.pop_back();
SerializerReference reference = reference_map_.Lookup(*obj);
DCHECK(reference.is_back_reference());
int embedder_fields_count = obj->GetEmbedderFieldCount();
......
@@ -36,7 +36,7 @@ class PartialSerializer : public Serializer {
void CheckRehashability(HeapObject* table);
StartupSerializer* startup_serializer_;
- List<JSObject*> embedder_field_holders_;
+ std::vector<JSObject*> embedder_field_holders_;
v8::SerializeEmbedderFieldsCallback serialize_embedder_fields_;
GlobalDictionary* rehashable_global_dictionary_;
// Indicates whether we only serialized hash tables that we can rehash.
......
@@ -72,10 +72,10 @@ void SerializedData::AllocateData(int size) {
// - not during serialization. The partial serializer adds to it explicitly.
DISABLE_CFI_PERF
void SerializerDeserializer::Iterate(Isolate* isolate, RootVisitor* visitor) {
- List<Object*>* cache = isolate->partial_snapshot_cache();
- for (int i = 0;; ++i) {
+ std::vector<Object*>* cache = isolate->partial_snapshot_cache();
+ for (size_t i = 0;; ++i) {
// Extend the array ready to get a value when deserializing.
- if (cache->length() <= i) cache->Add(Smi::kZero);
+ if (cache->size() <= i) cache->push_back(Smi::kZero);
// During deserialization, the visitor populates the partial snapshot cache
// and eventually terminates the cache with undefined.
visitor->VisitRootPointer(Root::kPartialSnapshotCache, &cache->at(i));
......
@@ -93,8 +93,9 @@ void Serializer::OutputStatistics(const char* name) {
}
void Serializer::SerializeDeferredObjects() {
- while (deferred_objects_.length() > 0) {
- HeapObject* obj = deferred_objects_.RemoveLast();
+ while (!deferred_objects_.empty()) {
+ HeapObject* obj = deferred_objects_.back();
+ deferred_objects_.pop_back();
ObjectSerializer obj_serializer(this, obj, &sink_, kPlain, kStartOfObject);
obj_serializer.SerializeDeferred();
}
@@ -114,21 +115,21 @@ void Serializer::VisitRootPointers(Root root, Object** start, Object** end) {
}
void Serializer::EncodeReservations(
- List<SerializedData::Reservation>* out) const {
+ std::vector<SerializedData::Reservation>* out) const {
for (int i = 0; i < kNumberOfPreallocatedSpaces; i++) {
- for (int j = 0; j < completed_chunks_[i].length(); j++) {
- out->Add(SerializedData::Reservation(completed_chunks_[i][j]));
+ for (size_t j = 0; j < completed_chunks_[i].size(); j++) {
+ out->push_back(SerializedData::Reservation(completed_chunks_[i][j]));
}
- if (pending_chunk_[i] > 0 || completed_chunks_[i].length() == 0) {
- out->Add(SerializedData::Reservation(pending_chunk_[i]));
+ if (pending_chunk_[i] > 0 || completed_chunks_[i].size() == 0) {
+ out->push_back(SerializedData::Reservation(pending_chunk_[i]));
}
- out->last().mark_as_last();
+ out->back().mark_as_last();
}
- out->Add(SerializedData::Reservation(num_maps_ * Map::kSize));
- out->last().mark_as_last();
- out->Add(SerializedData::Reservation(large_objects_total_size_));
- out->last().mark_as_last();
+ out->push_back(SerializedData::Reservation(num_maps_ * Map::kSize));
+ out->back().mark_as_last();
+ out->push_back(SerializedData::Reservation(large_objects_total_size_));
+ out->back().mark_as_last();
}
#ifdef DEBUG
@@ -141,18 +142,18 @@ bool Serializer::BackReferenceIsAlreadyAllocated(
} else if (space == MAP_SPACE) {
return reference.map_index() < num_maps_;
} else {
- int chunk_index = reference.chunk_index();
- if (chunk_index == completed_chunks_[space].length()) {
+ size_t chunk_index = reference.chunk_index();
+ if (chunk_index == completed_chunks_[space].size()) {
return reference.chunk_offset() < pending_chunk_[space];
} else {
- return chunk_index < completed_chunks_[space].length() &&
+ return chunk_index < completed_chunks_[space].size() &&
reference.chunk_offset() < completed_chunks_[space][chunk_index];
}
}
}
void Serializer::PrintStack() {
- for (const auto& o : stack_) {
+ for (const auto o : stack_) {
o->Print();
PrintF("\n");
}
@@ -310,14 +311,14 @@ SerializerReference Serializer::Allocate(AllocationSpace space, int size) {
// current chunk and start a new one.
sink_.Put(kNextChunk, "NextChunk");
sink_.Put(space, "NextChunkSpace");
- completed_chunks_[space].Add(pending_chunk_[space]);
+ completed_chunks_[space].push_back(pending_chunk_[space]);
pending_chunk_[space] = 0;
new_chunk_size = size;
}
uint32_t offset = pending_chunk_[space];
pending_chunk_[space] = new_chunk_size;
return SerializerReference::BackReference(
- space, completed_chunks_[space].length(), offset);
+ space, static_cast<uint32_t>(completed_chunks_[space].size()), offset);
}
void Serializer::Pad() {
@@ -338,15 +339,16 @@ void Serializer::InitializeCodeAddressMap() {
}
Code* Serializer::CopyCode(Code* code) {
- code_buffer_.Rewind(0); // Clear buffer without deleting backing store.
+ code_buffer_.clear(); // Clear buffer without deleting backing store.
int size = code->CodeSize();
- code_buffer_.AddAll(Vector<byte>(code->address(), size));
- return Code::cast(HeapObject::FromAddress(&code_buffer_.first()));
+ code_buffer_.insert(code_buffer_.end(), code->address(),
+ code->address() + size);
+ return Code::cast(HeapObject::FromAddress(&code_buffer_.front()));
}
bool Serializer::HasNotExceededFirstPageOfEachSpace() {
for (int i = 0; i < kNumberOfPreallocatedSpaces; i++) {
- if (!completed_chunks_[i].is_empty()) return false;
+ if (!completed_chunks_[i].empty()) return false;
}
return true;
}
......
@@ -125,7 +125,7 @@ class Serializer : public SerializerDeserializer {
explicit Serializer(Isolate* isolate);
~Serializer() override;
- void EncodeReservations(List<SerializedData::Reservation>* out) const;
+ void EncodeReservations(std::vector<SerializedData::Reservation>* out) const;
void SerializeDeferredObjects();
@@ -220,14 +220,14 @@ class Serializer : public SerializerDeserializer {
void QueueDeferredObject(HeapObject* obj) {
DCHECK(reference_map_.Lookup(obj).is_back_reference());
- deferred_objects_.Add(obj);
+ deferred_objects_.push_back(obj);
}
void OutputStatistics(const char* name);
#ifdef DEBUG
- void PushStack(HeapObject* o) { stack_.Add(o); }
- void PopStack() { stack_.RemoveLast(); }
+ void PushStack(HeapObject* o) { stack_.push_back(o); }
+ void PopStack() { stack_.pop_back(); }
void PrintStack();
bool BackReferenceIsAlreadyAllocated(SerializerReference back_reference);
@@ -255,7 +255,7 @@ class Serializer : public SerializerDeserializer {
// page. So we track the chunk size in pending_chunk_ of a space, but
// when it exceeds a page, we complete the current chunk and start a new one.
uint32_t pending_chunk_[kNumberOfPreallocatedSpaces];
- List<uint32_t> completed_chunks_[kNumberOfPreallocatedSpaces];
+ std::vector<uint32_t> completed_chunks_[kNumberOfPreallocatedSpaces];
uint32_t max_chunk_size_[kNumberOfPreallocatedSpaces];
// Number of maps that we need to allocate.
uint32_t num_maps_;
@@ -271,10 +271,10 @@ class Serializer : public SerializerDeserializer {
// from index 0.
uint32_t seen_backing_stores_index_;
- List<byte> code_buffer_;
+ std::vector<byte> code_buffer_;
// To handle stack overflow.
- List<HeapObject*> deferred_objects_;
+ std::vector<HeapObject*> deferred_objects_;
#ifdef OBJECT_PRINT
static const int kInstanceTypes = 256;
@@ -283,7 +283,7 @@ class Serializer : public SerializerDeserializer {
#endif // OBJECT_PRINT
#ifdef DEBUG
- List<HeapObject*> stack_;
+ std::vector<HeapObject*> stack_;
#endif // DEBUG
DISALLOW_COPY_AND_ASSIGN(Serializer);
......
@@ -80,8 +80,9 @@ MaybeHandle<Context> Snapshot::NewContextFromSnapshot(
return result;
}
- void ProfileDeserialization(const SnapshotData* startup_snapshot,
- const List<SnapshotData*>* context_snapshots) {
+ void ProfileDeserialization(
+ const SnapshotData* startup_snapshot,
+ const std::vector<SnapshotData*>& context_snapshots) {
if (FLAG_profile_deserialization) {
int startup_total = 0;
PrintF("Deserialization will reserve:\n");
@@ -89,24 +90,24 @@ void ProfileDeserialization(const SnapshotData* startup_snapshot,
startup_total += reservation.chunk_size();
}
PrintF("%10d bytes per isolate\n", startup_total);
- for (int i = 0; i < context_snapshots->length(); i++) {
+ for (size_t i = 0; i < context_snapshots.size(); i++) {
int context_total = 0;
- for (const auto& reservation : context_snapshots->at(i)->Reservations()) {
+ for (const auto& reservation : context_snapshots[i]->Reservations()) {
context_total += reservation.chunk_size();
}
- PrintF("%10d bytes per context #%d\n", context_total, i);
+ PrintF("%10d bytes per context #%zu\n", context_total, i);
}
}
}
v8::StartupData Snapshot::CreateSnapshotBlob(
const SnapshotData* startup_snapshot,
- const List<SnapshotData*>* context_snapshots, bool can_be_rehashed) {
- int num_contexts = context_snapshots->length();
+ const std::vector<SnapshotData*>& context_snapshots, bool can_be_rehashed) {
+ int num_contexts = static_cast<int>(context_snapshots.size());
int startup_snapshot_offset = StartupSnapshotOffset(num_contexts);
int total_length = startup_snapshot_offset;
total_length += startup_snapshot->RawData().length();
- for (const auto& context_snapshot : *context_snapshots) {
+ for (const auto context_snapshot : context_snapshots) {
total_length += context_snapshot->RawData().length();
}
@@ -127,7 +128,7 @@ v8::StartupData Snapshot::CreateSnapshotBlob(
payload_offset += payload_length;
for (int i = 0; i < num_contexts; i++) {
memcpy(data + ContextSnapshotOffsetOffset(i), &payload_offset, kInt32Size);
- SnapshotData* context_snapshot = context_snapshots->at(i);
+ SnapshotData* context_snapshot = context_snapshots[i];
payload_length = context_snapshot->RawData().length();
memcpy(data + payload_offset, context_snapshot->RawData().start(),
payload_length);
@@ -194,13 +195,13 @@ Vector<const byte> Snapshot::ExtractContextData(const v8::StartupData* data,
SnapshotData::SnapshotData(const Serializer* serializer) {
DisallowHeapAllocation no_gc;
- List<Reservation> reservations;
+ std::vector<Reservation> reservations;
serializer->EncodeReservations(&reservations);
- const List<byte>* payload = serializer->sink()->data();
+ const std::vector<byte>* payload = serializer->sink()->data();
// Calculate sizes.
- int reservation_size = reservations.length() * kInt32Size;
- int size = kHeaderSize + reservation_size + payload->length();
+ int reservation_size = static_cast<int>(reservations.size()) * kInt32Size;
+ int size = kHeaderSize + reservation_size + static_cast<int>(payload->size());
// Allocate backing store and create result data.
AllocateData(size);
@@ -208,16 +209,16 @@ SnapshotData::SnapshotData(const Serializer* serializer) {
// Set header values.
SetMagicNumber(serializer->isolate());
SetHeaderValue(kVersionHashOffset, Version::Hash());
- SetHeaderValue(kNumReservationsOffset, reservations.length());
- SetHeaderValue(kPayloadLengthOffset, payload->length());
+ SetHeaderValue(kNumReservationsOffset, static_cast<int>(reservations.size()));
+ SetHeaderValue(kPayloadLengthOffset, static_cast<int>(payload->size()));
// Copy reservation chunk sizes.
- CopyBytes(data_ + kHeaderSize, reinterpret_cast<byte*>(reservations.begin()),
+ CopyBytes(data_ + kHeaderSize, reinterpret_cast<byte*>(reservations.data()),
reservation_size);
// Copy serialized data.
- CopyBytes(data_ + kHeaderSize + reservation_size, payload->begin(),
- static_cast<size_t>(payload->length()));
+ CopyBytes(data_ + kHeaderSize + reservation_size, payload->data(),
+ static_cast<size_t>(payload->size()));
}
bool SnapshotData::IsSane() {
......
@@ -29,7 +29,7 @@ void SnapshotByteSink::PutInt(uintptr_t integer, const char* description) {
void SnapshotByteSink::PutRaw(const byte* data, int number_of_bytes,
const char* description) {
- data_.AddAll(Vector<byte>(const_cast<byte*>(data), number_of_bytes));
+ data_.insert(data_.end(), data, data + number_of_bytes);
}
......
@@ -86,7 +86,7 @@ class SnapshotByteSink {
~SnapshotByteSink() {}
- void Put(byte b, const char* description) { data_.Add(b); }
+ void Put(byte b, const char* description) { data_.push_back(b); }
void PutSection(int b, const char* description) {
DCHECK_LE(b, kMaxUInt8);
@@ -95,12 +95,12 @@ class SnapshotByteSink {
void PutInt(uintptr_t integer, const char* description);
void PutRaw(const byte* data, int number_of_bytes, const char* description);
- int Position() { return data_.length(); }
+ int Position() { return static_cast<int>(data_.size()); }
- const List<byte>* data() const { return &data_; }
+ const std::vector<byte>* data() const { return &data_; }
private:
- List<byte> data_;
+ std::vector<byte> data_;
};
} // namespace internal
......
@@ -71,7 +71,8 @@ class Snapshot : public AllStatic {
static v8::StartupData CreateSnapshotBlob(
const SnapshotData* startup_snapshot,
- const List<SnapshotData*>* context_snapshots, bool can_be_rehashed);
+ const std::vector<SnapshotData*>& context_snapshots,
+ bool can_be_rehashed);
#ifdef DEBUG
static bool SnapshotIsValid(v8::StartupData* snapshot_blob);
......
@@ -20,7 +20,7 @@ void StartupDeserializer::DeserializeInto(Isolate* isolate) {
// No active handles.
DCHECK(isolate->handle_scope_implementer()->blocks()->is_empty());
// Partial snapshot cache is not yet populated.
- DCHECK(isolate->partial_snapshot_cache()->is_empty());
+ DCHECK(isolate->partial_snapshot_cache()->empty());
// Builtins are not yet created.
DCHECK(!isolate->builtins()->is_initialized());
......