Commit 34645da5 authored by yangguo, committed by Commit bot

[interpreter] do not serialize bytecode for snapshot.

Code compiled during snapshot creation is overwhelmingly for functions
that are used only for bootstrapping. It makes no sense to include
their code in the startup snapshot: doing so bloats the snapshot size
and slows down deserialization.

Snapshot sizes for comparison, for ia32:
w/o --ignition:   484k
w/ --ignition:    537k
bytecode removed: 489k
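
The core of the change is a substitution in the startup serializer, so
that compiled function code and bytecode never reach the snapshot. A
condensed sketch (mirroring the StartupSerializer::SerializeObject hunk
in the diff below; no new API, just the essence of the change):

// Condensed from the serializer hunk below. Before encoding an object,
// replace compiled function code (and, outside the builtins list,
// interpreter entry trampolines) with the lazy-compile builtin, and
// drop bytecode arrays entirely; affected functions recompile lazily.
if (obj->IsCode() &&
    (Code::cast(obj)->kind() == Code::FUNCTION ||
     (!serializing_builtins_ &&
      Code::cast(obj)->is_interpreter_entry_trampoline()))) {
  obj = isolate()->builtins()->builtin(Builtins::kCompileLazy);
} else if (obj->IsBytecodeArray()) {
  obj = isolate()->heap()->undefined_value();
}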

R=rmcilroy@chromium.org,mstarzinger@chromium.org

Review URL: https://codereview.chromium.org/1667693002

Cr-Commit-Position: refs/heads/master@{#33734}
parent 76bfc16b
@@ -2417,14 +2417,6 @@ bool Heap::CreateInitialMaps() {
     ByteArray* byte_array;
     if (!AllocateByteArray(0, TENURED).To(&byte_array)) return false;
     set_empty_byte_array(byte_array);
-
-    BytecodeArray* bytecode_array = nullptr;
-    AllocationResult allocation =
-        AllocateBytecodeArray(0, nullptr, 0, 0, empty_fixed_array());
-    if (!allocation.To(&bytecode_array)) {
-      return false;
-    }
-    set_empty_bytecode_array(bytecode_array);
   }
 
 #define ALLOCATE_EMPTY_FIXED_TYPED_ARRAY(Type, type, TYPE, ctype, size) \
...
@@ -190,9 +190,7 @@ namespace internal {
   V(Object, noscript_shared_function_infos, NoScriptSharedFunctionInfos) \
   V(FixedArray, interpreter_table, InterpreterTable) \
   V(Map, bytecode_array_map, BytecodeArrayMap) \
-  V(WeakCell, empty_weak_cell, EmptyWeakCell) \
-  V(BytecodeArray, empty_bytecode_array, EmptyBytecodeArray)
+  V(WeakCell, empty_weak_cell, EmptyWeakCell)
 
 // Entries in this list are limited to Smis and are not visited during GC.
 #define SMI_ROOT_LIST(V) \
@@ -254,7 +252,6 @@ namespace internal {
   V(OrderedHashTableMap) \
   V(EmptyFixedArray) \
   V(EmptyByteArray) \
-  V(EmptyBytecodeArray) \
   V(EmptyDescriptorArray) \
   V(ArgumentsMarker) \
   V(SymbolMap) \
...
@@ -1868,7 +1868,6 @@ bool V8HeapExplorer::IterateAndExtractSinglePass() {
 bool V8HeapExplorer::IsEssentialObject(Object* object) {
   return object->IsHeapObject() && !object->IsOddball() &&
          object != heap_->empty_byte_array() &&
-         object != heap_->empty_bytecode_array() &&
          object != heap_->empty_fixed_array() &&
          object != heap_->empty_descriptor_array() &&
          object != heap_->fixed_array_map() && object != heap_->cell_map() &&
...
@@ -71,6 +71,8 @@ Object* CompileOptimized(Isolate* isolate, Handle<JSFunction> function,
   DCHECK(function->code()->kind() == Code::FUNCTION ||
          function->code()->kind() == Code::OPTIMIZED_FUNCTION ||
+         (function->code()->is_interpreter_entry_trampoline() &&
+          function->shared()->HasBytecodeArray()) ||
          function->IsInOptimizationQueue());
   return function->code();
 }
...
@@ -1680,9 +1680,10 @@ bool Serializer::SerializeKnownObject(HeapObject* obj, HowToCode how_to_code,
   return false;
 }
 
 StartupSerializer::StartupSerializer(Isolate* isolate, SnapshotByteSink* sink)
-    : Serializer(isolate, sink), root_index_wave_front_(0) {
+    : Serializer(isolate, sink),
+      root_index_wave_front_(0),
+      serializing_builtins_(false) {
   // Clear the cache of objects used by the partial snapshot. After the
   // strong roots have been serialized we can create a partial snapshot
   // which will repopulate the cache with objects needed by that partial
@@ -1696,6 +1697,19 @@ void StartupSerializer::SerializeObject(HeapObject* obj, HowToCode how_to_code,
                                         WhereToPoint where_to_point, int skip) {
   DCHECK(!obj->IsJSFunction());
 
+  if (obj->IsCode()) {
+    Code* code = Code::cast(obj);
+    // If the function code is compiled (either as native code or bytecode),
+    // replace it with the lazy-compile builtin. The only exception is when
+    // we are serializing the canonical interpreter-entry-trampoline builtin.
+    if (code->kind() == Code::FUNCTION ||
+        (!serializing_builtins_ && code->is_interpreter_entry_trampoline())) {
+      obj = isolate()->builtins()->builtin(Builtins::kCompileLazy);
+    }
+  } else if (obj->IsBytecodeArray()) {
+    obj = isolate()->heap()->undefined_value();
+  }
+
   int root_index = root_index_map_.Lookup(obj);
   // We can only encode roots as such if they have already been serialized.
   // That applies to root indices below the wave front.
@@ -1705,10 +1719,6 @@ void StartupSerializer::SerializeObject(HeapObject* obj, HowToCode how_to_code,
     return;
   }
 
-  if (obj->IsCode() && Code::cast(obj)->kind() == Code::FUNCTION) {
-    obj = isolate()->builtins()->builtin(Builtins::kCompileLazy);
-  }
-
   if (SerializeKnownObject(obj, how_to_code, where_to_point, skip)) return;
 
   FlushSkip(skip);
@@ -1733,6 +1743,11 @@ void StartupSerializer::SerializeWeakReferencesAndDeferred() {
   Pad();
 }
 
+void StartupSerializer::Synchronize(VisitorSynchronization::SyncTag tag) {
+  // We expect the builtins tag after builtins have been serialized.
+  DCHECK(!serializing_builtins_ || tag == VisitorSynchronization::kBuiltins);
+  serializing_builtins_ = (tag == VisitorSynchronization::kHandleScope);
+}
+
 void Serializer::PutRoot(int root_index,
                          HeapObject* object,
...
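For context on the Synchronize override above: root iteration emits a
sync tag after each root category, so the builtins list is visited
between the kHandleScope and kBuiltins tags. An abridged sketch of the
relevant ordering (the real loop lives in Heap::IterateStrongRoots;
only the two tags this change relies on are shown):

// Abridged sketch of Heap::IterateStrongRoots.
isolate_->handle_scope_implementer()->Iterate(v);
v->Synchronize(VisitorSynchronization::kHandleScope);  // flag -> true
isolate_->builtins()->IterateBuiltins(v);  // trampoline builtin kept as-is
v->Synchronize(VisitorSynchronization::kBuiltins);     // flag -> false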
@@ -652,8 +652,11 @@ class StartupSerializer : public Serializer {
                        WhereToPoint where_to_point, int skip) override;
   void SerializeWeakReferencesAndDeferred();
+  void Synchronize(VisitorSynchronization::SyncTag tag) override;
 
  private:
   intptr_t root_index_wave_front_;
+  bool serializing_builtins_;
   DISALLOW_COPY_AND_ASSIGN(StartupSerializer);
 };
...