serialize.cc 99.7 KB
Newer Older
1
// Copyright 2012 the V8 project authors. All rights reserved.
2 3
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
4

5 6 7 8
#include "src/v8.h"

#include "src/accessors.h"
#include "src/api.h"
9
#include "src/base/platform/platform.h"
10
#include "src/bootstrapper.h"
11
#include "src/code-stubs.h"
12
#include "src/cpu-profiler.h"
13 14 15
#include "src/deoptimizer.h"
#include "src/execution.h"
#include "src/global-handles.h"
16 17
#include "src/ic/ic.h"
#include "src/ic/stub-cache.h"
18
#include "src/natives.h"
19
#include "src/objects.h"
20
#include "src/parser.h"
21
#include "src/runtime/runtime.h"
22 23
#include "src/serialize.h"
#include "src/snapshot.h"
24
#include "src/snapshot-source-sink.h"
25
#include "src/v8threads.h"
26
#include "src/version.h"
27

28 29
namespace v8 {
namespace internal {
30

31

32 33 34 35
// -----------------------------------------------------------------------------
// Coding of external references.


36 37 38 39 40 41
ExternalReferenceTable* ExternalReferenceTable::instance(Isolate* isolate) {
  ExternalReferenceTable* external_reference_table =
      isolate->external_reference_table();
  if (external_reference_table == NULL) {
    external_reference_table = new ExternalReferenceTable(isolate);
    isolate->set_external_reference_table(external_reference_table);
42
  }
43 44
  return external_reference_table;
}
45 46


47
ExternalReferenceTable::ExternalReferenceTable(Isolate* isolate) {
48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176
  // Miscellaneous
  Add(ExternalReference::roots_array_start(isolate).address(),
      "Heap::roots_array_start()");
  Add(ExternalReference::address_of_stack_limit(isolate).address(),
      "StackGuard::address_of_jslimit()");
  Add(ExternalReference::address_of_real_stack_limit(isolate).address(),
      "StackGuard::address_of_real_jslimit()");
  Add(ExternalReference::new_space_start(isolate).address(),
      "Heap::NewSpaceStart()");
  Add(ExternalReference::new_space_mask(isolate).address(),
      "Heap::NewSpaceMask()");
  Add(ExternalReference::new_space_allocation_limit_address(isolate).address(),
      "Heap::NewSpaceAllocationLimitAddress()");
  Add(ExternalReference::new_space_allocation_top_address(isolate).address(),
      "Heap::NewSpaceAllocationTopAddress()");
  Add(ExternalReference::debug_break(isolate).address(), "Debug::Break()");
  Add(ExternalReference::debug_step_in_fp_address(isolate).address(),
      "Debug::step_in_fp_addr()");
  Add(ExternalReference::mod_two_doubles_operation(isolate).address(),
      "mod_two_doubles");
  // Keyed lookup cache.
  Add(ExternalReference::keyed_lookup_cache_keys(isolate).address(),
      "KeyedLookupCache::keys()");
  Add(ExternalReference::keyed_lookup_cache_field_offsets(isolate).address(),
      "KeyedLookupCache::field_offsets()");
  Add(ExternalReference::handle_scope_next_address(isolate).address(),
      "HandleScope::next");
  Add(ExternalReference::handle_scope_limit_address(isolate).address(),
      "HandleScope::limit");
  Add(ExternalReference::handle_scope_level_address(isolate).address(),
      "HandleScope::level");
  Add(ExternalReference::new_deoptimizer_function(isolate).address(),
      "Deoptimizer::New()");
  Add(ExternalReference::compute_output_frames_function(isolate).address(),
      "Deoptimizer::ComputeOutputFrames()");
  Add(ExternalReference::address_of_min_int().address(),
      "LDoubleConstant::min_int");
  Add(ExternalReference::address_of_one_half().address(),
      "LDoubleConstant::one_half");
  Add(ExternalReference::isolate_address(isolate).address(), "isolate");
  Add(ExternalReference::address_of_negative_infinity().address(),
      "LDoubleConstant::negative_infinity");
  Add(ExternalReference::power_double_double_function(isolate).address(),
      "power_double_double_function");
  Add(ExternalReference::power_double_int_function(isolate).address(),
      "power_double_int_function");
  Add(ExternalReference::math_log_double_function(isolate).address(),
      "std::log");
  Add(ExternalReference::store_buffer_top(isolate).address(),
      "store_buffer_top");
  Add(ExternalReference::address_of_the_hole_nan().address(), "the_hole_nan");
  Add(ExternalReference::get_date_field_function(isolate).address(),
      "JSDate::GetField");
  Add(ExternalReference::date_cache_stamp(isolate).address(),
      "date_cache_stamp");
  Add(ExternalReference::address_of_pending_message_obj(isolate).address(),
      "address_of_pending_message_obj");
  Add(ExternalReference::address_of_has_pending_message(isolate).address(),
      "address_of_has_pending_message");
  Add(ExternalReference::address_of_pending_message_script(isolate).address(),
      "pending_message_script");
  Add(ExternalReference::get_make_code_young_function(isolate).address(),
      "Code::MakeCodeYoung");
  Add(ExternalReference::cpu_features().address(), "cpu_features");
  Add(ExternalReference::old_pointer_space_allocation_top_address(isolate)
          .address(),
      "Heap::OldPointerSpaceAllocationTopAddress");
  Add(ExternalReference::old_pointer_space_allocation_limit_address(isolate)
          .address(),
      "Heap::OldPointerSpaceAllocationLimitAddress");
  Add(ExternalReference::old_data_space_allocation_top_address(isolate)
          .address(),
      "Heap::OldDataSpaceAllocationTopAddress");
  Add(ExternalReference::old_data_space_allocation_limit_address(isolate)
          .address(),
      "Heap::OldDataSpaceAllocationLimitAddress");
  Add(ExternalReference::allocation_sites_list_address(isolate).address(),
      "Heap::allocation_sites_list_address()");
  Add(ExternalReference::address_of_uint32_bias().address(), "uint32_bias");
  Add(ExternalReference::get_mark_code_as_executed_function(isolate).address(),
      "Code::MarkCodeAsExecuted");
  Add(ExternalReference::is_profiling_address(isolate).address(),
      "CpuProfiler::is_profiling");
  Add(ExternalReference::scheduled_exception_address(isolate).address(),
      "Isolate::scheduled_exception");
  Add(ExternalReference::invoke_function_callback(isolate).address(),
      "InvokeFunctionCallback");
  Add(ExternalReference::invoke_accessor_getter_callback(isolate).address(),
      "InvokeAccessorGetterCallback");
  Add(ExternalReference::flush_icache_function(isolate).address(),
      "CpuFeatures::FlushICache");
  Add(ExternalReference::log_enter_external_function(isolate).address(),
      "Logger::EnterExternal");
  Add(ExternalReference::log_leave_external_function(isolate).address(),
      "Logger::LeaveExternal");
  Add(ExternalReference::address_of_minus_one_half().address(),
      "double_constants.minus_one_half");
  Add(ExternalReference::stress_deopt_count(isolate).address(),
      "Isolate::stress_deopt_count_address()");

  // Debug addresses
  Add(ExternalReference::debug_after_break_target_address(isolate).address(),
      "Debug::after_break_target_address()");
  Add(ExternalReference::debug_restarter_frame_function_pointer_address(isolate)
          .address(),
      "Debug::restarter_frame_function_pointer_address()");
  Add(ExternalReference::debug_is_active_address(isolate).address(),
      "Debug::is_active_address()");

#ifndef V8_INTERPRETED_REGEXP
  Add(ExternalReference::re_case_insensitive_compare_uc16(isolate).address(),
      "NativeRegExpMacroAssembler::CaseInsensitiveCompareUC16()");
  Add(ExternalReference::re_check_stack_guard_state(isolate).address(),
      "RegExpMacroAssembler*::CheckStackGuardState()");
  Add(ExternalReference::re_grow_stack(isolate).address(),
      "NativeRegExpMacroAssembler::GrowStack()");
  Add(ExternalReference::re_word_character_map().address(),
      "NativeRegExpMacroAssembler::word_character_map");
  Add(ExternalReference::address_of_regexp_stack_limit(isolate).address(),
      "RegExpStack::limit_address()");
  Add(ExternalReference::address_of_regexp_stack_memory_address(isolate)
          .address(),
      "RegExpStack::memory_address()");
  Add(ExternalReference::address_of_regexp_stack_memory_size(isolate).address(),
      "RegExpStack::memory_size()");
  Add(ExternalReference::address_of_static_offsets_vector(isolate).address(),
      "OffsetsVector::static_offsets_vector");
#endif  // V8_INTERPRETED_REGEXP

177 178 179 180 181 182 183 184 185 186 187 188
  // The following populates all of the different type of external references
  // into the ExternalReferenceTable.
  //
  // NOTE: This function was originally 100k of code.  It has since been
  // rewritten to be mostly table driven, as the callback macro style tends to
  // very easily cause code bloat.  Please be careful in the future when adding
  // new references.

  struct RefTableEntry {
    uint16_t id;
    const char* name;
  };
189

190 191 192 193 194
  static const RefTableEntry c_builtins[] = {
#define DEF_ENTRY_C(name, ignored)           \
  { Builtins::c_##name, "Builtins::" #name } \
  ,
      BUILTIN_LIST_C(DEF_ENTRY_C)
195
#undef DEF_ENTRY_C
196
  };
197

198 199 200 201 202 203 204 205 206 207 208 209 210 211 212
  for (unsigned i = 0; i < arraysize(c_builtins); ++i) {
    ExternalReference ref(static_cast<Builtins::CFunctionId>(c_builtins[i].id),
                          isolate);
    Add(ref.address(), c_builtins[i].name);
  }

  static const RefTableEntry builtins[] = {
#define DEF_ENTRY_C(name, ignored)          \
  { Builtins::k##name, "Builtins::" #name } \
  ,
#define DEF_ENTRY_A(name, i1, i2, i3)       \
  { Builtins::k##name, "Builtins::" #name } \
  ,
      BUILTIN_LIST_C(DEF_ENTRY_C) BUILTIN_LIST_A(DEF_ENTRY_A)
          BUILTIN_LIST_DEBUG_A(DEF_ENTRY_A)
213 214
#undef DEF_ENTRY_C
#undef DEF_ENTRY_A
215
  };
216

217 218 219 220
  for (unsigned i = 0; i < arraysize(builtins); ++i) {
    ExternalReference ref(static_cast<Builtins::Name>(builtins[i].id), isolate);
    Add(ref.address(), builtins[i].name);
  }
221

222 223 224 225
  static const RefTableEntry runtime_functions[] = {
#define RUNTIME_ENTRY(name, i1, i2)       \
  { Runtime::k##name, "Runtime::" #name } \
  ,
226
      RUNTIME_FUNCTION_LIST(RUNTIME_ENTRY) INLINE_FUNCTION_LIST(RUNTIME_ENTRY)
227
          INLINE_OPTIMIZED_FUNCTION_LIST(RUNTIME_ENTRY)
228
#undef RUNTIME_ENTRY
229
  };
230

231 232 233 234 235
  for (unsigned i = 0; i < arraysize(runtime_functions); ++i) {
    ExternalReference ref(
        static_cast<Runtime::FunctionId>(runtime_functions[i].id), isolate);
    Add(ref.address(), runtime_functions[i].name);
  }
236

237 238 239 240 241
  static const RefTableEntry inline_caches[] = {
#define IC_ENTRY(name)          \
  { IC::k##name, "IC::" #name } \
  ,
      IC_UTIL_LIST(IC_ENTRY)
242
#undef IC_ENTRY
243
  };
244

245 246 247 248
  for (unsigned i = 0; i < arraysize(inline_caches); ++i) {
    ExternalReference ref(
        IC_Utility(static_cast<IC::UtilityId>(inline_caches[i].id)), isolate);
    Add(ref.address(), runtime_functions[i].name);
249
  }
250 251

  // Stat counters
252
  struct StatsRefTableEntry {
253
    StatsCounter* (Counters::*counter)();
254 255 256
    const char* name;
  };

257 258 259 260 261
  static const StatsRefTableEntry stats_ref_table[] = {
#define COUNTER_ENTRY(name, caption)      \
  { &Counters::name, "Counters::" #name } \
  ,
      STATS_COUNTER_LIST_1(COUNTER_ENTRY) STATS_COUNTER_LIST_2(COUNTER_ENTRY)
262
#undef COUNTER_ENTRY
263
  };
264

265
  Counters* counters = isolate->counters();
266 267 268 269 270 271 272 273 274
  for (unsigned i = 0; i < arraysize(stats_ref_table); ++i) {
    // To make sure the indices are not dependent on whether counters are
    // enabled, use a dummy address as filler.
    Address address = NotAvailable();
    StatsCounter* counter = (counters->*(stats_ref_table[i].counter))();
    if (counter->Enabled()) {
      address = reinterpret_cast<Address>(counter->GetInternalPointer());
    }
    Add(address, stats_ref_table[i].name);
275
  }
276 277

  // Top addresses
278 279 280
  static const char* address_names[] = {
#define BUILD_NAME_LITERAL(Name, name) "Isolate::" #name "_address",
      FOR_EACH_ISOLATE_ADDRESS_NAME(BUILD_NAME_LITERAL) NULL
281
#undef BUILD_NAME_LITERAL
282 283
  };

284 285 286
  for (int i = 0; i < Isolate::kIsolateAddressCount; ++i) {
    Add(isolate->get_address_from_id(static_cast<Isolate::AddressId>(i)),
        address_names[i]);
287 288 289
  }

  // Accessors
290 291 292 293 294 295 296 297 298 299
  struct AccessorRefTable {
    Address address;
    const char* name;
  };

  static const AccessorRefTable accessors[] = {
#define ACCESSOR_INFO_DECLARATION(name)                                     \
  { FUNCTION_ADDR(&Accessors::name##Getter), "Accessors::" #name "Getter" } \
  , {FUNCTION_ADDR(&Accessors::name##Setter), "Accessors::" #name "Setter"},
      ACCESSOR_INFO_LIST(ACCESSOR_INFO_DECLARATION)
300
#undef ACCESSOR_INFO_DECLARATION
301 302 303 304 305
  };

  for (unsigned i = 0; i < arraysize(accessors); ++i) {
    Add(accessors[i].address, accessors[i].name);
  }
306

307 308
  StubCache* stub_cache = isolate->stub_cache();

309
  // Stub cache tables
310
  Add(stub_cache->key_reference(StubCache::kPrimary).address(),
311
      "StubCache::primary_->key");
312
  Add(stub_cache->value_reference(StubCache::kPrimary).address(),
313
      "StubCache::primary_->value");
314
  Add(stub_cache->map_reference(StubCache::kPrimary).address(),
315
      "StubCache::primary_->map");
316
  Add(stub_cache->key_reference(StubCache::kSecondary).address(),
317
      "StubCache::secondary_->key");
318
  Add(stub_cache->value_reference(StubCache::kSecondary).address(),
319
      "StubCache::secondary_->value");
320
  Add(stub_cache->map_reference(StubCache::kSecondary).address(),
321
      "StubCache::secondary_->map");
322 323

  // Runtime entries
324
  Add(ExternalReference::delete_handle_scope_extensions(isolate).address(),
325
      "HandleScope::DeleteExtensions");
326 327
  Add(ExternalReference::incremental_marking_record_write_function(isolate)
          .address(),
328
      "IncrementalMarking::RecordWrite");
329
  Add(ExternalReference::store_buffer_overflow_function(isolate).address(),
330
      "StoreBuffer::StoreBufferOverflow");
331

332 333
  // Add a small set of deopt entry addresses to encoder without generating the
  // deopt table code, which isn't possible at deserialization time.
334
  HandleScope scope(isolate);
335 336
  for (int entry = 0; entry < kDeoptTableSerializeEntryCount; ++entry) {
    Address address = Deoptimizer::GetDeoptimizationEntry(
337
        isolate,
338 339 340
        entry,
        Deoptimizer::LAZY,
        Deoptimizer::CALCULATE_ENTRY_ADDRESS);
341
    Add(address, "lazy_deopt");
342
  }
343 344 345
}


346
ExternalReferenceEncoder::ExternalReferenceEncoder(Isolate* isolate)
347 348 349 350 351 352 353 354
    : map_(HashMap::PointersMatch) {
  ExternalReferenceTable* table = ExternalReferenceTable::instance(isolate);
  for (int i = 0; i < table->size(); ++i) {
    Address addr = table->address(i);
    if (addr == ExternalReferenceTable::NotAvailable()) continue;
    // We expect no duplicate external references entries in the table.
    DCHECK_NULL(map_.Lookup(addr, Hash(addr), false));
    map_.Lookup(addr, Hash(addr), true)->value = reinterpret_cast<void*>(i);
355 356 357 358
  }
}


359 360
uint32_t ExternalReferenceEncoder::Encode(Address address) const {
  DCHECK_NOT_NULL(address);
361
  HashMap::Entry* entry =
362 363 364
      const_cast<HashMap&>(map_).Lookup(address, Hash(address), false);
  DCHECK_NOT_NULL(entry);
  return static_cast<uint32_t>(reinterpret_cast<intptr_t>(entry->value));
365 366 367
}


368 369 370 371 372 373 374
const char* ExternalReferenceEncoder::NameOfAddress(Isolate* isolate,
                                                    Address address) const {
  HashMap::Entry* entry =
      const_cast<HashMap&>(map_).Lookup(address, Hash(address), false);
  if (entry == NULL) return "<unknown>";
  uint32_t i = static_cast<uint32_t>(reinterpret_cast<intptr_t>(entry->value));
  return ExternalReferenceTable::instance(isolate)->name(i);
375 376 377
}


378
RootIndexMap::RootIndexMap(Isolate* isolate) : map_(HashMap::PointersMatch) {
379
  Object** root_array = isolate->heap()->roots_array_start();
380
  for (uint32_t i = 0; i < Heap::kStrongRootListLength; i++) {
381 382 383 384 385 386
    Heap::RootListIndex root_index = static_cast<Heap::RootListIndex>(i);
    Object* root = root_array[root_index];
    // Omit root entries that can be written after initialization. They must
    // not be referenced through the root list in the snapshot.
    if (root->IsHeapObject() &&
        isolate->heap()->RootCanBeTreatedAsConstant(root_index)) {
387
      HeapObject* heap_object = HeapObject::cast(root);
388
      HashMap::Entry* entry = LookupEntry(&map_, heap_object, false);
389 390 391
      if (entry != NULL) {
        // Some are initialized to a previous value in the root list.
        DCHECK_LT(GetValue(entry), i);
392
      } else {
393
        SetValue(LookupEntry(&map_, heap_object, true), i);
394 395 396 397 398 399
      }
    }
  }
}


400 401 402 403 404 405 406 407 408 409 410 411 412 413 414
class CodeAddressMap: public CodeEventLogger {
 public:
  explicit CodeAddressMap(Isolate* isolate)
      : isolate_(isolate) {
    isolate->logger()->addCodeEventListener(this);
  }

  virtual ~CodeAddressMap() {
    isolate_->logger()->removeCodeEventListener(this);
  }

  virtual void CodeMoveEvent(Address from, Address to) {
    address_to_name_map_.Move(from, to);
  }

415 416 417
  virtual void CodeDisableOptEvent(Code* code, SharedFunctionInfo* shared) {
  }

418 419 420 421 422 423 424 425 426 427 428
  virtual void CodeDeleteEvent(Address from) {
    address_to_name_map_.Remove(from);
  }

  const char* Lookup(Address address) {
    return address_to_name_map_.Lookup(address);
  }

 private:
  class NameMap {
   public:
429
    NameMap() : impl_(HashMap::PointersMatch) {}
430 431 432 433 434 435 436 437 438 439 440 441 442 443 444 445 446 447 448 449 450 451 452 453 454 455 456 457 458 459

    ~NameMap() {
      for (HashMap::Entry* p = impl_.Start(); p != NULL; p = impl_.Next(p)) {
        DeleteArray(static_cast<const char*>(p->value));
      }
    }

    void Insert(Address code_address, const char* name, int name_size) {
      HashMap::Entry* entry = FindOrCreateEntry(code_address);
      if (entry->value == NULL) {
        entry->value = CopyName(name, name_size);
      }
    }

    const char* Lookup(Address code_address) {
      HashMap::Entry* entry = FindEntry(code_address);
      return (entry != NULL) ? static_cast<const char*>(entry->value) : NULL;
    }

    void Remove(Address code_address) {
      HashMap::Entry* entry = FindEntry(code_address);
      if (entry != NULL) {
        DeleteArray(static_cast<char*>(entry->value));
        RemoveEntry(entry);
      }
    }

    void Move(Address from, Address to) {
      if (from == to) return;
      HashMap::Entry* from_entry = FindEntry(from);
460
      DCHECK(from_entry != NULL);
461 462 463
      void* value = from_entry->value;
      RemoveEntry(from_entry);
      HashMap::Entry* to_entry = FindOrCreateEntry(to);
464
      DCHECK(to_entry->value == NULL);
465 466 467 468 469 470 471 472 473 474 475 476 477 478 479 480 481 482 483 484 485 486 487 488 489 490 491 492 493 494 495 496 497 498 499 500 501 502 503 504 505 506 507 508 509 510
      to_entry->value = value;
    }

   private:
    static char* CopyName(const char* name, int name_size) {
      char* result = NewArray<char>(name_size + 1);
      for (int i = 0; i < name_size; ++i) {
        char c = name[i];
        if (c == '\0') c = ' ';
        result[i] = c;
      }
      result[name_size] = '\0';
      return result;
    }

    HashMap::Entry* FindOrCreateEntry(Address code_address) {
      return impl_.Lookup(code_address, ComputePointerHash(code_address), true);
    }

    HashMap::Entry* FindEntry(Address code_address) {
      return impl_.Lookup(code_address,
                          ComputePointerHash(code_address),
                          false);
    }

    void RemoveEntry(HashMap::Entry* entry) {
      impl_.Remove(entry->key, entry->hash);
    }

    HashMap impl_;

    DISALLOW_COPY_AND_ASSIGN(NameMap);
  };

  virtual void LogRecordedBuffer(Code* code,
                                 SharedFunctionInfo*,
                                 const char* name,
                                 int length) {
    address_to_name_map_.Insert(code->address(), name, length);
  }

  NameMap address_to_name_map_;
  Isolate* isolate_;
};


511 512 513 514 515
void Deserializer::DecodeReservation(
    Vector<const SerializedData::Reservation> res) {
  DCHECK_EQ(0, reservations_[NEW_SPACE].length());
  STATIC_ASSERT(NEW_SPACE == 0);
  int current_space = NEW_SPACE;
516 517 518
  for (int i = 0; i < res.length(); i++) {
    SerializedData::Reservation r(0);
    memcpy(&r, res.start() + i, sizeof(r));
519 520 521 522
    reservations_[current_space].Add({r.chunk_size(), NULL, NULL});
    if (r.is_last()) current_space++;
  }
  DCHECK_EQ(kNumberOfSpaces, current_space);
523
  for (int i = 0; i < kNumberOfPreallocatedSpaces; i++) current_chunk_[i] = 0;
524 525 526
}


527 528 529 530
void Deserializer::FlushICacheForNewCodeObjects() {
  PageIterator it(isolate_->heap()->code_space());
  while (it.has_next()) {
    Page* p = it.next();
531
    CpuFeatures::FlushICache(p->area_start(), p->area_end() - p->area_start());
532 533 534 535
  }
}


536
bool Deserializer::ReserveSpace() {
537 538 539 540 541
#ifdef DEBUG
  for (int i = NEW_SPACE; i < kNumberOfSpaces; ++i) {
    CHECK(reservations_[i].length() > 0);
  }
#endif  // DEBUG
542 543 544 545 546 547 548 549
  if (!isolate_->heap()->ReserveSpace(reservations_)) return false;
  for (int i = 0; i < kNumberOfPreallocatedSpaces; i++) {
    high_water_[i] = reservations_[i][0].start;
  }
  return true;
}


550
void Deserializer::Initialize(Isolate* isolate) {
551 552
  DCHECK_NULL(isolate_);
  DCHECK_NOT_NULL(isolate);
553
  isolate_ = isolate;
554 555 556 557
  DCHECK_NULL(external_reference_table_);
  external_reference_table_ = ExternalReferenceTable::instance(isolate);
  CHECK_EQ(magic_number_,
           SerializedData::ComputeMagicNumber(external_reference_table_));
558 559 560 561 562
}


void Deserializer::Deserialize(Isolate* isolate) {
  Initialize(isolate);
563
  if (!ReserveSpace()) V8::FatalProcessOutOfMemory("deserializing context");
564
  // No active threads.
565
  DCHECK_NULL(isolate_->thread_manager()->FirstThreadStateInUse());
566
  // No active handles.
567
  DCHECK(isolate_->handle_scope_implementer()->blocks()->is_empty());
568
  isolate_->heap()->IterateSmiRoots(this);
569
  isolate_->heap()->IterateStrongRoots(this, VISIT_ONLY_STRONG);
570
  isolate_->heap()->RepairFreeListsAfterDeserialization();
571 572 573 574
  isolate_->heap()->IterateWeakRoots(this, VISIT_ALL);

  isolate_->heap()->set_native_contexts_list(
      isolate_->heap()->undefined_value());
575 576
  isolate_->heap()->set_array_buffers_list(
      isolate_->heap()->undefined_value());
577

578 579 580 581 582 583 584
  // The allocation site list is build during root iteration, but if no sites
  // were encountered then it needs to be initialized to undefined.
  if (isolate_->heap()->allocation_sites_list() == Smi::FromInt(0)) {
    isolate_->heap()->set_allocation_sites_list(
        isolate_->heap()->undefined_value());
  }

585 586 587 588
  // Update data pointers to the external strings containing natives sources.
  for (int i = 0; i < Natives::GetBuiltinsCount(); i++) {
    Object* source = isolate_->heap()->natives_source_cache()->get(i);
    if (!source->IsUndefined()) {
589
      ExternalOneByteString::cast(source)->update_data_cache();
590 591
    }
  }
592

593 594
  FlushICacheForNewCodeObjects();

595 596 597
  // Issue code events for newly deserialized code objects.
  LOG_CODE_EVENT(isolate_, LogCodeObjects());
  LOG_CODE_EVENT(isolate_, LogCompiledFunctions());
598 599 600
}


601
MaybeHandle<Object> Deserializer::DeserializePartial(
602 603
    Isolate* isolate, Handle<JSGlobalProxy> global_proxy,
    Handle<FixedArray>* outdated_contexts_out) {
604
  Initialize(isolate);
605
  if (!ReserveSpace()) {
606
    V8::FatalProcessOutOfMemory("deserialize context");
607
    return MaybeHandle<Object>();
608
  }
609

610 611 612 613
  Vector<Handle<Object> > attached_objects = Vector<Handle<Object> >::New(1);
  attached_objects[kGlobalProxyReference] = global_proxy;
  SetAttachedObjects(attached_objects);

614
  DisallowHeapAllocation no_gc;
615 616 617 618
  // Keep track of the code space start and end pointers in case new
  // code objects were unserialized
  OldSpace* code_space = isolate_->heap()->code_space();
  Address start_address = code_space->top();
619 620 621 622
  Object* root;
  Object* outdated_contexts;
  VisitPointer(&root);
  VisitPointer(&outdated_contexts);
623 624 625 626 627

  // There's no code deserialized here. If this assert fires
  // then that's changed and logging should be added to notify
  // the profiler et al of the new code.
  CHECK_EQ(start_address, code_space->top());
628 629 630 631 632 633 634 635 636 637 638 639 640
  CHECK(outdated_contexts->IsFixedArray());
  *outdated_contexts_out =
      Handle<FixedArray>(FixedArray::cast(outdated_contexts), isolate);
  return Handle<Object>(root, isolate);
}


MaybeHandle<SharedFunctionInfo> Deserializer::DeserializeCode(
    Isolate* isolate) {
  Initialize(isolate);
  if (!ReserveSpace()) {
    return Handle<SharedFunctionInfo>();
  } else {
641
    deserializing_user_code_ = true;
642 643 644 645 646
    DisallowHeapAllocation no_gc;
    Object* root;
    VisitPointer(&root);
    return Handle<SharedFunctionInfo>(SharedFunctionInfo::cast(root));
  }
647 648 649
}


650
Deserializer::~Deserializer() {
651
  // TODO(svenpanne) Re-enable this assertion when v8 initialization is fixed.
652
  // DCHECK(source_.AtEOF());
653
  attached_objects_.Dispose();
654 655 656 657
}


// This is called on the roots.  It is the driver of the deserialization
658
// process.  It is also called on the body of each function.
659
void Deserializer::VisitPointers(Object** start, Object** end) {
660 661
  // The space must be new space.  Any other space would cause ReadChunk to try
  // to update the remembered using NULL as the address.
662
  ReadData(start, end, NEW_SPACE, NULL);
663 664 665
}


666 667 668 669 670 671 672 673 674 675
void Deserializer::RelinkAllocationSite(AllocationSite* site) {
  if (isolate_->heap()->allocation_sites_list() == Smi::FromInt(0)) {
    site->set_weak_next(isolate_->heap()->undefined_value());
  } else {
    site->set_weak_next(isolate_->heap()->allocation_sites_list());
  }
  isolate_->heap()->set_allocation_sites_list(site);
}


676 677 678 679 680
// Used to insert a deserialized internalized string into the string table.
class StringTableInsertionKey : public HashTableKey {
 public:
  explicit StringTableInsertionKey(String* string)
      : string_(string), hash_(HashForObject(string)) {
681
    DCHECK(string->IsInternalizedString());
682 683
  }

684
  bool IsMatch(Object* string) OVERRIDE {
685 686 687 688 689 690 691
    // We know that all entries in a hash table had their hash keys created.
    // Use that knowledge to have fast failure.
    if (hash_ != HashForObject(string)) return false;
    // We want to compare the content of two internalized strings here.
    return string_->SlowEquals(String::cast(string));
  }

692
  uint32_t Hash() OVERRIDE { return hash_; }
693

694
  uint32_t HashForObject(Object* key) OVERRIDE {
695 696 697 698
    return String::cast(key)->Hash();
  }

  MUST_USE_RESULT virtual Handle<Object> AsHandle(Isolate* isolate)
699
      OVERRIDE {
700 701 702 703 704 705 706 707
    return handle(string_, isolate);
  }

  String* string_;
  uint32_t hash_;
};


708
HeapObject* Deserializer::ProcessNewObjectFromSerializedCode(HeapObject* obj) {
709 710 711 712 713 714 715 716
  if (obj->IsString()) {
    String* string = String::cast(obj);
    // Uninitialize hash field as the hash seed may have changed.
    string->set_hash_field(String::kEmptyHashField);
    if (string->IsInternalizedString()) {
      DisallowHeapAllocation no_gc;
      HandleScope scope(isolate_);
      StringTableInsertionKey key(string);
717 718 719
      String* canonical = *StringTable::LookupKey(isolate_, &key);
      string->SetForwardedInternalizedString(canonical);
      return canonical;
720 721 722 723 724 725
    }
  }
  return obj;
}


726 727
HeapObject* Deserializer::GetBackReferencedObject(int space) {
  HeapObject* obj;
728
  BackReference back_reference(source_.GetInt());
729
  if (space == LO_SPACE) {
730 731
    CHECK(back_reference.chunk_index() == 0);
    uint32_t index = back_reference.large_object_index();
732 733 734 735 736 737 738 739 740 741 742 743 744
    obj = deserialized_large_objects_[index];
  } else {
    DCHECK(space < kNumberOfPreallocatedSpaces);
    uint32_t chunk_index = back_reference.chunk_index();
    DCHECK_LE(chunk_index, current_chunk_[space]);
    uint32_t chunk_offset = back_reference.chunk_offset();
    obj = HeapObject::FromAddress(reservations_[space][chunk_index].start +
                                  chunk_offset);
  }
  if (deserializing_user_code() && obj->IsInternalizedString()) {
    obj = String::cast(obj)->GetForwardedInternalizedString();
  }
  hot_objects_.Add(obj);
745 746 747 748
  return obj;
}


749 750 751
// This routine writes the new object into the pointer provided and then
// returns true if the new object was in young space and false otherwise.
// The reason for this strange interface is that otherwise the object is
752 753
// written very late, which means the FreeSpace map is not set up by the
// time we need to use it to mark the space at the end of a page free.
754 755 756
void Deserializer::ReadObject(int space_number, Object** write_back) {
  Address address;
  HeapObject* obj;
757
  int next_int = source_.GetInt();
758 759 760 761

  bool double_align = false;
#ifndef V8_HOST_ARCH_64_BIT
  double_align = next_int == kDoubleAlignmentSentinel;
762
  if (double_align) next_int = source_.GetInt();
763 764 765 766 767 768 769 770 771 772 773 774
#endif

  DCHECK_NE(kDoubleAlignmentSentinel, next_int);
  int size = next_int << kObjectAlignmentBits;
  int reserved_size = size + (double_align ? kPointerSize : 0);
  address = Allocate(space_number, reserved_size);
  obj = HeapObject::FromAddress(address);
  if (double_align) {
    obj = isolate_->heap()->DoubleAlignForDeserialization(obj, reserved_size);
    address = obj->address();
  }

775
  isolate_->heap()->OnAllocationEvent(obj, size);
776 777
  Object** current = reinterpret_cast<Object**>(address);
  Object** limit = current + (size >> kPointerSizeLog2);
778
  if (FLAG_log_snapshot_positions) {
779
    LOG(isolate_, SnapshotPositionEvent(address, source_.position()));
780
  }
781
  ReadData(current, limit, space_number, address);
782 783 784 785

  // TODO(mvstanton): consider treating the heap()->allocation_sites_list()
  // as a (weak) root. If this root is relocated correctly,
  // RelinkAllocationSite() isn't necessary.
786 787 788
  if (obj->IsAllocationSite()) RelinkAllocationSite(AllocationSite::cast(obj));

  // Fix up strings from serialized user code.
789
  if (deserializing_user_code()) obj = ProcessNewObjectFromSerializedCode(obj);
790

791 792
  Object* write_back_obj = obj;
  UnalignedCopy(write_back, &write_back_obj);
793
#ifdef DEBUG
794 795 796 797 798
  if (obj->IsCode()) {
    DCHECK(space_number == CODE_SPACE || space_number == LO_SPACE);
  } else {
    DCHECK(space_number != CODE_SPACE);
  }
799
#endif
800 801 802 803 804 805 806 807 808 809 810 811

  if (obj->IsCode()) {
    // Turn internal references encoded as offsets back to absolute addresses.
    Code* code = Code::cast(obj);
    Address entry = code->entry();
    int mode_mask = RelocInfo::ModeMask(RelocInfo::INTERNAL_REFERENCE);
    for (RelocIterator it(code, mode_mask); !it.done(); it.next()) {
      RelocInfo* rinfo = it.rinfo();
      intptr_t offset =
          reinterpret_cast<intptr_t>(rinfo->target_internal_reference());
      DCHECK(0 <= offset && offset <= code->instruction_size());
      rinfo->set_target_internal_reference(entry + offset);
812 813
    }
  }
814 815
}

816 817 818 819 820

// We know the space requirements before deserialization and can
// pre-allocate that reserved space. During deserialization, all we need
// to do is to bump up the pointer for each space in the reserved
// space. This is also used for fixing back references.
821
// We may have to split up the pre-allocation into several chunks
822 823
// because it would not fit onto a single page. We do not have to keep
// track of when to move to the next chunk. An opcode will signal this.
824 825 826 827 828 829 830 831
// Since multiple large objects cannot be folded into one large object
// space allocation, we have to do an actual allocation when deserializing
// each large object. Instead of tracking offset for back references, we
// reference large objects by index.
Address Deserializer::Allocate(int space_index, int size) {
  if (space_index == LO_SPACE) {
    AlwaysAllocateScope scope(isolate_);
    LargeObjectSpace* lo_space = isolate_->heap()->lo_space();
832
    Executability exec = static_cast<Executability>(source_.Get());
833 834 835 836 837 838 839
    AllocationResult result = lo_space->AllocateRaw(size, exec);
    HeapObject* obj = HeapObject::cast(result.ToObjectChecked());
    deserialized_large_objects_.Add(obj);
    return obj->address();
  } else {
    DCHECK(space_index < kNumberOfPreallocatedSpaces);
    Address address = high_water_[space_index];
840
    DCHECK_NOT_NULL(address);
841 842 843
    high_water_[space_index] += size;
#ifdef DEBUG
    // Assert that the current reserved chunk is still big enough.
844 845
    const Heap::Reservation& reservation = reservations_[space_index];
    int chunk_index = current_chunk_[space_index];
846 847
    CHECK_LE(high_water_[space_index], reservation[chunk_index].end);
#endif
848 849 850 851
    return address;
  }
}

852

853 854
void Deserializer::ReadData(Object** current, Object** limit, int source_space,
                            Address current_object_address) {
855
  Isolate* const isolate = isolate_;
856 857 858
  // Write barrier support costs around 1% in startup time.  In fact there
  // are no new space objects in current boot snapshots, so it's not needed,
  // but that may change.
859 860 861
  bool write_barrier_needed = (current_object_address != NULL &&
                               source_space != NEW_SPACE &&
                               source_space != CELL_SPACE &&
862
                               source_space != PROPERTY_CELL_SPACE &&
863
                               source_space != CODE_SPACE &&
864
                               source_space != OLD_DATA_SPACE);
865
  while (current < limit) {
866
    byte data = source_.Get();
867
    switch (data) {
868 869 870 871 872 873
#define CASE_STATEMENT(where, how, within, space_number) \
  case where + how + within + space_number:              \
    STATIC_ASSERT((where & ~kPointedToMask) == 0);       \
    STATIC_ASSERT((how & ~kHowToCodeMask) == 0);         \
    STATIC_ASSERT((within & ~kWhereToPointMask) == 0);   \
    STATIC_ASSERT((space_number & ~kSpaceMask) == 0);
874

875
#define CASE_BODY(where, how, within, space_number_if_any)                     \
876 877 878 879 880 881 882 883 884 885 886 887 888 889
  {                                                                            \
    bool emit_write_barrier = false;                                           \
    bool current_was_incremented = false;                                      \
    int space_number = space_number_if_any == kAnyOldSpace                     \
                           ? (data & kSpaceMask)                               \
                           : space_number_if_any;                              \
    if (where == kNewObject && how == kPlain && within == kStartOfObject) {    \
      ReadObject(space_number, current);                                       \
      emit_write_barrier = (space_number == NEW_SPACE);                        \
    } else {                                                                   \
      Object* new_object = NULL; /* May not be a real Object pointer. */       \
      if (where == kNewObject) {                                               \
        ReadObject(space_number, &new_object);                                 \
      } else if (where == kRootArray) {                                        \
890
        int root_id = source_.GetInt();                                        \
891 892 893
        new_object = isolate->heap()->roots_array_start()[root_id];            \
        emit_write_barrier = isolate->heap()->InNewSpace(new_object);          \
      } else if (where == kPartialSnapshotCache) {                             \
894
        int cache_index = source_.GetInt();                                    \
895
        new_object = isolate->partial_snapshot_cache()->at(cache_index);       \
896 897
        emit_write_barrier = isolate->heap()->InNewSpace(new_object);          \
      } else if (where == kExternalReference) {                                \
898
        int skip = source_.GetInt();                                           \
899 900
        current = reinterpret_cast<Object**>(                                  \
            reinterpret_cast<Address>(current) + skip);                        \
901
        int reference_id = source_.GetInt();                                   \
902
        Address address = external_reference_table_->address(reference_id);    \
903 904 905
        new_object = reinterpret_cast<Object*>(address);                       \
      } else if (where == kBackref) {                                          \
        emit_write_barrier = (space_number == NEW_SPACE);                      \
906
        new_object = GetBackReferencedObject(data & kSpaceMask);               \
907
      } else if (where == kBuiltin) {                                          \
908
        DCHECK(deserializing_user_code());                                     \
909
        int builtin_id = source_.GetInt();                                     \
910 911
        DCHECK_LE(0, builtin_id);                                              \
        DCHECK_LT(builtin_id, Builtins::builtin_count);                        \
912 913 914
        Builtins::Name name = static_cast<Builtins::Name>(builtin_id);         \
        new_object = isolate->builtins()->builtin(name);                       \
        emit_write_barrier = false;                                            \
915
      } else if (where == kAttachedReference) {                                \
916
        int index = source_.GetInt();                                          \
917 918
        DCHECK(deserializing_user_code() || index == kGlobalProxyReference);   \
        new_object = *attached_objects_[index];                                \
919
        emit_write_barrier = isolate->heap()->InNewSpace(new_object);          \
920
      } else {                                                                 \
921
        DCHECK(where == kBackrefWithSkip);                                     \
922
        int skip = source_.GetInt();                                           \
923 924 925
        current = reinterpret_cast<Object**>(                                  \
            reinterpret_cast<Address>(current) + skip);                        \
        emit_write_barrier = (space_number == NEW_SPACE);                      \
926
        new_object = GetBackReferencedObject(data & kSpaceMask);               \
927 928 929 930 931 932
      }                                                                        \
      if (within == kInnerPointer) {                                           \
        if (space_number != CODE_SPACE || new_object->IsCode()) {              \
          Code* new_code_object = reinterpret_cast<Code*>(new_object);         \
          new_object =                                                         \
              reinterpret_cast<Object*>(new_code_object->instruction_start()); \
933
        } else {                                                               \
934
          DCHECK(space_number == CODE_SPACE);                                  \
935 936
          Cell* cell = Cell::cast(new_object);                                 \
          new_object = reinterpret_cast<Object*>(cell->ValueAddress());        \
937 938
        }                                                                      \
      }                                                                        \
939 940 941 942 943 944 945 946 947 948
      if (how == kFromCode) {                                                  \
        Address location_of_branch_data = reinterpret_cast<Address>(current);  \
        Assembler::deserialization_set_special_target_at(                      \
            location_of_branch_data,                                           \
            Code::cast(HeapObject::FromAddress(current_object_address)),       \
            reinterpret_cast<Address>(new_object));                            \
        location_of_branch_data += Assembler::kSpecialTargetSize;              \
        current = reinterpret_cast<Object**>(location_of_branch_data);         \
        current_was_incremented = true;                                        \
      } else {                                                                 \
949
        UnalignedCopy(current, &new_object);                                   \
950 951 952 953 954 955 956 957 958 959 960 961 962
      }                                                                        \
    }                                                                          \
    if (emit_write_barrier && write_barrier_needed) {                          \
      Address current_address = reinterpret_cast<Address>(current);            \
      isolate->heap()->RecordWrite(                                            \
          current_object_address,                                              \
          static_cast<int>(current_address - current_object_address));         \
    }                                                                          \
    if (!current_was_incremented) {                                            \
      current++;                                                               \
    }                                                                          \
    break;                                                                     \
  }
963 964 965 966

// This generates a case and a body for the new space (which has to do extra
// write barrier handling) and handles the other spaces with 8 fall-through
// cases and one body.
967 968 969 970 971 972 973 974 975 976
#define ALL_SPACES(where, how, within)                    \
  CASE_STATEMENT(where, how, within, NEW_SPACE)           \
  CASE_BODY(where, how, within, NEW_SPACE)                \
  CASE_STATEMENT(where, how, within, OLD_DATA_SPACE)      \
  CASE_STATEMENT(where, how, within, OLD_POINTER_SPACE)   \
  CASE_STATEMENT(where, how, within, CODE_SPACE)          \
  CASE_STATEMENT(where, how, within, MAP_SPACE)           \
  CASE_STATEMENT(where, how, within, CELL_SPACE)          \
  CASE_STATEMENT(where, how, within, PROPERTY_CELL_SPACE) \
  CASE_STATEMENT(where, how, within, LO_SPACE)            \
977
  CASE_BODY(where, how, within, kAnyOldSpace)
978

979 980 981 982 983 984 985 986 987 988 989
#define FOUR_CASES(byte_code)             \
  case byte_code:                         \
  case byte_code + 1:                     \
  case byte_code + 2:                     \
  case byte_code + 3:

#define SIXTEEN_CASES(byte_code)          \
  FOUR_CASES(byte_code)                   \
  FOUR_CASES(byte_code + 4)               \
  FOUR_CASES(byte_code + 8)               \
  FOUR_CASES(byte_code + 12)
990

991 992 993 994 995 996 997 998 999 1000 1001 1002 1003 1004 1005 1006 1007 1008 1009 1010 1011 1012 1013 1014 1015 1016 1017 1018 1019 1020 1021 1022 1023
#define COMMON_RAW_LENGTHS(f)        \
  f(1)  \
  f(2)  \
  f(3)  \
  f(4)  \
  f(5)  \
  f(6)  \
  f(7)  \
  f(8)  \
  f(9)  \
  f(10) \
  f(11) \
  f(12) \
  f(13) \
  f(14) \
  f(15) \
  f(16) \
  f(17) \
  f(18) \
  f(19) \
  f(20) \
  f(21) \
  f(22) \
  f(23) \
  f(24) \
  f(25) \
  f(26) \
  f(27) \
  f(28) \
  f(29) \
  f(30) \
  f(31)

1024 1025
      // We generate 15 cases and bodies that process special tags that combine
      // the raw data tag and the length into one byte.
1026 1027 1028 1029 1030 1031 1032
#define RAW_CASE(index)                                                        \
  case kRawData + index: {                                                     \
    byte* raw_data_out = reinterpret_cast<byte*>(current);                     \
    source_.CopyRaw(raw_data_out, index* kPointerSize);                        \
    current = reinterpret_cast<Object**>(raw_data_out + index * kPointerSize); \
    break;                                                                     \
  }
1033 1034
      COMMON_RAW_LENGTHS(RAW_CASE)
#undef RAW_CASE
1035 1036 1037 1038

      // Deserialize a chunk of raw data that doesn't have one of the popular
      // lengths.
      case kRawData: {
1039
        int size = source_.GetInt();
1040
        byte* raw_data_out = reinterpret_cast<byte*>(current);
1041
        source_.CopyRaw(raw_data_out, size);
1042 1043
        break;
      }
1044

1045 1046
      SIXTEEN_CASES(kRootArrayConstants + kNoSkipDistance)
      SIXTEEN_CASES(kRootArrayConstants + kNoSkipDistance + 16) {
1047
        int root_id = RootArrayConstantFromByteCode(data);
1048
        Object* object = isolate->heap()->roots_array_start()[root_id];
1049
        DCHECK(!isolate->heap()->InNewSpace(object));
1050
        UnalignedCopy(current++, &object);
1051 1052 1053
        break;
      }

1054 1055 1056
      SIXTEEN_CASES(kRootArrayConstants + kHasSkipDistance)
      SIXTEEN_CASES(kRootArrayConstants + kHasSkipDistance + 16) {
        int root_id = RootArrayConstantFromByteCode(data);
1057
        int skip = source_.GetInt();
1058 1059 1060
        current = reinterpret_cast<Object**>(
            reinterpret_cast<intptr_t>(current) + skip);
        Object* object = isolate->heap()->roots_array_start()[root_id];
1061
        DCHECK(!isolate->heap()->InNewSpace(object));
1062
        UnalignedCopy(current++, &object);
1063 1064 1065
        break;
      }

1066
      case kVariableRepeat: {
1067
        int repeats = source_.GetInt();
1068
        Object* object = current[-1];
1069
        DCHECK(!isolate->heap()->InNewSpace(object));
1070
        for (int i = 0; i < repeats; i++) UnalignedCopy(current++, &object);
1071 1072 1073
        break;
      }

1074 1075
      STATIC_ASSERT(kRootArrayNumberOfConstantEncodings ==
                    Heap::kOldSpaceRoots);
1076 1077 1078 1079 1080 1081 1082
      STATIC_ASSERT(kMaxFixedRepeats == 15);
      FOUR_CASES(kFixedRepeat)
      FOUR_CASES(kFixedRepeat + 4)
      FOUR_CASES(kFixedRepeat + 8)
      case kFixedRepeat + 12:
      case kFixedRepeat + 13:
      case kFixedRepeat + 14: {
1083
        int repeats = RepeatsForCode(data);
1084 1085
        Object* object;
        UnalignedCopy(&object, current - 1);
1086
        DCHECK(!isolate->heap()->InNewSpace(object));
1087
        for (int i = 0; i < repeats; i++) UnalignedCopy(current++, &object);
1088 1089 1090
        break;
      }

1091 1092
      // Deserialize a new object and write a pointer to it to the current
      // object.
1093
      ALL_SPACES(kNewObject, kPlain, kStartOfObject)
1094 1095 1096
      // Support for direct instruction pointers in functions.  It's an inner
      // pointer because it points at the entry point, not at the start of the
      // code object.
1097 1098
      CASE_STATEMENT(kNewObject, kPlain, kInnerPointer, CODE_SPACE)
      CASE_BODY(kNewObject, kPlain, kInnerPointer, CODE_SPACE)
1099 1100
      // Deserialize a new code object and write a pointer to its first
      // instruction to the current code object.
1101
      ALL_SPACES(kNewObject, kFromCode, kInnerPointer)
1102 1103 1104
      // Find a recently deserialized object using its offset from the current
      // allocation point and write a pointer to it to the current object.
      ALL_SPACES(kBackref, kPlain, kStartOfObject)
1105
      ALL_SPACES(kBackrefWithSkip, kPlain, kStartOfObject)
1106 1107
#if defined(V8_TARGET_ARCH_MIPS) || defined(V8_TARGET_ARCH_MIPS64) || \
    defined(V8_TARGET_ARCH_PPC) || V8_OOL_CONSTANT_POOL
1108
      // Deserialize a new object from pointer found in code and write
1109 1110 1111
      // a pointer to it to the current object. Required only for MIPS, PPC or
      // ARM with ool constant pool, and omitted on the other architectures
      // because it is fully unrolled and would cause bloat.
1112
      ALL_SPACES(kNewObject, kFromCode, kStartOfObject)
1113 1114
      // Find a recently deserialized code object using its offset from the
      // current allocation point and write a pointer to it to the current
1115
      // object. Required only for MIPS, PPC or ARM with ool constant pool.
1116
      ALL_SPACES(kBackref, kFromCode, kStartOfObject)
1117
      ALL_SPACES(kBackrefWithSkip, kFromCode, kStartOfObject)
1118
#endif
1119 1120
      // Find a recently deserialized code object using its offset from the
      // current allocation point and write a pointer to its first instruction
1121 1122
      // to the current code object or the instruction pointer in a function
      // object.
1123
      ALL_SPACES(kBackref, kFromCode, kInnerPointer)
1124
      ALL_SPACES(kBackrefWithSkip, kFromCode, kInnerPointer)
1125
      ALL_SPACES(kBackref, kPlain, kInnerPointer)
1126
      ALL_SPACES(kBackrefWithSkip, kPlain, kInnerPointer)
1127 1128 1129
      // Find an object in the roots array and write a pointer to it to the
      // current object.
      CASE_STATEMENT(kRootArray, kPlain, kStartOfObject, 0)
1130
      CASE_BODY(kRootArray, kPlain, kStartOfObject, 0)
1131
#if defined(V8_TARGET_ARCH_MIPS) || V8_OOL_CONSTANT_POOL || \
1132
    defined(V8_TARGET_ARCH_MIPS64) || defined(V8_TARGET_ARCH_PPC)
1133 1134 1135 1136
      // Find an object in the roots array and write a pointer to it to in code.
      CASE_STATEMENT(kRootArray, kFromCode, kStartOfObject, 0)
      CASE_BODY(kRootArray, kFromCode, kStartOfObject, 0)
#endif
1137 1138 1139 1140 1141 1142
      // Find an object in the partial snapshots cache and write a pointer to it
      // to the current object.
      CASE_STATEMENT(kPartialSnapshotCache, kPlain, kStartOfObject, 0)
      CASE_BODY(kPartialSnapshotCache,
                kPlain,
                kStartOfObject,
1143
                0)
1144 1145
      // Find an code entry in the partial snapshots cache and
      // write a pointer to it to the current object.
1146
      CASE_STATEMENT(kPartialSnapshotCache, kPlain, kInnerPointer, 0)
1147 1148
      CASE_BODY(kPartialSnapshotCache,
                kPlain,
1149
                kInnerPointer,
1150
                0)
1151 1152 1153 1154 1155 1156
      // Find an external reference and write a pointer to it to the current
      // object.
      CASE_STATEMENT(kExternalReference, kPlain, kStartOfObject, 0)
      CASE_BODY(kExternalReference,
                kPlain,
                kStartOfObject,
1157
                0)
1158 1159 1160 1161 1162 1163
      // Find an external reference and write a pointer to it in the current
      // code object.
      CASE_STATEMENT(kExternalReference, kFromCode, kStartOfObject, 0)
      CASE_BODY(kExternalReference,
                kFromCode,
                kStartOfObject,
1164
                0)
1165 1166 1167
      // Find a builtin and write a pointer to it to the current object.
      CASE_STATEMENT(kBuiltin, kPlain, kStartOfObject, 0)
      CASE_BODY(kBuiltin, kPlain, kStartOfObject, 0)
1168 1169
      CASE_STATEMENT(kBuiltin, kPlain, kInnerPointer, 0)
      CASE_BODY(kBuiltin, kPlain, kInnerPointer, 0)
1170 1171
      CASE_STATEMENT(kBuiltin, kFromCode, kInnerPointer, 0)
      CASE_BODY(kBuiltin, kFromCode, kInnerPointer, 0)
1172 1173 1174 1175
      // Find an object in the attached references and write a pointer to it to
      // the current object.
      CASE_STATEMENT(kAttachedReference, kPlain, kStartOfObject, 0)
      CASE_BODY(kAttachedReference, kPlain, kStartOfObject, 0)
1176 1177 1178 1179
      CASE_STATEMENT(kAttachedReference, kPlain, kInnerPointer, 0)
      CASE_BODY(kAttachedReference, kPlain, kInnerPointer, 0)
      CASE_STATEMENT(kAttachedReference, kFromCode, kInnerPointer, 0)
      CASE_BODY(kAttachedReference, kFromCode, kInnerPointer, 0)
1180 1181 1182 1183 1184

#undef CASE_STATEMENT
#undef CASE_BODY
#undef ALL_SPACES

1185
      case kSkip: {
1186
        int size = source_.GetInt();
1187 1188
        current = reinterpret_cast<Object**>(
            reinterpret_cast<intptr_t>(current) + size);
1189 1190 1191
        break;
      }

1192
      case kNativesStringResource: {
1193
        DCHECK(!isolate_->heap()->deserialization_complete());
1194
        int index = source_.Get();
1195
        Vector<const char> source_vector = Natives::GetScriptSource(index);
1196
        NativesExternalStringResource* resource =
1197
            new NativesExternalStringResource(source_vector.start(),
1198
                                              source_vector.length());
1199 1200
        Object* resource_obj = reinterpret_cast<Object*>(resource);
        UnalignedCopy(current++, &resource_obj);
1201 1202
        break;
      }
1203

1204
      case kNextChunk: {
1205
        int space = source_.Get();
1206 1207 1208 1209 1210 1211 1212
        DCHECK(space < kNumberOfPreallocatedSpaces);
        int chunk_index = current_chunk_[space];
        const Heap::Reservation& reservation = reservations_[space];
        // Make sure the current chunk is indeed exhausted.
        CHECK_EQ(reservation[chunk_index].end, high_water_[space]);
        // Move to next reserved chunk.
        chunk_index = ++current_chunk_[space];
1213
        CHECK_LT(chunk_index, reservation.length());
1214 1215 1216 1217
        high_water_[space] = reservation[chunk_index].start;
        break;
      }

1218 1219
      FOUR_CASES(kHotObjectWithSkip)
      FOUR_CASES(kHotObjectWithSkip + 4) {
1220
        int skip = source_.GetInt();
1221 1222 1223 1224 1225 1226 1227
        current = reinterpret_cast<Object**>(
            reinterpret_cast<Address>(current) + skip);
        // Fall through.
      }
      FOUR_CASES(kHotObject)
      FOUR_CASES(kHotObject + 4) {
        int index = data & kHotObjectIndexMask;
1228 1229 1230
        Object* hot_object = hot_objects_.Get(index);
        UnalignedCopy(current, &hot_object);
        if (write_barrier_needed && isolate->heap()->InNewSpace(hot_object)) {
1231 1232 1233 1234 1235 1236 1237 1238 1239
          Address current_address = reinterpret_cast<Address>(current);
          isolate->heap()->RecordWrite(
              current_object_address,
              static_cast<int>(current_address - current_object_address));
        }
        current++;
        break;
      }

1240
      case kSynchronize: {
1241 1242
        // If we get here then that indicates that you have a mismatch between
        // the number of GC roots when serializing and deserializing.
1243
        CHECK(false);
1244
      }
1245

1246
      default:
1247
        CHECK(false);
1248 1249
    }
  }
1250
  CHECK_EQ(limit, current);
1251 1252 1253
}


1254 1255 1256 1257
Serializer::Serializer(Isolate* isolate, SnapshotByteSink* sink)
    : isolate_(isolate),
      sink_(sink),
      external_reference_encoder_(new ExternalReferenceEncoder(isolate)),
1258
      root_index_map_(isolate),
1259
      code_address_map_(NULL),
1260
      large_objects_total_size_(0),
1261
      seen_large_objects_index_(0) {
1262 1263
  // The serializer is meant to be used only to generate initial heap images
  // from a context in which there is only one isolate.
1264 1265 1266 1267 1268
  for (int i = 0; i < kNumberOfPreallocatedSpaces; i++) {
    pending_chunk_[i] = 0;
    max_chunk_size_[i] = static_cast<uint32_t>(
        MemoryAllocator::PageAreaSize(static_cast<AllocationSpace>(i)));
  }
1269 1270 1271
}


1272 1273
Serializer::~Serializer() {
  delete external_reference_encoder_;
1274
  if (code_address_map_ != NULL) delete code_address_map_;
1275 1276 1277
}


1278
void StartupSerializer::SerializeStrongReferences() {
1279
  Isolate* isolate = this->isolate();
1280
  // No active threads.
1281
  CHECK_NULL(isolate->thread_manager()->FirstThreadStateInUse());
1282
  // No active or weak handles.
1283 1284
  CHECK(isolate->handle_scope_implementer()->blocks()->is_empty());
  CHECK_EQ(0, isolate->global_handles()->NumberOfWeakHandles());
1285
  CHECK_EQ(0, isolate->eternal_handles()->NumberOfHandles());
1286
  // We don't support serializing installed extensions.
1287
  CHECK(!isolate->has_installed_extensions());
1288
  isolate->heap()->IterateSmiRoots(this);
1289
  isolate->heap()->IterateStrongRoots(this, VISIT_ONLY_STRONG);
1290 1291 1292
}


1293 1294 1295 1296 1297 1298 1299 1300 1301 1302
void StartupSerializer::VisitPointers(Object** start, Object** end) {
  for (Object** current = start; current < end; current++) {
    if (start == isolate()->heap()->roots_array_start()) {
      root_index_wave_front_ =
          Max(root_index_wave_front_, static_cast<intptr_t>(current - start));
    }
    if (ShouldBeSkipped(current)) {
      sink_->Put(kSkip, "Skip");
      sink_->PutInt(kPointerSize, "SkipOneWord");
    } else if ((*current)->IsSmi()) {
1303
      sink_->Put(kOnePointerRawData, "Smi");
1304 1305 1306 1307 1308 1309 1310 1311 1312 1313
      for (int i = 0; i < kPointerSize; i++) {
        sink_->Put(reinterpret_cast<byte*>(current)[i], "Byte");
      }
    } else {
      SerializeObject(HeapObject::cast(*current), kPlain, kStartOfObject, 0);
    }
  }
}


1314
void PartialSerializer::Serialize(Object** o) {
1315 1316 1317
  if ((*o)->IsContext()) {
    Context* context = Context::cast(*o);
    global_object_ = context->global_object();
1318
    back_reference_map()->AddGlobalProxy(context->global_proxy());
1319
  }
1320 1321
  VisitPointer(o);
  SerializeOutdatedContextsAsFixedArray();
1322
  Pad();
1323 1324 1325
}


1326 1327 1328 1329 1330 1331 1332 1333 1334 1335 1336 1337 1338 1339 1340 1341 1342 1343 1344 1345
void PartialSerializer::SerializeOutdatedContextsAsFixedArray() {
  int length = outdated_contexts_.length();
  if (length == 0) {
    FixedArray* empty = isolate_->heap()->empty_fixed_array();
    SerializeObject(empty, kPlain, kStartOfObject, 0);
  } else {
    // Serialize an imaginary fixed array containing outdated contexts.
    int size = FixedArray::SizeFor(length);
    Allocate(NEW_SPACE, size);
    sink_->Put(kNewObject + NEW_SPACE, "emulated FixedArray");
    sink_->PutInt(size >> kObjectAlignmentBits, "FixedArray size in words");
    Map* map = isolate_->heap()->fixed_array_map();
    SerializeObject(map, kPlain, kStartOfObject, 0);
    Smi* length_smi = Smi::FromInt(length);
    sink_->Put(kOnePointerRawData, "Smi");
    for (int i = 0; i < kPointerSize; i++) {
      sink_->Put(reinterpret_cast<byte*>(&length_smi)[i], "Byte");
    }
    for (int i = 0; i < length; i++) {
      BackReference back_ref = outdated_contexts_[i];
1346
      DCHECK(BackReferenceIsAlreadyAllocated(back_ref));
1347 1348 1349 1350 1351 1352 1353
      sink_->Put(kBackref + back_ref.space(), "BackRef");
      sink_->PutInt(back_ref.reference(), "BackRefValue");
    }
  }
}


1354 1355 1356 1357 1358 1359 1360 1361
bool Serializer::ShouldBeSkipped(Object** current) {
  Object** roots = isolate()->heap()->roots_array_start();
  return current == &roots[Heap::kStoreBufferTopRootIndex]
      || current == &roots[Heap::kStackLimitRootIndex]
      || current == &roots[Heap::kRealStackLimitRootIndex];
}


1362
void Serializer::VisitPointers(Object** start, Object** end) {
1363
  for (Object** current = start; current < end; current++) {
1364
    if ((*current)->IsSmi()) {
1365
      sink_->Put(kOnePointerRawData, "Smi");
1366 1367 1368 1369
      for (int i = 0; i < kPointerSize; i++) {
        sink_->Put(reinterpret_cast<byte*>(current)[i], "Byte");
      }
    } else {
1370
      SerializeObject(HeapObject::cast(*current), kPlain, kStartOfObject, 0);
1371
    }
1372 1373 1374 1375
  }
}


1376 1377
void Serializer::EncodeReservations(
    List<SerializedData::Reservation>* out) const {
1378
  for (int i = 0; i < kNumberOfPreallocatedSpaces; i++) {
1379 1380 1381 1382
    for (int j = 0; j < completed_chunks_[i].length(); j++) {
      out->Add(SerializedData::Reservation(completed_chunks_[i][j]));
    }

1383
    if (pending_chunk_[i] > 0 || completed_chunks_[i].length() == 0) {
1384
      out->Add(SerializedData::Reservation(pending_chunk_[i]));
1385
    }
1386
    out->last().mark_as_last();
1387
  }
1388 1389 1390

  out->Add(SerializedData::Reservation(large_objects_total_size_));
  out->last().mark_as_last();
1391 1392 1393
}


1394 1395
// This ensures that the partial snapshot cache keeps things alive during GC and
// tracks their movement.  When it is called during serialization of the startup
1396 1397 1398 1399 1400 1401
// snapshot nothing happens.  When the partial (context) snapshot is created,
// this array is populated with the pointers that the partial snapshot will
// need. As that happens we emit serialized objects to the startup snapshot
// that correspond to the elements of this cache array.  On deserialization we
// therefore need to visit the cache array.  This fills it up with pointers to
// deserialized objects.
1402 1403
void SerializerDeserializer::Iterate(Isolate* isolate,
                                     ObjectVisitor* visitor) {
1404
  if (isolate->serializer_enabled()) return;
1405 1406 1407 1408 1409
  List<Object*>* cache = isolate->partial_snapshot_cache();
  for (int i = 0;; ++i) {
    // Extend the array ready to get a value when deserializing.
    if (cache->length() <= i) cache->Add(Smi::FromInt(0));
    visitor->VisitPointer(&cache->at(i));
1410 1411
    // Sentinel is the undefined object, which is a root so it will not normally
    // be found in the cache.
1412
    if (cache->at(i)->IsUndefined()) break;
1413
  }
1414 1415 1416 1417
}


int PartialSerializer::PartialSnapshotCacheIndex(HeapObject* heap_object) {
1418
  Isolate* isolate = this->isolate();
1419
  List<Object*>* cache = isolate->partial_snapshot_cache();
1420 1421 1422 1423 1424 1425 1426 1427 1428 1429 1430 1431
  int new_index = cache->length();

  int index = partial_cache_index_map_.LookupOrInsert(heap_object, new_index);
  if (index == PartialCacheIndexMap::kInvalidIndex) {
    // We didn't find the object in the cache.  So we add it to the cache and
    // then visit the pointer so that it becomes part of the startup snapshot
    // and we can refer to it from the partial snapshot.
    cache->Add(heap_object);
    startup_serializer_->VisitPointer(reinterpret_cast<Object**>(&heap_object));
    // We don't recurse from the startup snapshot generator into the partial
    // snapshot generator.
    return new_index;
1432
  }
1433
  return index;
1434 1435 1436
}


1437 1438 1439 1440 1441 1442 1443 1444 1445 1446 1447 1448 1449 1450 1451 1452 1453 1454 1455 1456
#ifdef DEBUG
bool Serializer::BackReferenceIsAlreadyAllocated(BackReference reference) {
  DCHECK(reference.is_valid());
  DCHECK(!reference.is_source());
  DCHECK(!reference.is_global_proxy());
  AllocationSpace space = reference.space();
  int chunk_index = reference.chunk_index();
  if (space == LO_SPACE) {
    return chunk_index == 0 &&
           reference.large_object_index() < seen_large_objects_index_;
  } else if (chunk_index == completed_chunks_[space].length()) {
    return reference.chunk_offset() < pending_chunk_[space];
  } else {
    return chunk_index < completed_chunks_[space].length() &&
           reference.chunk_offset() < completed_chunks_[space][chunk_index];
  }
}
#endif  // DEBUG


1457 1458 1459 1460 1461 1462 1463 1464 1465 1466 1467 1468 1469 1470 1471 1472 1473 1474 1475 1476
bool Serializer::SerializeKnownObject(HeapObject* obj, HowToCode how_to_code,
                                      WhereToPoint where_to_point, int skip) {
  if (how_to_code == kPlain && where_to_point == kStartOfObject) {
    // Encode a reference to a hot object by its index in the working set.
    int index = hot_objects_.Find(obj);
    if (index != HotObjectsList::kNotFound) {
      DCHECK(index >= 0 && index <= kMaxHotObjectIndex);
      if (FLAG_trace_serializer) {
        PrintF(" Encoding hot object %d:", index);
        obj->ShortPrint();
        PrintF("\n");
      }
      if (skip != 0) {
        sink_->Put(kHotObjectWithSkip + index, "HotObjectWithSkip");
        sink_->PutInt(skip, "HotObjectSkipDistance");
      } else {
        sink_->Put(kHotObject + index, "HotObject");
      }
      return true;
    }
1477
  }
1478 1479 1480 1481 1482 1483 1484 1485 1486 1487
  BackReference back_reference = back_reference_map_.Lookup(obj);
  if (back_reference.is_valid()) {
    // Encode the location of an already deserialized object in order to write
    // its location into a later object.  We can encode the location as an
    // offset fromthe start of the deserialized objects or as an offset
    // backwards from thecurrent allocation pointer.
    if (back_reference.is_source()) {
      FlushSkip(skip);
      if (FLAG_trace_serializer) PrintF(" Encoding source object\n");
      DCHECK(how_to_code == kPlain && where_to_point == kStartOfObject);
1488 1489 1490 1491 1492 1493 1494 1495
      sink_->Put(kAttachedReference + kPlain + kStartOfObject, "Source");
      sink_->PutInt(kSourceObjectReference, "kSourceObjectReference");
    } else if (back_reference.is_global_proxy()) {
      FlushSkip(skip);
      if (FLAG_trace_serializer) PrintF(" Encoding global proxy\n");
      DCHECK(how_to_code == kPlain && where_to_point == kStartOfObject);
      sink_->Put(kAttachedReference + kPlain + kStartOfObject, "Global Proxy");
      sink_->PutInt(kGlobalProxyReference, "kGlobalProxyReference");
1496 1497 1498 1499 1500 1501
    } else {
      if (FLAG_trace_serializer) {
        PrintF(" Encoding back reference to: ");
        obj->ShortPrint();
        PrintF("\n");
      }
1502

1503 1504 1505 1506 1507 1508 1509 1510
      AllocationSpace space = back_reference.space();
      if (skip == 0) {
        sink_->Put(kBackref + how_to_code + where_to_point + space, "BackRef");
      } else {
        sink_->Put(kBackrefWithSkip + how_to_code + where_to_point + space,
                   "BackRefWithSkip");
        sink_->PutInt(skip, "BackRefSkipDistance");
      }
1511
      DCHECK(BackReferenceIsAlreadyAllocated(back_reference));
1512 1513 1514 1515 1516 1517 1518
      sink_->PutInt(back_reference.reference(), "BackRefValue");

      hot_objects_.Add(obj);
    }
    return true;
  }
  return false;
1519 1520 1521
}


1522 1523 1524
void StartupSerializer::SerializeObject(HeapObject* obj, HowToCode how_to_code,
                                        WhereToPoint where_to_point, int skip) {
  DCHECK(!obj->IsJSFunction());
1525

1526 1527 1528 1529 1530 1531
  int root_index = root_index_map_.Lookup(obj);
  // We can only encode roots as such if it has already been serialized.
  // That applies to root indices below the wave front.
  if (root_index != RootIndexMap::kInvalidRootIndex &&
      root_index < root_index_wave_front_) {
    PutRoot(root_index, obj, how_to_code, where_to_point, skip);
1532 1533 1534
    return;
  }

1535 1536 1537 1538
  if (obj->IsCode() && Code::cast(obj)->kind() == Code::FUNCTION) {
    obj = isolate()->builtins()->builtin(Builtins::kCompileLazy);
  }

1539
  if (SerializeKnownObject(obj, how_to_code, where_to_point, skip)) return;
1540

1541
  FlushSkip(skip);
1542 1543 1544 1545 1546

  // Object has not yet been serialized.  Serialize it here.
  ObjectSerializer object_serializer(this, obj, sink_, how_to_code,
                                     where_to_point);
  object_serializer.Serialize();
1547 1548 1549 1550
}


void StartupSerializer::SerializeWeakReferences() {
1551
  // This phase comes right after the serialization (of the snapshot).
1552 1553 1554 1555
  // After we have done the partial serialization the partial snapshot cache
  // will contain some references needed to decode the partial snapshot.  We
  // add one entry with 'undefined' which is the sentinel that the deserializer
  // uses to know it is done deserializing the array.
1556
  Object* undefined = isolate()->heap()->undefined_value();
1557
  VisitPointer(&undefined);
1558
  isolate()->heap()->IterateWeakRoots(this, VISIT_ALL);
1559
  Pad();
1560 1561 1562
}


1563 1564 1565
void Serializer::PutRoot(int root_index,
                         HeapObject* object,
                         SerializerDeserializer::HowToCode how_to_code,
1566 1567
                         SerializerDeserializer::WhereToPoint where_to_point,
                         int skip) {
1568 1569 1570 1571 1572 1573
  if (FLAG_trace_serializer) {
    PrintF(" Encoding root %d:", root_index);
    object->ShortPrint();
    PrintF("\n");
  }

1574 1575 1576
  if (how_to_code == kPlain &&
      where_to_point == kStartOfObject &&
      root_index < kRootArrayNumberOfConstantEncodings &&
1577
      !isolate()->heap()->InNewSpace(object)) {
1578 1579 1580
    if (skip == 0) {
      sink_->Put(kRootArrayConstants + kNoSkipDistance + root_index,
                 "RootConstant");
1581
    } else {
1582 1583 1584
      sink_->Put(kRootArrayConstants + kHasSkipDistance + root_index,
                 "RootConstant");
      sink_->PutInt(skip, "SkipInPutRoot");
1585 1586
    }
  } else {
1587
    FlushSkip(skip);
1588 1589 1590 1591 1592 1593
    sink_->Put(kRootArray + how_to_code + where_to_point, "RootSerialization");
    sink_->PutInt(root_index, "root_index");
  }
}


1594 1595 1596
void PartialSerializer::SerializeObject(HeapObject* obj, HowToCode how_to_code,
                                        WhereToPoint where_to_point, int skip) {
  if (obj->IsMap()) {
1597 1598
    // The code-caches link to context-specific code objects, which
    // the startup and context serializes cannot currently handle.
1599
    DCHECK(Map::cast(obj)->code_cache() == obj->GetHeap()->empty_fixed_array());
1600 1601
  }

1602 1603 1604
  // Replace typed arrays by undefined.
  if (obj->IsJSTypedArray()) obj = isolate_->heap()->undefined_value();

1605 1606 1607
  int root_index = root_index_map_.Lookup(obj);
  if (root_index != RootIndexMap::kInvalidRootIndex) {
    PutRoot(root_index, obj, how_to_code, where_to_point, skip);
1608 1609 1610
    return;
  }

1611
  if (ShouldBeInThePartialSnapshotCache(obj)) {
1612
    FlushSkip(skip);
1613

1614
    int cache_index = PartialSnapshotCacheIndex(obj);
1615 1616
    sink_->Put(kPartialSnapshotCache + how_to_code + where_to_point,
               "PartialSnapshotCache");
1617 1618 1619 1620 1621 1622 1623
    sink_->PutInt(cache_index, "partial_snapshot_cache_index");
    return;
  }

  // Pointers from the partial snapshot to the objects in the startup snapshot
  // should go through the root array or through the partial snapshot cache.
  // If this is not the case you may have to add something to the root array.
1624
  DCHECK(!startup_serializer_->back_reference_map()->Lookup(obj).is_valid());
1625 1626
  // All the internalized strings that the partial snapshot needs should be
  // either in the root table or in the partial snapshot cache.
1627
  DCHECK(!obj->IsInternalizedString());
1628

1629 1630 1631
  if (SerializeKnownObject(obj, how_to_code, where_to_point, skip)) return;

  FlushSkip(skip);
1632 1633 1634 1635

  // Object has not yet been serialized.  Serialize it here.
  ObjectSerializer serializer(this, obj, sink_, how_to_code, where_to_point);
  serializer.Serialize();
1636 1637 1638 1639 1640 1641 1642 1643 1644

  if (obj->IsContext() &&
      Context::cast(obj)->global_object() == global_object_) {
    // Context refers to the current global object. This reference will
    // become outdated after deserialization.
    BackReference back_reference = back_reference_map_.Lookup(obj);
    DCHECK(back_reference.is_valid());
    outdated_contexts_.Add(back_reference);
  }
1645 1646 1647
}


1648 1649
void Serializer::ObjectSerializer::SerializePrologue(AllocationSpace space,
                                                     int size, Map* map) {
1650 1651 1652 1653 1654 1655 1656 1657
  if (serializer_->code_address_map_) {
    const char* code_name =
        serializer_->code_address_map_->Lookup(object_->address());
    LOG(serializer_->isolate_,
        CodeNameEvent(object_->address(), sink_->Position(), code_name));
    LOG(serializer_->isolate_,
        SnapshotPositionEvent(object_->address(), sink_->Position()));
  }
1658

1659
  BackReference back_reference;
1660
  if (space == LO_SPACE) {
1661
    sink_->Put(kNewObject + reference_representation_ + space,
1662 1663
               "NewLargeObject");
    sink_->PutInt(size >> kObjectAlignmentBits, "ObjectSizeInWords");
1664
    if (object_->IsCode()) {
1665
      sink_->Put(EXECUTABLE, "executable large object");
1666
    } else {
1667
      sink_->Put(NOT_EXECUTABLE, "not executable large object");
1668
    }
1669
    back_reference = serializer_->AllocateLargeObject(size);
1670
  } else {
1671
    bool needs_double_align = false;
1672 1673 1674
    if (object_->NeedsToEnsureDoubleAlignment()) {
      // Add wriggle room for double alignment padding.
      back_reference = serializer_->Allocate(space, size + kPointerSize);
1675
      needs_double_align = true;
1676 1677 1678
    } else {
      back_reference = serializer_->Allocate(space, size);
    }
1679
    sink_->Put(kNewObject + reference_representation_ + space, "NewObject");
1680 1681
    if (needs_double_align)
      sink_->PutInt(kDoubleAlignmentSentinel, "DoubleAlignSentinel");
1682 1683
    int encoded_size = size >> kObjectAlignmentBits;
    DCHECK_NE(kDoubleAlignmentSentinel, encoded_size);
1684
    sink_->PutInt(encoded_size, "ObjectSizeInWords");
1685
  }
1686 1687

  // Mark this object as already serialized.
1688
  serializer_->back_reference_map()->Add(object_, back_reference);
1689 1690

  // Serialize the map (first word of the object).
1691 1692 1693 1694 1695 1696 1697 1698
  serializer_->SerializeObject(map, kPlain, kStartOfObject, 0);
}


void Serializer::ObjectSerializer::SerializeExternalString() {
  // Instead of serializing this as an external string, we serialize
  // an imaginary sequential string with the same content.
  Isolate* isolate = serializer_->isolate();
1699 1700
  DCHECK(object_->IsExternalString());
  DCHECK(object_->map() != isolate->heap()->native_source_string_map());
1701 1702 1703
  ExternalString* string = ExternalString::cast(object_);
  int length = string->length();
  Map* map;
1704 1705 1706
  int content_size;
  int allocation_size;
  const byte* resource;
1707
  // Find the map and size for the imaginary sequential string.
1708
  bool internalized = object_->IsInternalizedString();
1709
  if (object_->IsExternalOneByteString()) {
1710 1711
    map = internalized ? isolate->heap()->one_byte_internalized_string_map()
                       : isolate->heap()->one_byte_string_map();
1712 1713 1714 1715
    allocation_size = SeqOneByteString::SizeFor(length);
    content_size = length * kCharSize;
    resource = reinterpret_cast<const byte*>(
        ExternalOneByteString::cast(string)->resource()->data());
1716
  } else {
1717 1718
    map = internalized ? isolate->heap()->internalized_string_map()
                       : isolate->heap()->string_map();
1719 1720 1721
    allocation_size = SeqTwoByteString::SizeFor(length);
    content_size = length * kShortSize;
    resource = reinterpret_cast<const byte*>(
1722 1723 1724
        ExternalTwoByteString::cast(string)->resource()->data());
  }

1725 1726 1727 1728
  AllocationSpace space = (allocation_size > Page::kMaxRegularHeapObjectSize)
                              ? LO_SPACE
                              : OLD_DATA_SPACE;
  SerializePrologue(space, allocation_size, map);
1729 1730

  // Output the rest of the imaginary string.
1731
  int bytes_to_output = allocation_size - HeapObject::kHeaderSize;
1732 1733 1734 1735 1736 1737 1738 1739 1740 1741 1742 1743

  // Output raw data header. Do not bother with common raw length cases here.
  sink_->Put(kRawData, "RawDataForString");
  sink_->PutInt(bytes_to_output, "length");

  // Serialize string header (except for map).
  Address string_start = string->address();
  for (int i = HeapObject::kHeaderSize; i < SeqString::kHeaderSize; i++) {
    sink_->PutSection(string_start[i], "StringHeader");
  }

  // Serialize string content.
1744
  sink_->PutRaw(resource, content_size, "StringContent");
1745 1746 1747 1748 1749 1750

  // Since the allocation size is rounded up to object alignment, there
  // maybe left-over bytes that need to be padded.
  int padding_size = allocation_size - SeqString::kHeaderSize - content_size;
  DCHECK(0 <= padding_size && padding_size < kObjectAlignment);
  for (int i = 0; i < padding_size; i++) sink_->PutSection(0, "StringPadding");
1751 1752 1753 1754 1755

  sink_->Put(kSkip, "SkipAfterString");
  sink_->PutInt(bytes_to_output, "SkipDistance");
}

1756

1757
void Serializer::ObjectSerializer::Serialize() {
1758 1759 1760 1761 1762 1763
  if (FLAG_trace_serializer) {
    PrintF(" Encoding heap object: ");
    object_->ShortPrint();
    PrintF("\n");
  }

1764 1765 1766
  // We cannot serialize typed array objects correctly.
  DCHECK(!object_->IsJSTypedArray());

1767 1768 1769 1770 1771 1772
  if (object_->IsScript()) {
    // Clear cached line ends.
    Object* undefined = serializer_->isolate()->heap()->undefined_value();
    Script::cast(object_)->set_line_ends(undefined);
  }

1773 1774 1775 1776 1777 1778 1779 1780 1781 1782 1783 1784 1785
  if (object_->IsExternalString()) {
    Heap* heap = serializer_->isolate()->heap();
    if (object_->map() != heap->native_source_string_map()) {
      // Usually we cannot recreate resources for external strings. To work
      // around this, external strings are serialized to look like ordinary
      // sequential strings.
      // The exception are native source code strings, since we can recreate
      // their resources. In that case we fall through and leave it to
      // VisitExternalOneByteString further down.
      SerializeExternalString();
      return;
    }
  }
1786

1787 1788
  int size = object_->Size();
  Map* map = object_->map();
1789 1790 1791
  AllocationSpace space =
      MemoryChunk::FromAddress(object_->address())->owner()->identity();
  SerializePrologue(space, size, map);
1792

1793 1794 1795 1796 1797 1798
  // Serialize the rest of the object.
  CHECK_EQ(0, bytes_processed_so_far_);
  bytes_processed_so_far_ = kPointerSize;

  object_->IterateBody(map->instance_type(), size, this);
  OutputRawData(object_->address() + size);
1799 1800 1801
}


1802 1803
void Serializer::ObjectSerializer::VisitPointers(Object** start,
                                                 Object** end) {
1804 1805 1806
  Object** current = start;
  while (current < end) {
    while (current < end && (*current)->IsSmi()) current++;
1807
    if (current < end) OutputRawData(reinterpret_cast<Address>(current));
1808 1809

    while (current < end && !(*current)->IsSmi()) {
1810
      HeapObject* current_contents = HeapObject::cast(*current);
1811
      int root_index = serializer_->root_index_map()->Lookup(current_contents);
1812 1813
      // Repeats are not subject to the write barrier so we can only use
      // immortal immovable root members. They are never in new space.
1814
      if (current != start && root_index != RootIndexMap::kInvalidRootIndex &&
1815
          Heap::RootIsImmortalImmovable(root_index) &&
1816
          current_contents == current[-1]) {
1817
        DCHECK(!serializer_->isolate()->heap()->InNewSpace(current_contents));
1818
        int repeat_count = 1;
1819 1820
        while (&current[repeat_count] < end - 1 &&
               current[repeat_count] == current_contents) {
1821 1822 1823 1824
          repeat_count++;
        }
        current += repeat_count;
        bytes_processed_so_far_ += repeat_count * kPointerSize;
1825 1826
        if (repeat_count > kMaxFixedRepeats) {
          sink_->Put(kVariableRepeat, "SerializeRepeats");
1827 1828 1829 1830 1831
          sink_->PutInt(repeat_count, "SerializeRepeats");
        } else {
          sink_->Put(CodeForRepeats(repeat_count), "SerializeRepeats");
        }
      } else {
1832 1833
        serializer_->SerializeObject(
                current_contents, kPlain, kStartOfObject, 0);
1834 1835 1836
        bytes_processed_so_far_ += kPointerSize;
        current++;
      }
1837
    }
1838 1839 1840 1841
  }
}


1842
void Serializer::ObjectSerializer::VisitEmbeddedPointer(RelocInfo* rinfo) {
1843 1844 1845
  // Out-of-line constant pool entries will be visited by the ConstantPoolArray.
  if (FLAG_enable_ool_constant_pool && rinfo->IsInConstantPool()) return;

1846 1847
  int skip = OutputRawData(rinfo->target_address_address(),
                           kCanReturnSkipInsteadOfSkipping);
1848 1849
  HowToCode how_to_code = rinfo->IsCodedSpecially() ? kFromCode : kPlain;
  Object* object = rinfo->target_object();
1850 1851
  serializer_->SerializeObject(HeapObject::cast(object), how_to_code,
                               kStartOfObject, skip);
1852 1853 1854 1855
  bytes_processed_so_far_ += rinfo->target_address_size();
}


1856
void Serializer::ObjectSerializer::VisitExternalReference(Address* p) {
1857 1858
  int skip = OutputRawData(reinterpret_cast<Address>(p),
                           kCanReturnSkipInsteadOfSkipping);
1859 1860
  sink_->Put(kExternalReference + kPlain + kStartOfObject, "ExternalRef");
  sink_->PutInt(skip, "SkipB4ExternalRef");
1861 1862
  Address target = *p;
  sink_->PutInt(serializer_->EncodeExternalReference(target), "reference id");
1863
  bytes_processed_so_far_ += kPointerSize;
1864 1865 1866
}


1867
void Serializer::ObjectSerializer::VisitExternalReference(RelocInfo* rinfo) {
1868 1869 1870 1871
  int skip = OutputRawData(rinfo->target_address_address(),
                           kCanReturnSkipInsteadOfSkipping);
  HowToCode how_to_code = rinfo->IsCodedSpecially() ? kFromCode : kPlain;
  sink_->Put(kExternalReference + how_to_code + kStartOfObject, "ExternalRef");
1872
  sink_->PutInt(skip, "SkipB4ExternalRef");
1873
  Address target = rinfo->target_external_reference();
1874
  sink_->PutInt(serializer_->EncodeExternalReference(target), "reference id");
1875 1876 1877 1878
  bytes_processed_so_far_ += rinfo->target_address_size();
}


1879
void Serializer::ObjectSerializer::VisitRuntimeEntry(RelocInfo* rinfo) {
1880 1881 1882 1883
  int skip = OutputRawData(rinfo->target_address_address(),
                           kCanReturnSkipInsteadOfSkipping);
  HowToCode how_to_code = rinfo->IsCodedSpecially() ? kFromCode : kPlain;
  sink_->Put(kExternalReference + how_to_code + kStartOfObject, "ExternalRef");
1884
  sink_->PutInt(skip, "SkipB4ExternalRef");
1885 1886
  Address target = rinfo->target_address();
  sink_->PutInt(serializer_->EncodeExternalReference(target), "reference id");
1887
  bytes_processed_so_far_ += rinfo->target_address_size();
1888 1889 1890
}


1891
void Serializer::ObjectSerializer::VisitCodeTarget(RelocInfo* rinfo) {
1892 1893 1894
  // Out-of-line constant pool entries will be visited by the ConstantPoolArray.
  if (FLAG_enable_ool_constant_pool && rinfo->IsInConstantPool()) return;

1895 1896 1897 1898
  int skip = OutputRawData(rinfo->target_address_address(),
                           kCanReturnSkipInsteadOfSkipping);
  Code* object = Code::GetCodeFromTargetAddress(rinfo->target_address());
  serializer_->SerializeObject(object, kFromCode, kInnerPointer, skip);
1899
  bytes_processed_so_far_ += rinfo->target_address_size();
1900 1901 1902
}


1903
void Serializer::ObjectSerializer::VisitCodeEntry(Address entry_address) {
1904
  int skip = OutputRawData(entry_address, kCanReturnSkipInsteadOfSkipping);
1905 1906
  Code* object = Code::cast(Code::GetObjectFromEntryAddress(entry_address));
  serializer_->SerializeObject(object, kPlain, kInnerPointer, skip);
1907 1908 1909 1910
  bytes_processed_so_far_ += kPointerSize;
}


1911
void Serializer::ObjectSerializer::VisitCell(RelocInfo* rinfo) {
1912 1913 1914
  // Out-of-line constant pool entries will be visited by the ConstantPoolArray.
  if (FLAG_enable_ool_constant_pool && rinfo->IsInConstantPool()) return;

1915
  int skip = OutputRawData(rinfo->pc(), kCanReturnSkipInsteadOfSkipping);
1916 1917
  Cell* object = Cell::cast(rinfo->target_cell());
  serializer_->SerializeObject(object, kPlain, kInnerPointer, skip);
1918
  bytes_processed_so_far_ += kPointerSize;
1919 1920 1921
}


1922 1923
void Serializer::ObjectSerializer::VisitExternalOneByteString(
    v8::String::ExternalOneByteStringResource** resource_pointer) {
1924 1925 1926
  Address references_start = reinterpret_cast<Address>(resource_pointer);
  OutputRawData(references_start);
  for (int i = 0; i < Natives::GetBuiltinsCount(); i++) {
1927 1928
    Object* source =
        serializer_->isolate()->heap()->natives_source_cache()->get(i);
1929
    if (!source->IsUndefined()) {
1930 1931
      ExternalOneByteString* string = ExternalOneByteString::cast(source);
      typedef v8::String::ExternalOneByteStringResource Resource;
1932
      const Resource* resource = string->resource();
1933
      if (resource == *resource_pointer) {
1934
        sink_->Put(kNativesStringResource, "NativesStringResource");
1935 1936 1937 1938 1939 1940 1941
        sink_->PutSection(i, "NativesStringResourceEnd");
        bytes_processed_so_far_ += sizeof(resource);
        return;
      }
    }
  }
  // One of the strings in the natives cache should match the resource.  We
1942
  // don't expect any other kinds of external strings here.
1943 1944 1945 1946
  UNREACHABLE();
}


1947 1948 1949
Address Serializer::ObjectSerializer::PrepareCode() {
  // To make snapshots reproducible, we make a copy of the code object
  // and wipe all pointers in the copy, which we then serialize.
1950 1951
  Code* original = Code::cast(object_);
  Code* code = serializer_->CopyCode(original);
1952 1953
  // Code age headers are not serializable.
  code->MakeYoung(serializer_->isolate());
1954 1955 1956 1957 1958 1959
  Address entry = original->entry();
  int mode_mask = RelocInfo::kCodeTargetMask |
                  RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT) |
                  RelocInfo::ModeMask(RelocInfo::EXTERNAL_REFERENCE) |
                  RelocInfo::ModeMask(RelocInfo::RUNTIME_ENTRY) |
                  RelocInfo::ModeMask(RelocInfo::INTERNAL_REFERENCE);
1960
  for (RelocIterator it(code, mode_mask); !it.done(); it.next()) {
1961 1962 1963 1964 1965 1966 1967 1968 1969
    RelocInfo* rinfo = it.rinfo();
    if (RelocInfo::IsInternalReference(rinfo->rmode())) {
      // Convert internal references to relative offsets.
      Address target = rinfo->target_internal_reference();
      intptr_t offset = target - entry;
      DCHECK(0 <= offset && offset <= original->instruction_size());
      rinfo->set_target_internal_reference(reinterpret_cast<Address>(offset));
    } else if (!(FLAG_enable_ool_constant_pool && rinfo->IsInConstantPool())) {
      rinfo->WipeOut();
1970
    }
1971
  }
1972 1973 1974 1975
  // We need to wipe out the header fields *after* wiping out the
  // relocations, because some of these fields are needed for the latter.
  code->WipeOutHeader();
  return code->address();
1976 1977 1978
}


1979 1980
int Serializer::ObjectSerializer::OutputRawData(
    Address up_to, Serializer::ObjectSerializer::ReturnSkip return_skip) {
1981
  Address object_start = object_->address();
1982
  int base = bytes_processed_so_far_;
1983
  int up_to_offset = static_cast<int>(up_to - object_start);
1984 1985
  int to_skip = up_to_offset - bytes_processed_so_far_;
  int bytes_to_output = to_skip;
1986
  bytes_processed_so_far_ += to_skip;
1987 1988
  // This assert will fail if the reloc info gives us the target_address_address
  // locations in a non-ascending order.  Luckily that doesn't happen.
1989
  DCHECK(to_skip >= 0);
1990 1991 1992 1993 1994 1995 1996 1997 1998 1999 2000 2001
  bool outputting_code = false;
  if (to_skip != 0 && code_object_ && !code_has_been_output_) {
    // Output the code all at once and fix later.
    bytes_to_output = object_->Size() + to_skip - bytes_processed_so_far_;
    outputting_code = true;
    code_has_been_output_ = true;
  }
  if (bytes_to_output != 0 &&
      (!code_object_ || outputting_code)) {
#define RAW_CASE(index)                                                        \
    if (!outputting_code && bytes_to_output == index * kPointerSize &&         \
        index * kPointerSize == to_skip) {                                     \
2002
      sink_->PutSection(kRawData + index, "RawDataFixed");                     \
2003
      to_skip = 0;  /* This insn already skips. */                             \
2004 2005 2006 2007
    } else  /* NOLINT */
    COMMON_RAW_LENGTHS(RAW_CASE)
#undef RAW_CASE
    {  /* NOLINT */
2008
      // We always end up here if we are outputting the code of a code object.
2009
      sink_->Put(kRawData, "RawData");
2010
      sink_->PutInt(bytes_to_output, "length");
2011
    }
2012

2013
    if (code_object_) object_start = PrepareCode();
2014 2015

    const char* description = code_object_ ? "Code" : "Byte";
2016 2017 2018 2019
#ifdef MEMORY_SANITIZER
    // Object sizes are usually rounded up with uninitialized padding space.
    MSAN_MEMORY_IS_INITIALIZED(object_start + base, bytes_to_output);
#endif  // MEMORY_SANITIZER
2020
    sink_->PutRaw(object_start + base, bytes_to_output, description);
2021
  }
2022 2023 2024 2025
  if (to_skip != 0 && return_skip == kIgnoringReturn) {
    sink_->Put(kSkip, "Skip");
    sink_->PutInt(to_skip, "SkipDistance");
    to_skip = 0;
2026
  }
2027
  return to_skip;
2028 2029 2030
}


2031
BackReference Serializer::AllocateLargeObject(int size) {
2032 2033
  // Large objects are allocated one-by-one when deserializing. We do not
  // have to keep track of multiple chunks.
2034
  large_objects_total_size_ += size;
2035
  return BackReference::LargeObjectReference(seen_large_objects_index_++);
2036 2037 2038
}


2039
BackReference Serializer::Allocate(AllocationSpace space, int size) {
2040
  DCHECK(space >= 0 && space < kNumberOfPreallocatedSpaces);
2041
  DCHECK(size > 0 && size <= static_cast<int>(max_chunk_size(space)));
2042
  uint32_t new_chunk_size = pending_chunk_[space] + size;
2043
  if (new_chunk_size > max_chunk_size(space)) {
2044 2045
    // The new chunk size would not fit onto a single page. Complete the
    // current chunk and start a new one.
2046 2047
    sink_->Put(kNextChunk, "NextChunk");
    sink_->Put(space, "NextChunkSpace");
2048
    completed_chunks_[space].Add(pending_chunk_[space]);
2049
    DCHECK_LE(completed_chunks_[space].length(), BackReference::kMaxChunkIndex);
2050 2051 2052
    pending_chunk_[space] = 0;
    new_chunk_size = size;
  }
2053
  uint32_t offset = pending_chunk_[space];
2054
  pending_chunk_[space] = new_chunk_size;
2055 2056
  return BackReference::Reference(space, completed_chunks_[space].length(),
                                  offset);
2057 2058 2059
}


2060 2061 2062
void Serializer::Pad() {
  // The non-branching GetInt will read up to 3 bytes too far, so we need
  // to pad the snapshot to make sure we don't read over the end.
2063 2064 2065
  for (unsigned i = 0; i < sizeof(int32_t) - 1; i++) {
    sink_->Put(kNop, "Padding");
  }
2066 2067 2068 2069
  // Pad up to pointer size for checksum.
  while (!IsAligned(sink_->Position(), kPointerAlignment)) {
    sink_->Put(kNop, "Padding");
  }
2070 2071 2072
}


2073 2074 2075 2076 2077 2078
void Serializer::InitializeCodeAddressMap() {
  isolate_->InitializeLoggingAndCounters();
  code_address_map_ = new CodeAddressMap(isolate_);
}


2079 2080 2081 2082 2083 2084 2085 2086
Code* Serializer::CopyCode(Code* code) {
  code_buffer_.Rewind(0);  // Clear buffer without deleting backing store.
  int size = code->CodeSize();
  code_buffer_.AddAll(Vector<byte>(code->address(), size));
  return Code::cast(HeapObject::FromAddress(&code_buffer_.first()));
}


2087 2088 2089
ScriptData* CodeSerializer::Serialize(Isolate* isolate,
                                      Handle<SharedFunctionInfo> info,
                                      Handle<String> source) {
2090 2091
  base::ElapsedTimer timer;
  if (FLAG_profile_deserialization) timer.Start();
2092
  if (FLAG_trace_serializer) {
2093 2094 2095 2096 2097
    PrintF("[Serializing from");
    Object* script = info->script();
    if (script->IsScript()) Script::cast(script)->name()->ShortPrint();
    PrintF("]\n");
  }
2098

2099
  // Serialize code object.
2100 2101
  SnapshotByteSink sink(info->code()->CodeSize() * 2);
  CodeSerializer cs(isolate, &sink, *source, info->code());
2102
  DisallowHeapAllocation no_gc;
2103
  Object** location = Handle<Object>::cast(info).location();
2104 2105
  cs.VisitPointer(location);
  cs.Pad();
2106

2107
  SerializedCodeData data(sink.data(), cs);
2108 2109 2110 2111 2112 2113 2114 2115 2116
  ScriptData* script_data = data.GetScriptData();

  if (FLAG_profile_deserialization) {
    double ms = timer.Elapsed().InMillisecondsF();
    int length = script_data->length();
    PrintF("[Serializing to %d bytes took %0.3f ms]\n", length, ms);
  }

  return script_data;
2117 2118 2119
}


2120
void CodeSerializer::SerializeObject(HeapObject* obj, HowToCode how_to_code,
2121
                                     WhereToPoint where_to_point, int skip) {
2122 2123 2124
  int root_index = root_index_map_.Lookup(obj);
  if (root_index != RootIndexMap::kInvalidRootIndex) {
    PutRoot(root_index, obj, how_to_code, where_to_point, skip);
2125 2126 2127
    return;
  }

2128
  if (SerializeKnownObject(obj, how_to_code, where_to_point, skip)) return;
2129

2130
  FlushSkip(skip);
2131

2132 2133
  if (obj->IsCode()) {
    Code* code_object = Code::cast(obj);
2134 2135 2136 2137 2138 2139 2140
    switch (code_object->kind()) {
      case Code::OPTIMIZED_FUNCTION:  // No optimized code compiled yet.
      case Code::HANDLER:             // No handlers patched in yet.
      case Code::REGEXP:              // No regexp literals initialized yet.
      case Code::NUMBER_OF_KINDS:     // Pseudo enum value.
        CHECK(false);
      case Code::BUILTIN:
2141 2142
        SerializeBuiltin(code_object->builtin_index(), how_to_code,
                         where_to_point);
2143 2144
        return;
      case Code::STUB:
2145
        SerializeCodeStub(code_object->stub_key(), how_to_code, where_to_point);
2146 2147 2148 2149
        return;
#define IC_KIND_CASE(KIND) case Code::KIND:
        IC_KIND_LIST(IC_KIND_CASE)
#undef IC_KIND_CASE
2150
        SerializeIC(code_object, how_to_code, where_to_point);
2151
        return;
2152
      case Code::FUNCTION:
2153 2154 2155
        DCHECK(code_object->has_reloc_info_for_serialization());
        // Only serialize the code for the toplevel function unless specified
        // by flag. Replace code of inner functions by the lazy compile builtin.
2156
        // This is safe, as checked in Compiler::BuildFunctionInfo.
2157
        if (code_object != main_code_ && !FLAG_serialize_inner) {
2158
          SerializeBuiltin(Builtins::kCompileLazy, how_to_code, where_to_point);
2159
        } else {
2160
          SerializeGeneric(code_object, how_to_code, where_to_point);
2161
        }
2162
        return;
2163
    }
2164
    UNREACHABLE();
2165 2166
  }

2167
  // Past this point we should not see any (context-specific) maps anymore.
2168
  CHECK(!obj->IsMap());
2169
  // There should be no references to the global object embedded.
2170
  CHECK(!obj->IsJSGlobalProxy() && !obj->IsGlobalObject());
2171
  // There should be no hash table embedded. They would require rehashing.
2172
  CHECK(!obj->IsHashTable());
2173 2174
  // We expect no instantiated function objects or contexts.
  CHECK(!obj->IsJSFunction() && !obj->IsContext());
2175

2176
  SerializeGeneric(obj, how_to_code, where_to_point);
2177 2178 2179
}


2180 2181 2182
void CodeSerializer::SerializeGeneric(HeapObject* heap_object,
                                      HowToCode how_to_code,
                                      WhereToPoint where_to_point) {
2183 2184
  if (heap_object->IsInternalizedString()) num_internalized_strings_++;

2185 2186 2187 2188 2189 2190 2191
  // Object has not yet been serialized.  Serialize it here.
  ObjectSerializer serializer(this, heap_object, sink_, how_to_code,
                              where_to_point);
  serializer.Serialize();
}


2192
void CodeSerializer::SerializeBuiltin(int builtin_index, HowToCode how_to_code,
2193
                                      WhereToPoint where_to_point) {
2194
  DCHECK((how_to_code == kPlain && where_to_point == kStartOfObject) ||
2195
         (how_to_code == kPlain && where_to_point == kInnerPointer) ||
2196
         (how_to_code == kFromCode && where_to_point == kInnerPointer));
2197 2198
  DCHECK_LT(builtin_index, Builtins::builtin_count);
  DCHECK_LE(0, builtin_index);
2199

2200
  if (FLAG_trace_serializer) {
2201
    PrintF(" Encoding builtin: %s\n",
2202 2203 2204
           isolate()->builtins()->name(builtin_index));
  }

2205
  sink_->Put(kBuiltin + how_to_code + where_to_point, "Builtin");
2206
  sink_->PutInt(builtin_index, "builtin_index");
2207 2208 2209
}


2210
void CodeSerializer::SerializeCodeStub(uint32_t stub_key, HowToCode how_to_code,
2211
                                       WhereToPoint where_to_point) {
2212 2213 2214
  DCHECK((how_to_code == kPlain && where_to_point == kStartOfObject) ||
         (how_to_code == kPlain && where_to_point == kInnerPointer) ||
         (how_to_code == kFromCode && where_to_point == kInnerPointer));
2215
  DCHECK(CodeStub::MajorKeyFromKey(stub_key) != CodeStub::NoCache);
2216
  DCHECK(!CodeStub::GetCode(isolate(), stub_key).is_null());
2217 2218 2219

  int index = AddCodeStubKey(stub_key) + kCodeStubsBaseIndex;

2220
  if (FLAG_trace_serializer) {
2221
    PrintF(" Encoding code stub %s as %d\n",
2222 2223 2224 2225 2226 2227 2228 2229 2230
           CodeStub::MajorName(CodeStub::MajorKeyFromKey(stub_key), false),
           index);
  }

  sink_->Put(kAttachedReference + how_to_code + where_to_point, "CodeStub");
  sink_->PutInt(index, "CodeStub key");
}


2231 2232 2233 2234 2235
void CodeSerializer::SerializeIC(Code* ic, HowToCode how_to_code,
                                 WhereToPoint where_to_point) {
  // The IC may be implemented as a stub.
  uint32_t stub_key = ic->stub_key();
  if (stub_key != CodeStub::NoCacheKey()) {
2236
    if (FLAG_trace_serializer) {
2237 2238 2239 2240 2241 2242 2243 2244 2245 2246 2247 2248 2249
      PrintF(" %s is a code stub\n", Code::Kind2String(ic->kind()));
    }
    SerializeCodeStub(stub_key, how_to_code, where_to_point);
    return;
  }
  // The IC may be implemented as builtin. Only real builtins have an
  // actual builtin_index value attached (otherwise it's just garbage).
  // Compare to make sure we are really dealing with a builtin.
  int builtin_index = ic->builtin_index();
  if (builtin_index < Builtins::builtin_count) {
    Builtins::Name name = static_cast<Builtins::Name>(builtin_index);
    Code* builtin = isolate()->builtins()->builtin(name);
    if (builtin == ic) {
2250
      if (FLAG_trace_serializer) {
2251 2252 2253 2254 2255 2256 2257 2258 2259 2260
        PrintF(" %s is a builtin\n", Code::Kind2String(ic->kind()));
      }
      DCHECK(ic->kind() == Code::KEYED_LOAD_IC ||
             ic->kind() == Code::KEYED_STORE_IC);
      SerializeBuiltin(builtin_index, how_to_code, where_to_point);
      return;
    }
  }
  // The IC may also just be a piece of code kept in the non_monomorphic_cache.
  // In that case, just serialize as a normal code object.
2261
  if (FLAG_trace_serializer) {
2262 2263 2264
    PrintF(" %s has no special handling\n", Code::Kind2String(ic->kind()));
  }
  DCHECK(ic->kind() == Code::LOAD_IC || ic->kind() == Code::STORE_IC);
2265
  SerializeGeneric(ic, how_to_code, where_to_point);
2266 2267 2268
}


2269 2270 2271 2272 2273 2274 2275 2276 2277 2278 2279 2280
int CodeSerializer::AddCodeStubKey(uint32_t stub_key) {
  // TODO(yangguo) Maybe we need a hash table for a faster lookup than O(n^2).
  int index = 0;
  while (index < stub_keys_.length()) {
    if (stub_keys_[index] == stub_key) return index;
    index++;
  }
  stub_keys_.Add(stub_key);
  return index;
}


2281
MaybeHandle<SharedFunctionInfo> CodeSerializer::Deserialize(
2282
    Isolate* isolate, ScriptData* cached_data, Handle<String> source) {
2283 2284
  base::ElapsedTimer timer;
  if (FLAG_profile_deserialization) timer.Start();
2285

2286
  HandleScope scope(isolate);
2287

2288
  SmartPointer<SerializedCodeData> scd(
2289
      SerializedCodeData::FromCachedData(isolate, cached_data, *source));
2290 2291 2292 2293 2294
  if (scd.is_empty()) {
    if (FLAG_profile_deserialization) PrintF("[Cached code failed check]\n");
    DCHECK(cached_data->rejected());
    return MaybeHandle<SharedFunctionInfo>();
  }
2295

2296 2297 2298
  // Eagerly expand string table to avoid allocations during deserialization.
  StringTable::EnsureCapacityForDeserialization(isolate,
                                                scd->NumInternalizedStrings());
2299

2300 2301 2302 2303 2304 2305 2306 2307 2308
  // Prepare and register list of attached objects.
  Vector<const uint32_t> code_stub_keys = scd->CodeStubKeys();
  Vector<Handle<Object> > attached_objects = Vector<Handle<Object> >::New(
      code_stub_keys.length() + kCodeStubsBaseIndex);
  attached_objects[kSourceObjectIndex] = source;
  for (int i = 0; i < code_stub_keys.length(); i++) {
    attached_objects[i + kCodeStubsBaseIndex] =
        CodeStub::GetCode(isolate, code_stub_keys[i]).ToHandleChecked();
  }
2309

2310
  Deserializer deserializer(scd.get());
2311
  deserializer.SetAttachedObjects(attached_objects);
2312

2313 2314 2315 2316 2317 2318
  // Deserialize.
  Handle<SharedFunctionInfo> result;
  if (!deserializer.DeserializeCode(isolate).ToHandle(&result)) {
    // Deserializing may fail if the reservations cannot be fulfilled.
    if (FLAG_profile_deserialization) PrintF("[Deserializing failed]\n");
    return MaybeHandle<SharedFunctionInfo>();
2319
  }
2320
  deserializer.FlushICacheForNewCodeObjects();
2321

2322 2323
  if (FLAG_profile_deserialization) {
    double ms = timer.Elapsed().InMillisecondsF();
2324
    int length = cached_data->length();
2325 2326
    PrintF("[Deserializing from %d bytes took %0.3f ms]\n", length, ms);
  }
2327
  result->set_deserialized(true);
2328 2329 2330 2331 2332 2333 2334 2335 2336 2337 2338 2339

  if (isolate->logger()->is_logging_code_events() ||
      isolate->cpu_profiler()->is_profiling()) {
    String* name = isolate->heap()->empty_string();
    if (result->script()->IsScript()) {
      Script* script = Script::cast(result->script());
      if (script->name()->IsString()) name = String::cast(script->name());
    }
    isolate->logger()->CodeCreateEvent(Logger::SCRIPT_TAG, result->code(),
                                       *result, NULL, name);
  }

2340
  return scope.CloseAndEscape(result);
2341
}
2342 2343


2344 2345 2346 2347 2348 2349 2350 2351 2352
void SerializedData::AllocateData(int size) {
  DCHECK(!owns_data_);
  data_ = NewArray<byte>(size);
  size_ = size;
  owns_data_ = true;
  DCHECK(IsAligned(reinterpret_cast<intptr_t>(data_), kPointerAlignment));
}


2353
SnapshotData::SnapshotData(const Serializer& ser) {
2354 2355 2356
  DisallowHeapAllocation no_gc;
  List<Reservation> reservations;
  ser.EncodeReservations(&reservations);
2357
  const List<byte>& payload = ser.sink()->data();
2358 2359 2360 2361 2362 2363 2364 2365 2366

  // Calculate sizes.
  int reservation_size = reservations.length() * kInt32Size;
  int size = kHeaderSize + reservation_size + payload.length();

  // Allocate backing store and create result data.
  AllocateData(size);

  // Set header values.
2367
  SetMagicNumber(ser.isolate());
2368
  SetHeaderValue(kCheckSumOffset, Version::Hash());
2369
  SetHeaderValue(kNumReservationsOffset, reservations.length());
2370 2371 2372 2373 2374 2375 2376 2377 2378 2379 2380 2381 2382 2383 2384 2385 2386 2387 2388 2389
  SetHeaderValue(kPayloadLengthOffset, payload.length());

  // Copy reservation chunk sizes.
  CopyBytes(data_ + kHeaderSize, reinterpret_cast<byte*>(reservations.begin()),
            reservation_size);

  // Copy serialized data.
  CopyBytes(data_ + kHeaderSize + reservation_size, payload.begin(),
            static_cast<size_t>(payload.length()));
}


bool SnapshotData::IsSane() {
  return GetHeaderValue(kCheckSumOffset) == Version::Hash();
}


Vector<const SerializedData::Reservation> SnapshotData::Reservations() const {
  return Vector<const Reservation>(
      reinterpret_cast<const Reservation*>(data_ + kHeaderSize),
2390
      GetHeaderValue(kNumReservationsOffset));
2391 2392 2393 2394
}


Vector<const byte> SnapshotData::Payload() const {
2395
  int reservations_size = GetHeaderValue(kNumReservationsOffset) * kInt32Size;
2396 2397 2398 2399 2400 2401 2402
  const byte* payload = data_ + kHeaderSize + reservations_size;
  int length = GetHeaderValue(kPayloadLengthOffset);
  DCHECK_EQ(data_ + size_, payload + length);
  return Vector<const byte>(payload, length);
}


2403 2404 2405 2406 2407 2408 2409 2410 2411 2412 2413 2414 2415 2416 2417 2418 2419 2420 2421 2422 2423 2424 2425 2426 2427 2428 2429 2430 2431 2432 2433 2434 2435 2436 2437
class Checksum {
 public:
  explicit Checksum(Vector<const byte> payload) {
    // Fletcher's checksum. Modified to reduce 64-bit sums to 32-bit.
    uintptr_t a = 1;
    uintptr_t b = 0;
    const uintptr_t* cur = reinterpret_cast<const uintptr_t*>(payload.start());
    DCHECK(IsAligned(payload.length(), kIntptrSize));
    const uintptr_t* end = cur + payload.length() / kIntptrSize;
    while (cur < end) {
      // Unsigned overflow expected and intended.
      a += *cur++;
      b += a;
    }
#if V8_HOST_ARCH_64_BIT
    a ^= a >> 32;
    b ^= b >> 32;
#endif  // V8_HOST_ARCH_64_BIT
    a_ = static_cast<uint32_t>(a);
    b_ = static_cast<uint32_t>(b);
  }

  bool Check(uint32_t a, uint32_t b) const { return a == a_ && b == b_; }

  uint32_t a() const { return a_; }
  uint32_t b() const { return b_; }

 private:
  uint32_t a_;
  uint32_t b_;

  DISALLOW_COPY_AND_ASSIGN(Checksum);
};


2438
SerializedCodeData::SerializedCodeData(const List<byte>& payload,
2439
                                       const CodeSerializer& cs) {
2440
  DisallowHeapAllocation no_gc;
2441
  const List<uint32_t>* stub_keys = cs.stub_keys();
2442

2443 2444
  List<Reservation> reservations;
  cs.EncodeReservations(&reservations);
2445

2446
  // Calculate sizes.
2447
  int reservation_size = reservations.length() * kInt32Size;
2448 2449
  int num_stub_keys = stub_keys->length();
  int stub_keys_size = stub_keys->length() * kInt32Size;
2450 2451 2452
  int payload_offset = kHeaderSize + reservation_size + stub_keys_size;
  int padded_payload_offset = POINTER_SIZE_ALIGN(payload_offset);
  int size = padded_payload_offset + payload.length();
2453 2454

  // Allocate backing store and create result data.
2455
  AllocateData(size);
2456 2457

  // Set header values.
2458
  SetMagicNumber(cs.isolate());
2459 2460 2461 2462 2463
  SetHeaderValue(kVersionHashOffset, Version::Hash());
  SetHeaderValue(kSourceHashOffset, SourceHash(cs.source()));
  SetHeaderValue(kCpuFeaturesOffset,
                 static_cast<uint32_t>(CpuFeatures::SupportedFeatures()));
  SetHeaderValue(kFlagHashOffset, FlagList::Hash());
2464
  SetHeaderValue(kNumInternalizedStringsOffset, cs.num_internalized_strings());
2465
  SetHeaderValue(kNumReservationsOffset, reservations.length());
2466
  SetHeaderValue(kNumCodeStubKeysOffset, num_stub_keys);
2467
  SetHeaderValue(kPayloadLengthOffset, payload.length());
2468

2469 2470 2471 2472
  Checksum checksum(payload.ToConstVector());
  SetHeaderValue(kChecksum1Offset, checksum.a());
  SetHeaderValue(kChecksum2Offset, checksum.b());

2473
  // Copy reservation chunk sizes.
2474
  CopyBytes(data_ + kHeaderSize, reinterpret_cast<byte*>(reservations.begin()),
2475
            reservation_size);
2476 2477

  // Copy code stub keys.
2478
  CopyBytes(data_ + kHeaderSize + reservation_size,
2479
            reinterpret_cast<byte*>(stub_keys->begin()), stub_keys_size);
2480

2481 2482
  memset(data_ + payload_offset, 0, padded_payload_offset - payload_offset);

2483
  // Copy serialized data.
2484 2485
  CopyBytes(data_ + padded_payload_offset, payload.begin(),
            static_cast<size_t>(payload.length()));
2486 2487 2488
}


2489
SerializedCodeData::SanityCheckResult SerializedCodeData::SanityCheck(
2490 2491
    Isolate* isolate, String* source) const {
  uint32_t magic_number = GetMagicNumber();
2492 2493 2494 2495 2496 2497
  uint32_t version_hash = GetHeaderValue(kVersionHashOffset);
  uint32_t source_hash = GetHeaderValue(kSourceHashOffset);
  uint32_t cpu_features = GetHeaderValue(kCpuFeaturesOffset);
  uint32_t flags_hash = GetHeaderValue(kFlagHashOffset);
  uint32_t c1 = GetHeaderValue(kChecksum1Offset);
  uint32_t c2 = GetHeaderValue(kChecksum2Offset);
2498
  if (magic_number != ComputeMagicNumber(isolate)) return MAGIC_NUMBER_MISMATCH;
2499 2500 2501 2502 2503 2504 2505 2506
  if (version_hash != Version::Hash()) return VERSION_MISMATCH;
  if (source_hash != SourceHash(source)) return SOURCE_MISMATCH;
  if (cpu_features != static_cast<uint32_t>(CpuFeatures::SupportedFeatures())) {
    return CPU_FEATURES_MISMATCH;
  }
  if (flags_hash != FlagList::Hash()) return FLAGS_MISMATCH;
  if (!Checksum(Payload()).Check(c1, c2)) return CHECKSUM_MISMATCH;
  return CHECK_SUCCESS;
2507
}
2508 2509


2510 2511 2512 2513 2514 2515 2516 2517 2518 2519 2520 2521 2522 2523 2524
// Return ScriptData object and relinquish ownership over it to the caller.
ScriptData* SerializedCodeData::GetScriptData() {
  DCHECK(owns_data_);
  ScriptData* result = new ScriptData(data_, size_);
  result->AcquireDataOwnership();
  owns_data_ = false;
  data_ = NULL;
  return result;
}


Vector<const SerializedData::Reservation> SerializedCodeData::Reservations()
    const {
  return Vector<const Reservation>(
      reinterpret_cast<const Reservation*>(data_ + kHeaderSize),
2525
      GetHeaderValue(kNumReservationsOffset));
2526 2527 2528 2529
}


Vector<const byte> SerializedCodeData::Payload() const {
2530
  int reservations_size = GetHeaderValue(kNumReservationsOffset) * kInt32Size;
2531
  int code_stubs_size = GetHeaderValue(kNumCodeStubKeysOffset) * kInt32Size;
2532 2533 2534 2535
  int payload_offset = kHeaderSize + reservations_size + code_stubs_size;
  int padded_payload_offset = POINTER_SIZE_ALIGN(payload_offset);
  const byte* payload = data_ + padded_payload_offset;
  DCHECK(IsAligned(reinterpret_cast<intptr_t>(payload), kPointerAlignment));
2536 2537 2538 2539 2540 2541 2542 2543 2544 2545 2546
  int length = GetHeaderValue(kPayloadLengthOffset);
  DCHECK_EQ(data_ + size_, payload + length);
  return Vector<const byte>(payload, length);
}


int SerializedCodeData::NumInternalizedStrings() const {
  return GetHeaderValue(kNumInternalizedStringsOffset);
}

Vector<const uint32_t> SerializedCodeData::CodeStubKeys() const {
2547
  int reservations_size = GetHeaderValue(kNumReservationsOffset) * kInt32Size;
2548 2549 2550 2551
  const byte* start = data_ + kHeaderSize + reservations_size;
  return Vector<const uint32_t>(reinterpret_cast<const uint32_t*>(start),
                                GetHeaderValue(kNumCodeStubKeysOffset));
}
2552 2553 2554 2555 2556 2557


SerializedCodeData::SerializedCodeData(ScriptData* data)
    : SerializedData(const_cast<byte*>(data->data()), data->length()) {}


2558 2559
SerializedCodeData* SerializedCodeData::FromCachedData(Isolate* isolate,
                                                       ScriptData* cached_data,
2560 2561 2562
                                                       String* source) {
  DisallowHeapAllocation no_gc;
  SerializedCodeData* scd = new SerializedCodeData(cached_data);
2563
  SanityCheckResult r = scd->SanityCheck(isolate, source);
2564
  if (r == CHECK_SUCCESS) return scd;
2565
  cached_data->Reject();
2566
  source->GetIsolate()->counters()->code_cache_reject_reason()->AddSample(r);
2567 2568 2569
  delete scd;
  return NULL;
}
2570
} }  // namespace v8::internal