heap.cc 236 KB
Newer Older
1
// Copyright 2012 the V8 project authors. All rights reserved.
2 3
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
4

5
#include "src/v8.h"
6

7 8
#include "src/accessors.h"
#include "src/api.h"
9
#include "src/base/bits.h"
10
#include "src/base/once.h"
11
#include "src/base/utils/random-number-generator.h"
12 13 14 15 16 17 18 19
#include "src/bootstrapper.h"
#include "src/codegen.h"
#include "src/compilation-cache.h"
#include "src/conversions.h"
#include "src/cpu-profiler.h"
#include "src/debug.h"
#include "src/deoptimizer.h"
#include "src/global-handles.h"
20
#include "src/heap/gc-idle-time-handler.h"
21 22
#include "src/heap/incremental-marking.h"
#include "src/heap/mark-compact.h"
23
#include "src/heap/memory-reducer.h"
24 25
#include "src/heap/objects-visiting-inl.h"
#include "src/heap/objects-visiting.h"
26
#include "src/heap/store-buffer.h"
27 28 29
#include "src/heap-profiler.h"
#include "src/runtime-profiler.h"
#include "src/scopeinfo.h"
30 31 32
#include "src/snapshot/natives.h"
#include "src/snapshot/serialize.h"
#include "src/snapshot/snapshot.h"
33 34 35
#include "src/utils.h"
#include "src/v8threads.h"
#include "src/vm-state-inl.h"
36

37 38 39 40
#if V8_TARGET_ARCH_PPC && !V8_INTERPRETED_REGEXP
#include "src/regexp-macro-assembler.h"          // NOLINT
#include "src/ppc/regexp-macro-assembler-ppc.h"  // NOLINT
#endif
41
#if V8_TARGET_ARCH_ARM && !V8_INTERPRETED_REGEXP
42
#include "src/regexp-macro-assembler.h"          // NOLINT
43
#include "src/arm/regexp-macro-assembler-arm.h"  // NOLINT
lrn@chromium.org's avatar
lrn@chromium.org committed
44
#endif
45
#if V8_TARGET_ARCH_MIPS && !V8_INTERPRETED_REGEXP
46
#include "src/regexp-macro-assembler.h"            // NOLINT
47
#include "src/mips/regexp-macro-assembler-mips.h"  // NOLINT
48
#endif
49 50 51 52
#if V8_TARGET_ARCH_MIPS64 && !V8_INTERPRETED_REGEXP
#include "src/regexp-macro-assembler.h"
#include "src/mips64/regexp-macro-assembler-mips64.h"
#endif
53

54 55
namespace v8 {
namespace internal {
56

57

58 59 60 61 62 63 64
struct Heap::StrongRootsList {
  Object** start;
  Object** end;
  StrongRootsList* next;
};


65
Heap::Heap()
66 67 68
    : amount_of_external_allocated_memory_(0),
      amount_of_external_allocated_memory_at_last_global_gc_(0),
      isolate_(NULL),
69
      code_range_size_(0),
70
      // semispace_size_ should be a power of 2 and old_generation_size_ should
71
      // be a multiple of Page::kPageSize.
72
      reserved_semispace_size_(8 * (kPointerSize / 4) * MB),
73
      max_semi_space_size_(8 * (kPointerSize / 4) * MB),
74
      initial_semispace_size_(Page::kPageSize),
75
      target_semispace_size_(Page::kPageSize),
76
      max_old_generation_size_(700ul * (kPointerSize / 4) * MB),
77 78
      initial_old_generation_size_(max_old_generation_size_ /
                                   kInitalOldGenerationLimitFactor),
79
      old_generation_size_configured_(false),
80
      max_executable_size_(256ul * (kPointerSize / 4) * MB),
81 82 83 84
      // Variables set based on semispace_size_ and old_generation_size_ in
      // ConfigureHeap.
      // Will be 4 * reserved_semispace_size_ to ensure that young
      // generation can be aligned to its size.
85
      maximum_committed_(0),
86
      survived_since_last_expansion_(0),
87
      survived_last_scavenge_(0),
88
      sweep_generation_(0),
89 90
      always_allocate_scope_depth_(0),
      contexts_disposed_(0),
91
      global_ic_age_(0),
92
      scan_on_scavenge_pages_(0),
93
      new_space_(this),
94
      old_space_(NULL),
95 96 97 98
      code_space_(NULL),
      map_space_(NULL),
      lo_space_(NULL),
      gc_state_(NOT_IN_GC),
99
      gc_post_processing_depth_(0),
100 101 102
      allocations_count_(0),
      raw_allocations_hash_(0),
      dump_allocations_hash_countdown_(FLAG_dump_allocations_digest_at_alloc),
103 104
      ms_count_(0),
      gc_count_(0),
105
      remembered_unmapped_pages_index_(0),
106
      unflattened_strings_length_(0),
107
#ifdef DEBUG
108
      allocation_timeout_(0),
109
#endif  // DEBUG
110
      old_generation_allocation_limit_(initial_old_generation_size_),
111
      old_gen_exhausted_(false),
112
      inline_allocation_disabled_(false),
113
      store_buffer_rebuilder_(store_buffer()),
114
      hidden_string_(NULL),
115
      gc_safe_size_of_old_object_(NULL),
116
      total_regexp_code_generated_(0),
117
      tracer_(this),
118
      new_space_high_promotion_mode_active_(false),
119
      gathering_lifetime_feedback_(0),
120
      high_survival_rate_period_length_(0),
121
      promoted_objects_size_(0),
122 123
      low_survival_rate_period_length_(0),
      survival_rate_(0),
124
      promotion_ratio_(0),
125
      semi_space_copied_object_size_(0),
126
      previous_semi_space_copied_object_size_(0),
127
      semi_space_copied_rate_(0),
128 129 130
      nodes_died_in_new_space_(0),
      nodes_copied_in_new_space_(0),
      nodes_promoted_(0),
131
      maximum_size_scavenges_(0),
132 133
      previous_survival_rate_trend_(Heap::STABLE),
      survival_rate_trend_(Heap::STABLE),
134 135
      max_gc_pause_(0.0),
      total_gc_time_ms_(0.0),
136 137
      max_alive_after_gc_(0),
      min_in_mutator_(kMaxInt),
138 139
      marking_time_(0.0),
      sweeping_time_(0.0),
140
      last_idle_notification_time_(0.0),
141
      last_gc_time_(0.0),
142
      mark_compact_collector_(this),
143 144 145
      store_buffer_(this),
      marking_(this),
      incremental_marking_(this),
146
      memory_reducer_(this),
147 148
      full_codegen_bytes_generated_(0),
      crankshaft_codegen_bytes_generated_(0),
149
      new_space_allocation_counter_(0),
150 151
      old_generation_allocation_counter_(0),
      old_generation_size_at_last_gc_(0),
152
      gcs_since_last_deopt_(0),
153
      allocation_sites_scratchpad_length_(0),
154 155
      ring_buffer_full_(false),
      ring_buffer_end_(0),
156
      promotion_queue_(this),
157
      configured_(false),
158
      external_string_table_(this),
159
      chunks_queued_for_free_(NULL),
160
      gc_callbacks_depth_(0),
161
      deserialization_complete_(false),
162 163
      concurrent_sweeping_enabled_(false),
      strong_roots_list_(NULL) {
164 165 166
// Allow build-time customization of the max semispace size. Building
// V8 with snapshots and a non-default max semispace size is much
// easier if you can define it as part of the build environment.
167
#if defined(V8_MAX_SEMISPACE_SIZE)
168
  max_semi_space_size_ = reserved_semispace_size_ = V8_MAX_SEMISPACE_SIZE;
169 170
#endif

171
  // Ensure old_generation_size_ is a multiple of kPageSize.
172
  DCHECK(MB >= Page::kPageSize);
173

174
  memset(roots_, 0, sizeof(roots_[0]) * kRootListLength);
175 176
  set_native_contexts_list(NULL);
  set_allocation_sites_list(Smi::FromInt(0));
177
  set_encountered_weak_collections(Smi::FromInt(0));
ulan@chromium.org's avatar
ulan@chromium.org committed
178
  set_encountered_weak_cells(Smi::FromInt(0));
179 180 181
  // Put a dummy entry in the remembered pages so we can find the list the
  // minidump even if there are no real unmapped pages.
  RememberUnmappedPage(NULL, false);
182 183

  ClearObjectStats(true);
184
}
185 186


187
intptr_t Heap::Capacity() {
188
  if (!HasBeenSetUp()) return 0;
189

190
  return new_space_.Capacity() + old_space_->Capacity() +
191
         code_space_->Capacity() + map_space_->Capacity();
192 193 194
}


195
intptr_t Heap::CommittedOldGenerationMemory() {
196
  if (!HasBeenSetUp()) return 0;
197

198
  return old_space_->CommittedMemory() + code_space_->CommittedMemory() +
199
         map_space_->CommittedMemory() + lo_space_->Size();
200 201
}

202

203 204 205 206 207 208 209
intptr_t Heap::CommittedMemory() {
  if (!HasBeenSetUp()) return 0;

  return new_space_.CommittedMemory() + CommittedOldGenerationMemory();
}


210 211 212 213
size_t Heap::CommittedPhysicalMemory() {
  if (!HasBeenSetUp()) return 0;

  return new_space_.CommittedPhysicalMemory() +
214
         old_space_->CommittedPhysicalMemory() +
215 216 217
         code_space_->CommittedPhysicalMemory() +
         map_space_->CommittedPhysicalMemory() +
         lo_space_->CommittedPhysicalMemory();
218 219 220
}


221
intptr_t Heap::CommittedMemoryExecutable() {
222
  if (!HasBeenSetUp()) return 0;
223

224
  return isolate()->memory_allocator()->SizeExecutable();
225 226
}

227

228 229 230 231 232 233 234 235 236 237
void Heap::UpdateMaximumCommitted() {
  if (!HasBeenSetUp()) return;

  intptr_t current_committed_memory = CommittedMemory();
  if (current_committed_memory > maximum_committed_) {
    maximum_committed_ = current_committed_memory;
  }
}


238
intptr_t Heap::Available() {
239
  if (!HasBeenSetUp()) return 0;
240

241 242 243 244 245 246
  intptr_t total = 0;
  AllSpaces spaces(this);
  for (Space* space = spaces.next(); space != NULL; space = spaces.next()) {
    total += space->Available();
  }
  return total;
247 248 249
}


250
bool Heap::HasBeenSetUp() {
251
  return old_space_ != NULL && code_space_ != NULL && map_space_ != NULL &&
252
         lo_space_ != NULL;
253 254 255
}


256
int Heap::GcSafeSizeOfOldObject(HeapObject* object) {
257 258
  if (IntrusiveMarking::IsMarked(object)) {
    return IntrusiveMarking::SizeOfMarkedObject(object);
259
  }
260
  return object->SizeFromMap(object->map());
261 262 263
}


264 265
GarbageCollector Heap::SelectGarbageCollector(AllocationSpace space,
                                              const char** reason) {
266
  // Is global GC requested?
267
  if (space != NEW_SPACE) {
268
    isolate_->counters()->gc_compactor_caused_by_request()->Increment();
269
    *reason = "GC in old space requested";
270 271 272
    return MARK_COMPACTOR;
  }

273 274 275 276 277
  if (FLAG_gc_global || (FLAG_stress_compaction && (gc_count_ & 1) != 0)) {
    *reason = "GC in old space forced by flags";
    return MARK_COMPACTOR;
  }

278
  // Is enough data promoted to justify a global GC?
279
  if (OldGenerationAllocationLimitReached()) {
280
    isolate_->counters()->gc_compactor_caused_by_promoted_data()->Increment();
281
    *reason = "promotion limit reached";
282 283 284 285 286
    return MARK_COMPACTOR;
  }

  // Have allocation in OLD and LO failed?
  if (old_gen_exhausted_) {
287 288 289
    isolate_->counters()
        ->gc_compactor_caused_by_oldspace_exhaustion()
        ->Increment();
290
    *reason = "old generations exhausted";
291 292 293 294 295 296
    return MARK_COMPACTOR;
  }

  // Is there enough space left in OLD to guarantee that a scavenge can
  // succeed?
  //
297
  // Note that MemoryAllocator->MaxAvailable() undercounts the memory available
298 299 300 301 302
  // for object promotion. It counts only the bytes that the memory
  // allocator has not yet allocated from the OS and assigned to any space,
  // and does not count available bytes already in the old space or code
  // space.  Undercounting is safe---we may get an unrequested full GC when
  // a scavenge would have succeeded.
303
  if (isolate_->memory_allocator()->MaxAvailable() <= new_space_.Size()) {
304 305 306
    isolate_->counters()
        ->gc_compactor_caused_by_oldspace_exhaustion()
        ->Increment();
307
    *reason = "scavenge might not succeed";
308 309 310 311
    return MARK_COMPACTOR;
  }

  // Default
312
  *reason = NULL;
313 314 315 316 317 318 319
  return SCAVENGER;
}


// TODO(1238405): Combine the infrastructure for --heap-stats and
// --log-gc to avoid the complicated preprocessor and flag testing.
void Heap::ReportStatisticsBeforeGC() {
320 321 322
// Heap::ReportHeapStatistics will also log NewSpace statistics when
// compiled --log-gc is set.  The following logic is used to avoid
// double logging.
323
#ifdef DEBUG
324
  if (FLAG_heap_stats || FLAG_log_gc) new_space_.CollectStatistics();
325 326 327
  if (FLAG_heap_stats) {
    ReportHeapStatistics("Before GC");
  } else if (FLAG_log_gc) {
328
    new_space_.ReportStatistics();
329
  }
330
  if (FLAG_heap_stats || FLAG_log_gc) new_space_.ClearHistograms();
331
#else
332
  if (FLAG_log_gc) {
333 334 335
    new_space_.CollectStatistics();
    new_space_.ReportStatistics();
    new_space_.ClearHistograms();
336
  }
337
#endif  // DEBUG
338 339 340
}


341 342
void Heap::PrintShortHeapStatistics() {
  if (!FLAG_trace_gc_verbose) return;
343 344 345 346 347 348 349 350 351 352 353 354
  PrintIsolate(isolate_, "Memory allocator,   used: %6" V8_PTR_PREFIX
                         "d KB"
                         ", available: %6" V8_PTR_PREFIX "d KB\n",
               isolate_->memory_allocator()->Size() / KB,
               isolate_->memory_allocator()->Available() / KB);
  PrintIsolate(isolate_, "New space,          used: %6" V8_PTR_PREFIX
                         "d KB"
                         ", available: %6" V8_PTR_PREFIX
                         "d KB"
                         ", committed: %6" V8_PTR_PREFIX "d KB\n",
               new_space_.Size() / KB, new_space_.Available() / KB,
               new_space_.CommittedMemory() / KB);
355
  PrintIsolate(isolate_, "Old space,          used: %6" V8_PTR_PREFIX
356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 386 387 388 389 390 391 392 393 394
                         "d KB"
                         ", available: %6" V8_PTR_PREFIX
                         "d KB"
                         ", committed: %6" V8_PTR_PREFIX "d KB\n",
               old_space_->SizeOfObjects() / KB, old_space_->Available() / KB,
               old_space_->CommittedMemory() / KB);
  PrintIsolate(isolate_, "Code space,         used: %6" V8_PTR_PREFIX
                         "d KB"
                         ", available: %6" V8_PTR_PREFIX
                         "d KB"
                         ", committed: %6" V8_PTR_PREFIX "d KB\n",
               code_space_->SizeOfObjects() / KB, code_space_->Available() / KB,
               code_space_->CommittedMemory() / KB);
  PrintIsolate(isolate_, "Map space,          used: %6" V8_PTR_PREFIX
                         "d KB"
                         ", available: %6" V8_PTR_PREFIX
                         "d KB"
                         ", committed: %6" V8_PTR_PREFIX "d KB\n",
               map_space_->SizeOfObjects() / KB, map_space_->Available() / KB,
               map_space_->CommittedMemory() / KB);
  PrintIsolate(isolate_, "Large object space, used: %6" V8_PTR_PREFIX
                         "d KB"
                         ", available: %6" V8_PTR_PREFIX
                         "d KB"
                         ", committed: %6" V8_PTR_PREFIX "d KB\n",
               lo_space_->SizeOfObjects() / KB, lo_space_->Available() / KB,
               lo_space_->CommittedMemory() / KB);
  PrintIsolate(isolate_, "All spaces,         used: %6" V8_PTR_PREFIX
                         "d KB"
                         ", available: %6" V8_PTR_PREFIX
                         "d KB"
                         ", committed: %6" V8_PTR_PREFIX "d KB\n",
               this->SizeOfObjects() / KB, this->Available() / KB,
               this->CommittedMemory() / KB);
  PrintIsolate(
      isolate_, "External memory reported: %6" V8_PTR_PREFIX "d KB\n",
      static_cast<intptr_t>(amount_of_external_allocated_memory_ / KB));
  PrintIsolate(isolate_, "Total time spent in GC  : %.1f ms\n",
               total_gc_time_ms_);
395 396 397
}


398 399 400
// TODO(1238405): Combine the infrastructure for --heap-stats and
// --log-gc to avoid the complicated preprocessor and flag testing.
void Heap::ReportStatisticsAfterGC() {
401 402
// Similar to the before GC, we use some complicated logic to ensure that
// NewSpace statistics are logged exactly once when --log-gc is turned on.
403
#if defined(DEBUG)
404
  if (FLAG_heap_stats) {
405
    new_space_.CollectStatistics();
406 407
    ReportHeapStatistics("After GC");
  } else if (FLAG_log_gc) {
408
    new_space_.ReportStatistics();
409
  }
410
#else
411
  if (FLAG_log_gc) new_space_.ReportStatistics();
412
#endif  // DEBUG
413 414 415 416 417 418 419 420 421 422 423 424 425 426
  for (int i = 0; i < static_cast<int>(v8::Isolate::kUseCounterFeatureCount);
       ++i) {
    int count = deferred_counters_[i];
    deferred_counters_[i] = 0;
    while (count > 0) {
      count--;
      isolate()->CountUsage(static_cast<v8::Isolate::UseCounterFeature>(i));
    }
  }
}


void Heap::IncrementDeferredCount(v8::Isolate::UseCounterFeature feature) {
  deferred_counters_[feature]++;
427 428 429
}


430
void Heap::GarbageCollectionPrologue() {
431 432
  {
    AllowHeapAllocation for_the_first_part_of_prologue;
433 434 435
    ClearJSFunctionResultCaches();
    gc_count_++;
    unflattened_strings_length_ = 0;
436

437
    if (FLAG_flush_code) {
438 439
      mark_compact_collector()->EnableCodeFlushing(true);
    }
440

441
#ifdef VERIFY_HEAP
442 443 444
    if (FLAG_verify_heap) {
      Verify();
    }
445
#endif
446
  }
447

448 449
  // Reset GC statistics.
  promoted_objects_size_ = 0;
450
  previous_semi_space_copied_object_size_ = semi_space_copied_object_size_;
451
  semi_space_copied_object_size_ = 0;
452 453 454
  nodes_died_in_new_space_ = 0;
  nodes_copied_in_new_space_ = 0;
  nodes_promoted_ = 0;
455

456 457
  UpdateMaximumCommitted();

458
#ifdef DEBUG
459
  DCHECK(!AllowHeapAllocation::IsAllowed() && gc_state_ == NOT_IN_GC);
460 461 462 463

  if (FLAG_gc_verbose) Print();

  ReportStatisticsBeforeGC();
464
#endif  // DEBUG
465

466
  store_buffer()->GCPrologue();
467

468
  if (isolate()->concurrent_osr_enabled()) {
469
    isolate()->optimizing_compile_dispatcher()->AgeBufferedOsrJobs();
470
  }
471 472 473 474 475 476 477

  if (new_space_.IsAtMaximumCapacity()) {
    maximum_size_scavenges_++;
  } else {
    maximum_size_scavenges_ = 0;
  }
  CheckNewSpaceExpansionCriteria();
478
  UpdateNewSpaceAllocationCounter();
479 480
}

481

482 483
intptr_t Heap::SizeOfObjects() {
  intptr_t total = 0;
484
  AllSpaces spaces(this);
485
  for (Space* space = spaces.next(); space != NULL; space = spaces.next()) {
486
    total += space->SizeOfObjects();
487
  }
488
  return total;
489 490
}

491

492 493 494 495 496 497 498 499 500 501 502 503 504 505 506 507 508 509 510
const char* Heap::GetSpaceName(int idx) {
  switch (idx) {
    case NEW_SPACE:
      return "new_space";
    case OLD_SPACE:
      return "old_space";
    case MAP_SPACE:
      return "map_space";
    case CODE_SPACE:
      return "code_space";
    case LO_SPACE:
      return "large_object_space";
    default:
      UNREACHABLE();
  }
  return nullptr;
}


511 512 513 514 515 516 517 518 519 520 521 522 523 524
void Heap::ClearAllICsByKind(Code::Kind kind) {
  HeapObjectIterator it(code_space());

  for (Object* object = it.Next(); object != NULL; object = it.Next()) {
    Code* code = Code::cast(object);
    Code::Kind current_kind = code->kind();
    if (current_kind == Code::FUNCTION ||
        current_kind == Code::OPTIMIZED_FUNCTION) {
      code->ClearInlineCaches(kind);
    }
  }
}


525
void Heap::RepairFreeListsAfterDeserialization() {
526
  PagedSpaces spaces(this);
527
  for (PagedSpace* space = spaces.next(); space != NULL;
528
       space = spaces.next()) {
529
    space->RepairFreeListsAfterDeserialization();
530 531 532 533
  }
}


534 535
bool Heap::ProcessPretenuringFeedback() {
  bool trigger_deoptimization = false;
536
  if (FLAG_allocation_site_pretenuring) {
537 538 539
    int tenure_decisions = 0;
    int dont_tenure_decisions = 0;
    int allocation_mementos_found = 0;
540 541 542 543
    int allocation_sites = 0;
    int active_allocation_sites = 0;

    // If the scratchpad overflowed, we have to iterate over the allocation
544
    // sites list.
545 546 547 548
    // TODO(hpayer): We iterate over the whole list of allocation sites when
    // we grew to the maximum semi-space size to deopt maybe tenured
    // allocation sites. We could hold the maybe tenured allocation sites
    // in a seperate data structure if this is a performance problem.
549
    bool deopt_maybe_tenured = DeoptMaybeTenuredAllocationSites();
550
    bool use_scratchpad =
551 552
        allocation_sites_scratchpad_length_ < kAllocationSiteScratchpadSize &&
        !deopt_maybe_tenured;
553 554 555

    int i = 0;
    Object* list_element = allocation_sites_list();
556
    bool maximum_size_scavenge = MaximumSizeScavenge();
557 558 559 560 561 562
    while (use_scratchpad ? i < allocation_sites_scratchpad_length_
                          : list_element->IsAllocationSite()) {
      AllocationSite* site =
          use_scratchpad
              ? AllocationSite::cast(allocation_sites_scratchpad()->get(i))
              : AllocationSite::cast(list_element);
563 564
      allocation_mementos_found += site->memento_found_count();
      if (site->memento_found_count() > 0) {
565
        active_allocation_sites++;
566 567 568 569 570 571 572 573 574
        if (site->DigestPretenuringFeedback(maximum_size_scavenge)) {
          trigger_deoptimization = true;
        }
        if (site->GetPretenureMode() == TENURED) {
          tenure_decisions++;
        } else {
          dont_tenure_decisions++;
        }
        allocation_sites++;
575
      }
576

577 578 579 580 581
      if (deopt_maybe_tenured && site->IsMaybeTenure()) {
        site->set_deopt_dependent_code(true);
        trigger_deoptimization = true;
      }

582 583 584 585 586
      if (use_scratchpad) {
        i++;
      } else {
        list_element = site->weak_next();
      }
587
    }
588

589
    if (trigger_deoptimization) {
590
      isolate_->stack_guard()->RequestDeoptMarkedAllocationSites();
591
    }
592

593
    FlushAllocationSitesScratchpad();
594

595
    if (FLAG_trace_pretenuring_statistics &&
596
        (allocation_mementos_found > 0 || tenure_decisions > 0 ||
597
         dont_tenure_decisions > 0)) {
598 599 600 601 602 603 604
      PrintF(
          "GC: (mode, #visited allocation sites, #active allocation sites, "
          "#mementos, #tenure decisions, #donttenure decisions) "
          "(%s, %d, %d, %d, %d, %d)\n",
          use_scratchpad ? "use scratchpad" : "use list", allocation_sites,
          active_allocation_sites, allocation_mementos_found, tenure_decisions,
          dont_tenure_decisions);
605 606
    }
  }
607
  return trigger_deoptimization;
608 609
}

610

611 612 613 614 615 616 617 618 619
void Heap::DeoptMarkedAllocationSites() {
  // TODO(hpayer): If iterating over the allocation sites list becomes a
  // performance issue, use a cache heap data structure instead (similar to the
  // allocation sites scratchpad).
  Object* list_element = allocation_sites_list();
  while (list_element->IsAllocationSite()) {
    AllocationSite* site = AllocationSite::cast(list_element);
    if (site->deopt_dependent_code()) {
      site->dependent_code()->MarkCodeForDeoptimization(
620
          isolate_, DependentCode::kAllocationSiteTenuringChangedGroup);
621 622 623 624 625 626 627 628
      site->set_deopt_dependent_code(false);
    }
    list_element = site->weak_next();
  }
  Deoptimizer::DeoptimizeMarkedCode(isolate_);
}


629
void Heap::GarbageCollectionEpilogue() {
630
  store_buffer()->GCEpilogue();
631

632 633 634 635 636 637
  // In release mode, we only zap the from space under heap verification.
  if (Heap::ShouldZapGarbage()) {
    ZapFromSpace();
  }

#ifdef VERIFY_HEAP
638 639 640
  if (FLAG_verify_heap) {
    Verify();
  }
641
#endif
642

643 644
  AllowHeapAllocation for_the_rest_of_the_epilogue;

645
#ifdef DEBUG
646
  if (FLAG_print_global_handles) isolate_->global_handles()->Print();
647 648 649
  if (FLAG_print_handles) PrintHandles();
  if (FLAG_gc_verbose) Print();
  if (FLAG_code_stats) ReportCodeStatistics("After GC");
650
  if (FLAG_check_handle_count) CheckHandleCount();
651
#endif
652
  if (FLAG_deopt_every_n_garbage_collections > 0) {
653 654 655
    // TODO(jkummerow/ulan/jarin): This is not safe! We can't assume that
    // the topmost optimized frame can be deoptimized safely, because it
    // might not have a lazy bailout point right after its current PC.
656 657 658 659 660
    if (++gcs_since_last_deopt_ == FLAG_deopt_every_n_garbage_collections) {
      Deoptimizer::DeoptimizeAll(isolate());
      gcs_since_last_deopt_ = 0;
    }
  }
661

662 663
  UpdateMaximumCommitted();

664 665
  isolate_->counters()->alive_after_last_gc()->Set(
      static_cast<int>(SizeOfObjects()));
666

667 668
  isolate_->counters()->string_table_capacity()->Set(
      string_table()->Capacity());
669
  isolate_->counters()->number_of_symbols()->Set(
670
      string_table()->NumberOfElements());
671

672 673 674
  if (full_codegen_bytes_generated_ + crankshaft_codegen_bytes_generated_ > 0) {
    isolate_->counters()->codegen_fraction_crankshaft()->AddSample(
        static_cast<int>((crankshaft_codegen_bytes_generated_ * 100.0) /
675 676
                         (crankshaft_codegen_bytes_generated_ +
                          full_codegen_bytes_generated_)));
677 678
  }

679 680 681
  if (CommittedMemory() > 0) {
    isolate_->counters()->external_fragmentation_total()->AddSample(
        static_cast<int>(100 - (SizeOfObjects() * 100.0) / CommittedMemory()));
682

683 684
    isolate_->counters()->heap_fraction_new_space()->AddSample(static_cast<int>(
        (new_space()->CommittedMemory() * 100.0) / CommittedMemory()));
685 686
    isolate_->counters()->heap_fraction_old_space()->AddSample(static_cast<int>(
        (old_space()->CommittedMemory() * 100.0) / CommittedMemory()));
687 688 689 690 691 692 693
    isolate_->counters()->heap_fraction_code_space()->AddSample(
        static_cast<int>((code_space()->CommittedMemory() * 100.0) /
                         CommittedMemory()));
    isolate_->counters()->heap_fraction_map_space()->AddSample(static_cast<int>(
        (map_space()->CommittedMemory() * 100.0) / CommittedMemory()));
    isolate_->counters()->heap_fraction_lo_space()->AddSample(static_cast<int>(
        (lo_space()->CommittedMemory() * 100.0) / CommittedMemory()));
694 695

    isolate_->counters()->heap_sample_total_committed()->AddSample(
696
        static_cast<int>(CommittedMemory() / KB));
697
    isolate_->counters()->heap_sample_total_used()->AddSample(
698
        static_cast<int>(SizeOfObjects() / KB));
699
    isolate_->counters()->heap_sample_map_space_committed()->AddSample(
700
        static_cast<int>(map_space()->CommittedMemory() / KB));
701 702
    isolate_->counters()->heap_sample_code_space_committed()->AddSample(
        static_cast<int>(code_space()->CommittedMemory() / KB));
703 704 705

    isolate_->counters()->heap_sample_maximum_committed()->AddSample(
        static_cast<int>(MaximumCommittedMemory() / KB));
706
  }
707

708 709 710 711 712 713
#define UPDATE_COUNTERS_FOR_SPACE(space)                \
  isolate_->counters()->space##_bytes_available()->Set( \
      static_cast<int>(space()->Available()));          \
  isolate_->counters()->space##_bytes_committed()->Set( \
      static_cast<int>(space()->CommittedMemory()));    \
  isolate_->counters()->space##_bytes_used()->Set(      \
714
      static_cast<int>(space()->SizeOfObjects()));
715 716 717 718 719 720 721 722 723
#define UPDATE_FRAGMENTATION_FOR_SPACE(space)                          \
  if (space()->CommittedMemory() > 0) {                                \
    isolate_->counters()->external_fragmentation_##space()->AddSample( \
        static_cast<int>(100 -                                         \
                         (space()->SizeOfObjects() * 100.0) /          \
                             space()->CommittedMemory()));             \
  }
#define UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(space) \
  UPDATE_COUNTERS_FOR_SPACE(space)                         \
724 725
  UPDATE_FRAGMENTATION_FOR_SPACE(space)

726
  UPDATE_COUNTERS_FOR_SPACE(new_space)
727
  UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(old_space)
728 729 730
  UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(code_space)
  UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(map_space)
  UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(lo_space)
731
#undef UPDATE_COUNTERS_FOR_SPACE
732 733
#undef UPDATE_FRAGMENTATION_FOR_SPACE
#undef UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE
734

735
#ifdef DEBUG
736
  ReportStatisticsAfterGC();
737
#endif  // DEBUG
738 739 740 741

  // Remember the last top pointer so that we can later find out
  // whether we allocated in new space since the last GC.
  new_space_top_after_last_gc_ = new_space()->top();
742
  last_gc_time_ = MonotonicallyIncreasingTimeInMs();
743

744
  ReduceNewSpaceSize();
745 746 747
}


748 749 750 751 752 753 754 755 756 757 758 759 760 761 762 763 764 765 766 767 768 769 770 771 772 773
void Heap::PreprocessStackTraces() {
  if (!weak_stack_trace_list()->IsWeakFixedArray()) return;
  WeakFixedArray* array = WeakFixedArray::cast(weak_stack_trace_list());
  int length = array->Length();
  for (int i = 0; i < length; i++) {
    if (array->IsEmptySlot(i)) continue;
    FixedArray* elements = FixedArray::cast(array->Get(i));
    for (int j = 1; j < elements->length(); j += 4) {
      Object* maybe_code = elements->get(j + 2);
      // If GC happens while adding a stack trace to the weak fixed array,
      // which has been copied into a larger backing store, we may run into
      // a stack trace that has already been preprocessed. Guard against this.
      if (!maybe_code->IsCode()) break;
      Code* code = Code::cast(maybe_code);
      int offset = Smi::cast(elements->get(j + 3))->value();
      Address pc = code->address() + offset;
      int pos = code->SourcePosition(pc);
      elements->set(j + 2, Smi::FromInt(pos));
    }
  }
  // We must not compact the weak fixed list here, as we may be in the middle
  // of writing to it, when the GC triggered. Instead, we reset the root value.
  set_weak_stack_trace_list(Smi::FromInt(0));
}


774 775 776 777 778 779 780
void Heap::HandleGCRequest() {
  if (incremental_marking()->request_type() ==
      IncrementalMarking::COMPLETE_MARKING) {
    CollectAllGarbage(Heap::kNoGCFlags, "GC interrupt");
    return;
  }
  DCHECK(FLAG_overapproximate_weak_closure);
781 782 783
  if (!incremental_marking()->weak_closure_was_overapproximated()) {
    OverApproximateWeakClosure("GC interrupt");
  }
784 785 786 787 788 789 790 791
}


void Heap::OverApproximateWeakClosure(const char* gc_reason) {
  if (FLAG_trace_incremental_marking) {
    PrintF("[IncrementalMarking] Overapproximate weak closure (%s).\n",
           gc_reason);
  }
792 793 794 795

  GCTracer::Scope gc_scope(tracer(),
                           GCTracer::Scope::MC_INCREMENTAL_WEAKCLOSURE);

796 797 798 799 800 801 802 803 804 805
  {
    GCCallbacksScope scope(this);
    if (scope.CheckReenter()) {
      AllowHeapAllocation allow_allocation;
      GCTracer::Scope scope(tracer(), GCTracer::Scope::EXTERNAL);
      VMState<EXTERNAL> state(isolate_);
      HandleScope handle_scope(isolate_);
      CallGCPrologueCallbacks(kGCTypeMarkSweepCompact, kNoGCCallbackFlags);
    }
  }
806
  incremental_marking()->MarkObjectGroups();
807 808 809 810 811 812 813 814 815 816 817 818 819
  {
    GCCallbacksScope scope(this);
    if (scope.CheckReenter()) {
      AllowHeapAllocation allow_allocation;
      GCTracer::Scope scope(tracer(), GCTracer::Scope::EXTERNAL);
      VMState<EXTERNAL> state(isolate_);
      HandleScope handle_scope(isolate_);
      CallGCEpilogueCallbacks(kGCTypeMarkSweepCompact, kNoGCCallbackFlags);
    }
  }
}


820
void Heap::CollectAllGarbage(int flags, const char* gc_reason,
821
                             const v8::GCCallbackFlags gc_callback_flags) {
822 823 824
  // Since we are ignoring the return value, the exact choice of space does
  // not matter, so long as we do not specify NEW_SPACE, which would not
  // cause a full GC.
825
  mark_compact_collector_.SetFlags(flags);
826
  CollectGarbage(OLD_SPACE, gc_reason, gc_callback_flags);
827
  mark_compact_collector_.SetFlags(kNoGCFlags);
828 829 830
}


831
void Heap::CollectAllAvailableGarbage(const char* gc_reason) {
832 833 834 835 836 837 838 839 840 841 842
  // Since we are ignoring the return value, the exact choice of space does
  // not matter, so long as we do not specify NEW_SPACE, which would not
  // cause a full GC.
  // Major GC would invoke weak handle callbacks on weakly reachable
  // handles, but won't collect weakly reachable objects until next
  // major GC.  Therefore if we collect aggressively and weak handle callback
  // has been invoked, we rerun major GC to release objects which become
  // garbage.
  // Note: as weak callbacks can execute arbitrary code, we cannot
  // hope that eventually there will be no weak callbacks invocations.
  // Therefore stop recollecting after several attempts.
843
  if (isolate()->concurrent_recompilation_enabled()) {
844 845
    // The optimizing compiler may be unnecessarily holding on to memory.
    DisallowHeapAllocation no_recursive_gc;
846
    isolate()->optimizing_compile_dispatcher()->Flush();
847
  }
848
  isolate()->ClearSerializerData();
849 850
  mark_compact_collector()->SetFlags(kMakeHeapIterableMask |
                                     kReduceMemoryFootprintMask);
851
  isolate_->compilation_cache()->Clear();
852
  const int kMaxNumberOfAttempts = 7;
853
  const int kMinNumberOfAttempts = 2;
854
  for (int attempt = 0; attempt < kMaxNumberOfAttempts; attempt++) {
855 856
    if (!CollectGarbage(MARK_COMPACTOR, gc_reason, NULL,
                        v8::kGCCallbackFlagForced) &&
857
        attempt + 1 >= kMinNumberOfAttempts) {
858 859 860
      break;
    }
  }
861
  mark_compact_collector()->SetFlags(kNoGCFlags);
862
  new_space_.Shrink();
863
  UncommitFromSpace();
864 865 866
}


867 868 869 870 871 872 873
void Heap::EnsureFillerObjectAtTop() {
  // There may be an allocation memento behind every object in new space.
  // If we evacuate a not full new space or if we are on the last page of
  // the new space, then there may be uninitialized memory behind the top
  // pointer of the new space page. We store a filler object there to
  // identify the unused space.
  Address from_top = new_space_.top();
874 875 876 877 878 879 880 881
  // Check that from_top is inside its page (i.e., not at the end).
  Address space_end = new_space_.ToSpaceEnd();
  if (from_top < space_end) {
    Page* page = Page::FromAddress(from_top);
    if (page->Contains(from_top)) {
      int remaining_in_page = static_cast<int>(page->area_end() - from_top);
      CreateFillerObjectAt(from_top, remaining_in_page);
    }
882 883 884 885
  }
}


886
bool Heap::CollectGarbage(GarbageCollector collector, const char* gc_reason,
887 888
                          const char* collector_reason,
                          const v8::GCCallbackFlags gc_callback_flags) {
889
  // The VM is in the GC state until exiting this function.
890
  VMState<GC> state(isolate_);
891 892 893 894 895 896 897 898 899 900

#ifdef DEBUG
  // Reset the allocation timeout to the GC interval, but make sure to
  // allow at least a few allocations after a collection. The reason
  // for this is that we have a lot of allocation sequences and we
  // assume that a garbage collection will allow the subsequent
  // allocation attempts to go through.
  allocation_timeout_ = Max(6, FLAG_gc_interval);
#endif

901
  EnsureFillerObjectAtTop();
902

903 904 905 906 907 908 909
  if (collector == SCAVENGER && !incremental_marking()->IsStopped()) {
    if (FLAG_trace_incremental_marking) {
      PrintF("[IncrementalMarking] Scavenge during marking.\n");
    }
  }

  if (collector == MARK_COMPACTOR &&
910
      !mark_compact_collector()->finalize_incremental_marking() &&
911
      !mark_compact_collector()->abort_incremental_marking() &&
912 913 914
      !incremental_marking()->IsStopped() &&
      !incremental_marking()->should_hurry() &&
      FLAG_incremental_marking_steps) {
915 916 917 918
    // Make progress in incremental marking.
    const intptr_t kStepSizeWhenDelayedByScavenge = 1 * MB;
    incremental_marking()->Step(kStepSizeWhenDelayedByScavenge,
                                IncrementalMarking::NO_GC_VIA_STACK_GUARD);
919 920
    if (!incremental_marking()->IsComplete() &&
        !mark_compact_collector_.marking_deque_.IsEmpty() && !FLAG_gc_global) {
921 922 923 924 925
      if (FLAG_trace_incremental_marking) {
        PrintF("[IncrementalMarking] Delaying MarkSweep.\n");
      }
      collector = SCAVENGER;
      collector_reason = "incremental marking delaying mark-sweep";
926 927 928
    }
  }

929
  bool next_gc_likely_to_collect_more = false;
930 931 932 933 934
  intptr_t committed_memory_before = 0;

  if (collector == MARK_COMPACTOR) {
    committed_memory_before = CommittedOldGenerationMemory();
  }
935

936
  {
937
    tracer()->Start(collector, gc_reason, collector_reason);
938
    DCHECK(AllowHeapAllocation::IsAllowed());
939
    DisallowHeapAllocation no_allocation_during_gc;
940
    GarbageCollectionPrologue();
941

942 943 944 945 946
    {
      HistogramTimerScope histogram_timer_scope(
          (collector == SCAVENGER) ? isolate_->counters()->gc_scavenger()
                                   : isolate_->counters()->gc_compactor());
      next_gc_likely_to_collect_more =
947
          PerformGarbageCollection(collector, gc_callback_flags);
948
    }
949 950

    GarbageCollectionEpilogue();
951 952 953
    if (collector == MARK_COMPACTOR && FLAG_track_detached_contexts) {
      isolate()->CheckDetachedContextsAfterGC();
    }
954 955

    if (collector == MARK_COMPACTOR) {
956 957 958 959 960 961 962 963 964 965 966 967 968 969
      intptr_t committed_memory_after = CommittedOldGenerationMemory();
      intptr_t used_memory_after = PromotedSpaceSizeOfObjects();
      MemoryReducer::Event event;
      event.type = MemoryReducer::kMarkCompact;
      event.time_ms = MonotonicallyIncreasingTimeInMs();
      // Trigger one more GC if
      // - this GC decreased committed memory,
      // - there is high fragmentation,
      // - there are live detached contexts.
      event.next_gc_likely_to_collect_more =
          (committed_memory_before - committed_memory_after) > MB ||
          HasHighFragmentation(used_memory_after, committed_memory_after) ||
          (detached_contexts()->length() > 0);
      memory_reducer_.NotifyMarkCompact(event);
970 971
    }

hpayer's avatar
hpayer committed
972
    tracer()->Stop(collector);
973 974
  }

975 976 977 978 979
  if (collector == MARK_COMPACTOR &&
      (gc_callback_flags & kGCCallbackFlagForced) != 0) {
    isolate()->CountUsage(v8::Isolate::kForcedGC);
  }

980 981 982
  // Start incremental marking for the next cycle. The heap snapshot
  // generator needs incremental marking to stay off after it aborted.
  if (!mark_compact_collector()->abort_incremental_marking() &&
983 984
      incremental_marking()->IsStopped() &&
      incremental_marking()->ShouldActivateEvenWithoutIdleNotification()) {
985
    incremental_marking()->Start(kNoGCFlags);
986 987
  }

988
  return next_gc_likely_to_collect_more;
989 990 991
}


992
int Heap::NotifyContextDisposed(bool dependant_context) {
993 994 995 996
  if (!dependant_context) {
    tracer()->ResetSurvivalEvents();
    old_generation_size_configured_ = false;
  }
997
  if (isolate()->concurrent_recompilation_enabled()) {
998
    // Flush the queued recompilation tasks.
999
    isolate()->optimizing_compile_dispatcher()->Flush();
1000
  }
1001
  AgeInlineCaches();
1002
  set_retained_maps(ArrayList::cast(empty_fixed_array()));
1003
  tracer()->AddContextDisposalTime(base::OS::TimeCurrentMillis());
1004 1005 1006 1007
  MemoryReducer::Event event;
  event.type = MemoryReducer::kContextDisposed;
  event.time_ms = MonotonicallyIncreasingTimeInMs();
  memory_reducer_.NotifyContextDisposed(event);
1008 1009 1010 1011
  return ++contexts_disposed_;
}


1012 1013 1014 1015 1016 1017
void Heap::StartIdleIncrementalMarking() {
  gc_idle_time_handler_.ResetNoProgressCounter();
  incremental_marking()->Start(kReduceMemoryFootprintMask);
}


1018
void Heap::MoveElements(FixedArray* array, int dst_index, int src_index,
1019 1020 1021
                        int len) {
  if (len == 0) return;

1022
  DCHECK(array->map() != fixed_cow_array_map());
1023
  Object** dst_objects = array->data_start() + dst_index;
1024
  MemMove(dst_objects, array->data_start() + src_index, len * kPointerSize);
1025 1026 1027 1028 1029 1030 1031 1032 1033 1034 1035 1036
  if (!InNewSpace(array)) {
    for (int i = 0; i < len; i++) {
      // TODO(hpayer): check store buffer for entries
      if (InNewSpace(dst_objects[i])) {
        RecordWrite(array->address(), array->OffsetOfElementAt(dst_index + i));
      }
    }
  }
  incremental_marking()->RecordWrites(array);
}


1037
#ifdef VERIFY_HEAP
1038 1039
// Helper class for verifying the string table.
class StringTableVerifier : public ObjectVisitor {
1040 1041 1042 1043 1044
 public:
  void VisitPointers(Object** start, Object** end) {
    // Visit all HeapObject pointers in [start, end).
    for (Object** p = start; p < end; p++) {
      if ((*p)->IsHeapObject()) {
1045 1046 1047
        // Check that the string is actually internalized.
        CHECK((*p)->IsTheHole() || (*p)->IsUndefined() ||
              (*p)->IsInternalizedString());
1048 1049
      }
    }
1050 1051
  }
};
1052

1053

1054
static void VerifyStringTable(Heap* heap) {
1055
  StringTableVerifier verifier;
1056
  heap->string_table()->IterateElements(&verifier);
1057
}
1058
#endif  // VERIFY_HEAP
1059 1060


1061
bool Heap::ReserveSpace(Reservation* reservations) {
1062
  bool gc_performed = true;
1063 1064 1065
  int counter = 0;
  static const int kThreshold = 20;
  while (gc_performed && counter++ < kThreshold) {
1066
    gc_performed = false;
1067
    for (int space = NEW_SPACE; space < Serializer::kNumberOfSpaces; space++) {
1068 1069 1070
      Reservation* reservation = &reservations[space];
      DCHECK_LE(1, reservation->length());
      if (reservation->at(0).size == 0) continue;
1071 1072
      bool perform_gc = false;
      if (space == LO_SPACE) {
1073 1074
        DCHECK_EQ(1, reservation->length());
        perform_gc = !lo_space()->CanAllocateSize(reservation->at(0).size);
1075
      } else {
1076 1077 1078
        for (auto& chunk : *reservation) {
          AllocationResult allocation;
          int size = chunk.size;
1079 1080
          DCHECK_LE(size, MemoryAllocator::PageAreaSize(
                              static_cast<AllocationSpace>(space)));
1081
          if (space == NEW_SPACE) {
1082
            allocation = new_space()->AllocateRawUnaligned(size);
1083
          } else {
1084
            allocation = paged_space(space)->AllocateRawUnaligned(size);
1085
          }
1086 1087
          HeapObject* free_space;
          if (allocation.To(&free_space)) {
1088 1089
            // Mark with a free list node, in case we have a GC before
            // deserializing.
1090 1091
            Address free_space_address = free_space->address();
            CreateFillerObjectAt(free_space_address, size);
1092
            DCHECK(space < Serializer::kNumberOfPreallocatedSpaces);
1093 1094
            chunk.start = free_space_address;
            chunk.end = free_space_address + size;
1095 1096 1097 1098
          } else {
            perform_gc = true;
            break;
          }
1099 1100 1101 1102
        }
      }
      if (perform_gc) {
        if (space == NEW_SPACE) {
1103
          CollectGarbage(NEW_SPACE, "failed to reserve space in the new space");
1104
        } else {
1105 1106
          if (counter > 1) {
            CollectAllGarbage(
1107
                kReduceMemoryFootprintMask | kAbortIncrementalMarkingMask,
1108 1109 1110 1111 1112 1113 1114
                "failed to reserve space in paged or large "
                "object space, trying to reduce memory footprint");
          } else {
            CollectAllGarbage(
                kAbortIncrementalMarkingMask,
                "failed to reserve space in paged or large object space");
          }
1115
        }
1116 1117
        gc_performed = true;
        break;  // Abort for-loop over spaces and retry.
1118
      }
1119 1120
    }
  }
1121

1122
  return !gc_performed;
1123 1124 1125
}


1126 1127 1128 1129 1130 1131 1132 1133 1134
void Heap::EnsureFromSpaceIsCommitted() {
  if (new_space_.CommitFromSpaceIfNeeded()) return;

  // Committing memory to from space failed.
  // Memory is exhausted and we will die.
  V8::FatalProcessOutOfMemory("Committing semi space failed.");
}


1135
void Heap::ClearJSFunctionResultCaches() {
1136
  if (isolate_->bootstrapper()->IsActive()) return;
1137

1138
  Object* context = native_contexts_list();
1139
  while (!context->IsUndefined()) {
1140 1141 1142 1143 1144 1145 1146 1147 1148 1149 1150
    // Get the caches for this context. GC can happen when the context
    // is not fully initialized, so the caches can be undefined.
    Object* caches_or_undefined =
        Context::cast(context)->get(Context::JSFUNCTION_RESULT_CACHES_INDEX);
    if (!caches_or_undefined->IsUndefined()) {
      FixedArray* caches = FixedArray::cast(caches_or_undefined);
      // Clear the caches:
      int length = caches->length();
      for (int i = 0; i < length; i++) {
        JSFunctionResultCache::cast(caches->get(i))->Clear();
      }
1151
    }
1152 1153
    // Get the next context:
    context = Context::cast(context)->get(Context::NEXT_CONTEXT_LINK);
1154 1155 1156 1157
  }
}


1158
void Heap::ClearNormalizedMapCaches() {
1159 1160 1161 1162
  if (isolate_->bootstrapper()->IsActive() &&
      !incremental_marking()->IsMarking()) {
    return;
  }
1163

1164
  Object* context = native_contexts_list();
1165
  while (!context->IsUndefined()) {
1166 1167 1168 1169 1170 1171 1172
    // GC can happen when the context is not fully initialized,
    // so the cache can be undefined.
    Object* cache =
        Context::cast(context)->get(Context::NORMALIZED_MAP_CACHE_INDEX);
    if (!cache->IsUndefined()) {
      NormalizedMapCache::cast(cache)->Clear();
    }
1173 1174
    context = Context::cast(context)->get(Context::NEXT_CONTEXT_LINK);
  }
1175 1176 1177
}


1178
void Heap::UpdateSurvivalStatistics(int start_new_space_size) {
1179 1180
  if (start_new_space_size == 0) return;

1181 1182 1183 1184 1185 1186 1187 1188 1189 1190
  promotion_ratio_ = (static_cast<double>(promoted_objects_size_) /
                      static_cast<double>(start_new_space_size) * 100);

  if (previous_semi_space_copied_object_size_ > 0) {
    promotion_rate_ =
        (static_cast<double>(promoted_objects_size_) /
         static_cast<double>(previous_semi_space_copied_object_size_) * 100);
  } else {
    promotion_rate_ = 0;
  }
1191 1192

  semi_space_copied_rate_ =
1193 1194
      (static_cast<double>(semi_space_copied_object_size_) /
       static_cast<double>(start_new_space_size) * 100);
1195

1196
  double survival_rate = promotion_ratio_ + semi_space_copied_rate_;
1197
  tracer()->AddSurvivalRatio(survival_rate);
1198
  if (survival_rate > kYoungSurvivalRateHighThreshold) {
1199 1200 1201 1202
    high_survival_rate_period_length_++;
  } else {
    high_survival_rate_period_length_ = 0;
  }
1203 1204 1205 1206 1207 1208 1209 1210 1211 1212 1213 1214 1215 1216 1217 1218 1219 1220

  if (survival_rate < kYoungSurvivalRateLowThreshold) {
    low_survival_rate_period_length_++;
  } else {
    low_survival_rate_period_length_ = 0;
  }

  double survival_rate_diff = survival_rate_ - survival_rate;

  if (survival_rate_diff > kYoungSurvivalRateAllowedDeviation) {
    set_survival_rate_trend(DECREASING);
  } else if (survival_rate_diff < -kYoungSurvivalRateAllowedDeviation) {
    set_survival_rate_trend(INCREASING);
  } else {
    set_survival_rate_trend(STABLE);
  }

  survival_rate_ = survival_rate;
1221
}
1222

1223
bool Heap::PerformGarbageCollection(
1224
    GarbageCollector collector, const v8::GCCallbackFlags gc_callback_flags) {
1225
  int freed_global_handles = 0;
1226

1227
  if (collector != SCAVENGER) {
1228
    PROFILE(isolate_, CodeMovingGCEvent());
1229 1230
  }

1231
#ifdef VERIFY_HEAP
1232
  if (FLAG_verify_heap) {
1233
    VerifyStringTable(this);
1234
  }
1235 1236
#endif

1237 1238 1239
  GCType gc_type =
      collector == MARK_COMPACTOR ? kGCTypeMarkSweepCompact : kGCTypeScavenge;

1240 1241
  {
    GCCallbacksScope scope(this);
1242 1243
    if (scope.CheckReenter()) {
      AllowHeapAllocation allow_allocation;
1244
      GCTracer::Scope scope(tracer(), GCTracer::Scope::EXTERNAL);
1245 1246 1247 1248
      VMState<EXTERNAL> state(isolate_);
      HandleScope handle_scope(isolate_);
      CallGCPrologueCallbacks(gc_type, kNoGCCallbackFlags);
    }
1249 1250
  }

1251
  EnsureFromSpaceIsCommitted();
1252

1253
  int start_new_space_size = Heap::new_space()->SizeAsInt();
1254

1255 1256 1257 1258 1259 1260 1261
  if (IsHighSurvivalRate()) {
    // We speed up the incremental marker if it is running so that it
    // does not fall behind the rate of promotion, which would cause a
    // constantly growing old space.
    incremental_marking()->NotifyOfHighPromotionRate();
  }

1262
  if (collector == MARK_COMPACTOR) {
1263
    UpdateOldGenerationAllocationCounter();
1264
    // Perform mark-sweep with optional compaction.
1265
    MarkCompact();
1266
    sweep_generation_++;
1267
    old_gen_exhausted_ = false;
1268
    old_generation_size_configured_ = true;
1269
    // This should be updated before PostGarbageCollectionProcessing, which can
1270 1271 1272
    // cause another GC. Take into account the objects promoted during GC.
    old_generation_allocation_counter_ +=
        static_cast<size_t>(promoted_objects_size_);
1273
    old_generation_size_at_last_gc_ = PromotedSpaceSizeOfObjects();
1274 1275
  } else {
    Scavenge();
1276
  }
1277

1278
  bool deopted = ProcessPretenuringFeedback();
1279
  UpdateSurvivalStatistics(start_new_space_size);
1280 1281 1282

  // When pretenuring is collecting new feedback, we do not shrink the new space
  // right away.
1283 1284 1285
  if (deopted) {
    RecordDeoptForPretenuring();
  } else {
1286 1287
    ConfigureNewGenerationSize();
  }
1288
  ConfigureInitialOldGenerationSize();
1289

1290
  isolate_->counters()->objs_since_last_young()->Set(0);
1291

1292 1293 1294 1295 1296 1297 1298 1299 1300 1301 1302
  if (collector != SCAVENGER) {
    // Callbacks that fire after this point might trigger nested GCs and
    // restart incremental marking, the assertion can't be moved down.
    DCHECK(incremental_marking()->IsStopped());

    // We finished a marking cycle. We can uncommit the marking deque until
    // we start marking again.
    mark_compact_collector_.marking_deque()->Uninitialize();
    mark_compact_collector_.EnsureMarkingDequeIsCommitted(
        MarkCompactCollector::kMinMarkingDequeSize);
  }
1303

1304
  gc_post_processing_depth_++;
1305 1306
  {
    AllowHeapAllocation allow_allocation;
1307
    GCTracer::Scope scope(tracer(), GCTracer::Scope::EXTERNAL);
1308
    freed_global_handles =
1309 1310
        isolate_->global_handles()->PostGarbageCollectionProcessing(
            collector, gc_callback_flags);
1311
  }
1312
  gc_post_processing_depth_--;
1313

1314 1315
  isolate_->eternal_handles()->PostGarbageCollectionProcessing(this);

1316
  // Update relocatables.
1317
  Relocatable::PostGarbageCollectionProcessing(isolate_);
1318

1319 1320 1321 1322 1323
  double gc_speed = tracer()->CombinedMarkCompactSpeedInBytesPerMillisecond();
  double mutator_speed = static_cast<double>(
      tracer()
          ->CurrentOldGenerationAllocationThroughputInBytesPerMillisecond());
  intptr_t old_gen_size = PromotedSpaceSizeOfObjects();
1324 1325 1326 1327
  if (collector == MARK_COMPACTOR) {
    // Register the amount of external allocated memory.
    amount_of_external_allocated_memory_at_last_global_gc_ =
        amount_of_external_allocated_memory_;
1328
    SetOldGenerationAllocationLimit(old_gen_size, gc_speed, mutator_speed);
1329 1330
  } else if (HasLowYoungGenerationAllocationRate() &&
             old_generation_size_configured_) {
1331
    DampenOldGenerationAllocationLimit(old_gen_size, gc_speed, mutator_speed);
1332 1333
  }

1334 1335
  {
    GCCallbacksScope scope(this);
1336 1337
    if (scope.CheckReenter()) {
      AllowHeapAllocation allow_allocation;
1338
      GCTracer::Scope scope(tracer(), GCTracer::Scope::EXTERNAL);
1339 1340 1341 1342
      VMState<EXTERNAL> state(isolate_);
      HandleScope handle_scope(isolate_);
      CallGCEpilogueCallbacks(gc_type, gc_callback_flags);
    }
1343
  }
1344 1345

#ifdef VERIFY_HEAP
1346
  if (FLAG_verify_heap) {
1347
    VerifyStringTable(this);
1348
  }
1349
#endif
1350

1351
  return freed_global_handles > 0;
1352 1353 1354
}


1355
void Heap::CallGCPrologueCallbacks(GCType gc_type, GCCallbackFlags flags) {
1356 1357
  for (int i = 0; i < gc_prologue_callbacks_.length(); ++i) {
    if (gc_type & gc_prologue_callbacks_[i].gc_type) {
1358 1359 1360 1361 1362 1363 1364 1365 1366
      if (!gc_prologue_callbacks_[i].pass_isolate_) {
        v8::GCPrologueCallback callback =
            reinterpret_cast<v8::GCPrologueCallback>(
                gc_prologue_callbacks_[i].callback);
        callback(gc_type, flags);
      } else {
        v8::Isolate* isolate = reinterpret_cast<v8::Isolate*>(this->isolate());
        gc_prologue_callbacks_[i].callback(isolate, gc_type, flags);
      }
1367 1368 1369 1370 1371
    }
  }
}


1372 1373
void Heap::CallGCEpilogueCallbacks(GCType gc_type,
                                   GCCallbackFlags gc_callback_flags) {
1374 1375
  for (int i = 0; i < gc_epilogue_callbacks_.length(); ++i) {
    if (gc_type & gc_epilogue_callbacks_[i].gc_type) {
1376 1377 1378 1379
      if (!gc_epilogue_callbacks_[i].pass_isolate_) {
        v8::GCPrologueCallback callback =
            reinterpret_cast<v8::GCPrologueCallback>(
                gc_epilogue_callbacks_[i].callback);
1380
        callback(gc_type, gc_callback_flags);
1381 1382
      } else {
        v8::Isolate* isolate = reinterpret_cast<v8::Isolate*>(this->isolate());
1383
        gc_epilogue_callbacks_[i].callback(isolate, gc_type, gc_callback_flags);
1384
      }
1385 1386 1387 1388 1389
    }
  }
}


1390
void Heap::MarkCompact() {
1391
  gc_state_ = MARK_COMPACT;
1392
  LOG(isolate_, ResourceEvent("markcompact", "begin"));
1393

1394 1395
  uint64_t size_of_objects_before_gc = SizeOfObjects();

1396
  mark_compact_collector_.Prepare();
1397

1398
  ms_count_++;
1399

1400
  MarkCompactPrologue();
1401

1402
  mark_compact_collector_.CollectGarbage();
1403

1404
  LOG(isolate_, ResourceEvent("markcompact", "end"));
1405

1406 1407 1408 1409 1410 1411 1412 1413 1414
  MarkCompactEpilogue();

  if (FLAG_allocation_site_pretenuring) {
    EvaluateOldSpaceLocalPretenuring(size_of_objects_before_gc);
  }
}


void Heap::MarkCompactEpilogue() {
1415 1416
  gc_state_ = NOT_IN_GC;

1417
  isolate_->counters()->objs_since_last_full()->Set(0);
1418

1419
  incremental_marking()->Epilogue();
1420 1421

  PreprocessStackTraces();
1422 1423 1424
}


1425
void Heap::MarkCompactPrologue() {
1426 1427
  // At any old GC clear the keyed lookup cache to enable collection of unused
  // maps.
1428 1429 1430
  isolate_->keyed_lookup_cache()->Clear();
  isolate_->context_slot_cache()->Clear();
  isolate_->descriptor_lookup_cache()->Clear();
1431 1432
  RegExpResultsCache::Clear(string_split_cache());
  RegExpResultsCache::Clear(regexp_multiple_cache());
1433

1434
  isolate_->compilation_cache()->MarkCompactPrologue();
1435

1436 1437
  CompletelyClearInstanceofCache();

1438
  FlushNumberStringCache();
1439 1440 1441
  if (FLAG_cleanup_code_caches_at_gc) {
    polymorphic_code_cache()->set_cache(undefined_value());
  }
1442 1443

  ClearNormalizedMapCaches();
1444 1445 1446 1447
}


// Helper class for copying HeapObjects
1448
class ScavengeVisitor : public ObjectVisitor {
1449
 public:
1450
  explicit ScavengeVisitor(Heap* heap) : heap_(heap) {}
1451

1452
  void VisitPointer(Object** p) { ScavengePointer(p); }
1453 1454 1455

  void VisitPointers(Object** start, Object** end) {
    // Copy all HeapObject pointers in [start, end)
1456
    for (Object** p = start; p < end; p++) ScavengePointer(p);
1457 1458 1459
  }

 private:
1460 1461
  void ScavengePointer(Object** p) {
    Object* object = *p;
1462
    if (!heap_->InNewSpace(object)) return;
1463 1464
    Heap::ScavengeObject(reinterpret_cast<HeapObject**>(p),
                         reinterpret_cast<HeapObject*>(object));
1465 1466
  }

1467
  Heap* heap_;
1468 1469 1470
};


1471
#ifdef VERIFY_HEAP
1472
// Visitor class to verify pointers in code or data space do not point into
1473
// new space.
class VerifyNonPointerSpacePointersVisitor : public ObjectVisitor {
 public:
  explicit VerifyNonPointerSpacePointersVisitor(Heap* heap) : heap_(heap) {}
  void VisitPointers(Object** start, Object** end) {
    for (Object** current = start; current < end; current++) {
      if ((*current)->IsHeapObject()) {
        CHECK(!heap_->InNewSpace(HeapObject::cast(*current)));
      }
    }
  }

 private:
  Heap* heap_;
};


static void VerifyNonPointerSpacePointers(Heap* heap) {
  // Verify that there are no pointers to new space in spaces where we
  // do not expect them.
  VerifyNonPointerSpacePointersVisitor v(heap);
  HeapObjectIterator code_it(heap->code_space());
  for (HeapObject* object = code_it.Next(); object != NULL;
       object = code_it.Next())
    object->Iterate(&v);
}
#endif  // VERIFY_HEAP


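// Grows the new space if there is room to grow and enough data has survived:
// at least 10% of capacity in the last scavenge (experimental heuristic), or
// more than the current capacity since the last expansion while high
// promotion mode is inactive.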
void Heap::CheckNewSpaceExpansionCriteria() {
  if (FLAG_experimental_new_space_growth_heuristic) {
    if (new_space_.TotalCapacity() < new_space_.MaximumCapacity() &&
        survived_last_scavenge_ * 100 / new_space_.TotalCapacity() >= 10) {
      // Grow the size of new space if there is room to grow, and at least 10%
      // of the data survived the last scavenge.
      new_space_.Grow();
      survived_since_last_expansion_ = 0;
    }
  } else if (new_space_.TotalCapacity() < new_space_.MaximumCapacity() &&
             survived_since_last_expansion_ > new_space_.TotalCapacity() &&
             !new_space_high_promotion_mode_active_) {
    // Grow the size of new space if there is room to grow, and enough data
    // has survived scavenge since the last expansion.
    new_space_.Grow();
    survived_since_last_expansion_ = 0;
  }
}


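// An object counts as unscavenged if it is still in new space and carries no
// forwarding address, i.e. it has not been copied yet.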
static bool IsUnscavengedHeapObject(Heap* heap, Object** p) {
  return heap->InNewSpace(*p) &&
         !HeapObject::cast(*p)->map_word().IsForwardingAddress();
}


void Heap::ScavengeStoreBufferCallback(Heap* heap, MemoryChunk* page,
                                       StoreBufferEvent event) {
  heap->store_buffer_rebuilder_.Callback(page, event);
}


void StoreBufferRebuilder::Callback(MemoryChunk* page, StoreBufferEvent event) {
  if (event == kStoreBufferStartScanningPagesEvent) {
    start_of_current_page_ = NULL;
    current_page_ = NULL;
  } else if (event == kStoreBufferScanningPageEvent) {
    if (current_page_ != NULL) {
      // If this page already overflowed the store buffer during this iteration.
      if (current_page_->scan_on_scavenge()) {
        // Then we should wipe out the entries that have been added for it.
        store_buffer_->SetTop(start_of_current_page_);
      } else if (store_buffer_->Top() - start_of_current_page_ >=
                 (store_buffer_->Limit() - store_buffer_->Top()) >> 2) {
        // Did we find too many pointers in the previous page?  The heuristic is
        // that no page can take more than 1/5 the remaining slots in the store
        // buffer.
        current_page_->set_scan_on_scavenge(true);
        store_buffer_->SetTop(start_of_current_page_);
      } else {
        // In this case the page we scanned took a reasonable number of slots in
        // the store buffer.  It has now been rehabilitated and is no longer
        // marked scan_on_scavenge.
        DCHECK(!current_page_->scan_on_scavenge());
      }
    }
    start_of_current_page_ = store_buffer_->Top();
    current_page_ = page;
  } else if (event == kStoreBufferFullEvent) {
    // The current page overflowed the store buffer again.  Wipe out its entries
    // in the store buffer and mark it scan-on-scavenge again.  This may happen
    // several times while scanning.
    if (current_page_ == NULL) {
      // Store Buffer overflowed while scanning promoted objects.  These are not
      // in any particular page, though they are likely to be clustered by the
      // allocation routines.
      store_buffer_->EnsureSpace(StoreBuffer::kStoreBufferSize / 2);
    } else {
      // Store Buffer overflowed while scanning a particular old space page for
      // pointers to new space.
      DCHECK(current_page_ == page);
      DCHECK(page != NULL);
      current_page_->set_scan_on_scavenge(true);
      DCHECK(start_of_current_page_ != store_buffer_->Top());
      store_buffer_->SetTop(start_of_current_page_);
    }
  } else {
    UNREACHABLE();
  }
}


void PromotionQueue::Initialize() {
  // The last to-space page may be used for the promotion queue. On promotion
  // conflict, we use the emergency stack.
  DCHECK((Page::kPageSize - MemoryChunk::kBodyOffset) % (2 * kPointerSize) ==
         0);
  front_ = rear_ =
      reinterpret_cast<intptr_t*>(heap_->new_space()->ToSpaceEnd());
  limit_ = reinterpret_cast<intptr_t*>(
      Page::FromAllocationTop(reinterpret_cast<Address>(rear_))->area_start());
  emergency_stack_ = NULL;
}


void PromotionQueue::RelocateQueueHead() {
  DCHECK(emergency_stack_ == NULL);

  Page* p = Page::FromAllocationTop(reinterpret_cast<Address>(rear_));
  intptr_t* head_start = rear_;
  intptr_t* head_end = Min(front_, reinterpret_cast<intptr_t*>(p->area_end()));

  int entries_count =
      static_cast<int>(head_end - head_start) / kEntrySizeInWords;

  emergency_stack_ = new List<Entry>(2 * entries_count);

  while (head_start != head_end) {
    int size = static_cast<int>(*(head_start++));
    HeapObject* obj = reinterpret_cast<HeapObject*>(*(head_start++));
    // New space allocation in SemiSpaceCopyObject marked the region
    // overlapping with promotion queue as uninitialized.
    MSAN_MEMORY_IS_INITIALIZED(&size, sizeof(size));
    MSAN_MEMORY_IS_INITIALIZED(&obj, sizeof(obj));
    emergency_stack_->Add(Entry(obj, size));
  }
  rear_ = head_end;
}


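// Retainer used for weak lists during a scavenge: objects outside from-space
// are kept as-is, already-copied objects are replaced by their forwarding
// address, and unreachable objects are dropped.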
class ScavengeWeakObjectRetainer : public WeakObjectRetainer {
 public:
  explicit ScavengeWeakObjectRetainer(Heap* heap) : heap_(heap) {}

  virtual Object* RetainAs(Object* object) {
    if (!heap_->InFromSpace(object)) {
      return object;
    }

    MapWord map_word = HeapObject::cast(object)->map_word();
    if (map_word.IsForwardingAddress()) {
      return map_word.ToForwardingAddress();
    }
    return NULL;
  }

 private:
  Heap* heap_;
};


void Heap::Scavenge() {
  RelocationLock relocation_lock(this);
  // There are soft limits in the allocation code, designed to trigger a mark
  // sweep collection by failing allocations. There is no sense in trying to
  // trigger one during scavenge: scavenge allocations should always succeed.
  AlwaysAllocateScope scope(isolate());

#ifdef VERIFY_HEAP
  if (FLAG_verify_heap) VerifyNonPointerSpacePointers(this);
#endif

  gc_state_ = SCAVENGE;

  // Implements Cheney's copying algorithm
  LOG(isolate_, ResourceEvent("scavenge", "begin"));

  // Clear descriptor cache.
  isolate_->descriptor_lookup_cache()->Clear();

  // Used for updating survived_since_last_expansion_ at function end.
  intptr_t survived_watermark = PromotedSpaceSizeOfObjects();

  SelectScavengingVisitorsTable();

  PrepareArrayBufferDiscoveryInNewSpace();

  // Flip the semispaces.  After flipping, to space is empty, from space has
  // live objects.
  new_space_.Flip();
  new_space_.ResetAllocationInfo();

  // We need to sweep newly copied objects which can be either in the
  // to space or promoted to the old generation.  For to-space
  // objects, we treat the bottom of the to space as a queue.  Newly
  // copied and unswept objects lie between a 'front' mark and the
  // allocation pointer.
  //
  // Promoted objects can go into various old-generation spaces, and
  // can be allocated internally in the spaces (from the free list).
  // We treat the top of the to space as a queue of addresses of
  // promoted objects.  The addresses of newly promoted and unswept
  // objects lie between a 'front' mark and a 'rear' mark that is
  // updated as a side effect of promoting an object.
  //
  // There is guaranteed to be enough room at the top of the to space
  // for the addresses of promoted objects: every object promoted
  // frees up its size in bytes from the top of the new space, and
  // objects are at least one pointer in size.
  Address new_space_front = new_space_.ToSpaceStart();
  promotion_queue_.Initialize();

  ScavengeVisitor scavenge_visitor(this);
  // Copy roots.
  IterateRoots(&scavenge_visitor, VISIT_ALL_IN_SCAVENGE);

  // Copy objects reachable from the old generation.
  {
    StoreBufferRebuildScope scope(this, store_buffer(),
                                  &ScavengeStoreBufferCallback);
    store_buffer()->IteratePointersToNewSpace(&ScavengeObject);
  }

  // Copy objects reachable from the encountered weak collections list.
  scavenge_visitor.VisitPointer(&encountered_weak_collections_);
  // Copy objects reachable from the encountered weak cells.
  scavenge_visitor.VisitPointer(&encountered_weak_cells_);

  // Copy objects reachable from the code flushing candidates list.
  MarkCompactCollector* collector = mark_compact_collector();
  if (collector->is_code_flushing_enabled()) {
    collector->code_flusher()->IteratePointersToFromSpace(&scavenge_visitor);
  }

  new_space_front = DoScavenge(&scavenge_visitor, new_space_front);

  while (isolate()->global_handles()->IterateObjectGroups(
      &scavenge_visitor, &IsUnscavengedHeapObject)) {
    new_space_front = DoScavenge(&scavenge_visitor, new_space_front);
  }
  isolate()->global_handles()->RemoveObjectGroups();
  isolate()->global_handles()->RemoveImplicitRefGroups();

  isolate()->global_handles()->IdentifyNewSpaceWeakIndependentHandles(
      &IsUnscavengedHeapObject);

  isolate()->global_handles()->IterateNewSpaceWeakIndependentRoots(
      &scavenge_visitor);
  new_space_front = DoScavenge(&scavenge_visitor, new_space_front);

  UpdateNewSpaceReferencesInExternalStringTable(
      &UpdateNewSpaceReferenceInExternalStringTableEntry);

  promotion_queue_.Destroy();

  incremental_marking()->UpdateMarkingDequeAfterScavenge();

  ScavengeWeakObjectRetainer weak_object_retainer(this);
  ProcessYoungWeakReferences(&weak_object_retainer);

  DCHECK(new_space_front == new_space_.top());

  // Set age mark.
  new_space_.set_age_mark(new_space_.top());

  new_space_.LowerInlineAllocationLimit(
      new_space_.inline_allocation_limit_step());

  FreeDeadArrayBuffers(true);

  // Update how much has survived scavenge.
  IncrementYoungSurvivorsCounter(static_cast<int>(
      (PromotedSpaceSizeOfObjects() - survived_watermark) + new_space_.Size()));

  LOG(isolate_, ResourceEvent("scavenge", "end"));

  gc_state_ = NOT_IN_GC;
}


String* Heap::UpdateNewSpaceReferenceInExternalStringTableEntry(Heap* heap,
                                                                Object** p) {
  MapWord first_word = HeapObject::cast(*p)->map_word();

  if (!first_word.IsForwardingAddress()) {
    // Unreachable external string can be finalized.
    heap->FinalizeExternalString(String::cast(*p));
    return NULL;
  }

  // String is still reachable.
  return String::cast(first_word.ToForwardingAddress());
}


void Heap::UpdateNewSpaceReferencesInExternalStringTable(
    ExternalStringTableUpdaterCallback updater_func) {
#ifdef VERIFY_HEAP
  if (FLAG_verify_heap) {
    external_string_table_.Verify();
  }
#endif

  if (external_string_table_.new_space_strings_.is_empty()) return;

  Object** start = &external_string_table_.new_space_strings_[0];
  Object** end = start + external_string_table_.new_space_strings_.length();
  Object** last = start;

  for (Object** p = start; p < end; ++p) {
    DCHECK(InFromSpace(*p));
    String* target = updater_func(this, p);

    if (target == NULL) continue;

    DCHECK(target->IsExternalString());

    if (InNewSpace(target)) {
      // String is still in new space.  Update the table entry.
      *last = target;
      ++last;
    } else {
      // String got promoted.  Move it to the old string list.
      external_string_table_.AddOldString(target);
    }
  }

  DCHECK(last <= end);
  external_string_table_.ShrinkNewStrings(static_cast<int>(last - start));
}


void Heap::UpdateReferencesInExternalStringTable(
    ExternalStringTableUpdaterCallback updater_func) {
  // Update old space string references.
  if (external_string_table_.old_space_strings_.length() > 0) {
    Object** start = &external_string_table_.old_space_strings_[0];
    Object** end = start + external_string_table_.old_space_strings_.length();
    for (Object** p = start; p < end; ++p) *p = updater_func(this, p);
  }

  UpdateNewSpaceReferencesInExternalStringTable(updater_func);
}


void Heap::ProcessAllWeakReferences(WeakObjectRetainer* retainer) {
  ProcessNativeContexts(retainer);
  ProcessAllocationSites(retainer);
}


void Heap::ProcessYoungWeakReferences(WeakObjectRetainer* retainer) {
  ProcessNativeContexts(retainer);
}


void Heap::ProcessNativeContexts(WeakObjectRetainer* retainer) {
  Object* head = VisitWeakList<Context>(this, native_contexts_list(), retainer);
  // Update the head of the list of contexts.
  set_native_contexts_list(head);
}


void Heap::RegisterNewArrayBufferHelper(std::map<void*, size_t>& live_buffers,
                                        void* data, size_t length) {
  live_buffers[data] = length;
}


void Heap::UnregisterArrayBufferHelper(
    std::map<void*, size_t>& live_buffers,
    std::map<void*, size_t>& not_yet_discovered_buffers, void* data) {
  DCHECK(live_buffers.count(data) > 0);
  live_buffers.erase(data);
  not_yet_discovered_buffers.erase(data);
}


void Heap::RegisterLiveArrayBufferHelper(
    std::map<void*, size_t>& not_yet_discovered_buffers, void* data) {
  not_yet_discovered_buffers.erase(data);
}


size_t Heap::FreeDeadArrayBuffersHelper(
    Isolate* isolate, std::map<void*, size_t>& live_buffers,
    std::map<void*, size_t>& not_yet_discovered_buffers) {
  size_t freed_memory = 0;
  for (auto buffer = not_yet_discovered_buffers.begin();
       buffer != not_yet_discovered_buffers.end(); ++buffer) {
    isolate->array_buffer_allocator()->Free(buffer->first, buffer->second);
    freed_memory += buffer->second;
    live_buffers.erase(buffer->first);
  }
  not_yet_discovered_buffers = live_buffers;
  return freed_memory;
}


void Heap::TearDownArrayBuffersHelper(
    Isolate* isolate, std::map<void*, size_t>& live_buffers,
    std::map<void*, size_t>& not_yet_discovered_buffers) {
  for (auto buffer = live_buffers.begin(); buffer != live_buffers.end();
       ++buffer) {
    isolate->array_buffer_allocator()->Free(buffer->first, buffer->second);
  }
  live_buffers.clear();
  not_yet_discovered_buffers.clear();
}


void Heap::RegisterNewArrayBuffer(bool in_new_space, void* data,
                                  size_t length) {
  if (!data) return;
  RegisterNewArrayBufferHelper(live_array_buffers_, data, length);
  if (in_new_space) {
    RegisterNewArrayBufferHelper(live_array_buffers_for_scavenge_, data,
                                 length);
  }
  reinterpret_cast<v8::Isolate*>(isolate_)
      ->AdjustAmountOfExternalAllocatedMemory(length);
}


void Heap::UnregisterArrayBuffer(bool in_new_space, void* data) {
  if (!data) return;
  UnregisterArrayBufferHelper(live_array_buffers_,
                              not_yet_discovered_array_buffers_, data);
  if (in_new_space) {
    UnregisterArrayBufferHelper(live_array_buffers_for_scavenge_,
                                not_yet_discovered_array_buffers_for_scavenge_,
                                data);
  }
}


void Heap::RegisterLiveArrayBuffer(bool from_scavenge, void* data) {
  // ArrayBuffer might be in the middle of being constructed.
  if (data == undefined_value()) return;
  RegisterLiveArrayBufferHelper(
      from_scavenge ? not_yet_discovered_array_buffers_for_scavenge_
                    : not_yet_discovered_array_buffers_,
      data);
}


void Heap::FreeDeadArrayBuffers(bool from_scavenge) {
  if (from_scavenge) {
    for (auto& buffer : not_yet_discovered_array_buffers_for_scavenge_) {
      not_yet_discovered_array_buffers_.erase(buffer.first);
      live_array_buffers_.erase(buffer.first);
    }
  } else {
    for (auto& buffer : not_yet_discovered_array_buffers_) {
      // Scavenge can't happen during evacuation, so we only need to update
      // live_array_buffers_for_scavenge_.
      // not_yet_discovered_array_buffers_for_scavenge_ will be reset before
      // the next scavenge run in PrepareArrayBufferDiscoveryInNewSpace.
      live_array_buffers_for_scavenge_.erase(buffer.first);
    }
  }
  size_t freed_memory = FreeDeadArrayBuffersHelper(
      isolate_,
      from_scavenge ? live_array_buffers_for_scavenge_ : live_array_buffers_,
      from_scavenge ? not_yet_discovered_array_buffers_for_scavenge_
                    : not_yet_discovered_array_buffers_);
  if (freed_memory) {
    reinterpret_cast<v8::Isolate*>(isolate_)
        ->AdjustAmountOfExternalAllocatedMemory(
            -static_cast<int64_t>(freed_memory));
  }
}


void Heap::TearDownArrayBuffers() {
  TearDownArrayBuffersHelper(isolate_, live_array_buffers_,
                             not_yet_discovered_array_buffers_);
}


void Heap::PrepareArrayBufferDiscoveryInNewSpace() {
  not_yet_discovered_array_buffers_for_scavenge_ =
      live_array_buffers_for_scavenge_;
}


void Heap::PromoteArrayBuffer(Object* obj) {
  JSArrayBuffer* buffer = JSArrayBuffer::cast(obj);
  if (buffer->is_external()) return;
  void* data = buffer->backing_store();
  if (!data) return;
  // ArrayBuffer might be in the middle of being constructed.
  if (data == undefined_value()) return;
  DCHECK(live_array_buffers_for_scavenge_.count(data) > 0);
  DCHECK(live_array_buffers_.count(data) > 0);
  live_array_buffers_for_scavenge_.erase(data);
  not_yet_discovered_array_buffers_for_scavenge_.erase(data);
}


void Heap::ProcessAllocationSites(WeakObjectRetainer* retainer) {
  Object* allocation_site_obj =
      VisitWeakList<AllocationSite>(this, allocation_sites_list(), retainer);
  set_allocation_sites_list(allocation_site_obj);
}


void Heap::ResetAllAllocationSitesDependentCode(PretenureFlag flag) {
  DisallowHeapAllocation no_allocation_scope;
  Object* cur = allocation_sites_list();
  bool marked = false;
  while (cur->IsAllocationSite()) {
    AllocationSite* casted = AllocationSite::cast(cur);
    if (casted->GetPretenureMode() == flag) {
      casted->ResetPretenureDecision();
      casted->set_deopt_dependent_code(true);
      marked = true;
    }
    cur = casted->weak_next();
  }
  if (marked) isolate_->stack_guard()->RequestDeoptMarkedAllocationSites();
}


void Heap::EvaluateOldSpaceLocalPretenuring(
    uint64_t size_of_objects_before_gc) {
  uint64_t size_of_objects_after_gc = SizeOfObjects();
  double old_generation_survival_rate =
      (static_cast<double>(size_of_objects_after_gc) * 100) /
      static_cast<double>(size_of_objects_before_gc);

  if (old_generation_survival_rate < kOldSurvivalRateLowThreshold) {
    // Too many objects died in the old generation, pretenuring of wrong
    // allocation sites may be the cause for that. We have to deopt all
    // dependent code registered in the allocation sites to re-evaluate
    // our pretenuring decisions.
    ResetAllAllocationSitesDependentCode(TENURED);
    if (FLAG_trace_pretenuring) {
      PrintF(
          "Deopt all allocation sites dependent code due to low survival "
          "rate in the old generation %f\n",
          old_generation_survival_rate);
    }
  }
}


void Heap::VisitExternalResources(v8::ExternalResourceVisitor* visitor) {
  DisallowHeapAllocation no_allocation;
  // All external strings are listed in the external string table.

  class ExternalStringTableVisitorAdapter : public ObjectVisitor {
   public:
    explicit ExternalStringTableVisitorAdapter(
        v8::ExternalResourceVisitor* visitor)
        : visitor_(visitor) {}
    virtual void VisitPointers(Object** start, Object** end) {
      for (Object** p = start; p < end; p++) {
        DCHECK((*p)->IsExternalString());
        visitor_->VisitExternalString(
            Utils::ToLocal(Handle<String>(String::cast(*p))));
      }
    }

   private:
    v8::ExternalResourceVisitor* visitor_;
  } external_string_table_visitor(visitor);

  external_string_table_.Iterate(&external_string_table_visitor);
}


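// Static visitor used by DoScavenge to iterate the bodies of newly copied
// objects; every pointer that still refers to new space is scavenged again.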
class NewSpaceScavenger : public StaticNewSpaceVisitor<NewSpaceScavenger> {
 public:
  static inline void VisitPointer(Heap* heap, Object** p) {
    Object* object = *p;
    if (!heap->InNewSpace(object)) return;
    Heap::ScavengeObject(reinterpret_cast<HeapObject**>(p),
                         reinterpret_cast<HeapObject*>(object));
  }
};


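// Drains the two scavenge queues: the newly copied to-space objects between
// |new_space_front| and the allocation top, and the promotion queue of newly
// promoted objects. Returns the new front of the to-space queue.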
Address Heap::DoScavenge(ObjectVisitor* scavenge_visitor,
                         Address new_space_front) {
  do {
    SemiSpace::AssertValidRange(new_space_front, new_space_.top());
    // The addresses new_space_front and new_space_.top() define a
    // queue of unprocessed copied objects.  Process them until the
    // queue is empty.
    while (new_space_front != new_space_.top()) {
      if (!NewSpacePage::IsAtEnd(new_space_front)) {
        HeapObject* object = HeapObject::FromAddress(new_space_front);
        new_space_front +=
            NewSpaceScavenger::IterateBody(object->map(), object);
      } else {
        new_space_front =
            NewSpacePage::FromLimit(new_space_front)->next_page()->area_start();
      }
    }

    // Promote and process all the to-be-promoted objects.
    {
      StoreBufferRebuildScope scope(this, store_buffer(),
                                    &ScavengeStoreBufferCallback);
      while (!promotion_queue()->is_empty()) {
        HeapObject* target;
        int size;
        promotion_queue()->remove(&target, &size);

        // A promoted object might already have been partially visited
        // during old space pointer iteration. Thus we search specifically
        // for pointers into the from-space instead of looking for pointers
        // to new space.
        DCHECK(!target->IsMap());
        Address obj_address = target->address();

        // We are not collecting slots on new space objects during mutation,
        // thus we have to scan for pointers to evacuation candidates when we
        // promote objects. But we should not record any slots in non-black
        // objects. A grey object's slots will be rescanned anyway, and a
        // white object might not survive until the end of the collection, so
        // recording its slots would violate the invariant.
        bool record_slots = false;
        if (incremental_marking()->IsCompacting()) {
          MarkBit mark_bit = Marking::MarkBitFrom(target);
          record_slots = Marking::IsBlack(mark_bit);
        }
#if V8_DOUBLE_FIELDS_UNBOXING
        LayoutDescriptorHelper helper(target->map());
        bool has_only_tagged_fields = helper.all_fields_tagged();

        if (!has_only_tagged_fields) {
          for (int offset = 0; offset < size;) {
            int end_of_region_offset;
            if (helper.IsTagged(offset, size, &end_of_region_offset)) {
              IterateAndMarkPointersToFromSpace(
                  record_slots, obj_address + offset,
                  obj_address + end_of_region_offset, &ScavengeObject);
            }
            offset = end_of_region_offset;
          }
        } else {
#endif
          IterateAndMarkPointersToFromSpace(
              record_slots, obj_address, obj_address + size, &ScavengeObject);
#if V8_DOUBLE_FIELDS_UNBOXING
        }
#endif
      }
    }

    // Take another spin if there are now unswept objects in new space
    // (there are currently no more unswept promoted objects).
  } while (new_space_front != new_space_.top());

  return new_space_front;
}


STATIC_ASSERT((FixedDoubleArray::kHeaderSize & kDoubleAlignmentMask) ==
              0);  // NOLINT
STATIC_ASSERT((FixedTypedArrayBase::kDataOffset & kDoubleAlignmentMask) ==
              0);  // NOLINT
#ifdef V8_HOST_ARCH_32_BIT
STATIC_ASSERT((HeapNumber::kValueOffset & kDoubleAlignmentMask) !=
              0);  // NOLINT
#endif


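// Upper bound on the number of filler bytes that may be needed to establish
// |alignment|, independent of the actual allocation address.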
int Heap::GetMaximumFillToAlign(AllocationAlignment alignment) {
  switch (alignment) {
    case kWordAligned:
      return 0;
    case kDoubleAligned:
    case kDoubleUnaligned:
      return kDoubleSize - kPointerSize;
    case kSimd128Unaligned:
      return kSimd128Size - kPointerSize;
    default:
      UNREACHABLE();
  }
  return 0;
}


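// Number of filler bytes actually required at |address| to satisfy
// |alignment|; zero if the address is already suitably aligned.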
int Heap::GetFillToAlign(Address address, AllocationAlignment alignment) {
  intptr_t offset = OffsetFrom(address);
  if (alignment == kDoubleAligned && (offset & kDoubleAlignmentMask) != 0)
    return kPointerSize;
  if (alignment == kDoubleUnaligned && (offset & kDoubleAlignmentMask) == 0)
    return kDoubleSize - kPointerSize;  // No fill if double is always aligned.
  if (alignment == kSimd128Unaligned) {
    return (kSimd128Size - (static_cast<int>(offset) + kPointerSize)) &
           kSimd128AlignmentMask;
  }
  return 0;
}


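// Places a filler of |filler_size| bytes at |object|'s current address and
// returns the object start shifted to just behind the filler.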
HeapObject* Heap::PrecedeWithFiller(HeapObject* object, int filler_size) {
  CreateFillerObjectAt(object->address(), filler_size);
  return HeapObject::FromAddress(object->address() + filler_size);
}


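// Aligns |object| of |object_size| bytes within an allocation of
// |allocation_size| bytes, creating fillers for the unused space before
// and/or after the payload.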
HeapObject* Heap::AlignWithFiller(HeapObject* object, int object_size,
                                  int allocation_size,
                                  AllocationAlignment alignment) {
  int filler_size = allocation_size - object_size;
  DCHECK(filler_size > 0);
  int pre_filler = GetFillToAlign(object->address(), alignment);
  if (pre_filler) {
    object = PrecedeWithFiller(object, pre_filler);
    filler_size -= pre_filler;
  }
  if (filler_size)
    CreateFillerObjectAt(object->address() + object_size, filler_size);
  return object;
}


HeapObject* Heap::DoubleAlignForDeserialization(HeapObject* object, int size) {
  return AlignWithFiller(object, size - kPointerSize, size, kDoubleAligned);
}


enum LoggingAndProfiling {
  LOGGING_AND_PROFILING_ENABLED,
  LOGGING_AND_PROFILING_DISABLED
};


enum MarksHandling { TRANSFER_MARKS, IGNORE_MARKS };


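// Dispatch-table-driven visitor that copies or promotes a new-space object
// during scavenging. |marks_handling| controls whether incremental-marking
// bits are transferred to the new copy, |logging_and_profiling_mode| whether
// object-move events are reported.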
template <MarksHandling marks_handling,
          LoggingAndProfiling logging_and_profiling_mode>
class ScavengingVisitor : public StaticVisitorBase {
 public:
  static void Initialize() {
    table_.Register(kVisitSeqOneByteString, &EvacuateSeqOneByteString);
    table_.Register(kVisitSeqTwoByteString, &EvacuateSeqTwoByteString);
    table_.Register(kVisitShortcutCandidate, &EvacuateShortcutCandidate);
    table_.Register(kVisitByteArray, &EvacuateByteArray);
    table_.Register(kVisitFixedArray, &EvacuateFixedArray);
    table_.Register(kVisitFixedDoubleArray, &EvacuateFixedDoubleArray);
    table_.Register(kVisitFixedTypedArray, &EvacuateFixedTypedArray);
    table_.Register(kVisitFixedFloat64Array, &EvacuateFixedFloat64Array);
    table_.Register(kVisitJSArrayBuffer, &EvacuateJSArrayBuffer);

    table_.Register(
        kVisitNativeContext,
        &ObjectEvacuationStrategy<POINTER_OBJECT>::template VisitSpecialized<
            Context::kSize>);

    table_.Register(
        kVisitConsString,
        &ObjectEvacuationStrategy<POINTER_OBJECT>::template VisitSpecialized<
            ConsString::kSize>);

    table_.Register(
        kVisitSlicedString,
        &ObjectEvacuationStrategy<POINTER_OBJECT>::template VisitSpecialized<
            SlicedString::kSize>);

    table_.Register(
        kVisitSymbol,
        &ObjectEvacuationStrategy<POINTER_OBJECT>::template VisitSpecialized<
            Symbol::kSize>);

    table_.Register(
        kVisitSharedFunctionInfo,
        &ObjectEvacuationStrategy<POINTER_OBJECT>::template VisitSpecialized<
            SharedFunctionInfo::kSize>);

    table_.Register(kVisitJSWeakCollection,
                    &ObjectEvacuationStrategy<POINTER_OBJECT>::Visit);

    table_.Register(kVisitJSTypedArray,
                    &ObjectEvacuationStrategy<POINTER_OBJECT>::Visit);

    table_.Register(kVisitJSDataView,
                    &ObjectEvacuationStrategy<POINTER_OBJECT>::Visit);

    table_.Register(kVisitJSRegExp,
                    &ObjectEvacuationStrategy<POINTER_OBJECT>::Visit);

    if (marks_handling == IGNORE_MARKS) {
      table_.Register(
          kVisitJSFunction,
          &ObjectEvacuationStrategy<POINTER_OBJECT>::template VisitSpecialized<
              JSFunction::kSize>);
    } else {
      table_.Register(kVisitJSFunction, &EvacuateJSFunction);
    }

    table_.RegisterSpecializations<ObjectEvacuationStrategy<DATA_OBJECT>,
                                   kVisitDataObject, kVisitDataObjectGeneric>();

    table_.RegisterSpecializations<ObjectEvacuationStrategy<POINTER_OBJECT>,
                                   kVisitJSObject, kVisitJSObjectGeneric>();

    table_.RegisterSpecializations<ObjectEvacuationStrategy<POINTER_OBJECT>,
                                   kVisitStruct, kVisitStructGeneric>();
  }

  static VisitorDispatchTable<ScavengingCallback>* GetTable() {
    return &table_;
  }

 private:
  enum ObjectContents { DATA_OBJECT, POINTER_OBJECT };

  static void RecordCopiedObject(Heap* heap, HeapObject* obj) {
    bool should_record = false;
#ifdef DEBUG
    should_record = FLAG_heap_stats;
#endif
    should_record = should_record || FLAG_log_gc;
    if (should_record) {
      if (heap->new_space()->Contains(obj)) {
        heap->new_space()->RecordAllocation(obj);
      } else {
        heap->new_space()->RecordPromotion(obj);
      }
    }
  }

  // Helper function used by CopyObject to copy a source object to an
  // allocated target object and update the forwarding pointer in the source
  // object.  Returns the target object.
  INLINE(static void MigrateObject(Heap* heap, HeapObject* source,
                                   HeapObject* target, int size)) {
    // If we migrate into to-space, then the to-space top pointer should be
    // right after the target object. Incorporate double alignment
    // over-allocation.
    DCHECK(!heap->InToSpace(target) ||
           target->address() + size == heap->new_space()->top() ||
           target->address() + size + kPointerSize == heap->new_space()->top());

    // Make sure that we do not overwrite the promotion queue which is at
    // the end of to-space.
    DCHECK(!heap->InToSpace(target) ||
           heap->promotion_queue()->IsBelowPromotionQueue(
               heap->new_space()->top()));

    // Copy the content of source to target.
    heap->CopyBlock(target->address(), source->address(), size);

    // Set the forwarding address.
    source->set_map_word(MapWord::FromForwardingAddress(target));

    if (logging_and_profiling_mode == LOGGING_AND_PROFILING_ENABLED) {
      // Update NewSpace stats if necessary.
      RecordCopiedObject(heap, target);
      heap->OnMoveEvent(target, source, size);
    }

    if (marks_handling == TRANSFER_MARKS) {
      if (Marking::TransferColor(source, target)) {
        MemoryChunk::IncrementLiveBytesFromGC(target->address(), size);
      }
    }
  }

  template <AllocationAlignment alignment>
  static inline bool SemiSpaceCopyObject(Map* map, HeapObject** slot,
                                         HeapObject* object, int object_size) {
    Heap* heap = map->GetHeap();

    DCHECK(heap->AllowedToBeMigrated(object, NEW_SPACE));
    AllocationResult allocation =
        heap->new_space()->AllocateRaw(object_size, alignment);

    HeapObject* target = NULL;  // Initialization to please compiler.
    if (allocation.To(&target)) {
      // Order is important here: Set the promotion limit before storing a
      // filler for double alignment or migrating the object. Otherwise we
      // may end up overwriting promotion queue entries when we migrate the
      // object.
      heap->promotion_queue()->SetNewLimit(heap->new_space()->top());

      MigrateObject(heap, object, target, object_size);

      // Update slot to new target.
      *slot = target;

      heap->IncrementSemiSpaceCopiedObjectSize(object_size);
      return true;
    }
    return false;
  }


  template <ObjectContents object_contents, AllocationAlignment alignment>
  static inline bool PromoteObject(Map* map, HeapObject** slot,
                                   HeapObject* object, int object_size) {
    Heap* heap = map->GetHeap();

    AllocationResult allocation =
        heap->old_space()->AllocateRaw(object_size, alignment);

    HeapObject* target = NULL;  // Initialization to please compiler.
    if (allocation.To(&target)) {
      MigrateObject(heap, object, target, object_size);

      // Update slot to new target.
      *slot = target;

      if (object_contents == POINTER_OBJECT) {
        if (map->instance_type() == JS_FUNCTION_TYPE) {
          heap->promotion_queue()->insert(target,
                                          JSFunction::kNonWeakFieldsEndOffset);
        } else {
          heap->promotion_queue()->insert(target, object_size);
        }
      }
      heap->IncrementPromotedObjectsSize(object_size);
      return true;
    }
    return false;
  }


  template <ObjectContents object_contents, AllocationAlignment alignment>
  static inline void EvacuateObject(Map* map, HeapObject** slot,
                                    HeapObject* object, int object_size) {
    SLOW_DCHECK(object_size <= Page::kMaxRegularHeapObjectSize);
    SLOW_DCHECK(object->Size() == object_size);
    Heap* heap = map->GetHeap();

    if (!heap->ShouldBePromoted(object->address(), object_size)) {
      // A semi-space copy may fail due to fragmentation. In that case, we
      // try to promote the object.
      if (SemiSpaceCopyObject<alignment>(map, slot, object, object_size)) {
        return;
      }
    }

    if (PromoteObject<object_contents, alignment>(map, slot, object,
                                                  object_size)) {
      return;
    }

    // If promotion failed, we try to copy the object to the other semi-space
    if (SemiSpaceCopyObject<alignment>(map, slot, object, object_size)) return;

    UNREACHABLE();
  }


  static inline void EvacuateJSFunction(Map* map, HeapObject** slot,
                                        HeapObject* object) {
    ObjectEvacuationStrategy<POINTER_OBJECT>::template VisitSpecialized<
        JSFunction::kSize>(map, slot, object);

    MapWord map_word = object->map_word();
    DCHECK(map_word.IsForwardingAddress());
    HeapObject* target = map_word.ToForwardingAddress();

    MarkBit mark_bit = Marking::MarkBitFrom(target);
    if (Marking::IsBlack(mark_bit)) {
      // This object is black and it might not be rescanned by marker.
      // We should explicitly record code entry slot for compaction because
      // promotion queue processing (IterateAndMarkPointersToFromSpace) will
      // miss it as it is not HeapObject-tagged.
      Address code_entry_slot =
          target->address() + JSFunction::kCodeEntryOffset;
      Code* code = Code::cast(Code::GetObjectFromEntryAddress(code_entry_slot));
      map->GetHeap()->mark_compact_collector()->RecordCodeEntrySlot(
          code_entry_slot, code);
    }
  }


  static inline void EvacuateFixedArray(Map* map, HeapObject** slot,
                                        HeapObject* object) {
    int object_size = FixedArray::BodyDescriptor::SizeOf(map, object);
    EvacuateObject<POINTER_OBJECT, kWordAligned>(map, slot, object,
                                                 object_size);
  }


  static inline void EvacuateFixedDoubleArray(Map* map, HeapObject** slot,
                                              HeapObject* object) {
    int length = reinterpret_cast<FixedDoubleArray*>(object)->length();
    int object_size = FixedDoubleArray::SizeFor(length);
    EvacuateObject<DATA_OBJECT, kDoubleAligned>(map, slot, object, object_size);
  }


  static inline void EvacuateFixedTypedArray(Map* map, HeapObject** slot,
                                             HeapObject* object) {
    int object_size = reinterpret_cast<FixedTypedArrayBase*>(object)->size();
    EvacuateObject<DATA_OBJECT, kWordAligned>(map, slot, object, object_size);

    MapWord map_word = object->map_word();
    DCHECK(map_word.IsForwardingAddress());
    FixedTypedArrayBase* target =
        reinterpret_cast<FixedTypedArrayBase*>(map_word.ToForwardingAddress());
    target->set_base_pointer(target, SKIP_WRITE_BARRIER);
  }


  static inline void EvacuateFixedFloat64Array(Map* map, HeapObject** slot,
                                               HeapObject* object) {
    int object_size = reinterpret_cast<FixedFloat64Array*>(object)->size();
    EvacuateObject<DATA_OBJECT, kDoubleAligned>(map, slot, object, object_size);

    MapWord map_word = object->map_word();
    DCHECK(map_word.IsForwardingAddress());
    FixedTypedArrayBase* target =
        reinterpret_cast<FixedTypedArrayBase*>(map_word.ToForwardingAddress());
    target->set_base_pointer(target, SKIP_WRITE_BARRIER);
  }


  static inline void EvacuateJSArrayBuffer(Map* map, HeapObject** slot,
                                           HeapObject* object) {
    ObjectEvacuationStrategy<POINTER_OBJECT>::Visit(map, slot, object);

    Heap* heap = map->GetHeap();
    MapWord map_word = object->map_word();
    DCHECK(map_word.IsForwardingAddress());
    HeapObject* target = map_word.ToForwardingAddress();
    if (!heap->InNewSpace(target)) heap->PromoteArrayBuffer(target);
  }


  static inline void EvacuateByteArray(Map* map, HeapObject** slot,
                                       HeapObject* object) {
    int object_size = reinterpret_cast<ByteArray*>(object)->ByteArraySize();
    EvacuateObject<DATA_OBJECT, kWordAligned>(map, slot, object, object_size);
  }


  static inline void EvacuateSeqOneByteString(Map* map, HeapObject** slot,
                                              HeapObject* object) {
    int object_size = SeqOneByteString::cast(object)
                          ->SeqOneByteStringSize(map->instance_type());
    EvacuateObject<DATA_OBJECT, kWordAligned>(map, slot, object, object_size);
  }


  static inline void EvacuateSeqTwoByteString(Map* map, HeapObject** slot,
                                              HeapObject* object) {
    int object_size = SeqTwoByteString::cast(object)
                          ->SeqTwoByteStringSize(map->instance_type());
    EvacuateObject<DATA_OBJECT, kWordAligned>(map, slot, object, object_size);
  }


  static inline void EvacuateShortcutCandidate(Map* map, HeapObject** slot,
                                               HeapObject* object) {
    DCHECK(IsShortcutCandidate(map->instance_type()));

    Heap* heap = map->GetHeap();

    if (marks_handling == IGNORE_MARKS &&
        ConsString::cast(object)->unchecked_second() == heap->empty_string()) {
      HeapObject* first =
          HeapObject::cast(ConsString::cast(object)->unchecked_first());

      *slot = first;

      if (!heap->InNewSpace(first)) {
        object->set_map_word(MapWord::FromForwardingAddress(first));
        return;
      }

      MapWord first_word = first->map_word();
      if (first_word.IsForwardingAddress()) {
        HeapObject* target = first_word.ToForwardingAddress();

        *slot = target;
        object->set_map_word(MapWord::FromForwardingAddress(target));
        return;
      }

      heap->DoScavengeObject(first->map(), slot, first);
      object->set_map_word(MapWord::FromForwardingAddress(*slot));
      return;
    }

    int object_size = ConsString::kSize;
    EvacuateObject<POINTER_OBJECT, kWordAligned>(map, slot, object,
                                                 object_size);
  }

  template <ObjectContents object_contents>
  class ObjectEvacuationStrategy {
   public:
    template <int object_size>
    static inline void VisitSpecialized(Map* map, HeapObject** slot,
                                        HeapObject* object) {
      EvacuateObject<object_contents, kWordAligned>(map, slot, object,
                                                    object_size);
    }

    static inline void Visit(Map* map, HeapObject** slot, HeapObject* object) {
      int object_size = map->instance_size();
      EvacuateObject<object_contents, kWordAligned>(map, slot, object,
                                                    object_size);
    }
  };

  static VisitorDispatchTable<ScavengingCallback> table_;
};


template <MarksHandling marks_handling,
          LoggingAndProfiling logging_and_profiling_mode>
VisitorDispatchTable<ScavengingCallback>
    ScavengingVisitor<marks_handling, logging_and_profiling_mode>::table_;


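// Initializes the four static dispatch tables, one per combination of mark
// handling and logging/profiling mode.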
static void InitializeScavengingVisitorsTables() {
  ScavengingVisitor<TRANSFER_MARKS,
                    LOGGING_AND_PROFILING_DISABLED>::Initialize();
  ScavengingVisitor<IGNORE_MARKS, LOGGING_AND_PROFILING_DISABLED>::Initialize();
  ScavengingVisitor<TRANSFER_MARKS,
                    LOGGING_AND_PROFILING_ENABLED>::Initialize();
  ScavengingVisitor<IGNORE_MARKS, LOGGING_AND_PROFILING_ENABLED>::Initialize();
}


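// Picks the dispatch table matching the current incremental-marking and
// logging/profiling state. When compacting, cons-string short-circuiting is
// disabled because it could leave references to objects on evacuation
// candidates.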
void Heap::SelectScavengingVisitorsTable() {
  bool logging_and_profiling =
      FLAG_verify_predictable || isolate()->logger()->is_logging() ||
      isolate()->cpu_profiler()->is_profiling() ||
      (isolate()->heap_profiler() != NULL &&
       isolate()->heap_profiler()->is_tracking_object_moves());

  if (!incremental_marking()->IsMarking()) {
    if (!logging_and_profiling) {
      scavenging_visitors_table_.CopyFrom(ScavengingVisitor<
          IGNORE_MARKS, LOGGING_AND_PROFILING_DISABLED>::GetTable());
    } else {
      scavenging_visitors_table_.CopyFrom(ScavengingVisitor<
          IGNORE_MARKS, LOGGING_AND_PROFILING_ENABLED>::GetTable());
    }
  } else {
    if (!logging_and_profiling) {
      scavenging_visitors_table_.CopyFrom(ScavengingVisitor<
          TRANSFER_MARKS, LOGGING_AND_PROFILING_DISABLED>::GetTable());
    } else {
      scavenging_visitors_table_.CopyFrom(ScavengingVisitor<
          TRANSFER_MARKS, LOGGING_AND_PROFILING_ENABLED>::GetTable());
    }

    if (incremental_marking()->IsCompacting()) {
      // When compacting forbid short-circuiting of cons-strings.
      // Scavenging code relies on the fact that new space object
      // can't be evacuated into evacuation candidate but
      // short-circuiting violates this assumption.
      scavenging_visitors_table_.Register(
          StaticVisitorBase::kVisitShortcutCandidate,
          scavenging_visitors_table_.GetVisitorById(
              StaticVisitorBase::kVisitConsString));
    }
  }
}


void Heap::ScavengeObjectSlow(HeapObject** p, HeapObject* object) {
  SLOW_DCHECK(object->GetIsolate()->heap()->InFromSpace(object));
  MapWord first_word = object->map_word();
  SLOW_DCHECK(!first_word.IsForwardingAddress());
  Map* map = first_word.ToMap();
  map->GetHeap()->DoScavengeObject(map, p, object);
}


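// While the old generation size has not been configured, scales the old
// generation allocation limit by the average survival ratio once survival
// events have been recorded, never going below the minimum limit.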
void Heap::ConfigureInitialOldGenerationSize() {
  if (!old_generation_size_configured_ && tracer()->SurvivalEventsRecorded()) {
    old_generation_allocation_limit_ =
        Max(kMinimumOldGenerationAllocationLimit,
            static_cast<intptr_t>(
                static_cast<double>(old_generation_allocation_limit_) *
                (tracer()->AverageSurvivalRatio() / 100)));
  }
}


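// Moves the new space in and out of high promotion mode based on survival
// trends: sustained high survival at maximum capacity limits and shrinks the
// young generation, while low survival lets it grow again.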
void Heap::ConfigureNewGenerationSize() {
  bool still_gathering_lifetime_data = gathering_lifetime_feedback_ != 0;
  if (gathering_lifetime_feedback_ != 0) gathering_lifetime_feedback_--;
  if (!new_space_high_promotion_mode_active_ &&
      new_space_.TotalCapacity() == new_space_.MaximumCapacity() &&
      IsStableOrIncreasingSurvivalTrend() && IsHighSurvivalRate()) {
    // Stable high survival rates even though young generation is at
    // maximum capacity indicate that most objects will be promoted.
    // To decrease scavenger pauses and final mark-sweep pauses, we
    // have to limit maximal capacity of the young generation.
    if (still_gathering_lifetime_data) {
      if (FLAG_trace_gc) {
        PrintPID(
            "Postpone entering high promotion mode as optimized pretenuring "
            "code is still being generated\n");
      }
    } else {
      new_space_high_promotion_mode_active_ = true;
      if (FLAG_trace_gc) {
        PrintPID("Limited new space size due to high promotion rate: %d MB\n",
                 new_space_.InitialTotalCapacity() / MB);
      }
    }
  } else if (new_space_high_promotion_mode_active_ &&
             IsStableOrDecreasingSurvivalTrend() && IsLowSurvivalRate()) {
    // Decreasing low survival rates might indicate that the above high
    // promotion mode is over and we should allow the young generation
    // to grow again.
    new_space_high_promotion_mode_active_ = false;
    if (FLAG_trace_gc) {
      PrintPID("Unlimited new space size due to low promotion rate: %d MB\n",
               new_space_.MaximumCapacity() / MB);
    }
  }

  if (new_space_high_promotion_mode_active_ &&
      new_space_.TotalCapacity() > new_space_.InitialTotalCapacity()) {
    new_space_.Shrink();
  }
}


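// Allocates a map with only the fields needed during bootstrapping filled in;
// CreateInitialMaps fixes up the remaining fields afterwards.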
AllocationResult Heap::AllocatePartialMap(InstanceType instance_type,
                                          int instance_size) {
  Object* result = nullptr;
  AllocationResult allocation = AllocateRaw(Map::kSize, MAP_SPACE, MAP_SPACE);
  if (!allocation.To(&result)) return allocation;

  // Map::cast cannot be used due to uninitialized map field.
  reinterpret_cast<Map*>(result)->set_map(raw_unchecked_meta_map());
  reinterpret_cast<Map*>(result)->set_instance_type(instance_type);
  reinterpret_cast<Map*>(result)->set_instance_size(instance_size);
  // Initialize to only containing tagged fields.
  reinterpret_cast<Map*>(result)->set_visitor_id(
      StaticVisitorBase::GetVisitorId(instance_type, instance_size, false));
  if (FLAG_unbox_double_fields) {
    reinterpret_cast<Map*>(result)
        ->set_layout_descriptor(LayoutDescriptor::FastPointerLayout());
  }
  reinterpret_cast<Map*>(result)->clear_unused();
  reinterpret_cast<Map*>(result)->set_inobject_properties(0);
  reinterpret_cast<Map*>(result)->set_unused_property_fields(0);
  reinterpret_cast<Map*>(result)->set_bit_field(0);
  reinterpret_cast<Map*>(result)->set_bit_field2(0);
  int bit_field3 = Map::EnumLengthBits::encode(kInvalidEnumCacheSentinel) |
                   Map::OwnsDescriptors::encode(true) |
                   Map::Counter::encode(Map::kRetainingCounterStart);
  reinterpret_cast<Map*>(result)->set_bit_field3(bit_field3);
  reinterpret_cast<Map*>(result)->set_weak_cell_cache(Smi::FromInt(0));
  return result;
}


AllocationResult Heap::AllocateMap(InstanceType instance_type,
                                   int instance_size,
                                   ElementsKind elements_kind) {
  HeapObject* result;
  AllocationResult allocation = AllocateRaw(Map::kSize, MAP_SPACE, MAP_SPACE);
  if (!allocation.To(&result)) return allocation;

  result->set_map_no_write_barrier(meta_map());
  Map* map = Map::cast(result);
  map->set_instance_type(instance_type);
  map->set_prototype(null_value(), SKIP_WRITE_BARRIER);
  map->set_constructor_or_backpointer(null_value(), SKIP_WRITE_BARRIER);
  map->set_instance_size(instance_size);
  map->clear_unused();
  map->set_inobject_properties(0);
  map->set_code_cache(empty_fixed_array(), SKIP_WRITE_BARRIER);
  map->set_dependent_code(DependentCode::cast(empty_fixed_array()),
                          SKIP_WRITE_BARRIER);
  map->set_weak_cell_cache(Smi::FromInt(0));
  map->set_raw_transitions(Smi::FromInt(0));
  map->set_unused_property_fields(0);
  map->set_instance_descriptors(empty_descriptor_array());
  if (FLAG_unbox_double_fields) {
    map->set_layout_descriptor(LayoutDescriptor::FastPointerLayout());
  }
  // Must be called only after |instance_type|, |instance_size| and
  // |layout_descriptor| are set.
  map->set_visitor_id(StaticVisitorBase::GetVisitorId(map));
  map->set_bit_field(0);
  map->set_bit_field2(1 << Map::kIsExtensible);
  int bit_field3 = Map::EnumLengthBits::encode(kInvalidEnumCacheSentinel) |
                   Map::OwnsDescriptors::encode(true) |
                   Map::Counter::encode(Map::kRetainingCounterStart);
  map->set_bit_field3(bit_field3);
  map->set_elements_kind(elements_kind);

  return map;
}


AllocationResult Heap::AllocateFillerObject(int size, bool double_align,
                                            AllocationSpace space) {
  HeapObject* obj;
  {
    AllocationAlignment align = double_align ? kDoubleAligned : kWordAligned;
    AllocationResult allocation = AllocateRaw(size, space, space, align);
    if (!allocation.To(&obj)) return allocation;
  }
#ifdef DEBUG
  MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address());
  DCHECK(chunk->owner()->identity() == space);
#endif
  CreateFillerObjectAt(obj->address(), size);
  return obj;
}


const Heap::StringTypeTable Heap::string_type_table[] = {
#define STRING_TYPE_ELEMENT(type, size, name, camel_name) \
  { type, size, k##camel_name##MapRootIndex }             \
  ,
    STRING_TYPE_LIST(STRING_TYPE_ELEMENT)
#undef STRING_TYPE_ELEMENT
};


const Heap::ConstantStringTable Heap::constant_string_table[] = {
    {"", kempty_stringRootIndex},
#define CONSTANT_STRING_ELEMENT(name, contents) \
  { contents, k##name##RootIndex }              \
  ,
    INTERNALIZED_STRING_LIST(CONSTANT_STRING_ELEMENT)
#undef CONSTANT_STRING_ELEMENT
};


const Heap::StructTable Heap::struct_table[] = {
#define STRUCT_TABLE_ELEMENT(NAME, Name, name)        \
  { NAME##_TYPE, Name::kSize, k##Name##MapRootIndex } \
  ,
    STRUCT_LIST(STRUCT_TABLE_ELEMENT)
#undef STRUCT_TABLE_ELEMENT
};


bool Heap::CreateInitialMaps() {
  HeapObject* obj;
  {
    AllocationResult allocation = AllocatePartialMap(MAP_TYPE, Map::kSize);
    if (!allocation.To(&obj)) return false;
  }
  // Map::cast cannot be used due to uninitialized map field.
  Map* new_meta_map = reinterpret_cast<Map*>(obj);
  set_meta_map(new_meta_map);
  new_meta_map->set_map(new_meta_map);

  {  // Partial map allocation
#define ALLOCATE_PARTIAL_MAP(instance_type, size, field_name)                \
  {                                                                          \
    Map* map;                                                                \
    if (!AllocatePartialMap((instance_type), (size)).To(&map)) return false; \
    set_##field_name##_map(map);                                             \
  }
2843

2844 2845 2846
    ALLOCATE_PARTIAL_MAP(FIXED_ARRAY_TYPE, kVariableSizeSentinel, fixed_array);
    ALLOCATE_PARTIAL_MAP(ODDBALL_TYPE, Oddball::kSize, undefined);
    ALLOCATE_PARTIAL_MAP(ODDBALL_TYPE, Oddball::kSize, null);
2847

2848
#undef ALLOCATE_PARTIAL_MAP
2849 2850
  }

  // Allocate the empty array.
  {
    AllocationResult allocation = AllocateEmptyFixedArray();
    if (!allocation.To(&obj)) return false;
  }
  set_empty_fixed_array(FixedArray::cast(obj));

  {
    AllocationResult allocation = Allocate(null_map(), OLD_SPACE);
    if (!allocation.To(&obj)) return false;
  }
  set_null_value(Oddball::cast(obj));
  Oddball::cast(obj)->set_kind(Oddball::kNull);

  {
    AllocationResult allocation = Allocate(undefined_map(), OLD_SPACE);
    if (!allocation.To(&obj)) return false;
  }
  set_undefined_value(Oddball::cast(obj));
  Oddball::cast(obj)->set_kind(Oddball::kUndefined);
  DCHECK(!InNewSpace(undefined_value()));

  // Set preliminary exception sentinel value before actually initializing it.
  set_exception(null_value());

  // Allocate the empty descriptor array.
  {
    AllocationResult allocation = AllocateEmptyFixedArray();
    if (!allocation.To(&obj)) return false;
  }
  set_empty_descriptor_array(DescriptorArray::cast(obj));

  // Fix the instance_descriptors for the existing maps.
  meta_map()->set_code_cache(empty_fixed_array());
  meta_map()->set_dependent_code(DependentCode::cast(empty_fixed_array()));
  meta_map()->set_raw_transitions(Smi::FromInt(0));
  meta_map()->set_instance_descriptors(empty_descriptor_array());
  if (FLAG_unbox_double_fields) {
    meta_map()->set_layout_descriptor(LayoutDescriptor::FastPointerLayout());
  }

  fixed_array_map()->set_code_cache(empty_fixed_array());
  fixed_array_map()->set_dependent_code(
      DependentCode::cast(empty_fixed_array()));
  fixed_array_map()->set_raw_transitions(Smi::FromInt(0));
  fixed_array_map()->set_instance_descriptors(empty_descriptor_array());
  if (FLAG_unbox_double_fields) {
    fixed_array_map()->set_layout_descriptor(
        LayoutDescriptor::FastPointerLayout());
  }

  undefined_map()->set_code_cache(empty_fixed_array());
  undefined_map()->set_dependent_code(DependentCode::cast(empty_fixed_array()));
  undefined_map()->set_raw_transitions(Smi::FromInt(0));
  undefined_map()->set_instance_descriptors(empty_descriptor_array());
  if (FLAG_unbox_double_fields) {
    undefined_map()->set_layout_descriptor(
        LayoutDescriptor::FastPointerLayout());
  }

  null_map()->set_code_cache(empty_fixed_array());
  null_map()->set_dependent_code(DependentCode::cast(empty_fixed_array()));
  null_map()->set_raw_transitions(Smi::FromInt(0));
  null_map()->set_instance_descriptors(empty_descriptor_array());
  if (FLAG_unbox_double_fields) {
    null_map()->set_layout_descriptor(LayoutDescriptor::FastPointerLayout());
  }

  // Fix prototype object for existing maps.
  meta_map()->set_prototype(null_value());
  meta_map()->set_constructor_or_backpointer(null_value());

  fixed_array_map()->set_prototype(null_value());
  fixed_array_map()->set_constructor_or_backpointer(null_value());

  undefined_map()->set_prototype(null_value());
  undefined_map()->set_constructor_or_backpointer(null_value());

  null_map()->set_prototype(null_value());
  null_map()->set_constructor_or_backpointer(null_value());

  {  // Map allocation
#define ALLOCATE_MAP(instance_type, size, field_name)               \
  {                                                                 \
    Map* map;                                                       \
    if (!AllocateMap((instance_type), size).To(&map)) return false; \
    set_##field_name##_map(map);                                    \
  }

#define ALLOCATE_VARSIZE_MAP(instance_type, field_name) \
  ALLOCATE_MAP(instance_type, kVariableSizeSentinel, field_name)

    ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, fixed_cow_array)
    DCHECK(fixed_array_map() != fixed_cow_array_map());

    ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, scope_info)
    ALLOCATE_MAP(HEAP_NUMBER_TYPE, HeapNumber::kSize, heap_number)
    ALLOCATE_MAP(MUTABLE_HEAP_NUMBER_TYPE, HeapNumber::kSize,
                 mutable_heap_number)
    ALLOCATE_MAP(FLOAT32X4_TYPE, Float32x4::kSize, float32x4)
    ALLOCATE_MAP(SYMBOL_TYPE, Symbol::kSize, symbol)
    ALLOCATE_MAP(FOREIGN_TYPE, Foreign::kSize, foreign)

    ALLOCATE_MAP(ODDBALL_TYPE, Oddball::kSize, the_hole);
    ALLOCATE_MAP(ODDBALL_TYPE, Oddball::kSize, boolean);
    ALLOCATE_MAP(ODDBALL_TYPE, Oddball::kSize, uninitialized);
    ALLOCATE_MAP(ODDBALL_TYPE, Oddball::kSize, arguments_marker);
    ALLOCATE_MAP(ODDBALL_TYPE, Oddball::kSize, no_interceptor_result_sentinel);
    ALLOCATE_MAP(ODDBALL_TYPE, Oddball::kSize, exception);
    ALLOCATE_MAP(ODDBALL_TYPE, Oddball::kSize, termination_exception);

    for (unsigned i = 0; i < arraysize(string_type_table); i++) {
      const StringTypeTable& entry = string_type_table[i];
      {
        AllocationResult allocation = AllocateMap(entry.type, entry.size);
        if (!allocation.To(&obj)) return false;
      }
      // Mark cons string maps as unstable, because their objects can change
      // maps during GC.
      Map* map = Map::cast(obj);
      if (StringShape(entry.type).IsCons()) map->mark_unstable();
      roots_[entry.index] = map;
    }

    {  // Create a separate external one byte string map for native sources.
      AllocationResult allocation = AllocateMap(EXTERNAL_ONE_BYTE_STRING_TYPE,
                                                ExternalOneByteString::kSize);
      if (!allocation.To(&obj)) return false;
      set_native_source_string_map(Map::cast(obj));
    }

    ALLOCATE_VARSIZE_MAP(FIXED_DOUBLE_ARRAY_TYPE, fixed_double_array)
    ALLOCATE_VARSIZE_MAP(BYTE_ARRAY_TYPE, byte_array)
    ALLOCATE_VARSIZE_MAP(FREE_SPACE_TYPE, free_space)

#define ALLOCATE_EXTERNAL_ARRAY_MAP(Type, type, TYPE, ctype, size) \
  ALLOCATE_MAP(EXTERNAL_##TYPE##_ARRAY_TYPE, ExternalArray::kSize, \
               external_##type##_array)

    TYPED_ARRAYS(ALLOCATE_EXTERNAL_ARRAY_MAP)
#undef ALLOCATE_EXTERNAL_ARRAY_MAP

#define ALLOCATE_FIXED_TYPED_ARRAY_MAP(Type, type, TYPE, ctype, size) \
  ALLOCATE_VARSIZE_MAP(FIXED_##TYPE##_ARRAY_TYPE, fixed_##type##_array)

    TYPED_ARRAYS(ALLOCATE_FIXED_TYPED_ARRAY_MAP)
#undef ALLOCATE_FIXED_TYPED_ARRAY_MAP

    ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, sloppy_arguments_elements)

    ALLOCATE_VARSIZE_MAP(CODE_TYPE, code)

    ALLOCATE_MAP(CELL_TYPE, Cell::kSize, cell)
    ALLOCATE_MAP(PROPERTY_CELL_TYPE, PropertyCell::kSize, global_property_cell)
    ALLOCATE_MAP(WEAK_CELL_TYPE, WeakCell::kSize, weak_cell)
    ALLOCATE_MAP(FILLER_TYPE, kPointerSize, one_pointer_filler)
    ALLOCATE_MAP(FILLER_TYPE, 2 * kPointerSize, two_pointer_filler)


    for (unsigned i = 0; i < arraysize(struct_table); i++) {
      const StructTable& entry = struct_table[i];
      Map* map;
      if (!AllocateMap(entry.type, entry.size).To(&map)) return false;
      roots_[entry.index] = map;
    }

    ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, hash_table)
    ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, ordered_hash_table)

    ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, function_context)
    ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, catch_context)
    ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, with_context)
    ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, block_context)
    ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, module_context)
    ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, script_context)
    ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, script_context_table)

    ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, native_context)
    native_context_map()->set_dictionary_map(true);
    native_context_map()->set_visitor_id(
        StaticVisitorBase::kVisitNativeContext);

    ALLOCATE_MAP(SHARED_FUNCTION_INFO_TYPE, SharedFunctionInfo::kAlignedSize,
                 shared_function_info)

    ALLOCATE_MAP(JS_MESSAGE_OBJECT_TYPE, JSMessageObject::kSize, message_object)
    ALLOCATE_MAP(JS_OBJECT_TYPE, JSObject::kHeaderSize + kPointerSize, external)
    external_map()->set_is_extensible(false);
#undef ALLOCATE_VARSIZE_MAP
#undef ALLOCATE_MAP
  }

  {  // Empty arrays
    {
      ByteArray* byte_array;
      if (!AllocateByteArray(0, TENURED).To(&byte_array)) return false;
      set_empty_byte_array(byte_array);
    }

#define ALLOCATE_EMPTY_EXTERNAL_ARRAY(Type, type, TYPE, ctype, size)  \
  {                                                                   \
    ExternalArray* obj;                                               \
    if (!AllocateEmptyExternalArray(kExternal##Type##Array).To(&obj)) \
      return false;                                                   \
    set_empty_external_##type##_array(obj);                           \
  }

    TYPED_ARRAYS(ALLOCATE_EMPTY_EXTERNAL_ARRAY)
#undef ALLOCATE_EMPTY_EXTERNAL_ARRAY

#define ALLOCATE_EMPTY_FIXED_TYPED_ARRAY(Type, type, TYPE, ctype, size) \
  {                                                                     \
    FixedTypedArrayBase* obj;                                           \
    if (!AllocateEmptyFixedTypedArray(kExternal##Type##Array).To(&obj)) \
      return false;                                                     \
    set_empty_fixed_##type##_array(obj);                                \
  }

    TYPED_ARRAYS(ALLOCATE_EMPTY_FIXED_TYPED_ARRAY)
#undef ALLOCATE_EMPTY_FIXED_TYPED_ARRAY
  }
  DCHECK(!InNewSpace(empty_fixed_array()));
  return true;
}


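// Note: the allocation below uses kDoubleUnaligned, presumably so that the
// 8-byte value field following the map word ends up double-aligned.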
AllocationResult Heap::AllocateHeapNumber(double value, MutableMode mode,
                                          PretenureFlag pretenure) {
  // Statically ensure that it is safe to allocate heap numbers in paged
  // spaces.
  int size = HeapNumber::kSize;
  STATIC_ASSERT(HeapNumber::kSize <= Page::kMaxRegularHeapObjectSize);

  AllocationSpace space = SelectSpace(size, pretenure);

  HeapObject* result;
  {
    AllocationResult allocation =
        AllocateRaw(size, space, OLD_SPACE, kDoubleUnaligned);
    if (!allocation.To(&result)) return allocation;
  }

  Map* map = mode == MUTABLE ? mutable_heap_number_map() : heap_number_map();
  HeapObject::cast(result)->set_map_no_write_barrier(map);
  HeapNumber::cast(result)->set_value(value);
  return result;
}


AllocationResult Heap::AllocateFloat32x4(float w, float x, float y, float z,
                                         PretenureFlag pretenure) {
  // Statically ensure that it is safe to allocate SIMD values in paged
  // spaces.
  int size = Float32x4::kSize;
  STATIC_ASSERT(Float32x4::kSize <= Page::kMaxRegularHeapObjectSize);

  AllocationSpace space = SelectSpace(size, pretenure);

  HeapObject* result;
  {
    AllocationResult allocation =
        AllocateRaw(size, space, OLD_SPACE, kSimd128Unaligned);
    if (!allocation.To(&result)) return allocation;
  }

  result->set_map_no_write_barrier(float32x4_map());
  Float32x4* float32x4 = Float32x4::cast(result);
  float32x4->set_lane(0, w);
  float32x4->set_lane(1, x);
  float32x4->set_lane(2, y);
  float32x4->set_lane(3, z);
  return result;
}


AllocationResult Heap::AllocateCell(Object* value) {
  int size = Cell::kSize;
  STATIC_ASSERT(Cell::kSize <= Page::kMaxRegularHeapObjectSize);

  HeapObject* result;
  {
    AllocationResult allocation = AllocateRaw(size, OLD_SPACE, OLD_SPACE);
    if (!allocation.To(&result)) return allocation;
  }
  result->set_map_no_write_barrier(cell_map());
  Cell::cast(result)->set_value(value);
  return result;
}


AllocationResult Heap::AllocatePropertyCell() {
  int size = PropertyCell::kSize;
  STATIC_ASSERT(PropertyCell::kSize <= Page::kMaxRegularHeapObjectSize);

  HeapObject* result;
  AllocationResult allocation = AllocateRaw(size, OLD_SPACE, OLD_SPACE);
  if (!allocation.To(&result)) return allocation;

  result->set_map_no_write_barrier(global_property_cell_map());
  PropertyCell* cell = PropertyCell::cast(result);
  cell->set_dependent_code(DependentCode::cast(empty_fixed_array()),
                           SKIP_WRITE_BARRIER);
  cell->set_property_details(PropertyDetails(Smi::FromInt(0)));
  cell->set_value(the_hole_value());
  return result;
}


AllocationResult Heap::AllocateWeakCell(HeapObject* value) {
  int size = WeakCell::kSize;
  STATIC_ASSERT(WeakCell::kSize <= Page::kMaxRegularHeapObjectSize);
  HeapObject* result = NULL;
  {
    AllocationResult allocation = AllocateRaw(size, OLD_SPACE, OLD_SPACE);
    if (!allocation.To(&result)) return allocation;
  }
  result->set_map_no_write_barrier(weak_cell_map());
  WeakCell::cast(result)->initialize(value);
  WeakCell::cast(result)->clear_next(this);
  return result;
}


void Heap::CreateApiObjects() {
  HandleScope scope(isolate());
  Factory* factory = isolate()->factory();
  Handle<Map> new_neander_map =
      factory->NewMap(JS_OBJECT_TYPE, JSObject::kHeaderSize);

  // Don't use Smi-only elements optimizations for objects with the neander
  // map. There are too many cases where element values are set directly with a
  // bottleneck to trap the Smi-only -> fast elements transition, and there
  // appears to be no benefit to optimizing this case.
  new_neander_map->set_elements_kind(TERMINAL_FAST_ELEMENTS_KIND);
  set_neander_map(*new_neander_map);

  Handle<JSObject> listeners = factory->NewNeanderObject();
  Handle<FixedArray> elements = factory->NewFixedArray(2);
  elements->set(0, Smi::FromInt(0));
  listeners->set_elements(*elements);
  set_message_listeners(*listeners);
}


void Heap::CreateJSEntryStub() {
  JSEntryStub stub(isolate(), StackFrame::ENTRY);
  set_js_entry_code(*stub.GetCode());
}


void Heap::CreateJSConstructEntryStub() {
  JSEntryStub stub(isolate(), StackFrame::ENTRY_CONSTRUCT);
  set_js_construct_entry_code(*stub.GetCode());
}


void Heap::CreateFixedStubs() {
  // Here we create roots for fixed stubs. They are needed at GC
  // for cooking and uncooking (check out frames.cc).
  // This eliminates the need for doing a dictionary lookup in the
  // stub cache for these stubs.
  HandleScope scope(isolate());

  // Create stubs that should be there, so we don't unexpectedly have to
  // create them if we need them during the creation of another stub.
  // Stub creation mixes raw pointers and handles in an unsafe manner so
  // we cannot create stubs while we are creating stubs.
  CodeStub::GenerateStubsAheadOfTime(isolate());

  // MacroAssembler::Abort calls (usually enabled with --debug-code) depend on
  // CEntryStub, so we need to call GenerateStubsAheadOfTime before JSEntryStub
  // is created.

  // gcc-4.4 has a problem generating correct code for the following snippet:
  // {  JSEntryStub stub;
  //    js_entry_code_ = *stub.GetCode();
  // }
  // {  JSConstructEntryStub stub;
  //    js_construct_entry_code_ = *stub.GetCode();
  // }
  // To work around the problem, make separate functions without inlining.
  Heap::CreateJSEntryStub();
  Heap::CreateJSConstructEntryStub();
}


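// Creates the non-map roots: canonical heap numbers, oddballs, the string
// table, caches and dictionaries. Ordering matters; e.g. -0 must exist before
// NewNumber works, and the real hole oddball can only be created once the
// string table and the oddball maps are in place.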
void Heap::CreateInitialObjects() {
  HandleScope scope(isolate());
  Factory* factory = isolate()->factory();

  // The -0 value must be set before NewNumber works.
  set_minus_zero_value(*factory->NewHeapNumber(-0.0, IMMUTABLE, TENURED));
  DCHECK(std::signbit(minus_zero_value()->Number()) != 0);

  set_nan_value(*factory->NewHeapNumber(
      std::numeric_limits<double>::quiet_NaN(), IMMUTABLE, TENURED));
  set_infinity_value(*factory->NewHeapNumber(V8_INFINITY, IMMUTABLE, TENURED));
  set_minus_infinity_value(
      *factory->NewHeapNumber(-V8_INFINITY, IMMUTABLE, TENURED));

  // The hole has not been created yet, but we want to put something
  // predictable in the gaps in the string table, so let's make that Smi zero.
  set_the_hole_value(reinterpret_cast<Oddball*>(Smi::FromInt(0)));

  // Allocate initial string table.
  set_string_table(*StringTable::New(isolate(), kInitialStringTableSize));

  // Finish initializing oddballs after creating the string table.
  Oddball::Initialize(isolate(), factory->undefined_value(), "undefined",
                      factory->nan_value(), Oddball::kUndefined);

  // Initialize the null_value.
  Oddball::Initialize(isolate(), factory->null_value(), "null",
                      handle(Smi::FromInt(0), isolate()), Oddball::kNull);

  set_true_value(*factory->NewOddball(factory->boolean_map(), "true",
                                      handle(Smi::FromInt(1), isolate()),
                                      Oddball::kTrue));

  set_false_value(*factory->NewOddball(factory->boolean_map(), "false",
                                       handle(Smi::FromInt(0), isolate()),
                                       Oddball::kFalse));

  set_the_hole_value(*factory->NewOddball(factory->the_hole_map(), "hole",
                                          handle(Smi::FromInt(-1), isolate()),
                                          Oddball::kTheHole));

  set_uninitialized_value(*factory->NewOddball(
      factory->uninitialized_map(), "uninitialized",
      handle(Smi::FromInt(-1), isolate()), Oddball::kUninitialized));

  set_arguments_marker(*factory->NewOddball(
      factory->arguments_marker_map(), "arguments_marker",
      handle(Smi::FromInt(-4), isolate()), Oddball::kArgumentMarker));

  set_no_interceptor_result_sentinel(*factory->NewOddball(
      factory->no_interceptor_result_sentinel_map(),
      "no_interceptor_result_sentinel", handle(Smi::FromInt(-2), isolate()),
      Oddball::kOther));

  set_termination_exception(*factory->NewOddball(
      factory->termination_exception_map(), "termination_exception",
      handle(Smi::FromInt(-3), isolate()), Oddball::kOther));

  set_exception(*factory->NewOddball(factory->exception_map(), "exception",
                                     handle(Smi::FromInt(-5), isolate()),
                                     Oddball::kException));

  for (unsigned i = 0; i < arraysize(constant_string_table); i++) {
    Handle<String> str =
        factory->InternalizeUtf8String(constant_string_table[i].contents);
    roots_[constant_string_table[i].index] = *str;
  }

  // Allocate the hidden string which is used to identify the hidden properties
  // in JSObjects. The hash code has a special value so that it will not match
  // the empty string when searching for the property. It cannot be part of the
  // loop above because it needs to be allocated manually with the special
  // hash code in place. The hash code for the hidden_string is zero to ensure
  // that it will always be at the first entry in property descriptors.
  hidden_string_ = *factory->NewOneByteInternalizedString(
      OneByteVector("", 0), String::kEmptyStringHash);

  // Create the code_stubs dictionary. The initial size is set to avoid
  // expanding the dictionary during bootstrapping.
  set_code_stubs(*UnseededNumberDictionary::New(isolate(), 128));

  // Create the non_monomorphic_cache used in stub-cache.cc. The initial size
  // is set to avoid expanding the dictionary during bootstrapping.
  set_non_monomorphic_cache(*UnseededNumberDictionary::New(isolate(), 64));

  set_polymorphic_code_cache(PolymorphicCodeCache::cast(
      *factory->NewStruct(POLYMORPHIC_CODE_CACHE_TYPE)));

  set_instanceof_cache_function(Smi::FromInt(0));
  set_instanceof_cache_map(Smi::FromInt(0));
  set_instanceof_cache_answer(Smi::FromInt(0));

  {
    HandleScope scope(isolate());
#define SYMBOL_INIT(name)                                                   \
  {                                                                         \
    Handle<String> name##d = factory->NewStringFromStaticChars(#name);      \
    Handle<Object> symbol(isolate()->factory()->NewPrivateSymbol(name##d)); \
    roots_[k##name##RootIndex] = *symbol;                                   \
  }
    PRIVATE_SYMBOL_LIST(SYMBOL_INIT)
#undef SYMBOL_INIT
  }

  {
    HandleScope scope(isolate());
#define SYMBOL_INIT(name, varname, description)                             \
  Handle<Symbol> name = factory->NewSymbol();                               \
  Handle<String> name##d = factory->NewStringFromStaticChars(#description); \
  name->set_name(*name##d);                                                 \
  roots_[k##name##RootIndex] = *name;
    PUBLIC_SYMBOL_LIST(SYMBOL_INIT)
#undef SYMBOL_INIT
  }

  CreateFixedStubs();

  // Allocate the dictionary of intrinsic function names.
  Handle<NameDictionary> intrinsic_names =
      NameDictionary::New(isolate(), Runtime::kNumFunctions, TENURED);
  Runtime::InitializeIntrinsicFunctionNames(isolate(), intrinsic_names);
  set_intrinsic_function_names(*intrinsic_names);

  set_number_string_cache(
      *factory->NewFixedArray(kInitialNumberStringCacheSize * 2, TENURED));

  // Allocate cache for single character one byte strings.
  set_single_character_string_cache(
      *factory->NewFixedArray(String::kMaxOneByteCharCode + 1, TENURED));

  // Allocate cache for string split and regexp-multiple.
  set_string_split_cache(*factory->NewFixedArray(
      RegExpResultsCache::kRegExpResultsCacheSize, TENURED));
  set_regexp_multiple_cache(*factory->NewFixedArray(
      RegExpResultsCache::kRegExpResultsCacheSize, TENURED));

  // Allocate cache for external strings pointing to native source code.
  set_natives_source_cache(
      *factory->NewFixedArray(Natives::GetBuiltinsCount()));

  set_experimental_natives_source_cache(
      *factory->NewFixedArray(ExperimentalNatives::GetBuiltinsCount()));

  set_extra_natives_source_cache(
      *factory->NewFixedArray(ExtraNatives::GetBuiltinsCount()));

  set_code_stub_natives_source_cache(
      *factory->NewFixedArray(CodeStubNatives::GetBuiltinsCount()));

  set_undefined_cell(*factory->NewCell(factory->undefined_value()));

  // The symbol registry is initialized lazily.
  set_symbol_registry(Smi::FromInt(0));

  // Allocate object to hold object observation state.
  set_observation_state(*factory->NewJSObjectFromMap(
      factory->NewMap(JS_OBJECT_TYPE, JSObject::kHeaderSize)));

  // Microtask queue uses the empty fixed array as a sentinel for "empty".
  // Number of queued microtasks stored in Isolate::pending_microtask_count().
  set_microtask_queue(empty_fixed_array());

  {
    FeedbackVectorSpec spec(0, Code::KEYED_LOAD_IC);
    Handle<TypeFeedbackVector> dummy_vector =
        factory->NewTypeFeedbackVector(&spec);
    dummy_vector->Set(FeedbackVectorICSlot(0),
                      *TypeFeedbackVector::MegamorphicSentinel(isolate()),
                      SKIP_WRITE_BARRIER);
    set_keyed_load_dummy_vector(*dummy_vector);
  }

  if (FLAG_vector_stores) {
    FeedbackVectorSpec spec(0, Code::KEYED_STORE_IC);
    Handle<TypeFeedbackVector> dummy_vector =
        factory->NewTypeFeedbackVector(&spec);
    dummy_vector->Set(FeedbackVectorICSlot(0),
                      *TypeFeedbackVector::MegamorphicSentinel(isolate()),
                      SKIP_WRITE_BARRIER);
    set_keyed_store_dummy_vector(*dummy_vector);
  } else {
    set_keyed_store_dummy_vector(empty_fixed_array());
  }

  set_detached_contexts(empty_fixed_array());
  set_retained_maps(ArrayList::cast(empty_fixed_array()));

  set_weak_object_to_code_table(
      *WeakHashTable::New(isolate(), 16, USE_DEFAULT_MINIMUM_CAPACITY,
                          TENURED));

  Handle<SeededNumberDictionary> slow_element_dictionary =
      SeededNumberDictionary::New(isolate(), 0, TENURED);
  slow_element_dictionary->set_requires_slow_elements();
  set_empty_slow_element_dictionary(*slow_element_dictionary);

  set_materialized_objects(*factory->NewFixedArray(0, TENURED));

  // Handling of script id generation is in Factory::NewScript.
  set_last_script_id(Smi::FromInt(v8::UnboundScript::kNoScriptId));

  Handle<PropertyCell> cell = factory->NewPropertyCell();
  cell->set_value(Smi::FromInt(Isolate::kArrayProtectorValid));
  set_array_protector(*cell);

  cell = factory->NewPropertyCell();
  cell->set_value(the_hole_value());
  set_empty_property_cell(*cell);

  set_weak_stack_trace_list(Smi::FromInt(0));

  set_allocation_sites_scratchpad(
      *factory->NewFixedArray(kAllocationSiteScratchpadSize, TENURED));
  InitializeAllocationSitesScratchpad();

  // Initialize keyed lookup cache.
  isolate_->keyed_lookup_cache()->Clear();

  // Initialize context slot cache.
  isolate_->context_slot_cache()->Clear();

  // Initialize descriptor cache.
  isolate_->descriptor_lookup_cache()->Clear();

  // Initialize compilation cache.
  isolate_->compilation_cache()->Clear();
}


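// Exposes every private root symbol on |private_intern_table| as a property
// keyed by the symbol's name string.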
void Heap::AddPrivateGlobalSymbols(Handle<Object> private_intern_table) {
#define ADD_SYMBOL_TO_PRIVATE_INTERN_TABLE(name_arg)                     \
  {                                                                      \
    Handle<Symbol> symbol(Symbol::cast(roots_[k##name_arg##RootIndex])); \
    Handle<String> name_arg##d(String::cast(symbol->name()));            \
    JSObject::AddProperty(Handle<JSObject>::cast(private_intern_table),  \
                          name_arg##d, symbol, NONE);                    \
  }
  PRIVATE_SYMBOL_LIST(ADD_SYMBOL_TO_PRIVATE_INTERN_TABLE)
#undef ADD_SYMBOL_TO_PRIVATE_INTERN_TABLE
}


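// The roots listed below are mutated after the heap is set up (caches,
// scratchpads, the string table and all Smi-valued roots), so they cannot be
// treated as constants; see RootCanBeTreatedAsConstant below.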
bool Heap::RootCanBeWrittenAfterInitialization(Heap::RootListIndex root_index) {
  switch (root_index) {
    case kStoreBufferTopRootIndex:
    case kNumberStringCacheRootIndex:
    case kInstanceofCacheFunctionRootIndex:
    case kInstanceofCacheMapRootIndex:
    case kInstanceofCacheAnswerRootIndex:
    case kCodeStubsRootIndex:
    case kNonMonomorphicCacheRootIndex:
    case kPolymorphicCodeCacheRootIndex:
    case kEmptyScriptRootIndex:
    case kSymbolRegistryRootIndex:
    case kMaterializedObjectsRootIndex:
    case kAllocationSitesScratchpadRootIndex:
    case kMicrotaskQueueRootIndex:
    case kDetachedContextsRootIndex:
    case kWeakObjectToCodeTableRootIndex:
    case kRetainedMapsRootIndex:
    case kWeakStackTraceListRootIndex:
// Smi values
#define SMI_ENTRY(type, name, Name) case k##Name##RootIndex:
      SMI_ROOT_LIST(SMI_ENTRY)
#undef SMI_ENTRY
    // String table
    case kStringTableRootIndex:
      return true;

    default:
      return false;
  }
}


bool Heap::RootCanBeTreatedAsConstant(RootListIndex root_index) {
  return !RootCanBeWrittenAfterInitialization(root_index) &&
         !InNewSpace(roots_array_start()[root_index]);
}


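// The results cache is effectively two-way set-associative: the subject
// string's hash selects a primary entry, and on a miss one more entry (the
// next cache line) is probed before giving up.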
Object* RegExpResultsCache::Lookup(Heap* heap, String* key_string,
                                   Object* key_pattern, ResultsCacheType type) {
  FixedArray* cache;
  if (!key_string->IsInternalizedString()) return Smi::FromInt(0);
  if (type == STRING_SPLIT_SUBSTRINGS) {
    DCHECK(key_pattern->IsString());
    if (!key_pattern->IsInternalizedString()) return Smi::FromInt(0);
    cache = heap->string_split_cache();
  } else {
    DCHECK(type == REGEXP_MULTIPLE_INDICES);
    DCHECK(key_pattern->IsFixedArray());
    cache = heap->regexp_multiple_cache();
  }

  uint32_t hash = key_string->Hash();
  uint32_t index = ((hash & (kRegExpResultsCacheSize - 1)) &
                    ~(kArrayEntriesPerCacheEntry - 1));
  if (cache->get(index + kStringOffset) == key_string &&
      cache->get(index + kPatternOffset) == key_pattern) {
    return cache->get(index + kArrayOffset);
  }
  index =
      ((index + kArrayEntriesPerCacheEntry) & (kRegExpResultsCacheSize - 1));
  if (cache->get(index + kStringOffset) == key_string &&
      cache->get(index + kPatternOffset) == key_pattern) {
    return cache->get(index + kArrayOffset);
  }
  return Smi::FromInt(0);
}


void RegExpResultsCache::Enter(Isolate* isolate, Handle<String> key_string,
                               Handle<Object> key_pattern,
                               Handle<FixedArray> value_array,
                               ResultsCacheType type) {
  Factory* factory = isolate->factory();
  Handle<FixedArray> cache;
  if (!key_string->IsInternalizedString()) return;
  if (type == STRING_SPLIT_SUBSTRINGS) {
    DCHECK(key_pattern->IsString());
    if (!key_pattern->IsInternalizedString()) return;
    cache = factory->string_split_cache();
  } else {
    DCHECK(type == REGEXP_MULTIPLE_INDICES);
    DCHECK(key_pattern->IsFixedArray());
    cache = factory->regexp_multiple_cache();
  }

  uint32_t hash = key_string->Hash();
  uint32_t index = ((hash & (kRegExpResultsCacheSize - 1)) &
                    ~(kArrayEntriesPerCacheEntry - 1));
  if (cache->get(index + kStringOffset) == Smi::FromInt(0)) {
    cache->set(index + kStringOffset, *key_string);
    cache->set(index + kPatternOffset, *key_pattern);
    cache->set(index + kArrayOffset, *value_array);
  } else {
    uint32_t index2 =
        ((index + kArrayEntriesPerCacheEntry) & (kRegExpResultsCacheSize - 1));
    if (cache->get(index2 + kStringOffset) == Smi::FromInt(0)) {
      cache->set(index2 + kStringOffset, *key_string);
      cache->set(index2 + kPatternOffset, *key_pattern);
      cache->set(index2 + kArrayOffset, *value_array);
    } else {
      cache->set(index2 + kStringOffset, Smi::FromInt(0));
      cache->set(index2 + kPatternOffset, Smi::FromInt(0));
      cache->set(index2 + kArrayOffset, Smi::FromInt(0));
      cache->set(index + kStringOffset, *key_string);
      cache->set(index + kPatternOffset, *key_pattern);
      cache->set(index + kArrayOffset, *value_array);
    }
  }
  // If the array is a reasonably short list of substrings, convert it into a
  // list of internalized strings.
  if (type == STRING_SPLIT_SUBSTRINGS && value_array->length() < 100) {
    for (int i = 0; i < value_array->length(); i++) {
      Handle<String> str(String::cast(value_array->get(i)), isolate);
      Handle<String> internalized_str = factory->InternalizeString(str);
      value_array->set(i, *internalized_str);
    }
  }
  // Convert backing store to a copy-on-write array.
  value_array->set_map_no_write_barrier(*factory->fixed_cow_array_map());
}


void RegExpResultsCache::Clear(FixedArray* cache) {
  for (int i = 0; i < kRegExpResultsCacheSize; i++) {
    cache->set(i, Smi::FromInt(0));
  }
}


int Heap::FullSizeNumberStringCacheLength() {
  // Compute the size of the number string cache based on the max new space
  // size. The number string cache has a minimum size based on twice the
  // initial cache size to ensure that it is bigger after being made
  // 'full size'.
  int number_string_cache_size = max_semi_space_size_ / 512;
  number_string_cache_size = Max(kInitialNumberStringCacheSize * 2,
                                 Min(0x4000, number_string_cache_size));
  // There is a string and a number per entry so the length is twice the number
  // of entries.
  return number_string_cache_size * 2;
}


void Heap::FlushNumberStringCache() {
  // Flush the number to string cache.
  int len = number_string_cache()->length();
  for (int i = 0; i < len; i++) {
    number_string_cache()->set_undefined(i);
  }
}


void Heap::FlushAllocationSitesScratchpad() {
  for (int i = 0; i < allocation_sites_scratchpad_length_; i++) {
    allocation_sites_scratchpad()->set_undefined(i);
  }
  allocation_sites_scratchpad_length_ = 0;
}


void Heap::InitializeAllocationSitesScratchpad() {
  DCHECK(allocation_sites_scratchpad()->length() ==
         kAllocationSiteScratchpadSize);
  for (int i = 0; i < kAllocationSiteScratchpadSize; i++) {
    allocation_sites_scratchpad()->set_undefined(i);
  }
}


void Heap::AddAllocationSiteToScratchpad(AllocationSite* site,
                                         ScratchpadSlotMode mode) {
  if (allocation_sites_scratchpad_length_ < kAllocationSiteScratchpadSize) {
    // We cannot use the normal write-barrier because slots need to be
    // recorded with non-incremental marking as well. We have to explicitly
    // record the slot to take evacuation candidates into account.
    allocation_sites_scratchpad()->set(allocation_sites_scratchpad_length_,
                                       site, SKIP_WRITE_BARRIER);
    Object** slot = allocation_sites_scratchpad()->RawFieldOfElementAt(
        allocation_sites_scratchpad_length_);

    if (mode == RECORD_SCRATCHPAD_SLOT) {
      // We need to allow slots buffer overflow here since the evacuation
      // candidates are not part of the global list of old space pages and
      // releasing an evacuation candidate due to a slots buffer overflow
      // results in lost pages.
      mark_compact_collector()->RecordSlot(slot, slot, *slot,
                                           SlotsBuffer::IGNORE_OVERFLOW);
    }
    allocation_sites_scratchpad_length_++;
  }
}


Map* Heap::MapForExternalArrayType(ExternalArrayType array_type) {
  return Map::cast(roots_[RootIndexForExternalArrayType(array_type)]);
}


Heap::RootListIndex Heap::RootIndexForExternalArrayType(
    ExternalArrayType array_type) {
  switch (array_type) {
#define ARRAY_TYPE_TO_ROOT_INDEX(Type, type, TYPE, ctype, size) \
  case kExternal##Type##Array:                                  \
    return kExternal##Type##ArrayMapRootIndex;

    TYPED_ARRAYS(ARRAY_TYPE_TO_ROOT_INDEX)
#undef ARRAY_TYPE_TO_ROOT_INDEX

    default:
      UNREACHABLE();
      return kUndefinedValueRootIndex;
  }
}


Map* Heap::MapForFixedTypedArray(ExternalArrayType array_type) {
  return Map::cast(roots_[RootIndexForFixedTypedArray(array_type)]);
}


Heap::RootListIndex Heap::RootIndexForFixedTypedArray(
    ExternalArrayType array_type) {
  switch (array_type) {
#define ARRAY_TYPE_TO_ROOT_INDEX(Type, type, TYPE, ctype, size) \
  case kExternal##Type##Array:                                  \
    return kFixed##Type##ArrayMapRootIndex;

    TYPED_ARRAYS(ARRAY_TYPE_TO_ROOT_INDEX)
#undef ARRAY_TYPE_TO_ROOT_INDEX

    default:
      UNREACHABLE();
      return kUndefinedValueRootIndex;
  }
}


Heap::RootListIndex Heap::RootIndexForEmptyExternalArray(
    ElementsKind elementsKind) {
  switch (elementsKind) {
#define ELEMENT_KIND_TO_ROOT_INDEX(Type, type, TYPE, ctype, size) \
  case EXTERNAL_##TYPE##_ELEMENTS:                                \
    return kEmptyExternal##Type##ArrayRootIndex;

    TYPED_ARRAYS(ELEMENT_KIND_TO_ROOT_INDEX)
#undef ELEMENT_KIND_TO_ROOT_INDEX

    default:
      UNREACHABLE();
      return kUndefinedValueRootIndex;
  }
}


Heap::RootListIndex Heap::RootIndexForEmptyFixedTypedArray(
    ElementsKind elementsKind) {
  switch (elementsKind) {
#define ELEMENT_KIND_TO_ROOT_INDEX(Type, type, TYPE, ctype, size) \
  case TYPE##_ELEMENTS:                                           \
    return kEmptyFixed##Type##ArrayRootIndex;

    TYPED_ARRAYS(ELEMENT_KIND_TO_ROOT_INDEX)
#undef ELEMENT_KIND_TO_ROOT_INDEX
    default:
      UNREACHABLE();
      return kUndefinedValueRootIndex;
  }
}


ExternalArray* Heap::EmptyExternalArrayForMap(Map* map) {
  return ExternalArray::cast(
      roots_[RootIndexForEmptyExternalArray(map->elements_kind())]);
}


FixedTypedArrayBase* Heap::EmptyFixedTypedArrayForMap(Map* map) {
  return FixedTypedArrayBase::cast(
      roots_[RootIndexForEmptyFixedTypedArray(map->elements_kind())]);
}


AllocationResult Heap::AllocateForeign(Address address,
                                       PretenureFlag pretenure) {
  // Statically ensure that it is safe to allocate foreigns in paged spaces.
  STATIC_ASSERT(Foreign::kSize <= Page::kMaxRegularHeapObjectSize);
  AllocationSpace space = (pretenure == TENURED) ? OLD_SPACE : NEW_SPACE;
  Foreign* result;
  AllocationResult allocation = Allocate(foreign_map(), space);
  if (!allocation.To(&result)) return allocation;
  result->set_foreign_address(address);
  return result;
}


AllocationResult Heap::AllocateByteArray(int length, PretenureFlag pretenure) {
  if (length < 0 || length > ByteArray::kMaxLength) {
    v8::internal::Heap::FatalProcessOutOfMemory("invalid array length", true);
  }
  int size = ByteArray::SizeFor(length);
  AllocationSpace space = SelectSpace(size, pretenure);
  HeapObject* result;
  {
    AllocationResult allocation = AllocateRaw(size, space, OLD_SPACE);
    if (!allocation.To(&result)) return allocation;
  }

  result->set_map_no_write_barrier(byte_array_map());
  ByteArray::cast(result)->set_length(length);
  return result;
}


void Heap::CreateFillerObjectAt(Address addr, int size) {
  if (size == 0) return;
  HeapObject* filler = HeapObject::FromAddress(addr);
  if (size == kPointerSize) {
    filler->set_map_no_write_barrier(raw_unchecked_one_pointer_filler_map());
  } else if (size == 2 * kPointerSize) {
    filler->set_map_no_write_barrier(raw_unchecked_two_pointer_filler_map());
  } else {
    filler->set_map_no_write_barrier(raw_unchecked_free_space_map());
    FreeSpace::cast(filler)->nobarrier_set_size(size);
  }
  // At this point, we may be deserializing the heap from a snapshot, and
  // none of the maps have been created yet and are NULL.
  DCHECK((filler->map() == NULL && !deserialization_complete_) ||
         filler->map()->IsMap());
}


bool Heap::CanMoveObjectStart(HeapObject* object) {
  Address address = object->address();

  if (lo_space()->Contains(object)) return false;

  Page* page = Page::FromAddress(address);
  // We can move the object start if:
  // (1) the object is not in old space,
  // (2) the page of the object was already swept,
  // (3) the page was already concurrently swept. This case is an optimization
  // for concurrent sweeping. The WasSwept predicate for concurrently swept
  // pages is set after sweeping all pages.
  return !InOldSpace(address) || page->WasSwept() || page->SweepingCompleted();
}


void Heap::AdjustLiveBytes(Address address, int by, InvocationMode mode) {
  if (incremental_marking()->IsMarking() &&
      Marking::IsBlack(Marking::MarkBitFrom(address))) {
    if (mode == SEQUENTIAL_TO_SWEEPER) {
      MemoryChunk::IncrementLiveBytesFromGC(address, by);
    } else {
      MemoryChunk::IncrementLiveBytesFromMutator(address, by);
    }
  }
}


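// Left-trims |object| by |elements_to_trim| elements: the vacated prefix is
// overwritten with a filler object and a new map/length header is written at
// the new start address, so the remaining tail becomes the array callers use
// from then on.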
FixedArrayBase* Heap::LeftTrimFixedArray(FixedArrayBase* object,
                                         int elements_to_trim) {
  DCHECK(!object->IsFixedTypedArrayBase());
  const int element_size = object->IsFixedArray() ? kPointerSize : kDoubleSize;
  const int bytes_to_trim = elements_to_trim * element_size;
  Map* map = object->map();

  // For now this trick is only applied to objects in new and paged space.
  // In large object space the object's start must coincide with the chunk
  // and thus the trick is just not applicable.
  DCHECK(!lo_space()->Contains(object));
  DCHECK(object->map() != fixed_cow_array_map());

  STATIC_ASSERT(FixedArrayBase::kMapOffset == 0);
  STATIC_ASSERT(FixedArrayBase::kLengthOffset == kPointerSize);
  STATIC_ASSERT(FixedArrayBase::kHeaderSize == 2 * kPointerSize);

  const int len = object->length();
  DCHECK(elements_to_trim <= len);

  // Calculate location of new array start.
  Address new_start = object->address() + bytes_to_trim;

  // Technically in new space this write might be omitted (except for
  // debug mode which iterates through the heap), but to play safer
  // we still do it.
  CreateFillerObjectAt(object->address(), bytes_to_trim);

  // Initialize header of the trimmed array. Since left trimming is only
  // performed on pages which are not concurrently swept, creating a filler
  // object does not require synchronization.
  DCHECK(CanMoveObjectStart(object));
  Object** former_start = HeapObject::RawField(object, 0);
  int new_start_index = elements_to_trim * (element_size / kPointerSize);
  former_start[new_start_index] = map;
  former_start[new_start_index + 1] = Smi::FromInt(len - elements_to_trim);
  FixedArrayBase* new_object =
      FixedArrayBase::cast(HeapObject::FromAddress(new_start));

  // Maintain consistency of live bytes during incremental marking.
  marking()->TransferMark(object->address(), new_start);
  AdjustLiveBytes(new_start, -bytes_to_trim, Heap::CONCURRENT_TO_SWEEPER);

  // Notify the heap profiler of change in object layout.
  OnMoveEvent(new_object, object, new_object->Size());
  return new_object;
}


// Force instantiation of templatized method.
template void Heap::RightTrimFixedArray<Heap::SEQUENTIAL_TO_SWEEPER>(
    FixedArrayBase*, int);
template void Heap::RightTrimFixedArray<Heap::CONCURRENT_TO_SWEEPER>(
    FixedArrayBase*, int);


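// Right-trims |object| by |elements_to_trim| elements: a filler is written
// over the freed tail (except in large object space) and the new length is
// stored with a release store so the concurrent sweeper sees a consistent
// header.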
template<Heap::InvocationMode mode>
void Heap::RightTrimFixedArray(FixedArrayBase* object, int elements_to_trim) {
  const int len = object->length();
  DCHECK(elements_to_trim < len);

  int bytes_to_trim;
  if (object->IsFixedTypedArrayBase()) {
    InstanceType type = object->map()->instance_type();
    bytes_to_trim =
        FixedTypedArrayBase::TypedArraySize(type, len) -
        FixedTypedArrayBase::TypedArraySize(type, len - elements_to_trim);
  } else {
    const int element_size =
        object->IsFixedArray() ? kPointerSize : kDoubleSize;
    bytes_to_trim = elements_to_trim * element_size;
  }

  // For now this trick is only applied to objects in new and paged space.
  DCHECK(object->map() != fixed_cow_array_map());

  if (bytes_to_trim == 0) {
    // No need to create filler and update live bytes counters, just initialize
    // header of the trimmed array.
    object->synchronized_set_length(len - elements_to_trim);
    return;
  }

  // Calculate location of new array end.
  Address new_end = object->address() + object->Size() - bytes_to_trim;

  // Technically in new space this write might be omitted (except for
  // debug mode which iterates through the heap), but to play safer
  // we still do it.
  // We do not create a filler for objects in large object space.
  // TODO(hpayer): We should shrink the large object page if the size
  // of the object changed significantly.
  if (!lo_space()->Contains(object)) {
    CreateFillerObjectAt(new_end, bytes_to_trim);
  }

  // Initialize header of the trimmed array. We are storing the new length
  // using release store after creating a filler for the left-over space to
  // avoid races with the sweeper thread.
  object->synchronized_set_length(len - elements_to_trim);

  // Maintain consistency of live bytes during incremental marking
  AdjustLiveBytes(object->address(), -bytes_to_trim, mode);

  // Notify the heap profiler of change in object layout. The array may not be
  // moved during GC, and size has to be adjusted nevertheless.
  HeapProfiler* profiler = isolate()->heap_profiler();
  if (profiler->is_tracking_allocations()) {
    profiler->UpdateObjectSizeEvent(object->address(), object->Size());
  }
}


AllocationResult Heap::AllocateExternalArray(int length,
                                             ExternalArrayType array_type,
                                             void* external_pointer,
                                             PretenureFlag pretenure) {
  int size = ExternalArray::kSize;
  AllocationSpace space = SelectSpace(size, pretenure);
  HeapObject* result;
  {
    AllocationResult allocation = AllocateRaw(size, space, OLD_SPACE);
    if (!allocation.To(&result)) return allocation;
  }

  result->set_map_no_write_barrier(MapForExternalArrayType(array_type));
  ExternalArray::cast(result)->set_length(length);
  ExternalArray::cast(result)->set_external_pointer(external_pointer);
  return result;
}

static void ForFixedTypedArray(ExternalArrayType array_type, int* element_size,
                               ElementsKind* element_kind) {
  switch (array_type) {
#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) \
  case kExternal##Type##Array:                          \
    *element_size = size;                               \
    *element_kind = TYPE##_ELEMENTS;                    \
    return;

    TYPED_ARRAYS(TYPED_ARRAY_CASE)
#undef TYPED_ARRAY_CASE

    default:
      *element_size = 0;               // Bogus
      *element_kind = UINT8_ELEMENTS;  // Bogus
      UNREACHABLE();
  }
}


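// Float64 arrays are requested kDoubleAligned so their 8-byte elements are
// naturally aligned; all other element types get along with word alignment.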
AllocationResult Heap::AllocateFixedTypedArray(int length,
                                               ExternalArrayType array_type,
                                               bool initialize,
                                               PretenureFlag pretenure) {
  int element_size;
  ElementsKind elements_kind;
  ForFixedTypedArray(array_type, &element_size, &elements_kind);
  int size = OBJECT_POINTER_ALIGN(length * element_size +
                                  FixedTypedArrayBase::kDataOffset);
  AllocationSpace space = SelectSpace(size, pretenure);

  HeapObject* object;
  AllocationResult allocation = AllocateRaw(
      size, space, OLD_SPACE,
      array_type == kExternalFloat64Array ? kDoubleAligned : kWordAligned);
  if (!allocation.To(&object)) return allocation;

  object->set_map(MapForFixedTypedArray(array_type));
  FixedTypedArrayBase* elements = FixedTypedArrayBase::cast(object);
  elements->set_base_pointer(elements, SKIP_WRITE_BARRIER);
  elements->set_length(length);
  if (initialize) memset(elements->DataPtr(), 0, elements->DataSize());
  return elements;
}


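// Allocates a Code object of |object_size| bytes. If |immovable| is requested
// and the initial allocation landed on a movable code-space page, that
// allocation is turned into a filler and the object is re-allocated in large
// object space, whose pages are never compacted.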
AllocationResult Heap::AllocateCode(int object_size, bool immovable) {
  DCHECK(IsAligned(static_cast<intptr_t>(object_size), kCodeAlignment));
  AllocationResult allocation =
      AllocateRaw(object_size, CODE_SPACE, CODE_SPACE);

  HeapObject* result;
  if (!allocation.To(&result)) return allocation;

  if (immovable) {
    Address address = result->address();
    // Code objects which should stay at a fixed address are allocated either
    // in the first page of code space (objects on the first page of each space
    // are never moved) or in large object space.
    if (!code_space_->FirstPage()->Contains(address) &&
        MemoryChunk::FromAddress(address)->owner()->identity() != LO_SPACE) {
      // Discard the first code allocation, which was on a page where it could
      // be moved.
      CreateFillerObjectAt(result->address(), object_size);
      allocation = lo_space_->AllocateRaw(object_size, EXECUTABLE);
      if (!allocation.To(&result)) return allocation;
      OnAllocationEvent(result, object_size);
    }
  }

  result->set_map_no_write_barrier(code_map());
  Code* code = Code::cast(result);
  DCHECK(IsAligned(bit_cast<intptr_t>(code->address()), kCodeAlignment));
  DCHECK(isolate_->code_range() == NULL || !isolate_->code_range()->valid() ||
         isolate_->code_range()->contains(code->address()) ||
         object_size <= code_space()->AreaSize());
  code->set_gc_metadata(Smi::FromInt(0));
  code->set_ic_age(global_ic_age_);
  return code;
}


AllocationResult Heap::CopyCode(Code* code) {
  AllocationResult allocation;

  HeapObject* result = NULL;
  // Allocate an object the same size as the code object.
  int obj_size = code->Size();
  allocation = AllocateRaw(obj_size, CODE_SPACE, CODE_SPACE);
  if (!allocation.To(&result)) return allocation;

  // Copy code object.
  Address old_addr = code->address();
  Address new_addr = result->address();
  CopyBlock(new_addr, old_addr, obj_size);
  Code* new_code = Code::cast(result);

  // Relocate the copy.
  DCHECK(IsAligned(bit_cast<intptr_t>(new_code->address()), kCodeAlignment));
  DCHECK(isolate_->code_range() == NULL || !isolate_->code_range()->valid() ||
         isolate_->code_range()->contains(code->address()) ||
         obj_size <= code_space()->AreaSize());
  new_code->Relocate(new_addr - old_addr);
  return new_code;
}


AllocationResult Heap::CopyCode(Code* code, Vector<byte> reloc_info) {
  // Allocate ByteArray before the Code object, so that we do not risk
  // leaving an uninitialized Code object (and breaking the heap).
  ByteArray* reloc_info_array;
  {
    AllocationResult allocation =
        AllocateByteArray(reloc_info.length(), TENURED);
    if (!allocation.To(&reloc_info_array)) return allocation;
  }

  int new_body_size = RoundUp(code->instruction_size(), kObjectAlignment);

  int new_obj_size = Code::SizeFor(new_body_size);

  Address old_addr = code->address();

  size_t relocation_offset =
      static_cast<size_t>(code->instruction_end() - old_addr);

  HeapObject* result;
  AllocationResult allocation =
      AllocateRaw(new_obj_size, CODE_SPACE, CODE_SPACE);
  if (!allocation.To(&result)) return allocation;

  // Copy code object.
  Address new_addr = result->address();

  // Copy header and instructions.
  CopyBytes(new_addr, old_addr, relocation_offset);

  Code* new_code = Code::cast(result);
  new_code->set_relocation_info(reloc_info_array);

  // Copy patched rinfo.
  CopyBytes(new_code->relocation_start(), reloc_info.start(),
            static_cast<size_t>(reloc_info.length()));

  // Relocate the copy.
  DCHECK(IsAligned(bit_cast<intptr_t>(new_code->address()), kCodeAlignment));
  DCHECK(isolate_->code_range() == NULL || !isolate_->code_range()->valid() ||
         isolate_->code_range()->contains(code->address()) ||
         new_obj_size <= code_space()->AreaSize());

  new_code->Relocate(new_addr - old_addr);

#ifdef VERIFY_HEAP
  if (FLAG_verify_heap) code->ObjectVerify();
#endif
  return new_code;
}


void Heap::InitializeAllocationMemento(AllocationMemento* memento,
                                       AllocationSite* allocation_site) {
  memento->set_map_no_write_barrier(allocation_memento_map());
  DCHECK(allocation_site->map() == allocation_site_map());
  memento->set_allocation_site(allocation_site, SKIP_WRITE_BARRIER);
  if (FLAG_allocation_site_pretenuring) {
    allocation_site->IncrementMementoCreateCount();
  }
}


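// Generic map-based allocation: reserves instance_size() bytes (plus room for
// an AllocationMemento when |allocation_site| is given), installs the map and
// initializes the trailing memento, which is used for allocation-site
// pretenuring.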
AllocationResult Heap::Allocate(Map* map, AllocationSpace space,
                                AllocationSite* allocation_site) {
  DCHECK(gc_state_ == NOT_IN_GC);
  DCHECK(map->instance_type() != MAP_TYPE);
  // If allocation failures are disallowed, we may allocate in a different
  // space when new space is full and the object is not a large object.
  AllocationSpace retry_space = (space != NEW_SPACE) ? space : OLD_SPACE;
  int size = map->instance_size();
  if (allocation_site != NULL) {
    size += AllocationMemento::kSize;
  }
  HeapObject* result;
  AllocationResult allocation = AllocateRaw(size, space, retry_space);
  if (!allocation.To(&result)) return allocation;
  // No need for write barrier since object is white and map is in old space.
  result->set_map_no_write_barrier(map);
  if (allocation_site != NULL) {
    AllocationMemento* alloc_memento = reinterpret_cast<AllocationMemento*>(
        reinterpret_cast<Address>(result) + map->instance_size());
    InitializeAllocationMemento(alloc_memento, allocation_site);
  }
  return result;
}

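// Illustrative layout sketch (not authoritative): when an allocation site is
// passed to Allocate(), the memento lives in the same allocation, directly
// behind the object, which is why the requested size is bumped by
// AllocationMemento::kSize above:
//
//   | object body (map->instance_size() bytes) | AllocationMemento |
//   ^ result                                   ^ result + instance_size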

void Heap::InitializeJSObjectFromMap(JSObject* obj, FixedArray* properties,
                                     Map* map) {
  obj->set_properties(properties);
  obj->initialize_elements();
  // TODO(1240798): Initialize the object's body using valid initial values
  // according to the object's initial map.  For example, if the map's
  // instance type is JS_ARRAY_TYPE, the length field should be initialized
  // to a number (e.g. Smi::FromInt(0)) and the elements initialized to a
  // fixed array (e.g. Heap::empty_fixed_array()).  Currently, the object
  // verification code has to cope with (temporarily) invalid objects.  See,
  // for example, JSArray::JSArrayVerify().
  Object* filler;
  // We cannot always fill with one_pointer_filler_map because objects
  // created from API functions expect their internal fields to be initialized
  // with undefined_value.
  // Pre-allocated fields need to be initialized with undefined_value as well
  // so that object accesses before the constructor completes (e.g. in the
  // debugger) will not cause a crash.
  Object* constructor = map->GetConstructor();
  if (constructor->IsJSFunction() &&
      JSFunction::cast(constructor)->IsInobjectSlackTrackingInProgress()) {
    // We might want to shrink the object later.
    DCHECK(obj->GetInternalFieldCount() == 0);
    filler = Heap::one_pointer_filler_map();
  } else {
    filler = Heap::undefined_value();
  }
  obj->InitializeBody(map, Heap::undefined_value(), filler);
}


AllocationResult Heap::AllocateJSObjectFromMap(
    Map* map, PretenureFlag pretenure, AllocationSite* allocation_site) {
  // JSFunctions should be allocated using AllocateFunction to be
  // properly initialized.
  DCHECK(map->instance_type() != JS_FUNCTION_TYPE);

  // Both types of global objects should be allocated using
  // AllocateGlobalObject to be properly initialized.
  DCHECK(map->instance_type() != JS_GLOBAL_OBJECT_TYPE);
  DCHECK(map->instance_type() != JS_BUILTINS_OBJECT_TYPE);

  // Allocate the backing storage for the properties.
  FixedArray* properties = empty_fixed_array();

  // Allocate the JSObject.
  int size = map->instance_size();
  AllocationSpace space = SelectSpace(size, pretenure);
  JSObject* js_obj;
  AllocationResult allocation = Allocate(map, space, allocation_site);
  if (!allocation.To(&js_obj)) return allocation;

  // Initialize the JSObject.
  InitializeJSObjectFromMap(js_obj, properties, map);
  DCHECK(js_obj->HasFastElements() || js_obj->HasExternalArrayElements() ||
         js_obj->HasFixedTypedArrayElements());
  return js_obj;
}


AllocationResult Heap::AllocateJSObject(JSFunction* constructor,
                                        PretenureFlag pretenure,
                                        AllocationSite* allocation_site) {
  DCHECK(constructor->has_initial_map());

  // Allocate the object based on the constructor's initial map.
  AllocationResult allocation = AllocateJSObjectFromMap(
      constructor->initial_map(), pretenure, allocation_site);
#ifdef DEBUG
  // Make sure result is NOT a global object if valid.
  HeapObject* obj;
  DCHECK(!allocation.To(&obj) || !obj->IsGlobalObject());
#endif
  return allocation;
}


AllocationResult Heap::CopyJSObject(JSObject* source, AllocationSite* site) {
  // Make the clone.
  Map* map = source->map();

  // We can only clone normal objects or arrays. Copying anything else
  // will break invariants.
  CHECK(map->instance_type() == JS_OBJECT_TYPE ||
        map->instance_type() == JS_ARRAY_TYPE);

  int object_size = map->instance_size();
  HeapObject* clone;

  DCHECK(site == NULL || AllocationSite::CanTrack(map->instance_type()));

  WriteBarrierMode wb_mode = UPDATE_WRITE_BARRIER;

  // If we're forced to always allocate, we use the general allocation
  // functions which may leave us with an object in old space.
  if (always_allocate()) {
    {
      AllocationResult allocation =
          AllocateRaw(object_size, NEW_SPACE, OLD_SPACE);
      if (!allocation.To(&clone)) return allocation;
    }
    Address clone_address = clone->address();
    CopyBlock(clone_address, source->address(), object_size);

    // Update write barrier for all tagged fields that lie beyond the header.
    const int start_offset = JSObject::kHeaderSize;
    const int end_offset = object_size;

#if V8_DOUBLE_FIELDS_UNBOXING
    LayoutDescriptorHelper helper(map);
    bool has_only_tagged_fields = helper.all_fields_tagged();

    if (!has_only_tagged_fields) {
      for (int offset = start_offset; offset < end_offset;) {
        int end_of_region_offset;
        if (helper.IsTagged(offset, end_offset, &end_of_region_offset)) {
          RecordWrites(clone_address, offset,
                       (end_of_region_offset - offset) / kPointerSize);
        }
        offset = end_of_region_offset;
      }
    } else {
#endif
      // Object has only tagged fields.
      RecordWrites(clone_address, start_offset,
                   (end_offset - start_offset) / kPointerSize);
#if V8_DOUBLE_FIELDS_UNBOXING
    }
#endif

  } else {
    wb_mode = SKIP_WRITE_BARRIER;

    {
      int adjusted_object_size =
          site != NULL ? object_size + AllocationMemento::kSize : object_size;
      AllocationResult allocation =
          AllocateRaw(adjusted_object_size, NEW_SPACE, NEW_SPACE);
      if (!allocation.To(&clone)) return allocation;
    }
    SLOW_DCHECK(InNewSpace(clone));
    // Since we know the clone is allocated in new space, we can copy
    // the contents without worrying about updating the write barrier.
    CopyBlock(clone->address(), source->address(), object_size);

    if (site != NULL) {
      AllocationMemento* alloc_memento = reinterpret_cast<AllocationMemento*>(
          reinterpret_cast<Address>(clone) + object_size);
      InitializeAllocationMemento(alloc_memento, site);
    }
  }

  SLOW_DCHECK(JSObject::cast(clone)->GetElementsKind() ==
              source->GetElementsKind());
  FixedArrayBase* elements = FixedArrayBase::cast(source->elements());
  FixedArray* properties = FixedArray::cast(source->properties());
  // Update elements if necessary.
  if (elements->length() > 0) {
    FixedArrayBase* elem;
    {
      AllocationResult allocation;
      if (elements->map() == fixed_cow_array_map()) {
        allocation = FixedArray::cast(elements);
      } else if (source->HasFastDoubleElements()) {
        allocation = CopyFixedDoubleArray(FixedDoubleArray::cast(elements));
      } else {
        allocation = CopyFixedArray(FixedArray::cast(elements));
      }
      if (!allocation.To(&elem)) return allocation;
    }
    JSObject::cast(clone)->set_elements(elem, wb_mode);
  }
  // Update properties if necessary.
  if (properties->length() > 0) {
    FixedArray* prop;
    {
      AllocationResult allocation = CopyFixedArray(properties);
      if (!allocation.To(&prop)) return allocation;
    }
    JSObject::cast(clone)->set_properties(prop, wb_mode);
  }
  // Return the new clone.
  return clone;
}


static inline void WriteOneByteData(Vector<const char> vector, uint8_t* chars,
                                    int len) {
  // Only works for one byte strings.
  DCHECK(vector.length() == len);
  MemCopy(chars, vector.start(), len);
}

static inline void WriteTwoByteData(Vector<const char> vector, uint16_t* chars,
                                    int len) {
  const uint8_t* stream = reinterpret_cast<const uint8_t*>(vector.start());
  size_t stream_length = vector.length();
  while (stream_length != 0) {
    size_t consumed = 0;
    uint32_t c = unibrow::Utf8::ValueOf(stream, stream_length, &consumed);
    DCHECK(c != unibrow::Utf8::kBadChar);
    DCHECK(consumed <= stream_length);
    stream_length -= consumed;
    stream += consumed;
    if (c > unibrow::Utf16::kMaxNonSurrogateCharCode) {
      len -= 2;
      if (len < 0) break;
      *chars++ = unibrow::Utf16::LeadSurrogate(c);
      *chars++ = unibrow::Utf16::TrailSurrogate(c);
    } else {
      len -= 1;
      if (len < 0) break;
      *chars++ = c;
    }
  }
  DCHECK(stream_length == 0);
  DCHECK(len == 0);
}

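// A minimal sketch (hypothetical helper, not part of V8) of the surrogate
// split performed by the branch above for code points beyond U+FFFF.  The
// constants are the standard UTF-16 ones; e.g. U+1F600 yields the pair
// 0xD83D, 0xDE00, which is why such characters consume two output slots.
static inline void ExampleEncodeSurrogatePair(uint32_t code_point,
                                              uint16_t* lead,
                                              uint16_t* trail) {
  uint32_t v = code_point - 0x10000u;
  *lead = static_cast<uint16_t>(0xD800u + (v >> 10));
  *trail = static_cast<uint16_t>(0xDC00u + (v & 0x3FFu));
}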

static inline void WriteOneByteData(String* s, uint8_t* chars, int len) {
  DCHECK(s->length() == len);
  String::WriteToFlat(s, chars, 0, len);
}


static inline void WriteTwoByteData(String* s, uint16_t* chars, int len) {
  DCHECK(s->length() == len);
  String::WriteToFlat(s, chars, 0, len);
}


template <bool is_one_byte, typename T>
AllocationResult Heap::AllocateInternalizedStringImpl(T t, int chars,
                                                      uint32_t hash_field) {
  DCHECK(chars >= 0);
  // Compute map and object size.
  int size;
  Map* map;

  DCHECK_LE(0, chars);
  DCHECK_GE(String::kMaxLength, chars);
  if (is_one_byte) {
    map = one_byte_internalized_string_map();
    size = SeqOneByteString::SizeFor(chars);
  } else {
    map = internalized_string_map();
    size = SeqTwoByteString::SizeFor(chars);
  }
  AllocationSpace space = SelectSpace(size, TENURED);

  // Allocate string.
  HeapObject* result;
  {
    AllocationResult allocation = AllocateRaw(size, space, OLD_SPACE);
    if (!allocation.To(&result)) return allocation;
  }

  result->set_map_no_write_barrier(map);
  // Set length and hash fields of the allocated string.
  String* answer = String::cast(result);
  answer->set_length(chars);
  answer->set_hash_field(hash_field);

  DCHECK_EQ(size, answer->Size());

  if (is_one_byte) {
    WriteOneByteData(t, SeqOneByteString::cast(answer)->GetChars(), chars);
  } else {
    WriteTwoByteData(t, SeqTwoByteString::cast(answer)->GetChars(), chars);
  }
  return answer;
}


// Need explicit instantiations.
template AllocationResult Heap::AllocateInternalizedStringImpl<true>(String*,
                                                                     int,
                                                                     uint32_t);
template AllocationResult Heap::AllocateInternalizedStringImpl<false>(String*,
                                                                      int,
                                                                      uint32_t);
template AllocationResult Heap::AllocateInternalizedStringImpl<false>(
    Vector<const char>, int, uint32_t);


AllocationResult Heap::AllocateRawOneByteString(int length,
                                                PretenureFlag pretenure) {
  DCHECK_LE(0, length);
  DCHECK_GE(String::kMaxLength, length);
  int size = SeqOneByteString::SizeFor(length);
  DCHECK(size <= SeqOneByteString::kMaxSize);
  AllocationSpace space = SelectSpace(size, pretenure);

  HeapObject* result;
  {
    AllocationResult allocation = AllocateRaw(size, space, OLD_SPACE);
    if (!allocation.To(&result)) return allocation;
  }

  // Partially initialize the object.
  result->set_map_no_write_barrier(one_byte_string_map());
  String::cast(result)->set_length(length);
  String::cast(result)->set_hash_field(String::kEmptyHashField);
  DCHECK_EQ(size, HeapObject::cast(result)->Size());

  return result;
}


AllocationResult Heap::AllocateRawTwoByteString(int length,
                                                PretenureFlag pretenure) {
  DCHECK_LE(0, length);
  DCHECK_GE(String::kMaxLength, length);
  int size = SeqTwoByteString::SizeFor(length);
  DCHECK(size <= SeqTwoByteString::kMaxSize);
  AllocationSpace space = SelectSpace(size, pretenure);

  HeapObject* result;
  {
    AllocationResult allocation = AllocateRaw(size, space, OLD_SPACE);
    if (!allocation.To(&result)) return allocation;
  }

  // Partially initialize the object.
  result->set_map_no_write_barrier(string_map());
  String::cast(result)->set_length(length);
  String::cast(result)->set_hash_field(String::kEmptyHashField);
  DCHECK_EQ(size, HeapObject::cast(result)->Size());
  return result;
}


AllocationResult Heap::AllocateEmptyFixedArray() {
  int size = FixedArray::SizeFor(0);
  HeapObject* result;
  {
    AllocationResult allocation = AllocateRaw(size, OLD_SPACE, OLD_SPACE);
    if (!allocation.To(&result)) return allocation;
  }
  // Initialize the object.
  result->set_map_no_write_barrier(fixed_array_map());
  FixedArray::cast(result)->set_length(0);
  return result;
}


AllocationResult Heap::AllocateEmptyExternalArray(
    ExternalArrayType array_type) {
  return AllocateExternalArray(0, array_type, NULL, TENURED);
}


AllocationResult Heap::CopyAndTenureFixedCOWArray(FixedArray* src) {
  if (!InNewSpace(src)) {
    return src;
  }

  int len = src->length();
  HeapObject* obj;
  {
    AllocationResult allocation = AllocateRawFixedArray(len, TENURED);
    if (!allocation.To(&obj)) return allocation;
  }
  obj->set_map_no_write_barrier(fixed_array_map());
  FixedArray* result = FixedArray::cast(obj);
  result->set_length(len);

  // Copy the content
  DisallowHeapAllocation no_gc;
  WriteBarrierMode mode = result->GetWriteBarrierMode(no_gc);
  for (int i = 0; i < len; i++) result->set(i, src->get(i), mode);

  // TODO(mvstanton): The map is set twice because of protection against calling
  // set() on a COW FixedArray. Issue v8:3221 created to track this, and
  // we might then be able to remove this whole method.
  HeapObject::cast(obj)->set_map_no_write_barrier(fixed_cow_array_map());
  return result;
}

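// Note on the method above: the copy is first given the plain fixed_array_map
// so that result->set() is permitted while the contents are copied, and only
// then is the map switched to fixed_cow_array_map, because writes to an array
// already marked copy-on-write trip the COW protection mentioned in the TODO.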

AllocationResult Heap::AllocateEmptyFixedTypedArray(
    ExternalArrayType array_type) {
  return AllocateFixedTypedArray(0, array_type, false, TENURED);
}


AllocationResult Heap::CopyFixedArrayWithMap(FixedArray* src, Map* map) {
  int len = src->length();
  HeapObject* obj;
  {
    AllocationResult allocation = AllocateRawFixedArray(len, NOT_TENURED);
    if (!allocation.To(&obj)) return allocation;
  }
  if (InNewSpace(obj)) {
    obj->set_map_no_write_barrier(map);
    CopyBlock(obj->address() + kPointerSize, src->address() + kPointerSize,
              FixedArray::SizeFor(len) - kPointerSize);
    return obj;
  }
  obj->set_map_no_write_barrier(map);
  FixedArray* result = FixedArray::cast(obj);
  result->set_length(len);

  // Copy the content
  DisallowHeapAllocation no_gc;
  WriteBarrierMode mode = result->GetWriteBarrierMode(no_gc);
  for (int i = 0; i < len; i++) result->set(i, src->get(i), mode);
  return result;
}


AllocationResult Heap::CopyFixedDoubleArrayWithMap(FixedDoubleArray* src,
                                                   Map* map) {
  int len = src->length();
  HeapObject* obj;
  {
    AllocationResult allocation = AllocateRawFixedDoubleArray(len, NOT_TENURED);
    if (!allocation.To(&obj)) return allocation;
  }
  obj->set_map_no_write_barrier(map);
  CopyBlock(obj->address() + FixedDoubleArray::kLengthOffset,
            src->address() + FixedDoubleArray::kLengthOffset,
            FixedDoubleArray::SizeFor(len) - FixedDoubleArray::kLengthOffset);
  return obj;
}


AllocationResult Heap::AllocateRawFixedArray(int length,
                                             PretenureFlag pretenure) {
  if (length < 0 || length > FixedArray::kMaxLength) {
    v8::internal::Heap::FatalProcessOutOfMemory("invalid array length", true);
  }
  int size = FixedArray::SizeFor(length);
  AllocationSpace space = SelectSpace(size, pretenure);

  return AllocateRaw(size, space, OLD_SPACE);
}


AllocationResult Heap::AllocateFixedArrayWithFiller(int length,
                                                    PretenureFlag pretenure,
                                                    Object* filler) {
  DCHECK(length >= 0);
  DCHECK(empty_fixed_array()->IsFixedArray());
  if (length == 0) return empty_fixed_array();

  DCHECK(!InNewSpace(filler));
  HeapObject* result = nullptr;
  {
    AllocationResult allocation = AllocateRawFixedArray(length, pretenure);
    if (!allocation.To(&result)) return allocation;
  }

  result->set_map_no_write_barrier(fixed_array_map());
  FixedArray* array = FixedArray::cast(result);
  array->set_length(length);
  MemsetPointer(array->data_start(), filler, length);
  return array;
}


AllocationResult Heap::AllocateFixedArray(int length, PretenureFlag pretenure) {
  return AllocateFixedArrayWithFiller(length, pretenure, undefined_value());
}


AllocationResult Heap::AllocateUninitializedFixedArray(int length) {
  if (length == 0) return empty_fixed_array();

  HeapObject* obj;
  {
    AllocationResult allocation = AllocateRawFixedArray(length, NOT_TENURED);
    if (!allocation.To(&obj)) return allocation;
  }

  obj->set_map_no_write_barrier(fixed_array_map());
  FixedArray::cast(obj)->set_length(length);
  return obj;
}


AllocationResult Heap::AllocateUninitializedFixedDoubleArray(
    int length, PretenureFlag pretenure) {
  if (length == 0) return empty_fixed_array();

  HeapObject* elements;
  AllocationResult allocation = AllocateRawFixedDoubleArray(length, pretenure);
  if (!allocation.To(&elements)) return allocation;

  elements->set_map_no_write_barrier(fixed_double_array_map());
  FixedDoubleArray::cast(elements)->set_length(length);
  return elements;
}


AllocationResult Heap::AllocateRawFixedDoubleArray(int length,
                                                   PretenureFlag pretenure) {
  if (length < 0 || length > FixedDoubleArray::kMaxLength) {
    v8::internal::Heap::FatalProcessOutOfMemory("invalid array length", true);
  }
  int size = FixedDoubleArray::SizeFor(length);
  AllocationSpace space = SelectSpace(size, pretenure);

  HeapObject* object;
  {
    AllocationResult allocation =
        AllocateRaw(size, space, OLD_SPACE, kDoubleAligned);
    if (!allocation.To(&object)) return allocation;
  }

  return object;
}


AllocationResult Heap::AllocateSymbol() {
  // Statically ensure that it is safe to allocate symbols in paged spaces.
  STATIC_ASSERT(Symbol::kSize <= Page::kMaxRegularHeapObjectSize);

  HeapObject* result = NULL;
  AllocationResult allocation =
      AllocateRaw(Symbol::kSize, OLD_SPACE, OLD_SPACE);
  if (!allocation.To(&result)) return allocation;

  result->set_map_no_write_barrier(symbol_map());

  // Generate a random hash value.
  int hash;
  int attempts = 0;
  do {
    hash = isolate()->random_number_generator()->NextInt() & Name::kHashBitMask;
    attempts++;
  } while (hash == 0 && attempts < 30);
  if (hash == 0) hash = 1;  // never return 0

  Symbol::cast(result)
      ->set_hash_field(Name::kIsNotArrayIndexMask | (hash << Name::kHashShift));
  Symbol::cast(result)->set_name(undefined_value());
  Symbol::cast(result)->set_flags(Smi::FromInt(0));

  DCHECK(!Symbol::cast(result)->is_private());
  return result;
}

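// A minimal sketch (hypothetical helper, not part of V8) of the retry pattern
// used above: draw random values until a non-zero hash is found, falling back
// to 1 after a bounded number of attempts so that the stored hash field is
// never zero.
static inline int ExampleNonZeroHash(int (*next_random)(), int mask) {
  int hash = 0;
  for (int attempts = 0; hash == 0 && attempts < 30; attempts++) {
    hash = next_random() & mask;
  }
  return hash == 0 ? 1 : hash;
}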

AllocationResult Heap::AllocateStruct(InstanceType type) {
  Map* map;
  switch (type) {
#define MAKE_CASE(NAME, Name, name) \
  case NAME##_TYPE:                 \
    map = name##_map();             \
    break;
    STRUCT_LIST(MAKE_CASE)
#undef MAKE_CASE
    default:
      UNREACHABLE();
      return exception();
  }
  int size = map->instance_size();
  AllocationSpace space = SelectSpace(size, TENURED);
  Struct* result;
  {
    AllocationResult allocation = Allocate(map, space);
    if (!allocation.To(&result)) return allocation;
  }
  result->InitializeBody(size);
  return result;
}


bool Heap::IsHeapIterable() {
  // TODO(hpayer): This function is not correct. Allocation folding in old
  // space breaks the iterability.
  return new_space_top_after_last_gc_ == new_space()->top();
}


void Heap::MakeHeapIterable() {
  DCHECK(AllowHeapAllocation::IsAllowed());
  if (!IsHeapIterable()) {
    CollectAllGarbage(kMakeHeapIterableMask, "Heap::MakeHeapIterable");
  }
  if (mark_compact_collector()->sweeping_in_progress()) {
    mark_compact_collector()->EnsureSweepingCompleted();
  }
  DCHECK(IsHeapIterable());
}


bool Heap::HasLowYoungGenerationAllocationRate() {
  const double high_mutator_utilization = 0.993;
  double mutator_speed = static_cast<double>(
      tracer()->NewSpaceAllocationThroughputInBytesPerMillisecond());
  double gc_speed = static_cast<double>(
      tracer()->ScavengeSpeedInBytesPerMillisecond(kForSurvivedObjects));
  if (mutator_speed == 0 || gc_speed == 0) return false;
  double mutator_utilization = gc_speed / (mutator_speed + gc_speed);
  return mutator_utilization > high_mutator_utilization;
}


bool Heap::HasLowOldGenerationAllocationRate() {
  const double high_mutator_utilization = 0.993;
  double mutator_speed = static_cast<double>(
      tracer()->OldGenerationAllocationThroughputInBytesPerMillisecond());
  double gc_speed = static_cast<double>(
      tracer()->CombinedMarkCompactSpeedInBytesPerMillisecond());
  if (mutator_speed == 0 || gc_speed == 0) return false;
  double mutator_utilization = gc_speed / (mutator_speed + gc_speed);
  return mutator_utilization > high_mutator_utilization;
}


bool Heap::HasLowAllocationRate() {
  return HasLowYoungGenerationAllocationRate() &&
         HasLowOldGenerationAllocationRate();
}

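// Illustrative numbers (hypothetical, not measured): with a scavenge speed of
// 10 MB/ms and a new-space allocation throughput of 0.05 MB/ms, the projected
// mutator utilization is gc_speed / (mutator_speed + gc_speed)
// = 10 / 10.05 ~ 0.995, which is above the 0.993 threshold, so the young
// generation allocation rate counts as low.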

bool Heap::HasHighFragmentation() {
  intptr_t used = PromotedSpaceSizeOfObjects();
  intptr_t committed = CommittedOldGenerationMemory();
  return HasHighFragmentation(used, committed);
}


bool Heap::HasHighFragmentation(intptr_t used, intptr_t committed) {
  const intptr_t kSlack = 16 * MB;
  // Fragmentation is high if committed > 2 * used + kSlack.
  // Rewrite the expression to avoid overflow.
  return committed - used > used + kSlack;
}

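// A minimal sketch (hypothetical helper and numbers, not part of V8) of the
// overflow-safe check above: committed > 2 * used + kSlack is rewritten as
// committed - used > used + kSlack.  For used = 100 MB and committed = 260 MB
// the left side is 160 MB and the right side is 116 MB, so fragmentation is
// considered high.
static inline bool ExampleHasHighFragmentation(int64_t used_bytes,
                                               int64_t committed_bytes) {
  const int64_t kSlackBytes = 16 * 1024 * 1024;
  return committed_bytes - used_bytes > used_bytes + kSlackBytes;
}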

void Heap::ReduceNewSpaceSize() {
  if (!FLAG_predictable && HasLowAllocationRate()) {
    new_space_.Shrink();
    UncommitFromSpace();
  }
}


bool Heap::TryFinalizeIdleIncrementalMarking(
    double idle_time_in_ms, size_t size_of_objects,
    size_t final_incremental_mark_compact_speed_in_bytes_per_ms) {
  if (FLAG_overapproximate_weak_closure &&
      (incremental_marking()->IsReadyToOverApproximateWeakClosure() ||
       (!incremental_marking()->weak_closure_was_overapproximated() &&
        mark_compact_collector_.marking_deque()->IsEmpty() &&
        gc_idle_time_handler_.ShouldDoOverApproximateWeakClosure(
            static_cast<size_t>(idle_time_in_ms))))) {
    OverApproximateWeakClosure(
        "Idle notification: overapproximate weak closure");
    return true;
  } else if (incremental_marking()->IsComplete() ||
             (mark_compact_collector_.marking_deque()->IsEmpty() &&
              gc_idle_time_handler_.ShouldDoFinalIncrementalMarkCompact(
                  static_cast<size_t>(idle_time_in_ms), size_of_objects,
                  final_incremental_mark_compact_speed_in_bytes_per_ms))) {
    CollectAllGarbage(kNoGCFlags, "idle notification: finalize incremental");
    return true;
  }
  return false;
}


GCIdleTimeHandler::HeapState Heap::ComputeHeapState() {
  GCIdleTimeHandler::HeapState heap_state;
  heap_state.contexts_disposed = contexts_disposed_;
  heap_state.contexts_disposal_rate =
      tracer()->ContextDisposalRateInMilliseconds();
  heap_state.size_of_objects = static_cast<size_t>(SizeOfObjects());
  heap_state.incremental_marking_stopped = incremental_marking()->IsStopped();
  heap_state.sweeping_in_progress =
      mark_compact_collector()->sweeping_in_progress();
  heap_state.sweeping_completed =
      mark_compact_collector()->IsSweepingCompleted();
  heap_state.mark_compact_speed_in_bytes_per_ms =
      static_cast<size_t>(tracer()->MarkCompactSpeedInBytesPerMillisecond());
  heap_state.incremental_marking_speed_in_bytes_per_ms = static_cast<size_t>(
      tracer()->IncrementalMarkingSpeedInBytesPerMillisecond());
  heap_state.final_incremental_mark_compact_speed_in_bytes_per_ms =
      static_cast<size_t>(
          tracer()->FinalIncrementalMarkCompactSpeedInBytesPerMillisecond());
  heap_state.scavenge_speed_in_bytes_per_ms =
      static_cast<size_t>(tracer()->ScavengeSpeedInBytesPerMillisecond());
  heap_state.used_new_space_size = new_space_.Size();
  heap_state.new_space_capacity = new_space_.Capacity();
  heap_state.new_space_allocation_throughput_in_bytes_per_ms =
      tracer()->NewSpaceAllocationThroughputInBytesPerMillisecond();
  return heap_state;
}


bool Heap::PerformIdleTimeAction(GCIdleTimeAction action,
                                 GCIdleTimeHandler::HeapState heap_state,
                                 double deadline_in_ms) {
  bool result = false;
  switch (action.type) {
    case DONE:
      result = true;
      break;
    case DO_INCREMENTAL_MARKING: {
      DCHECK(!incremental_marking()->IsStopped());
      double remaining_idle_time_in_ms = 0.0;
      do {
        incremental_marking()->Step(
            action.parameter, IncrementalMarking::NO_GC_VIA_STACK_GUARD,
            IncrementalMarking::FORCE_MARKING,
            IncrementalMarking::DO_NOT_FORCE_COMPLETION);
        remaining_idle_time_in_ms =
            deadline_in_ms - MonotonicallyIncreasingTimeInMs();
      } while (remaining_idle_time_in_ms >=
                   2.0 * GCIdleTimeHandler::kIncrementalMarkingStepTimeInMs &&
               !incremental_marking()->IsComplete() &&
               !mark_compact_collector_.marking_deque()->IsEmpty());
      if (remaining_idle_time_in_ms > 0.0) {
        action.additional_work = TryFinalizeIdleIncrementalMarking(
            remaining_idle_time_in_ms, heap_state.size_of_objects,
            heap_state.final_incremental_mark_compact_speed_in_bytes_per_ms);
      }
      break;
    }
    case DO_FULL_GC: {
      DCHECK(contexts_disposed_ > 0);
      HistogramTimerScope scope(isolate_->counters()->gc_context());
      CollectAllGarbage(kNoGCFlags, "idle notification: contexts disposed");
      break;
    }
    case DO_SCAVENGE:
      CollectGarbage(NEW_SPACE, "idle notification: scavenge");
      break;
    case DO_FINALIZE_SWEEPING:
      mark_compact_collector()->EnsureSweepingCompleted();
      break;
    case DO_NOTHING:
      break;
  }

  return result;
}


void Heap::IdleNotificationEpilogue(GCIdleTimeAction action,
                                    GCIdleTimeHandler::HeapState heap_state,
                                    double start_ms, double deadline_in_ms) {
  double idle_time_in_ms = deadline_in_ms - start_ms;
  double current_time = MonotonicallyIncreasingTimeInMs();
  last_idle_notification_time_ = current_time;
  double deadline_difference = deadline_in_ms - current_time;

  contexts_disposed_ = 0;

  isolate()->counters()->gc_idle_time_allotted_in_ms()->AddSample(
      static_cast<int>(idle_time_in_ms));

  if (deadline_difference >= 0) {
    if (action.type != DONE && action.type != DO_NOTHING) {
      isolate()->counters()->gc_idle_time_limit_undershot()->AddSample(
          static_cast<int>(deadline_difference));
    }
  } else {
    isolate()->counters()->gc_idle_time_limit_overshot()->AddSample(
        static_cast<int>(-deadline_difference));
  }

  if ((FLAG_trace_idle_notification && action.type > DO_NOTHING) ||
      FLAG_trace_idle_notification_verbose) {
    PrintIsolate(isolate_, "%8.0f ms: ", isolate()->time_millis_since_init());
    PrintF(
        "Idle notification: requested idle time %.2f ms, used idle time %.2f "
        "ms, deadline usage %.2f ms [",
        idle_time_in_ms, idle_time_in_ms - deadline_difference,
        deadline_difference);
    action.Print();
    PrintF("]");
    if (FLAG_trace_idle_notification_verbose) {
      PrintF("[");
      heap_state.Print();
      PrintF("]");
    }
    PrintF("\n");
  }
}


void Heap::CheckAndNotifyBackgroundIdleNotification(double idle_time_in_ms,
                                                    double now_ms) {
  if (idle_time_in_ms >= GCIdleTimeHandler::kMinBackgroundIdleTime) {
    MemoryReducer::Event event;
    event.type = MemoryReducer::kBackgroundIdleNotification;
    event.time_ms = now_ms;
    event.can_start_incremental_gc = incremental_marking()->IsStopped() &&
                                     incremental_marking()->CanBeActivated();
    memory_reducer_.NotifyBackgroundIdleNotification(event);
  }
}


double Heap::MonotonicallyIncreasingTimeInMs() {
  return V8::GetCurrentPlatform()->MonotonicallyIncreasingTime() *
         static_cast<double>(base::Time::kMillisecondsPerSecond);
}


bool Heap::IdleNotification(int idle_time_in_ms) {
  return IdleNotification(
      V8::GetCurrentPlatform()->MonotonicallyIncreasingTime() +
      (static_cast<double>(idle_time_in_ms) /
       static_cast<double>(base::Time::kMillisecondsPerSecond)));
}


bool Heap::IdleNotification(double deadline_in_seconds) {
  CHECK(HasBeenSetUp());
  double deadline_in_ms =
      deadline_in_seconds *
      static_cast<double>(base::Time::kMillisecondsPerSecond);
  HistogramTimerScope idle_notification_scope(
      isolate_->counters()->gc_idle_notification());
  double start_ms = MonotonicallyIncreasingTimeInMs();
  double idle_time_in_ms = deadline_in_ms - start_ms;

  CheckAndNotifyBackgroundIdleNotification(idle_time_in_ms, start_ms);

  tracer()->SampleAllocation(start_ms, NewSpaceAllocationCounter(),
                             OldGenerationAllocationCounter());

  GCIdleTimeHandler::HeapState heap_state = ComputeHeapState();

  GCIdleTimeAction action =
      gc_idle_time_handler_.Compute(idle_time_in_ms, heap_state);

  bool result = PerformIdleTimeAction(action, heap_state, deadline_in_ms);

  IdleNotificationEpilogue(action, heap_state, start_ms, deadline_in_ms);
  return result;
}


bool Heap::RecentIdleNotificationHappened() {
  return (last_idle_notification_time_ +
          GCIdleTimeHandler::kMaxScheduledIdleTime) >
         MonotonicallyIncreasingTimeInMs();
}


#ifdef DEBUG

void Heap::Print() {
  if (!HasBeenSetUp()) return;
  isolate()->PrintStack(stdout);
  AllSpaces spaces(this);
  for (Space* space = spaces.next(); space != NULL; space = spaces.next()) {
    space->Print();
  }
}


void Heap::ReportCodeStatistics(const char* title) {
  PrintF(">>>>>> Code Stats (%s) >>>>>>\n", title);
  PagedSpace::ResetCodeStatistics(isolate());
  // We do not look for code in new space, map space, or old space.  If code
  // somehow ends up in those spaces, we would miss it here.
  code_space_->CollectCodeStatistics();
  lo_space_->CollectCodeStatistics();
  PagedSpace::ReportCodeStatistics(isolate());
}


// This function expects that NewSpace's allocated objects histogram is
// populated (via a call to CollectStatistics or else as a side effect of a
// just-completed scavenge collection).
void Heap::ReportHeapStatistics(const char* title) {
  USE(title);
  PrintF(">>>>>> =============== %s (%d) =============== >>>>>>\n", title,
         gc_count_);
  PrintF("old_generation_allocation_limit_ %" V8_PTR_PREFIX "d\n",
         old_generation_allocation_limit_);

  PrintF("\n");
  PrintF("Number of handles : %d\n", HandleScope::NumberOfHandles(isolate_));
  isolate_->global_handles()->PrintStats();
  PrintF("\n");

  PrintF("Heap statistics : ");
  isolate_->memory_allocator()->ReportStatistics();
  PrintF("To space : ");
  new_space_.ReportStatistics();
  PrintF("Old space : ");
  old_space_->ReportStatistics();
  PrintF("Code space : ");
  code_space_->ReportStatistics();
  PrintF("Map space : ");
  map_space_->ReportStatistics();
  PrintF("Large object space : ");
  lo_space_->ReportStatistics();
  PrintF(">>>>>> ========================================= >>>>>>\n");
}

#endif  // DEBUG

bool Heap::Contains(HeapObject* value) { return Contains(value->address()); }


bool Heap::Contains(Address addr) {
  if (isolate_->memory_allocator()->IsOutsideAllocatedSpace(addr)) return false;
  return HasBeenSetUp() &&
         (new_space_.ToSpaceContains(addr) || old_space_->Contains(addr) ||
          code_space_->Contains(addr) || map_space_->Contains(addr) ||
          lo_space_->SlowContains(addr));
}


bool Heap::InSpace(HeapObject* value, AllocationSpace space) {
  return InSpace(value->address(), space);
}


bool Heap::InSpace(Address addr, AllocationSpace space) {
  if (isolate_->memory_allocator()->IsOutsideAllocatedSpace(addr)) return false;
  if (!HasBeenSetUp()) return false;

  switch (space) {
    case NEW_SPACE:
      return new_space_.ToSpaceContains(addr);
    case OLD_SPACE:
      return old_space_->Contains(addr);
    case CODE_SPACE:
      return code_space_->Contains(addr);
    case MAP_SPACE:
      return map_space_->Contains(addr);
    case LO_SPACE:
      return lo_space_->SlowContains(addr);
  }
  UNREACHABLE();
  return false;
}


bool Heap::IsValidAllocationSpace(AllocationSpace space) {
  switch (space) {
    case NEW_SPACE:
    case OLD_SPACE:
    case CODE_SPACE:
    case MAP_SPACE:
    case LO_SPACE:
      return true;
    default:
      return false;
  }
}


bool Heap::RootIsImmortalImmovable(int root_index) {
  switch (root_index) {
#define CASE(name)               \
  case Heap::k##name##RootIndex: \
    return true;
    IMMORTAL_IMMOVABLE_ROOT_LIST(CASE);
#undef CASE
    default:
      return false;
  }
}


#ifdef VERIFY_HEAP
void Heap::Verify() {
  CHECK(HasBeenSetUp());
  HandleScope scope(isolate());

  store_buffer()->Verify();

  if (mark_compact_collector()->sweeping_in_progress()) {
    // We have to wait here for the sweeper threads to have an iterable heap.
    mark_compact_collector()->EnsureSweepingCompleted();
  }

  VerifyPointersVisitor visitor;
  IterateRoots(&visitor, VISIT_ONLY_STRONG);

  VerifySmisVisitor smis_visitor;
  IterateSmiRoots(&smis_visitor);

  new_space_.Verify();

  old_space_->Verify(&visitor);
  map_space_->Verify(&visitor);

  VerifyPointersVisitor no_dirty_regions_visitor;
  code_space_->Verify(&no_dirty_regions_visitor);

  lo_space_->Verify();
}
#endif


void Heap::ZapFromSpace() {
  if (!new_space_.IsFromSpaceCommitted()) return;
  NewSpacePageIterator it(new_space_.FromSpaceStart(),
                          new_space_.FromSpaceEnd());
  while (it.has_next()) {
    NewSpacePage* page = it.next();
    for (Address cursor = page->area_start(), limit = page->area_end();
         cursor < limit; cursor += kPointerSize) {
      Memory::Address_at(cursor) = kFromSpaceZapValue;
    }
  }
}


void Heap::IterateAndMarkPointersToFromSpace(bool record_slots, Address start,
                                             Address end,
                                             ObjectSlotCallback callback) {
  Address slot_address = start;

  while (slot_address < end) {
    Object** slot = reinterpret_cast<Object**>(slot_address);
    Object* object = *slot;
    // If the store buffer becomes overfull we mark pages as being exempt from
    // the store buffer.  These pages are scanned to find pointers that point
    // to the new space.  In that case we may hit newly promoted objects and
    // fix the pointers before the promotion queue gets to them.  Thus the 'if'.
    if (object->IsHeapObject()) {
      if (Heap::InFromSpace(object)) {
        callback(reinterpret_cast<HeapObject**>(slot),
                 HeapObject::cast(object));
        Object* new_object = *slot;
        if (InNewSpace(new_object)) {
          SLOW_DCHECK(Heap::InToSpace(new_object));
          SLOW_DCHECK(new_object->IsHeapObject());
          store_buffer_.EnterDirectlyIntoStoreBuffer(
              reinterpret_cast<Address>(slot));
        }
        SLOW_DCHECK(!MarkCompactCollector::IsOnEvacuationCandidate(new_object));
      } else if (record_slots &&
                 MarkCompactCollector::IsOnEvacuationCandidate(object)) {
        mark_compact_collector()->RecordSlot(slot, slot, object);
      }
    }
    slot_address += kPointerSize;
  }
}


void Heap::IterateRoots(ObjectVisitor* v, VisitMode mode) {
  IterateStrongRoots(v, mode);
  IterateWeakRoots(v, mode);
}


void Heap::IterateWeakRoots(ObjectVisitor* v, VisitMode mode) {
  v->VisitPointer(reinterpret_cast<Object**>(&roots_[kStringTableRootIndex]));
  v->Synchronize(VisitorSynchronization::kStringTable);
  if (mode != VISIT_ALL_IN_SCAVENGE && mode != VISIT_ALL_IN_SWEEP_NEWSPACE) {
    // Scavenge collections have special processing for this.
    external_string_table_.Iterate(v);
  }
  v->Synchronize(VisitorSynchronization::kExternalStringsTable);
}


void Heap::IterateSmiRoots(ObjectVisitor* v) {
  // Acquire execution access since we are going to read stack limit values.
  ExecutionAccess access(isolate());
  v->VisitPointers(&roots_[kSmiRootsStart], &roots_[kRootListLength]);
  v->Synchronize(VisitorSynchronization::kSmiRootList);
}


void Heap::IterateStrongRoots(ObjectVisitor* v, VisitMode mode) {
  v->VisitPointers(&roots_[0], &roots_[kStrongRootListLength]);
  v->Synchronize(VisitorSynchronization::kStrongRootList);

  v->VisitPointer(bit_cast<Object**>(&hidden_string_));
  v->Synchronize(VisitorSynchronization::kInternalizedString);

  isolate_->bootstrapper()->Iterate(v);
  v->Synchronize(VisitorSynchronization::kBootstrapper);
  isolate_->Iterate(v);
  v->Synchronize(VisitorSynchronization::kTop);
  Relocatable::Iterate(isolate_, v);
  v->Synchronize(VisitorSynchronization::kRelocatable);

  if (isolate_->deoptimizer_data() != NULL) {
    isolate_->deoptimizer_data()->Iterate(v);
  }
  v->Synchronize(VisitorSynchronization::kDebug);
  isolate_->compilation_cache()->Iterate(v);
  v->Synchronize(VisitorSynchronization::kCompilationCache);

  // Iterate over local handles in handle scopes.
  isolate_->handle_scope_implementer()->Iterate(v);
  isolate_->IterateDeferredHandles(v);
  v->Synchronize(VisitorSynchronization::kHandleScope);

  // Iterate over the builtin code objects and code stubs in the
  // heap. Note that it is not necessary to iterate over code objects
  // on scavenge collections.
  if (mode != VISIT_ALL_IN_SCAVENGE) {
    isolate_->builtins()->IterateBuiltins(v);
  }
  v->Synchronize(VisitorSynchronization::kBuiltins);

  // Iterate over global handles.
  switch (mode) {
    case VISIT_ONLY_STRONG:
      isolate_->global_handles()->IterateStrongRoots(v);
      break;
    case VISIT_ALL_IN_SCAVENGE:
      isolate_->global_handles()->IterateNewSpaceStrongAndDependentRoots(v);
      break;
    case VISIT_ALL_IN_SWEEP_NEWSPACE:
    case VISIT_ALL:
      isolate_->global_handles()->IterateAllRoots(v);
      break;
  }
  v->Synchronize(VisitorSynchronization::kGlobalHandles);

  // Iterate over eternal handles.
  if (mode == VISIT_ALL_IN_SCAVENGE) {
    isolate_->eternal_handles()->IterateNewSpaceRoots(v);
  } else {
    isolate_->eternal_handles()->IterateAllRoots(v);
  }
  v->Synchronize(VisitorSynchronization::kEternalHandles);

  // Iterate over pointers being held by inactive threads.
  isolate_->thread_manager()->Iterate(v);
  v->Synchronize(VisitorSynchronization::kThreadManager);

  // Iterate over other strong roots (currently only identity maps).
  for (StrongRootsList* list = strong_roots_list_; list; list = list->next) {
    v->VisitPointers(list->start, list->end);
  }
  v->Synchronize(VisitorSynchronization::kStrongRoots);

  // Iterate over the pointers the Serialization/Deserialization code is
  // holding.
  // During garbage collection this keeps the partial snapshot cache alive.
  // During deserialization of the startup snapshot this creates the partial
  // snapshot cache and deserializes the objects it refers to.  During
  // serialization this does nothing, since the partial snapshot cache is
  // empty.  However the next thing we do is create the partial snapshot,
  // filling up the partial snapshot cache with objects it needs as we go.
  SerializerDeserializer::Iterate(isolate_, v);
  // We don't do a v->Synchronize call here, because in debug mode that will
  // output a flag to the snapshot.  However at this point the serializer and
  // deserializer are deliberately a little unsynchronized (see above) so the
  // checking of the sync flag in the snapshot would fail.
}


// TODO(1236194): Since the heap size is configurable on the command line
// and through the API, we should gracefully handle the case that the heap
// size is not big enough to fit all the initial objects.
bool Heap::ConfigureHeap(int max_semi_space_size, int max_old_space_size,
                         int max_executable_size, size_t code_range_size) {
  if (HasBeenSetUp()) return false;

  // Overwrite default configuration.
  if (max_semi_space_size > 0) {
    max_semi_space_size_ = max_semi_space_size * MB;
  }
  if (max_old_space_size > 0) {
    max_old_generation_size_ = static_cast<intptr_t>(max_old_space_size) * MB;
  }
  if (max_executable_size > 0) {
    max_executable_size_ = static_cast<intptr_t>(max_executable_size) * MB;
  }

  // If max space size flags are specified overwrite the configuration.
  if (FLAG_max_semi_space_size > 0) {
    max_semi_space_size_ = FLAG_max_semi_space_size * MB;
  }
  if (FLAG_max_old_space_size > 0) {
    max_old_generation_size_ =
        static_cast<intptr_t>(FLAG_max_old_space_size) * MB;
  }
  if (FLAG_max_executable_size > 0) {
    max_executable_size_ = static_cast<intptr_t>(FLAG_max_executable_size) * MB;
  }

  if (FLAG_stress_compaction) {
    // This will cause more frequent GCs when stressing.
    max_semi_space_size_ = Page::kPageSize;
  }

  if (isolate()->snapshot_available()) {
    // If we are using a snapshot we always reserve the default amount
    // of memory for each semispace because code in the snapshot has
    // write-barrier code that relies on the size and alignment of new
    // space.  We therefore cannot use a larger max semispace size
    // than the default reserved semispace size.
    if (max_semi_space_size_ > reserved_semispace_size_) {
      max_semi_space_size_ = reserved_semispace_size_;
      if (FLAG_trace_gc) {
        PrintIsolate(isolate_,
                     "Max semi-space size cannot be more than %d kbytes\n",
                     reserved_semispace_size_ >> 10);
      }
    }
  } else {
    // If we are not using snapshots we reserve space for the actual
    // max semispace size.
    reserved_semispace_size_ = max_semi_space_size_;
  }

  // The max executable size must be less than or equal to the max old
  // generation size.
  if (max_executable_size_ > max_old_generation_size_) {
    max_executable_size_ = max_old_generation_size_;
  }

  // The new space size must be a power of two to support single-bit testing
  // for containment.
  max_semi_space_size_ =
      base::bits::RoundUpToPowerOfTwo32(max_semi_space_size_);
  reserved_semispace_size_ =
      base::bits::RoundUpToPowerOfTwo32(reserved_semispace_size_);

  if (FLAG_min_semi_space_size > 0) {
    int initial_semispace_size = FLAG_min_semi_space_size * MB;
    if (initial_semispace_size > max_semi_space_size_) {
      initial_semispace_size_ = max_semi_space_size_;
      if (FLAG_trace_gc) {
        PrintIsolate(isolate_,
                     "Min semi-space size cannot be more than the maximum "
                     "semi-space size of %d MB\n",
                     max_semi_space_size_ / MB);
      }
    } else {
      initial_semispace_size_ = initial_semispace_size;
    }
  }

  initial_semispace_size_ = Min(initial_semispace_size_, max_semi_space_size_);

  if (FLAG_target_semi_space_size > 0) {
    int target_semispace_size = FLAG_target_semi_space_size * MB;
    if (target_semispace_size < initial_semispace_size_) {
      target_semispace_size_ = initial_semispace_size_;
      if (FLAG_trace_gc) {
        PrintIsolate(isolate_,
                     "Target semi-space size cannot be less than the minimum "
                     "semi-space size of %d MB\n",
                     initial_semispace_size_ / MB);
      }
    } else if (target_semispace_size > max_semi_space_size_) {
      target_semispace_size_ = max_semi_space_size_;
      if (FLAG_trace_gc) {
        PrintIsolate(isolate_,
                     "Target semi-space size cannot be more than the maximum "
                     "semi-space size of %d MB\n",
                     max_semi_space_size_ / MB);
      }
    } else {
      target_semispace_size_ = target_semispace_size;
    }
  }

  target_semispace_size_ = Max(initial_semispace_size_, target_semispace_size_);

  if (FLAG_semi_space_growth_factor < 2) {
    FLAG_semi_space_growth_factor = 2;
  }

  // The old generation is paged and needs at least one page for each space.
  int paged_space_count = LAST_PAGED_SPACE - FIRST_PAGED_SPACE + 1;
  max_old_generation_size_ =
      Max(static_cast<intptr_t>(paged_space_count * Page::kPageSize),
          max_old_generation_size_);

  if (FLAG_initial_old_space_size > 0) {
    initial_old_generation_size_ = FLAG_initial_old_space_size * MB;
  } else {
    initial_old_generation_size_ =
        max_old_generation_size_ / kInitalOldGenerationLimitFactor;
  }
  old_generation_allocation_limit_ = initial_old_generation_size_;

  // We rely on being able to allocate new arrays in paged spaces.
  DCHECK(Page::kMaxRegularHeapObjectSize >=
         (JSArray::kSize +
          FixedArray::SizeFor(JSObject::kInitialMaxFastElementArray) +
          AllocationMemento::kSize));

  code_range_size_ = code_range_size * MB;

  configured_ = true;
  return true;
}

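// Illustrative usage (hypothetical values): an embedder wanting a 16 MB max
// semi-space, a 512 MB old generation, a 256 MB executable limit and a 256 MB
// code range would end up with a call equivalent to
//
//   heap->ConfigureHeap(16, 512, 256, 256);
//
// while passing zeros everywhere, as ConfigureHeapDefault() below does, keeps
// the built-in defaults.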

void Heap::AddToRingBuffer(const char* string) {
  size_t first_part =
      Min(strlen(string), kTraceRingBufferSize - ring_buffer_end_);
  memcpy(trace_ring_buffer_ + ring_buffer_end_, string, first_part);
  ring_buffer_end_ += first_part;
  if (first_part < strlen(string)) {
    ring_buffer_full_ = true;
    size_t second_part = strlen(string) - first_part;
    memcpy(trace_ring_buffer_, string + first_part, second_part);
    ring_buffer_end_ = second_part;
  }
}


void Heap::GetFromRingBuffer(char* buffer) {
  size_t copied = 0;
  if (ring_buffer_full_) {
    copied = kTraceRingBufferSize - ring_buffer_end_;
    memcpy(buffer, trace_ring_buffer_ + ring_buffer_end_, copied);
  }
  memcpy(buffer + copied, trace_ring_buffer_, ring_buffer_end_);
}

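// Illustrative trace (hypothetical 8-byte buffer): with ring_buffer_end_ == 6,
// AddToRingBuffer("abcde") writes "ab" at offsets 6..7, wraps, writes "cde" at
// offsets 0..2, sets ring_buffer_full_ and leaves ring_buffer_end_ == 3.
// GetFromRingBuffer() then copies offsets 3..7 first and offsets 0..2 after
// them, so the caller always sees the most recent kTraceRingBufferSize
// characters in chronological order.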

bool Heap::ConfigureHeapDefault() { return ConfigureHeap(0, 0, 0, 0); }


void Heap::RecordStats(HeapStats* stats, bool take_snapshot) {
  *stats->start_marker = HeapStats::kStartMarker;
  *stats->end_marker = HeapStats::kEndMarker;
  *stats->new_space_size = new_space_.SizeAsInt();
  *stats->new_space_capacity = static_cast<int>(new_space_.Capacity());
  *stats->old_space_size = old_space_->SizeOfObjects();
  *stats->old_space_capacity = old_space_->Capacity();
  *stats->code_space_size = code_space_->SizeOfObjects();
  *stats->code_space_capacity = code_space_->Capacity();
  *stats->map_space_size = map_space_->SizeOfObjects();
  *stats->map_space_capacity = map_space_->Capacity();
  *stats->lo_space_size = lo_space_->Size();
  isolate_->global_handles()->RecordStats(stats);
  *stats->memory_allocator_size = isolate()->memory_allocator()->Size();
  *stats->memory_allocator_capacity =
      isolate()->memory_allocator()->Size() +
      isolate()->memory_allocator()->Available();
  *stats->os_error = base::OS::GetLastError();
  isolate()->memory_allocator()->Available();
  if (take_snapshot) {
    HeapIterator iterator(this);
    for (HeapObject* obj = iterator.next(); obj != NULL;
         obj = iterator.next()) {
      InstanceType type = obj->map()->instance_type();
      DCHECK(0 <= type && type <= LAST_TYPE);
      stats->objects_per_type[type]++;
      stats->size_per_type[type] += obj->Size();
    }
  }
  if (stats->last_few_messages != NULL)
    GetFromRingBuffer(stats->last_few_messages);
  if (stats->js_stacktrace != NULL) {
    FixedStringAllocator fixed(stats->js_stacktrace, kStacktraceBufferSize - 1);
    StringStream accumulator(&fixed);
    isolate()->PrintStack(&accumulator, Isolate::kPrintStackVerbose);
  }
}


intptr_t Heap::PromotedSpaceSizeOfObjects() {
  return old_space_->SizeOfObjects() + code_space_->SizeOfObjects() +
         map_space_->SizeOfObjects() + lo_space_->SizeOfObjects();
}


int64_t Heap::PromotedExternalMemorySize() {
  if (amount_of_external_allocated_memory_ <=
      amount_of_external_allocated_memory_at_last_global_gc_)
    return 0;
  return amount_of_external_allocated_memory_ -
         amount_of_external_allocated_memory_at_last_global_gc_;
}


const double Heap::kMinHeapGrowingFactor = 1.1;
const double Heap::kMaxHeapGrowingFactor = 4.0;
const double Heap::kMaxHeapGrowingFactorMemoryConstrained = 2.0;
const double Heap::kMaxHeapGrowingFactorIdle = 1.5;
const double Heap::kTargetMutatorUtilization = 0.97;


// Given GC speed in bytes per ms, the allocation throughput in bytes per ms
// (mutator speed), this function returns the heap growing factor that will
// achieve the kTargetMutatorUtilization if the GC speed and the mutator speed
// remain the same until the next GC.
//
// For a fixed time-frame T = TM + TG, the mutator utilization is the ratio
// TM / (TM + TG), where TM is the time spent in the mutator and TG is the
// time spent in the garbage collector.
//
// Let MU be kTargetMutatorUtilization, the desired mutator utilization for the
// time-frame from the end of the current GC to the end of the next GC. Based
// on the MU we can compute the heap growing factor F as
//
// F = R * (1 - MU) / (R * (1 - MU) - MU), where R = gc_speed / mutator_speed.
//
// This formula can be derived as follows.
//
// F = Limit / Live by definition, where the Limit is the allocation limit,
// and the Live is size of live objects.
// Let’s assume that we already know the Limit. Then:
//   TG = Limit / gc_speed
//   TM = (TM + TG) * MU, by definition of MU.
//   TM = TG * MU / (1 - MU)
//   TM = Limit *  MU / (gc_speed * (1 - MU))
// On the other hand, if the allocation throughput remains constant:
//   Limit = Live + TM * allocation_throughput = Live + TM * mutator_speed
// Solving it for TM, we get
//   TM = (Limit - Live) / mutator_speed
// Combining the two equation for TM:
//   (Limit - Live) / mutator_speed = Limit * MU / (gc_speed * (1 - MU))
//   (Limit - Live) = Limit * MU * mutator_speed / (gc_speed * (1 - MU))
// substitute R = gc_speed / mutator_speed
//   (Limit - Live) = Limit * MU  / (R * (1 - MU))
// substitute F = Limit / Live
//   F - 1 = F * MU  / (R * (1 - MU))
//   F - F * MU / (R * (1 - MU)) = 1
//   F * (1 - MU / (R * (1 - MU))) = 1
//   F * (R * (1 - MU) - MU) / (R * (1 - MU)) = 1
//   F = R * (1 - MU) / (R * (1 - MU) - MU)
double Heap::HeapGrowingFactor(double gc_speed, double mutator_speed) {
  if (gc_speed == 0 || mutator_speed == 0) return kMaxHeapGrowingFactor;

  const double speed_ratio = gc_speed / mutator_speed;
  const double mu = kTargetMutatorUtilization;

  const double a = speed_ratio * (1 - mu);
  const double b = speed_ratio * (1 - mu) - mu;

  // The factor is a / b, but we need to check for small b first.
  double factor =
      (a < b * kMaxHeapGrowingFactor) ? a / b : kMaxHeapGrowingFactor;
  factor = Min(factor, kMaxHeapGrowingFactor);
  factor = Max(factor, kMinHeapGrowingFactor);
  return factor;
}


5606 5607 5608 5609 5610
intptr_t Heap::CalculateOldGenerationAllocationLimit(double factor,
                                                     intptr_t old_gen_size) {
  CHECK(factor > 1.0);
  CHECK(old_gen_size > 0);
  intptr_t limit = static_cast<intptr_t>(old_gen_size * factor);
5611
  limit = Max(limit, old_gen_size + kMinimumOldGenerationAllocationLimit);
5612 5613 5614 5615 5616 5617
  limit += new_space_.Capacity();
  intptr_t halfway_to_the_max = (old_gen_size + max_old_generation_size_) / 2;
  return Min(limit, halfway_to_the_max);
}


5618 5619 5620 5621 5622 5623 5624 5625 5626 5627 5628 5629 5630
void Heap::SetOldGenerationAllocationLimit(intptr_t old_gen_size,
                                           double gc_speed,
                                           double mutator_speed) {
  double factor = HeapGrowingFactor(gc_speed, mutator_speed);

  if (FLAG_trace_gc_verbose) {
    PrintIsolate(isolate_,
                 "Heap growing factor %.1f based on mu=%.3f, speed_ratio=%.f "
                 "(gc=%.f, mutator=%.f)\n",
                 factor, kTargetMutatorUtilization, gc_speed / mutator_speed,
                 gc_speed, mutator_speed);
  }

5631 5632
  // We set the old generation growing factor to 2 to grow the heap slower on
  // memory-constrained devices.
5633 5634
  if (max_old_generation_size_ <= kMaxOldSpaceSizeMediumMemoryDevice ||
      FLAG_optimize_for_size) {
5635
    factor = Min(factor, kMaxHeapGrowingFactorMemoryConstrained);
5636 5637 5638 5639
  }

  if (FLAG_stress_compaction ||
      mark_compact_collector()->reduce_memory_footprint_) {
5640
    factor = kMinHeapGrowingFactor;
5641 5642
  }

5643 5644
  old_generation_allocation_limit_ =
      CalculateOldGenerationAllocationLimit(factor, old_gen_size);
5645 5646

  if (FLAG_trace_gc_verbose) {
5647 5648 5649 5650
    PrintIsolate(isolate_, "Grow: old size: %" V8_PTR_PREFIX
                           "d KB, new limit: %" V8_PTR_PREFIX "d KB (%.1f)\n",
                 old_gen_size / KB, old_generation_allocation_limit_ / KB,
                 factor);
5651
  }
5652 5653 5654
}


5655 5656 5657 5658 5659 5660 5661 5662
void Heap::DampenOldGenerationAllocationLimit(intptr_t old_gen_size,
                                              double gc_speed,
                                              double mutator_speed) {
  double factor = HeapGrowingFactor(gc_speed, mutator_speed);
  intptr_t limit = CalculateOldGenerationAllocationLimit(factor, old_gen_size);
  if (limit < old_generation_allocation_limit_) {
    if (FLAG_trace_gc_verbose) {
      PrintIsolate(isolate_, "Dampen: old size: %" V8_PTR_PREFIX
5663 5664 5665
                             "d KB, old limit: %" V8_PTR_PREFIX
                             "d KB, "
                             "new limit: %" V8_PTR_PREFIX "d KB (%.1f)\n",
5666 5667 5668 5669 5670 5671 5672 5673
                   old_gen_size / KB, old_generation_allocation_limit_ / KB,
                   limit / KB, factor);
    }
    old_generation_allocation_limit_ = limit;
  }
}


5674
void Heap::EnableInlineAllocation() {
5675
  if (!inline_allocation_disabled_) return;
5676 5677 5678 5679 5680 5681 5682 5683
  inline_allocation_disabled_ = false;

  // Update inline allocation limit for new space.
  new_space()->UpdateInlineAllocationLimit(0);
}


void Heap::DisableInlineAllocation() {
5684
  if (inline_allocation_disabled_) return;
5685 5686 5687 5688 5689 5690 5691
  inline_allocation_disabled_ = true;

  // Update inline allocation limit for new space.
  new_space()->UpdateInlineAllocationLimit(0);

  // Update inline allocation limit for old spaces.
  PagedSpaces spaces(this);
5692
  for (PagedSpace* space = spaces.next(); space != NULL;
5693 5694 5695 5696 5697 5698
       space = spaces.next()) {
    space->EmptyAllocationInfo();
  }
}


5699 5700 5701 5702 5703 5704 5705 5706
V8_DECLARE_ONCE(initialize_gc_once);

static void InitializeGCOnce() {
  InitializeScavengingVisitorsTables();
  NewSpaceScavenger::Initialize();
  MarkCompactCollector::Initialize();
}

5707

5708
bool Heap::SetUp() {
5709
#ifdef DEBUG
5710
  allocation_timeout_ = FLAG_gc_interval;
5711 5712
#endif

5713 5714 5715 5716
  // Initialize heap spaces and initial maps and objects. Whenever something
  // goes wrong, just return false. The caller should check the results and
  // call Heap::TearDown() to release allocated memory.
  //
5717
  // If the heap is not yet configured (e.g. through the API), configure it.
5718 5719 5720
  // Configuration is based on the flags new-space-size (really the semispace
  // size) and old-space-size if set or the initial values of semispace_size_
  // and old_generation_size_ otherwise.
5721
  if (!configured_) {
5722
    if (!ConfigureHeapDefault()) return false;
5723 5724
  }

5725
  concurrent_sweeping_enabled_ = FLAG_concurrent_sweeping;
5726

5727
  base::CallOnce(&initialize_gc_once, &InitializeGCOnce);
5728

5729 5730
  MarkMapPointersAsEncoded(false);

5731 5732
  // Set up memory allocator.
  if (!isolate_->memory_allocator()->SetUp(MaxReserved(), MaxExecutableSize()))
5733
    return false;
5734

5735
  // Set up new space.
5736
  if (!new_space_.SetUp(reserved_semispace_size_, max_semi_space_size_)) {
kmillikin@chromium.org's avatar
kmillikin@chromium.org committed
5737 5738
    return false;
  }
5739
  new_space_top_after_last_gc_ = new_space()->top();
5740

5741 5742 5743 5744 5745
  // Initialize old space.
  old_space_ =
      new OldSpace(this, max_old_generation_size_, OLD_SPACE, NOT_EXECUTABLE);
  if (old_space_ == NULL) return false;
  if (!old_space_->SetUp()) return false;
5746

5747 5748
  if (!isolate_->code_range()->SetUp(code_range_size_)) return false;

5749
  // Initialize the code space, set its maximum capacity to the old
5750
  // generation size. It needs executable memory.
5751 5752
  code_space_ =
      new OldSpace(this, max_old_generation_size_, CODE_SPACE, EXECUTABLE);
5753
  if (code_space_ == NULL) return false;
5754
  if (!code_space_->SetUp()) return false;
5755 5756

  // Initialize map space.
5757
  map_space_ = new MapSpace(this, max_old_generation_size_, MAP_SPACE);
5758
  if (map_space_ == NULL) return false;
5759
  if (!map_space_->SetUp()) return false;
5760

5761 5762 5763
  // The large object code space may contain code or data.  We set the memory
  // to be non-executable here for safety, but this means we need to enable it
  // explicitly when allocating large code objects.
5764
  lo_space_ = new LargeObjectSpace(this, max_old_generation_size_, LO_SPACE);
5765
  if (lo_space_ == NULL) return false;
5766
  if (!lo_space_->SetUp()) return false;
5767

5768
  // Set up the seed that is used to randomize the string hash function.
5769
  DCHECK(hash_seed() == 0);
5770 5771
  if (FLAG_randomize_hashes) {
    if (FLAG_hash_seed == 0) {
5772 5773
      int rnd = isolate()->random_number_generator()->NextInt();
      set_hash_seed(Smi::FromInt(rnd & Name::kHashBitMask));
5774
    } else {
5775
      set_hash_seed(Smi::FromInt(FLAG_hash_seed));
5776 5777 5778
    }
  }

5779 5780 5781 5782 5783 5784
  for (int i = 0; i < static_cast<int>(v8::Isolate::kUseCounterFeatureCount);
       i++) {
    deferred_counters_[i] = 0;
  }


5785 5786
  LOG(isolate_, IntPtrTEvent("heap-capacity", Capacity()));
  LOG(isolate_, IntPtrTEvent("heap-available", Available()));
5787

5788
  store_buffer()->SetUp();
5789

5790 5791
  mark_compact_collector()->SetUp();

5792 5793 5794
  return true;
}

5795

5796 5797 5798
bool Heap::CreateHeapObjects() {
  // Create initial maps.
  if (!CreateInitialMaps()) return false;
5799
  CreateApiObjects();
5800 5801

  // Create initial objects
5802
  CreateInitialObjects();
5803
  CHECK_EQ(0u, gc_count_);
5804

5805 5806
  set_native_contexts_list(undefined_value());
  set_allocation_sites_list(undefined_value());
5807 5808 5809
  return true;
}

5810

5811
void Heap::SetStackLimits() {
5812 5813
  DCHECK(isolate_ != NULL);
  DCHECK(isolate_ == isolate());
5814 5815 5816
  // On 64 bit machines, pointers are generally out of range of Smis.  We write
  // something that looks like an out of range Smi to the GC.

5817 5818
  // Set up the special root array entries containing the stack limits.
  // These are actually addresses, but the tag makes the GC ignore it.
5819 5820 5821 5822
  roots_[kStackLimitRootIndex] = reinterpret_cast<Object*>(
      (isolate_->stack_guard()->jslimit() & ~kSmiTagMask) | kSmiTag);
  roots_[kRealStackLimitRootIndex] = reinterpret_cast<Object*>(
      (isolate_->stack_guard()->real_jslimit() & ~kSmiTagMask) | kSmiTag);
5823 5824 5825
}


5826 5827 5828 5829 5830 5831 5832 5833 5834 5835 5836
void Heap::NotifyDeserializationComplete() {
  deserialization_complete_ = true;
#ifdef DEBUG
  // All pages right after bootstrapping must be marked as never-evacuate.
  PagedSpaces spaces(this);
  for (PagedSpace* s = spaces.next(); s != NULL; s = spaces.next()) {
    PageIterator it(s);
    while (it.has_next()) CHECK(it.next()->NeverEvacuate());
  }
#endif  // DEBUG
}
5837 5838


5839
void Heap::TearDown() {
5840
#ifdef VERIFY_HEAP
5841 5842 5843 5844
  if (FLAG_verify_heap) {
    Verify();
  }
#endif
5845

5846 5847
  UpdateMaximumCommitted();

5848
  if (FLAG_print_cumulative_gc_stat) {
5849
    PrintF("\n");
5850 5851
    PrintF("gc_count=%d ", gc_count_);
    PrintF("mark_sweep_count=%d ", ms_count_);
5852 5853 5854
    PrintF("max_gc_pause=%.1f ", get_max_gc_pause());
    PrintF("total_gc_time=%.1f ", total_gc_time_ms_);
    PrintF("min_in_mutator=%.1f ", get_min_in_mutator());
5855
    PrintF("max_alive_after_gc=%" V8_PTR_PREFIX "d ", get_max_alive_after_gc());
5856
    PrintF("total_marking_time=%.1f ", tracer_.cumulative_marking_duration());
5857
    PrintF("total_sweeping_time=%.1f ", tracer_.cumulative_sweeping_duration());
5858 5859 5860
    PrintF("\n\n");
  }

5861 5862 5863
  if (FLAG_print_max_heap_committed) {
    PrintF("\n");
    PrintF("maximum_committed_by_heap=%" V8_PTR_PREFIX "d ",
5864
           MaximumCommittedMemory());
5865
    PrintF("maximum_committed_by_new_space=%" V8_PTR_PREFIX "d ",
5866
           new_space_.MaximumCommittedMemory());
5867 5868
    PrintF("maximum_committed_by_old_space=%" V8_PTR_PREFIX "d ",
           old_space_->MaximumCommittedMemory());
5869
    PrintF("maximum_committed_by_code_space=%" V8_PTR_PREFIX "d ",
5870
           code_space_->MaximumCommittedMemory());
5871
    PrintF("maximum_committed_by_map_space=%" V8_PTR_PREFIX "d ",
5872
           map_space_->MaximumCommittedMemory());
5873
    PrintF("maximum_committed_by_lo_space=%" V8_PTR_PREFIX "d ",
5874
           lo_space_->MaximumCommittedMemory());
5875 5876 5877
    PrintF("\n\n");
  }

5878 5879 5880 5881
  if (FLAG_verify_predictable) {
    PrintAlloctionsHash();
  }

5882 5883
  memory_reducer_.TearDown();

5884 5885
  TearDownArrayBuffers();

5886
  isolate_->global_handles()->TearDown();
5887

5888
  external_string_table_.TearDown();
5889

5890 5891
  mark_compact_collector()->TearDown();

5892
  new_space_.TearDown();
5893

5894 5895 5896 5897
  if (old_space_ != NULL) {
    old_space_->TearDown();
    delete old_space_;
    old_space_ = NULL;
5898 5899 5900 5901 5902 5903 5904 5905 5906 5907 5908 5909 5910 5911 5912 5913 5914 5915 5916 5917
  }

  if (code_space_ != NULL) {
    code_space_->TearDown();
    delete code_space_;
    code_space_ = NULL;
  }

  if (map_space_ != NULL) {
    map_space_->TearDown();
    delete map_space_;
    map_space_ = NULL;
  }

  if (lo_space_ != NULL) {
    lo_space_->TearDown();
    delete lo_space_;
    lo_space_ = NULL;
  }

5918 5919
  store_buffer()->TearDown();

5920
  isolate_->memory_allocator()->TearDown();
5921 5922 5923 5924 5925 5926 5927

  StrongRootsList* next = NULL;
  for (StrongRootsList* list = strong_roots_list_; list; list = next) {
    next = list->next;
    delete list;
  }
  strong_roots_list_ = NULL;
5928 5929 5930
}


5931
void Heap::AddGCPrologueCallback(v8::Isolate::GCPrologueCallback callback,
5932
                                 GCType gc_type, bool pass_isolate) {
5933
  DCHECK(callback != NULL);
5934
  GCPrologueCallbackPair pair(callback, gc_type, pass_isolate);
5935
  DCHECK(!gc_prologue_callbacks_.Contains(pair));
5936 5937 5938 5939
  return gc_prologue_callbacks_.Add(pair);
}


5940
void Heap::RemoveGCPrologueCallback(v8::Isolate::GCPrologueCallback callback) {
5941
  DCHECK(callback != NULL);
5942 5943 5944 5945 5946 5947 5948 5949 5950 5951
  for (int i = 0; i < gc_prologue_callbacks_.length(); ++i) {
    if (gc_prologue_callbacks_[i].callback == callback) {
      gc_prologue_callbacks_.Remove(i);
      return;
    }
  }
  UNREACHABLE();
}


5952
void Heap::AddGCEpilogueCallback(v8::Isolate::GCEpilogueCallback callback,
5953
                                 GCType gc_type, bool pass_isolate) {
5954
  DCHECK(callback != NULL);
5955
  GCEpilogueCallbackPair pair(callback, gc_type, pass_isolate);
5956
  DCHECK(!gc_epilogue_callbacks_.Contains(pair));
5957 5958 5959 5960
  return gc_epilogue_callbacks_.Add(pair);
}


5961
void Heap::RemoveGCEpilogueCallback(v8::Isolate::GCEpilogueCallback callback) {
5962
  DCHECK(callback != NULL);
5963 5964 5965 5966 5967 5968 5969 5970 5971 5972
  for (int i = 0; i < gc_epilogue_callbacks_.length(); ++i) {
    if (gc_epilogue_callbacks_[i].callback == callback) {
      gc_epilogue_callbacks_.Remove(i);
      return;
    }
  }
  UNREACHABLE();
}


5973
// TODO(ishell): Find a better place for this.
ulan's avatar
ulan committed
5974
void Heap::AddWeakObjectToCodeDependency(Handle<HeapObject> obj,
5975
                                         Handle<DependentCode> dep) {
5976 5977
  DCHECK(!InNewSpace(*obj));
  DCHECK(!InNewSpace(*dep));
ulan's avatar
ulan committed
5978
  Handle<WeakHashTable> table(weak_object_to_code_table(), isolate());
5979
  table = WeakHashTable::Put(table, obj, dep);
ulan's avatar
ulan committed
5980 5981 5982
  if (*table != weak_object_to_code_table())
    set_weak_object_to_code_table(*table);
  DCHECK_EQ(*dep, LookupWeakObjectToCodeDependency(obj));
5983 5984 5985
}


ulan's avatar
ulan committed
5986 5987
DependentCode* Heap::LookupWeakObjectToCodeDependency(Handle<HeapObject> obj) {
  Object* dep = weak_object_to_code_table()->Lookup(obj);
5988 5989 5990 5991 5992
  if (dep->IsDependentCode()) return DependentCode::cast(dep);
  return DependentCode::cast(empty_fixed_array());
}


5993 5994 5995 5996 5997
void Heap::AddRetainedMap(Handle<Map> map) {
  if (FLAG_retain_maps_for_n_gc == 0) return;
  Handle<WeakCell> cell = Map::WeakCellForMap(map);
  Handle<ArrayList> array(retained_maps(), isolate());
  array = ArrayList::Add(
5998 5999
      array, cell, handle(Smi::FromInt(FLAG_retain_maps_for_n_gc), isolate()),
      ArrayList::kReloadLengthAfterAllocation);
6000 6001 6002 6003 6004 6005
  if (*array != retained_maps()) {
    set_retained_maps(*array);
  }
}


6006 6007 6008 6009
void Heap::FatalProcessOutOfMemory(const char* location, bool take_snapshot) {
  v8::internal::V8::FatalProcessOutOfMemory(location, take_snapshot);
}

6010 6011
#ifdef DEBUG

6012
class PrintHandleVisitor : public ObjectVisitor {
6013 6014 6015
 public:
  void VisitPointers(Object** start, Object** end) {
    for (Object** p = start; p < end; p++)
6016
      PrintF("  handle %p to %p\n", reinterpret_cast<void*>(p),
6017
             reinterpret_cast<void*>(*p));
6018 6019 6020
  }
};

6021

6022 6023 6024
void Heap::PrintHandles() {
  PrintF("Handles:\n");
  PrintHandleVisitor v;
6025
  isolate_->handle_scope_implementer()->Iterate(&v);
6026 6027 6028 6029
}

#endif

6030 6031 6032
class CheckHandleCountVisitor : public ObjectVisitor {
 public:
  CheckHandleCountVisitor() : handle_count_(0) {}
6033 6034 6035
  ~CheckHandleCountVisitor() {
    CHECK(handle_count_ < HandleScope::kCheckHandleThreshold);
  }
6036 6037 6038 6039 6040 6041 6042 6043 6044 6045 6046 6047 6048 6049
  void VisitPointers(Object** start, Object** end) {
    handle_count_ += end - start;
  }

 private:
  ptrdiff_t handle_count_;
};


void Heap::CheckHandleCount() {
  CheckHandleCountVisitor v;
  isolate_->handle_scope_implementer()->Iterate(&v);
}

6050

6051 6052 6053
Space* AllSpaces::next() {
  switch (counter_++) {
    case NEW_SPACE:
6054
      return heap_->new_space();
6055 6056
    case OLD_SPACE:
      return heap_->old_space();
6057
    case CODE_SPACE:
6058
      return heap_->code_space();
6059
    case MAP_SPACE:
6060
      return heap_->map_space();
6061
    case LO_SPACE:
6062
      return heap_->lo_space();
6063 6064 6065 6066 6067 6068 6069 6070
    default:
      return NULL;
  }
}


PagedSpace* PagedSpaces::next() {
  switch (counter_++) {
6071 6072
    case OLD_SPACE:
      return heap_->old_space();
6073
    case CODE_SPACE:
6074
      return heap_->code_space();
6075
    case MAP_SPACE:
6076
      return heap_->map_space();
6077 6078 6079 6080 6081 6082 6083 6084
    default:
      return NULL;
  }
}


OldSpace* OldSpaces::next() {
  switch (counter_++) {
6085 6086
    case OLD_SPACE:
      return heap_->old_space();
6087
    case CODE_SPACE:
6088
      return heap_->code_space();
6089 6090 6091 6092 6093 6094
    default:
      return NULL;
  }
}


6095 6096 6097
SpaceIterator::SpaceIterator(Heap* heap)
    : heap_(heap),
      current_space_(FIRST_SPACE),
6098
      iterator_(NULL),
6099
      size_func_(NULL) {}
6100 6101


6102 6103 6104
SpaceIterator::SpaceIterator(Heap* heap, HeapObjectCallback size_func)
    : heap_(heap),
      current_space_(FIRST_SPACE),
6105
      iterator_(NULL),
6106
      size_func_(size_func) {}
6107 6108 6109 6110 6111 6112 6113 6114 6115 6116 6117 6118 6119 6120 6121 6122 6123 6124 6125 6126 6127 6128 6129 6130 6131 6132 6133 6134 6135 6136 6137 6138


SpaceIterator::~SpaceIterator() {
  // Delete active iterator if any.
  delete iterator_;
}


bool SpaceIterator::has_next() {
  // Iterate until no more spaces.
  return current_space_ != LAST_SPACE;
}


ObjectIterator* SpaceIterator::next() {
  if (iterator_ != NULL) {
    delete iterator_;
    iterator_ = NULL;
    // Move to the next space
    current_space_++;
    if (current_space_ > LAST_SPACE) {
      return NULL;
    }
  }

  // Return iterator for the new current space.
  return CreateIterator();
}


// Create an iterator for the space to iterate.
ObjectIterator* SpaceIterator::CreateIterator() {
6139
  DCHECK(iterator_ == NULL);
6140 6141 6142

  switch (current_space_) {
    case NEW_SPACE:
6143
      iterator_ = new SemiSpaceIterator(heap_->new_space(), size_func_);
6144
      break;
6145 6146
    case OLD_SPACE:
      iterator_ = new HeapObjectIterator(heap_->old_space(), size_func_);
6147 6148
      break;
    case CODE_SPACE:
6149
      iterator_ = new HeapObjectIterator(heap_->code_space(), size_func_);
6150 6151
      break;
    case MAP_SPACE:
6152
      iterator_ = new HeapObjectIterator(heap_->map_space(), size_func_);
6153 6154
      break;
    case LO_SPACE:
6155
      iterator_ = new LargeObjectIterator(heap_->lo_space(), size_func_);
6156 6157 6158 6159
      break;
  }

  // Return the newly allocated iterator;
6160
  DCHECK(iterator_ != NULL);
6161 6162 6163 6164
  return iterator_;
}


6165 6166 6167 6168 6169 6170 6171 6172 6173
class HeapObjectsFilter {
 public:
  virtual ~HeapObjectsFilter() {}
  virtual bool SkipObject(HeapObject* object) = 0;
};


class UnreachableObjectsFilter : public HeapObjectsFilter {
 public:
6174
  explicit UnreachableObjectsFilter(Heap* heap) : heap_(heap) {
6175 6176 6177 6178
    MarkReachableObjects();
  }

  ~UnreachableObjectsFilter() {
6179
    heap_->mark_compact_collector()->ClearMarkbits();
6180 6181 6182
  }

  bool SkipObject(HeapObject* object) {
6183
    MarkBit mark_bit = Marking::MarkBitFrom(object);
6184
    return Marking::IsWhite(mark_bit);
6185 6186 6187
  }

 private:
6188
  class MarkingVisitor : public ObjectVisitor {
6189
   public:
6190
    MarkingVisitor() : marking_stack_(10) {}
6191 6192 6193 6194 6195

    void VisitPointers(Object** start, Object** end) {
      for (Object** p = start; p < end; p++) {
        if (!(*p)->IsHeapObject()) continue;
        HeapObject* obj = HeapObject::cast(*p);
6196
        MarkBit mark_bit = Marking::MarkBitFrom(obj);
6197 6198
        if (Marking::IsWhite(mark_bit)) {
          Marking::WhiteToBlack(mark_bit);
6199
          marking_stack_.Add(obj);
6200 6201 6202 6203
        }
      }
    }

6204 6205 6206 6207 6208
    void TransitiveClosure() {
      while (!marking_stack_.is_empty()) {
        HeapObject* obj = marking_stack_.RemoveLast();
        obj->Iterate(this);
      }
6209 6210 6211
    }

   private:
6212
    List<HeapObject*> marking_stack_;
6213 6214
  };

6215 6216
  void MarkReachableObjects() {
    MarkingVisitor visitor;
6217
    heap_->IterateRoots(&visitor, VISIT_ALL);
6218
    visitor.TransitiveClosure();
6219 6220
  }

6221
  Heap* heap_;
6222
  DisallowHeapAllocation no_allocation_;
6223 6224 6225
};


6226
HeapIterator::HeapIterator(Heap* heap)
6227 6228 6229
    : make_heap_iterable_helper_(heap),
      no_heap_allocation_(),
      heap_(heap),
6230
      filtering_(HeapIterator::kNoFiltering),
6231 6232 6233 6234 6235
      filter_(NULL) {
  Init();
}


6236 6237
HeapIterator::HeapIterator(Heap* heap,
                           HeapIterator::HeapObjectsFiltering filtering)
6238 6239 6240
    : make_heap_iterable_helper_(heap),
      no_heap_allocation_(),
      heap_(heap),
6241
      filtering_(filtering),
6242
      filter_(NULL) {
6243 6244 6245 6246
  Init();
}


6247
HeapIterator::~HeapIterator() { Shutdown(); }
6248 6249 6250 6251


void HeapIterator::Init() {
  // Start the iteration.
6252
  space_iterator_ = new SpaceIterator(heap_);
6253 6254
  switch (filtering_) {
    case kFilterUnreachable:
6255
      filter_ = new UnreachableObjectsFilter(heap_);
6256 6257 6258
      break;
    default:
      break;
6259
  }
6260 6261 6262 6263 6264
  object_iterator_ = space_iterator_->next();
}


void HeapIterator::Shutdown() {
6265
#ifdef DEBUG
6266
  // Assert that in filtering mode we have iterated through all
6267
  // objects. Otherwise, heap will be left in an inconsistent state.
6268
  if (filtering_ != kNoFiltering) {
6269
    DCHECK(object_iterator_ == NULL);
6270 6271
  }
#endif
6272 6273 6274 6275
  // Make sure the last iterator is deallocated.
  delete space_iterator_;
  space_iterator_ = NULL;
  object_iterator_ = NULL;
6276 6277
  delete filter_;
  filter_ = NULL;
6278 6279 6280
}


6281
HeapObject* HeapIterator::next() {
6282 6283 6284
  if (filter_ == NULL) return NextObject();

  HeapObject* obj = NextObject();
6285
  while (obj != NULL && filter_->SkipObject(obj)) obj = NextObject();
6286 6287 6288 6289 6290
  return obj;
}


HeapObject* HeapIterator::NextObject() {
6291
  // No iterator means we are done.
6292
  if (object_iterator_ == NULL) return NULL;
6293

6294
  if (HeapObject* obj = object_iterator_->next_object()) {
6295
    // If the current iterator has more objects we are fine.
6296
    return obj;
6297 6298 6299 6300
  } else {
    // Go though the spaces looking for one that has objects.
    while (space_iterator_->has_next()) {
      object_iterator_ = space_iterator_->next();
6301 6302
      if (HeapObject* obj = object_iterator_->next_object()) {
        return obj;
6303 6304 6305 6306 6307
      }
    }
  }
  // Done with the last space.
  object_iterator_ = NULL;
6308
  return NULL;
6309 6310 6311 6312 6313 6314 6315 6316 6317 6318
}


void HeapIterator::reset() {
  // Restart the iterator.
  Shutdown();
  Init();
}


6319
#ifdef DEBUG
6320

6321
Object* const PathTracer::kAnyGlobalObject = NULL;
6322

6323
class PathTracer::MarkVisitor : public ObjectVisitor {
6324 6325 6326 6327 6328
 public:
  explicit MarkVisitor(PathTracer* tracer) : tracer_(tracer) {}
  void VisitPointers(Object** start, Object** end) {
    // Scan all HeapObject pointers in [start, end)
    for (Object** p = start; !tracer_->found() && (p < end); p++) {
6329
      if ((*p)->IsHeapObject()) tracer_->MarkRecursively(p, this);
6330 6331
    }
  }
6332

6333 6334 6335
 private:
  PathTracer* tracer_;
};
6336 6337


6338
class PathTracer::UnmarkVisitor : public ObjectVisitor {
6339
 public:
6340
  explicit UnmarkVisitor(PathTracer* tracer) : tracer_(tracer) {}
6341
  void VisitPointers(Object** start, Object** end) {
6342
    // Scan all HeapObject pointers in [start, end)
6343
    for (Object** p = start; p < end; p++) {
6344
      if ((*p)->IsHeapObject()) tracer_->UnmarkRecursively(p, this);
6345 6346
    }
  }
6347 6348 6349

 private:
  PathTracer* tracer_;
6350 6351 6352
};


6353 6354 6355 6356 6357 6358 6359 6360 6361 6362 6363 6364 6365 6366 6367 6368 6369 6370 6371
void PathTracer::VisitPointers(Object** start, Object** end) {
  bool done = ((what_to_find_ == FIND_FIRST) && found_target_);
  // Visit all HeapObject pointers in [start, end)
  for (Object** p = start; !done && (p < end); p++) {
    if ((*p)->IsHeapObject()) {
      TracePathFrom(p);
      done = ((what_to_find_ == FIND_FIRST) && found_target_);
    }
  }
}


void PathTracer::Reset() {
  found_target_ = false;
  object_stack_.Clear();
}


void PathTracer::TracePathFrom(Object** root) {
6372
  DCHECK((search_target_ == kAnyGlobalObject) ||
6373 6374
         search_target_->IsHeapObject());
  found_target_in_trace_ = false;
6375
  Reset();
6376 6377 6378 6379 6380 6381 6382 6383 6384 6385 6386

  MarkVisitor mark_visitor(this);
  MarkRecursively(root, &mark_visitor);

  UnmarkVisitor unmark_visitor(this);
  UnmarkRecursively(root, &unmark_visitor);

  ProcessResults();
}


6387 6388
static bool SafeIsNativeContext(HeapObject* obj) {
  return obj->map() == obj->GetHeap()->raw_unchecked_native_context_map();
6389 6390 6391
}


6392
void PathTracer::MarkRecursively(Object** p, MarkVisitor* mark_visitor) {
6393 6394 6395 6396
  if (!(*p)->IsHeapObject()) return;

  HeapObject* obj = HeapObject::cast(*p);

ishell@chromium.org's avatar
ishell@chromium.org committed
6397 6398
  MapWord map_word = obj->map_word();
  if (!map_word.ToMap()->IsHeapObject()) return;  // visited before
6399

6400 6401 6402 6403 6404 6405
  if (found_target_in_trace_) return;  // stop if target found
  object_stack_.Add(obj);
  if (((search_target_ == kAnyGlobalObject) && obj->IsJSGlobalObject()) ||
      (obj == search_target_)) {
    found_target_in_trace_ = true;
    found_target_ = true;
6406 6407 6408
    return;
  }

6409
  bool is_native_context = SafeIsNativeContext(obj);
6410

6411
  // not visited yet
ishell@chromium.org's avatar
ishell@chromium.org committed
6412
  Map* map = Map::cast(map_word.ToMap());
6413

ishell@chromium.org's avatar
ishell@chromium.org committed
6414 6415 6416
  MapWord marked_map_word =
      MapWord::FromRawValue(obj->map_word().ToRawValue() + kMarkTag);
  obj->set_map_word(marked_map_word);
6417

6418
  // Scan the object body.
6419
  if (is_native_context && (visit_mode_ == VISIT_ONLY_STRONG)) {
6420
    // This is specialized to scan Context's properly.
6421 6422 6423 6424 6425
    Object** start =
        reinterpret_cast<Object**>(obj->address() + Context::kHeaderSize);
    Object** end =
        reinterpret_cast<Object**>(obj->address() + Context::kHeaderSize +
                                   Context::FIRST_WEAK_SLOT * kPointerSize);
6426
    mark_visitor->VisitPointers(start, end);
6427
  } else {
ishell@chromium.org's avatar
ishell@chromium.org committed
6428
    obj->IterateBody(map->instance_type(), obj->SizeFromMap(map), mark_visitor);
6429
  }
6430

6431 6432
  // Scan the map after the body because the body is a lot more interesting
  // when doing leak detection.
ishell@chromium.org's avatar
ishell@chromium.org committed
6433
  MarkRecursively(reinterpret_cast<Object**>(&map), mark_visitor);
6434

ishell@chromium.org's avatar
ishell@chromium.org committed
6435
  if (!found_target_in_trace_) {  // don't pop if found the target
6436
    object_stack_.RemoveLast();
ishell@chromium.org's avatar
ishell@chromium.org committed
6437
  }
6438 6439 6440
}


6441
void PathTracer::UnmarkRecursively(Object** p, UnmarkVisitor* unmark_visitor) {
6442 6443 6444 6445
  if (!(*p)->IsHeapObject()) return;

  HeapObject* obj = HeapObject::cast(*p);

ishell@chromium.org's avatar
ishell@chromium.org committed
6446 6447
  MapWord map_word = obj->map_word();
  if (map_word.ToMap()->IsHeapObject()) return;  // unmarked already
6448

ishell@chromium.org's avatar
ishell@chromium.org committed
6449 6450 6451
  MapWord unmarked_map_word =
      MapWord::FromRawValue(map_word.ToRawValue() - kMarkTag);
  obj->set_map_word(unmarked_map_word);
6452

ishell@chromium.org's avatar
ishell@chromium.org committed
6453
  Map* map = Map::cast(unmarked_map_word.ToMap());
6454

ishell@chromium.org's avatar
ishell@chromium.org committed
6455
  UnmarkRecursively(reinterpret_cast<Object**>(&map), unmark_visitor);
6456

ishell@chromium.org's avatar
ishell@chromium.org committed
6457
  obj->IterateBody(map->instance_type(), obj->SizeFromMap(map), unmark_visitor);
6458 6459 6460
}


6461 6462
void PathTracer::ProcessResults() {
  if (found_target_) {
6463 6464 6465 6466
    OFStream os(stdout);
    os << "=====================================\n"
       << "====        Path to object       ====\n"
       << "=====================================\n\n";
6467

6468
    DCHECK(!object_stack_.is_empty());
6469
    for (int i = 0; i < object_stack_.length(); i++) {
6470 6471
      if (i > 0) os << "\n     |\n     |\n     V\n\n";
      object_stack_[i]->Print(os);
6472
    }
6473
    os << "=====================================\n";
6474 6475 6476 6477
  }
}


6478 6479 6480 6481 6482 6483 6484 6485 6486
// Triggers a depth-first traversal of reachable objects from one
// given root object and finds a path to a specific heap object and
// prints it.
void Heap::TracePathToObjectFrom(Object* target, Object* root) {
  PathTracer tracer(target, PathTracer::FIND_ALL, VISIT_ALL);
  tracer.VisitPointer(&root);
}


6487 6488
// Triggers a depth-first traversal of reachable objects from roots
// and finds a path to a specific heap object and prints it.
6489
void Heap::TracePathToObject(Object* target) {
6490 6491
  PathTracer tracer(target, PathTracer::FIND_ALL, VISIT_ALL);
  IterateRoots(&tracer, VISIT_ONLY_STRONG);
6492 6493 6494 6495 6496 6497 6498
}


// Triggers a depth-first traversal of reachable objects from roots
// and finds a path to any global object and prints it. Useful for
// determining the source for leaks of global objects.
void Heap::TracePathToGlobal() {
6499
  PathTracer tracer(PathTracer::kAnyGlobalObject, PathTracer::FIND_ALL,
6500 6501
                    VISIT_ALL);
  IterateRoots(&tracer, VISIT_ONLY_STRONG);
6502 6503 6504 6505
}
#endif


6506 6507 6508
void Heap::UpdateCumulativeGCStatistics(double duration,
                                        double spent_in_mutator,
                                        double marking_time) {
6509 6510 6511
  if (FLAG_print_cumulative_gc_stat) {
    total_gc_time_ms_ += duration;
    max_gc_pause_ = Max(max_gc_pause_, duration);
6512
    max_alive_after_gc_ = Max(max_alive_after_gc_, SizeOfObjects());
6513
    min_in_mutator_ = Min(min_in_mutator_, spent_in_mutator);
6514 6515 6516 6517 6518 6519 6520 6521
  } else if (FLAG_trace_gc_verbose) {
    total_gc_time_ms_ += duration;
  }

  marking_time_ += marking_time;
}


6522 6523
int KeyedLookupCache::Hash(Handle<Map> map, Handle<Name> name) {
  DisallowHeapAllocation no_gc;
6524 6525
  // Uses only lower 32 bits if pointers are larger.
  uintptr_t addr_hash =
6526
      static_cast<uint32_t>(reinterpret_cast<uintptr_t>(*map)) >> kMapHashShift;
6527
  return static_cast<uint32_t>((addr_hash ^ name->Hash()) & kCapacityMask);
6528 6529 6530
}


6531 6532
int KeyedLookupCache::Lookup(Handle<Map> map, Handle<Name> name) {
  DisallowHeapAllocation no_gc;
6533
  int index = (Hash(map, name) & kHashMask);
6534 6535
  for (int i = 0; i < kEntriesPerBucket; i++) {
    Key& key = keys_[index + i];
6536
    if ((key.map == *map) && key.name->Equals(*name)) {
6537 6538
      return field_offsets_[index + i];
    }
6539
  }
6540
  return kNotFound;
6541 6542 6543
}


6544
void KeyedLookupCache::Update(Handle<Map> map, Handle<Name> name,
6545 6546
                              int field_offset) {
  DisallowHeapAllocation no_gc;
6547
  if (!name->IsUniqueName()) {
6548 6549
    if (!StringTable::InternalizeStringIfExists(
             name->GetIsolate(), Handle<String>::cast(name)).ToHandle(&name)) {
6550
      return;
6551
    }
6552
  }
6553 6554
  // This cache is cleared only between mark compact passes, so we expect the
  // cache to only contain old space names.
6555
  DCHECK(!map->GetIsolate()->heap()->InNewSpace(*name));
6556

6557 6558 6559
  int index = (Hash(map, name) & kHashMask);
  // After a GC there will be free slots, so we use them in order (this may
  // help to get the most frequently used one in position 0).
6560
  for (int i = 0; i < kEntriesPerBucket; i++) {
6561
    Key& key = keys_[index];
6562 6563
    Object* free_entry_indicator = NULL;
    if (key.map == free_entry_indicator) {
6564 6565
      key.map = *map;
      key.name = *name;
6566 6567 6568
      field_offsets_[index + i] = field_offset;
      return;
    }
6569
  }
6570 6571 6572 6573 6574 6575 6576 6577 6578 6579 6580
  // No free entry found in this bucket, so we move them all down one and
  // put the new entry at position zero.
  for (int i = kEntriesPerBucket - 1; i > 0; i--) {
    Key& key = keys_[index + i];
    Key& key2 = keys_[index + i - 1];
    key = key2;
    field_offsets_[index + i] = field_offsets_[index + i - 1];
  }

  // Write the new first entry.
  Key& key = keys_[index];
6581 6582
  key.map = *map;
  key.name = *name;
6583
  field_offsets_[index] = field_offset;
6584 6585 6586 6587 6588 6589 6590
}


void KeyedLookupCache::Clear() {
  for (int index = 0; index < kLength; index++) keys_[index].map = NULL;
}

6591

6592
void DescriptorLookupCache::Clear() {
6593
  for (int index = 0; index < kLength; index++) keys_[index].source = NULL;
6594 6595 6596
}


6597 6598 6599
void ExternalStringTable::CleanUp() {
  int last = 0;
  for (int i = 0; i < new_space_strings_.length(); ++i) {
6600
    if (new_space_strings_[i] == heap_->the_hole_value()) {
6601 6602
      continue;
    }
6603
    DCHECK(new_space_strings_[i]->IsExternalString());
6604
    if (heap_->InNewSpace(new_space_strings_[i])) {
6605 6606 6607 6608 6609 6610
      new_space_strings_[last++] = new_space_strings_[i];
    } else {
      old_space_strings_.Add(new_space_strings_[i]);
    }
  }
  new_space_strings_.Rewind(last);
6611 6612
  new_space_strings_.Trim();

6613 6614
  last = 0;
  for (int i = 0; i < old_space_strings_.length(); ++i) {
6615
    if (old_space_strings_[i] == heap_->the_hole_value()) {
6616 6617
      continue;
    }
6618 6619
    DCHECK(old_space_strings_[i]->IsExternalString());
    DCHECK(!heap_->InNewSpace(old_space_strings_[i]));
6620 6621 6622
    old_space_strings_[last++] = old_space_strings_[i];
  }
  old_space_strings_.Rewind(last);
6623
  old_space_strings_.Trim();
6624
#ifdef VERIFY_HEAP
6625 6626 6627
  if (FLAG_verify_heap) {
    Verify();
  }
6628
#endif
6629 6630 6631 6632
}


void ExternalStringTable::TearDown() {
6633 6634 6635
  for (int i = 0; i < new_space_strings_.length(); ++i) {
    heap_->FinalizeExternalString(ExternalString::cast(new_space_strings_[i]));
  }
6636
  new_space_strings_.Free();
6637 6638 6639
  for (int i = 0; i < old_space_strings_.length(); ++i) {
    heap_->FinalizeExternalString(ExternalString::cast(old_space_strings_[i]));
  }
6640 6641 6642 6643
  old_space_strings_.Free();
}


6644 6645 6646 6647 6648 6649 6650 6651 6652 6653 6654 6655 6656 6657 6658 6659 6660 6661 6662 6663 6664
void Heap::QueueMemoryChunkForFree(MemoryChunk* chunk) {
  chunk->set_next_chunk(chunks_queued_for_free_);
  chunks_queued_for_free_ = chunk;
}


void Heap::FreeQueuedChunks() {
  if (chunks_queued_for_free_ == NULL) return;
  MemoryChunk* next;
  MemoryChunk* chunk;
  for (chunk = chunks_queued_for_free_; chunk != NULL; chunk = next) {
    next = chunk->next_chunk();
    chunk->SetFlag(MemoryChunk::ABOUT_TO_BE_FREED);

    if (chunk->owner()->identity() == LO_SPACE) {
      // StoreBuffer::Filter relies on MemoryChunk::FromAnyPointerAddress.
      // If FromAnyPointerAddress encounters a slot that belongs to a large
      // chunk queued for deletion it will fail to find the chunk because
      // it try to perform a search in the list of pages owned by of the large
      // object space and queued chunks were detached from that list.
      // To work around this we split large chunk into normal kPageSize aligned
6665 6666
      // pieces and initialize size, owner and flags field of every piece.
      // If FromAnyPointerAddress encounters a slot that belongs to one of
6667
      // these smaller pieces it will treat it as a slot on a normal Page.
6668
      Address chunk_end = chunk->address() + chunk->size();
6669 6670
      MemoryChunk* inner =
          MemoryChunk::FromAddress(chunk->address() + Page::kPageSize);
6671
      MemoryChunk* inner_last = MemoryChunk::FromAddress(chunk_end - 1);
6672 6673
      while (inner <= inner_last) {
        // Size of a large chunk is always a multiple of
6674 6675
        // OS::AllocateAlignment() so there is always
        // enough space for a fake MemoryChunk header.
6676 6677 6678 6679
        Address area_end = Min(inner->address() + Page::kPageSize, chunk_end);
        // Guard against overflow.
        if (area_end < inner->address()) area_end = chunk_end;
        inner->SetArea(inner->address(), area_end);
6680
        inner->set_size(Page::kPageSize);
6681 6682
        inner->set_owner(lo_space());
        inner->SetFlag(MemoryChunk::ABOUT_TO_BE_FREED);
6683
        inner = MemoryChunk::FromAddress(inner->address() + Page::kPageSize);
6684 6685 6686
      }
    }
  }
6687
  isolate_->heap()->store_buffer()->Compact();
6688 6689 6690 6691 6692 6693 6694 6695
  isolate_->heap()->store_buffer()->Filter(MemoryChunk::ABOUT_TO_BE_FREED);
  for (chunk = chunks_queued_for_free_; chunk != NULL; chunk = next) {
    next = chunk->next_chunk();
    isolate_->memory_allocator()->Free(chunk);
  }
  chunks_queued_for_free_ = NULL;
}

6696 6697 6698 6699 6700 6701 6702 6703 6704 6705 6706 6707 6708 6709 6710

void Heap::RememberUnmappedPage(Address page, bool compacted) {
  uintptr_t p = reinterpret_cast<uintptr_t>(page);
  // Tag the page pointer to make it findable in the dump file.
  if (compacted) {
    p ^= 0xc1ead & (Page::kPageSize - 1);  // Cleared.
  } else {
    p ^= 0x1d1ed & (Page::kPageSize - 1);  // I died.
  }
  remembered_unmapped_pages_[remembered_unmapped_pages_index_] =
      reinterpret_cast<Address>(p);
  remembered_unmapped_pages_index_++;
  remembered_unmapped_pages_index_ %= kRememberedUnmappedPages;
}

6711 6712 6713 6714 6715 6716 6717 6718 6719 6720 6721

void Heap::ClearObjectStats(bool clear_last_time_stats) {
  memset(object_counts_, 0, sizeof(object_counts_));
  memset(object_sizes_, 0, sizeof(object_sizes_));
  if (clear_last_time_stats) {
    memset(object_counts_last_time_, 0, sizeof(object_counts_last_time_));
    memset(object_sizes_last_time_, 0, sizeof(object_sizes_last_time_));
  }
}


6722 6723 6724
static base::LazyMutex object_stats_mutex = LAZY_MUTEX_INITIALIZER;


6725
void Heap::TraceObjectStat(const char* name, int count, int size, double time) {
6726 6727 6728
  PrintIsolate(isolate_,
               "heap:%p, time:%f, gc:%d, type:%s, count:%d, size:%d\n",
               static_cast<void*>(this), time, ms_count_, name, count, size);
6729 6730 6731 6732 6733 6734 6735 6736 6737
}


void Heap::TraceObjectStats() {
  base::LockGuard<base::Mutex> lock_guard(object_stats_mutex.Pointer());
  int index;
  int count;
  int size;
  int total_size = 0;
6738
  double time = isolate_->time_millis_since_init();
6739 6740 6741 6742
#define TRACE_OBJECT_COUNT(name)                     \
  count = static_cast<int>(object_counts_[name]);    \
  size = static_cast<int>(object_sizes_[name]) / KB; \
  total_size += size;                                \
6743
  TraceObjectStat(#name, count, size, time);
6744 6745 6746 6747 6748 6749
  INSTANCE_TYPE_LIST(TRACE_OBJECT_COUNT)
#undef TRACE_OBJECT_COUNT
#define TRACE_OBJECT_COUNT(name)                      \
  index = FIRST_CODE_KIND_SUB_TYPE + Code::name;      \
  count = static_cast<int>(object_counts_[index]);    \
  size = static_cast<int>(object_sizes_[index]) / KB; \
6750
  TraceObjectStat("*CODE_" #name, count, size, time);
6751 6752 6753 6754 6755 6756
  CODE_KIND_LIST(TRACE_OBJECT_COUNT)
#undef TRACE_OBJECT_COUNT
#define TRACE_OBJECT_COUNT(name)                      \
  index = FIRST_FIXED_ARRAY_SUB_TYPE + name;          \
  count = static_cast<int>(object_counts_[index]);    \
  size = static_cast<int>(object_sizes_[index]) / KB; \
6757
  TraceObjectStat("*FIXED_ARRAY_" #name, count, size, time);
6758 6759 6760 6761 6762 6763 6764
  FIXED_ARRAY_SUB_INSTANCE_TYPE_LIST(TRACE_OBJECT_COUNT)
#undef TRACE_OBJECT_COUNT
#define TRACE_OBJECT_COUNT(name)                                              \
  index =                                                                     \
      FIRST_CODE_AGE_SUB_TYPE + Code::k##name##CodeAge - Code::kFirstCodeAge; \
  count = static_cast<int>(object_counts_[index]);                            \
  size = static_cast<int>(object_sizes_[index]) / KB;                         \
6765
  TraceObjectStat("*CODE_AGE_" #name, count, size, time);
6766 6767 6768
  CODE_AGE_LIST_COMPLETE(TRACE_OBJECT_COUNT)
#undef TRACE_OBJECT_COUNT
}
6769 6770 6771


void Heap::CheckpointObjectStats() {
6772
  base::LockGuard<base::Mutex> lock_guard(object_stats_mutex.Pointer());
6773
  Counters* counters = isolate()->counters();
6774 6775 6776 6777 6778 6779 6780 6781
#define ADJUST_LAST_TIME_OBJECT_COUNT(name)              \
  counters->count_of_##name()->Increment(                \
      static_cast<int>(object_counts_[name]));           \
  counters->count_of_##name()->Decrement(                \
      static_cast<int>(object_counts_last_time_[name])); \
  counters->size_of_##name()->Increment(                 \
      static_cast<int>(object_sizes_[name]));            \
  counters->size_of_##name()->Decrement(                 \
6782
      static_cast<int>(object_sizes_last_time_[name]));
6783 6784
  INSTANCE_TYPE_LIST(ADJUST_LAST_TIME_OBJECT_COUNT)
#undef ADJUST_LAST_TIME_OBJECT_COUNT
danno@chromium.org's avatar
danno@chromium.org committed
6785 6786 6787 6788 6789 6790 6791 6792 6793 6794 6795
  int index;
#define ADJUST_LAST_TIME_OBJECT_COUNT(name)               \
  index = FIRST_CODE_KIND_SUB_TYPE + Code::name;          \
  counters->count_of_CODE_TYPE_##name()->Increment(       \
      static_cast<int>(object_counts_[index]));           \
  counters->count_of_CODE_TYPE_##name()->Decrement(       \
      static_cast<int>(object_counts_last_time_[index])); \
  counters->size_of_CODE_TYPE_##name()->Increment(        \
      static_cast<int>(object_sizes_[index]));            \
  counters->size_of_CODE_TYPE_##name()->Decrement(        \
      static_cast<int>(object_sizes_last_time_[index]));
6796 6797
  CODE_KIND_LIST(ADJUST_LAST_TIME_OBJECT_COUNT)
#undef ADJUST_LAST_TIME_OBJECT_COUNT
6798 6799 6800 6801 6802 6803 6804 6805 6806 6807 6808 6809
#define ADJUST_LAST_TIME_OBJECT_COUNT(name)               \
  index = FIRST_FIXED_ARRAY_SUB_TYPE + name;              \
  counters->count_of_FIXED_ARRAY_##name()->Increment(     \
      static_cast<int>(object_counts_[index]));           \
  counters->count_of_FIXED_ARRAY_##name()->Decrement(     \
      static_cast<int>(object_counts_last_time_[index])); \
  counters->size_of_FIXED_ARRAY_##name()->Increment(      \
      static_cast<int>(object_sizes_[index]));            \
  counters->size_of_FIXED_ARRAY_##name()->Decrement(      \
      static_cast<int>(object_sizes_last_time_[index]));
  FIXED_ARRAY_SUB_INSTANCE_TYPE_LIST(ADJUST_LAST_TIME_OBJECT_COUNT)
#undef ADJUST_LAST_TIME_OBJECT_COUNT
6810 6811 6812 6813 6814 6815 6816 6817 6818 6819
#define ADJUST_LAST_TIME_OBJECT_COUNT(name)                                   \
  index =                                                                     \
      FIRST_CODE_AGE_SUB_TYPE + Code::k##name##CodeAge - Code::kFirstCodeAge; \
  counters->count_of_CODE_AGE_##name()->Increment(                            \
      static_cast<int>(object_counts_[index]));                               \
  counters->count_of_CODE_AGE_##name()->Decrement(                            \
      static_cast<int>(object_counts_last_time_[index]));                     \
  counters->size_of_CODE_AGE_##name()->Increment(                             \
      static_cast<int>(object_sizes_[index]));                                \
  counters->size_of_CODE_AGE_##name()->Decrement(                             \
6820
      static_cast<int>(object_sizes_last_time_[index]));
6821
  CODE_AGE_LIST_COMPLETE(ADJUST_LAST_TIME_OBJECT_COUNT)
6822
#undef ADJUST_LAST_TIME_OBJECT_COUNT
6823

6824 6825
  MemCopy(object_counts_last_time_, object_counts_, sizeof(object_counts_));
  MemCopy(object_sizes_last_time_, object_sizes_, sizeof(object_sizes_));
6826 6827
  ClearObjectStats();
}
6828 6829 6830 6831 6832 6833 6834 6835 6836 6837 6838 6839 6840 6841 6842 6843 6844 6845 6846 6847 6848 6849 6850 6851 6852 6853 6854 6855 6856


void Heap::RegisterStrongRoots(Object** start, Object** end) {
  StrongRootsList* list = new StrongRootsList();
  list->next = strong_roots_list_;
  list->start = start;
  list->end = end;
  strong_roots_list_ = list;
}


void Heap::UnregisterStrongRoots(Object** start) {
  StrongRootsList* prev = NULL;
  StrongRootsList* list = strong_roots_list_;
  while (list != nullptr) {
    StrongRootsList* next = list->next;
    if (list->start == start) {
      if (prev) {
        prev->next = next;
      } else {
        strong_roots_list_ = next;
      }
      delete list;
    } else {
      prev = list;
    }
    list = next;
  }
}
6857 6858 6859 6860 6861 6862 6863 6864 6865 6866 6867 6868 6869 6870 6871 6872 6873 6874 6875 6876 6877 6878 6879 6880 6881 6882 6883 6884 6885 6886 6887 6888 6889 6890 6891 6892 6893 6894


bool Heap::GetObjectTypeName(size_t index, const char** object_type,
                             const char** object_sub_type) {
  if (index >= OBJECT_STATS_COUNT) return false;

  switch (static_cast<int>(index)) {
#define COMPARE_AND_RETURN_NAME(name) \
  case name:                          \
    *object_type = #name;             \
    *object_sub_type = "";            \
    return true;
    INSTANCE_TYPE_LIST(COMPARE_AND_RETURN_NAME)
#undef COMPARE_AND_RETURN_NAME
#define COMPARE_AND_RETURN_NAME(name)         \
  case FIRST_CODE_KIND_SUB_TYPE + Code::name: \
    *object_type = "CODE_TYPE";               \
    *object_sub_type = "CODE_KIND/" #name;    \
    return true;
    CODE_KIND_LIST(COMPARE_AND_RETURN_NAME)
#undef COMPARE_AND_RETURN_NAME
#define COMPARE_AND_RETURN_NAME(name)     \
  case FIRST_FIXED_ARRAY_SUB_TYPE + name: \
    *object_type = "FIXED_ARRAY_TYPE";    \
    *object_sub_type = #name;             \
    return true;
    FIXED_ARRAY_SUB_INSTANCE_TYPE_LIST(COMPARE_AND_RETURN_NAME)
#undef COMPARE_AND_RETURN_NAME
#define COMPARE_AND_RETURN_NAME(name)                                          \
  case FIRST_CODE_AGE_SUB_TYPE + Code::k##name##CodeAge - Code::kFirstCodeAge: \
    *object_type = "CODE_TYPE";                                                \
    *object_sub_type = "CODE_AGE/" #name;                                      \
    return true;
    CODE_AGE_LIST_COMPLETE(COMPARE_AND_RETURN_NAME)
#undef COMPARE_AND_RETURN_NAME
  }
  return false;
}
6895 6896
}  // namespace internal
}  // namespace v8