// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "v8.h"

#include "macro-assembler.h"
#include "mark-compact.h"
#include "platform.h"

namespace v8 {
namespace internal {


// ----------------------------------------------------------------------------
// HeapObjectIterator

HeapObjectIterator::HeapObjectIterator(PagedSpace* space) {
  // You can't actually iterate over the anchor page.  It is not a real page,
  // just an anchor for the doubly linked page list.  Initialize as if we have
  // reached the end of the anchor page, then the first iteration will move on
  // to the first page.
  Initialize(space,
             NULL,
             NULL,
             kAllPagesInSpace,
             NULL);
}


HeapObjectIterator::HeapObjectIterator(PagedSpace* space,
                                       HeapObjectCallback size_func) {
  // You can't actually iterate over the anchor page.  It is not a real page,
  // just an anchor for the doubly linked page list.  Initialize the current
  // address and end as NULL, then the first iteration will move on
  // to the first page.
  Initialize(space,
             NULL,
             NULL,
             kAllPagesInSpace,
             size_func);
}


HeapObjectIterator::HeapObjectIterator(Page* page,
                                       HeapObjectCallback size_func) {
  Space* owner = page->owner();
  ASSERT(owner == page->heap()->old_pointer_space() ||
         owner == page->heap()->old_data_space() ||
         owner == page->heap()->map_space() ||
         owner == page->heap()->cell_space() ||
         owner == page->heap()->code_space());
  Initialize(reinterpret_cast<PagedSpace*>(owner),
             page->area_start(),
             page->area_end(),
             kOnePageOnly,
             size_func);
  ASSERT(page->WasSweptPrecisely());
}


void HeapObjectIterator::Initialize(PagedSpace* space,
                                    Address cur, Address end,
                                    HeapObjectIterator::PageMode mode,
                                    HeapObjectCallback size_f) {
  // Check that we actually can iterate this space.
  ASSERT(!space->was_swept_conservatively());

  space_ = space;
  cur_addr_ = cur;
  cur_end_ = end;
  page_mode_ = mode;
  size_func_ = size_f;
}


// We have hit the end of the current page of objects and should advance to
// the next page.
bool HeapObjectIterator::AdvanceToNextPage() {
  ASSERT(cur_addr_ == cur_end_);
  if (page_mode_ == kOnePageOnly) return false;
  Page* cur_page;
  if (cur_addr_ == NULL) {
    cur_page = space_->anchor();
  } else {
    cur_page = Page::FromAddress(cur_addr_ - 1);
    ASSERT(cur_addr_ == cur_page->area_end());
  }
  cur_page = cur_page->next_page();
  if (cur_page == space_->anchor()) return false;
  cur_addr_ = cur_page->area_start();
  cur_end_ = cur_page->area_end();
  ASSERT(cur_page->WasSweptPrecisely());
  return true;
}


// -----------------------------------------------------------------------------
// CodeRange


CodeRange::CodeRange(Isolate* isolate)
    : isolate_(isolate),
      code_range_(NULL),
      free_list_(0),
      allocation_list_(0),
      current_allocation_block_index_(0) {
}


bool CodeRange::SetUp(const size_t requested) {
  ASSERT(code_range_ == NULL);

  code_range_ = new VirtualMemory(requested);
  CHECK(code_range_ != NULL);
  if (!code_range_->IsReserved()) {
    delete code_range_;
    code_range_ = NULL;
    return false;
  }

  // We are sure that we have mapped a block of requested addresses.
  ASSERT(code_range_->size() == requested);
  LOG(isolate_, NewEvent("CodeRange", code_range_->address(), requested));
  Address base = reinterpret_cast<Address>(code_range_->address());
  Address aligned_base =
      RoundUp(reinterpret_cast<Address>(code_range_->address()),
              MemoryChunk::kAlignment);
  size_t size = code_range_->size() - (aligned_base - base);
  allocation_list_.Add(FreeBlock(aligned_base, size));
  current_allocation_block_index_ = 0;
  return true;
}


int CodeRange::CompareFreeBlockAddress(const FreeBlock* left,
                                       const FreeBlock* right) {
  // The entire point of CodeRange is that the difference between two
  // addresses in the range can be represented as a signed 32-bit int,
  // so the cast is semantically correct.
  return static_cast<int>(left->start - right->start);
}


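// Advance current_allocation_block_index_ to an allocation block that can hold
// at least 'requested' bytes.  If none is left, merge the free list back into
// the allocation list, coalescing adjacent blocks, and search again; if the
// code range is still too fragmented, fail with a fatal out-of-memory error.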
void CodeRange::GetNextAllocationBlock(size_t requested) {
  for (current_allocation_block_index_++;
       current_allocation_block_index_ < allocation_list_.length();
       current_allocation_block_index_++) {
    if (requested <= allocation_list_[current_allocation_block_index_].size) {
      return;  // Found a large enough allocation block.
    }
  }

  // Sort and merge the free blocks on the free list and the allocation list.
  free_list_.AddAll(allocation_list_);
  allocation_list_.Clear();
  free_list_.Sort(&CompareFreeBlockAddress);
  for (int i = 0; i < free_list_.length();) {
    FreeBlock merged = free_list_[i];
    i++;
    // Add adjacent free blocks to the current merged block.
    while (i < free_list_.length() &&
           free_list_[i].start == merged.start + merged.size) {
      merged.size += free_list_[i].size;
      i++;
    }
    if (merged.size > 0) {
      allocation_list_.Add(merged);
    }
  }
  free_list_.Clear();

  for (current_allocation_block_index_ = 0;
       current_allocation_block_index_ < allocation_list_.length();
       current_allocation_block_index_++) {
    if (requested <= allocation_list_[current_allocation_block_index_].size) {
      return;  // Found a large enough allocation block.
    }
  }

  // Code range is full or too fragmented.
  V8::FatalProcessOutOfMemory("CodeRange::GetNextAllocationBlock");
}


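// Allocate 'requested_size' bytes (rounded up to MemoryChunk::kAlignment) from
// the current allocation block and commit the first 'commit_size' bytes of it.
// The actual size taken from the block is returned via 'allocated'; NULL is
// returned if committing fails.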
Address CodeRange::AllocateRawMemory(const size_t requested_size,
                                     const size_t commit_size,
                                     size_t* allocated) {
  ASSERT(commit_size <= requested_size);
  ASSERT(current_allocation_block_index_ < allocation_list_.length());
  if (requested_size > allocation_list_[current_allocation_block_index_].size) {
    // Find an allocation block large enough.  This function call may
    // call V8::FatalProcessOutOfMemory if it cannot find a large enough block.
    GetNextAllocationBlock(requested_size);
  }
  // Commit the requested memory at the start of the current allocation block.
  size_t aligned_requested = RoundUp(requested_size, MemoryChunk::kAlignment);
  FreeBlock current = allocation_list_[current_allocation_block_index_];
  if (aligned_requested >= (current.size - Page::kPageSize)) {
    // Don't leave a small free block, useless for a large object or chunk.
    *allocated = current.size;
  } else {
    *allocated = aligned_requested;
  }
  ASSERT(*allocated <= current.size);
  ASSERT(IsAddressAligned(current.start, MemoryChunk::kAlignment));
  if (!MemoryAllocator::CommitExecutableMemory(code_range_,
                                               current.start,
                                               commit_size,
                                               *allocated)) {
    *allocated = 0;
    return NULL;
  }
  allocation_list_[current_allocation_block_index_].start += *allocated;
  allocation_list_[current_allocation_block_index_].size -= *allocated;
  if (*allocated == current.size) {
    GetNextAllocationBlock(0);  // This block is used up, get the next one.
  }
  return current.start;
}


bool CodeRange::CommitRawMemory(Address start, size_t length) {
  return code_range_->Commit(start, length, true);
}


bool CodeRange::UncommitRawMemory(Address start, size_t length) {
  return code_range_->Uncommit(start, length);
}


void CodeRange::FreeRawMemory(Address address, size_t length) {
  ASSERT(IsAddressAligned(address, MemoryChunk::kAlignment));
  free_list_.Add(FreeBlock(address, length));
  code_range_->Uncommit(address, length);
}


void CodeRange::TearDown() {
  delete code_range_;  // Frees all memory in the virtual memory range.
  code_range_ = NULL;
  free_list_.Free();
  allocation_list_.Free();
}


// -----------------------------------------------------------------------------
// MemoryAllocator
//

MemoryAllocator::MemoryAllocator(Isolate* isolate)
    : isolate_(isolate),
      capacity_(0),
      capacity_executable_(0),
      size_(0),
      size_executable_(0) {
}


bool MemoryAllocator::SetUp(intptr_t capacity, intptr_t capacity_executable) {
  capacity_ = RoundUp(capacity, Page::kPageSize);
  capacity_executable_ = RoundUp(capacity_executable, Page::kPageSize);
  ASSERT_GE(capacity_, capacity_executable_);

  size_ = 0;
  size_executable_ = 0;

  return true;
}


void MemoryAllocator::TearDown() {
  // Check that spaces were torn down before MemoryAllocator.
  ASSERT(size_ == 0);
  // TODO(gc) this will be true again when we fix FreeMemory.
  // ASSERT(size_executable_ == 0);
  capacity_ = 0;
  capacity_executable_ = 0;
}


void MemoryAllocator::FreeMemory(VirtualMemory* reservation,
                                 Executability executable) {
  // TODO(gc) make code_range part of memory allocator?
  ASSERT(reservation->IsReserved());
  size_t size = reservation->size();
  ASSERT(size_ >= size);
  size_ -= size;

  isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size));

  if (executable == EXECUTABLE) {
    ASSERT(size_executable_ >= size);
    size_executable_ -= size;
  }
  // Code which is part of the code-range does not have its own VirtualMemory.
  ASSERT(!isolate_->code_range()->contains(
      static_cast<Address>(reservation->address())));
  ASSERT(executable == NOT_EXECUTABLE || !isolate_->code_range()->exists());
  reservation->Release();
}


void MemoryAllocator::FreeMemory(Address base,
                                 size_t size,
                                 Executability executable) {
  // TODO(gc) make code_range part of memory allocator?
  ASSERT(size_ >= size);
  size_ -= size;

  isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size));

  if (executable == EXECUTABLE) {
    ASSERT(size_executable_ >= size);
    size_executable_ -= size;
  }
  if (isolate_->code_range()->contains(static_cast<Address>(base))) {
    ASSERT(executable == EXECUTABLE);
    isolate_->code_range()->FreeRawMemory(base, size);
  } else {
    ASSERT(executable == NOT_EXECUTABLE || !isolate_->code_range()->exists());
    bool result = VirtualMemory::ReleaseRegion(base, size);
    USE(result);
    ASSERT(result);
  }
}


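// Reserve (but do not commit) 'size' bytes of address space and hand the
// reservation over to 'controller'.  The returned base address is rounded up
// to 'alignment' within the reservation.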
Address MemoryAllocator::ReserveAlignedMemory(size_t size,
                                              size_t alignment,
                                              VirtualMemory* controller) {
  VirtualMemory reservation(size, alignment);

  if (!reservation.IsReserved()) return NULL;
  size_ += reservation.size();
  Address base = RoundUp(static_cast<Address>(reservation.address()),
                         alignment);
  controller->TakeControl(&reservation);
  return base;
}


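// Reserve 'reserve_size' bytes aligned to 'alignment' and commit the first
// 'commit_size' bytes; executable memory additionally gets its guard pages set
// up.  On failure the reservation is released and NULL is returned.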
Address MemoryAllocator::AllocateAlignedMemory(size_t reserve_size,
                                               size_t commit_size,
                                               size_t alignment,
                                               Executability executable,
                                               VirtualMemory* controller) {
  ASSERT(commit_size <= reserve_size);
  VirtualMemory reservation;
  Address base = ReserveAlignedMemory(reserve_size, alignment, &reservation);
  if (base == NULL) return NULL;

  if (executable == EXECUTABLE) {
    if (!CommitExecutableMemory(&reservation,
                                base,
                                commit_size,
                                reserve_size)) {
      base = NULL;
    }
  } else {
    if (!reservation.Commit(base, commit_size, false)) {
      base = NULL;
    }
  }

  if (base == NULL) {
    // Failed to commit the body. Release the mapping and any partially
    // committed regions inside it.
    reservation.Release();
    return NULL;
  }

  controller->TakeControl(&reservation);
  return base;
}


void Page::InitializeAsAnchor(PagedSpace* owner) {
  set_owner(owner);
  set_prev_page(this);
  set_next_page(this);
}


NewSpacePage* NewSpacePage::Initialize(Heap* heap,
                                       Address start,
                                       SemiSpace* semi_space) {
  Address area_start = start + NewSpacePage::kObjectStartOffset;
  Address area_end = start + Page::kPageSize;

  MemoryChunk* chunk = MemoryChunk::Initialize(heap,
                                               start,
                                               Page::kPageSize,
                                               area_start,
                                               area_end,
                                               NOT_EXECUTABLE,
                                               semi_space);
  chunk->set_next_chunk(NULL);
  chunk->set_prev_chunk(NULL);
  chunk->initialize_scan_on_scavenge(true);
  bool in_to_space = (semi_space->id() != kFromSpace);
  chunk->SetFlag(in_to_space ? MemoryChunk::IN_TO_SPACE
                             : MemoryChunk::IN_FROM_SPACE);
  ASSERT(!chunk->IsFlagSet(in_to_space ? MemoryChunk::IN_FROM_SPACE
                                       : MemoryChunk::IN_TO_SPACE));
  NewSpacePage* page = static_cast<NewSpacePage*>(chunk);
  heap->incremental_marking()->SetNewSpacePageFlags(page);
  return page;
}


void NewSpacePage::InitializeAsAnchor(SemiSpace* semi_space) {
  set_owner(semi_space);
  set_next_chunk(this);
  set_prev_chunk(this);
  // The flags mark this invalid page as not being in new-space.
  // All real new-space pages will be in new-space.
  SetFlags(0, ~0);
}


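// Fill in the header fields of the chunk starting at 'base' and clear its mark
// bitmap.  The chunk starts out flagged WAS_SWEPT_PRECISELY so that iterators
// may treat it as precisely swept.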
MemoryChunk* MemoryChunk::Initialize(Heap* heap,
                                     Address base,
                                     size_t size,
                                     Address area_start,
                                     Address area_end,
                                     Executability executable,
                                     Space* owner) {
  MemoryChunk* chunk = FromAddress(base);

  ASSERT(base == chunk->address());

  chunk->heap_ = heap;
  chunk->size_ = size;
  chunk->area_start_ = area_start;
  chunk->area_end_ = area_end;
  chunk->flags_ = 0;
  chunk->set_owner(owner);
  chunk->InitializeReservedMemory();
  chunk->slots_buffer_ = NULL;
  chunk->skip_list_ = NULL;
  chunk->write_barrier_counter_ = kWriteBarrierCounterGranularity;
  chunk->progress_bar_ = 0;
  chunk->high_water_mark_ = static_cast<int>(area_start - base);
  chunk->parallel_sweeping_ = 0;
  chunk->available_in_small_free_list_ = 0;
  chunk->available_in_medium_free_list_ = 0;
  chunk->available_in_large_free_list_ = 0;
  chunk->available_in_huge_free_list_ = 0;
  chunk->non_available_small_blocks_ = 0;
  chunk->ResetLiveBytes();
  Bitmap::Clear(chunk);
  chunk->initialize_scan_on_scavenge(false);
  chunk->SetFlag(WAS_SWEPT_PRECISELY);

  ASSERT(OFFSET_OF(MemoryChunk, flags_) == kFlagsOffset);
  ASSERT(OFFSET_OF(MemoryChunk, live_byte_count_) == kLiveBytesOffset);

  if (executable == EXECUTABLE) {
    chunk->SetFlag(IS_EXECUTABLE);
  }

  if (owner == heap->old_data_space()) {
    chunk->SetFlag(CONTAINS_ONLY_DATA);
  }

  return chunk;
}


// Commit MemoryChunk area to the requested size.
bool MemoryChunk::CommitArea(size_t requested) {
  size_t guard_size = IsFlagSet(IS_EXECUTABLE) ?
                      MemoryAllocator::CodePageGuardSize() : 0;
  size_t header_size = area_start() - address() - guard_size;
  size_t commit_size = RoundUp(header_size + requested, OS::CommitPageSize());
  size_t committed_size = RoundUp(header_size + (area_end() - area_start()),
                                  OS::CommitPageSize());

  if (commit_size > committed_size) {
    // Commit size should be less than or equal to the reserved size.
    ASSERT(commit_size <= size() - 2 * guard_size);
    // Append the committed area.
    Address start = address() + committed_size + guard_size;
    size_t length = commit_size - committed_size;
    if (reservation_.IsReserved()) {
      if (!reservation_.Commit(start, length, IsFlagSet(IS_EXECUTABLE))) {
        return false;
      }
    } else {
      CodeRange* code_range = heap_->isolate()->code_range();
      ASSERT(code_range->exists() && IsFlagSet(IS_EXECUTABLE));
      if (!code_range->CommitRawMemory(start, length)) return false;
    }

    if (Heap::ShouldZapGarbage()) {
      heap_->isolate()->memory_allocator()->ZapBlock(start, length);
    }
  } else if (commit_size < committed_size) {
    ASSERT(commit_size > 0);
    // Shrink the committed area.
    size_t length = committed_size - commit_size;
    Address start = address() + committed_size + guard_size - length;
    if (reservation_.IsReserved()) {
      if (!reservation_.Uncommit(start, length)) return false;
    } else {
      CodeRange* code_range = heap_->isolate()->code_range();
      ASSERT(code_range->exists() && IsFlagSet(IS_EXECUTABLE));
      if (!code_range->UncommitRawMemory(start, length)) return false;
    }
  }

  area_end_ = area_start_ + requested;
  return true;
}


void MemoryChunk::InsertAfter(MemoryChunk* other) {
  next_chunk_ = other->next_chunk_;
  prev_chunk_ = other;

  // This memory barrier is needed since concurrent sweeper threads may iterate
  // over the list of pages while a new page is inserted.
  // TODO(hpayer): find a cleaner way to guarantee that the page list can be
  // expanded concurrently.
  MemoryBarrier();

  // The following two write operations can take effect in arbitrary order
  // since pages are always iterated by the sweeper threads in LIFO order, i.e.,
  // the inserted page becomes visible for the sweeper threads after
  // other->next_chunk_ = this;
  other->next_chunk_->prev_chunk_ = this;
  other->next_chunk_ = this;
}


void MemoryChunk::Unlink() {
  if (!InNewSpace() && IsFlagSet(SCAN_ON_SCAVENGE)) {
    heap_->decrement_scan_on_scavenge_pages();
    ClearFlag(SCAN_ON_SCAVENGE);
  }
  next_chunk_->prev_chunk_ = prev_chunk_;
  prev_chunk_->next_chunk_ = next_chunk_;
  prev_chunk_ = NULL;
  next_chunk_ = NULL;
}


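// Allocate a chunk whose area can hold 'reserve_area_size' bytes, with the
// first 'commit_area_size' bytes committed.  Executable chunks are surrounded
// by guard pages and, when a code range exists, are carved out of it; see the
// layout diagrams below.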
MemoryChunk* MemoryAllocator::AllocateChunk(intptr_t reserve_area_size,
                                            intptr_t commit_area_size,
                                            Executability executable,
                                            Space* owner) {
  ASSERT(commit_area_size <= reserve_area_size);

  size_t chunk_size;
  Heap* heap = isolate_->heap();
  Address base = NULL;
  VirtualMemory reservation;
  Address area_start = NULL;
  Address area_end = NULL;

  //
  // MemoryChunk layout:
  //
  //             Executable
  // +----------------------------+<- base aligned with MemoryChunk::kAlignment
  // |           Header           |
  // +----------------------------+<- base + CodePageGuardStartOffset
  // |           Guard            |
  // +----------------------------+<- area_start_
  // |           Area             |
  // +----------------------------+<- area_end_ (area_start + commit_area_size)
  // |   Committed but not used   |
  // +----------------------------+<- aligned at OS page boundary
  // | Reserved but not committed |
  // +----------------------------+<- aligned at OS page boundary
  // |           Guard            |
  // +----------------------------+<- base + chunk_size
  //
  //           Non-executable
  // +----------------------------+<- base aligned with MemoryChunk::kAlignment
  // |          Header            |
  // +----------------------------+<- area_start_ (base + kObjectStartOffset)
  // |           Area             |
  // +----------------------------+<- area_end_ (area_start + commit_area_size)
  // |  Committed but not used    |
  // +----------------------------+<- aligned at OS page boundary
  // | Reserved but not committed |
  // +----------------------------+<- base + chunk_size
  //

  if (executable == EXECUTABLE) {
    chunk_size = RoundUp(CodePageAreaStartOffset() + reserve_area_size,
                         OS::CommitPageSize()) + CodePageGuardSize();

    // Check executable memory limit.
    if (size_executable_ + chunk_size > capacity_executable_) {
      LOG(isolate_,
          StringEvent("MemoryAllocator::AllocateRawMemory",
                      "V8 Executable Allocation capacity exceeded"));
      return NULL;
    }

    // Size of header (not executable) plus area (executable).
    size_t commit_size = RoundUp(CodePageGuardStartOffset() + commit_area_size,
                                 OS::CommitPageSize());
    // Allocate executable memory either from code range or from the
    // OS.
    if (isolate_->code_range()->exists()) {
      base = isolate_->code_range()->AllocateRawMemory(chunk_size,
                                                       commit_size,
                                                       &chunk_size);
      ASSERT(IsAligned(reinterpret_cast<intptr_t>(base),
                       MemoryChunk::kAlignment));
      if (base == NULL) return NULL;
      size_ += chunk_size;
      // Update executable memory size.
      size_executable_ += chunk_size;
    } else {
      base = AllocateAlignedMemory(chunk_size,
                                   commit_size,
                                   MemoryChunk::kAlignment,
                                   executable,
                                   &reservation);
      if (base == NULL) return NULL;
      // Update executable memory size.
      size_executable_ += reservation.size();
    }

    if (Heap::ShouldZapGarbage()) {
      ZapBlock(base, CodePageGuardStartOffset());
      ZapBlock(base + CodePageAreaStartOffset(), commit_area_size);
    }

    area_start = base + CodePageAreaStartOffset();
    area_end = area_start + commit_area_size;
  } else {
    chunk_size = RoundUp(MemoryChunk::kObjectStartOffset + reserve_area_size,
                         OS::CommitPageSize());
    size_t commit_size = RoundUp(MemoryChunk::kObjectStartOffset +
                                 commit_area_size, OS::CommitPageSize());
    base = AllocateAlignedMemory(chunk_size,
                                 commit_size,
                                 MemoryChunk::kAlignment,
                                 executable,
                                 &reservation);

    if (base == NULL) return NULL;

    if (Heap::ShouldZapGarbage()) {
      ZapBlock(base, Page::kObjectStartOffset + commit_area_size);
    }

    area_start = base + Page::kObjectStartOffset;
    area_end = area_start + commit_area_size;
  }

  // Use chunk_size for statistics and callbacks because we assume that they
  // treat reserved but not-yet committed memory regions of chunks as allocated.
  isolate_->counters()->memory_allocated()->
      Increment(static_cast<int>(chunk_size));

  LOG(isolate_, NewEvent("MemoryChunk", base, chunk_size));
  if (owner != NULL) {
    ObjectSpace space = static_cast<ObjectSpace>(1 << owner->identity());
    PerformAllocationCallback(space, kAllocationActionAllocate, chunk_size);
  }

  MemoryChunk* result = MemoryChunk::Initialize(heap,
                                                base,
                                                chunk_size,
                                                area_start,
                                                area_end,
                                                executable,
                                                owner);
  result->set_reserved_memory(&reservation);
  return result;
}


void Page::ResetFreeListStatistics() {
  non_available_small_blocks_ = 0;
  available_in_small_free_list_ = 0;
  available_in_medium_free_list_ = 0;
  available_in_large_free_list_ = 0;
  available_in_huge_free_list_ = 0;
}


Page* MemoryAllocator::AllocatePage(intptr_t size,
                                    PagedSpace* owner,
                                    Executability executable) {
  MemoryChunk* chunk = AllocateChunk(size, size, executable, owner);

  if (chunk == NULL) return NULL;

  return Page::Initialize(isolate_->heap(), chunk, executable, owner);
}


LargePage* MemoryAllocator::AllocateLargePage(intptr_t object_size,
                                              Space* owner,
                                              Executability executable) {
  MemoryChunk* chunk = AllocateChunk(object_size,
                                     object_size,
                                     executable,
                                     owner);
  if (chunk == NULL) return NULL;
  return LargePage::Initialize(isolate_->heap(), chunk);
}


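// Release a chunk: run the allocation callbacks, let the heap remember the
// unmapped page, and hand the memory back to the OS or to the code range.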
void MemoryAllocator::Free(MemoryChunk* chunk) {
  LOG(isolate_, DeleteEvent("MemoryChunk", chunk));
  if (chunk->owner() != NULL) {
    ObjectSpace space =
        static_cast<ObjectSpace>(1 << chunk->owner()->identity());
    PerformAllocationCallback(space, kAllocationActionFree, chunk->size());
  }

  isolate_->heap()->RememberUnmappedPage(
      reinterpret_cast<Address>(chunk), chunk->IsEvacuationCandidate());

  delete chunk->slots_buffer();
  delete chunk->skip_list();

  VirtualMemory* reservation = chunk->reserved_memory();
  if (reservation->IsReserved()) {
    FreeMemory(reservation, chunk->executable());
  } else {
    FreeMemory(chunk->address(),
               chunk->size(),
               chunk->executable());
  }
}


bool MemoryAllocator::CommitBlock(Address start,
                                  size_t size,
                                  Executability executable) {
  if (!VirtualMemory::CommitRegion(start, size, executable)) return false;

  if (Heap::ShouldZapGarbage()) {
    ZapBlock(start, size);
  }

  isolate_->counters()->memory_allocated()->Increment(static_cast<int>(size));
  return true;
}


bool MemoryAllocator::UncommitBlock(Address start, size_t size) {
  if (!VirtualMemory::UncommitRegion(start, size)) return false;
  isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size));
  return true;
}


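// Fill the block with kZapValue so that stale pointers into freed or
// uncommitted memory are easy to recognize.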
void MemoryAllocator::ZapBlock(Address start, size_t size) {
  for (size_t s = 0; s + kPointerSize <= size; s += kPointerSize) {
    Memory::Address_at(start + s) = kZapValue;
  }
}


void MemoryAllocator::PerformAllocationCallback(ObjectSpace space,
                                                AllocationAction action,
                                                size_t size) {
  for (int i = 0; i < memory_allocation_callbacks_.length(); ++i) {
    MemoryAllocationCallbackRegistration registration =
      memory_allocation_callbacks_[i];
    if ((registration.space & space) == space &&
        (registration.action & action) == action)
      registration.callback(space, action, static_cast<int>(size));
  }
}


bool MemoryAllocator::MemoryAllocationCallbackRegistered(
    MemoryAllocationCallback callback) {
  for (int i = 0; i < memory_allocation_callbacks_.length(); ++i) {
    if (memory_allocation_callbacks_[i].callback == callback) return true;
  }
  return false;
}


void MemoryAllocator::AddMemoryAllocationCallback(
    MemoryAllocationCallback callback,
    ObjectSpace space,
    AllocationAction action) {
  ASSERT(callback != NULL);
  MemoryAllocationCallbackRegistration registration(callback, space, action);
  ASSERT(!MemoryAllocator::MemoryAllocationCallbackRegistered(callback));
  return memory_allocation_callbacks_.Add(registration);
}


void MemoryAllocator::RemoveMemoryAllocationCallback(
     MemoryAllocationCallback callback) {
  ASSERT(callback != NULL);
  for (int i = 0; i < memory_allocation_callbacks_.length(); ++i) {
    if (memory_allocation_callbacks_[i].callback == callback) {
      memory_allocation_callbacks_.Remove(i);
      return;
    }
  }
  UNREACHABLE();
}


#ifdef DEBUG
void MemoryAllocator::ReportStatistics() {
  float pct = static_cast<float>(capacity_ - size_) / capacity_;
  PrintF("  capacity: %" V8_PTR_PREFIX "d"
             ", used: %" V8_PTR_PREFIX "d"
             ", available: %%%d\n\n",
         capacity_, size_, static_cast<int>(pct*100));
}
#endif


int MemoryAllocator::CodePageGuardStartOffset() {
  // We are guarding code pages: the first OS page after the header
  // will be protected as non-writable.
  return RoundUp(Page::kObjectStartOffset, OS::CommitPageSize());
}


int MemoryAllocator::CodePageGuardSize() {
  return static_cast<int>(OS::CommitPageSize());
}


int MemoryAllocator::CodePageAreaStartOffset() {
  // We are guarding code pages: the first OS page after the header
  // will be protected as non-writable.
  return CodePageGuardStartOffset() + CodePageGuardSize();
}


int MemoryAllocator::CodePageAreaEndOffset() {
  // We are guarding code pages: the last OS page will be protected as
  // non-writable.
  return Page::kPageSize - static_cast<int>(OS::CommitPageSize());
}


bool MemoryAllocator::CommitExecutableMemory(VirtualMemory* vm,
                                             Address start,
                                             size_t commit_size,
                                             size_t reserved_size) {
  // Commit page header (not executable).
  if (!vm->Commit(start,
                  CodePageGuardStartOffset(),
                  false)) {
    return false;
  }

  // Create guard page after the header.
  if (!vm->Guard(start + CodePageGuardStartOffset())) {
    return false;
  }

  // Commit page body (executable).
  if (!vm->Commit(start + CodePageAreaStartOffset(),
                  commit_size - CodePageGuardStartOffset(),
                  true)) {
    return false;
  }

  // Create guard page before the end.
  if (!vm->Guard(start + reserved_size - CodePageGuardSize())) {
    return false;
  }

  return true;
}


// -----------------------------------------------------------------------------
// MemoryChunk implementation

void MemoryChunk::IncrementLiveBytesFromMutator(Address address, int by) {
  MemoryChunk* chunk = MemoryChunk::FromAddress(address);
  if (!chunk->InNewSpace() && !static_cast<Page*>(chunk)->WasSwept()) {
    static_cast<PagedSpace*>(chunk->owner())->IncrementUnsweptFreeBytes(-by);
  }
  chunk->IncrementLiveBytes(by);
}

// -----------------------------------------------------------------------------
// PagedSpace implementation

PagedSpace::PagedSpace(Heap* heap,
                       intptr_t max_capacity,
                       AllocationSpace id,
                       Executability executable)
    : Space(heap, id, executable),
      free_list_(this),
      was_swept_conservatively_(false),
      first_unswept_page_(Page::FromAddress(NULL)),
      unswept_free_bytes_(0) {
  if (id == CODE_SPACE) {
    area_size_ = heap->isolate()->memory_allocator()->
        CodePageAreaSize();
  } else {
    area_size_ = Page::kPageSize - Page::kObjectStartOffset;
  }
  max_capacity_ = (RoundDown(max_capacity, Page::kPageSize) / Page::kPageSize)
      * AreaSize();
  accounting_stats_.Clear();

  allocation_info_.top = NULL;
  allocation_info_.limit = NULL;

  anchor_.InitializeAsAnchor(this);
}


bool PagedSpace::SetUp() {
  return true;
}


bool PagedSpace::HasBeenSetUp() {
  return true;
}


void PagedSpace::TearDown() {
  PageIterator iterator(this);
  while (iterator.has_next()) {
    heap()->isolate()->memory_allocator()->Free(iterator.next());
  }
  anchor_.set_next_page(&anchor_);
  anchor_.set_prev_page(&anchor_);
  accounting_stats_.Clear();
}


size_t PagedSpace::CommittedPhysicalMemory() {
  if (!VirtualMemory::HasLazyCommits()) return CommittedMemory();
  MemoryChunk::UpdateHighWaterMark(allocation_info_.top);
  size_t size = 0;
  PageIterator it(this);
  while (it.has_next()) {
    size += it.next()->CommittedPhysicalMemory();
  }
  return size;
}


MaybeObject* PagedSpace::FindObject(Address addr) {
  // Note: this function can only be called on precisely swept spaces.
  ASSERT(!heap()->mark_compact_collector()->in_use());

  if (!Contains(addr)) return Failure::Exception();

  Page* p = Page::FromAddress(addr);
  HeapObjectIterator it(p, NULL);
  for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
    Address cur = obj->address();
    Address next = cur + obj->Size();
    if ((cur <= addr) && (addr < next)) return obj;
  }

  UNREACHABLE();
  return Failure::Exception();
}

bool PagedSpace::CanExpand() {
  ASSERT(max_capacity_ % AreaSize() == 0);

  if (Capacity() == max_capacity_) return false;

  ASSERT(Capacity() < max_capacity_);

  // Are we going to exceed capacity for this space?
  if ((Capacity() + Page::kPageSize) > max_capacity_) return false;

  return true;
}


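// Allocate one more page for this space (the very first page may be smaller
// than a full page) and link it in at the end of the page list.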
bool PagedSpace::Expand() {
  if (!CanExpand()) return false;

  intptr_t size = AreaSize();

  if (anchor_.next_page() == &anchor_) {
    size = SizeOfFirstPage();
  }

  Page* p = heap()->isolate()->memory_allocator()->AllocatePage(
      size, this, executable());
  if (p == NULL) return false;

  ASSERT(Capacity() <= max_capacity_);

  p->InsertAfter(anchor_.prev_page());

  return true;
}


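// Size used for the first page of each space: smaller than a full page for
// most spaces, except for code space when a code range is in use.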
intptr_t PagedSpace::SizeOfFirstPage() {
  int size = 0;
  switch (identity()) {
    case OLD_POINTER_SPACE:
      size = 64 * kPointerSize * KB;
      break;
    case OLD_DATA_SPACE:
      size = 192 * KB;
      break;
    case MAP_SPACE:
      size = 16 * kPointerSize * KB;
      break;
    case CELL_SPACE:
      size = 16 * kPointerSize * KB;
      break;
    case CODE_SPACE:
      if (heap()->isolate()->code_range()->exists()) {
        // When a code range exists, code pages are allocated in a special way
        // (from the reserved code range). That part of the code is not yet
        // upgraded to handle small pages.
        size = AreaSize();
      } else {
        size = 384 * KB;
      }
      break;
    default:
      UNREACHABLE();
  }
  return Min(size, AreaSize());
}


int PagedSpace::CountTotalPages() {
  PageIterator it(this);
  int count = 0;
  while (it.has_next()) {
    it.next();
    count++;
  }
  return count;
}


void PagedSpace::ObtainFreeListStatistics(Page* page, SizeStats* sizes) {
  sizes->huge_size_ = page->available_in_huge_free_list();
  sizes->small_size_ = page->available_in_small_free_list();
  sizes->medium_size_ = page->available_in_medium_free_list();
  sizes->large_size_ = page->available_in_large_free_list();
}


void PagedSpace::ResetFreeListStatistics() {
  PageIterator page_iterator(this);
  while (page_iterator.has_next()) {
    Page* page = page_iterator.next();
    page->ResetFreeListStatistics();
  }
}


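// Give an empty page back to the memory allocator (or queue it for later
// release), removing it from the free list and the sweeper bookkeeping.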
void PagedSpace::ReleasePage(Page* page, bool unlink) {
  ASSERT(page->LiveBytes() == 0);
  ASSERT(AreaSize() == page->area_size());

  // Adjust list of unswept pages if the page is the head of the list.
  if (first_unswept_page_ == page) {
    first_unswept_page_ = page->next_page();
    if (first_unswept_page_ == anchor()) {
      first_unswept_page_ = Page::FromAddress(NULL);
    }
  }

  if (page->WasSwept()) {
    intptr_t size = free_list_.EvictFreeListItems(page);
    accounting_stats_.AllocateBytes(size);
    ASSERT_EQ(AreaSize(), static_cast<int>(size));
  } else {
    DecreaseUnsweptFreeBytes(page);
  }

  if (Page::FromAllocationTop(allocation_info_.top) == page) {
    allocation_info_.top = allocation_info_.limit = NULL;
  }

  if (unlink) {
    page->Unlink();
  }
  if (page->IsFlagSet(MemoryChunk::CONTAINS_ONLY_DATA)) {
    heap()->isolate()->memory_allocator()->Free(page);
  } else {
    heap()->QueueMemoryChunkForFree(page);
  }

  ASSERT(Capacity() > 0);
  accounting_stats_.ShrinkSpace(AreaSize());
}


#ifdef DEBUG
void PagedSpace::Print() { }
#endif

#ifdef VERIFY_HEAP
void PagedSpace::Verify(ObjectVisitor* visitor) {
  // We can only iterate over the pages if they were swept precisely.
  if (was_swept_conservatively_) return;

  bool allocation_pointer_found_in_space =
      (allocation_info_.top == allocation_info_.limit);
  PageIterator page_iterator(this);
  while (page_iterator.has_next()) {
    Page* page = page_iterator.next();
    CHECK(page->owner() == this);
    if (page == Page::FromAllocationTop(allocation_info_.top)) {
      allocation_pointer_found_in_space = true;
    }
    CHECK(page->WasSweptPrecisely());
    HeapObjectIterator it(page, NULL);
    Address end_of_previous_object = page->area_start();
    Address top = page->area_end();
    int black_size = 0;
    for (HeapObject* object = it.Next(); object != NULL; object = it.Next()) {
      CHECK(end_of_previous_object <= object->address());

      // The first word should be a map, and we expect all map pointers to
      // be in map space.
      Map* map = object->map();
      CHECK(map->IsMap());
      CHECK(heap()->map_space()->Contains(map));

      // Perform space-specific object verification.
      VerifyObject(object);

      // The object itself should look OK.
      object->Verify();

      // All the interior pointers should be contained in the heap.
      int size = object->Size();
      object->IterateBody(map->instance_type(), size, visitor);
      if (Marking::IsBlack(Marking::MarkBitFrom(object))) {
        black_size += size;
      }

      CHECK(object->address() + size <= top);
      end_of_previous_object = object->address() + size;
    }
    CHECK_LE(black_size, page->LiveBytes());
  }
  CHECK(allocation_pointer_found_in_space);
}
#endif  // VERIFY_HEAP

// -----------------------------------------------------------------------------
// NewSpace implementation


bool NewSpace::SetUp(int reserved_semispace_capacity,
                     int maximum_semispace_capacity) {
  // Set up new space based on the preallocated memory block defined by
  // start and size. The provided space is divided into two semi-spaces.
  // To support fast containment testing in the new space, the size of
  // this chunk must be a power of two and it must be aligned to its size.
  int initial_semispace_capacity = heap()->InitialSemiSpaceSize();

  size_t size = 2 * reserved_semispace_capacity;
  Address base =
      heap()->isolate()->memory_allocator()->ReserveAlignedMemory(
          size, size, &reservation_);
  if (base == NULL) return false;

  chunk_base_ = base;
  chunk_size_ = static_cast<uintptr_t>(size);
  LOG(heap()->isolate(), NewEvent("InitialChunk", chunk_base_, chunk_size_));

  ASSERT(initial_semispace_capacity <= maximum_semispace_capacity);
  ASSERT(IsPowerOf2(maximum_semispace_capacity));

  // Allocate and set up the histogram arrays if necessary.
  allocated_histogram_ = NewArray<HistogramInfo>(LAST_TYPE + 1);
  promoted_histogram_ = NewArray<HistogramInfo>(LAST_TYPE + 1);

#define SET_NAME(name) allocated_histogram_[name].set_name(#name); \
                       promoted_histogram_[name].set_name(#name);
  INSTANCE_TYPE_LIST(SET_NAME)
#undef SET_NAME

  ASSERT(reserved_semispace_capacity == heap()->ReservedSemiSpaceSize());
  ASSERT(static_cast<intptr_t>(chunk_size_) >=
         2 * heap()->ReservedSemiSpaceSize());
  ASSERT(IsAddressAligned(chunk_base_, 2 * reserved_semispace_capacity, 0));

  to_space_.SetUp(chunk_base_,
                  initial_semispace_capacity,
                  maximum_semispace_capacity);
  from_space_.SetUp(chunk_base_ + reserved_semispace_capacity,
                    initial_semispace_capacity,
                    maximum_semispace_capacity);
  if (!to_space_.Commit()) {
    return false;
  }
  ASSERT(!from_space_.is_committed());  // No need to use memory yet.

  start_ = chunk_base_;
  address_mask_ = ~(2 * reserved_semispace_capacity - 1);
  object_mask_ = address_mask_ | kHeapObjectTagMask;
  object_expected_ = reinterpret_cast<uintptr_t>(start_) | kHeapObjectTag;

  ResetAllocationInfo();

  return true;
}


void NewSpace::TearDown() {
  if (allocated_histogram_) {
    DeleteArray(allocated_histogram_);
    allocated_histogram_ = NULL;
  }
  if (promoted_histogram_) {
    DeleteArray(promoted_histogram_);
    promoted_histogram_ = NULL;
  }

  start_ = NULL;
  allocation_info_.top = NULL;
  allocation_info_.limit = NULL;

  to_space_.TearDown();
  from_space_.TearDown();

  LOG(heap()->isolate(), DeleteEvent("InitialChunk", chunk_base_));

  ASSERT(reservation_.IsReserved());
  heap()->isolate()->memory_allocator()->FreeMemory(&reservation_,
                                                    NOT_EXECUTABLE);
  chunk_base_ = NULL;
  chunk_size_ = 0;
}


void NewSpace::Flip() {
  SemiSpace::Swap(&from_space_, &to_space_);
}


void NewSpace::Grow() {
  // Double the semispace size but only up to maximum capacity.
  ASSERT(Capacity() < MaximumCapacity());
  int new_capacity = Min(MaximumCapacity(), 2 * static_cast<int>(Capacity()));
  if (to_space_.GrowTo(new_capacity)) {
    // Only grow from space if we managed to grow to-space.
    if (!from_space_.GrowTo(new_capacity)) {
      // If we managed to grow to-space but couldn't grow from-space,
      // attempt to shrink to-space.
      if (!to_space_.ShrinkTo(from_space_.Capacity())) {
        // We are in an inconsistent state because we could not
        // commit/uncommit memory from new space.
        V8::FatalProcessOutOfMemory("Failed to grow new space.");
      }
    }
  }
  ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
}


void NewSpace::Shrink() {
  int new_capacity = Max(InitialCapacity(), 2 * SizeAsInt());
  int rounded_new_capacity = RoundUp(new_capacity, Page::kPageSize);
  if (rounded_new_capacity < Capacity() &&
      to_space_.ShrinkTo(rounded_new_capacity))  {
    // Only shrink from-space if we managed to shrink to-space.
    from_space_.Reset();
    if (!from_space_.ShrinkTo(rounded_new_capacity)) {
      // If we managed to shrink to-space but couldn't shrink from
      // space, attempt to grow to-space again.
      if (!to_space_.GrowTo(from_space_.Capacity())) {
        // We are in an inconsistent state because we could not
        // commit/uncommit memory from new space.
        V8::FatalProcessOutOfMemory("Failed to shrink new space.");
      }
    }
  }
  allocation_info_.limit = to_space_.page_high();
  ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
}


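// Reset top and limit to the current to-space page; during incremental marking
// the limit is lowered so that the slow allocation path gets a chance to do a
// marking step.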
void NewSpace::UpdateAllocationInfo() {
  MemoryChunk::UpdateHighWaterMark(allocation_info_.top);
  allocation_info_.top = to_space_.page_low();
  allocation_info_.limit = to_space_.page_high();

  // Lower limit during incremental marking.
  if (heap()->incremental_marking()->IsMarking() &&
      inline_allocation_limit_step() != 0) {
    Address new_limit =
        allocation_info_.top + inline_allocation_limit_step();
    allocation_info_.limit = Min(new_limit, allocation_info_.limit);
  }
  ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
}


void NewSpace::ResetAllocationInfo() {
  to_space_.Reset();
  UpdateAllocationInfo();
  pages_used_ = 0;
  // Clear all mark-bits in the to-space.
  NewSpacePageIterator it(&to_space_);
  while (it.has_next()) {
    Bitmap::Clear(it.next());
  }
}


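// Advance to the next to-space page, filling the unused tail of the current
// page with a filler object.  Returns false if the current page is already
// empty or if to-space has no page left.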
bool NewSpace::AddFreshPage() {
  Address top = allocation_info_.top;
  if (NewSpacePage::IsAtStart(top)) {
    // The current page is already empty. Don't try to make another.

    // We should only get here if someone asks to allocate more
    // than what can be stored in a single page.
    // TODO(gc): Change the limit on new-space allocation to prevent this
    // from happening (all such allocations should go directly to LOSpace).
    return false;
  }
  if (!to_space_.AdvancePage()) {
    // Failed to get a new page in to-space.
    return false;
  }

  // Clear remainder of current page.
  Address limit = NewSpacePage::FromLimit(top)->area_end();
  if (heap()->gc_state() == Heap::SCAVENGE) {
    heap()->promotion_queue()->SetNewLimit(limit);
    heap()->promotion_queue()->ActivateGuardIfOnTheSamePage();
  }

  int remaining_in_page = static_cast<int>(limit - top);
  heap()->CreateFillerObjectAt(top, remaining_in_page);
  pages_used_++;
  UpdateAllocationInfo();

  return true;
}


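// Slow path of AllocateRaw: either raise the limit that incremental marking
// lowered or move on to a fresh page, do an incremental marking step for the
// bytes allocated so far, and retry the allocation.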
MaybeObject* NewSpace::SlowAllocateRaw(int size_in_bytes) {
  Address old_top = allocation_info_.top;
  Address new_top = old_top + size_in_bytes;
  Address high = to_space_.page_high();
  if (allocation_info_.limit < high) {
    // Incremental marking has lowered the limit to get a
    // chance to do a step.
    allocation_info_.limit = Min(
        allocation_info_.limit + inline_allocation_limit_step_,
        high);
    int bytes_allocated = static_cast<int>(new_top - top_on_previous_step_);
    heap()->incremental_marking()->Step(
        bytes_allocated, IncrementalMarking::GC_VIA_STACK_GUARD);
    top_on_previous_step_ = new_top;
    return AllocateRaw(size_in_bytes);
  } else if (AddFreshPage()) {
    // Switched to new page. Try allocating again.
    int bytes_allocated = static_cast<int>(old_top - top_on_previous_step_);
    heap()->incremental_marking()->Step(
        bytes_allocated, IncrementalMarking::GC_VIA_STACK_GUARD);
    top_on_previous_step_ = to_space_.page_low();
    return AllocateRaw(size_in_bytes);
  } else {
    return Failure::RetryAfterGC();
  }
}


#ifdef VERIFY_HEAP
// We do not use the SemiSpaceIterator because verification doesn't assume
// that it works (it depends on the invariants we are checking).
void NewSpace::Verify() {
  // The allocation pointer should be in the space or at the very end.
  ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);

  // There should be objects packed in from the low address up to the
  // allocation pointer.
  Address current = to_space_.first_page()->area_start();
  CHECK_EQ(current, to_space_.space_start());

  while (current != top()) {
    if (!NewSpacePage::IsAtEnd(current)) {
      // The allocation pointer should not be in the middle of an object.
      CHECK(!NewSpacePage::FromLimit(current)->ContainsLimit(top()) ||
            current < top());

      HeapObject* object = HeapObject::FromAddress(current);

      // The first word should be a map, and we expect all map pointers to
      // be in map space.
      Map* map = object->map();
      CHECK(map->IsMap());
      CHECK(heap()->map_space()->Contains(map));

      // The object should not be code or a map.
      CHECK(!object->IsMap());
      CHECK(!object->IsCode());

      // The object itself should look OK.
      object->Verify();

      // All the interior pointers should be contained in the heap.
      VerifyPointersVisitor visitor;
      int size = object->Size();
      object->IterateBody(map->instance_type(), size, &visitor);

      current += size;
    } else {
      // At end of page, switch to next page.
      NewSpacePage* page = NewSpacePage::FromLimit(current)->next_page();
      // Next page should be valid.
      CHECK(!page->is_anchor());
      current = page->area_start();
    }
  }

  // Check semi-spaces.
  CHECK_EQ(from_space_.id(), kFromSpace);
  CHECK_EQ(to_space_.id(), kToSpace);
  from_space_.Verify();
  to_space_.Verify();
}
#endif

// -----------------------------------------------------------------------------
// SemiSpace implementation

void SemiSpace::SetUp(Address start,
                      int initial_capacity,
                      int maximum_capacity) {
  // Creates a space in the young generation. The constructor does not
  // allocate memory from the OS.  A SemiSpace is given a contiguous chunk of
  // memory of size 'capacity' when set up, and does not grow or shrink
  // otherwise.  In the mark-compact collector, the memory region of the from
  // space is used as the marking stack. It requires contiguous memory
  // addresses.
  ASSERT(maximum_capacity >= Page::kPageSize);
  initial_capacity_ = RoundDown(initial_capacity, Page::kPageSize);
  capacity_ = initial_capacity;
  maximum_capacity_ = RoundDown(maximum_capacity, Page::kPageSize);
  committed_ = false;
  start_ = start;
  address_mask_ = ~(maximum_capacity - 1);
  object_mask_ = address_mask_ | kHeapObjectTagMask;
  object_expected_ = reinterpret_cast<uintptr_t>(start) | kHeapObjectTag;
  age_mark_ = start_;
}


void SemiSpace::TearDown() {
  start_ = NULL;
  capacity_ = 0;
}


bool SemiSpace::Commit() {
  ASSERT(!is_committed());
  int pages = capacity_ / Page::kPageSize;
  Address end = start_ + maximum_capacity_;
  Address start = end - pages * Page::kPageSize;
  if (!heap()->isolate()->memory_allocator()->CommitBlock(start,
                                                          capacity_,
                                                          executable())) {
    return false;
  }

  NewSpacePage* page = anchor();
  for (int i = 1; i <= pages; i++) {
    NewSpacePage* new_page =
      NewSpacePage::Initialize(heap(), end - i * Page::kPageSize, this);
    new_page->InsertAfter(page);
    page = new_page;
  }

  committed_ = true;
  Reset();
  return true;
}


bool SemiSpace::Uncommit() {
  ASSERT(is_committed());
  Address start = start_ + maximum_capacity_ - capacity_;
  if (!heap()->isolate()->memory_allocator()->UncommitBlock(start, capacity_)) {
    return false;
  }
  anchor()->set_next_page(anchor());
  anchor()->set_prev_page(anchor());

  committed_ = false;
  return true;
}


size_t SemiSpace::CommittedPhysicalMemory() {
  if (!is_committed()) return 0;
  size_t size = 0;
  NewSpacePageIterator it(this);
  while (it.has_next()) {
    size += it.next()->CommittedPhysicalMemory();
  }
  return size;
}


bool SemiSpace::GrowTo(int new_capacity) {
  if (!is_committed()) {
    if (!Commit()) return false;
  }
  ASSERT((new_capacity & Page::kPageAlignmentMask) == 0);
  ASSERT(new_capacity <= maximum_capacity_);
  ASSERT(new_capacity > capacity_);
  int pages_before = capacity_ / Page::kPageSize;
  int pages_after = new_capacity / Page::kPageSize;

  Address end = start_ + maximum_capacity_;
  Address start = end - new_capacity;
  size_t delta = new_capacity - capacity_;

  ASSERT(IsAligned(delta, OS::AllocateAlignment()));
  if (!heap()->isolate()->memory_allocator()->CommitBlock(
      start, delta, executable())) {
    return false;
  }
  capacity_ = new_capacity;
  NewSpacePage* last_page = anchor()->prev_page();
  ASSERT(last_page != anchor());
  for (int i = pages_before + 1; i <= pages_after; i++) {
    Address page_address = end - i * Page::kPageSize;
    NewSpacePage* new_page = NewSpacePage::Initialize(heap(),
                                                      page_address,
                                                      this);
    new_page->InsertAfter(last_page);
    Bitmap::Clear(new_page);
    // Duplicate the flags that were set on the old page.
    new_page->SetFlags(last_page->GetFlags(),
                       NewSpacePage::kCopyOnFlipFlagsMask);
    last_page = new_page;
  }
  return true;
}


bool SemiSpace::ShrinkTo(int new_capacity) {
  ASSERT((new_capacity & Page::kPageAlignmentMask) == 0);
  ASSERT(new_capacity >= initial_capacity_);
  ASSERT(new_capacity < capacity_);
  if (is_committed()) {
    // Semispaces grow backwards from the end of their allocated capacity,
    // so we find the before and after start addresses relative to the
    // end of the space.
    Address space_end = start_ + maximum_capacity_;
    Address old_start = space_end - capacity_;
    size_t delta = capacity_ - new_capacity;
    ASSERT(IsAligned(delta, OS::AllocateAlignment()));

    MemoryAllocator* allocator = heap()->isolate()->memory_allocator();
    if (!allocator->UncommitBlock(old_start, delta)) {
      return false;
    }

    int pages_after = new_capacity / Page::kPageSize;
    NewSpacePage* new_last_page =
        NewSpacePage::FromAddress(space_end - pages_after * Page::kPageSize);
    new_last_page->set_next_page(anchor());
    anchor()->set_prev_page(new_last_page);
    ASSERT((current_page_ <= first_page()) && (current_page_ >= new_last_page));
  }

  capacity_ = new_capacity;

  return true;
}


void SemiSpace::FlipPages(intptr_t flags, intptr_t mask) {
  anchor_.set_owner(this);
  // Fixup back-pointers to anchor. Address of anchor changes
  // when we swap.
  anchor_.prev_page()->set_next_page(&anchor_);
  anchor_.next_page()->set_prev_page(&anchor_);

  bool becomes_to_space = (id_ == kFromSpace);
  id_ = becomes_to_space ? kToSpace : kFromSpace;
  NewSpacePage* page = anchor_.next_page();
  while (page != &anchor_) {
    page->set_owner(this);
    page->SetFlags(flags, mask);
    if (becomes_to_space) {
      page->ClearFlag(MemoryChunk::IN_FROM_SPACE);
      page->SetFlag(MemoryChunk::IN_TO_SPACE);
      page->ClearFlag(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK);
      page->ResetLiveBytes();
    } else {
      page->SetFlag(MemoryChunk::IN_FROM_SPACE);
      page->ClearFlag(MemoryChunk::IN_TO_SPACE);
    }
    ASSERT(page->IsFlagSet(MemoryChunk::SCAN_ON_SCAVENGE));
    ASSERT(page->IsFlagSet(MemoryChunk::IN_TO_SPACE) ||
           page->IsFlagSet(MemoryChunk::IN_FROM_SPACE));
    page = page->next_page();
  }
}


void SemiSpace::Reset() {
  ASSERT(anchor_.next_page() != &anchor_);
  current_page_ = anchor_.next_page();
}


void SemiSpace::Swap(SemiSpace* from, SemiSpace* to) {
  // We won't be swapping semispaces without data in them.
  ASSERT(from->anchor_.next_page() != &from->anchor_);
  ASSERT(to->anchor_.next_page() != &to->anchor_);

  // Swap bits.
  SemiSpace tmp = *from;
  *from = *to;
  *to = tmp;

  // Fixup back-pointers to the page list anchor now that its address
  // has changed.
  // Swap to/from-space bits on pages.
  // Copy GC flags from old active space (from-space) to new (to-space).
  intptr_t flags = from->current_page()->GetFlags();
  to->FlipPages(flags, NewSpacePage::kCopyOnFlipFlagsMask);

  from->FlipPages(0, 0);
}
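
// A minimal usage sketch (hypothetical, simplified caller): after a scavenge
// has copied live objects into to-space, the collector flips the roles of the
// two semispaces roughly like this (cf. NewSpace::Flip):
//
//   SemiSpace::Swap(&from_space_, &to_space_);
//
// Swap exchanges the two SemiSpace structs, and FlipPages() then retags every
// page's IN_FROM_SPACE/IN_TO_SPACE flags and resets live bytes on the pages
// that just became to-space.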


void SemiSpace::set_age_mark(Address mark) {
  ASSERT(NewSpacePage::FromLimit(mark)->semi_space() == this);
  age_mark_ = mark;
  // Mark all pages up to the one containing mark.
  NewSpacePageIterator it(space_start(), mark);
  while (it.has_next()) {
    it.next()->SetFlag(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK);
  }
}


#ifdef DEBUG
void SemiSpace::Print() { }
#endif

#ifdef VERIFY_HEAP
void SemiSpace::Verify() {
  bool is_from_space = (id_ == kFromSpace);
  NewSpacePage* page = anchor_.next_page();
  CHECK(anchor_.semi_space() == this);
  while (page != &anchor_) {
    CHECK(page->semi_space() == this);
    CHECK(page->InNewSpace());
    CHECK(page->IsFlagSet(is_from_space ? MemoryChunk::IN_FROM_SPACE
                                        : MemoryChunk::IN_TO_SPACE));
    CHECK(!page->IsFlagSet(is_from_space ? MemoryChunk::IN_TO_SPACE
                                         : MemoryChunk::IN_FROM_SPACE));
    CHECK(page->IsFlagSet(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING));
    if (!is_from_space) {
      // The pointers-from-here-are-interesting flag isn't updated dynamically
      // on from-space pages, so it might be out of sync with the marking state.
      if (page->heap()->incremental_marking()->IsMarking()) {
        CHECK(page->IsFlagSet(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING));
      } else {
        CHECK(!page->IsFlagSet(
            MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING));
      }
      // TODO(gc): Check that the live_bytes_count_ field matches the
      // black marking on the page (if we make it match in new-space).
    }
    CHECK(page->IsFlagSet(MemoryChunk::SCAN_ON_SCAVENGE));
    CHECK(page->prev_page()->next_page() == page);
    page = page->next_page();
  }
}
#endif

#ifdef DEBUG
void SemiSpace::AssertValidRange(Address start, Address end) {
  // Addresses belong to the same semi-space.
  NewSpacePage* page = NewSpacePage::FromLimit(start);
  NewSpacePage* end_page = NewSpacePage::FromLimit(end);
  SemiSpace* space = page->semi_space();
  CHECK_EQ(space, end_page->semi_space());
  // Start address is before end address, either on same page,
  // or end address is on a later page in the linked list of
  // semi-space pages.
  if (page == end_page) {
    CHECK(start <= end);
  } else {
    while (page != end_page) {
      page = page->next_page();
      CHECK_NE(page, space->anchor());
    }
  }
}
#endif


// -----------------------------------------------------------------------------
// SemiSpaceIterator implementation.
SemiSpaceIterator::SemiSpaceIterator(NewSpace* space) {
  Initialize(space->bottom(), space->top(), NULL);
}


SemiSpaceIterator::SemiSpaceIterator(NewSpace* space,
                                     HeapObjectCallback size_func) {
  Initialize(space->bottom(), space->top(), size_func);
}


SemiSpaceIterator::SemiSpaceIterator(NewSpace* space, Address start) {
  Initialize(start, space->top(), NULL);
}


SemiSpaceIterator::SemiSpaceIterator(Address from, Address to) {
  Initialize(from, to, NULL);
}


void SemiSpaceIterator::Initialize(Address start,
                                   Address end,
                                   HeapObjectCallback size_func) {
  SemiSpace::AssertValidRange(start, end);
  current_ = start;
  limit_ = end;
  size_func_ = size_func;
}


#ifdef DEBUG
// heap_histograms is shared; always clear it before using it.
static void ClearHistograms() {
  Isolate* isolate = Isolate::Current();
  // We reset the name each time, though it hasn't changed.
#define DEF_TYPE_NAME(name) isolate->heap_histograms()[name].set_name(#name);
  INSTANCE_TYPE_LIST(DEF_TYPE_NAME)
#undef DEF_TYPE_NAME

#define CLEAR_HISTOGRAM(name) isolate->heap_histograms()[name].clear();
  INSTANCE_TYPE_LIST(CLEAR_HISTOGRAM)
#undef CLEAR_HISTOGRAM

  isolate->js_spill_information()->Clear();
}


static void ClearCodeKindStatistics() {
  Isolate* isolate = Isolate::Current();
  for (int i = 0; i < Code::NUMBER_OF_KINDS; i++) {
    isolate->code_kind_statistics()[i] = 0;
  }
}


static void ReportCodeKindStatistics() {
  Isolate* isolate = Isolate::Current();
  const char* table[Code::NUMBER_OF_KINDS] = { NULL };

#define CASE(name)                            \
  case Code::name: table[Code::name] = #name; \
  break

  for (int i = 0; i < Code::NUMBER_OF_KINDS; i++) {
    switch (static_cast<Code::Kind>(i)) {
      CASE(FUNCTION);
      CASE(OPTIMIZED_FUNCTION);
      CASE(STUB);
      CASE(BUILTIN);
      CASE(LOAD_IC);
      CASE(KEYED_LOAD_IC);
      CASE(STORE_IC);
      CASE(KEYED_STORE_IC);
      CASE(CALL_IC);
      CASE(KEYED_CALL_IC);
      CASE(UNARY_OP_IC);
      CASE(BINARY_OP_IC);
      CASE(COMPARE_IC);
      CASE(COMPARE_NIL_IC);
      CASE(TO_BOOLEAN_IC);
    }
  }

#undef CASE

  PrintF("\n   Code kind histograms: \n");
  for (int i = 0; i < Code::NUMBER_OF_KINDS; i++) {
    if (isolate->code_kind_statistics()[i] > 0) {
      PrintF("     %-20s: %10d bytes\n", table[i],
          isolate->code_kind_statistics()[i]);
    }
  }
  PrintF("\n");
}


static int CollectHistogramInfo(HeapObject* obj) {
  Isolate* isolate = Isolate::Current();
  InstanceType type = obj->map()->instance_type();
  ASSERT(0 <= type && type <= LAST_TYPE);
  ASSERT(isolate->heap_histograms()[type].name() != NULL);
  isolate->heap_histograms()[type].increment_number(1);
  isolate->heap_histograms()[type].increment_bytes(obj->Size());

  if (FLAG_collect_heap_spill_statistics && obj->IsJSObject()) {
    JSObject::cast(obj)->IncrementSpillStatistics(
        isolate->js_spill_information());
  }

  return obj->Size();
}


static void ReportHistogram(bool print_spill) {
  Isolate* isolate = Isolate::Current();
  PrintF("\n  Object Histogram:\n");
  for (int i = 0; i <= LAST_TYPE; i++) {
    if (isolate->heap_histograms()[i].number() > 0) {
      PrintF("    %-34s%10d (%10d bytes)\n",
             isolate->heap_histograms()[i].name(),
             isolate->heap_histograms()[i].number(),
             isolate->heap_histograms()[i].bytes());
    }
  }
  PrintF("\n");

  // Summarize string types.
  int string_number = 0;
  int string_bytes = 0;
#define INCREMENT(type, size, name, camel_name)      \
    string_number += isolate->heap_histograms()[type].number(); \
    string_bytes += isolate->heap_histograms()[type].bytes();
  STRING_TYPE_LIST(INCREMENT)
#undef INCREMENT
  if (string_number > 0) {
    PrintF("    %-34s%10d (%10d bytes)\n\n", "STRING_TYPE", string_number,
           string_bytes);
  }

  if (FLAG_collect_heap_spill_statistics && print_spill) {
    isolate->js_spill_information()->Print();
  }
}
#endif  // DEBUG


// Support for statistics gathering for --heap-stats and --log-gc.
void NewSpace::ClearHistograms() {
  for (int i = 0; i <= LAST_TYPE; i++) {
    allocated_histogram_[i].clear();
    promoted_histogram_[i].clear();
  }
}

// Because the copying collector does not touch garbage objects, we iterate
// the new space before a collection to get a histogram of allocated objects.
// This only happens when the --log-gc flag is set.
void NewSpace::CollectStatistics() {
  ClearHistograms();
  SemiSpaceIterator it(this);
  for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next())
    RecordAllocation(obj);
}


static void DoReportStatistics(Isolate* isolate,
                               HistogramInfo* info, const char* description) {
  LOG(isolate, HeapSampleBeginEvent("NewSpace", description));
  // Lump all the string types together.
  int string_number = 0;
  int string_bytes = 0;
#define INCREMENT(type, size, name, camel_name)       \
    string_number += info[type].number();             \
    string_bytes += info[type].bytes();
  STRING_TYPE_LIST(INCREMENT)
#undef INCREMENT
  if (string_number > 0) {
    LOG(isolate,
        HeapSampleItemEvent("STRING_TYPE", string_number, string_bytes));
  }

  // Then do the other types.
  for (int i = FIRST_NONSTRING_TYPE; i <= LAST_TYPE; ++i) {
    if (info[i].number() > 0) {
      LOG(isolate,
          HeapSampleItemEvent(info[i].name(), info[i].number(),
                              info[i].bytes()));
    }
  }
  LOG(isolate, HeapSampleEndEvent("NewSpace", description));
}


void NewSpace::ReportStatistics() {
#ifdef DEBUG
  if (FLAG_heap_stats) {
    float pct = static_cast<float>(Available()) / Capacity();
    PrintF("  capacity: %" V8_PTR_PREFIX "d"
               ", available: %" V8_PTR_PREFIX "d, %%%d\n",
           Capacity(), Available(), static_cast<int>(pct*100));
    PrintF("\n  Object Histogram:\n");
    for (int i = 0; i <= LAST_TYPE; i++) {
      if (allocated_histogram_[i].number() > 0) {
        PrintF("    %-34s%10d (%10d bytes)\n",
               allocated_histogram_[i].name(),
               allocated_histogram_[i].number(),
               allocated_histogram_[i].bytes());
      }
    }
    PrintF("\n");
  }
#endif  // DEBUG

  if (FLAG_log_gc) {
    Isolate* isolate = ISOLATE;
    DoReportStatistics(isolate, allocated_histogram_, "allocated");
    DoReportStatistics(isolate, promoted_histogram_, "promoted");
  }
}


void NewSpace::RecordAllocation(HeapObject* obj) {
  InstanceType type = obj->map()->instance_type();
  ASSERT(0 <= type && type <= LAST_TYPE);
  allocated_histogram_[type].increment_number(1);
  allocated_histogram_[type].increment_bytes(obj->Size());
}


void NewSpace::RecordPromotion(HeapObject* obj) {
  InstanceType type = obj->map()->instance_type();
  ASSERT(0 <= type && type <= LAST_TYPE);
  promoted_histogram_[type].increment_number(1);
  promoted_histogram_[type].increment_bytes(obj->Size());
}


size_t NewSpace::CommittedPhysicalMemory() {
  if (!VirtualMemory::HasLazyCommits()) return CommittedMemory();
  MemoryChunk::UpdateHighWaterMark(allocation_info_.top);
  size_t size = to_space_.CommittedPhysicalMemory();
  if (from_space_.is_committed()) {
    size += from_space_.CommittedPhysicalMemory();
  }
  return size;
}

// -----------------------------------------------------------------------------
// Free lists for old object spaces implementation

void FreeListNode::set_size(Heap* heap, int size_in_bytes) {
  ASSERT(size_in_bytes > 0);
  ASSERT(IsAligned(size_in_bytes, kPointerSize));

  // We write a map and possibly size information to the block.  If the block
  // is big enough to be a FreeSpace with at least one extra word (the next
  // pointer), we set its map to be the free space map and its size to an
  // appropriate array length for the desired size from HeapObject::Size().
  // If the block is too small (e.g., one or two words) to hold both a size
  // field and a next pointer, we give it a filler map that gives it the
  // correct size.
  if (size_in_bytes > FreeSpace::kHeaderSize) {
    set_map_no_write_barrier(heap->raw_unchecked_free_space_map());
    // Can't use FreeSpace::cast because it fails during deserialization.
    FreeSpace* this_as_free_space = reinterpret_cast<FreeSpace*>(this);
    this_as_free_space->set_size(size_in_bytes);
  } else if (size_in_bytes == kPointerSize) {
    set_map_no_write_barrier(heap->raw_unchecked_one_pointer_filler_map());
  } else if (size_in_bytes == 2 * kPointerSize) {
    set_map_no_write_barrier(heap->raw_unchecked_two_pointer_filler_map());
  } else {
    UNREACHABLE();
  }
  // We would like to ASSERT(Size() == size_in_bytes) but this would fail during
  // deserialization because the free space map is not done yet.
}
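
// Illustrative sketch of the three cases above (assumptions: 'heap' is a
// bootstrapped Heap* and 'addr' points at a pointer-aligned dead region):
//
//   FreeListNode* node = FreeListNode::FromAddress(addr);
//   node->set_size(heap, kPointerSize);       // -> one_pointer_filler_map
//   node->set_size(heap, 2 * kPointerSize);   // -> two_pointer_filler_map
//   node->set_size(heap, 16 * kPointerSize);  // -> free_space_map + length
//
// The free-space form additionally records its length, so HeapObject::Size()
// can skip over the block when iterating a page.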


FreeListNode* FreeListNode::next() {
  ASSERT(IsFreeListNode(this));
  if (map() == GetHeap()->raw_unchecked_free_space_map()) {
    ASSERT(map() == NULL || Size() >= kNextOffset + kPointerSize);
    return reinterpret_cast<FreeListNode*>(
        Memory::Address_at(address() + kNextOffset));
  } else {
    return reinterpret_cast<FreeListNode*>(
        Memory::Address_at(address() + kPointerSize));
  }
}


FreeListNode** FreeListNode::next_address() {
  ASSERT(IsFreeListNode(this));
  if (map() == GetHeap()->raw_unchecked_free_space_map()) {
    ASSERT(Size() >= kNextOffset + kPointerSize);
    return reinterpret_cast<FreeListNode**>(address() + kNextOffset);
  } else {
    return reinterpret_cast<FreeListNode**>(address() + kPointerSize);
  }
}


void FreeListNode::set_next(FreeListNode* next) {
  ASSERT(IsFreeListNode(this));
  // While we are booting the VM the free space map will actually be null.  So
  // we have to make sure that we don't try to use it for anything at that
  // stage.
  if (map() == GetHeap()->raw_unchecked_free_space_map()) {
    ASSERT(map() == NULL || Size() >= kNextOffset + kPointerSize);
    Memory::Address_at(address() + kNextOffset) =
        reinterpret_cast<Address>(next);
  } else {
    Memory::Address_at(address() + kPointerSize) =
        reinterpret_cast<Address>(next);
  }
}


intptr_t FreeListCategory::Concatenate(FreeListCategory* category) {
  intptr_t free_bytes = 0;
  if (category->top_ != NULL) {
    ASSERT(category->end_ != NULL);
    // This is safe (not going to deadlock) since Concatenate operations
    // are never performed on the same free lists at the same time in
    // reverse order.
    ScopedLock lock_target(mutex_);
    ScopedLock lock_source(category->mutex());
    free_bytes = category->available();
    if (end_ == NULL) {
      end_ = category->end();
    } else {
      category->end()->set_next(top_);
    }
    top_ = category->top();
    available_ += category->available();
    category->Reset();
  }
  return free_bytes;
}


void FreeListCategory::Reset() {
  top_ = NULL;
  end_ = NULL;
  available_ = 0;
}


intptr_t FreeListCategory::EvictFreeListItemsInList(Page* p) {
  int sum = 0;
  FreeListNode** n = &top_;
  while (*n != NULL) {
    if (Page::FromAddress((*n)->address()) == p) {
      FreeSpace* free_space = reinterpret_cast<FreeSpace*>(*n);
      sum += free_space->Size();
      *n = (*n)->next();
    } else {
      n = (*n)->next_address();
    }
  }
  if (top_ == NULL) {
    end_ = NULL;
  }
  available_ -= sum;
  return sum;
}


FreeListNode* FreeListCategory::PickNodeFromList(int *node_size) {
  FreeListNode* node = top_;

  if (node == NULL) return NULL;

  while (node != NULL &&
         Page::FromAddress(node->address())->IsEvacuationCandidate()) {
    available_ -= node->Size();
    node = node->next();
  }

  if (node != NULL) {
    set_top(node->next());
    *node_size = node->Size();
    available_ -= *node_size;
  } else {
    set_top(NULL);
  }

  if (top() == NULL) {
    set_end(NULL);
  }

  return node;
}


void FreeListCategory::Free(FreeListNode* node, int size_in_bytes) {
  node->set_next(top_);
  top_ = node;
  if (end_ == NULL) {
    end_ = node;
  }
  available_ += size_in_bytes;
}


void FreeListCategory::RepairFreeList(Heap* heap) {
  FreeListNode* n = top_;
  while (n != NULL) {
    Map** map_location = reinterpret_cast<Map**>(n->address());
    if (*map_location == NULL) {
      *map_location = heap->free_space_map();
    } else {
      ASSERT(*map_location == heap->free_space_map());
    }
    n = n->next();
  }
}


FreeList::FreeList(PagedSpace* owner)
    : owner_(owner), heap_(owner->heap()) {
  Reset();
}


intptr_t FreeList::Concatenate(FreeList* free_list) {
  intptr_t free_bytes = 0;
  free_bytes += small_list_.Concatenate(free_list->small_list());
  free_bytes += medium_list_.Concatenate(free_list->medium_list());
  free_bytes += large_list_.Concatenate(free_list->large_list());
  free_bytes += huge_list_.Concatenate(free_list->huge_list());
  return free_bytes;
}


void FreeList::Reset() {
  small_list_.Reset();
  medium_list_.Reset();
  large_list_.Reset();
  huge_list_.Reset();
}


int FreeList::Free(Address start, int size_in_bytes) {
  if (size_in_bytes == 0) return 0;

  FreeListNode* node = FreeListNode::FromAddress(start);
  node->set_size(heap_, size_in_bytes);
  Page* page = Page::FromAddress(start);

  // Early return to drop too-small blocks on the floor.
  if (size_in_bytes < kSmallListMin) {
    page->add_non_available_small_blocks(size_in_bytes);
    return size_in_bytes;
  }

  // Insert other blocks at the head of a free list of the appropriate
  // magnitude.
  if (size_in_bytes <= kSmallListMax) {
    small_list_.Free(node, size_in_bytes);
    page->add_available_in_small_free_list(size_in_bytes);
  } else if (size_in_bytes <= kMediumListMax) {
    medium_list_.Free(node, size_in_bytes);
    page->add_available_in_medium_free_list(size_in_bytes);
  } else if (size_in_bytes <= kLargeListMax) {
    large_list_.Free(node, size_in_bytes);
    page->add_available_in_large_free_list(size_in_bytes);
  } else {
    huge_list_.Free(node, size_in_bytes);
    page->add_available_in_huge_free_list(size_in_bytes);
  }

  ASSERT(IsVeryLong() || available() == SumFreeLists());
  return 0;
}
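
// Illustrative sketch of the contract above (hypothetical, simplified
// caller): the return value is the number of bytes that could not be put on
// any free list, so sweeping code typically treats it as waste, e.g.
//
//   int wasted = free_list_.Free(start, size_in_bytes);
//   accounting_stats_.WasteBytes(wasted);  // assumed accounting hook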


FreeListNode* FreeList::FindNodeFor(int size_in_bytes, int* node_size) {
  FreeListNode* node = NULL;
  Page* page = NULL;

  if (size_in_bytes <= kSmallAllocationMax) {
    node = small_list_.PickNodeFromList(node_size);
    if (node != NULL) {
      page = Page::FromAddress(node->address());
      page->add_available_in_small_free_list(-(*node_size));
      return node;
    }
  }

  if (size_in_bytes <= kMediumAllocationMax) {
    node = medium_list_.PickNodeFromList(node_size);
    if (node != NULL) {
      page = Page::FromAddress(node->address());
      page->add_available_in_medium_free_list(-(*node_size));
      return node;
    }
  }

  if (size_in_bytes <= kLargeAllocationMax) {
    node = large_list_.PickNodeFromList(node_size);
    if (node != NULL) {
      page = Page::FromAddress(node->address());
      page->add_available_in_large_free_list(-(*node_size));
      return node;
    }
  }

  int huge_list_available = huge_list_.available();
  for (FreeListNode** cur = huge_list_.GetTopAddress();
       *cur != NULL;
       cur = (*cur)->next_address()) {
    FreeListNode* cur_node = *cur;
    while (cur_node != NULL &&
           Page::FromAddress(cur_node->address())->IsEvacuationCandidate()) {
      int size = reinterpret_cast<FreeSpace*>(cur_node)->Size();
      huge_list_available -= size;
      page = Page::FromAddress(cur_node->address());
      page->add_available_in_huge_free_list(-size);
      cur_node = cur_node->next();
    }

    *cur = cur_node;
    if (cur_node == NULL) {
      huge_list_.set_end(NULL);
      break;
    }

    ASSERT((*cur)->map() == heap_->raw_unchecked_free_space_map());
    FreeSpace* cur_as_free_space = reinterpret_cast<FreeSpace*>(*cur);
    int size = cur_as_free_space->Size();
    if (size >= size_in_bytes) {
      // Large enough node found.  Unlink it from the list.
      node = *cur;
      *cur = node->next();
      *node_size = size;
      huge_list_available -= size;
      page = Page::FromAddress(node->address());
      page->add_available_in_huge_free_list(-size);
      break;
    }
  }

  if (huge_list_.top() == NULL) {
    huge_list_.set_end(NULL);
  }

  huge_list_.set_available(huge_list_available);
  ASSERT(IsVeryLong() || available() == SumFreeLists());

  return node;
}


// Allocation on the old space free list.  If it succeeds then a new linear
// allocation space has been set up with the top and limit of the space.  If
// the allocation fails then NULL is returned, and the caller can perform a GC
// or allocate a new page before retrying.
HeapObject* FreeList::Allocate(int size_in_bytes) {
  ASSERT(0 < size_in_bytes);
  ASSERT(size_in_bytes <= kMaxBlockSize);
  ASSERT(IsAligned(size_in_bytes, kPointerSize));
  // Don't free list allocate if there is linear space available.
  ASSERT(owner_->limit() - owner_->top() < size_in_bytes);

  int old_linear_size = static_cast<int>(owner_->limit() - owner_->top());
  // Mark the old linear allocation area with a free space map so it can be
  // skipped when scanning the heap.  This also puts it back in the free list
  // if it is big enough.
  owner_->Free(owner_->top(), old_linear_size);

  owner_->heap()->incremental_marking()->OldSpaceStep(
      size_in_bytes - old_linear_size);

  int new_node_size = 0;
  FreeListNode* new_node = FindNodeFor(size_in_bytes, &new_node_size);
  if (new_node == NULL) {
    owner_->SetTop(NULL, NULL);
    return NULL;
  }

  int bytes_left = new_node_size - size_in_bytes;
  ASSERT(bytes_left >= 0);

#ifdef DEBUG
  for (int i = 0; i < size_in_bytes / kPointerSize; i++) {
    reinterpret_cast<Object**>(new_node->address())[i] =
        Smi::FromInt(kCodeZapValue);
  }
#endif

  // The old-space-step might have finished sweeping and restarted marking.
  // Verify that it did not turn the page of the new node into an evacuation
  // candidate.
  ASSERT(!MarkCompactCollector::IsOnEvacuationCandidate(new_node));

  const int kThreshold = IncrementalMarking::kAllocatedThreshold;

  // Memory in the linear allocation area is counted as allocated.  We may free
  // a little of this again immediately - see below.
  owner_->Allocate(new_node_size);

  if (bytes_left > kThreshold &&
      owner_->heap()->incremental_marking()->IsMarkingIncomplete() &&
      FLAG_incremental_marking_steps) {
    int linear_size = owner_->RoundSizeDownToObjectAlignment(kThreshold);
    // We don't want to give too large linear areas to the allocator while
    // incremental marking is going on, because we won't check again whether
    // we want to do another increment until the linear area is used up.
    owner_->Free(new_node->address() + size_in_bytes + linear_size,
                 new_node_size - size_in_bytes - linear_size);
    owner_->SetTop(new_node->address() + size_in_bytes,
                   new_node->address() + size_in_bytes + linear_size);
  } else if (bytes_left > 0) {
    // Normally we give the rest of the node to the allocator as its new
    // linear allocation area.
    owner_->SetTop(new_node->address() + size_in_bytes,
                   new_node->address() + new_node_size);
  } else {
    // TODO(gc) Try not freeing linear allocation region when bytes_left
    // are zero.
    owner_->SetTop(NULL, NULL);
  }

  return new_node;
}
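
// Illustrative caller pattern (simplified sketch; the real fast path lives in
// PagedSpace's allocation routines such as SlowAllocateRaw below):
//
//   HeapObject* obj = free_list_.Allocate(size_in_bytes);
//   if (obj == NULL) {
//     // No node was big enough: expand the space or collect garbage, retry.
//   } else {
//     // A linear allocation area [top, limit) has also been set up, so the
//     // following allocations can bump-allocate without touching the lists.
//   }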


intptr_t FreeList::EvictFreeListItems(Page* p) {
  intptr_t sum = huge_list_.EvictFreeListItemsInList(p);
  p->set_available_in_huge_free_list(0);

  if (sum < p->area_size()) {
    sum += small_list_.EvictFreeListItemsInList(p) +
        medium_list_.EvictFreeListItemsInList(p) +
        large_list_.EvictFreeListItemsInList(p);
    p->set_available_in_small_free_list(0);
    p->set_available_in_medium_free_list(0);
    p->set_available_in_large_free_list(0);
  }

  return sum;
}


void FreeList::RepairLists(Heap* heap) {
  small_list_.RepairFreeList(heap);
  medium_list_.RepairFreeList(heap);
  large_list_.RepairFreeList(heap);
  huge_list_.RepairFreeList(heap);
}


#ifdef DEBUG
intptr_t FreeListCategory::SumFreeList() {
  intptr_t sum = 0;
  FreeListNode* cur = top_;
  while (cur != NULL) {
    ASSERT(cur->map() == cur->GetHeap()->raw_unchecked_free_space_map());
    FreeSpace* cur_as_free_space = reinterpret_cast<FreeSpace*>(cur);
    sum += cur_as_free_space->Size();
    cur = cur->next();
  }
  return sum;
}


static const int kVeryLongFreeList = 500;


int FreeListCategory::FreeListLength() {
  int length = 0;
  FreeListNode* cur = top_;
  while (cur != NULL) {
    length++;
    cur = cur->next();
    if (length == kVeryLongFreeList) return length;
  }
  return length;
}


bool FreeList::IsVeryLong() {
  if (small_list_.FreeListLength() == kVeryLongFreeList) return true;
  if (medium_list_.FreeListLength() == kVeryLongFreeList) return true;
  if (large_list_.FreeListLength() == kVeryLongFreeList) return true;
  if (huge_list_.FreeListLength() == kVeryLongFreeList) return true;
  return false;
}


// This can take a very long time because it is linear in the number of entries
// on the free list, so it should not be called if FreeListLength returns
// kVeryLongFreeList.
intptr_t FreeList::SumFreeLists() {
  intptr_t sum = small_list_.SumFreeList();
  sum += medium_list_.SumFreeList();
  sum += large_list_.SumFreeList();
  sum += huge_list_.SumFreeList();
  return sum;
}
#endif


// -----------------------------------------------------------------------------
// OldSpace implementation

bool NewSpace::ReserveSpace(int bytes) {
  // We can't reliably unpack a partial snapshot that needs more new space
  // than the minimum NewSpace size.  The limit can be set lower than
  // the end of new space either because there is more space on the next page
  // or because we have lowered the limit in order to get periodic incremental
  // marking.  The most reliable way to ensure that there is linear space is
  // to do the allocation, then rewind the limit.
  ASSERT(bytes <= InitialCapacity());
  MaybeObject* maybe = AllocateRaw(bytes);
  Object* object = NULL;
  if (!maybe->ToObject(&object)) return false;
  HeapObject* allocation = HeapObject::cast(object);
  Address top = allocation_info_.top;
  if ((top - bytes) == allocation->address()) {
    allocation_info_.top = allocation->address();
    return true;
  }
  // There may be a borderline case here where the allocation succeeded, but
  // the limit and top have moved on to a new page.  In that case we try again.
  return ReserveSpace(bytes);
}
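
// Illustrative trace of the allocate-then-rewind trick above (hypothetical
// addresses): if top is 0x1000 and AllocateRaw(256) hands back an object at
// 0x1000, top has advanced to 0x1100; resetting top to 0x1000 leaves the 256
// reserved bytes as guaranteed linear space below the current limit.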


void PagedSpace::PrepareForMarkCompact() {
  // We don't have a linear allocation area while sweeping.  It will be restored
  // on the first allocation after the sweep.
  // Mark the old linear allocation area with a free space map so it can be
  // skipped when scanning the heap.
  int old_linear_size = static_cast<int>(limit() - top());
  Free(top(), old_linear_size);
  SetTop(NULL, NULL);

  // Stop lazy sweeping and clear marking bits for unswept pages.
  if (first_unswept_page_ != NULL) {
    Page* p = first_unswept_page_;
    do {
      // Do not use ShouldBeSweptLazily predicate here.
      // New evacuation candidates were selected but they still have
      // to be swept before collection starts.
      if (!p->WasSwept()) {
        Bitmap::Clear(p);
        if (FLAG_gc_verbose) {
          PrintF("Sweeping 0x%" V8PRIxPTR " lazily abandoned.\n",
                 reinterpret_cast<intptr_t>(p));
        }
      }
      p = p->next_page();
    } while (p != anchor());
  }
  first_unswept_page_ = Page::FromAddress(NULL);
  unswept_free_bytes_ = 0;

  // Clear the free list before a full GC---it will be rebuilt afterward.
  free_list_.Reset();
}


bool PagedSpace::ReserveSpace(int size_in_bytes) {
  ASSERT(size_in_bytes <= AreaSize());
  ASSERT(size_in_bytes == RoundSizeDownToObjectAlignment(size_in_bytes));
  Address current_top = allocation_info_.top;
  Address new_top = current_top + size_in_bytes;
  if (new_top <= allocation_info_.limit) return true;

  HeapObject* new_area = free_list_.Allocate(size_in_bytes);
  if (new_area == NULL) new_area = SlowAllocateRaw(size_in_bytes);
  if (new_area == NULL) return false;

  int old_linear_size = static_cast<int>(limit() - top());
  // Mark the old linear allocation area with a free space so it can be
  // skipped when scanning the heap.  This also puts it back in the free list
  // if it is big enough.
  Free(top(), old_linear_size);

  SetTop(new_area->address(), new_area->address() + size_in_bytes);
  return true;
}


intptr_t PagedSpace::SizeOfObjects() {
  ASSERT(!heap()->IsSweepingComplete() || (unswept_free_bytes_ == 0));
  return Size() - unswept_free_bytes_ - (limit() - top());
}
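
// Worked example with illustrative numbers: if Size() is 8 MB, lazily swept
// pages still hold unswept_free_bytes_ == 1 MB of garbage, and the unused
// linear area (limit() - top()) is 16 KB, then SizeOfObjects() reports
// 8 MB - 1 MB - 16 KB, i.e. roughly 7152 KB of object bytes.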


// After we have booted, we have created a map which represents free space
// on the heap.  If there was already a free list then the elements on it
// were created with the wrong FreeSpaceMap (normally NULL), so we need to
// fix them.
void PagedSpace::RepairFreeListsAfterBoot() {
  free_list_.RepairLists(heap());
}


// You have to call this last, since the implementation from PagedSpace
// doesn't know that memory was 'promised' to large object space.
bool LargeObjectSpace::ReserveSpace(int bytes) {
  return heap()->OldGenerationCapacityAvailable() >= bytes &&
         (!heap()->incremental_marking()->IsStopped() ||
           heap()->OldGenerationSpaceAvailable() >= bytes);
}


bool PagedSpace::AdvanceSweeper(intptr_t bytes_to_sweep) {
  if (IsLazySweepingComplete()) return true;

  intptr_t freed_bytes = 0;
  Page* p = first_unswept_page_;
  do {
    Page* next_page = p->next_page();
    if (ShouldBeSweptLazily(p)) {
      if (FLAG_gc_verbose) {
        PrintF("Sweeping 0x%" V8PRIxPTR " lazily advanced.\n",
               reinterpret_cast<intptr_t>(p));
      }
      DecreaseUnsweptFreeBytes(p);
      freed_bytes +=
          MarkCompactCollector::
              SweepConservatively<MarkCompactCollector::SWEEP_SEQUENTIALLY>(
                  this, NULL, p);
    }
    p = next_page;
  } while (p != anchor() && freed_bytes < bytes_to_sweep);

  if (p == anchor()) {
    first_unswept_page_ = Page::FromAddress(NULL);
  } else {
    first_unswept_page_ = p;
  }

  heap()->FreeQueuedChunks();

  return IsLazySweepingComplete();
}


void PagedSpace::EvictEvacuationCandidatesFromFreeLists() {
  if (allocation_info_.top >= allocation_info_.limit) return;

  if (Page::FromAllocationTop(allocation_info_.top)->IsEvacuationCandidate()) {
    // Create filler object to keep page iterable if it was iterable.
    int remaining =
        static_cast<int>(allocation_info_.limit - allocation_info_.top);
    heap()->CreateFillerObjectAt(allocation_info_.top, remaining);

    allocation_info_.top = NULL;
    allocation_info_.limit = NULL;
  }
}


bool PagedSpace::EnsureSweeperProgress(intptr_t size_in_bytes) {
  MarkCompactCollector* collector = heap()->mark_compact_collector();
  if (collector->AreSweeperThreadsActivated()) {
    if (collector->IsConcurrentSweepingInProgress()) {
      if (collector->StealMemoryFromSweeperThreads(this) < size_in_bytes) {
        if (!collector->sequential_sweeping()) {
          collector->WaitUntilSweepingCompleted();
          return true;
        }
      }
      return false;
    }
    return true;
  } else {
    return AdvanceSweeper(size_in_bytes);
  }
}


HeapObject* PagedSpace::SlowAllocateRaw(int size_in_bytes) {
  // Allocation in this space has failed.

  // If there are unswept pages, advance the lazy sweeper a bounded number of
  // times until we find a size_in_bytes contiguous piece of memory.
  const int kMaxSweepingTries = 5;
  bool sweeping_complete = false;

  for (int i = 0; i < kMaxSweepingTries && !sweeping_complete; i++) {
    sweeping_complete = EnsureSweeperProgress(size_in_bytes);

    // Retry the free list allocation.
    HeapObject* object = free_list_.Allocate(size_in_bytes);
    if (object != NULL) return object;
  }

  // Free list allocation failed and there is no next page.  Fail if we have
  // hit the old generation size limit that should cause a garbage
  // collection.
  if (!heap()->always_allocate() &&
      heap()->OldGenerationAllocationLimitReached()) {
    return NULL;
  }

  // Try to expand the space and allocate in the new next page.
  if (Expand()) {
    return free_list_.Allocate(size_in_bytes);
  }

  // Last ditch, sweep all the remaining pages to try to find space.  This may
  // cause a pause.
  if (!IsLazySweepingComplete()) {
    EnsureSweeperProgress(kMaxInt);

    // Retry the free list allocation.
    HeapObject* object = free_list_.Allocate(size_in_bytes);
    if (object != NULL) return object;
  }

  // Finally, fail.
  return NULL;
}


#ifdef DEBUG
void PagedSpace::ReportCodeStatistics() {
  Isolate* isolate = Isolate::Current();
  CommentStatistic* comments_statistics =
      isolate->paged_space_comments_statistics();
  ReportCodeKindStatistics();
  PrintF("Code comment statistics (\"   [ comment-txt   :    size/   "
         "count  (average)\"):\n");
  for (int i = 0; i <= CommentStatistic::kMaxComments; i++) {
    const CommentStatistic& cs = comments_statistics[i];
    if (cs.size > 0) {
      PrintF("   %-30s: %10d/%6d     (%d)\n", cs.comment, cs.size, cs.count,
             cs.size/cs.count);
    }
  }
  PrintF("\n");
}


void PagedSpace::ResetCodeStatistics() {
  Isolate* isolate = Isolate::Current();
  CommentStatistic* comments_statistics =
      isolate->paged_space_comments_statistics();
  ClearCodeKindStatistics();
  for (int i = 0; i < CommentStatistic::kMaxComments; i++) {
    comments_statistics[i].Clear();
  }
  comments_statistics[CommentStatistic::kMaxComments].comment = "Unknown";
  comments_statistics[CommentStatistic::kMaxComments].size = 0;
  comments_statistics[CommentStatistic::kMaxComments].count = 0;
}


// Adds a comment to the 'comment_statistics' table. Performance OK as long as
// 'kMaxComments' is small.
static void EnterComment(Isolate* isolate, const char* comment, int delta) {
  CommentStatistic* comments_statistics =
      isolate->paged_space_comments_statistics();
  // Do not count empty comments
  if (delta <= 0) return;
  CommentStatistic* cs = &comments_statistics[CommentStatistic::kMaxComments];
  // Search for a free or matching entry in 'comments_statistics': 'cs'
  // points to result.
  for (int i = 0; i < CommentStatistic::kMaxComments; i++) {
    if (comments_statistics[i].comment == NULL) {
      cs = &comments_statistics[i];
      cs->comment = comment;
      break;
    } else if (strcmp(comments_statistics[i].comment, comment) == 0) {
      cs = &comments_statistics[i];
      break;
    }
  }
  // Update entry for 'comment'
  cs->size += delta;
  cs->count += 1;
}


// Call for each nested comment start (start marked with '[ xxx', end marked
// with ']').  RelocIterator 'it' must point to a comment reloc info.
static void CollectCommentStatistics(Isolate* isolate, RelocIterator* it) {
  ASSERT(!it->done());
  ASSERT(it->rinfo()->rmode() == RelocInfo::COMMENT);
  const char* tmp = reinterpret_cast<const char*>(it->rinfo()->data());
  if (tmp[0] != '[') {
    // Not a nested comment; skip
    return;
  }

  // Search for end of nested comment or a new nested comment
  const char* const comment_txt =
      reinterpret_cast<const char*>(it->rinfo()->data());
  const byte* prev_pc = it->rinfo()->pc();
  int flat_delta = 0;
  it->next();
  while (true) {
    // All nested comments must be terminated properly, and therefore exit
    // from loop.
    ASSERT(!it->done());
    if (it->rinfo()->rmode() == RelocInfo::COMMENT) {
      const char* const txt =
          reinterpret_cast<const char*>(it->rinfo()->data());
      flat_delta += static_cast<int>(it->rinfo()->pc() - prev_pc);
      if (txt[0] == ']') break;  // End of nested  comment
      // A new comment
      CollectCommentStatistics(isolate, it);
      // Skip code that was covered with previous comment
      prev_pc = it->rinfo()->pc();
    }
    it->next();
  }
  EnterComment(isolate, comment_txt, flat_delta);
}


// Collects code size statistics:
// - by code kind
// - by code comment
void PagedSpace::CollectCodeStatistics() {
  Isolate* isolate = heap()->isolate();
  HeapObjectIterator obj_it(this);
  for (HeapObject* obj = obj_it.Next(); obj != NULL; obj = obj_it.Next()) {
    if (obj->IsCode()) {
      Code* code = Code::cast(obj);
      isolate->code_kind_statistics()[code->kind()] += code->Size();
      RelocIterator it(code);
      int delta = 0;
      const byte* prev_pc = code->instruction_start();
      while (!it.done()) {
        if (it.rinfo()->rmode() == RelocInfo::COMMENT) {
          delta += static_cast<int>(it.rinfo()->pc() - prev_pc);
          CollectCommentStatistics(isolate, &it);
          prev_pc = it.rinfo()->pc();
        }
        it.next();
      }

      ASSERT(code->instruction_start() <= prev_pc &&
             prev_pc <= code->instruction_end());
      delta += static_cast<int>(code->instruction_end() - prev_pc);
      EnterComment(isolate, "NoComment", delta);
    }
  }
}


void PagedSpace::ReportStatistics() {
  int pct = static_cast<int>(Available() * 100 / Capacity());
  PrintF("  capacity: %" V8_PTR_PREFIX "d"
             ", waste: %" V8_PTR_PREFIX "d"
             ", available: %" V8_PTR_PREFIX "d, %%%d\n",
         Capacity(), Waste(), Available(), pct);

  if (was_swept_conservatively_) return;
  ClearHistograms();
  HeapObjectIterator obj_it(this);
  for (HeapObject* obj = obj_it.Next(); obj != NULL; obj = obj_it.Next())
    CollectHistogramInfo(obj);
  ReportHistogram(true);
}
#endif

// -----------------------------------------------------------------------------
// FixedSpace implementation

void FixedSpace::PrepareForMarkCompact() {
  // Call prepare of the super class.
  PagedSpace::PrepareForMarkCompact();

  // During a non-compacting collection, everything below the linear
  // allocation pointer except wasted top-of-page blocks is considered
  // allocated and we will rediscover available bytes during the
  // collection.
  accounting_stats_.AllocateBytes(free_list_.available());

  // Clear the free list before a full GC---it will be rebuilt afterward.
  free_list_.Reset();
}


// -----------------------------------------------------------------------------
// MapSpace implementation
// TODO(mvstanton): this is weird...the compiler can't make a vtable unless
// there is at least one non-inlined virtual function. I would prefer to hide
// the VerifyObject definition behind VERIFY_HEAP.

void MapSpace::VerifyObject(HeapObject* object) {
  // The object should be a map or a free-list node.
  CHECK(object->IsMap() || object->IsFreeSpace());
}


// -----------------------------------------------------------------------------
// GlobalPropertyCellSpace implementation
// TODO(mvstanton): this is weird...the compiler can't make a vtable unless
// there is at least one non-inlined virtual function. I would prefer to hide
// the VerifyObject definition behind VERIFY_HEAP.

void CellSpace::VerifyObject(HeapObject* object) {
  // The object should be a global object property cell or a free-list node.
  CHECK(object->IsJSGlobalPropertyCell() ||
         object->map() == heap()->two_pointer_filler_map());
}


// -----------------------------------------------------------------------------
// LargeObjectIterator

LargeObjectIterator::LargeObjectIterator(LargeObjectSpace* space) {
  current_ = space->first_page_;
  size_func_ = NULL;
}


LargeObjectIterator::LargeObjectIterator(LargeObjectSpace* space,
                                         HeapObjectCallback size_func) {
  current_ = space->first_page_;
  size_func_ = size_func;
}


HeapObject* LargeObjectIterator::Next() {
  if (current_ == NULL) return NULL;

  HeapObject* object = current_->GetObject();
  current_ = current_->next_page();
  return object;
}


// -----------------------------------------------------------------------------
// LargeObjectSpace
static bool ComparePointers(void* key1, void* key2) {
    return key1 == key2;
}


LargeObjectSpace::LargeObjectSpace(Heap* heap,
                                   intptr_t max_capacity,
                                   AllocationSpace id)
    : Space(heap, id, NOT_EXECUTABLE),  // Managed on a per-allocation basis
      max_capacity_(max_capacity),
      first_page_(NULL),
      size_(0),
      page_count_(0),
      objects_size_(0),
      chunk_map_(ComparePointers, 1024) {}


bool LargeObjectSpace::SetUp() {
  first_page_ = NULL;
  size_ = 0;
  page_count_ = 0;
  objects_size_ = 0;
  chunk_map_.Clear();
  return true;
}


void LargeObjectSpace::TearDown() {
  while (first_page_ != NULL) {
    LargePage* page = first_page_;
    first_page_ = first_page_->next_page();
    LOG(heap()->isolate(), DeleteEvent("LargeObjectChunk", page->address()));

    ObjectSpace space = static_cast<ObjectSpace>(1 << identity());
    heap()->isolate()->memory_allocator()->PerformAllocationCallback(
        space, kAllocationActionFree, page->size());
    heap()->isolate()->memory_allocator()->Free(page);
  }
  SetUp();
}


MaybeObject* LargeObjectSpace::AllocateRaw(int object_size,
                                           Executability executable) {
  // Check if we want to force a GC before growing the old space further.
  // If so, fail the allocation.
  if (!heap()->always_allocate() &&
      heap()->OldGenerationAllocationLimitReached()) {
    return Failure::RetryAfterGC(identity());
  }

  if (Size() + object_size > max_capacity_) {
    return Failure::RetryAfterGC(identity());
  }

  LargePage* page = heap()->isolate()->memory_allocator()->
      AllocateLargePage(object_size, this, executable);
  if (page == NULL) return Failure::RetryAfterGC(identity());
  ASSERT(page->area_size() >= object_size);

  size_ += static_cast<int>(page->size());
  objects_size_ += object_size;
  page_count_++;
  page->set_next_page(first_page_);
  first_page_ = page;

  // Register all MemoryChunk::kAlignment-aligned chunks covered by
  // this large page in the chunk map.
  uintptr_t base = reinterpret_cast<uintptr_t>(page) / MemoryChunk::kAlignment;
  uintptr_t limit = base + (page->size() - 1) / MemoryChunk::kAlignment;
  for (uintptr_t key = base; key <= limit; key++) {
    HashMap::Entry* entry = chunk_map_.Lookup(reinterpret_cast<void*>(key),
                                              static_cast<uint32_t>(key),
                                              true);
    ASSERT(entry != NULL);
    entry->value = page;
  }

  HeapObject* object = page->GetObject();

  if (Heap::ShouldZapGarbage()) {
    // Make the object consistent so the heap can be verified in OldSpaceStep.
    // We only need to do this in debug builds or if verify_heap is on.
    reinterpret_cast<Object**>(object->address())[0] =
        heap()->fixed_array_map();
    reinterpret_cast<Object**>(object->address())[1] = Smi::FromInt(0);
  }

  heap()->incremental_marking()->OldSpaceStep(object_size);
  return object;
}
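
// Illustrative note on the chunk map registered above: FindPage() below
// inverts it by hashing the aligned prefix of an arbitrary interior address,
// roughly
//
//   uintptr_t key =
//       reinterpret_cast<uintptr_t>(addr) / MemoryChunk::kAlignment;
//   // chunk_map_.Lookup(key, ...) then yields the owning LargePage, which is
//   // what makes Contains() and FindObject() work for interior addresses.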


size_t LargeObjectSpace::CommittedPhysicalMemory() {
  if (!VirtualMemory::HasLazyCommits()) return CommittedMemory();
  size_t size = 0;
  LargePage* current = first_page_;
  while (current != NULL) {
    size += current->CommittedPhysicalMemory();
    current = current->next_page();
  }
  return size;
}


// GC support
MaybeObject* LargeObjectSpace::FindObject(Address a) {
  LargePage* page = FindPage(a);
  if (page != NULL) {
    return page->GetObject();
  }
  return Failure::Exception();
}


LargePage* LargeObjectSpace::FindPage(Address a) {
  uintptr_t key = reinterpret_cast<uintptr_t>(a) / MemoryChunk::kAlignment;
  HashMap::Entry* e = chunk_map_.Lookup(reinterpret_cast<void*>(key),
                                        static_cast<uint32_t>(key),
                                        false);
  if (e != NULL) {
    ASSERT(e->value != NULL);
    LargePage* page = reinterpret_cast<LargePage*>(e->value);
    ASSERT(page->is_valid());
    if (page->Contains(a)) {
      return page;
    }
  }
  return NULL;
}


void LargeObjectSpace::FreeUnmarkedObjects() {
  LargePage* previous = NULL;
  LargePage* current = first_page_;
  while (current != NULL) {
    HeapObject* object = current->GetObject();
    // Can this large page contain pointers to non-trivial objects?  No other
    // pointer object is this big.
    bool is_pointer_object = object->IsFixedArray();
    MarkBit mark_bit = Marking::MarkBitFrom(object);
    if (mark_bit.Get()) {
      mark_bit.Clear();
      Page::FromAddress(object->address())->ResetProgressBar();
      Page::FromAddress(object->address())->ResetLiveBytes();
      previous = current;
      current = current->next_page();
    } else {
      LargePage* page = current;
      // Cut the chunk out from the chunk list.
      current = current->next_page();
      if (previous == NULL) {
        first_page_ = current;
      } else {
        previous->set_next_page(current);
      }

      // Free the chunk.
      heap()->mark_compact_collector()->ReportDeleteIfNeeded(
          object, heap()->isolate());
      size_ -= static_cast<int>(page->size());
      objects_size_ -= object->Size();
      page_count_--;

      // Remove entries belonging to this page.
      // Use variable alignment to help pass length check (<= 80 characters)
      // of single line in tools/presubmit.py.
      const intptr_t alignment = MemoryChunk::kAlignment;
      uintptr_t base = reinterpret_cast<uintptr_t>(page)/alignment;
      uintptr_t limit = base + (page->size()-1)/alignment;
      for (uintptr_t key = base; key <= limit; key++) {
        chunk_map_.Remove(reinterpret_cast<void*>(key),
                          static_cast<uint32_t>(key));
      }

      if (is_pointer_object) {
        heap()->QueueMemoryChunkForFree(page);
      } else {
        heap()->isolate()->memory_allocator()->Free(page);
      }
    }
  }
  heap()->FreeQueuedChunks();
}


bool LargeObjectSpace::Contains(HeapObject* object) {
  Address address = object->address();
  MemoryChunk* chunk = MemoryChunk::FromAddress(address);
3065

3066
  bool owned = (chunk->owner() == this);
3067

3068 3069 3070
  SLOW_ASSERT(!owned || !FindObject(address)->IsFailure());

  return owned;
3071 3072 3073
}


#ifdef VERIFY_HEAP
// We do not assume that the large object iterator works, because it depends
// on the invariants we are checking during verification.
void LargeObjectSpace::Verify() {
  for (LargePage* chunk = first_page_;
       chunk != NULL;
       chunk = chunk->next_page()) {
    // Each chunk contains an object that starts at the large object page's
    // object area start.
    HeapObject* object = chunk->GetObject();
    Page* page = Page::FromAddress(object->address());
    CHECK(object->address() == page->area_start());

    // The first word should be a map, and we expect all map pointers to be
    // in map space.
    Map* map = object->map();
    CHECK(map->IsMap());
    CHECK(heap()->map_space()->Contains(map));

    // We have only code, sequential strings, external strings
    // (sequential strings that have been morphed into external
    // strings), fixed arrays, and byte arrays in large object space.
    CHECK(object->IsCode() || object->IsSeqString() ||
          object->IsExternalString() || object->IsFixedArray() ||
          object->IsFixedDoubleArray() || object->IsByteArray());

    // The object itself should look OK.
    object->Verify();

    // Byte arrays and strings don't have interior pointers.
    if (object->IsCode()) {
      VerifyPointersVisitor code_visitor;
      object->IterateBody(map->instance_type(),
                          object->Size(),
                          &code_visitor);
    } else if (object->IsFixedArray()) {
      FixedArray* array = FixedArray::cast(object);
      for (int j = 0; j < array->length(); j++) {
        Object* element = array->get(j);
        if (element->IsHeapObject()) {
          HeapObject* element_object = HeapObject::cast(element);
          CHECK(heap()->Contains(element_object));
          CHECK(element_object->map()->IsMap());
        }
      }
    }
  }
}
#endif


#ifdef DEBUG
void LargeObjectSpace::Print() {
  LargeObjectIterator it(this);
  for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
    obj->Print();
  }
}


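// Prints the total size of the space, the number and combined size of its
// objects, and the per-type allocation histogram.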
void LargeObjectSpace::ReportStatistics() {
  PrintF("  size: %" V8_PTR_PREFIX "d\n", size_);
  int num_objects = 0;
  ClearHistograms();
  LargeObjectIterator it(this);
  for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
    num_objects++;
    CollectHistogramInfo(obj);
  }

  PrintF("  number of objects %d, "
         "size of objects %" V8_PTR_PREFIX "d\n", num_objects, objects_size_);
  if (num_objects > 0) ReportHistogram(false);
}


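// Adds the size of every Code object in this space to the isolate's
// per-code-kind statistics.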
void LargeObjectSpace::CollectCodeStatistics() {
  Isolate* isolate = heap()->isolate();
  LargeObjectIterator obj_it(this);
  for (HeapObject* obj = obj_it.Next(); obj != NULL; obj = obj_it.Next()) {
    if (obj->IsCode()) {
      Code* code = Code::cast(obj);
      isolate->code_kind_statistics()[code->kind()] += code->Size();
    }
  }
}


void Page::Print() {
  // Make a best-effort to print the objects in the page.
  PrintF("Page@%p in %s\n",
         this->address(),
         AllocationSpaceName(this->owner()->identity()));
  printf(" --------------------------------------\n");
  HeapObjectIterator objects(this, heap()->GcSafeSizeOfOldObjectFunction());
  unsigned mark_size = 0;
  for (HeapObject* object = objects.Next();
       object != NULL;
       object = objects.Next()) {
    bool is_marked = Marking::MarkBitFrom(object).Get();
    PrintF(" %c ", (is_marked ? '!' : ' '));  // Indent a little.
    if (is_marked) {
      mark_size += heap()->GcSafeSizeOfOldObjectFunction()(object);
    }
    object->ShortPrint();
    PrintF("\n");
  }
  printf(" --------------------------------------\n");
  printf(" Marked: %x, LiveCount: %x\n", mark_size, LiveBytes());
}

#endif  // DEBUG

} }  // namespace v8::internal