// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

// TODO(mythria): Remove this define after this flag is turned on globally
#define V8_IMMINENT_DEPRECATION_WARNINGS

#include <stdlib.h>

#include "src/base/platform/platform.h"
#include "src/snapshot/snapshot.h"
#include "src/v8.h"
#include "test/cctest/cctest.h"
#include "test/cctest/heap-tester.h"

namespace v8 {
namespace internal {

#if 0
static void VerifyRegionMarking(Address page_start) {
#ifdef ENABLE_CARDMARKING_WRITE_BARRIER
  Page* p = Page::FromAddress(page_start);

  p->SetRegionMarks(Page::kAllRegionsCleanMarks);

  for (Address addr = p->ObjectAreaStart();
       addr < p->ObjectAreaEnd();
       addr += kPointerSize) {
    CHECK(!Page::FromAddress(addr)->IsRegionDirty(addr));
  }

  for (Address addr = p->ObjectAreaStart();
       addr < p->ObjectAreaEnd();
       addr += kPointerSize) {
    Page::FromAddress(addr)->MarkRegionDirty(addr);
  }

  for (Address addr = p->ObjectAreaStart();
       addr < p->ObjectAreaEnd();
       addr += kPointerSize) {
    CHECK(Page::FromAddress(addr)->IsRegionDirty(addr));
  }
#endif
}
#endif


// TODO(gc) you can no longer allocate pages like this. Details are hidden.
#if 0
TEST(Page) {
  byte* mem = NewArray<byte>(2*Page::kPageSize);
  CHECK(mem != NULL);

  Address start = reinterpret_cast<Address>(mem);
  Address page_start = RoundUp(start, Page::kPageSize);

  Page* p = Page::FromAddress(page_start);
  // Initialized Page has heap pointer, normally set by memory_allocator.
  p->heap_ = CcTest::heap();
  CHECK(p->address() == page_start);
  CHECK(p->is_valid());

  p->opaque_header = 0;
  p->SetIsLargeObjectPage(false);
  CHECK(!p->next_page()->is_valid());

  CHECK(p->ObjectAreaStart() == page_start + Page::kObjectStartOffset);
  CHECK(p->ObjectAreaEnd() == page_start + Page::kPageSize);

  CHECK(p->Offset(page_start + Page::kObjectStartOffset) ==
        Page::kObjectStartOffset);
  CHECK(p->Offset(page_start + Page::kPageSize) == Page::kPageSize);

  CHECK(p->OffsetToAddress(Page::kObjectStartOffset) == p->ObjectAreaStart());
  CHECK(p->OffsetToAddress(Page::kPageSize) == p->ObjectAreaEnd());

  // test region marking
  VerifyRegionMarking(page_start);

  DeleteArray(mem);
}
#endif


// Temporarily sets a given allocator in an isolate.
class TestMemoryAllocatorScope {
 public:
  TestMemoryAllocatorScope(Isolate* isolate, MemoryAllocator* allocator)
      : isolate_(isolate),
        old_allocator_(isolate->memory_allocator_) {
    isolate->memory_allocator_ = allocator;
  }

  ~TestMemoryAllocatorScope() {
    isolate_->memory_allocator_ = old_allocator_;
  }

 private:
  Isolate* isolate_;
  MemoryAllocator* old_allocator_;

  DISALLOW_COPY_AND_ASSIGN(TestMemoryAllocatorScope);
};


// Temporarily sets a given code range in an isolate.
class TestCodeRangeScope {
 public:
  TestCodeRangeScope(Isolate* isolate, CodeRange* code_range)
      : isolate_(isolate),
        old_code_range_(isolate->code_range_) {
    isolate->code_range_ = code_range;
  }

  ~TestCodeRangeScope() {
    isolate_->code_range_ = old_code_range_;
  }

 private:
  Isolate* isolate_;
  CodeRange* old_code_range_;

  DISALLOW_COPY_AND_ASSIGN(TestCodeRangeScope);
};


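
// Allocates a chunk with the given reserve and commit area sizes, checks that
// the reserved size and the area bounds match the expectation for the given
// executability, then recommits the chunk to second_commit_area_size and
// re-checks the bounds before freeing it.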
static void VerifyMemoryChunk(Isolate* isolate,
                              Heap* heap,
                              CodeRange* code_range,
                              size_t reserve_area_size,
                              size_t commit_area_size,
                              size_t second_commit_area_size,
                              Executability executable) {
  MemoryAllocator* memory_allocator = new MemoryAllocator(isolate);
  CHECK(memory_allocator->SetUp(heap->MaxReserved(),
                                heap->MaxExecutableSize()));
  TestMemoryAllocatorScope test_allocator_scope(isolate, memory_allocator);
  TestCodeRangeScope test_code_range_scope(isolate, code_range);

  size_t header_size = (executable == EXECUTABLE)
                       ? MemoryAllocator::CodePageGuardStartOffset()
                       : MemoryChunk::kObjectStartOffset;
  size_t guard_size = (executable == EXECUTABLE)
                       ? MemoryAllocator::CodePageGuardSize()
                       : 0;

  MemoryChunk* memory_chunk = memory_allocator->AllocateChunk(reserve_area_size,
                                                              commit_area_size,
                                                              executable,
                                                              NULL);
  size_t alignment = code_range != NULL && code_range->valid()
                         ? MemoryChunk::kAlignment
                         : base::OS::CommitPageSize();
  size_t reserved_size =
      (executable == EXECUTABLE)
          ? RoundUp(header_size + guard_size + reserve_area_size + guard_size,
                    alignment)
          : RoundUp(header_size + reserve_area_size,
                    base::OS::CommitPageSize());
  CHECK(memory_chunk->size() == reserved_size);
  CHECK(memory_chunk->area_start() < memory_chunk->address() +
                                     memory_chunk->size());
  CHECK(memory_chunk->area_end() <= memory_chunk->address() +
                                    memory_chunk->size());
  CHECK(static_cast<size_t>(memory_chunk->area_size()) == commit_area_size);

  Address area_start = memory_chunk->area_start();

  memory_chunk->CommitArea(second_commit_area_size);
  CHECK(area_start == memory_chunk->area_start());
  CHECK(memory_chunk->area_start() < memory_chunk->address() +
                                     memory_chunk->size());
  CHECK(memory_chunk->area_end() <= memory_chunk->address() +
                                    memory_chunk->size());
  CHECK(static_cast<size_t>(memory_chunk->area_size()) ==
      second_commit_area_size);

  memory_allocator->Free(memory_chunk);
  memory_allocator->TearDown();
  delete memory_allocator;
}


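// Exercises a small CodeRange: a first allocation that fits must succeed, and
// a second one that no longer fits must return NULL.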
TEST(Regress3540) {
  Isolate* isolate = CcTest::i_isolate();
  Heap* heap = isolate->heap();
  const int pageSize = Page::kPageSize;
  MemoryAllocator* memory_allocator = new MemoryAllocator(isolate);
  CHECK(
      memory_allocator->SetUp(heap->MaxReserved(), heap->MaxExecutableSize()));
  TestMemoryAllocatorScope test_allocator_scope(isolate, memory_allocator);
  CodeRange* code_range = new CodeRange(isolate);
  const size_t code_range_size = 4 * pageSize;
  if (!code_range->SetUp(
          code_range_size +
          RoundUp(v8::base::OS::CommitPageSize() * kReservedCodeRangePages,
                  MemoryChunk::kAlignment) +
          v8::internal::MemoryAllocator::CodePageAreaSize())) {
    return;
  }

  Address address;
  size_t size;
  size_t request_size = code_range_size - 2 * pageSize;
  address = code_range->AllocateRawMemory(
      request_size, request_size - (2 * MemoryAllocator::CodePageGuardSize()),
      &size);
  CHECK(address != NULL);

  Address null_address;
  size_t null_size;
  request_size = code_range_size - pageSize;
  null_address = code_range->AllocateRawMemory(
      request_size, request_size - (2 * MemoryAllocator::CodePageGuardSize()),
      &null_size);
  CHECK(null_address == NULL);

  code_range->FreeRawMemory(address, size);
  delete code_range;
  memory_allocator->TearDown();
  delete memory_allocator;
}


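// Deterministic pseudorandom generator used to vary the commit area sizes
// below; results are confined to the low 20 bits, i.e. to values below 1 MB.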
static unsigned int Pseudorandom() {
  static uint32_t lo = 2345;
  lo = 18273 * (lo & 0xFFFFF) + (lo >> 16);
  return lo & 0xFFFFF;
}


TEST(MemoryChunk) {
  Isolate* isolate = CcTest::i_isolate();
  Heap* heap = isolate->heap();

  size_t reserve_area_size = 1 * MB;
  size_t initial_commit_area_size, second_commit_area_size;

  for (int i = 0; i < 100; i++) {
    initial_commit_area_size = Pseudorandom();
    second_commit_area_size = Pseudorandom();

    // With CodeRange.
    CodeRange* code_range = new CodeRange(isolate);
    const size_t code_range_size = 32 * MB;
    if (!code_range->SetUp(code_range_size)) return;

    VerifyMemoryChunk(isolate,
                      heap,
                      code_range,
                      reserve_area_size,
                      initial_commit_area_size,
                      second_commit_area_size,
                      EXECUTABLE);

    VerifyMemoryChunk(isolate,
                      heap,
                      code_range,
                      reserve_area_size,
                      initial_commit_area_size,
                      second_commit_area_size,
                      NOT_EXECUTABLE);
    delete code_range;

    // Without CodeRange.
    code_range = NULL;
    VerifyMemoryChunk(isolate,
                      heap,
                      code_range,
                      reserve_area_size,
                      initial_commit_area_size,
                      second_commit_area_size,
                      EXECUTABLE);

    VerifyMemoryChunk(isolate,
                      heap,
                      code_range,
                      reserve_area_size,
                      initial_commit_area_size,
                      second_commit_area_size,
                      NOT_EXECUTABLE);
  }
}


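// Allocates pages for a faked old space and verifies page linkage and
// ownership.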
TEST(MemoryAllocator) {
  Isolate* isolate = CcTest::i_isolate();
  Heap* heap = isolate->heap();

  MemoryAllocator* memory_allocator = new MemoryAllocator(isolate);
  CHECK(memory_allocator != nullptr);
  CHECK(memory_allocator->SetUp(heap->MaxReserved(),
                                heap->MaxExecutableSize()));
  TestMemoryAllocatorScope test_scope(isolate, memory_allocator);

  {
    int total_pages = 0;
    OldSpace faked_space(heap, OLD_SPACE, NOT_EXECUTABLE);
    Page* first_page = memory_allocator->AllocatePage(
        faked_space.AreaSize(), &faked_space, NOT_EXECUTABLE);

    first_page->InsertAfter(faked_space.anchor()->prev_page());
    CHECK(first_page->is_valid());
    CHECK(first_page->next_page() == faked_space.anchor());
    total_pages++;

    for (Page* p = first_page; p != faked_space.anchor(); p = p->next_page()) {
      CHECK(p->owner() == &faked_space);
    }

    // Allocate a second page; all pages should still be owned by the faked
    // space.
    Page* other = memory_allocator->AllocatePage(faked_space.AreaSize(),
                                                 &faked_space, NOT_EXECUTABLE);
    CHECK(other->is_valid());
    total_pages++;
    other->InsertAfter(first_page);
    int page_count = 0;
    for (Page* p = first_page; p != faked_space.anchor(); p = p->next_page()) {
      CHECK(p->owner() == &faked_space);
      page_count++;
    }
    CHECK(total_pages == page_count);

    Page* second_page = first_page->next_page();
    CHECK(second_page->is_valid());

    // OldSpace's destructor will tear down the space and free up all pages.
  }
  memory_allocator->TearDown();
  delete memory_allocator;
}


TEST(NewSpace) {
  Isolate* isolate = CcTest::i_isolate();
  Heap* heap = isolate->heap();
  MemoryAllocator* memory_allocator = new MemoryAllocator(isolate);
  CHECK(memory_allocator->SetUp(heap->MaxReserved(),
                                heap->MaxExecutableSize()));
  TestMemoryAllocatorScope test_scope(isolate, memory_allocator);

  NewSpace new_space(heap);

  CHECK(new_space.SetUp(CcTest::heap()->ReservedSemiSpaceSize(),
                        CcTest::heap()->ReservedSemiSpaceSize()));
  CHECK(new_space.HasBeenSetUp());

  while (new_space.Available() >= Page::kMaxRegularHeapObjectSize) {
    Object* obj =
        new_space.AllocateRawUnaligned(Page::kMaxRegularHeapObjectSize)
            .ToObjectChecked();
    CHECK(new_space.Contains(HeapObject::cast(obj)));
  }

  new_space.TearDown();
  memory_allocator->TearDown();
  delete memory_allocator;
}


TEST(OldSpace) {
  Isolate* isolate = CcTest::i_isolate();
  Heap* heap = isolate->heap();
  MemoryAllocator* memory_allocator = new MemoryAllocator(isolate);
  CHECK(memory_allocator->SetUp(heap->MaxReserved(),
                                heap->MaxExecutableSize()));
  TestMemoryAllocatorScope test_scope(isolate, memory_allocator);

  OldSpace* s = new OldSpace(heap, OLD_SPACE, NOT_EXECUTABLE);
  CHECK(s != NULL);

  CHECK(s->SetUp());

  while (s->Available() > 0) {
    s->AllocateRawUnaligned(Page::kMaxRegularHeapObjectSize).ToObjectChecked();
  }

  delete s;
  memory_allocator->TearDown();
  delete memory_allocator;
}


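// Allocates objects in a fresh compaction space and checks that its pages are
// handed over to the owning old space on merge.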
TEST(CompactionSpace) {
  Isolate* isolate = CcTest::i_isolate();
  Heap* heap = isolate->heap();
  MemoryAllocator* memory_allocator = new MemoryAllocator(isolate);
  CHECK(memory_allocator != nullptr);
  CHECK(
      memory_allocator->SetUp(heap->MaxReserved(), heap->MaxExecutableSize()));
  TestMemoryAllocatorScope test_scope(isolate, memory_allocator);

  CompactionSpace* compaction_space =
      new CompactionSpace(heap, OLD_SPACE, NOT_EXECUTABLE);
  CHECK(compaction_space != NULL);
  CHECK(compaction_space->SetUp());

  OldSpace* old_space = new OldSpace(heap, OLD_SPACE, NOT_EXECUTABLE);
  CHECK(old_space != NULL);
  CHECK(old_space->SetUp());

  // Cannot loop on Available() since we initially have 0 bytes available
  // and would thus neither grow nor be able to allocate an object.
  const int kNumObjects = 100;
  const int kNumObjectsPerPage =
      compaction_space->AreaSize() / Page::kMaxRegularHeapObjectSize;
  const int kExpectedPages =
      (kNumObjects + kNumObjectsPerPage - 1) / kNumObjectsPerPage;
  for (int i = 0; i < kNumObjects; i++) {
    compaction_space->AllocateRawUnaligned(Page::kMaxRegularHeapObjectSize)
        .ToObjectChecked();
  }
  int pages_in_old_space = old_space->CountTotalPages();
  int pages_in_compaction_space = compaction_space->CountTotalPages();
  CHECK_EQ(pages_in_compaction_space, kExpectedPages);
  CHECK_LE(pages_in_old_space, 1);

  old_space->MergeCompactionSpace(compaction_space);
  CHECK_EQ(old_space->CountTotalPages(),
           pages_in_old_space + pages_in_compaction_space);

  delete compaction_space;
  delete old_space;

  memory_allocator->TearDown();
  delete memory_allocator;
}


TEST(CompactionSpaceUsingExternalMemory) {
  const int kObjectSize = 512;

  Isolate* isolate = CcTest::i_isolate();
  Heap* heap = isolate->heap();
  MemoryAllocator* allocator = new MemoryAllocator(isolate);
  CHECK(allocator != nullptr);
  CHECK(allocator->SetUp(heap->MaxReserved(), heap->MaxExecutableSize()));
  TestMemoryAllocatorScope test_scope(isolate, allocator);

  CompactionSpaceCollection* collection = new CompactionSpaceCollection(heap);
  CompactionSpace* compaction_space = collection->Get(OLD_SPACE);
  CHECK(compaction_space != NULL);
  CHECK(compaction_space->SetUp());

  OldSpace* old_space = new OldSpace(heap, OLD_SPACE, NOT_EXECUTABLE);
  CHECK(old_space != NULL);
  CHECK(old_space->SetUp());

  // The linear allocation area already counts as used bytes, making
  // exact testing impossible.
  heap->DisableInlineAllocation();

  // Test:
  // * Allocate a backing store in old_space.
  // * Compute the number num_rest_objects of kObjectSize objects that fit
  //   into the available memory.
  // * Add the rest of available memory to the compaction space.
  // * Allocate num_rest_objects in the compaction space.
  // * Allocate one more object.
  // * Merge the compaction space and compare the expected number of pages.

  // Allocate a single object in old_space to initialize a backing page.
  old_space->AllocateRawUnaligned(kObjectSize).ToObjectChecked();
  // Compute the number of objects that fit into the rest in old_space.
  intptr_t rest = static_cast<int>(old_space->Available());
  CHECK_GT(rest, 0);
  intptr_t num_rest_objects = rest / kObjectSize;
  // After allocating num_rest_objects in compaction_space we allocate a bit
  // more.
  const intptr_t kAdditionalCompactionMemory = kObjectSize;
  // We expect a single old_space page.
  const intptr_t kExpectedInitialOldSpacePages = 1;
  // We expect a single additional page in compaction space because we mostly
  // use external memory.
  const intptr_t kExpectedCompactionPages = 1;
  // We expect two pages to be reachable from old_space in the end.
  const intptr_t kExpectedOldSpacePagesAfterMerge = 2;

  CHECK_EQ(old_space->CountTotalPages(), kExpectedInitialOldSpacePages);
  CHECK_EQ(compaction_space->CountTotalPages(), 0);
  CHECK_EQ(compaction_space->Capacity(), 0);
  // Make the rest of memory available for compaction.
  old_space->DivideUponCompactionSpaces(&collection, 1, rest);
  CHECK_EQ(compaction_space->CountTotalPages(), 0);
  CHECK_EQ(compaction_space->Capacity(), rest);
  while (num_rest_objects-- > 0) {
    compaction_space->AllocateRawUnaligned(kObjectSize).ToObjectChecked();
  }
  // We only used external memory so far.
  CHECK_EQ(compaction_space->CountTotalPages(), 0);
  // Additional allocation.
  compaction_space->AllocateRawUnaligned(kAdditionalCompactionMemory)
      .ToObjectChecked();
  // Now the compaction space should have also acquired a page.
  CHECK_EQ(compaction_space->CountTotalPages(), kExpectedCompactionPages);

  old_space->MergeCompactionSpace(compaction_space);
  CHECK_EQ(old_space->CountTotalPages(), kExpectedOldSpacePagesAfterMerge);

  delete collection;
  delete old_space;

  allocator->TearDown();
  delete allocator;
}


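// Helpers for the CompactionSpaceDivide tests below: set up and tear down a
// set of compaction space collections, allocate in them, merge them back into
// a paged space, and collect accounting statistics.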
CompactionSpaceCollection** HeapTester::InitializeCompactionSpaces(
    Heap* heap, int num_spaces) {
  CompactionSpaceCollection** spaces =
      new CompactionSpaceCollection*[num_spaces];
  for (int i = 0; i < num_spaces; i++) {
    spaces[i] = new CompactionSpaceCollection(heap);
  }
  return spaces;
}


void HeapTester::DestroyCompactionSpaces(CompactionSpaceCollection** spaces,
                                         int num_spaces) {
  for (int i = 0; i < num_spaces; i++) {
    delete spaces[i];
  }
  delete[] spaces;
}


void HeapTester::MergeCompactionSpaces(PagedSpace* space,
                                       CompactionSpaceCollection** spaces,
                                       int num_spaces) {
  AllocationSpace id = space->identity();
  for (int i = 0; i < num_spaces; i++) {
    space->MergeCompactionSpace(spaces[i]->Get(id));
    CHECK_EQ(spaces[i]->Get(id)->accounting_stats_.Size(), 0);
    CHECK_EQ(spaces[i]->Get(id)->accounting_stats_.Capacity(), 0);
    CHECK_EQ(spaces[i]->Get(id)->Waste(), 0);
  }
}


void HeapTester::AllocateInCompactionSpaces(CompactionSpaceCollection** spaces,
                                            AllocationSpace id, int num_spaces,
                                            int num_objects, int object_size) {
  for (int i = 0; i < num_spaces; i++) {
    for (int j = 0; j < num_objects; j++) {
      spaces[i]->Get(id)->AllocateRawUnaligned(object_size).ToObjectChecked();
    }
    spaces[i]->Get(id)->EmptyAllocationInfo();
    CHECK_EQ(spaces[i]->Get(id)->accounting_stats_.Size(),
             num_objects * object_size);
    CHECK_GE(spaces[i]->Get(id)->accounting_stats_.Capacity(),
             spaces[i]->Get(id)->accounting_stats_.Size());
  }
}


void HeapTester::CompactionStats(CompactionSpaceCollection** spaces,
                                 AllocationSpace id, int num_spaces,
                                 intptr_t* capacity, intptr_t* size) {
  *capacity = 0;
  *size = 0;
  for (int i = 0; i < num_spaces; i++) {
    *capacity += spaces[i]->Get(id)->accounting_stats_.Capacity();
    *size += spaces[i]->Get(id)->accounting_stats_.Size();
  }
}


void HeapTester::TestCompactionSpaceDivide(int num_additional_objects,
                                           int object_size,
                                           int num_compaction_spaces,
                                           int additional_capacity_in_bytes) {
  Isolate* isolate = CcTest::i_isolate();
  Heap* heap = isolate->heap();
  OldSpace* old_space = new OldSpace(heap, OLD_SPACE, NOT_EXECUTABLE);
  CHECK(old_space != nullptr);
  CHECK(old_space->SetUp());
  old_space->AllocateRawUnaligned(object_size).ToObjectChecked();
  old_space->EmptyAllocationInfo();

  intptr_t rest_capacity = old_space->accounting_stats_.Capacity() -
                           old_space->accounting_stats_.Size();
  intptr_t capacity_for_compaction_space =
      rest_capacity / num_compaction_spaces;
  int num_objects_in_compaction_space =
      static_cast<int>(capacity_for_compaction_space) / object_size +
      num_additional_objects;
  CHECK_GT(num_objects_in_compaction_space, 0);
  intptr_t initial_old_space_capacity = old_space->accounting_stats_.Capacity();

  CompactionSpaceCollection** spaces =
      InitializeCompactionSpaces(heap, num_compaction_spaces);
  old_space->DivideUponCompactionSpaces(spaces, num_compaction_spaces,
                                        capacity_for_compaction_space);

  intptr_t compaction_capacity = 0;
  intptr_t compaction_size = 0;
  CompactionStats(spaces, OLD_SPACE, num_compaction_spaces,
                  &compaction_capacity, &compaction_size);

  intptr_t old_space_capacity = old_space->accounting_stats_.Capacity();
  intptr_t old_space_size = old_space->accounting_stats_.Size();
  // Compaction space memory is subtracted from the original space's capacity.
  CHECK_EQ(old_space_capacity,
           initial_old_space_capacity - compaction_capacity);
  CHECK_EQ(compaction_size, 0);

  AllocateInCompactionSpaces(spaces, OLD_SPACE, num_compaction_spaces,
                             num_objects_in_compaction_space, object_size);

  // Old space size and capacity should be the same as after dividing.
  CHECK_EQ(old_space->accounting_stats_.Size(), old_space_size);
  CHECK_EQ(old_space->accounting_stats_.Capacity(), old_space_capacity);

  CompactionStats(spaces, OLD_SPACE, num_compaction_spaces,
                  &compaction_capacity, &compaction_size);
  MergeCompactionSpaces(old_space, spaces, num_compaction_spaces);

  CHECK_EQ(old_space->accounting_stats_.Capacity(),
           old_space_capacity + compaction_capacity);
  CHECK_EQ(old_space->accounting_stats_.Size(),
           old_space_size + compaction_size);
  // We check against the expected end capacity.
  CHECK_EQ(old_space->accounting_stats_.Capacity(),
           initial_old_space_capacity + additional_capacity_in_bytes);

  DestroyCompactionSpaces(spaces, num_compaction_spaces);
  delete old_space;
}


HEAP_TEST(CompactionSpaceDivideSinglePage) {
  const int kObjectSize = KB;
  const int kCompactionSpaces = 4;
  // Since the bound for objects is tight and the dividing is best effort, we
  // subtract some objects to make sure we still fit in the initial page.
  // A CHECK makes sure that the overall number of allocated objects stays
  // > 0.
  const int kAdditionalObjects = -10;
  const int kAdditionalCapacityRequired = 0;
  TestCompactionSpaceDivide(kAdditionalObjects, kObjectSize, kCompactionSpaces,
                            kAdditionalCapacityRequired);
}


HEAP_TEST(CompactionSpaceDivideMultiplePages) {
  const int kObjectSize = KB;
  const int kCompactionSpaces = 4;
  // Allocate half a page of objects to ensure that we need one more page per
  // compaction space.
  const int kAdditionalObjects = (Page::kPageSize / kObjectSize / 2);
  const int kAdditionalCapacityRequired =
      Page::kAllocatableMemory * kCompactionSpaces;
  TestCompactionSpaceDivide(kAdditionalObjects, kObjectSize, kCompactionSpaces,
                            kAdditionalCapacityRequired);
}


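// Allocates page-sized objects in the large object space until allocation
// fails, verifying Contains() and FindObject() along the way.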
TEST(LargeObjectSpace) {
  v8::V8::Initialize();

  LargeObjectSpace* lo = CcTest::heap()->lo_space();
  CHECK(lo != NULL);

  int lo_size = Page::kPageSize;

  Object* obj = lo->AllocateRaw(lo_size, NOT_EXECUTABLE).ToObjectChecked();
  CHECK(obj->IsHeapObject());

  HeapObject* ho = HeapObject::cast(obj);

  CHECK(lo->Contains(HeapObject::cast(obj)));

  CHECK(lo->FindObject(ho->address()) == obj);

  CHECK(lo->Contains(ho));

  while (true) {
    intptr_t available = lo->Available();
    { AllocationResult allocation = lo->AllocateRaw(lo_size, NOT_EXECUTABLE);
      if (allocation.IsRetry()) break;
    }
    // The available value is conservative such that it may report
    // zero prior to heap exhaustion.
    CHECK(lo->Available() < available || available == 0);
  }

  CHECK(!lo->IsEmpty());

  CHECK(lo->AllocateRaw(lo_size, NOT_EXECUTABLE).IsRetry());
}


TEST(SizeOfFirstPageIsLargeEnough) {
  if (i::FLAG_always_opt) return;
  // Bootstrapping without a snapshot causes more allocations.
  CcTest::InitializeVM();
  Isolate* isolate = CcTest::i_isolate();
  if (!isolate->snapshot_available()) return;
  if (Snapshot::EmbedsScript(isolate)) return;

  // If this test fails due to enabling experimental natives that are not part
  // of the snapshot, we may need to adjust CalculateFirstPageSizes.

  // Freshly initialized VM gets by with one page per space.
  for (int i = FIRST_PAGED_SPACE; i <= LAST_PAGED_SPACE; i++) {
    // Debug code can be very large, so skip CODE_SPACE if we are generating it.
    if (i == CODE_SPACE && i::FLAG_debug_code) continue;
    CHECK_EQ(1, isolate->heap()->paged_space(i)->CountTotalPages());
  }

  // Executing the empty script gets by with one page per space.
  HandleScope scope(isolate);
  CompileRun("/*empty*/");
  for (int i = FIRST_PAGED_SPACE; i <= LAST_PAGED_SPACE; i++) {
    // Debug code can be very large, so skip CODE_SPACE if we are generating it.
    if (i == CODE_SPACE && i::FLAG_debug_code) continue;
    CHECK_EQ(1, isolate->heap()->paged_space(i)->CountTotalPages());
  }

  // No large objects required to perform the above steps.
  CHECK(isolate->heap()->lo_space()->IsEmpty());
}


UNINITIALIZED_TEST(NewSpaceGrowsToTargetCapacity) {
  FLAG_target_semi_space_size = 2 * (Page::kPageSize / MB);
  if (FLAG_optimize_for_size) return;

  v8::Isolate::CreateParams create_params;
  create_params.array_buffer_allocator = CcTest::array_buffer_allocator();
  v8::Isolate* isolate = v8::Isolate::New(create_params);
  {
    v8::Isolate::Scope isolate_scope(isolate);
    v8::HandleScope handle_scope(isolate);
    v8::Context::New(isolate)->Enter();

    Isolate* i_isolate = reinterpret_cast<Isolate*>(isolate);

    NewSpace* new_space = i_isolate->heap()->new_space();

    // This test doesn't work if we start with a non-default new space
    // configuration.
    if (new_space->InitialTotalCapacity() == Page::kPageSize) {
      CHECK_EQ(new_space->CommittedMemory(), new_space->InitialTotalCapacity());

      // Fill up the first (and only) page of the semi space.
      FillCurrentPage(new_space);

      // Try to allocate out of the new space. A new page should be added and
      // the allocation should succeed.
      v8::internal::AllocationResult allocation =
          new_space->AllocateRawUnaligned(80);
      CHECK(!allocation.IsRetry());
      CHECK_EQ(new_space->CommittedMemory(), 2 * Page::kPageSize);

      // Turn the allocation into a proper object so isolate teardown won't
      // crash.
      HeapObject* free_space = NULL;
      CHECK(allocation.To(&free_space));
      new_space->heap()->CreateFillerObjectAt(free_space->address(), 80);
    }
  }
  isolate->Dispose();
}


static HeapObject* AllocateUnaligned(NewSpace* space, int size) {
  AllocationResult allocation = space->AllocateRawUnaligned(size);
  CHECK(!allocation.IsRetry());
  HeapObject* filler = NULL;
  CHECK(allocation.To(&filler));
  space->heap()->CreateFillerObjectAt(filler->address(), size);
  return filler;
}

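// Inline allocation observer that simply counts how often it is notified.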
class Observer : public InlineAllocationObserver {
 public:
  explicit Observer(intptr_t step_size)
      : InlineAllocationObserver(step_size), count_(0) {}

  virtual void Step(int bytes_allocated) { count_++; }

  int count() const { return count_; }

 private:
  int count_;
};


UNINITIALIZED_TEST(InlineAllocationObserver) {
  v8::Isolate::CreateParams create_params;
  create_params.array_buffer_allocator = CcTest::array_buffer_allocator();
  v8::Isolate* isolate = v8::Isolate::New(create_params);
  {
    v8::Isolate::Scope isolate_scope(isolate);
    v8::HandleScope handle_scope(isolate);
    v8::Context::New(isolate)->Enter();

    Isolate* i_isolate = reinterpret_cast<Isolate*>(isolate);

    NewSpace* new_space = i_isolate->heap()->new_space();

    Observer observer1(128);
    new_space->AddInlineAllocationObserver(&observer1);

    // The observer should not get notified if we have allocated less than
    // 128 bytes.
    AllocateUnaligned(new_space, 64);
    CHECK_EQ(observer1.count(), 0);

    // The observer should get called when we have allocated exactly 128 bytes.
    AllocateUnaligned(new_space, 64);
    CHECK_EQ(observer1.count(), 1);

    // Another >128 bytes should get another notification.
    AllocateUnaligned(new_space, 136);
    CHECK_EQ(observer1.count(), 2);

    // Allocating a large object should get only one notification.
    AllocateUnaligned(new_space, 1024);
    CHECK_EQ(observer1.count(), 3);

    // Allocating another 2048 bytes in small objects should get 16
    // notifications.
    for (int i = 0; i < 64; ++i) {
      AllocateUnaligned(new_space, 32);
    }
    CHECK_EQ(observer1.count(), 19);

    // Multiple observers should work.
    Observer observer2(96);
    new_space->AddInlineAllocationObserver(&observer2);

    AllocateUnaligned(new_space, 2048);
    CHECK_EQ(observer1.count(), 20);
    CHECK_EQ(observer2.count(), 1);

    AllocateUnaligned(new_space, 104);
    CHECK_EQ(observer1.count(), 20);
    CHECK_EQ(observer2.count(), 2);

    // Callback should stop getting called after an observer is removed.
    new_space->RemoveInlineAllocationObserver(&observer1);

    AllocateUnaligned(new_space, 384);
    CHECK_EQ(observer1.count(), 20);  // no more notifications.
    CHECK_EQ(observer2.count(), 3);   // this one is still active.

    // Ensure that Pause/ResumeInlineAllocationObservers work correctly.
    AllocateUnaligned(new_space, 48);
    CHECK_EQ(observer2.count(), 3);
    new_space->PauseInlineAllocationObservers();
    CHECK_EQ(observer2.count(), 3);
    AllocateUnaligned(new_space, 384);
    CHECK_EQ(observer2.count(), 3);
    new_space->ResumeInlineAllocationObservers();
    CHECK_EQ(observer2.count(), 3);
    // Coupled with the 48 bytes allocated before the pause, another 48 bytes
    // allocated here should trigger a notification.
    AllocateUnaligned(new_space, 48);
    CHECK_EQ(observer2.count(), 4);

    new_space->RemoveInlineAllocationObserver(&observer2);
    AllocateUnaligned(new_space, 384);
    CHECK_EQ(observer1.count(), 20);
    CHECK_EQ(observer2.count(), 4);
  }
  isolate->Dispose();
}


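// Checks that multiple observers with different step sizes are each notified
// at their own cadence.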
UNINITIALIZED_TEST(InlineAllocationObserverCadence) {
  v8::Isolate::CreateParams create_params;
  create_params.array_buffer_allocator = CcTest::array_buffer_allocator();
  v8::Isolate* isolate = v8::Isolate::New(create_params);
  {
    v8::Isolate::Scope isolate_scope(isolate);
    v8::HandleScope handle_scope(isolate);
    v8::Context::New(isolate)->Enter();

    Isolate* i_isolate = reinterpret_cast<Isolate*>(isolate);

    NewSpace* new_space = i_isolate->heap()->new_space();

    Observer observer1(512);
    new_space->AddInlineAllocationObserver(&observer1);
    Observer observer2(576);
    new_space->AddInlineAllocationObserver(&observer2);

    for (int i = 0; i < 512; ++i) {
      AllocateUnaligned(new_space, 32);
    }

    new_space->RemoveInlineAllocationObserver(&observer1);
    new_space->RemoveInlineAllocationObserver(&observer2);

    CHECK_EQ(observer1.count(), 32);
    CHECK_EQ(observer2.count(), 28);
  }
  isolate->Dispose();
}

}  // namespace internal
}  // namespace v8