// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include <stdlib.h>

#include "include/v8-platform.h"
#include "src/base/bounded-page-allocator.h"
#include "src/base/macros.h"
#include "src/base/platform/platform.h"
#include "src/common/globals.h"
#include "src/heap/factory.h"
#include "src/heap/large-spaces.h"
#include "src/heap/memory-allocator.h"
#include "src/heap/memory-chunk.h"
#include "src/heap/spaces-inl.h"
#include "src/heap/spaces.h"
#include "src/objects/free-space.h"
#include "src/objects/objects-inl.h"
#include "src/snapshot/snapshot.h"
#include "test/cctest/cctest.h"
#include "test/cctest/heap/heap-tester.h"
#include "test/cctest/heap/heap-utils.h"

namespace v8 {
namespace internal {
namespace heap {

// Temporarily sets a given allocator in an isolate.
class TestMemoryAllocatorScope {
 public:
  TestMemoryAllocatorScope(Isolate* isolate, size_t max_capacity,
                           size_t code_range_size,
                           PageAllocator* page_allocator = nullptr)
      : isolate_(isolate),
        old_allocator_(std::move(isolate->heap()->memory_allocator_)) {
    isolate->heap()->memory_allocator_.reset(
        new MemoryAllocator(isolate, max_capacity, code_range_size));
    if (page_allocator != nullptr) {
      isolate->heap()->memory_allocator_->data_page_allocator_ = page_allocator;
    }
  }

  MemoryAllocator* allocator() { return isolate_->heap()->memory_allocator(); }

  ~TestMemoryAllocatorScope() {
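    // Tear down the temporary allocator and give the isolate back its
    // original one.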
    isolate_->heap()->memory_allocator()->TearDown();
    isolate_->heap()->memory_allocator_.swap(old_allocator_);
  }

 private:
  Isolate* isolate_;
  std::unique_ptr<MemoryAllocator> old_allocator_;

  DISALLOW_COPY_AND_ASSIGN(TestMemoryAllocatorScope);
};

// Temporarily sets a given code page allocator in an isolate.
class TestCodePageAllocatorScope {
 public:
  TestCodePageAllocatorScope(Isolate* isolate,
                             v8::PageAllocator* code_page_allocator)
      : isolate_(isolate),
        old_code_page_allocator_(
            isolate->heap()->memory_allocator()->code_page_allocator()) {
    isolate->heap()->memory_allocator()->code_page_allocator_ =
        code_page_allocator;
  }

  ~TestCodePageAllocatorScope() {
    isolate_->heap()->memory_allocator()->code_page_allocator_ =
        old_code_page_allocator_;
  }

 private:
  Isolate* isolate_;
  v8::PageAllocator* old_code_page_allocator_;

  DISALLOW_COPY_AND_ASSIGN(TestCodePageAllocatorScope);
};

static void VerifyMemoryChunk(Isolate* isolate, Heap* heap,
                              v8::PageAllocator* code_page_allocator,
                              size_t reserve_area_size, size_t commit_area_size,
                              Executability executable, Space* space) {
  TestMemoryAllocatorScope test_allocator_scope(isolate, heap->MaxReserved(),
                                                0);
  MemoryAllocator* memory_allocator = test_allocator_scope.allocator();
  TestCodePageAllocatorScope test_code_page_allocator_scope(
      isolate, code_page_allocator);

  v8::PageAllocator* page_allocator =
      memory_allocator->page_allocator(executable);

  size_t allocatable_memory_area_offset =
      MemoryChunkLayout::ObjectStartOffsetInMemoryChunk(space->identity());
  size_t guard_size =
      (executable == EXECUTABLE) ? MemoryChunkLayout::CodePageGuardSize() : 0;

  MemoryChunk* memory_chunk = memory_allocator->AllocateChunk(
      reserve_area_size, commit_area_size, executable, space);
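  // Executable chunks round the requested area up separately and add guard
  // pages; data chunks round the header plus area up to the commit page size.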
  size_t reserved_size =
      ((executable == EXECUTABLE))
          ? allocatable_memory_area_offset +
                RoundUp(reserve_area_size, page_allocator->CommitPageSize()) +
                guard_size
          : RoundUp(allocatable_memory_area_offset + reserve_area_size,
                    page_allocator->CommitPageSize());
  CHECK(memory_chunk->size() == reserved_size);
  CHECK(memory_chunk->area_start() <
        memory_chunk->address() + memory_chunk->size());
  CHECK(memory_chunk->area_end() <=
        memory_chunk->address() + memory_chunk->size());
  CHECK(static_cast<size_t>(memory_chunk->area_size()) == commit_area_size);

  memory_allocator->Free<MemoryAllocator::kFull>(memory_chunk);
}

static unsigned int PseudorandomAreaSize() {
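  // Tiny deterministic pseudo-random generator; returns area sizes below 1 MB
  // (values in [0, 2^20)).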
  static uint32_t lo = 2345;
  lo = 18273 * (lo & 0xFFFFF) + (lo >> 16);
  return lo & 0xFFFFF;
}


TEST(MemoryChunk) {
  Isolate* isolate = CcTest::i_isolate();
  Heap* heap = isolate->heap();

  v8::PageAllocator* page_allocator = GetPlatformPageAllocator();

  size_t reserve_area_size = 1 * MB;
  size_t initial_commit_area_size;

  for (int i = 0; i < 100; i++) {
    initial_commit_area_size =
        RoundUp(PseudorandomAreaSize(), page_allocator->CommitPageSize());

    // With CodeRange.
    const size_t code_range_size = 32 * MB;
    VirtualMemory code_range_reservation(page_allocator, code_range_size,
                                         nullptr, MemoryChunk::kAlignment);
    CHECK(code_range_reservation.IsReserved());

    base::BoundedPageAllocator code_page_allocator(
        page_allocator, code_range_reservation.address(),
        code_range_reservation.size(), MemoryChunk::kAlignment);
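    // Check the chunk layout for both executable (code space) and
    // non-executable (old space) configurations.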

    VerifyMemoryChunk(isolate, heap, &code_page_allocator, reserve_area_size,
                      initial_commit_area_size, EXECUTABLE, heap->code_space());

    VerifyMemoryChunk(isolate, heap, &code_page_allocator, reserve_area_size,
                      initial_commit_area_size, NOT_EXECUTABLE,
                      heap->old_space());
  }
}


TEST(MemoryAllocator) {
  Isolate* isolate = CcTest::i_isolate();
  Heap* heap = isolate->heap();

  TestMemoryAllocatorScope test_allocator_scope(isolate, heap->MaxReserved(),
                                                0);
  MemoryAllocator* memory_allocator = test_allocator_scope.allocator();

  int total_pages = 0;
  OldSpace faked_space(heap);
  CHECK(!faked_space.first_page());
  CHECK(!faked_space.last_page());
  Page* first_page = memory_allocator->AllocatePage(
      faked_space.AreaSize(), static_cast<PagedSpace*>(&faked_space),
      NOT_EXECUTABLE);

  faked_space.memory_chunk_list().PushBack(first_page);
  CHECK(first_page->next_page() == nullptr);
  total_pages++;

  for (Page* p = first_page; p != nullptr; p = p->next_page()) {
    CHECK(p->owner() == &faked_space);
  }

  // Allocate a second page; iterating the list should then visit both pages.
  Page* other = memory_allocator->AllocatePage(
      faked_space.AreaSize(), static_cast<PagedSpace*>(&faked_space),
      NOT_EXECUTABLE);
  total_pages++;
  faked_space.memory_chunk_list().PushBack(other);
  int page_count = 0;
  for (Page* p = first_page; p != nullptr; p = p->next_page()) {
    CHECK(p->owner() == &faked_space);
    page_count++;
  }
  CHECK(total_pages == page_count);

  Page* second_page = first_page->next_page();
  CHECK_NOT_NULL(second_page);

  // OldSpace's destructor will tear down the space and free up all pages.
}

TEST(ComputeDiscardMemoryAreas) {
  base::AddressRegion memory_area;
  size_t page_size = MemoryAllocator::GetCommitPageSize();
  size_t free_header_size = FreeSpace::kSize;
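  // A discardable area must cover whole commit pages and must not touch the
  // FreeSpace header at the start of the free memory; otherwise it is empty.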

  memory_area = MemoryAllocator::ComputeDiscardMemoryArea(0, 0);
  CHECK_EQ(memory_area.begin(), 0);
  CHECK_EQ(memory_area.size(), 0);

  memory_area = MemoryAllocator::ComputeDiscardMemoryArea(
      0, page_size + free_header_size);
  CHECK_EQ(memory_area.begin(), 0);
  CHECK_EQ(memory_area.size(), 0);

  memory_area = MemoryAllocator::ComputeDiscardMemoryArea(
      page_size - free_header_size, page_size + free_header_size);
  CHECK_EQ(memory_area.begin(), page_size);
  CHECK_EQ(memory_area.size(), page_size);

  memory_area = MemoryAllocator::ComputeDiscardMemoryArea(page_size, page_size);
  CHECK_EQ(memory_area.begin(), 0);
  CHECK_EQ(memory_area.size(), 0);

  memory_area = MemoryAllocator::ComputeDiscardMemoryArea(
      page_size / 2, page_size + page_size / 2);
  CHECK_EQ(memory_area.begin(), page_size);
  CHECK_EQ(memory_area.size(), page_size);

  memory_area = MemoryAllocator::ComputeDiscardMemoryArea(
      page_size / 2, page_size + page_size / 4);
  CHECK_EQ(memory_area.begin(), 0);
  CHECK_EQ(memory_area.size(), 0);

  memory_area =
      MemoryAllocator::ComputeDiscardMemoryArea(page_size / 2, page_size * 3);
  CHECK_EQ(memory_area.begin(), page_size);
  CHECK_EQ(memory_area.size(), page_size * 2);
}

TEST(NewSpace) {
  Isolate* isolate = CcTest::i_isolate();
  Heap* heap = isolate->heap();
  TestMemoryAllocatorScope test_allocator_scope(isolate, heap->MaxReserved(),
                                                0);
  MemoryAllocator* memory_allocator = test_allocator_scope.allocator();

  NewSpace new_space(heap, memory_allocator->data_page_allocator(),
                     CcTest::heap()->InitialSemiSpaceSize(),
                     CcTest::heap()->InitialSemiSpaceSize());
  CHECK(new_space.MaximumCapacity());

  while (new_space.Available() >= kMaxRegularHeapObjectSize) {
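    // Each maximally-sized regular object must be allocated inside the space.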
    CHECK(new_space.Contains(new_space
                                 .AllocateRaw(kMaxRegularHeapObjectSize,
                                              AllocationAlignment::kWordAligned)
                                 .ToObjectChecked()));
  }

  new_space.TearDown();
  memory_allocator->unmapper()->EnsureUnmappingCompleted();
}


TEST(OldSpace) {
  Isolate* isolate = CcTest::i_isolate();
  Heap* heap = isolate->heap();
  TestMemoryAllocatorScope test_allocator_scope(isolate, heap->MaxReserved(),
                                                0);

  OldSpace* s = new OldSpace(heap);
  CHECK_NOT_NULL(s);

  while (s->Available() > 0) {
    s->AllocateRawUnaligned(kMaxRegularHeapObjectSize).ToObjectChecked();
  }

  delete s;
}

TEST(OldLargeObjectSpace) {
  // This test does not initialize allocated objects, which confuses the
  // incremental marker.
  FLAG_incremental_marking = false;
  v8::V8::Initialize();

  OldLargeObjectSpace* lo = CcTest::heap()->lo_space();
  CHECK_NOT_NULL(lo);

  int lo_size = Page::kPageSize;

  Object obj = lo->AllocateRaw(lo_size).ToObjectChecked();
  CHECK(obj.IsHeapObject());

  HeapObject ho = HeapObject::cast(obj);

  CHECK(lo->Contains(HeapObject::cast(obj)));

  CHECK(lo->Contains(ho));

  while (true) {
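    // Keep allocating until the space is exhausted and signals a retry.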
    {
      AllocationResult allocation = lo->AllocateRaw(lo_size);
      if (allocation.IsRetry()) break;
    }
  }

  CHECK(!lo->IsEmpty());

  CHECK(lo->AllocateRaw(lo_size).IsRetry());
}

#ifndef DEBUG
// The test verifies that committed size of a space is less than some threshold.
// Debug builds pull in all sorts of additional instrumentation that increases
// heap sizes. E.g. CSA_ASSERT creates on-heap strings for error messages. These
// messages are also not stable if files are moved and modified during the build
// process (jumbo builds).
TEST(SizeOfInitialHeap) {
  if (i::FLAG_always_opt) return;
  // Bootstrapping without a snapshot causes more allocations.
  CcTest::InitializeVM();
  Isolate* isolate = CcTest::i_isolate();
  if (!isolate->snapshot_available()) return;
  HandleScope scope(isolate);
  v8::Local<v8::Context> context = CcTest::isolate()->GetCurrentContext();
  // Skip this test on the custom snapshot builder.
  if (!CcTest::global()
           ->Get(context, v8_str("assertEquals"))
           .ToLocalChecked()
           ->IsUndefined()) {
    return;
  }
  // Initial size of LO_SPACE
  size_t initial_lo_space = isolate->heap()->lo_space()->Size();

// The limit for each space for an empty isolate containing just the
// snapshot.
// On PPC the page size is 64K, causing more internal fragmentation and
// hence requiring a larger limit.
#if V8_OS_LINUX && (V8_HOST_ARCH_PPC || V8_HOST_ARCH_PPC64)
  const size_t kMaxInitialSizePerSpace = 3 * MB;
#else
  const size_t kMaxInitialSizePerSpace = 2 * MB;
#endif

  // Freshly initialized VM gets by with the snapshot size (which is below
  // kMaxInitialSizePerSpace per space).
  Heap* heap = isolate->heap();
  int page_count[LAST_GROWABLE_PAGED_SPACE + 1] = {0, 0, 0, 0};
  for (int i = FIRST_GROWABLE_PAGED_SPACE; i <= LAST_GROWABLE_PAGED_SPACE;
       i++) {
    // Debug code can be very large, so skip CODE_SPACE if we are generating it.
    if (i == CODE_SPACE && i::FLAG_debug_code) continue;

    page_count[i] = heap->paged_space(i)->CountTotalPages();
    // Check that the initial heap is also below the limit.
    CHECK_LE(heap->paged_space(i)->CommittedMemory(), kMaxInitialSizePerSpace);
  }

  // Executing the empty script gets by with the same number of pages, i.e.,
  // requires no extra space.
  CompileRun("/*empty*/");
  for (int i = FIRST_GROWABLE_PAGED_SPACE; i <= LAST_GROWABLE_PAGED_SPACE;
       i++) {
    // Skip CODE_SPACE, since we had to generate code even for an empty script.
    if (i == CODE_SPACE) continue;
    CHECK_EQ(page_count[i], isolate->heap()->paged_space(i)->CountTotalPages());
  }

  // No large objects required to perform the above steps.
  CHECK_EQ(initial_lo_space,
           static_cast<size_t>(isolate->heap()->lo_space()->Size()));
}
#endif  // DEBUG

static HeapObject AllocateUnaligned(NewSpace* space, int size) {
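  // Allocate raw memory and immediately convert it into a filler object so
  // that the heap remains iterable.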
  AllocationResult allocation = space->AllocateRaw(size, kWordAligned);
  CHECK(!allocation.IsRetry());
  HeapObject filler;
  CHECK(allocation.To(&filler));
  space->heap()->CreateFillerObjectAt(filler.address(), size,
                                      ClearRecordedSlots::kNo);
  return filler;
}

static HeapObject AllocateUnaligned(PagedSpace* space, int size) {
  AllocationResult allocation = space->AllocateRaw(size, kWordAligned);
  CHECK(!allocation.IsRetry());
  HeapObject filler;
  CHECK(allocation.To(&filler));
  space->heap()->CreateFillerObjectAt(filler.address(), size,
                                      ClearRecordedSlots::kNo);
  return filler;
}

static HeapObject AllocateUnaligned(OldLargeObjectSpace* space, int size) {
  AllocationResult allocation = space->AllocateRaw(size);
  CHECK(!allocation.IsRetry());
  HeapObject filler;
  CHECK(allocation.To(&filler));
  return filler;
}

class Observer : public AllocationObserver {
 public:
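  // Observer that simply counts how many allocation-step notifications it
  // has received.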
  explicit Observer(intptr_t step_size)
      : AllocationObserver(step_size), count_(0) {}

  void Step(int bytes_allocated, Address addr, size_t) override { count_++; }

  int count() const { return count_; }

 private:
  int count_;
};

template <typename T>
void testAllocationObserver(Isolate* i_isolate, T* space) {
  Observer observer1(128);
  space->AddAllocationObserver(&observer1);

  // The observer should not get notified if we have only allocated less than
  // 128 bytes.
  AllocateUnaligned(space, 64);
  CHECK_EQ(observer1.count(), 0);

  // The observer should get called when we have allocated exactly 128 bytes.
  AllocateUnaligned(space, 64);
  CHECK_EQ(observer1.count(), 1);

  // Another >128 bytes should get another notification.
  AllocateUnaligned(space, 136);
  CHECK_EQ(observer1.count(), 2);

  // Allocating a large object should get only one notification.
  AllocateUnaligned(space, 1024);
  CHECK_EQ(observer1.count(), 3);

  // Allocating another 2048 bytes in small objects should get 16
  // notifications.
  for (int i = 0; i < 64; ++i) {
    AllocateUnaligned(space, 32);
  }
  CHECK_EQ(observer1.count(), 19);

  // Multiple observers should work.
  Observer observer2(96);
  space->AddAllocationObserver(&observer2);

  AllocateUnaligned(space, 2048);
  CHECK_EQ(observer1.count(), 20);
  CHECK_EQ(observer2.count(), 1);

  AllocateUnaligned(space, 104);
  CHECK_EQ(observer1.count(), 20);
  CHECK_EQ(observer2.count(), 2);

  // Callback should stop getting called after an observer is removed.
  space->RemoveAllocationObserver(&observer1);

  AllocateUnaligned(space, 384);
  CHECK_EQ(observer1.count(), 20);  // no more notifications.
  CHECK_EQ(observer2.count(), 3);   // this one is still active.

  // Ensure that PauseAllocationObserversScope works correctly.
  AllocateUnaligned(space, 48);
  CHECK_EQ(observer2.count(), 3);
  {
    PauseAllocationObserversScope pause_observers(i_isolate->heap());
    CHECK_EQ(observer2.count(), 3);
    AllocateUnaligned(space, 384);
    CHECK_EQ(observer2.count(), 3);
  }
  CHECK_EQ(observer2.count(), 3);
  // Coupled with the 48 bytes allocated before the pause, another 48 bytes
  // allocated here should trigger a notification.
  AllocateUnaligned(space, 48);
  CHECK_EQ(observer2.count(), 4);

  space->RemoveAllocationObserver(&observer2);
  AllocateUnaligned(space, 384);
  CHECK_EQ(observer1.count(), 20);
  CHECK_EQ(observer2.count(), 4);
}

UNINITIALIZED_TEST(AllocationObserver) {
  v8::Isolate::CreateParams create_params;
  create_params.array_buffer_allocator = CcTest::array_buffer_allocator();
  v8::Isolate* isolate = v8::Isolate::New(create_params);
  {
    v8::Isolate::Scope isolate_scope(isolate);
    v8::HandleScope handle_scope(isolate);
    v8::Context::New(isolate)->Enter();

    Isolate* i_isolate = reinterpret_cast<Isolate*>(isolate);

    testAllocationObserver<NewSpace>(i_isolate, i_isolate->heap()->new_space());
    // Old space is used but the code path is shared for all
    // classes inheriting from PagedSpace.
    testAllocationObserver<PagedSpace>(i_isolate,
                                       i_isolate->heap()->old_space());
    testAllocationObserver<OldLargeObjectSpace>(i_isolate,
                                                i_isolate->heap()->lo_space());
  }
  isolate->Dispose();
}

UNINITIALIZED_TEST(InlineAllocationObserverCadence) {
  v8::Isolate::CreateParams create_params;
  create_params.array_buffer_allocator = CcTest::array_buffer_allocator();
  v8::Isolate* isolate = v8::Isolate::New(create_params);
  {
    v8::Isolate::Scope isolate_scope(isolate);
    v8::HandleScope handle_scope(isolate);
    v8::Context::New(isolate)->Enter();

    Isolate* i_isolate = reinterpret_cast<Isolate*>(isolate);

    // Clear out any pre-existing garbage to make the test consistent
    // across snapshot/no-snapshot builds.
    CcTest::CollectAllGarbage(i_isolate);

    NewSpace* new_space = i_isolate->heap()->new_space();

    Observer observer1(512);
    new_space->AddAllocationObserver(&observer1);
    Observer observer2(576);
    new_space->AddAllocationObserver(&observer2);

    for (int i = 0; i < 512; ++i) {
      AllocateUnaligned(new_space, 32);
    }

    new_space->RemoveAllocationObserver(&observer1);
    new_space->RemoveAllocationObserver(&observer2);
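    // 512 allocations of 32 bytes each is 16384 bytes in total, which yields
    // 16384 / 512 = 32 steps for observer1 and 16384 / 576 = 28 for observer2.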

    CHECK_EQ(observer1.count(), 32);
    CHECK_EQ(observer2.count(), 28);
  }
  isolate->Dispose();
}

HEAP_TEST(Regress777177) {
  CcTest::InitializeVM();
  Isolate* isolate = CcTest::i_isolate();
  Heap* heap = isolate->heap();
  HandleScope scope(isolate);
  PagedSpace* old_space = heap->old_space();
  Observer observer(128);
  old_space->AddAllocationObserver(&observer);

  int area_size = old_space->AreaSize();
  int max_object_size = kMaxRegularHeapObjectSize;
  int filler_size = area_size - max_object_size;

  {
    // Ensure a new linear allocation area on a fresh page.
    AlwaysAllocateScopeForTesting always_allocate(heap);
    heap::SimulateFullSpace(old_space);
    AllocationResult result = old_space->AllocateRaw(filler_size, kWordAligned);
    HeapObject obj = result.ToObjectChecked();
    heap->CreateFillerObjectAt(obj.address(), filler_size,
                               ClearRecordedSlots::kNo);
  }

  {
    // Allocate all bytes of the linear allocation area. This moves top_ and
    // top_on_previous_step_ to the next page.
    AllocationResult result =
        old_space->AllocateRaw(max_object_size, kWordAligned);
    HeapObject obj = result.ToObjectChecked();
    // Simulate allocation folding moving the top pointer back.
    old_space->SetTopAndLimit(obj.address(), old_space->limit());
  }

  {
    // This triggers assert in crbug.com/777177.
    AllocationResult result = old_space->AllocateRaw(filler_size, kWordAligned);
    HeapObject obj = result.ToObjectChecked();
    heap->CreateFillerObjectAt(obj.address(), filler_size,
                               ClearRecordedSlots::kNo);
  }
  old_space->RemoveAllocationObserver(&observer);
}

HEAP_TEST(Regress791582) {
  CcTest::InitializeVM();
  Isolate* isolate = CcTest::i_isolate();
  Heap* heap = isolate->heap();
  HandleScope scope(isolate);
  NewSpace* new_space = heap->new_space();
  if (new_space->TotalCapacity() < new_space->MaximumCapacity()) {
    new_space->Grow();
  }

  int until_page_end = static_cast<int>(new_space->limit() - new_space->top());

  if (!IsAligned(until_page_end, kTaggedSize)) {
    // The test only works if the allocation area size is a multiple of the
    // pointer size. This is usually the case unless some allocation observer
    // is already active (e.g. incremental marking observer).
    return;
  }

  Observer observer(128);
  new_space->AddAllocationObserver(&observer);

  {
    AllocationResult result =
        new_space->AllocateRaw(until_page_end, kWordAligned);
    HeapObject obj = result.ToObjectChecked();
    heap->CreateFillerObjectAt(obj.address(), until_page_end,
                               ClearRecordedSlots::kNo);
    // Simulate allocation folding moving the top pointer back.
    *new_space->allocation_top_address() = obj.address();
  }

  {
    // This triggers assert in crbug.com/791582
    AllocationResult result = new_space->AllocateRaw(256, kWordAligned);
    HeapObject obj = result.ToObjectChecked();
    heap->CreateFillerObjectAt(obj.address(), 256, ClearRecordedSlots::kNo);
  }
  new_space->RemoveAllocationObserver(&observer);
}

TEST(ShrinkPageToHighWaterMarkFreeSpaceEnd) {
  FLAG_stress_incremental_marking = false;
  CcTest::InitializeVM();
  Isolate* isolate = CcTest::i_isolate();
  HandleScope scope(isolate);

  heap::SealCurrentObjects(CcTest::heap());

  // Prepare page that only contains a single object and a trailing FreeSpace
  // filler.
  Handle<FixedArray> array =
      isolate->factory()->NewFixedArray(128, AllocationType::kOld);
  Page* page = Page::FromHeapObject(*array);

  // Reset space so high water mark is consistent.
  PagedSpace* old_space = CcTest::heap()->old_space();
  old_space->FreeLinearAllocationArea();
  old_space->ResetFreeList();

  HeapObject filler = HeapObject::FromAddress(array->address() + array->Size());
  CHECK(filler.IsFreeSpace());
  size_t shrunk = old_space->ShrinkPageToHighWaterMark(page);
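  // The unused part of the page past the array, rounded down to whole commit
  // pages, is expected to have been shrunk.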
  size_t should_have_shrunk = RoundDown(
      static_cast<size_t>(MemoryChunkLayout::AllocatableMemoryInDataPage() -
                          array->Size()),
      CommitPageSize());
  CHECK_EQ(should_have_shrunk, shrunk);
}

TEST(ShrinkPageToHighWaterMarkNoFiller) {
  CcTest::InitializeVM();
  Isolate* isolate = CcTest::i_isolate();
  HandleScope scope(isolate);
  heap::SealCurrentObjects(CcTest::heap());

  const int kFillerSize = 0;
  std::vector<Handle<FixedArray>> arrays =
      heap::FillOldSpacePageWithFixedArrays(CcTest::heap(), kFillerSize);
  Handle<FixedArray> array = arrays.back();
  Page* page = Page::FromHeapObject(*array);
  CHECK_EQ(page->area_end(), array->address() + array->Size() + kFillerSize);

  // Reset space so high water mark and fillers are consistent.
  PagedSpace* old_space = CcTest::heap()->old_space();
  old_space->ResetFreeList();
  old_space->FreeLinearAllocationArea();

  size_t shrunk = old_space->ShrinkPageToHighWaterMark(page);
  CHECK_EQ(0u, shrunk);
}

TEST(ShrinkPageToHighWaterMarkOneWordFiller) {
  CcTest::InitializeVM();
  Isolate* isolate = CcTest::i_isolate();
  HandleScope scope(isolate);

  heap::SealCurrentObjects(CcTest::heap());

  const int kFillerSize = kTaggedSize;
  std::vector<Handle<FixedArray>> arrays =
      heap::FillOldSpacePageWithFixedArrays(CcTest::heap(), kFillerSize);
  Handle<FixedArray> array = arrays.back();
  Page* page = Page::FromHeapObject(*array);
  CHECK_EQ(page->area_end(), array->address() + array->Size() + kFillerSize);

  // Reset space so high water mark and fillers are consistent.
  PagedSpace* old_space = CcTest::heap()->old_space();
  old_space->FreeLinearAllocationArea();
  old_space->ResetFreeList();

  HeapObject filler = HeapObject::FromAddress(array->address() + array->Size());
  CHECK_EQ(filler.map(),
           ReadOnlyRoots(CcTest::heap()).one_pointer_filler_map());

  size_t shrunk = old_space->ShrinkPageToHighWaterMark(page);
  CHECK_EQ(0u, shrunk);
}

TEST(ShrinkPageToHighWaterMarkTwoWordFiller) {
  CcTest::InitializeVM();
  Isolate* isolate = CcTest::i_isolate();
  HandleScope scope(isolate);

  heap::SealCurrentObjects(CcTest::heap());

  const int kFillerSize = 2 * kTaggedSize;
  std::vector<Handle<FixedArray>> arrays =
      heap::FillOldSpacePageWithFixedArrays(CcTest::heap(), kFillerSize);
  Handle<FixedArray> array = arrays.back();
  Page* page = Page::FromHeapObject(*array);
  CHECK_EQ(page->area_end(), array->address() + array->Size() + kFillerSize);

  // Reset space so high water mark and fillers are consistent.
  PagedSpace* old_space = CcTest::heap()->old_space();
  old_space->FreeLinearAllocationArea();
  old_space->ResetFreeList();

  HeapObject filler = HeapObject::FromAddress(array->address() + array->Size());
  CHECK_EQ(filler.map(),
           ReadOnlyRoots(CcTest::heap()).two_pointer_filler_map());

  size_t shrunk = old_space->ShrinkPageToHighWaterMark(page);
  CHECK_EQ(0u, shrunk);
}

namespace {
// PageAllocator that always fails.
class FailingPageAllocator : public v8::PageAllocator {
 public:
  size_t AllocatePageSize() override { return 1024; }
  size_t CommitPageSize() override { return 1024; }
  void SetRandomMmapSeed(int64_t seed) override {}
  void* GetRandomMmapAddr() override { return nullptr; }
  void* AllocatePages(void* address, size_t length, size_t alignment,
                      Permission permissions) override {
    return nullptr;
  }
  bool FreePages(void* address, size_t length) override { return false; }
  bool ReleasePages(void* address, size_t length, size_t new_length) override {
    return false;
  }
  bool SetPermissions(void* address, size_t length,
                      Permission permissions) override {
    return false;
  }
};
}  // namespace

TEST(NoMemoryForNewPage) {
  Isolate* isolate = CcTest::i_isolate();
  Heap* heap = isolate->heap();

  // Memory allocator that will fail to allocate any pages.
  FailingPageAllocator failing_allocator;
  TestMemoryAllocatorScope test_allocator_scope(isolate, 0, 0,
                                                &failing_allocator);
  MemoryAllocator* memory_allocator = test_allocator_scope.allocator();
  OldSpace faked_space(heap);
  Page* page = memory_allocator->AllocatePage(
      faked_space.AreaSize(), static_cast<PagedSpace*>(&faked_space),
      NOT_EXECUTABLE);
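  // With a failing platform allocator, page allocation must fail gracefully
  // and yield a null page rather than crashing.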

  CHECK_NULL(page);
}

TEST(ReadOnlySpaceMetrics_OnePage) {
  Isolate* isolate = CcTest::i_isolate();
  Heap* heap = isolate->heap();

  // Create a read-only space, allocate some memory, shrink the pages, and
  // check that the allocated object size is as expected.

  ReadOnlySpace faked_space(heap);

  // Initially no memory.
  CHECK_EQ(faked_space.Size(), 0);
  CHECK_EQ(faked_space.Capacity(), 0);
  CHECK_EQ(faked_space.CommittedMemory(), 0);
  CHECK_EQ(faked_space.CommittedPhysicalMemory(), 0);

  faked_space.AllocateRaw(16, kWordAligned);

  faked_space.ShrinkPages();
  faked_space.Seal(ReadOnlySpace::SealMode::kDoNotDetachFromHeap);

  MemoryAllocator* allocator = heap->memory_allocator();

  // Allocated objects size.
  CHECK_EQ(faked_space.Size(), 16);

  size_t committed_memory = RoundUp(
      MemoryChunkLayout::ObjectStartOffsetInDataPage() + faked_space.Size(),
      allocator->GetCommitPageSize());

  // Amount of OS allocated memory.
  CHECK_EQ(faked_space.CommittedMemory(), committed_memory);
  CHECK_EQ(faked_space.CommittedPhysicalMemory(), committed_memory);

  // Capacity will be one OS page minus the page header.
  CHECK_EQ(faked_space.Capacity(),
           committed_memory - MemoryChunkLayout::ObjectStartOffsetInDataPage());
}

TEST(ReadOnlySpaceMetrics_AlignedAllocations) {
  Isolate* isolate = CcTest::i_isolate();
  Heap* heap = isolate->heap();

  // Create a read-only space, allocate some memory, shrink the pages, and
  // check that the allocated object size is as expected.

  ReadOnlySpace faked_space(heap);

  // Initially no memory.
  CHECK_EQ(faked_space.Size(), 0);
  CHECK_EQ(faked_space.Capacity(), 0);
  CHECK_EQ(faked_space.CommittedMemory(), 0);
  CHECK_EQ(faked_space.CommittedPhysicalMemory(), 0);

  MemoryAllocator* allocator = heap->memory_allocator();
  // Allocate an object just under an OS page in size.
  int object_size =
      static_cast<int>(allocator->GetCommitPageSize() - kApiTaggedSize);

// TODO(v8:8875): Pointer compression does not enable aligned memory allocation
// yet.
#ifdef V8_COMPRESS_POINTERS
  int alignment = kInt32Size;
#else
  int alignment = kDoubleSize;
#endif

  HeapObject object =
      faked_space.AllocateRaw(object_size, kDoubleAligned).ToObjectChecked();
  CHECK_EQ(object.address() % alignment, 0);
  object =
      faked_space.AllocateRaw(object_size, kDoubleAligned).ToObjectChecked();
  CHECK_EQ(object.address() % alignment, 0);

  faked_space.ShrinkPages();
  faked_space.Seal(ReadOnlySpace::SealMode::kDoNotDetachFromHeap);

  // Allocated objects size may contain 4 bytes of padding on 32-bit or
  // with pointer compression.
  CHECK_EQ(faked_space.Size(), object_size + RoundUp(object_size, alignment));

  size_t committed_memory = RoundUp(
      MemoryChunkLayout::ObjectStartOffsetInDataPage() + faked_space.Size(),
      allocator->GetCommitPageSize());

  CHECK_EQ(faked_space.CommittedMemory(), committed_memory);
  CHECK_EQ(faked_space.CommittedPhysicalMemory(), committed_memory);

  // Capacity will be 3 OS pages minus the page header.
  CHECK_EQ(faked_space.Capacity(),
           committed_memory - MemoryChunkLayout::ObjectStartOffsetInDataPage());
}

TEST(ReadOnlySpaceMetrics_TwoPages) {
  Isolate* isolate = CcTest::i_isolate();
  Heap* heap = isolate->heap();

  // Create a read-only space, allocate some memory, shrink the pages, and
  // check that the allocated object size is as expected.

  ReadOnlySpace faked_space(heap);

  // Initially no memory.
  CHECK_EQ(faked_space.Size(), 0);
  CHECK_EQ(faked_space.Capacity(), 0);
  CHECK_EQ(faked_space.CommittedMemory(), 0);
  CHECK_EQ(faked_space.CommittedPhysicalMemory(), 0);

  MemoryAllocator* allocator = heap->memory_allocator();

  // Allocate an object that's too big to have more than one on a page.

  int object_size = RoundUp(
      static_cast<int>(
          MemoryChunkLayout::AllocatableMemoryInMemoryChunk(RO_SPACE) / 2 + 16),
      kTaggedSize);
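  // Two objects of this size cannot fit into the allocatable area of a
  // single page.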
  CHECK_GT(object_size * 2,
           MemoryChunkLayout::AllocatableMemoryInMemoryChunk(RO_SPACE));
  faked_space.AllocateRaw(object_size, kWordAligned);

  // Then allocate another so it expands the space to two pages.
  faked_space.AllocateRaw(object_size, kWordAligned);

  faked_space.ShrinkPages();
  faked_space.Seal(ReadOnlySpace::SealMode::kDoNotDetachFromHeap);

  // Allocated objects size.
  CHECK_EQ(faked_space.Size(), object_size * 2);

  // Amount of OS allocated memory.
  size_t committed_memory_per_page =
      RoundUp(MemoryChunkLayout::ObjectStartOffsetInDataPage() + object_size,
              allocator->GetCommitPageSize());
  CHECK_EQ(faked_space.CommittedMemory(), 2 * committed_memory_per_page);
  CHECK_EQ(faked_space.CommittedPhysicalMemory(),
           2 * committed_memory_per_page);

  // Capacity will be the space up to the amount of committed memory minus the
  // page headers.
  size_t capacity_per_page =
      RoundUp(MemoryChunkLayout::ObjectStartOffsetInDataPage() + object_size,
              allocator->GetCommitPageSize()) -
      MemoryChunkLayout::ObjectStartOffsetInDataPage();
  CHECK_EQ(faked_space.Capacity(), 2 * capacity_per_page);
}

}  // namespace heap
}  // namespace internal
}  // namespace v8