// Copyright 2015 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/execution/isolate.h"
#include "src/heap/factory.h"
#include "src/heap/heap-inl.h"
#include "src/heap/mark-compact.h"
#include "src/heap/memory-chunk.h"
#include "src/heap/remembered-set-inl.h"
#include "src/objects/objects-inl.h"
#include "test/cctest/cctest.h"
#include "test/cctest/heap/heap-tester.h"
#include "test/cctest/heap/heap-utils.h"

namespace v8 {
namespace internal {
namespace heap {

namespace {

void CheckInvariantsOfAbortedPage(Page* page) {
  // Check invariants:
  // 1) Markbits are cleared
  // 2) The page is not marked as evacuation candidate anymore
  // 3) The page is not marked as aborted compaction anymore.
  CHECK(page->heap()
            ->mark_compact_collector()
            ->non_atomic_marking_state()
            ->bitmap(page)
            ->IsClean());
  CHECK(!page->IsEvacuationCandidate());
  CHECK(!page->IsFlagSet(Page::COMPACTION_WAS_ABORTED));
}

void CheckAllObjectsOnPage(const std::vector<Handle<FixedArray>>& handles,
                           Page* page) {
  for (Handle<FixedArray> fixed_array : handles) {
    CHECK(Page::FromHeapObject(*fixed_array) == page);
  }
}

}  // namespace

HEAP_TEST(CompactionFullAbortedPage) {
  if (FLAG_never_compact) return;
  // Test the scenario where we reach OOM during compaction and the whole page
  // is aborted.

  // Disable concurrent sweeping to ensure memory is in an expected state, i.e.,
  // we can reach the state of a half aborted page.
  ManualGCScope manual_gc_scope;
  FLAG_manual_evacuation_candidates_selection = true;
  CcTest::InitializeVM();
  Isolate* isolate = CcTest::i_isolate();
  Heap* heap = isolate->heap();
  {
    HandleScope scope1(isolate);

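    // Seal the current pages so that the objects allocated below end up on a
    // fresh, fully controlled page.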
    heap::SealCurrentObjects(heap);

    {
      HandleScope scope2(isolate);
      CHECK(heap->old_space()->Expand());
      auto compaction_page_handles = heap::CreatePadding(
          heap,
          static_cast<int>(MemoryChunkLayout::AllocatableMemoryInDataPage()),
          AllocationType::kOld);
      Page* to_be_aborted_page =
          Page::FromHeapObject(*compaction_page_handles.front());
      to_be_aborted_page->SetFlag(
          MemoryChunk::FORCE_EVACUATION_CANDIDATE_FOR_TESTING);
      CheckAllObjectsOnPage(compaction_page_handles, to_be_aborted_page);

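      // Force OOM on the next old-space expansion so that evacuating the
      // candidate page fails and compaction is aborted for the whole page.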
      heap->set_force_oom(true);
      CcTest::CollectAllGarbage();
      heap->mark_compact_collector()->EnsureSweepingCompleted();

      // Check that all handles still point to the same page, i.e., compaction
      // has been aborted on the page.
      for (Handle<FixedArray> object : compaction_page_handles) {
        CHECK_EQ(to_be_aborted_page, Page::FromHeapObject(*object));
      }
      CheckInvariantsOfAbortedPage(to_be_aborted_page);
    }
  }
}

namespace {

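// Computes an object size such that {objects_per_page} objects of that size
// fill a page's allocatable area. The size is rounded down to a multiple of
// kTaggedSize and capped at kMaxRegularHeapObjectSize.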
int GetObjectSize(int objects_per_page) {
  int allocatable =
      static_cast<int>(MemoryChunkLayout::AllocatableMemoryInDataPage());
  // Make sure that object_size is a multiple of kTaggedSize.
  int object_size =
      ((allocatable / kTaggedSize) / objects_per_page) * kTaggedSize;
  return Min(kMaxRegularHeapObjectSize, object_size);
}

}  // namespace

HEAP_TEST(CompactionPartiallyAbortedPage) {
  if (FLAG_never_compact) return;
  // Test the scenario where we reach OOM during compaction and parts of the
  // page have already been migrated to a new one.

  // Disable concurrent sweeping to ensure memory is in an expected state, i.e.,
  // we can reach the state of a half aborted page.
  ManualGCScope manual_gc_scope;
  FLAG_manual_evacuation_candidates_selection = true;

  const int objects_per_page = 10;
  const int object_size = GetObjectSize(objects_per_page);

  CcTest::InitializeVM();
  Isolate* isolate = CcTest::i_isolate();
  Heap* heap = isolate->heap();
  {
    HandleScope scope1(isolate);

    heap::SealCurrentObjects(heap);

    {
      HandleScope scope2(isolate);
      // Fill another page with objects of size {object_size} (last one is
      // properly adjusted).
      CHECK(heap->old_space()->Expand());
      auto compaction_page_handles = heap::CreatePadding(
          heap,
          static_cast<int>(MemoryChunkLayout::AllocatableMemoryInDataPage()),
          AllocationType::kOld, object_size);
      Page* to_be_aborted_page =
          Page::FromHeapObject(*compaction_page_handles.front());
      to_be_aborted_page->SetFlag(
          MemoryChunk::FORCE_EVACUATION_CANDIDATE_FOR_TESTING);
      CheckAllObjectsOnPage(compaction_page_handles, to_be_aborted_page);

      {
        // Add another page that is filled with {num_objects} objects of size
        // {object_size}.
        HandleScope scope3(isolate);
        CHECK(heap->old_space()->Expand());
        const int num_objects = 3;
        std::vector<Handle<FixedArray>> page_to_fill_handles =
            heap::CreatePadding(heap, object_size * num_objects,
                                AllocationType::kOld, object_size);
        Page* page_to_fill =
            Page::FromAddress(page_to_fill_handles.front()->address());

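        // Force OOM during the GC below: objects migrate into the remaining
        // free space on {page_to_fill} until it runs out, and evacuation of
        // the rest of the candidate page is aborted.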
        heap->set_force_oom(true);
        CcTest::CollectAllGarbage();
        heap->mark_compact_collector()->EnsureSweepingCompleted();

        bool migration_aborted = false;
        for (Handle<FixedArray> object : compaction_page_handles) {
          // Once compaction has been aborted, all following objects still have
          // to be on the initial page.
          CHECK(!migration_aborted ||
                (Page::FromHeapObject(*object) == to_be_aborted_page));
          if (Page::FromHeapObject(*object) == to_be_aborted_page) {
            // This object has not been migrated.
            migration_aborted = true;
          } else {
            CHECK_EQ(Page::FromHeapObject(*object), page_to_fill);
          }
        }
        // Check that we actually created a scenario with a partially aborted
        // page.
        CHECK(migration_aborted);
        CheckInvariantsOfAbortedPage(to_be_aborted_page);
      }
    }
  }
}

HEAP_TEST(CompactionPartiallyAbortedPageWithInvalidatedSlots) {
  if (FLAG_never_compact) return;
  // Test evacuating a page partially when it contains recorded
  // slots and invalidated objects.

  // Disable concurrent sweeping to ensure memory is in an expected state, i.e.,
  // we can reach the state of a half aborted page.
  ManualGCScope manual_gc_scope;
  FLAG_manual_evacuation_candidates_selection = true;

  const int objects_per_page = 10;
  const int object_size = GetObjectSize(objects_per_page);

  CcTest::InitializeVM();
  Isolate* isolate = CcTest::i_isolate();
  Heap* heap = isolate->heap();
  {
    HandleScope scope1(isolate);

    heap::SealCurrentObjects(heap);

    {
      HandleScope scope2(isolate);
      // Fill another page with objects of size {object_size} (last one is
      // properly adjusted).
      CHECK(heap->old_space()->Expand());
      auto compaction_page_handles = heap::CreatePadding(
          heap,
          static_cast<int>(MemoryChunkLayout::AllocatableMemoryInDataPage()),
          AllocationType::kOld, object_size);
      Page* to_be_aborted_page =
          Page::FromHeapObject(*compaction_page_handles.front());
      for (Handle<FixedArray> object : compaction_page_handles) {
        CHECK_EQ(Page::FromHeapObject(*object), to_be_aborted_page);

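        // Record an OLD_TO_NEW slot for every element of every array so that
        // the evacuation candidate carries remembered-set entries.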
        for (int i = 0; i < object->length(); i++) {
          RememberedSet<OLD_TO_NEW>::Insert<AccessMode::ATOMIC>(
              to_be_aborted_page, object->RawFieldOfElementAt(i).address());
        }
      }
      // First object is going to be evacuated.
      to_be_aborted_page->RegisterObjectWithInvalidatedSlots<OLD_TO_NEW>(
          *compaction_page_handles.front());
      // Last object is NOT going to be evacuated: not all objects fit on the
      // only other page in the old space, and the GC isn't allowed to
      // allocate another page.
      to_be_aborted_page->RegisterObjectWithInvalidatedSlots<OLD_TO_NEW>(
          *compaction_page_handles.back());
      to_be_aborted_page->SetFlag(
          MemoryChunk::FORCE_EVACUATION_CANDIDATE_FOR_TESTING);

      {
        // Add another page that is filled with {num_objects} objects of size
        // {object_size}.
        HandleScope scope3(isolate);
        CHECK(heap->old_space()->Expand());
        const int num_objects = 3;
        std::vector<Handle<FixedArray>> page_to_fill_handles =
            heap::CreatePadding(heap, object_size * num_objects,
                                AllocationType::kOld, object_size);
        Page* page_to_fill =
            Page::FromAddress(page_to_fill_handles.front()->address());

        heap->set_force_oom(true);
        CcTest::CollectAllGarbage();
        heap->mark_compact_collector()->EnsureSweepingCompleted();

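        // The first registered object was evacuated to {page_to_fill}; the
        // last one stayed behind on the aborted page.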
        CHECK_EQ(Page::FromHeapObject(*compaction_page_handles.front()),
                 page_to_fill);
        CHECK_EQ(Page::FromHeapObject(*compaction_page_handles.back()),
                 to_be_aborted_page);
      }
    }
  }
}

HEAP_TEST(CompactionPartiallyAbortedPageIntraAbortedPointers) {
  if (FLAG_never_compact) return;
  // Test the scenario where we reach OOM during compaction and parts of the
  // page have already been migrated to a new one. Objects on the aborted page
  // are linked together. This test makes sure that intra-aborted page pointers
  // get properly updated.

  // Disable concurrent sweeping to ensure memory is in an expected state, i.e.,
  // we can reach the state of a half aborted page.
  ManualGCScope manual_gc_scope;
  FLAG_manual_evacuation_candidates_selection = true;

  const int objects_per_page = 10;
  const int object_size = GetObjectSize(objects_per_page);

  CcTest::InitializeVM();
  Isolate* isolate = CcTest::i_isolate();
  Heap* heap = isolate->heap();
  {
    HandleScope scope1(isolate);
    Handle<FixedArray> root_array =
        isolate->factory()->NewFixedArray(10, AllocationType::kOld);

    heap::SealCurrentObjects(heap);

    Page* to_be_aborted_page = nullptr;
    {
      HandleScope temporary_scope(isolate);
      // Fill a fresh page with objects of size {object_size} (last one is
      // properly adjusted).
      CHECK(heap->old_space()->Expand());
      std::vector<Handle<FixedArray>> compaction_page_handles =
          heap::CreatePadding(
              heap,
              static_cast<int>(
                  MemoryChunkLayout::AllocatableMemoryInDataPage()),
              AllocationType::kOld, object_size);
      to_be_aborted_page =
          Page::FromHeapObject(*compaction_page_handles.front());
      to_be_aborted_page->SetFlag(
          MemoryChunk::FORCE_EVACUATION_CANDIDATE_FOR_TESTING);
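      // Link the arrays into a chain through slot 0: each array points at the
      // previously allocated one, and {root_array} keeps the chain alive.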
      for (size_t i = compaction_page_handles.size() - 1; i > 0; i--) {
        compaction_page_handles[i]->set(0, *compaction_page_handles[i - 1]);
      }
      root_array->set(0, *compaction_page_handles.back());
      CheckAllObjectsOnPage(compaction_page_handles, to_be_aborted_page);
    }
    {
      // Add another page that is filled with {num_objects} objects of size
      // {object_size}.
      HandleScope scope3(isolate);
      CHECK(heap->old_space()->Expand());
      const int num_objects = 2;
      int used_memory = object_size * num_objects;
      std::vector<Handle<FixedArray>> page_to_fill_handles =
          heap::CreatePadding(heap, used_memory, AllocationType::kOld,
                              object_size);
      Page* page_to_fill = Page::FromHeapObject(*page_to_fill_handles.front());

      heap->set_force_oom(true);
      CcTest::CollectAllGarbage();
      heap->mark_compact_collector()->EnsureSweepingCompleted();

      // The following check makes sure that we compacted "some" objects, while
      // leaving others in place.
      bool in_place = true;
      Handle<FixedArray> current = root_array;
      while (current->get(0) != ReadOnlyRoots(heap).undefined_value()) {
        current =
            Handle<FixedArray>(FixedArray::cast(current->get(0)), isolate);
        CHECK(current->IsFixedArray());
        if (Page::FromHeapObject(*current) != to_be_aborted_page) {
          in_place = false;
        }
        bool on_aborted_page =
            Page::FromHeapObject(*current) == to_be_aborted_page;
        bool on_fill_page = Page::FromHeapObject(*current) == page_to_fill;
        CHECK((in_place && on_aborted_page) || (!in_place && on_fill_page));
      }
      // Check that we at least migrated one object, as otherwise the test would
      // not trigger.
      CHECK(!in_place);
      CheckInvariantsOfAbortedPage(to_be_aborted_page);
    }
  }
}

HEAP_TEST(CompactionPartiallyAbortedPageWithRememberedSetEntries) {
  if (FLAG_never_compact || FLAG_always_promote_young_mc) return;
  // Test the scenario where we reach OOM during compaction and parts of the
  // page have already been migrated to a new one. Objects on the aborted page
  // are linked together and the very first object on the aborted page points
  // into new space. The test verifies that the remembered set entries are
  // properly cleared and rebuilt after aborting a page. Failing to do so can
  // result in other objects being allocated in the free space where their
  // payload looks like a valid new space pointer.

  // Disable concurrent sweeping to ensure memory is in an expected state, i.e.,
  // we can reach the state of a half aborted page.
  ManualGCScope manual_gc_scope;
  FLAG_manual_evacuation_candidates_selection = true;

  const int objects_per_page = 10;
  const int object_size = GetObjectSize(objects_per_page);

  CcTest::InitializeVM();
  Isolate* isolate = CcTest::i_isolate();
  Heap* heap = isolate->heap();
  {
    HandleScope scope1(isolate);
    Handle<FixedArray> root_array =
        isolate->factory()->NewFixedArray(10, AllocationType::kOld);
    heap::SealCurrentObjects(heap);

    Page* to_be_aborted_page = nullptr;
    {
      HandleScope temporary_scope(isolate);
      // Fill another page with objects of size {object_size} (last one is
      // properly adjusted).
      CHECK(heap->old_space()->Expand());
      auto compaction_page_handles = heap::CreatePadding(
          heap,
          static_cast<int>(MemoryChunkLayout::AllocatableMemoryInDataPage()),
          AllocationType::kOld, object_size);
      // Sanity check that we have enough space for linking up arrays.
      CHECK_GE(compaction_page_handles.front()->length(), 2);
      to_be_aborted_page =
          Page::FromHeapObject(*compaction_page_handles.front());
      to_be_aborted_page->SetFlag(
          MemoryChunk::FORCE_EVACUATION_CANDIDATE_FOR_TESTING);

      for (size_t i = compaction_page_handles.size() - 1; i > 0; i--) {
        compaction_page_handles[i]->set(0, *compaction_page_handles[i - 1]);
      }
      root_array->set(0, *compaction_page_handles.back());
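      // Additionally point slot 1 of the chain's first array at a new-space
      // object; the write barrier records an OLD_TO_NEW slot for it on the
      // evacuation candidate.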
      Handle<FixedArray> new_space_array =
          isolate->factory()->NewFixedArray(1, AllocationType::kYoung);
      CHECK(Heap::InYoungGeneration(*new_space_array));
      compaction_page_handles.front()->set(1, *new_space_array);
      CheckAllObjectsOnPage(compaction_page_handles, to_be_aborted_page);
    }

    {
      // Add another page that is filled with {num_objects} objects of size
      // {object_size}.
      HandleScope scope3(isolate);
      CHECK(heap->old_space()->Expand());
      const int num_objects = 2;
      int used_memory = object_size * num_objects;
      std::vector<Handle<FixedArray>> page_to_fill_handles =
          heap::CreatePadding(heap, used_memory, AllocationType::kOld,
                              object_size);
      Page* page_to_fill = Page::FromHeapObject(*page_to_fill_handles.front());

      heap->set_force_oom(true);
      CcTest::CollectAllGarbage();
      heap->mark_compact_collector()->EnsureSweepingCompleted();

      // The following check makes sure that we compacted "some" objects, while
      // leaving others in place.
      bool in_place = true;
      Handle<FixedArray> current = root_array;
      while (current->get(0) != ReadOnlyRoots(heap).undefined_value()) {
        current =
            Handle<FixedArray>(FixedArray::cast(current->get(0)), isolate);
        CHECK(!Heap::InYoungGeneration(*current));
        CHECK(current->IsFixedArray());
        if (Page::FromHeapObject(*current) != to_be_aborted_page) {
          in_place = false;
        }
        bool on_aborted_page =
            Page::FromHeapObject(*current) == to_be_aborted_page;
        bool on_fill_page = Page::FromHeapObject(*current) == page_to_fill;
        CHECK((in_place && on_aborted_page) || (!in_place && on_fill_page));
      }
      // Check that we at least migrated one object, as otherwise the test would
      // not trigger.
      CHECK(!in_place);
      CheckInvariantsOfAbortedPage(to_be_aborted_page);

      // Allocate a new object in new space.
      Handle<FixedArray> holder =
          isolate->factory()->NewFixedArray(10, AllocationType::kYoung);
      // Create a broken address that looks like a tagged pointer to a new
      // space object.
      Address broken_address = holder->address() + 2 * kTaggedSize + 1;
      // Convert it to a vector to create a string from it.
      Vector<const uint8_t> string_to_broken_address(
          reinterpret_cast<const uint8_t*>(&broken_address), kTaggedSize);

      Handle<String> string;
      do {
        // We know that the interesting slot will be on the aborted page and
        // hence we allocate until we get our string on the aborted page.
        // We used slot 1 in the fixed size array, which corresponds to the
        // first word in the string. Since the first object definitely
        // migrated we can just allocate until we hit the aborted page.
        string = isolate->factory()
                     ->NewStringFromOneByte(string_to_broken_address,
                                            AllocationType::kOld)
                     .ToHandleChecked();
      } while (Page::FromHeapObject(*string) != to_be_aborted_page);

      // If remembered set entries are not properly filtered/reset for aborted
      // pages, we now have a broken address at an object slot in old space,
      // and the following scavenge will crash.
      CcTest::CollectGarbage(NEW_SPACE);
    }
  }
}

}  // namespace heap
}  // namespace internal
}  // namespace v8