// Copyright 2015 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/factory.h"
#include "src/heap/mark-compact.h"
#include "src/isolate.h"
// FIXME(mstarzinger, marja): This is weird, but required because of the missing
// (disallowed) include: src/factory.h -> src/objects-inl.h
#include "src/objects-inl.h"
// FIXME(mstarzinger, marja): This is weird, but required because of the missing
// (disallowed) include: src/feedback-vector.h ->
// src/feedback-vector-inl.h
#include "src/feedback-vector-inl.h"
#include "test/cctest/cctest.h"
#include "test/cctest/heap/heap-tester.h"
#include "test/cctest/heap/heap-utils.h"

namespace v8 {
namespace internal {
namespace heap {

namespace {

void CheckInvariantsOfAbortedPage(Page* page) {
  // Check invariants:
  // 1) Markbits are cleared
  // 2) The page is not marked as evacuation candidate anymore
  // 3) The page is not marked as aborted compaction anymore.
  CHECK(page->heap()
            ->mark_compact_collector()
            ->non_atomic_marking_state()
            ->bitmap(page)
            ->IsClean());
  CHECK(!page->IsEvacuationCandidate());
  CHECK(!page->IsFlagSet(Page::COMPACTION_WAS_ABORTED));
}

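// Checks that every handle in {handles} still refers to an object located on
// {page}.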
void CheckAllObjectsOnPage(std::vector<Handle<FixedArray>>& handles,
                           Page* page) {
  for (auto& fixed_array : handles) {
    CHECK(Page::FromAddress(fixed_array->address()) == page);
  }
}

}  // namespace

HEAP_TEST(CompactionFullAbortedPage) {
  if (FLAG_never_compact) return;
  // Test the scenario where we reach OOM during compaction and the whole page
  // is aborted.

  // Disable concurrent sweeping to ensure memory is in an expected state, i.e.,
  // we can reach the state of a fully aborted page.
  FLAG_concurrent_sweeping = false;
  FLAG_concurrent_marking = false;
  FLAG_stress_incremental_marking = false;
  FLAG_manual_evacuation_candidates_selection = true;
  CcTest::InitializeVM();
  Isolate* isolate = CcTest::i_isolate();
  Heap* heap = isolate->heap();
  {
    HandleScope scope1(isolate);

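    // Sealing is expected to close off the currently used pages so that the
    // objects allocated below end up on the fresh page added by Expand().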
    heap::SealCurrentObjects(heap);

    {
      HandleScope scope2(isolate);
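      // Add a fresh page to old space and fill it completely, so the whole
      // page can be forced to become an evacuation candidate below.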
      CHECK(heap->old_space()->Expand());
      auto compaction_page_handles =
          heap::CreatePadding(heap, Page::kAllocatableMemory, TENURED);
      Page* to_be_aborted_page =
          Page::FromAddress(compaction_page_handles.front()->address());
      to_be_aborted_page->SetFlag(
          MemoryChunk::FORCE_EVACUATION_CANDIDATE_FOR_TESTING);
      CheckAllObjectsOnPage(compaction_page_handles, to_be_aborted_page);

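      // Simulate reaching OOM during compaction so that evacuation of the
      // candidate page fails and the page is aborted.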
      heap->set_force_oom(true);
      CcTest::CollectAllGarbage();
      heap->mark_compact_collector()->EnsureSweepingCompleted();

      // Check that all handles still point to the same page, i.e., compaction
      // has been aborted on the page.
      for (Handle<FixedArray> object : compaction_page_handles) {
        CHECK_EQ(to_be_aborted_page, Page::FromAddress(object->address()));
      }
      CheckInvariantsOfAbortedPage(to_be_aborted_page);
    }
  }
}


HEAP_TEST(CompactionPartiallyAbortedPage) {
  if (FLAG_never_compact) return;
  // Test the scenario where we reach OOM during compaction and parts of the
  // page have already been migrated to a new one.

  // Disable concurrent sweeping to ensure memory is in an expected state, i.e.,
  // we can reach the state of a half aborted page.
  FLAG_concurrent_sweeping = false;
  FLAG_concurrent_marking = false;
  FLAG_stress_incremental_marking = false;
  FLAG_manual_evacuation_candidates_selection = true;

  const int objects_per_page = 10;
  const int object_size = Page::kAllocatableMemory / objects_per_page;

  CcTest::InitializeVM();
  Isolate* isolate = CcTest::i_isolate();
  Heap* heap = isolate->heap();
  {
    HandleScope scope1(isolate);

    heap::SealCurrentObjects(heap);

    {
      HandleScope scope2(isolate);
      // Fill another page with objects of size {object_size} (last one is
      // properly adjusted).
      CHECK(heap->old_space()->Expand());
      auto compaction_page_handles = heap::CreatePadding(
          heap, Page::kAllocatableMemory, TENURED, object_size);
      Page* to_be_aborted_page =
          Page::FromAddress(compaction_page_handles.front()->address());
      to_be_aborted_page->SetFlag(
          MemoryChunk::FORCE_EVACUATION_CANDIDATE_FOR_TESTING);
      CheckAllObjectsOnPage(compaction_page_handles, to_be_aborted_page);

      {
        // Add another page that is filled with {num_objects} objects of size
        // {object_size}.
        HandleScope scope3(isolate);
        CHECK(heap->old_space()->Expand());
        const int num_objects = 3;
        std::vector<Handle<FixedArray>> page_to_fill_handles =
            heap::CreatePadding(heap, object_size * num_objects, TENURED,
                                object_size);
        Page* page_to_fill =
            Page::FromAddress(page_to_fill_handles.front()->address());

        heap->set_force_oom(true);
        CcTest::CollectAllGarbage();
        heap->mark_compact_collector()->EnsureSweepingCompleted();

        bool migration_aborted = false;
        for (Handle<FixedArray> object : compaction_page_handles) {
          // Once compaction has been aborted, all following objects still have
          // to be on the initial page.
          CHECK(!migration_aborted ||
                (Page::FromAddress(object->address()) == to_be_aborted_page));
          if (Page::FromAddress(object->address()) == to_be_aborted_page) {
            // This object has not been migrated.
            migration_aborted = true;
          } else {
            CHECK_EQ(Page::FromAddress(object->address()), page_to_fill);
          }
        }
        // Check that we actually created a scenario with a partially aborted
        // page.
        CHECK(migration_aborted);
        CheckInvariantsOfAbortedPage(to_be_aborted_page);
      }
    }
  }
}


HEAP_TEST(CompactionPartiallyAbortedPageIntraAbortedPointers) {
  if (FLAG_never_compact) return;
  // Test the scenario where we reach OOM during compaction and parts of the
  // page have already been migrated to a new one. Objects on the aborted page
  // are linked together. This test makes sure that intra-aborted page pointers
  // get properly updated.

  // Disable concurrent sweeping to ensure memory is in an expected state, i.e.,
  // we can reach the state of a half aborted page.
  FLAG_concurrent_sweeping = false;
  FLAG_concurrent_marking = false;
  FLAG_stress_incremental_marking = false;
  FLAG_manual_evacuation_candidates_selection = true;

  const int objects_per_page = 10;
  const int object_size = Page::kAllocatableMemory / objects_per_page;

  CcTest::InitializeVM();
  Isolate* isolate = CcTest::i_isolate();
  Heap* heap = isolate->heap();
  {
    HandleScope scope1(isolate);
    Handle<FixedArray> root_array =
        isolate->factory()->NewFixedArray(10, TENURED);

    heap::SealCurrentObjects(heap);

    Page* to_be_aborted_page = nullptr;
    {
      HandleScope temporary_scope(isolate);
      // Fill a fresh page with objects of size {object_size} (last one is
      // properly adjusted).
      CHECK(heap->old_space()->Expand());
      std::vector<Handle<FixedArray>> compaction_page_handles =
          heap::CreatePadding(heap, Page::kAllocatableMemory, TENURED,
                              object_size);
      to_be_aborted_page =
          Page::FromAddress(compaction_page_handles.front()->address());
      to_be_aborted_page->SetFlag(
          MemoryChunk::FORCE_EVACUATION_CANDIDATE_FOR_TESTING);
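      // Link the objects on the page into a chain: each array's slot 0 points
      // to the array allocated just before it.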
      for (size_t i = compaction_page_handles.size() - 1; i > 0; i--) {
        compaction_page_handles[i]->set(0, *compaction_page_handles[i - 1]);
      }
      root_array->set(0, *compaction_page_handles.back());
      CheckAllObjectsOnPage(compaction_page_handles, to_be_aborted_page);
    }
    {
      // Add another page that is filled with {num_objects} objects of size
      // {object_size}.
      HandleScope scope3(isolate);
      CHECK(heap->old_space()->Expand());
      const int num_objects = 2;
      int used_memory = object_size * num_objects;
      std::vector<Handle<FixedArray>> page_to_fill_handles =
          heap::CreatePadding(heap, used_memory, TENURED, object_size);
      Page* page_to_fill =
          Page::FromAddress(page_to_fill_handles.front()->address());

      heap->set_force_oom(true);
      CcTest::CollectAllGarbage();
      heap->mark_compact_collector()->EnsureSweepingCompleted();

      // The following check makes sure that we compacted "some" objects, while
      // leaving others in place.
      bool in_place = true;
      Handle<FixedArray> current = root_array;
      while (current->get(0) != heap->undefined_value()) {
        current = Handle<FixedArray>(FixedArray::cast(current->get(0)));
        CHECK(current->IsFixedArray());
        if (Page::FromAddress(current->address()) != to_be_aborted_page) {
          in_place = false;
        }
        bool on_aborted_page =
            Page::FromAddress(current->address()) == to_be_aborted_page;
        bool on_fill_page =
            Page::FromAddress(current->address()) == page_to_fill;
        CHECK((in_place && on_aborted_page) || (!in_place && on_fill_page));
      }
      // Check that we at least migrated one object, as otherwise the test would
      // not trigger.
      CHECK(!in_place);
      CheckInvariantsOfAbortedPage(to_be_aborted_page);
    }
  }
}


HEAP_TEST(CompactionPartiallyAbortedPageWithStoreBufferEntries) {
  if (FLAG_never_compact) return;
  // Test the scenario where we reach OOM during compaction and parts of the
  // page have already been migrated to a new one. Objects on the aborted page
  // are linked together and the very first object on the aborted page points
  // into new space. The test verifies that the store buffer entries are
  // properly cleared and rebuilt after aborting a page. Failing to do so can
  // result in other objects being allocated in the free space where their
  // payload looks like a valid new space pointer.

  // Disable concurrent sweeping to ensure memory is in an expected state, i.e.,
  // we can reach the state of a half aborted page.
  FLAG_concurrent_sweeping = false;
  FLAG_concurrent_marking = false;
  FLAG_stress_incremental_marking = false;
  FLAG_manual_evacuation_candidates_selection = true;

  const int objects_per_page = 10;
  const int object_size = Page::kAllocatableMemory / objects_per_page;

  CcTest::InitializeVM();
  Isolate* isolate = CcTest::i_isolate();
  Heap* heap = isolate->heap();
  {
    HandleScope scope1(isolate);
    Handle<FixedArray> root_array =
        isolate->factory()->NewFixedArray(10, TENURED);
    heap::SealCurrentObjects(heap);

    Page* to_be_aborted_page = nullptr;
    {
      HandleScope temporary_scope(isolate);
      // Fill another page with objects of size {object_size} (last one is
      // properly adjusted).
      CHECK(heap->old_space()->Expand());
      auto compaction_page_handles = heap::CreatePadding(
          heap, Page::kAllocatableMemory, TENURED, object_size);
      // Sanity check that we have enough space for linking up arrays.
      CHECK_GE(compaction_page_handles.front()->length(), 2);
      to_be_aborted_page =
          Page::FromAddress(compaction_page_handles.front()->address());
      to_be_aborted_page->SetFlag(
          MemoryChunk::FORCE_EVACUATION_CANDIDATE_FOR_TESTING);

      for (size_t i = compaction_page_handles.size() - 1; i > 0; i--) {
        compaction_page_handles[i]->set(0, *compaction_page_handles[i - 1]);
      }
      root_array->set(0, *compaction_page_handles.back());
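      // Let the first array on the page point into new space so that an
      // old-to-new slot (store buffer entry) exists on the soon-to-be-aborted
      // page.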
      Handle<FixedArray> new_space_array =
          isolate->factory()->NewFixedArray(1, NOT_TENURED);
      CHECK(heap->InNewSpace(*new_space_array));
      compaction_page_handles.front()->set(1, *new_space_array);
      CheckAllObjectsOnPage(compaction_page_handles, to_be_aborted_page);
    }

    {
      // Add another page that is filled with {num_objects} objects of size
      // {object_size}.
      HandleScope scope3(isolate);
      CHECK(heap->old_space()->Expand());
      const int num_objects = 2;
      int used_memory = object_size * num_objects;
      std::vector<Handle<FixedArray>> page_to_fill_handles =
          heap::CreatePadding(heap, used_memory, TENURED, object_size);
      Page* page_to_fill =
          Page::FromAddress(page_to_fill_handles.front()->address());

      heap->set_force_oom(true);
      CcTest::CollectAllGarbage();
      heap->mark_compact_collector()->EnsureSweepingCompleted();

      // The following check makes sure that we compacted "some" objects, while
      // leaving others in place.
      bool in_place = true;
      Handle<FixedArray> current = root_array;
      while (current->get(0) != heap->undefined_value()) {
        current = Handle<FixedArray>(FixedArray::cast(current->get(0)));
        CHECK(!heap->InNewSpace(*current));
        CHECK(current->IsFixedArray());
        if (Page::FromAddress(current->address()) != to_be_aborted_page) {
          in_place = false;
        }
        bool on_aborted_page =
            Page::FromAddress(current->address()) == to_be_aborted_page;
        bool on_fill_page =
            Page::FromAddress(current->address()) == page_to_fill;
        CHECK((in_place && on_aborted_page) || (!in_place && on_fill_page));
      }
      // Check that we at least migrated one object, as otherwise the test would
      // not trigger.
      CHECK(!in_place);
      CheckInvariantsOfAbortedPage(to_be_aborted_page);

      // Allocate a new object in new space.
      Handle<FixedArray> holder =
          isolate->factory()->NewFixedArray(10, NOT_TENURED);
      // Create a broken address that looks like a tagged pointer to a new space
      // object.
      Address broken_address = holder->address() + 2 * kPointerSize + 1;
      // Convert it to a vector to create a string from it.
      Vector<const uint8_t> string_to_broken_address(
          reinterpret_cast<const uint8_t*>(&broken_address), kPointerSize);

      Handle<String> string;
      do {
        // We know that the interesting slot will be on the aborted page and
        // hence we allocate until we get our string on the aborted page.
        // We used slot 1 in the fixed size array, which corresponds to the
        // first word in the string. Since the first object definitely
        // migrated we can just allocate until we hit the aborted page.
        string = isolate->factory()
                     ->NewStringFromOneByte(string_to_broken_address, TENURED)
                     .ToHandleChecked();
      } while (Page::FromAddress(string->address()) != to_be_aborted_page);

      // If store buffer entries are not properly filtered/reset for aborted
      // pages, we now have a broken address at an object slot in old space and
      // the following scavenge will crash.
      CcTest::CollectGarbage(NEW_SPACE);
    }
  }
}

}  // namespace heap
}  // namespace internal
}  // namespace v8