// Copyright 2020 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/heap/concurrent-allocator.h"

#include "src/common/globals.h"
#include "src/execution/isolate.h"
#include "src/handles/persistent-handles.h"
#include "src/heap/concurrent-allocator-inl.h"
#include "src/heap/heap.h"
#include "src/heap/local-heap-inl.h"
#include "src/heap/local-heap.h"
#include "src/heap/marking.h"
#include "src/heap/memory-chunk.h"
#include "src/heap/parked-scope.h"

namespace v8 {
namespace internal {

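// Stress task that hammers the concurrent allocation path: it repeatedly
// allocates old-space objects of three size classes from a background
// LocalHeap, turning every successful allocation into a filler object so
// the heap stays iterable.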
void StressConcurrentAllocatorTask::RunInternal() {
  Heap* heap = isolate_->heap();
  LocalHeap local_heap(heap, ThreadKind::kBackground);
  UnparkedScope unparked_scope(&local_heap);

  const int kNumIterations = 2000;
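  // Three size classes: a few tagged words, a medium 8 KB chunk, and the
  // largest object that still fits on a regular data page.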
  const int kSmallObjectSize = 10 * kTaggedSize;
  const int kMediumObjectSize = 8 * KB;
  const int kLargeObjectSize =
      static_cast<int>(MemoryChunk::kPageSize -
                       MemoryChunkLayout::ObjectStartOffsetInDataPage());

  for (int i = 0; i < kNumIterations; i++) {
    // Isolate tear-down has started; stop allocating.
    if (heap->gc_state() == Heap::TEAR_DOWN) return;

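    // Allocate, then immediately fill the memory so the heap stays iterable;
    // if allocation fails, help out by triggering a collection.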
    AllocationResult result = local_heap.AllocateRaw(
        kSmallObjectSize, AllocationType::kOld, AllocationOrigin::kRuntime,
        AllocationAlignment::kTaggedAligned);
    if (!result.IsFailure()) {
      heap->CreateFillerObjectAtBackground(result.ToAddress(),
                                           kSmallObjectSize);
    } else {
      local_heap.TryPerformCollection();
    }

    result = local_heap.AllocateRaw(kMediumObjectSize, AllocationType::kOld,
                                    AllocationOrigin::kRuntime,
                                    AllocationAlignment::kTaggedAligned);
    if (!result.IsFailure()) {
      heap->CreateFillerObjectAtBackground(result.ToAddress(),
                                           kMediumObjectSize);
    } else {
      local_heap.TryPerformCollection();
    }

    result = local_heap.AllocateRaw(kLargeObjectSize, AllocationType::kOld,
                                    AllocationOrigin::kRuntime,
                                    AllocationAlignment::kTaggedAligned);
    if (!result.IsFailure()) {
      heap->CreateFillerObjectAtBackground(result.ToAddress(),
                                           kLargeObjectSize);
    } else {
      local_heap.TryPerformCollection();
    }
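    // Honor a pending safepoint request so this thread does not block a GC.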
    local_heap.Safepoint();
  }

  Schedule(isolate_);
}

// static
void StressConcurrentAllocatorTask::Schedule(Isolate* isolate) {
  auto task = std::make_unique<StressConcurrentAllocatorTask>(isolate);
  const double kDelayInSeconds = 0.1;
  V8::GetCurrentPlatform()->CallDelayedOnWorkerThread(std::move(task),
                                                      kDelayInSeconds);
}

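// Closes the LAB; the unused portion is turned into a filler object so the
// page remains iterable.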
void ConcurrentAllocator::FreeLinearAllocationArea() {
  // The code page of the linear allocation area needs to be unprotected
  // because we are going to write a filler into that memory area below.
  base::Optional<CodePageMemoryModificationScope> optional_scope;
  if (lab_.IsValid() && space_->identity() == CODE_SPACE) {
    optional_scope.emplace(MemoryChunk::FromAddress(lab_.top()));
  }
  lab_.CloseAndMakeIterable();
}

void ConcurrentAllocator::MakeLinearAllocationAreaIterable() {
  // The code page of the linear allocation area needs to be unprotected
  // because we are going to write a filler into that memory area below.
  base::Optional<CodePageMemoryModificationScope> optional_scope;
  if (lab_.IsValid() && space_->identity() == CODE_SPACE) {
    optional_scope.emplace(MemoryChunk::FromAddress(lab_.top()));
  }
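  // Unlike CloseAndMakeIterable() above, this keeps the LAB open for
  // further allocations.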
  lab_.MakeIterable();
}

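// While black allocation is active, the unused part of the LAB is kept
// black so that objects bump-allocated from it are already marked; the two
// helpers below create and destroy that black area.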
void ConcurrentAllocator::MarkLinearAllocationAreaBlack() {
  Address top = lab_.top();
  Address limit = lab_.limit();

  if (top != kNullAddress && top != limit) {
    Page::FromAllocationAreaAddress(top)->CreateBlackAreaBackground(top, limit);
  }
}

void ConcurrentAllocator::UnmarkLinearAllocationArea() {
  Address top = lab_.top();
  Address limit = lab_.limit();

  if (top != kNullAddress && top != limit) {
    Page::FromAllocationAreaAddress(top)->DestroyBlackAreaBackground(top,
                                                                     limit);
  }
}

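// Slow path of LAB allocation: the current LAB is exhausted, so refill it
// and retry. After a successful refill the allocation must succeed, see the
// DCHECK below.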
AllocationResult ConcurrentAllocator::AllocateInLabSlow(
    int object_size, AllocationAlignment alignment, AllocationOrigin origin) {
  if (!EnsureLab(origin)) {
    return AllocationResult::Failure();
  }

  AllocationResult allocation = lab_.AllocateRawAligned(object_size, alignment);
  DCHECK(!allocation.IsFailure());

  return allocation;
}

bool ConcurrentAllocator::EnsureLab(AllocationOrigin origin) {
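  // Request a new LAB of between kLabSize and kMaxLabSize bytes from the
  // space.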
  auto result = space_->RawRefillLabBackground(
      local_heap_, kLabSize, kMaxLabSize, kTaggedAligned, origin);
  if (!result) return false;

  if (IsBlackAllocationEnabled()) {
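    // A freshly refilled LAB has to be blackened immediately while black
    // allocation is active.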
    Address top = result->first;
    Address limit = top + result->second;
    Page::FromAllocationAreaAddress(top)->CreateBlackAreaBackground(top, limit);
  }

  HeapObject object = HeapObject::FromAddress(result->first);
  LocalAllocationBuffer saved_lab = std::move(lab_);
  lab_ = LocalAllocationBuffer::FromResult(
      space_->heap(), AllocationResult::FromObject(object), result->second);
  DCHECK(lab_.IsValid());
  if (!lab_.TryMerge(&saved_lab)) {
    saved_lab.CloseAndMakeIterable();
  }
  return true;
}

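// Allocates exactly object_size bytes straight from the space, bypassing
// the LAB.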
AllocationResult ConcurrentAllocator::AllocateOutsideLab(
    int object_size, AllocationAlignment alignment, AllocationOrigin origin) {
  auto result = space_->RawRefillLabBackground(local_heap_, object_size,
                                               object_size, alignment, origin);
  if (!result) return AllocationResult::Failure();

  HeapObject object = HeapObject::FromAddress(result->first);

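  // There is no blackened LAB backing this object, so it has to be marked
  // black explicitly while black allocation is active.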
  if (IsBlackAllocationEnabled()) {
    owning_heap()->incremental_marking()->MarkBlackBackground(object,
                                                              object_size);
  }

  return AllocationResult::FromObject(object);
}

bool ConcurrentAllocator::IsBlackAllocationEnabled() const {
  return owning_heap()->incremental_marking()->black_allocation();
}

Heap* ConcurrentAllocator::owning_heap() const { return space_->heap(); }

}  // namespace internal
}  // namespace v8