// Copyright 2018 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/init/isolate-allocator.h"

#include "src/base/bounded-page-allocator.h"
#include "src/execution/isolate.h"
#include "src/heap/code-range.h"
#include "src/sandbox/sandbox.h"
#include "src/utils/memcopy.h"
#include "src/utils/utils.h"

namespace v8 {
namespace internal {

#ifdef V8_COMPRESS_POINTERS
namespace {

// "IsolateRootBiasPage" is an optional region before the 4Gb aligned
// reservation. This "IsolateRootBiasPage" page is supposed to be used for
// storing part of the Isolate object when Isolate::isolate_root_bias() is
// not zero.
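// For example, with a 4KB allocation granularity a non-zero bias rounds up
// to one full page, while a zero bias yields a zero-sized region.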
inline size_t GetIsolateRootBiasPageSize(
    v8::PageAllocator* platform_page_allocator) {
  return RoundUp(Isolate::isolate_root_bias(),
                 platform_page_allocator->AllocatePageSize());
}

}  // namespace

struct PtrComprCageReservationParams
    : public VirtualMemoryCage::ReservationParams {
  PtrComprCageReservationParams() {
    page_allocator = GetPlatformPageAllocator();

    // This is only used when there is a per-Isolate cage, in which case the
    // Isolate is allocated within the cage, and the Isolate root is also the
    // cage base.
    const size_t kIsolateRootBiasPageSize =
        COMPRESS_POINTERS_IN_ISOLATE_CAGE_BOOL
            ? GetIsolateRootBiasPageSize(page_allocator)
            : 0;
    reservation_size = kPtrComprCageReservationSize + kIsolateRootBiasPageSize;
    base_alignment = kPtrComprCageBaseAlignment;
    base_bias_size = kIsolateRootBiasPageSize;

    // Simplify BoundedPageAllocator's life by configuring it to use the same
    // page size as the Heap will use (MemoryChunk::kPageSize).
    page_size =
        RoundUp(size_t{1} << kPageSizeBits, page_allocator->AllocatePageSize());
    requested_start_hint =
        reinterpret_cast<Address>(page_allocator->GetRandomMmapAddr());
  }
};
#endif  // V8_COMPRESS_POINTERS

#ifdef V8_COMPRESS_POINTERS_IN_SHARED_CAGE
namespace {
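// Returns the process-wide pointer compression cage, created lazily on first
// use and intentionally leaked for the remainder of the process.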
DEFINE_LAZY_LEAKY_OBJECT_GETTER(VirtualMemoryCage, GetProcessWidePtrComprCage)
}  // anonymous namespace

// static
void IsolateAllocator::FreeProcessWidePtrComprCageForTesting() {
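  // Tear down the process-wide code range, if one was created, before
  // freeing the cage reservation.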
  if (std::shared_ptr<CodeRange> code_range =
          CodeRange::GetProcessWideCodeRange()) {
    code_range->Free();
  }
  GetProcessWidePtrComprCage()->Free();
}
#endif  // V8_COMPRESS_POINTERS_IN_SHARED_CAGE

// static
void IsolateAllocator::InitializeOncePerProcess() {
#ifdef V8_COMPRESS_POINTERS_IN_SHARED_CAGE
  PtrComprCageReservationParams params;
  base::AddressRegion existing_reservation;
#ifdef V8_SANDBOX
  // For now, we allow the sandbox to be disabled even when compiling with
  // v8_enable_sandbox. This fallback will be disallowed in the future, at the
  // latest once sandboxed pointers are enabled.
  if (GetProcessWideSandbox()->is_disabled()) {
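    // If the sandbox is disabled at runtime, backing stores must be allowed
    // to live outside of it.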
    CHECK(kAllowBackingStoresOutsideSandbox);
  } else {
    auto sandbox = GetProcessWideSandbox();
    CHECK(sandbox->is_initialized());
    // The pointer compression cage must be placed at the start of the sandbox.

    // TODO(chromium:12180): This currently assumes that no other pages were
    // allocated through the cage's page allocator in the meantime. In the
    // future, the cage initialization will happen just before this function
    // runs, and so this will be guaranteed. Currently however, it is possible
    // that the embedder accidentally uses the cage's page allocator prior to
    // initializing V8, in which case this CHECK will likely fail.
    Address base = sandbox->address_space()->AllocatePages(
        sandbox->base(), params.reservation_size, params.base_alignment,
        PagePermissions::kNoAccess);
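    // The cage was requested at the very base of the sandbox; verify that
    // the allocation succeeded exactly there.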
    CHECK_EQ(sandbox->base(), base);
    existing_reservation = base::AddressRegion(base, params.reservation_size);
    params.page_allocator = sandbox->page_allocator();
  }
#endif  // V8_SANDBOX
  if (!GetProcessWidePtrComprCage()->InitReservation(params,
                                                     existing_reservation)) {
    V8::FatalProcessOutOfMemory(
        nullptr,
        "Failed to reserve virtual memory for process-wide V8 "
        "pointer compression cage");
  }
#endif  // V8_COMPRESS_POINTERS_IN_SHARED_CAGE
}

IsolateAllocator::IsolateAllocator() {
#if defined(V8_COMPRESS_POINTERS_IN_ISOLATE_CAGE)
  PtrComprCageReservationParams params;
  if (!isolate_ptr_compr_cage_.InitReservation(params)) {
    V8::FatalProcessOutOfMemory(
        nullptr,
        "Failed to reserve memory for Isolate V8 pointer compression cage");
  }
  page_allocator_ = isolate_ptr_compr_cage_.page_allocator();
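  // Commit the pages at the beginning of the cage reservation that will hold
  // the Isolate object itself.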
  CommitPagesForIsolate();
#elif defined(V8_COMPRESS_POINTERS_IN_SHARED_CAGE)
  // Allocate the Isolate on the C++ heap when sharing a cage.
  CHECK(GetProcessWidePtrComprCage()->IsReserved());
  page_allocator_ = GetProcessWidePtrComprCage()->page_allocator();
  isolate_memory_ = ::operator new(sizeof(Isolate));
#else
  // Allocate the Isolate on the C++ heap.
  page_allocator_ = GetPlatformPageAllocator();
  isolate_memory_ = ::operator new(sizeof(Isolate));
#endif  // V8_COMPRESS_POINTERS_IN_ISOLATE_CAGE

  CHECK_NOT_NULL(page_allocator_);
}

IsolateAllocator::~IsolateAllocator() {
#ifdef V8_COMPRESS_POINTERS_IN_ISOLATE_CAGE
  if (isolate_ptr_compr_cage_.reservation()->IsReserved()) {
    // The actual memory will be freed when |isolate_ptr_compr_cage_| dies.
    return;
  }
#endif

  // The memory was allocated on the C++ heap.
  ::operator delete(isolate_memory_);
}

VirtualMemoryCage* IsolateAllocator::GetPtrComprCage() {
#if defined(V8_COMPRESS_POINTERS_IN_ISOLATE_CAGE)
  return &isolate_ptr_compr_cage_;
#elif defined(V8_COMPRESS_POINTERS_IN_SHARED_CAGE)
  return GetProcessWidePtrComprCage();
#else
  return nullptr;
#endif
}

const VirtualMemoryCage* IsolateAllocator::GetPtrComprCage() const {
  return const_cast<IsolateAllocator*>(this)->GetPtrComprCage();
}

#ifdef V8_COMPRESS_POINTERS_IN_ISOLATE_CAGE
void IsolateAllocator::CommitPagesForIsolate() {
  v8::PageAllocator* platform_page_allocator = GetPlatformPageAllocator();

  CHECK(isolate_ptr_compr_cage_.IsReserved());
  Address isolate_root = isolate_ptr_compr_cage_.base();
  CHECK(IsAligned(isolate_root, kPtrComprCageBaseAlignment));
  CHECK_GE(isolate_ptr_compr_cage_.reservation()->size(),
           kPtrComprCageReservationSize +
               GetIsolateRootBiasPageSize(platform_page_allocator));
  CHECK(isolate_ptr_compr_cage_.reservation()->InVM(
      isolate_root, kPtrComprCageReservationSize));

  size_t page_size = page_allocator_->AllocatePageSize();
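  // The Isolate is placed so that the address at offset isolate_root_bias()
  // coincides with the cage base (the Isolate root); its first bytes thus
  // live in the bias page reserved in front of the 4GB-aligned region.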
  Address isolate_address = isolate_root - Isolate::isolate_root_bias();
  Address isolate_end = isolate_address + sizeof(Isolate);

  // Inform the bounded page allocator about reserved pages.
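  // This removes the region holding the Isolate from the cage's allocatable
  // range, so the BoundedPageAllocator never hands these pages out again.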
  {
    Address reserved_region_address = isolate_root;
    size_t reserved_region_size =
        RoundUp(isolate_end, page_size) - reserved_region_address;

    CHECK(isolate_ptr_compr_cage_.page_allocator()->AllocatePagesAt(
        reserved_region_address, reserved_region_size,
        PageAllocator::Permission::kNoAccess));
  }

  // Commit pages where the Isolate will be stored.
  {
    size_t commit_page_size = platform_page_allocator->CommitPageSize();
    Address committed_region_address =
        RoundDown(isolate_address, commit_page_size);
    size_t committed_region_size =
        RoundUp(isolate_end, commit_page_size) - committed_region_address;

    // We use |isolate_ptr_compr_cage_.reservation()| directly here because
    // |page_allocator_| has a bigger commit page size than we actually need.
    CHECK(isolate_ptr_compr_cage_.reservation()->SetPermissions(
        committed_region_address, committed_region_size,
        PageAllocator::kReadWrite));

    if (Heap::ShouldZapGarbage()) {
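      // Fill the freshly committed region with kZapValue so that reads of
      // uninitialized memory are easy to recognize while debugging.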
      MemsetPointer(reinterpret_cast<Address*>(committed_region_address),
                    kZapValue, committed_region_size / kSystemPointerSize);
    }
  }
  isolate_memory_ = reinterpret_cast<void*>(isolate_address);
}
#endif  // V8_COMPRESS_POINTERS_IN_ISOLATE_CAGE

}  // namespace internal
}  // namespace v8