// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/utils/allocation.h"

#include <stdlib.h>  // For free, malloc.

#include "src/base/bits.h"
#include "src/base/bounded-page-allocator.h"
#include "src/base/lazy-instance.h"
#include "src/base/logging.h"
#include "src/base/page-allocator.h"
#include "src/base/platform/platform.h"
#include "src/base/sanitizer/lsan-page-allocator.h"
#include "src/base/vector.h"
#include "src/flags/flags.h"
#include "src/init/v8.h"
#include "src/utils/memcopy.h"

#if V8_LIBC_BIONIC
#include <malloc.h>

#include "src/base/platform/wrappers.h"
#endif

namespace v8 {
namespace internal {

namespace {

void* AlignedAllocInternal(size_t size, size_t alignment) {
  void* ptr;
#if V8_OS_WIN
  ptr = _aligned_malloc(size, alignment);
#elif V8_LIBC_BIONIC
  // posix_memalign is not exposed in some Android versions, so we fall back to
  // memalign. See http://code.google.com/p/android/issues/detail?id=35391.
  ptr = memalign(alignment, size);
#else
  if (posix_memalign(&ptr, alignment, size)) ptr = nullptr;
#endif
  return ptr;
}

class PageAllocatorInitializer {
 public:
  PageAllocatorInitializer() {
    page_allocator_ = V8::GetCurrentPlatform()->GetPageAllocator();
    if (page_allocator_ == nullptr) {
      static base::LeakyObject<base::PageAllocator> default_page_allocator;
      page_allocator_ = default_page_allocator.get();
    }
#if defined(LEAK_SANITIZER)
    static base::LeakyObject<base::LsanPageAllocator> lsan_allocator(
        page_allocator_);
    page_allocator_ = lsan_allocator.get();
#endif
  }

  PageAllocator* page_allocator() const { return page_allocator_; }

  void SetPageAllocatorForTesting(PageAllocator* allocator) {
    page_allocator_ = allocator;
  }

 private:
  PageAllocator* page_allocator_;
};

DEFINE_LAZY_LEAKY_OBJECT_GETTER(PageAllocatorInitializer,
                                GetPageTableInitializer)

// We will attempt allocation this many times. After each failure, we call
// OnCriticalMemoryPressure to try to free some memory.
const int kAllocationTries = 2;

}  // namespace

v8::PageAllocator* GetPlatformPageAllocator() {
  DCHECK_NOT_NULL(GetPageTableInitializer()->page_allocator());
  return GetPageTableInitializer()->page_allocator();
}

v8::PageAllocator* SetPlatformPageAllocatorForTesting(
    v8::PageAllocator* new_page_allocator) {
  v8::PageAllocator* old_page_allocator = GetPlatformPageAllocator();
  GetPageTableInitializer()->SetPageAllocatorForTesting(new_page_allocator);
  return old_page_allocator;
}

void* Malloced::operator new(size_t size) {
  void* result = AllocWithRetry(size);
  if (result == nullptr) {
    V8::FatalProcessOutOfMemory(nullptr, "Malloced operator new");
  }
  return result;
}

void Malloced::operator delete(void* p) { base::Free(p); }
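
// Illustrative usage (sketch, not part of the original file): classes that
// derive from Malloced get malloc-backed allocation that crashes the process
// on OOM instead of returning nullptr:
//
//   class Foo : public Malloced { ... };
//   Foo* foo = new Foo();  // Goes through AllocWithRetry above.
//   delete foo;            // Released via base::Free.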

char* StrDup(const char* str) {
  size_t length = strlen(str);
  char* result = NewArray<char>(length + 1);
  MemCopy(result, str, length);
  result[length] = '\0';
  return result;
}

char* StrNDup(const char* str, size_t n) {
  size_t length = strlen(str);
  if (n < length) length = n;
  char* result = NewArray<char>(length + 1);
  MemCopy(result, str, length);
  result[length] = '\0';
  return result;
}

void* AllocWithRetry(size_t size) {
  void* result = nullptr;
  for (int i = 0; i < kAllocationTries; ++i) {
    result = base::Malloc(size);
    if (result != nullptr) break;
    if (!OnCriticalMemoryPressure(size)) break;
  }
  return result;
}

void* AlignedAlloc(size_t size, size_t alignment) {
  DCHECK_LE(alignof(void*), alignment);
  DCHECK(base::bits::IsPowerOfTwo(alignment));
  void* result = nullptr;
  for (int i = 0; i < kAllocationTries; ++i) {
    result = AlignedAllocInternal(size, alignment);
    if (result != nullptr) break;
    if (!OnCriticalMemoryPressure(size + alignment)) break;
  }
  if (result == nullptr) {
    V8::FatalProcessOutOfMemory(nullptr, "AlignedAlloc");
  }
  return result;
}

void AlignedFree(void* ptr) {
#if V8_OS_WIN
  _aligned_free(ptr);
#elif V8_LIBC_BIONIC
  // Using free is not correct in general, but for V8_LIBC_BIONIC it is.
  base::Free(ptr);
#else
  base::Free(ptr);
#endif
}
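
// Illustrative usage (sketch, not part of the original file): blocks from
// AlignedAlloc must be returned through AlignedFree, because on Windows they
// come from _aligned_malloc and must not be passed to plain free():
//
//   void* block = AlignedAlloc(4096, 64);  // 64-byte aligned; fatal on OOM.
//   ...
//   AlignedFree(block);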

size_t AllocatePageSize() {
  return GetPlatformPageAllocator()->AllocatePageSize();
}

size_t CommitPageSize() { return GetPlatformPageAllocator()->CommitPageSize(); }

void SetRandomMmapSeed(int64_t seed) {
  GetPlatformPageAllocator()->SetRandomMmapSeed(seed);
}

void* GetRandomMmapAddr() {
  return GetPlatformPageAllocator()->GetRandomMmapAddr();
}

void* AllocatePages(v8::PageAllocator* page_allocator, void* hint, size_t size,
                    size_t alignment, PageAllocator::Permission access) {
  DCHECK_NOT_NULL(page_allocator);
  DCHECK_EQ(hint, AlignedAddress(hint, alignment));
  DCHECK(IsAligned(size, page_allocator->AllocatePageSize()));
  if (FLAG_randomize_all_allocations) {
    hint = page_allocator->GetRandomMmapAddr();
  }
  void* result = nullptr;
  for (int i = 0; i < kAllocationTries; ++i) {
    result = page_allocator->AllocatePages(hint, size, alignment, access);
    if (result != nullptr) break;
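    // An aligned allocation may need up to (alignment - page size) bytes on
    // top of |size| to find a suitably aligned start address, so report that
    // worst case as the request size when signalling memory pressure.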
    size_t request_size = size + alignment - page_allocator->AllocatePageSize();
    if (!OnCriticalMemoryPressure(request_size)) break;
  }
  return result;
}

bool FreePages(v8::PageAllocator* page_allocator, void* address,
               const size_t size) {
  DCHECK_NOT_NULL(page_allocator);
  DCHECK(IsAligned(size, page_allocator->AllocatePageSize()));
  return page_allocator->FreePages(address, size);
}

bool ReleasePages(v8::PageAllocator* page_allocator, void* address, size_t size,
                  size_t new_size) {
  DCHECK_NOT_NULL(page_allocator);
  DCHECK_LT(new_size, size);
  DCHECK(IsAligned(new_size, page_allocator->CommitPageSize()));
  return page_allocator->ReleasePages(address, size, new_size);
}

bool SetPermissions(v8::PageAllocator* page_allocator, void* address,
                    size_t size, PageAllocator::Permission access) {
  DCHECK_NOT_NULL(page_allocator);
  return page_allocator->SetPermissions(address, size, access);
}

bool OnCriticalMemoryPressure(size_t length) {
  // TODO(bbudge) Rework retry logic once embedders implement the more
  // informative overload.
  if (!V8::GetCurrentPlatform()->OnCriticalMemoryPressure(length)) {
    V8::GetCurrentPlatform()->OnCriticalMemoryPressure();
  }
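  // Unconditionally returning true tells the retrying callers (AllocWithRetry,
  // AlignedAlloc, AllocatePages) that another allocation attempt is worthwhile.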
  return true;
}

VirtualMemory::VirtualMemory() = default;

VirtualMemory::VirtualMemory(v8::PageAllocator* page_allocator, size_t size,
                             void* hint, size_t alignment, JitPermission jit)
    : page_allocator_(page_allocator) {
  DCHECK_NOT_NULL(page_allocator);
  DCHECK(IsAligned(size, page_allocator_->CommitPageSize()));
  size_t page_size = page_allocator_->AllocatePageSize();
  alignment = RoundUp(alignment, page_size);
  PageAllocator::Permission permissions =
      jit == kMapAsJittable ? PageAllocator::kNoAccessWillJitLater
                            : PageAllocator::kNoAccess;
  Address address = reinterpret_cast<Address>(AllocatePages(
      page_allocator_, hint, RoundUp(size, page_size), alignment, permissions));
  if (address != kNullAddress) {
    DCHECK(IsAligned(address, alignment));
    region_ = base::AddressRegion(address, size);
  }
}

VirtualMemory::~VirtualMemory() {
  if (IsReserved()) {
    Free();
  }
}
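
// Illustrative usage (sketch, not part of the original file; assumes the
// defaulted alignment and JIT arguments declared in allocation.h):
//
//   v8::PageAllocator* allocator = GetPlatformPageAllocator();
//   size_t size = 4 * allocator->CommitPageSize();
//   VirtualMemory reservation(allocator, size, nullptr);
//   if (reservation.IsReserved()) {
//     // Pages start out as kNoAccess; commit them before use.
//     CHECK(reservation.SetPermissions(reservation.address(), size,
//                                      PageAllocator::kReadWrite));
//   }
//   // ~VirtualMemory() releases the reservation via Free().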

void VirtualMemory::Reset() {
  page_allocator_ = nullptr;
  region_ = base::AddressRegion();
}

bool VirtualMemory::SetPermissions(Address address, size_t size,
                                   PageAllocator::Permission access) {
  CHECK(InVM(address, size));
  bool result =
      v8::internal::SetPermissions(page_allocator_, address, size, access);
  DCHECK(result);
  return result;
}

size_t VirtualMemory::Release(Address free_start) {
  DCHECK(IsReserved());
  DCHECK(IsAligned(free_start, page_allocator_->CommitPageSize()));
  // Notice: Order is important here. The VirtualMemory object might live
  // inside the allocated region.

  const size_t old_size = region_.size();
  const size_t free_size = old_size - (free_start - region_.begin());
  CHECK(InVM(free_start, free_size));
  region_.set_size(old_size - free_size);
  CHECK(ReleasePages(page_allocator_, reinterpret_cast<void*>(region_.begin()),
                     old_size, region_.size()));
  return free_size;
}

void VirtualMemory::Free() {
  DCHECK(IsReserved());
  // Notice: Order is important here. The VirtualMemory object might live
  // inside the allocated region.
  v8::PageAllocator* page_allocator = page_allocator_;
  base::AddressRegion region = region_;
  Reset();
  // FreePages expects the size to be aligned to the allocation granularity,
  // but ReleasePages may leave it aligned only to the commit granularity.
  // Align it here.
  CHECK(FreePages(page_allocator, reinterpret_cast<void*>(region.begin()),
                  RoundUp(region.size(), page_allocator->AllocatePageSize())));
}

void VirtualMemory::FreeReadOnly() {
  DCHECK(IsReserved());
  // The only difference from Free is that this doesn't call Reset, which would
  // write to the VirtualMemory object.
  v8::PageAllocator* page_allocator = page_allocator_;
  base::AddressRegion region = region_;

  // FreePages expects the size to be aligned to the allocation granularity,
  // but ReleasePages may leave it aligned only to the commit granularity.
  // Align it here.
  CHECK(FreePages(page_allocator, reinterpret_cast<void*>(region.begin()),
                  RoundUp(region.size(), page_allocator->AllocatePageSize())));
}

VirtualMemoryCage::VirtualMemoryCage() = default;

VirtualMemoryCage::~VirtualMemoryCage() { Free(); }

VirtualMemoryCage::VirtualMemoryCage(VirtualMemoryCage&& other) V8_NOEXCEPT {
  *this = std::move(other);
}

VirtualMemoryCage& VirtualMemoryCage::operator=(VirtualMemoryCage&& other)
    V8_NOEXCEPT {
  page_allocator_ = std::move(other.page_allocator_);
  reservation_ = std::move(other.reservation_);
  return *this;
}

namespace {
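// Returns the lowest cage start address at or above |reservation_start| for
// which (start + params.base_bias_size) satisfies params.base_alignment.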
inline Address VirtualMemoryCageStart(
    Address reservation_start,
    const VirtualMemoryCage::ReservationParams& params) {
  return RoundUp(reservation_start + params.base_bias_size,
                 params.base_alignment) -
         params.base_bias_size;
}
}  // namespace

bool VirtualMemoryCage::InitReservation(const ReservationParams& params) {
  DCHECK(!reservation_.IsReserved());

  const size_t allocate_page_size = params.page_allocator->AllocatePageSize();
  CHECK(IsAligned(params.reservation_size, allocate_page_size));
  CHECK(params.base_alignment == ReservationParams::kAnyBaseAlignment ||
        (IsAligned(params.base_alignment, allocate_page_size) &&
         IsAligned(params.base_bias_size, allocate_page_size)));
  CHECK_LE(params.base_bias_size, params.reservation_size);

  Address hint = RoundDown(params.requested_start_hint,
                           RoundUp(params.base_alignment, allocate_page_size)) -
                 RoundUp(params.base_bias_size, allocate_page_size);

  if (params.base_alignment == ReservationParams::kAnyBaseAlignment) {
    // When the base doesn't need to be aligned, the virtual memory reservation
    // can fail only due to OOM.
    VirtualMemory reservation(params.page_allocator, params.reservation_size,
                              reinterpret_cast<void*>(hint));
    if (!reservation.IsReserved()) return false;

    reservation_ = std::move(reservation);
    base_ = reservation_.address() + params.base_bias_size;
    CHECK_EQ(reservation_.size(), params.reservation_size);
  } else {
    // Otherwise, we need to try harder by first overreserving
    // in hopes of finding a correctly aligned address within the larger
    // reservation.
    const int kMaxAttempts = 4;
    for (int attempt = 0; attempt < kMaxAttempts; ++attempt) {
      // Reserve a region of twice the size so that there is an aligned address
      // within it that's usable as the cage base.
      VirtualMemory padded_reservation(params.page_allocator,
                                       params.reservation_size * 2,
                                       reinterpret_cast<void*>(hint));
      if (!padded_reservation.IsReserved()) return false;

      // Find a properly aligned sub-region inside the reservation.
      Address address =
          VirtualMemoryCageStart(padded_reservation.address(), params);
      CHECK(padded_reservation.InVM(address, params.reservation_size));

#if defined(V8_OS_FUCHSIA)
      // Fuchsia does not respect given hints, so as a workaround we use the
      // overreserved address space region instead of trying to re-reserve
      // a subregion.
      bool overreserve = true;
#else
      // For the last attempt, use the overreserved region to avoid an OOM
      // crash. This can happen when many isolates are created in parallel and
      // race to reserve the regions.
      bool overreserve = (attempt == kMaxAttempts - 1);
#endif

      if (overreserve) {
        if (padded_reservation.InVM(address, params.reservation_size)) {
          reservation_ = std::move(padded_reservation);
          base_ = address + params.base_bias_size;
          break;
        }
      } else {
        // Now free the padded reservation and immediately try to reserve an
        // exact region at aligned address. We have to do this dancing because
        // the reservation address requirement is more complex than just a
        // certain alignment and not all operating systems support freeing parts
        // of reserved address space regions.
        padded_reservation.Free();

        VirtualMemory reservation(params.page_allocator,
                                  params.reservation_size,
                                  reinterpret_cast<void*>(address));
        if (!reservation.IsReserved()) return false;

        // The reservation could still be somewhere else but we can accept it
        // if it has the required alignment.
        Address address = VirtualMemoryCageStart(reservation.address(), params);
        if (reservation.address() == address) {
          reservation_ = std::move(reservation);
          base_ = address + params.base_bias_size;
          CHECK_EQ(reservation_.size(), params.reservation_size);
          break;
        }
      }
    }
  }
  CHECK_NE(base_, kNullAddress);
  CHECK(IsAligned(base_, params.base_alignment));

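  // Expose only whole params.page_size pages to the BoundedPageAllocator:
  // round the base up to the page size and shrink the size by the bias region
  // and the rounding waste, rounded down to a whole number of pages.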
  const Address allocatable_base = RoundUp(base_, params.page_size);
  const size_t allocatable_size =
      RoundDown(params.reservation_size - (allocatable_base - base_) -
                    params.base_bias_size,
                params.page_size);
  page_allocator_ = std::make_unique<base::BoundedPageAllocator>(
      params.page_allocator, allocatable_base, allocatable_size,
      params.page_size);
  return true;
}

void VirtualMemoryCage::Free() {
  if (IsReserved()) {
    base_ = kNullAddress;
    page_allocator_.reset();
    reservation_.Free();
  }
}

}  // namespace internal
}  // namespace v8