// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/allocation.h"

#include <stdlib.h>  // For free, malloc.
#include "src/base/bits.h"
#include "src/base/lazy-instance.h"
#include "src/base/logging.h"
#include "src/base/page-allocator.h"
#include "src/base/platform/platform.h"
#include "src/utils.h"
#include "src/v8.h"

#if V8_LIBC_BIONIC
#include <malloc.h>  // NOLINT
#endif

#if defined(LEAK_SANITIZER)
#include <sanitizer/lsan_interface.h>
#endif

namespace v8 {
namespace internal {

namespace {

void* AlignedAllocInternal(size_t size, size_t alignment) {
  void* ptr;
#if V8_OS_WIN
  ptr = _aligned_malloc(size, alignment);
#elif V8_LIBC_BIONIC
  // posix_memalign is not exposed in some Android versions, so we fall back to
  // memalign. See http://code.google.com/p/android/issues/detail?id=35391.
  ptr = memalign(alignment, size);
#else
  if (posix_memalign(&ptr, alignment, size)) ptr = nullptr;
#endif
  return ptr;
}

// TODO(bbudge) Simplify this once all embedders implement a page allocator.
struct InitializePageAllocator {
  static void Construct(void* page_allocator_ptr_arg) {
    auto page_allocator_ptr =
        reinterpret_cast<v8::PageAllocator**>(page_allocator_ptr_arg);
    v8::PageAllocator* page_allocator =
        V8::GetCurrentPlatform()->GetPageAllocator();
    if (page_allocator == nullptr) {
      static v8::base::PageAllocator default_allocator;
      page_allocator = &default_allocator;
    }
    *page_allocator_ptr = page_allocator;
  }
};

static base::LazyInstance<v8::PageAllocator*, InitializePageAllocator>::type
    page_allocator = LAZY_INSTANCE_INITIALIZER;
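// GetPageAllocator() below hands out this lazily-initialized allocator: the
// embedder's PageAllocator when the platform provides one, otherwise the
// default base::PageAllocator constructed above.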

v8::PageAllocator* GetPageAllocator() { return page_allocator.Get(); }

// We will attempt allocation this many times. After each failure, we call
// OnCriticalMemoryPressure to try to free some memory.
const int kAllocationTries = 2;

}  // namespace

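// Allocates |size| bytes with retry-on-memory-pressure; a persistent failure
// is treated as a fatal out-of-memory condition, so callers never see nullptr.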
void* Malloced::New(size_t size) {
  void* result = AllocWithRetry(size);
  if (result == nullptr) {
    V8::FatalProcessOutOfMemory(nullptr, "Malloced operator new");
  }
  return result;
}

void Malloced::Delete(void* p) {
  free(p);
}

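// StrDup and StrNDup copy into NewArray<char> buffers, so callers are expected
// to release the result with the matching DeleteArray from allocation.h.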
char* StrDup(const char* str) {
  int length = StrLength(str);
  char* result = NewArray<char>(length + 1);
  MemCopy(result, str, length);
  result[length] = '\0';
  return result;
}

char* StrNDup(const char* str, int n) {
  int length = StrLength(str);
  if (n < length) length = n;
  char* result = NewArray<char>(length + 1);
  MemCopy(result, str, length);
  result[length] = '\0';
  return result;
}

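// Calls malloc() up to kAllocationTries times, signalling critical memory
// pressure to the embedder between attempts; returns nullptr if every attempt
// fails.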
void* AllocWithRetry(size_t size) {
  void* result = nullptr;
  for (int i = 0; i < kAllocationTries; ++i) {
    result = malloc(size);
    if (result != nullptr) break;
    if (!OnCriticalMemoryPressure(size)) break;
  }
  return result;
}

void* AlignedAlloc(size_t size, size_t alignment) {
  DCHECK_LE(V8_ALIGNOF(void*), alignment);
  DCHECK(base::bits::IsPowerOfTwo(alignment));
  void* result = nullptr;
  for (int i = 0; i < kAllocationTries; ++i) {
    result = AlignedAllocInternal(size, alignment);
    if (result != nullptr) break;
    if (!OnCriticalMemoryPressure(size + alignment)) break;
  }
  if (result == nullptr) {
    V8::FatalProcessOutOfMemory(nullptr, "AlignedAlloc");
  }
  return result;
}

void AlignedFree(void *ptr) {
#if V8_OS_WIN
  _aligned_free(ptr);
#elif V8_LIBC_BIONIC
  // Using free is not correct in general, but for V8_LIBC_BIONIC it is.
  free(ptr);
#else
  free(ptr);
#endif
}
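// Illustrative usage (not part of the original file): AlignedAlloc and
// AlignedFree are meant to be used as a pair, e.g. for a 64-byte-aligned
// scratch buffer:
//
//   void* scratch = AlignedAlloc(1024, 64);
//   // ... use scratch ...
//   AlignedFree(scratch);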

size_t AllocatePageSize() { return GetPageAllocator()->AllocatePageSize(); }

size_t CommitPageSize() { return GetPageAllocator()->CommitPageSize(); }

void SetRandomMmapSeed(int64_t seed) {
  GetPageAllocator()->SetRandomMmapSeed(seed);
}

void* GetRandomMmapAddr() { return GetPageAllocator()->GetRandomMmapAddr(); }

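// Reserves |size| bytes (a multiple of the allocation page size) near
// |address|, retrying under memory pressure. With LEAK_SANITIZER enabled the
// result is registered as an LSan root region.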
void* AllocatePages(void* address, size_t size, size_t alignment,
                    PageAllocator::Permission access) {
  DCHECK_EQ(address, AlignedAddress(address, alignment));
  DCHECK_EQ(0UL, size & (GetPageAllocator()->AllocatePageSize() - 1));
  void* result = nullptr;
  for (int i = 0; i < kAllocationTries; ++i) {
    result =
        GetPageAllocator()->AllocatePages(address, size, alignment, access);
    if (result != nullptr) break;
    size_t request_size = size + alignment - AllocatePageSize();
    if (!OnCriticalMemoryPressure(request_size)) break;
  }
#if defined(LEAK_SANITIZER)
  if (result != nullptr) {
    __lsan_register_root_region(result, size);
  }
#endif
  return result;
}

bool FreePages(void* address, const size_t size) {
  DCHECK_EQ(0UL, size & (GetPageAllocator()->AllocatePageSize() - 1));
  bool result = GetPageAllocator()->FreePages(address, size);
#if defined(LEAK_SANITIZER)
  if (result) {
    __lsan_unregister_root_region(address, size);
  }
#endif
  return result;
}

bool ReleasePages(void* address, size_t size, size_t new_size) {
  DCHECK_LT(new_size, size);
  bool result = GetPageAllocator()->ReleasePages(address, size, new_size);
#if defined(LEAK_SANITIZER)
  if (result) {
    __lsan_unregister_root_region(address, size);
    __lsan_register_root_region(address, new_size);
  }
#endif
  return result;
}

bool SetPermissions(void* address, size_t size,
                    PageAllocator::Permission access) {
  return GetPageAllocator()->SetPermissions(address, size, access);
}
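// Illustrative usage (not part of the original file): pages are typically
// reserved inaccessible and committed later by flipping their permissions:
//
//   size_t page = AllocatePageSize();
//   void* region = AllocatePages(GetRandomMmapAddr(), page, page,
//                                PageAllocator::kNoAccess);
//   if (region != nullptr) {
//     CHECK(SetPermissions(region, page, PageAllocator::kReadWrite));
//   }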

byte* AllocatePage(void* address, size_t* allocated) {
  size_t page_size = AllocatePageSize();
  void* result =
      AllocatePages(address, page_size, page_size, PageAllocator::kReadWrite);
  if (result != nullptr) *allocated = page_size;
  return static_cast<byte*>(result);
}

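// Currently this always reports success, so the retry loops above always make
// a second attempt after the embedder has been asked to free memory.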
bool OnCriticalMemoryPressure(size_t length) {
  // TODO(bbudge) Rework retry logic once embedders implement the more
  // informative overload.
  if (!V8::GetCurrentPlatform()->OnCriticalMemoryPressure(length)) {
    V8::GetCurrentPlatform()->OnCriticalMemoryPressure();
  }
  return true;
}

VirtualMemory::VirtualMemory() : address_(kNullAddress), size_(0) {}

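// Reserves (but does not commit) |size| bytes rounded up to the allocation
// page size; on failure the object is simply left unreserved.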
VirtualMemory::VirtualMemory(size_t size, void* hint, size_t alignment)
    : address_(kNullAddress), size_(0) {
  size_t page_size = AllocatePageSize();
  size_t alloc_size = RoundUp(size, page_size);
  address_ = reinterpret_cast<Address>(
      AllocatePages(hint, alloc_size, alignment, PageAllocator::kNoAccess));
  if (address_ != kNullAddress) {
    size_ = alloc_size;
  }
}

VirtualMemory::~VirtualMemory() {
  if (IsReserved()) {
    Free();
  }
}

void VirtualMemory::Reset() {
  address_ = kNullAddress;
  size_ = 0;
}

bool VirtualMemory::SetPermissions(Address address, size_t size,
                                   PageAllocator::Permission access) {
  CHECK(InVM(address, size));
  bool result = v8::internal::SetPermissions(address, size, access);
  DCHECK(result);
  return result;
}

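// Gives the tail of the reservation, starting at |free_start|, back to the
// page allocator and shrinks size_ accordingly; the return value is the
// number of bytes released.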
size_t VirtualMemory::Release(Address free_start) {
  DCHECK(IsReserved());
  DCHECK(IsAddressAligned(free_start, CommitPageSize()));
  // Notice: Order is important here. The VirtualMemory object might live
  // inside the allocated region.
  const size_t free_size = size_ - (free_start - address_);
  CHECK(InVM(free_start, free_size));
  DCHECK_LT(address_, free_start);
  DCHECK_LT(free_start, address_ + size_);
  CHECK(ReleasePages(reinterpret_cast<void*>(address_), size_,
                     size_ - free_size));
  size_ -= free_size;
  return free_size;
}

void VirtualMemory::Free() {
  DCHECK(IsReserved());
  // Notice: Order is important here. The VirtualMemory object might live
  // inside the allocated region.
  Address address = address_;
  size_t size = size_;
  CHECK(InVM(address, size));
  Reset();
  // FreePages expects size to be aligned to allocation granularity. Trimming
  // may leave size at only commit granularity. Align it here.
  CHECK(FreePages(reinterpret_cast<void*>(address),
                  RoundUp(size, AllocatePageSize())));
}

void VirtualMemory::TakeControl(VirtualMemory* from) {
  DCHECK(!IsReserved());
  address_ = from->address_;
  size_ = from->size_;
  from->Reset();
}

bool AllocVirtualMemory(size_t size, void* hint, VirtualMemory* result) {
  VirtualMemory vm(size, hint);
  if (vm.IsReserved()) {
    result->TakeControl(&vm);
    return true;
  }
  return false;
}
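// Illustrative usage (not part of the original file; address() and KB are
// assumed from allocation.h/globals.h): reserve a region, then commit its
// first page on demand:
//
//   VirtualMemory reservation;
//   if (AllocVirtualMemory(512 * KB, GetRandomMmapAddr(), &reservation)) {
//     reservation.SetPermissions(reservation.address(), CommitPageSize(),
//                                PageAllocator::kReadWrite);
//   }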

bool AlignedAllocVirtualMemory(size_t size, size_t alignment, void* hint,
                               VirtualMemory* result) {
  VirtualMemory vm(size, hint, alignment);
  if (vm.IsReserved()) {
    result->TakeControl(&vm);
    return true;
  }
  return false;
}

}  // namespace internal
}  // namespace v8