// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/allocation.h"

#include <stdlib.h>  // For free, malloc.
#include "src/base/bits.h"
#include "src/base/lazy-instance.h"
#include "src/base/logging.h"
#include "src/base/lsan-page-allocator.h"
#include "src/base/page-allocator.h"
#include "src/base/platform/platform.h"
#include "src/memcopy.h"
#include "src/v8.h"
#include "src/vector.h"

#if V8_LIBC_BIONIC
#include <malloc.h>  // NOLINT
#endif

namespace v8 {
namespace internal {

namespace {

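// Allocates |size| bytes with at least |alignment| alignment, using the
// aligned-allocation primitive available on the current platform. Returns
// nullptr on failure.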
void* AlignedAllocInternal(size_t size, size_t alignment) {
  void* ptr;
#if V8_OS_WIN
  ptr = _aligned_malloc(size, alignment);
#elif V8_LIBC_BIONIC
  // posix_memalign is not exposed in some Android versions, so we fall back to
  // memalign. See http://code.google.com/p/android/issues/detail?id=35391.
  ptr = memalign(alignment, size);
#else
  if (posix_memalign(&ptr, alignment, size)) ptr = nullptr;
#endif
  return ptr;
}

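// Picks the page allocator provided by the embedder's Platform, falling back
// to a process-wide default allocator, and wraps it for LeakSanitizer when
// LEAK_SANITIZER is defined.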
class PageAllocatorInitializer {
 public:
  PageAllocatorInitializer() {
    page_allocator_ = V8::GetCurrentPlatform()->GetPageAllocator();
    if (page_allocator_ == nullptr) {
      static base::LeakyObject<base::PageAllocator> default_page_allocator;
      page_allocator_ = default_page_allocator.get();
    }
#if defined(LEAK_SANITIZER)
    static base::LeakyObject<base::LsanPageAllocator> lsan_allocator(
        page_allocator_);
    page_allocator_ = lsan_allocator.get();
#endif
  }

  PageAllocator* page_allocator() const { return page_allocator_; }

  void SetPageAllocatorForTesting(PageAllocator* allocator) {
    page_allocator_ = allocator;
  }

 private:
  PageAllocator* page_allocator_;
};

DEFINE_LAZY_LEAKY_OBJECT_GETTER(PageAllocatorInitializer,
                                GetPageTableInitializer)

// We will attempt allocation this many times. After each failure, we call
// OnCriticalMemoryPressure to try to free some memory.
const int kAllocationTries = 2;

}  // namespace

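// Returns the process-wide page allocator, creating it lazily on first use.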
v8::PageAllocator* GetPlatformPageAllocator() {
  DCHECK_NOT_NULL(GetPageTableInitializer()->page_allocator());
  return GetPageTableInitializer()->page_allocator();
}

v8::PageAllocator* SetPlatformPageAllocatorForTesting(
    v8::PageAllocator* new_page_allocator) {
  v8::PageAllocator* old_page_allocator = GetPlatformPageAllocator();
  GetPageTableInitializer()->SetPageAllocatorForTesting(new_page_allocator);
  return old_page_allocator;
}

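// Allocation entry point for Malloced-derived objects: retries under memory
// pressure and treats final failure as fatal, so callers never see nullptr.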
void* Malloced::New(size_t size) {
  void* result = AllocWithRetry(size);
  if (result == nullptr) {
    V8::FatalProcessOutOfMemory(nullptr, "Malloced operator new");
  }
  return result;
}

void Malloced::Delete(void* p) {
  free(p);
}

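// Returns a NUL-terminated copy of |str| allocated with NewArray<char>; the
// caller owns the result.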
char* StrDup(const char* str) {
  size_t length = strlen(str);
  char* result = NewArray<char>(length + 1);
  MemCopy(result, str, length);
  result[length] = '\0';
  return result;
}

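// Like StrDup, but copies at most |n| characters of |str|.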
char* StrNDup(const char* str, size_t n) {
  size_t length = strlen(str);
  if (n < length) length = n;
  char* result = NewArray<char>(length + 1);
  MemCopy(result, str, length);
  result[length] = '\0';
  return result;
}

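// malloc() wrapper that retries up to kAllocationTries times, reporting
// critical memory pressure to the embedder between attempts. Returns nullptr
// if every attempt fails.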
void* AllocWithRetry(size_t size) {
  void* result = nullptr;
  for (int i = 0; i < kAllocationTries; ++i) {
    result = malloc(size);
    if (result != nullptr) break;
    if (!OnCriticalMemoryPressure(size)) break;
  }
  return result;
}

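// Aligned counterpart of AllocWithRetry; unlike AllocWithRetry, exhausting all
// retries here is treated as a fatal out-of-memory condition.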
void* AlignedAlloc(size_t size, size_t alignment) {
  DCHECK_LE(alignof(void*), alignment);
  DCHECK(base::bits::IsPowerOfTwo(alignment));
  void* result = nullptr;
  for (int i = 0; i < kAllocationTries; ++i) {
    result = AlignedAllocInternal(size, alignment);
    if (result != nullptr) break;
    if (!OnCriticalMemoryPressure(size + alignment)) break;
  }
  if (result == nullptr) {
    V8::FatalProcessOutOfMemory(nullptr, "AlignedAlloc");
  }
  return result;
}

void AlignedFree(void *ptr) {
#if V8_OS_WIN
  _aligned_free(ptr);
#elif V8_LIBC_BIONIC
  // Using free is not correct in general, but for V8_LIBC_BIONIC it is.
  free(ptr);
#else
  free(ptr);
#endif
}

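// Thin wrappers that forward to the platform page allocator.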
size_t AllocatePageSize() {
  return GetPlatformPageAllocator()->AllocatePageSize();
}

size_t CommitPageSize() { return GetPlatformPageAllocator()->CommitPageSize(); }

void SetRandomMmapSeed(int64_t seed) {
  GetPlatformPageAllocator()->SetRandomMmapSeed(seed);
}

void* GetRandomMmapAddr() {
  return GetPlatformPageAllocator()->GetRandomMmapAddr();
}

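// Page-granular allocation through |page_allocator|, using the same
// retry-under-memory-pressure policy as AllocWithRetry.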
void* AllocatePages(v8::PageAllocator* page_allocator, void* address,
                    size_t size, size_t alignment,
                    PageAllocator::Permission access) {
  DCHECK_NOT_NULL(page_allocator);
  DCHECK_EQ(address, AlignedAddress(address, alignment));
  DCHECK(IsAligned(size, page_allocator->AllocatePageSize()));
  void* result = nullptr;
  for (int i = 0; i < kAllocationTries; ++i) {
    result = page_allocator->AllocatePages(address, size, alignment, access);
    if (result != nullptr) break;
    size_t request_size = size + alignment - page_allocator->AllocatePageSize();
    if (!OnCriticalMemoryPressure(request_size)) break;
  }
  return result;
}

bool FreePages(v8::PageAllocator* page_allocator, void* address,
               const size_t size) {
  DCHECK_NOT_NULL(page_allocator);
  DCHECK(IsAligned(size, page_allocator->AllocatePageSize()));
  return page_allocator->FreePages(address, size);
}

bool ReleasePages(v8::PageAllocator* page_allocator, void* address, size_t size,
                  size_t new_size) {
  DCHECK_NOT_NULL(page_allocator);
  DCHECK_LT(new_size, size);
  DCHECK(IsAligned(new_size, page_allocator->CommitPageSize()));
  return page_allocator->ReleasePages(address, size, new_size);
}

bool SetPermissions(v8::PageAllocator* page_allocator, void* address,
                    size_t size, PageAllocator::Permission access) {
  DCHECK_NOT_NULL(page_allocator);
  return page_allocator->SetPermissions(address, size, access);
}

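// Allocates a single read-write page, reporting its size through |allocated|;
// returns nullptr and leaves |allocated| untouched on failure.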
byte* AllocatePage(v8::PageAllocator* page_allocator, void* address,
                   size_t* allocated) {
  DCHECK_NOT_NULL(page_allocator);
  size_t page_size = page_allocator->AllocatePageSize();
  void* result = AllocatePages(page_allocator, address, page_size, page_size,
                               PageAllocator::kReadWrite);
  if (result != nullptr) *allocated = page_size;
  return static_cast<byte*>(result);
}

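// Notifies the embedder of critical memory pressure, falling back to the
// parameterless overload when the length-aware one returns false.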
bool OnCriticalMemoryPressure(size_t length) {
  // TODO(bbudge) Rework retry logic once embedders implement the more
  // informative overload.
  if (!V8::GetCurrentPlatform()->OnCriticalMemoryPressure(length)) {
    V8::GetCurrentPlatform()->OnCriticalMemoryPressure();
  }
  return true;
}

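// Reserves |size| bytes of address space (with no access permissions) at the
// requested alignment; on failure the region is left empty. A minimal usage
// sketch (not taken from this file): construct a VirtualMemory with the
// platform page allocator, check IsReserved(), then commit pages with
// SetPermissions().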
VirtualMemory::VirtualMemory(v8::PageAllocator* page_allocator, size_t size,
                             void* hint, size_t alignment)
    : page_allocator_(page_allocator) {
  DCHECK_NOT_NULL(page_allocator);
  DCHECK(IsAligned(size, page_allocator_->CommitPageSize()));
  size_t page_size = page_allocator_->AllocatePageSize();
  alignment = RoundUp(alignment, page_size);
  Address address = reinterpret_cast<Address>(
      AllocatePages(page_allocator_, hint, RoundUp(size, page_size), alignment,
                    PageAllocator::kNoAccess));
  if (address != kNullAddress) {
    DCHECK(IsAligned(address, alignment));
    region_ = base::AddressRegion(address, size);
  }
}

VirtualMemory::~VirtualMemory() {
  if (IsReserved()) {
    Free();
  }
}

void VirtualMemory::Reset() {
  page_allocator_ = nullptr;
  region_ = base::AddressRegion();
}

bool VirtualMemory::SetPermissions(Address address, size_t size,
                                   PageAllocator::Permission access) {
  CHECK(InVM(address, size));
  bool result =
      v8::internal::SetPermissions(page_allocator_, address, size, access);
  DCHECK(result);
  return result;
}

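// Gives the tail of the reservation, starting at |free_start|, back to the OS
// and shrinks the tracked region; the return value is the number of bytes
// released.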
size_t VirtualMemory::Release(Address free_start) {
  DCHECK(IsReserved());
  DCHECK(IsAligned(free_start, page_allocator_->CommitPageSize()));
  // Notice: Order is important here. The VirtualMemory object might live
  // inside the allocated region.

  const size_t old_size = region_.size();
  const size_t free_size = old_size - (free_start - region_.begin());
  CHECK(InVM(free_start, free_size));
  region_.set_size(old_size - free_size);
  CHECK(ReleasePages(page_allocator_, reinterpret_cast<void*>(region_.begin()),
                     old_size, region_.size()));
  return free_size;
}

void VirtualMemory::Free() {
  DCHECK(IsReserved());
  // Notice: Order is important here. The VirtualMemory object might live
  // inside the allocated region.
  v8::PageAllocator* page_allocator = page_allocator_;
  base::AddressRegion region = region_;
  Reset();
  // FreePages expects size to be aligned to allocation granularity; however,
  // ReleasePages may leave size at only commit granularity. Align it here.
  CHECK(FreePages(page_allocator, reinterpret_cast<void*>(region.begin()),
                  RoundUp(region.size(), page_allocator->AllocatePageSize())));
}

}  // namespace internal
}  // namespace v8