// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/allocation.h"

#include <stdlib.h>  // For free, malloc.

#include "src/base/bits.h"
#include "src/base/logging.h"
#include "src/base/platform/platform.h"
#include "src/utils.h"
#include "src/v8.h"

#if V8_LIBC_BIONIC
#include <malloc.h>  // NOLINT
#endif

#if defined(LEAK_SANITIZER)
#include <sanitizer/lsan_interface.h>
#endif

namespace v8 {
namespace internal {

namespace {

// Platform-specific aligned allocation primitive. Returns nullptr on
// failure; callers are responsible for retry/abort policy.
void* AlignedAllocInternal(size_t size, size_t alignment) {
#if V8_OS_WIN
  return _aligned_malloc(size, alignment);
#elif V8_LIBC_BIONIC
  // posix_memalign is not exposed in some Android versions, so we fall back to
  // memalign. See http://code.google.com/p/android/issues/detail?id=35391.
  return memalign(alignment, size);
#else
  void* result = nullptr;
  if (posix_memalign(&result, alignment, size) != 0) return nullptr;
  return result;
#endif
}

}  // namespace

43 44
void* Malloced::New(size_t size) {
  void* result = malloc(size);
45 46 47 48 49 50
  if (result == nullptr) {
    V8::GetCurrentPlatform()->OnCriticalMemoryPressure();
    result = malloc(size);
    if (result == nullptr) {
      V8::FatalProcessOutOfMemory("Malloced operator new");
    }
51
  }
52 53 54 55 56 57 58 59 60 61
  return result;
}


// Releases memory previously obtained from Malloced::New.
void Malloced::Delete(void* p) { free(p); }


char* StrDup(const char* str) {
62
  int length = StrLength(str);
63
  char* result = NewArray<char>(length + 1);
64
  MemCopy(result, str, length);
65 66 67 68 69
  result[length] = '\0';
  return result;
}


70 71
char* StrNDup(const char* str, int n) {
  int length = StrLength(str);
72 73
  if (n < length) length = n;
  char* result = NewArray<char>(length + 1);
74
  MemCopy(result, str, length);
75 76 77 78
  result[length] = '\0';
  return result;
}

79 80

void* AlignedAlloc(size_t size, size_t alignment) {
81
  DCHECK_LE(V8_ALIGNOF(void*), alignment);
82
  DCHECK(base::bits::IsPowerOfTwo(alignment));
83 84 85 86 87 88 89 90
  void* ptr = AlignedAllocInternal(size, alignment);
  if (ptr == nullptr) {
    V8::GetCurrentPlatform()->OnCriticalMemoryPressure();
    ptr = AlignedAllocInternal(size, alignment);
    if (ptr == nullptr) {
      V8::FatalProcessOutOfMemory("AlignedAlloc");
    }
  }
91 92 93 94 95 96 97 98 99 100 101 102 103 104 105
  return ptr;
}


// Releases a block previously returned by AlignedAlloc, using the
// deallocator matching the platform-specific allocator above.
void AlignedFree(void *ptr) {
#if V8_OS_WIN
  _aligned_free(ptr);
#elif V8_LIBC_BIONIC
  // Using free is not correct in general, but for V8_LIBC_BIONIC it is.
  free(ptr);
#else
  // posix_memalign memory is released with plain free.
  free(ptr);
#endif
}

// MemoryPermission values are converted to base::OS::MemoryPermission with
// static_cast, so the two enums must stay numerically identical; enforce
// that at compile time.
#define STATIC_ASSERT_ENUM(a, b)                            \
  static_assert(static_cast<int>(a) == static_cast<int>(b), \
                "mismatching enum: " #a)

STATIC_ASSERT_ENUM(MemoryPermission::kNoAccess,
                   base::OS::MemoryPermission::kNoAccess);
STATIC_ASSERT_ENUM(MemoryPermission::kReadWrite,
                   base::OS::MemoryPermission::kReadWrite);
STATIC_ASSERT_ENUM(MemoryPermission::kReadWriteExecute,
                   base::OS::MemoryPermission::kReadWriteExecute);
STATIC_ASSERT_ENUM(MemoryPermission::kReadExecute,
                   base::OS::MemoryPermission::kReadExecute);

#undef STATIC_ASSERT_ENUM

// Default Memory Manager.
// TODO(bbudge) Move this to libplatform.
class DefaultMemoryManager {
 public:
  // OS granularities for reserving and committing memory, respectively.
  static size_t AllocatePageSize() { return base::OS::AllocatePageSize(); }
  static size_t CommitPageSize() { return base::OS::CommitPageSize(); }

  // Returns a randomized address suitable as an allocation hint.
  static void* GetRandomMmapAddr() { return base::OS::GetRandomMmapAddr(); }

  // Reserves |size| bytes near |address| with the given |alignment| and
  // protection. On success the region is registered with LeakSanitizer so
  // pointers stored in it count as roots.
  static void* AllocatePages(void* address, size_t size, size_t alignment,
                             MemoryPermission access) {
    void* result =
        base::OS::Allocate(address, size, alignment,
                           static_cast<base::OS::MemoryPermission>(access));
#if defined(LEAK_SANITIZER)
    if (result != nullptr) {
      __lsan_register_root_region(result, size);
    }
#endif
    return result;
  }

  // Releases an entire region obtained from AllocatePages; unregisters it
  // from LeakSanitizer only if the OS free succeeded.
  static bool FreePages(void* address, const size_t size) {
    bool result = base::OS::Free(address, size);
#if defined(LEAK_SANITIZER)
    if (result) {
      __lsan_unregister_root_region(address, size);
    }
#endif
    return result;
  }

  // Shrinks a region from |size| to |new_size| by releasing its tail.
  // The LeakSanitizer root region is re-registered to cover only the
  // retained prefix.
  static bool ReleasePages(void* address, size_t size, size_t new_size) {
    DCHECK_LT(new_size, size);
    bool result = base::OS::Release(reinterpret_cast<byte*>(address) + new_size,
                                    size - new_size);
#if defined(LEAK_SANITIZER)
    if (result) {
      __lsan_unregister_root_region(address, size);
      __lsan_register_root_region(address, new_size);
    }
#endif
    return result;
  }

  // Changes the protection of [address, address + size).
  static bool SetPermissions(void* address, size_t size,
                             MemoryPermission access) {
    return base::OS::SetPermissions(
        address, size, static_cast<base::OS::MemoryPermission>(access));
  }
};

// Returns the OS granularity used for reserving address space.
size_t AllocatePageSize() { return DefaultMemoryManager::AllocatePageSize(); }

// Returns the OS granularity used for committing memory.
size_t CommitPageSize() { return DefaultMemoryManager::CommitPageSize(); }

// Generate a random address to be used for hinting allocation calls.
// Forwards to the default memory manager's randomized hint.
void* GetRandomMmapAddr() { return DefaultMemoryManager::GetRandomMmapAddr(); }

// Reserves |size| bytes of address space near |address| with the given
// |alignment| and protection; forwards to the default memory manager.
void* AllocatePages(void* address, size_t size, size_t alignment,
                    MemoryPermission access) {
  return DefaultMemoryManager::AllocatePages(address, size, alignment, access);
}

// Releases an entire region obtained from AllocatePages.
bool FreePages(void* address, const size_t size) {
  return DefaultMemoryManager::FreePages(address, size);
}

// Shrinks a region obtained from AllocatePages from |size| to |new_size|.
bool ReleasePages(void* address, size_t size, size_t new_size) {
  return DefaultMemoryManager::ReleasePages(address, size, new_size);
}

// Changes the protection of [address, address + size).
bool SetPermissions(void* address, size_t size, MemoryPermission access) {
  return DefaultMemoryManager::SetPermissions(address, size, access);
}

// Reserves a single read/write page near |address|. On success stores the
// page size in |allocated| and returns the page; returns nullptr on
// failure (leaving |allocated| untouched).
byte* AllocatePage(void* address, size_t* allocated) {
  const size_t page_size = AllocatePageSize();
  void* page = AllocatePages(address, page_size, page_size,
                             MemoryPermission::kReadWrite);
  if (page != nullptr) *allocated = page_size;
  return static_cast<byte*>(page);
}

205 206 207
VirtualMemory::VirtualMemory() : address_(nullptr), size_(0) {}

// Reserves |size| bytes (rounded up to the allocation page size) of
// inaccessible address space near |hint|, aligned to |alignment|. On
// failure the object remains empty (no reservation).
VirtualMemory::VirtualMemory(size_t size, void* hint, size_t alignment)
    : address_(nullptr), size_(0) {
  const size_t page_size = AllocatePageSize();
  const size_t alloc_size = RoundUp(size, page_size);
  void* reservation =
      AllocatePages(hint, alloc_size, alignment, MemoryPermission::kNoAccess);
  if (reservation == nullptr) return;
  address_ = reservation;
  size_ = alloc_size;
}

// Releases the reservation, if this object still owns one.
VirtualMemory::~VirtualMemory() {
  if (!IsReserved()) return;
  Free();
}

// Forgets the reservation without releasing it; used after ownership has
// been handed off (see TakeControl / Free).
void VirtualMemory::Reset() {
  address_ = nullptr;
  size_ = 0;
}

229
bool VirtualMemory::SetPermissions(void* address, size_t size,
230
                                   MemoryPermission access) {
231
  CHECK(InVM(address, size));
232
  bool result = v8::internal::SetPermissions(address, size, access);
233 234
  DCHECK(result);
  USE(result);
235
  return result;
236 237
}

238
size_t VirtualMemory::Release(void* free_start) {
239
  DCHECK(IsReserved());
240
  DCHECK(IsAddressAligned(static_cast<Address>(free_start), CommitPageSize()));
241 242
  // Notice: Order is important here. The VirtualMemory object might live
  // inside the allocated region.
243 244 245
  const size_t free_size = size_ - (reinterpret_cast<size_t>(free_start) -
                                    reinterpret_cast<size_t>(address_));
  CHECK(InVM(free_start, free_size));
246 247 248
  DCHECK_LT(address_, free_start);
  DCHECK_LT(free_start, reinterpret_cast<void*>(
                            reinterpret_cast<size_t>(address_) + size_));
249
  CHECK(ReleasePages(address_, size_, size_ - free_size));
250 251
  size_ -= free_size;
  return free_size;
252 253
}

254
void VirtualMemory::Free() {
255 256 257 258 259 260 261
  DCHECK(IsReserved());
  // Notice: Order is important here. The VirtualMemory object might live
  // inside the allocated region.
  void* address = address_;
  size_t size = size_;
  CHECK(InVM(address, size));
  Reset();
262
  CHECK(FreePages(address, size));
263 264 265 266 267 268 269 270 271 272 273
}

// Transfers ownership of |from|'s reservation to this object, which must
// currently be empty; |from| is left holding nothing.
void VirtualMemory::TakeControl(VirtualMemory* from) {
  DCHECK(!IsReserved());
  address_ = from->address_;
  size_ = from->size_;
  from->Reset();
}

// Attempts to place a |size|-byte reservation near |hint| into |result|.
// On first failure, signals memory pressure to the embedder and retries
// once. Returns whether |result| ended up holding a reservation.
bool AllocVirtualMemory(size_t size, void* hint, VirtualMemory* result) {
  VirtualMemory attempt(size, hint);
  if (!attempt.IsReserved()) {
    // Ask the embedder to free up memory, then try a second time.
    V8::GetCurrentPlatform()->OnCriticalMemoryPressure();
    VirtualMemory retry(size, hint);
    result->TakeControl(&retry);
    return result->IsReserved();
  }
  result->TakeControl(&attempt);
  return true;
}

bool AlignedAllocVirtualMemory(size_t size, size_t alignment, void* hint,
286
                               VirtualMemory* result) {
287
  VirtualMemory first_try(size, hint, alignment);
288 289 290 291 292 293
  if (first_try.IsReserved()) {
    result->TakeControl(&first_try);
    return true;
  }

  V8::GetCurrentPlatform()->OnCriticalMemoryPressure();
294
  VirtualMemory second_try(size, hint, alignment);
295 296 297 298
  result->TakeControl(&second_try);
  return result->IsReserved();
}

}  // namespace internal
}  // namespace v8