// Copyright 2018 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/base/bounded-page-allocator.h"

namespace v8 {
namespace base {

BoundedPageAllocator::BoundedPageAllocator(
    v8::PageAllocator* page_allocator, Address start, size_t size,
    size_t allocate_page_size, PageInitializationMode page_initialization_mode,
    PageFreeingMode page_freeing_mode)
    : allocate_page_size_(allocate_page_size),
      commit_page_size_(page_allocator->CommitPageSize()),
      page_allocator_(page_allocator),
      region_allocator_(start, size, allocate_page_size_),
      page_initialization_mode_(page_initialization_mode),
      page_freeing_mode_(page_freeing_mode) {
  DCHECK_NOT_NULL(page_allocator);
  DCHECK(IsAligned(allocate_page_size, page_allocator->AllocatePageSize()));
  DCHECK(IsAligned(allocate_page_size_, commit_page_size_));
}

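// The bounds of the managed address range.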
BoundedPageAllocator::Address BoundedPageAllocator::begin() const {
  return region_allocator_.begin();
}

size_t BoundedPageAllocator::size() const { return region_allocator_.size(); }

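// Allocates |size| bytes at an |alignment|-aligned address within the managed
// range, trying |hint| first if it denotes a suitable free region. Returns
// nullptr if no region is available or if setting |access| permissions fails.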
void* BoundedPageAllocator::AllocatePages(void* hint, size_t size,
                                          size_t alignment,
                                          PageAllocator::Permission access) {
  MutexGuard guard(&mutex_);
  DCHECK(IsAligned(alignment, region_allocator_.page_size()));
  DCHECK(IsAligned(alignment, allocate_page_size_));

  Address address = RegionAllocator::kAllocationFailure;

  Address hint_address = reinterpret_cast<Address>(hint);
  if (hint_address && IsAligned(hint_address, alignment) &&
      region_allocator_.contains(hint_address, size)) {
    if (region_allocator_.AllocateRegionAt(hint_address, size)) {
      address = hint_address;
    }
  }

  if (address == RegionAllocator::kAllocationFailure) {
    if (alignment <= allocate_page_size_) {
      // TODO(ishell): Consider using randomized version here.
      address = region_allocator_.AllocateRegion(size);
    } else {
      address = region_allocator_.AllocateAlignedRegion(size, alignment);
    }
  }

  if (address == RegionAllocator::kAllocationFailure) {
    return nullptr;
  }

  void* ptr = reinterpret_cast<void*>(address);
  // It's assumed that free regions are in kNoAccess/kNoAccessWillJitLater
  // state.
  if (access != PageAllocator::kNoAccess &&
      access != PageAllocator::kNoAccessWillJitLater) {
    if (!page_allocator_->SetPermissions(ptr, size, access)) {
      // This most likely means that we ran out of memory.
      CHECK_EQ(region_allocator_.FreeRegion(address), size);
      return nullptr;
    }
  }

  return ptr;
}

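// Allocates the exact range [address, address + size); fails if that range is
// not free in the region allocator or if the permissions cannot be set.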
bool BoundedPageAllocator::AllocatePagesAt(Address address, size_t size,
                                           PageAllocator::Permission access) {
  DCHECK(IsAligned(address, allocate_page_size_));
  DCHECK(IsAligned(size, allocate_page_size_));

  {
    MutexGuard guard(&mutex_);
    DCHECK(region_allocator_.contains(address, size));

    if (!region_allocator_.AllocateRegionAt(address, size)) {
      return false;
    }
  }

  void* ptr = reinterpret_cast<void*>(address);
  if (!page_allocator_->SetPermissions(ptr, size, access)) {
    // This most likely means that we ran out of memory.
    CHECK_EQ(region_allocator_.FreeRegion(address), size);
    return false;
  }

  return true;
}

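// Reserves [ptr, ptr + size) for an externally created shared memory mapping.
// The range is marked kExcluded in the region allocator and its pages are
// made inaccessible.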
bool BoundedPageAllocator::ReserveForSharedMemoryMapping(void* ptr,
                                                         size_t size) {
  Address address = reinterpret_cast<Address>(ptr);
  DCHECK(IsAligned(address, allocate_page_size_));
  DCHECK(IsAligned(size, commit_page_size_));

  {
    MutexGuard guard(&mutex_);
    DCHECK(region_allocator_.contains(address, size));

    // Region allocator requires page size rather than commit size, so just
    // over-allocate there since any extra space couldn't be used anyway.
    size_t region_size = RoundUp(size, allocate_page_size_);
    if (!region_allocator_.AllocateRegionAt(
            address, region_size, RegionAllocator::RegionState::kExcluded)) {
      return false;
    }
  }

  CHECK(page_allocator_->SetPermissions(ptr, size,
                                        PageAllocator::Permission::kNoAccess));
  return true;
}

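// Frees a region previously allocated from this allocator, returning the
// underlying pages to the OS according to the configured initialization and
// freeing modes.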
bool BoundedPageAllocator::FreePages(void* raw_address, size_t size) {
  MutexGuard guard(&mutex_);

  Address address = reinterpret_cast<Address>(raw_address);
  CHECK_EQ(size, region_allocator_.FreeRegion(address));
  if (page_initialization_mode_ ==
      PageInitializationMode::kAllocatedPagesMustBeZeroInitialized) {
    DCHECK_NE(page_freeing_mode_, PageFreeingMode::kDiscard);
    // When we are required to return zero-initialized pages, we decommit the
    // pages here, which will cause any wired pages to be removed by the OS.
    CHECK(page_allocator_->DecommitPages(raw_address, size));
  } else {
    DCHECK_EQ(page_initialization_mode_,
              PageInitializationMode::kAllocatedPagesCanBeUninitialized);
    if (page_freeing_mode_ == PageFreeingMode::kMakeInaccessible) {
      CHECK(page_allocator_->SetPermissions(raw_address, size,
                                            PageAllocator::kNoAccess));
    } else {
      CHECK_EQ(page_freeing_mode_, PageFreeingMode::kDiscard);
      CHECK(page_allocator_->DiscardSystemPages(raw_address, size));
    }
  }
  return true;
}

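// Shrinks the allocated region at |raw_address| from |size| to |new_size|,
// releasing the tail pages according to the configured modes.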
bool BoundedPageAllocator::ReleasePages(void* raw_address, size_t size,
                                        size_t new_size) {
  Address address = reinterpret_cast<Address>(raw_address);
  DCHECK(IsAligned(address, allocate_page_size_));

  DCHECK_LT(new_size, size);
  DCHECK(IsAligned(size - new_size, commit_page_size_));

  // This must be held until the page permissions are updated.
  MutexGuard guard(&mutex_);

  // Check whether this release frees up any allocatable pages.
  size_t allocated_size = RoundUp(size, allocate_page_size_);
  size_t new_allocated_size = RoundUp(new_size, allocate_page_size_);

#ifdef DEBUG
  {
    // There must be an allocated region at the given |address| whose size is
    // not smaller than |size|.
    DCHECK_EQ(allocated_size, region_allocator_.CheckRegion(address));
  }
#endif

  if (new_allocated_size < allocated_size) {
    region_allocator_.TrimRegion(address, new_allocated_size);
  }

  // Keep the region in "used" state, just uncommit some pages.
  void* free_address = reinterpret_cast<void*>(address + new_size);
  size_t free_size = size - new_size;
  if (page_initialization_mode_ ==
      PageInitializationMode::kAllocatedPagesMustBeZeroInitialized) {
    DCHECK_NE(page_freeing_mode_, PageFreeingMode::kDiscard);
    // See comment in FreePages().
    CHECK(page_allocator_->DecommitPages(free_address, free_size));
  } else {
    DCHECK_EQ(page_initialization_mode_,
              PageInitializationMode::kAllocatedPagesCanBeUninitialized);
    if (page_freeing_mode_ == PageFreeingMode::kMakeInaccessible) {
      CHECK(page_allocator_->SetPermissions(free_address, free_size,
                                            PageAllocator::kNoAccess));
    } else {
      CHECK_EQ(page_freeing_mode_, PageFreeingMode::kDiscard);
      CHECK(page_allocator_->DiscardSystemPages(free_address, free_size));
    }
  }
  return true;
}

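// Changes the permissions of committed pages within the managed range,
// delegating to the underlying page allocator.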
bool BoundedPageAllocator::SetPermissions(void* address, size_t size,
                                          PageAllocator::Permission access) {
  DCHECK(IsAligned(reinterpret_cast<Address>(address), commit_page_size_));
  DCHECK(IsAligned(size, commit_page_size_));
  DCHECK(region_allocator_.contains(reinterpret_cast<Address>(address), size));
  return page_allocator_->SetPermissions(address, size, access);
}

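// Recommits previously decommitted pages with the given permissions,
// delegating to the underlying page allocator.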
bool BoundedPageAllocator::RecommitPages(void* address, size_t size,
                                         PageAllocator::Permission access) {
  DCHECK(IsAligned(reinterpret_cast<Address>(address), commit_page_size_));
  DCHECK(IsAligned(size, commit_page_size_));
  DCHECK(region_allocator_.contains(reinterpret_cast<Address>(address), size));
  return page_allocator_->RecommitPages(address, size, access);
}

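// Forwards directly to the underlying page allocator; no region bookkeeping
// is updated.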
bool BoundedPageAllocator::DiscardSystemPages(void* address, size_t size) {
  return page_allocator_->DiscardSystemPages(address, size);
}

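// Forwards directly to the underlying page allocator; no region bookkeeping
// is updated.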
bool BoundedPageAllocator::DecommitPages(void* address, size_t size) {
  return page_allocator_->DecommitPages(address, size);
}

}  // namespace base
}  // namespace v8
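
// Usage sketch (illustrative only; the reservation setup below is
// hypothetical, not code from this file):
//
//   v8::PageAllocator* platform = ...;  // some platform page allocator
//   uintptr_t start = ...;              // start of a pre-reserved range
//   size_t reservation_size = ...;      // size of that range
//   v8::base::BoundedPageAllocator bpa(
//       platform, start, reservation_size, platform->AllocatePageSize(),
//       v8::base::PageInitializationMode::kAllocatedPagesCanBeUninitialized,
//       v8::base::PageFreeingMode::kMakeInaccessible);
//   void* p = bpa.AllocatePages(nullptr, 64 * 1024, bpa.AllocatePageSize(),
//                               v8::PageAllocator::kReadWrite);
//   if (p != nullptr) CHECK(bpa.FreePages(p, 64 * 1024));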