// Copyright 2021 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_HEAP_CODE_RANGE_H_
#define V8_HEAP_CODE_RANGE_H_

#include <atomic>
#include <memory>
#include <unordered_map>
#include <vector>

#include "src/base/platform/mutex.h"
#include "src/common/globals.h"
#include "src/utils/allocation.h"

namespace v8 {
namespace internal {

// The process-wide singleton that keeps track of code range regions with the
// intention to reuse free code range regions as a workaround for CFG memory
// leaks (see crbug.com/870054).
class CodeRangeAddressHint {
 public:
  // When near code range is enabled, an address within
  // kMaxPCRelativeCodeRangeInMB of the embedded blob is returned if there is
  // enough space. Otherwise a random address is returned. When near code
  // range is disabled, returns the most recently freed code range start
  // address for the given size. If there is no such entry, then a random
  // address is returned.
  V8_EXPORT_PRIVATE Address GetAddressHint(size_t code_range_size,
                                           size_t alignment);

  V8_EXPORT_PRIVATE void NotifyFreedCodeRange(Address code_range_start,
                                              size_t code_range_size);

 private:
  base::Mutex mutex_;
  // A map from code range size to an array of recently freed code range
  // addresses. There should be O(1) different code range sizes.
  // The length of each array is limited by the peak number of code ranges,
  // which should be also O(1).
  std::unordered_map<size_t, std::vector<Address>> recently_freed_;
};
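
// A minimal usage sketch (illustrative only; `hint`, `start`, `size` and
// `alignment` are placeholders): a freed range is recorded and later handed
// back as the hint for a new reservation of the same size.
//
//   CodeRangeAddressHint hint;
//   hint.NotifyFreedCodeRange(start, size);
//   Address reuse = hint.GetAddressHint(size, alignment);
//   // `reuse` == `start` when the freed entry is suitably aligned and no
//   // near-code-range candidate takes precedence.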

// A code range is a virtual memory cage that may contain executable code. It
// has the following layout.
//
// +------------+-----+----------------  ~~~  -+
// |     RW     | ... |    ...                 |
// +------------+-----+----------------- ~~~  -+
// ^            ^     ^
// start        base  allocatable base
//
// <------------>     <------------------------>
//   reserved            allocatable region
// <------------------------------------------->
//               code region
//
// The start of the reservation may include a reserved page with read-write
// access, as required by some platforms (Win64). The cage's page allocator
// does not control the optional reserved page at the beginning of the code
// region.
//
// The following conditions hold:
// 1) |reservation()->region()| >= |optional RW pages| +
//    |reservation()->page_allocator()|
// 2) |reservation()| is AllocatePageSize()-aligned
// 3) |reservation()->page_allocator()| (i.e. allocatable base) is
//    MemoryChunk::kAlignment-aligned
// 4) |base()| is CommitPageSize()-aligned
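//
// A hedged sketch (illustrative only, not part of the API; `range` is assumed
// to be an initialized CodeRange) of conditions 2) and 4) as DCHECKs:
//
//   // Condition 2: the reservation is AllocatePageSize()-aligned.
//   DCHECK(IsAligned(range.reservation()->region().begin(),
//                    range.page_allocator()->AllocatePageSize()));
//   // Condition 4: base() is CommitPageSize()-aligned.
//   DCHECK(IsAligned(range.base(), range.page_allocator()->CommitPageSize()));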
class CodeRange final : public VirtualMemoryCage {
 public:
  V8_EXPORT_PRIVATE ~CodeRange() override;

  // Returns the size of the initial area of a code-range, which is marked
  // writable and reserved to contain unwind information.
  static size_t GetWritableReservedAreaSize();

  uint8_t* embedded_blob_code_copy() const {
    // remap_embedded_builtins_mutex_ guards against write contention on
    // embedded_blob_code_copy_. The field is safe to read without taking the
    // mutex. It is read to check whether short builtins ought to be enabled
    // because a shared CodeRange has already remapped builtins, and to find
    // where the instruction stream for a builtin is.
    //
    // For the first use, racing with an Isolate calling RemapEmbeddedBuiltins
    // may result in disabling short builtins, which is not a correctness
    // issue.
    //
    // For the second use, racing with an Isolate calling RemapEmbeddedBuiltins
    // may result in an already running Isolate that did not have short
    // builtins enabled (due to max old generation size) switching over to the
    // remapped builtins, which is also not a correctness issue as the
    // remapped builtins are byte-equivalent.
    //
    // Both scenarios should be rare. The initial Isolate is usually created
    // by itself, i.e. without contention. Additionally, it is usually the
    // first Isolate that remaps builtins on machines with enough memory, not
    // subsequent Isolates in the same process.
    return embedded_blob_code_copy_.load(std::memory_order_acquire);
  }
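
  // Illustrative reader pattern (hypothetical call site; `code_range` is
  // assumed to be a CodeRange*): a nullptr result means builtins have not
  // been remapped into this cage yet.
  //
  //   if (uint8_t* copy = code_range->embedded_blob_code_copy()) {
  //     // Builtin instruction streams live at `copy` within this cage.
  //   }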

#ifdef V8_OS_WIN64
  // On 64-bit Windows we need to track how many Isolates are using the
  // CodeRange for registering and unregistering unwind info. Note that even
  // though CodeRanges are used with std::shared_ptr,
  // std::shared_ptr::use_count should not be used for synchronization as it
  // is usually implemented with a relaxed read.
  uint32_t AtomicIncrementUnwindInfoUseCount() {
    return unwindinfo_use_count_.fetch_add(1, std::memory_order_acq_rel);
  }

  uint32_t AtomicDecrementUnwindInfoUseCount() {
    return unwindinfo_use_count_.fetch_sub(1, std::memory_order_acq_rel);
  }
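
  // A minimal sketch (hypothetical call sites): fetch_add/fetch_sub return
  // the previous value, so the first user observes 0 on increment and the
  // last user observes 1 on decrement.
  //
  //   if (code_range->AtomicIncrementUnwindInfoUseCount() == 0) {
  //     // First user: register unwind info for the code range.
  //   }
  //   ...
  //   if (code_range->AtomicDecrementUnwindInfoUseCount() == 1) {
  //     // Last user: unregister unwind info.
  //   }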
#endif  // V8_OS_WIN64

  bool InitReservation(v8::PageAllocator* page_allocator, size_t requested);

  void Free();

  // Remap and copy the embedded builtins into this CodeRange. This method is
  // idempotent and only performs the copy once, so it can be used uniformly
  // regardless of whether there is a per-Isolate or a shared pointer cage.
  // Returns the address of the copy.
  //
  // The builtins code region will be freed with the code range at tear down.
  //
  // When ENABLE_SLOW_DCHECKS is on, the contents of the embedded_blob_code are
  // compared against the already copied version.
  uint8_t* RemapEmbeddedBuiltins(Isolate* isolate,
                                 const uint8_t* embedded_blob_code,
                                 size_t embedded_blob_code_size);
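
  // Illustrative sketch (hypothetical call sites; `range`, `isolate1`,
  // `isolate2`, `blob` and `blob_size` are placeholders): because the copy is
  // made only once, two Isolates sharing this CodeRange receive the same
  // address.
  //
  //   uint8_t* a = range->RemapEmbeddedBuiltins(isolate1, blob, blob_size);
  //   uint8_t* b = range->RemapEmbeddedBuiltins(isolate2, blob, blob_size);
  //   DCHECK_EQ(a, b);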

  static std::shared_ptr<CodeRange> EnsureProcessWideCodeRange(
      v8::PageAllocator* page_allocator, size_t requested_size);

  // If EnsureProcessWideCodeRange has been called, returns the initialized
  // CodeRange. Otherwise returns an empty std::shared_ptr.
  V8_EXPORT_PRIVATE static std::shared_ptr<CodeRange> GetProcessWideCodeRange();
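
  // Illustrative sketch (hypothetical call site; `allocator` and `size` are
  // placeholders): callers that merely observe the shared cage use the
  // getter; callers that require one use the idempotent factory.
  //
  //   std::shared_ptr<CodeRange> range = CodeRange::GetProcessWideCodeRange();
  //   if (!range) {
  //     range = CodeRange::EnsureProcessWideCodeRange(allocator, size);
  //   }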

 private:
  // Used when short builtin calls are enabled, where embedded builtins are
  // copied into the CodeRange so calls can be nearer.
  std::atomic<uint8_t*> embedded_blob_code_copy_{nullptr};

  // When sharing a CodeRange among Isolates, calls to RemapEmbeddedBuiltins may
  // race during Isolate::Init.
  base::Mutex remap_embedded_builtins_mutex_;

#ifdef V8_OS_WIN64
  std::atomic<uint32_t> unwindinfo_use_count_{0};
#endif
};

}  // namespace internal
}  // namespace v8

#endif  // V8_HEAP_CODE_RANGE_H_