// Copyright 2021 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/heap/code-range.h"

#include "src/base/bits.h"
#include "src/base/lazy-instance.h"
#include "src/codegen/constants-arch.h"
#include "src/common/globals.h"
#include "src/flags/flags.h"
#include "src/heap/heap-inl.h"
#include "src/utils/allocation.h"

namespace v8 {
namespace internal {

namespace {

// Mutex for creating process_wide_code_range_.
base::LazyMutex process_wide_code_range_creation_mutex_ =
    LAZY_MUTEX_INITIALIZER;

// Weak pointer holding the process-wide CodeRange, if one has been created.
// All Heaps hold a std::shared_ptr to it, so the CodeRange is destroyed once
// no Heap remains.
base::LazyInstance<std::weak_ptr<CodeRange>>::type process_wide_code_range_ =
    LAZY_INSTANCE_INITIALIZER;

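// Process-wide, lazily created and intentionally leaked registry of code
// range placement hints and recently freed code regions.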
DEFINE_LAZY_LEAKY_OBJECT_GETTER(CodeRangeAddressHint, GetCodeRangeAddressHint)

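// The address of this dummy function is used as a fallback hint so that a
// new code range is placed close to the binary's own code.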
void FunctionInStaticBinaryForAddressHint() {}
}  // anonymous namespace

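// Returns an address hint for placing a code range of |code_range_size|
// bytes with the given |alignment|. If recently freed regions of this size
// exist, one inside the short-builtin-calls region is preferred, then the
// most recently freed one. Otherwise the hint is a free range inside the
// preferred region (falling back to its start), or an address near the
// binary itself.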
Address CodeRangeAddressHint::GetAddressHint(size_t code_range_size,
                                             size_t alignment) {
  base::MutexGuard guard(&mutex_);

  // Try to allocate the code range in the preferred region where we can use
  // short instructions for calling/jumping to embedded builtins.
  base::AddressRegion preferred_region = Isolate::GetShortBuiltinsCallRegion();

  Address result = 0;
  auto it = recently_freed_.find(code_range_size);
  // If no recently freed region of this size is available, provide a hint
  // for placing a new code region.
  if (it == recently_freed_.end() || it->second.empty()) {
    if (V8_ENABLE_NEAR_CODE_RANGE_BOOL && !preferred_region.is_empty()) {
      auto memory_ranges = base::OS::GetFreeMemoryRangesWithin(
          preferred_region.begin(), preferred_region.end(), code_range_size,
          alignment);
      if (!memory_ranges.empty()) {
        result = memory_ranges.front().start;
        CHECK(IsAligned(result, alignment));
        return result;
      }
      // An empty memory_ranges means that the GetFreeMemoryRangesWithin() API
      // is not supported, so use the lowest address of the preferred region
      // as a hint: it is at least as good as the fallback hint but has a
      // higher chance of pointing at a free address space range.
      return RoundUp(preferred_region.begin(), alignment);
    }
    return RoundUp(FUNCTION_ADDR(&FunctionInStaticBinaryForAddressHint),
                   alignment);
  }

  // Prefer reusing a recently freed region that lies inside the preferred
  // (near) region.
  if (V8_ENABLE_NEAR_CODE_RANGE_BOOL && !preferred_region.is_empty()) {
    // Take the list by reference so that the erase() below actually removes
    // the reused region from |recently_freed_|.
    auto& freed_regions_for_size = it->second;
    for (auto it_freed = freed_regions_for_size.rbegin();
         it_freed != freed_regions_for_size.rend(); ++it_freed) {
      Address code_range_start = *it_freed;
      if (preferred_region.contains(code_range_start, code_range_size)) {
        CHECK(IsAligned(code_range_start, alignment));
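        // Convert the reverse iterator into the corresponding forward
        // iterator expected by erase().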
        freed_regions_for_size.erase((it_freed + 1).base());
        return code_range_start;
      }
    }
  }

  result = it->second.back();
  CHECK(IsAligned(result, alignment));
  it->second.pop_back();
  return result;
}

void CodeRangeAddressHint::NotifyFreedCodeRange(Address code_range_start,
                                                size_t code_range_size) {
  base::MutexGuard guard(&mutex_);
  recently_freed_[code_range_size].push_back(code_range_start);
}

CodeRange::~CodeRange() { Free(); }

// static
size_t CodeRange::GetWritableReservedAreaSize() {
  return kReservedCodeRangePages * MemoryAllocator::GetCommitPageSize();
}

bool CodeRange::InitReservation(v8::PageAllocator* page_allocator,
                                size_t requested) {
  DCHECK_NE(requested, 0);
  if (V8_EXTERNAL_CODE_SPACE_BOOL) {
    page_allocator = GetPlatformPageAllocator();
  }

  if (requested <= kMinimumCodeRangeSize) {
    requested = kMinimumCodeRangeSize;
  }

  // When V8_EXTERNAL_CODE_SPACE_BOOL is enabled, the allocatable region must
  // not cross the 4Gb boundary so that the default compression scheme of
  // truncating Code pointers to 32 bits still works. This is achieved by
  // specifying the base_alignment parameter.
  // Note that the alignment is calculated before adjusting the requested size
  // for GetWritableReservedAreaSize(). The reasons are:
  //  - the extra page is used by breakpad on Windows and is allowed to cross
  //    the 4Gb boundary,
  //  - rounding up the adjusted size would result in requesting an
  //    unnecessarily big alignment.
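  // For example (hypothetical numbers): a request of 96 MB rounds up to a
  // 128 MB base alignment; a 96 MB region starting at a 128 MB-aligned base
  // stays within a single 128 MB window and, since 4Gb is a multiple of
  // 128 MB, cannot cross a 4Gb boundary.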
  const size_t base_alignment =
      V8_EXTERNAL_CODE_SPACE_BOOL
          ? base::bits::RoundUpToPowerOfTwo(requested)
          : VirtualMemoryCage::ReservationParams::kAnyBaseAlignment;

  const size_t reserved_area = GetWritableReservedAreaSize();
  if (requested < (kMaximalCodeRangeSize - reserved_area)) {
    requested += RoundUp(reserved_area, MemoryChunk::kPageSize);
    // Fulfilling both the reserved-pages requirement and huge code-area
    // alignments is not supported (it would require re-implementation).
    DCHECK_LE(kMinExpectedOSPageSize, page_allocator->AllocatePageSize());
  }
  DCHECK_IMPLIES(kPlatformRequiresCodeRange,
                 requested <= kMaximalCodeRangeSize);

  VirtualMemoryCage::ReservationParams params;
  params.page_allocator = page_allocator;
  params.reservation_size = requested;
  const size_t allocate_page_size = page_allocator->AllocatePageSize();
  params.base_alignment = base_alignment;
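  // The bias region at the start of the reservation holds the writable
  // reserved pages (see GetWritableReservedAreaSize() and the Win64 note
  // below); the code pages proper begin after it.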
  params.base_bias_size = RoundUp(reserved_area, allocate_page_size);
  params.page_size = MemoryChunk::kPageSize;
  params.requested_start_hint =
      GetCodeRangeAddressHint()->GetAddressHint(requested, allocate_page_size);
  params.jit =
      FLAG_jitless ? JitPermission::kNoJit : JitPermission::kMapAsJittable;

  if (!VirtualMemoryCage::InitReservation(params)) return false;

  if (V8_EXTERNAL_CODE_SPACE_BOOL) {
    // Ensure that the code range does not cross the 4Gb boundary, so that
    // the default compression scheme of truncating Code pointers to 32 bits
    // still works.
    Address base = page_allocator_->begin();
    Address last = base + page_allocator_->size() - 1;
    CHECK_EQ(GetPtrComprCageBaseAddress(base),
             GetPtrComprCageBaseAddress(last));
  }

  // On some platforms, specifically Win64, we need to reserve some pages at
  // the beginning of an executable space. See
  //   https://cs.chromium.org/chromium/src/components/crash/content/
  //     app/crashpad_win.cc?rcl=fd680447881449fba2edcf0589320e7253719212&l=204
  // for details.
  if (reserved_area > 0) {
    if (!reservation()->SetPermissions(reservation()->address(), reserved_area,
                                       PageAllocator::kReadWrite)) {
      return false;
    }
  }
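  // On platforms with per-thread JIT write protection (e.g. Apple Silicon's
  // MAP_JIT), the whole range is mapped RWX up front and writability is
  // toggled per thread later on; discarding the system pages afterwards
  // keeps the freshly mapped range from holding on to physical memory.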
  if (V8_HEAP_USE_PTHREAD_JIT_WRITE_PROTECT &&
      params.jit == JitPermission::kMapAsJittable) {
    void* base = reinterpret_cast<void*>(page_allocator_->begin());
    size_t size = page_allocator_->size();
    CHECK(params.page_allocator->SetPermissions(
        base, size, PageAllocator::kReadWriteExecute));
    CHECK(params.page_allocator->DiscardSystemPages(base, size));
  }
  return true;
}

void CodeRange::Free() {
  if (IsReserved()) {
    GetCodeRangeAddressHint()->NotifyFreedCodeRange(
        reservation()->region().begin(), reservation()->region().size());
    VirtualMemoryCage::Free();
  }
}

uint8_t* CodeRange::RemapEmbeddedBuiltins(Isolate* isolate,
                                          const uint8_t* embedded_blob_code,
                                          size_t embedded_blob_code_size) {
  base::MutexGuard guard(&remap_embedded_builtins_mutex_);

  // Remap embedded builtins into the end of the address range controlled by
  // the BoundedPageAllocator.
  const base::AddressRegion code_region(page_allocator()->begin(),
                                        page_allocator()->size());
  CHECK_NE(code_region.begin(), kNullAddress);
  CHECK(!code_region.is_empty());

  uint8_t* embedded_blob_code_copy =
      embedded_blob_code_copy_.load(std::memory_order_acquire);
  if (embedded_blob_code_copy) {
    DCHECK(
        code_region.contains(reinterpret_cast<Address>(embedded_blob_code_copy),
                             embedded_blob_code_size));
    SLOW_DCHECK(memcmp(embedded_blob_code, embedded_blob_code_copy,
                       embedded_blob_code_size) == 0);
    return embedded_blob_code_copy;
  }

  const size_t kAllocatePageSize = page_allocator()->AllocatePageSize();
  const size_t kCommitPageSize = page_allocator()->CommitPageSize();
  size_t allocate_code_size =
      RoundUp(embedded_blob_code_size, kAllocatePageSize);

  // Allocate the re-embedded code blob in such a way that it will be reachable
  // by PC-relative addressing from the biggest possible region.
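  // For example, on arm64 (where kMaxPCRelativeCodeRangeInMB is 128) with a
  // code region larger than 128 MB, the copy is placed so that it ends at
  // code_region.begin() + 128 MB, keeping it PC-relative-reachable from the
  // start of the region; if the region is smaller than that, the copy goes
  // at the very end of the region.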
  const size_t max_pc_relative_code_range = kMaxPCRelativeCodeRangeInMB * MB;
  size_t hint_offset =
      std::min(max_pc_relative_code_range, code_region.size()) -
      allocate_code_size;
  void* hint = reinterpret_cast<void*>(code_region.begin() + hint_offset);

  embedded_blob_code_copy =
      reinterpret_cast<uint8_t*>(page_allocator()->AllocatePages(
          hint, allocate_code_size, kAllocatePageSize,
          PageAllocator::kNoAccess));

  if (!embedded_blob_code_copy) {
    V8::FatalProcessOutOfMemory(
        isolate, "Can't allocate space for re-embedded builtins");
  }
  CHECK_EQ(embedded_blob_code_copy, hint);

  if (code_region.size() > max_pc_relative_code_range) {
    // The re-embedded code blob might not be reachable from the end part of
    // the code range, so ensure that code pages will never be allocated in
    // the "unreachable" area.
    Address unreachable_start =
        reinterpret_cast<Address>(embedded_blob_code_copy) +
        max_pc_relative_code_range;

    if (code_region.contains(unreachable_start)) {
      size_t unreachable_size = code_region.end() - unreachable_start;

      void* result = page_allocator()->AllocatePages(
          reinterpret_cast<void*>(unreachable_start), unreachable_size,
          kAllocatePageSize, PageAllocator::kNoAccess);
      CHECK_EQ(reinterpret_cast<Address>(result), unreachable_start);
    }
  }

  size_t code_size = RoundUp(embedded_blob_code_size, kCommitPageSize);
  if constexpr (base::OS::IsRemapPageSupported()) {
    // By default, the embedded builtins are not remapped, but copied. This
    // costs memory, since builtins become private dirty anonymous memory,
    // rather than shared, clean, file-backed memory for the embedded version.
    // If the OS supports it, we can remap the builtins *on top* of the space
    // allocated in the code range, making the "copy" shared, clean, file-backed
    // memory, and thus saving sizeof(builtins).
    //
    // Builtins should start at a page boundary; see
    // platform-embedded-file-writer-mac.cc. If that is not the case (e.g. if
    // the embedded builtins do not come from the binary), fall back to
    // copying.
    if (IsAligned(reinterpret_cast<uintptr_t>(embedded_blob_code),
                  kCommitPageSize)) {
      bool ok = base::OS::RemapPages(embedded_blob_code, code_size,
                                     embedded_blob_code_copy,
                                     base::OS::MemoryPermission::kReadExecute);

      if (ok) {
        embedded_blob_code_copy_.store(embedded_blob_code_copy,
                                       std::memory_order_release);
        return embedded_blob_code_copy;
      }
    }
  }

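  // Remapping was not possible; fall back to copying the blob. With
  // per-thread JIT write protection the pages are RWX, so recommit them and
  // open a write scope for the duration of the copy.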
  if (V8_HEAP_USE_PTHREAD_JIT_WRITE_PROTECT) {
    if (!page_allocator()->RecommitPages(embedded_blob_code_copy, code_size,
                                         PageAllocator::kReadWriteExecute)) {
      V8::FatalProcessOutOfMemory(isolate,
                                  "Re-embedded builtins: recommit pages");
    }
    RwxMemoryWriteScope rwx_write_scope(
        "Enable write access to copy the blob code into the code range");
    memcpy(embedded_blob_code_copy, embedded_blob_code,
           embedded_blob_code_size);
  } else {
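    // Without per-thread JIT write protection, do the usual W^X dance: map
    // the pages read+write, copy the blob, then flip them to read+execute.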
    if (!page_allocator()->SetPermissions(embedded_blob_code_copy, code_size,
                                          PageAllocator::kReadWrite)) {
      V8::FatalProcessOutOfMemory(isolate,
                                  "Re-embedded builtins: set permissions");
    }
    memcpy(embedded_blob_code_copy, embedded_blob_code,
           embedded_blob_code_size);

    if (!page_allocator()->SetPermissions(embedded_blob_code_copy, code_size,
                                          PageAllocator::kReadExecute)) {
      V8::FatalProcessOutOfMemory(isolate,
                                  "Re-embedded builtins: set permissions");
    }
  }
  embedded_blob_code_copy_.store(embedded_blob_code_copy,
                                 std::memory_order_release);
  return embedded_blob_code_copy;
}

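// Returns the process-wide CodeRange, creating it and reserving its virtual
// memory on first use. The returned std::shared_ptr keeps the CodeRange
// alive; once the last Heap drops its reference, the weak pointer above
// expires and a later call creates a fresh CodeRange.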
// static
std::shared_ptr<CodeRange> CodeRange::EnsureProcessWideCodeRange(
    v8::PageAllocator* page_allocator, size_t requested_size) {
  base::MutexGuard guard(process_wide_code_range_creation_mutex_.Pointer());
  std::shared_ptr<CodeRange> code_range = process_wide_code_range_.Get().lock();
  if (!code_range) {
    code_range = std::make_shared<CodeRange>();
    if (!code_range->InitReservation(page_allocator, requested_size)) {
      V8::FatalProcessOutOfMemory(
          nullptr, "Failed to reserve virtual memory for CodeRange");
    }
    *process_wide_code_range_.Pointer() = code_range;
  }
  return code_range;
}

// static
std::shared_ptr<CodeRange> CodeRange::GetProcessWideCodeRange() {
  return process_wide_code_range_.Get().lock();
}

}  // namespace internal
}  // namespace v8