Commit b1b5cdda authored by Hao Xu, committed by V8 LUCI CQ

[sparkplug][x64] Enable short builtin calls in x64 when pointer compression is disabled

Allocate the code range close to the binary (within 2GB) when pointer compression
is disabled, and enable short builtin calls if the allocation succeeds.

Bug: v8:12045, v8:11527
Change-Id: I1a9d635b243337980fd75883d9802bc0cee75e43
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3069457
Commit-Queue: Hao A Xu <hao.a.xu@intel.com>
Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
Reviewed-by: Igor Sheludko <ishell@chromium.org>
Cr-Commit-Position: refs/heads/main@{#77248}
parent 8860a022
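For context on what the change enables: x64 near call/jmp instructions take a signed 32-bit displacement, so calls from generated code into the embedded builtins can use the short pc-relative form only when the whole code range lies within +/-2GB of the blob. A minimal standalone sketch of that distance check (illustrative only, not V8's code; all names below are made up):
#include <algorithm>
#include <cstddef>
#include <cstdint>
// Span coverable by a signed 32-bit pc-relative displacement.
constexpr uintptr_t kNearCallBoundary = uintptr_t{2} * 1024 * 1024 * 1024;
bool CanUseShortBuiltinCalls(uintptr_t blob_start, size_t blob_size,
                             uintptr_t range_start, size_t range_size) {
  // If the union of the blob and the code range spans less than 2GB, every
  // call displacement between them fits in a signed 32-bit immediate.
  uintptr_t lo = std::min(blob_start, range_start);
  uintptr_t hi = std::max(blob_start + blob_size, range_start + range_size);
  return hi - lo < kNearCallBoundary;
}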
......@@ -456,9 +456,12 @@ if (v8_multi_arch_build &&
v8_enable_pointer_compression_shared_cage = v8_enable_pointer_compression
}
if (v8_enable_short_builtin_calls &&
(!v8_enable_pointer_compression || v8_control_flow_integrity)) {
((!v8_enable_pointer_compression && v8_current_cpu != "x64") ||
v8_control_flow_integrity)) {
# Disable short calls when pointer compression is not enabled.
# Or when CFI is enabled (until the CFI-related issues are fixed).
# Or when CFI is enabled (until the CFI-related issues are fixed). On x64,
# short builtin calls can still be enabled if the code range is guaranteed
# to be close enough to the embedded builtins.
v8_enable_short_builtin_calls = false
}
if (v8_enable_shared_ro_heap == "") {
......
......@@ -129,6 +129,12 @@ void OS::SignalCodeMovingGC() {}
void OS::AdjustSchedulingParams() {}
std::vector<OS::MemoryRange> OS::GetFreeMemoryRangesWithin(
OS::Address boundary_start, OS::Address boundary_end, size_t minimum_size,
size_t alignment) {
return {};
}
// static
Stack::StackSlot Stack::GetStackStart() {
// pthread_getthrds_np creates 3 values:
......
......@@ -271,5 +271,11 @@ void OS::SignalCodeMovingGC() {
void OS::AdjustSchedulingParams() {}
std::vector<OS::MemoryRange> OS::GetFreeMemoryRangesWithin(
OS::Address boundary_start, OS::Address boundary_end, size_t minimum_size,
size_t alignment) {
return {};
}
} // namespace base
} // namespace v8
......@@ -97,6 +97,12 @@ void OS::SignalCodeMovingGC() {}
void OS::AdjustSchedulingParams() {}
std::vector<OS::MemoryRange> OS::GetFreeMemoryRangesWithin(
OS::Address boundary_start, OS::Address boundary_end, size_t minimum_size,
size_t alignment) {
return {};
}
// static
Stack::StackSlot Stack::GetStackStart() {
pthread_attr_t attr;
......
......@@ -177,5 +177,11 @@ int OS::GetUserTime(uint32_t* secs, uint32_t* usecs) {
void OS::AdjustSchedulingParams() {}
std::vector<OS::MemoryRange> OS::GetFreeMemoryRangesWithin(
OS::Address boundary_start, OS::Address boundary_end, size_t minimum_size,
size_t alignment) {
return {};
}
} // namespace base
} // namespace v8
......@@ -155,5 +155,56 @@ void* OS::RemapShared(void* old_address, void* new_address, size_t size) {
return result;
}
std::vector<OS::MemoryRange> OS::GetFreeMemoryRangesWithin(
OS::Address boundary_start, OS::Address boundary_end, size_t minimum_size,
size_t alignment) {
std::vector<OS::MemoryRange> result = {};
// This function assumes that the layout of the file is as follows:
// hex_start_addr-hex_end_addr rwxp <unused data> [binary_file_name]
// and the lines are arranged in increasing order of address.
// If we encounter an unexpected situation we abort scanning further entries.
FILE* fp = fopen("/proc/self/maps", "r");
if (fp == nullptr) return {};
// Search for gaps between existing virtual memory (VM) areas. If a gap is
// large enough to hold a range of the requested size within the boundary,
// push the overlapping memory range onto the vector.
uintptr_t gap_start = 0, gap_end = 0;
// This loop terminates once scanning hits EOF or reaches a gap that starts
// above the end of the boundary.
uintptr_t vm_start;
uintptr_t vm_end;
while (fscanf(fp, "%" V8PRIxPTR "-%" V8PRIxPTR, &vm_start, &vm_end) == 2 &&
gap_start < boundary_end) {
// Visit the gap directly below this VM area.
gap_end = vm_start;
// Skip gaps that lie entirely below the start of the boundary.
if (gap_end > boundary_start) {
// The available area is the overlap of the gap and the boundary. Push
// the overlapping memory range onto the vector if it is large enough.
const uintptr_t overlap_start =
RoundUp(std::max(gap_start, boundary_start), alignment);
const uintptr_t overlap_end =
RoundDown(std::min(gap_end, boundary_end), alignment);
if (overlap_start < overlap_end &&
overlap_end - overlap_start >= minimum_size) {
result.push_back({overlap_start, overlap_end});
}
}
// Continue to visit the next gap.
gap_start = vm_end;
int c;
// Skip characters until we reach the end of the line or EOF.
do {
c = getc(fp);
} while ((c != EOF) && (c != '\n'));
if (c == EOF) break;
}
fclose(fp);
return result;
}
} // namespace base
} // namespace v8
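A hypothetical caller of the new Linux implementation above, assuming V8's base headers are available (the anchor, sizes, and function name are made up for illustration):
#include <cinttypes>
#include <cstdio>
#include "src/base/platform/platform.h"
// Print free gaps that could hold a 128 MB code range within +/-2GB of an
// anchor address such as the embedded blob.
void PrintNearbyGaps(uintptr_t anchor) {
  const uintptr_t k2GB = uintptr_t{2} * 1024 * 1024 * 1024;
  const size_t kMinimumSize = size_t{128} * 1024 * 1024;
  const size_t kAlignment = size_t{64} * 1024;
  uintptr_t begin = anchor > k2GB ? anchor - k2GB : 0;
  auto ranges = v8::base::OS::GetFreeMemoryRangesWithin(
      begin, anchor + k2GB, kMinimumSize, kAlignment);
  for (const auto& range : ranges) {
    std::printf("free gap: [0x%" PRIxPTR ", 0x%" PRIxPTR ")\n", range.start,
                range.end);
  }
}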
......@@ -93,6 +93,12 @@ void OS::AdjustSchedulingParams() {
#endif
}
std::vector<OS::MemoryRange> OS::GetFreeMemoryRangesWithin(
OS::Address boundary_start, OS::Address boundary_end, size_t minimum_size,
size_t alignment) {
return {};
}
// static
Stack::StackSlot Stack::GetStackStart() {
return pthread_get_stackaddr_np(pthread_self());
......
......@@ -122,5 +122,11 @@ void OS::SignalCodeMovingGC() {
void OS::AdjustSchedulingParams() {}
std::vector<OS::MemoryRange> OS::GetFreeMemoryRangesWithin(
OS::Address boundary_start, OS::Address boundary_end, size_t minimum_size,
size_t alignment) {
return {};
}
} // namespace base
} // namespace v8
......@@ -148,5 +148,11 @@ void OS::SignalCodeMovingGC() {}
void OS::AdjustSchedulingParams() {}
std::vector<OS::MemoryRange> OS::GetFreeMemoryRangesWithin(
OS::Address boundary_start, OS::Address boundary_end, size_t minimum_size,
size_t alignment) {
return {};
}
} // namespace base
} // namespace v8
......@@ -65,6 +65,12 @@ void OS::SignalCodeMovingGC() {}
void OS::AdjustSchedulingParams() {}
std::vector<OS::MemoryRange> OS::GetFreeMemoryRangesWithin(
OS::Address boundary_start, OS::Address boundary_end, size_t minimum_size,
size_t alignment) {
return {};
}
// static
Stack::StackSlot Stack::GetStackStart() {
pthread_attr_t attr;
......
......@@ -474,6 +474,12 @@ void OS::SignalCodeMovingGC() { SB_NOTIMPLEMENTED(); }
void OS::AdjustSchedulingParams() {}
std::vector<OS::MemoryRange> OS::GetFreeMemoryRangesWithin(
OS::Address boundary_start, OS::Address boundary_end, size_t minimum_size,
size_t alignment) {
return {};
}
bool OS::DiscardSystemPages(void* address, size_t size) {
// Starboard API does not support this function yet.
return true;
......
......@@ -1440,6 +1440,12 @@ void Thread::SetThreadLocal(LocalStorageKey key, void* value) {
void OS::AdjustSchedulingParams() {}
std::vector<OS::MemoryRange> OS::GetFreeMemoryRangesWithin(
OS::Address boundary_start, OS::Address boundary_end, size_t minimum_size,
size_t alignment) {
return {};
}
// static
Stack::StackSlot Stack::GetStackStart() {
#if defined(V8_TARGET_ARCH_X64)
......
......@@ -274,6 +274,19 @@ class V8_BASE_EXPORT OS {
static void AdjustSchedulingParams();
using Address = uintptr_t;
struct MemoryRange {
uintptr_t start = 0;
uintptr_t end = 0;
};
// Find gaps between existing virtual memory ranges that have enough space
// to place a region of minimum_size within (boundary_start, boundary_end).
static std::vector<MemoryRange> GetFreeMemoryRangesWithin(
Address boundary_start, Address boundary_end, size_t minimum_size,
size_t alignment);
[[noreturn]] static void ExitProcess(int exit_code);
private:
......
......@@ -103,19 +103,21 @@ STATIC_ASSERT(V8_DEFAULT_STACK_SIZE_KB* KB +
kStackLimitSlackForDeoptimizationInBytes <=
MB);
// Determine whether the short builtin calls optimization is enabled.
#ifdef V8_SHORT_BUILTIN_CALLS
#ifndef V8_COMPRESS_POINTERS
// TODO(11527): Fix this by passing Isolate* to Code::OffHeapInstructionStart()
// and friends.
#error Short builtin calls feature requires pointer compression
#endif
#if defined(V8_SHORT_BUILTIN_CALLS) && !defined(V8_COMPRESS_POINTERS)
#define V8_ENABLE_NEAR_CODE_RANGE_BOOL true
#else
#define V8_ENABLE_NEAR_CODE_RANGE_BOOL false
#endif
// This constant is used for detecting whether the machine has >= 4GB of
// physical memory by checking the max old space size.
const size_t kShortBuiltinCallsOldSpaceSizeThreshold = size_t{2} * GB;
// This constant is used for checking whether the code range can be
// allocated within +/-2GB of the builtins' embedded blob so that short
// builtin calls can be used.
const size_t kShortBuiltinCallsBoundary = size_t{2} * GB;
// Determine whether dict mode prototypes feature is enabled.
#ifdef V8_ENABLE_SWISS_NAME_DICTIONARY
#define V8_ENABLE_SWISS_NAME_DICTIONARY_BOOL true
......
......@@ -362,6 +362,21 @@ uint32_t Isolate::CurrentEmbeddedBlobDataSize() {
std::memory_order::memory_order_relaxed);
}
base::AddressRegion Isolate::GetShortBuiltinsCallRegion() {
DCHECK(V8_ENABLE_NEAR_CODE_RANGE_BOOL);
DCHECK_LT(CurrentEmbeddedBlobCodeSize(), kShortBuiltinCallsBoundary);
Address embedded_blob_code_start =
reinterpret_cast<Address>(CurrentEmbeddedBlobCode());
Address embedded_blob_code_end =
embedded_blob_code_start + CurrentEmbeddedBlobCodeSize();
Address region_start =
(embedded_blob_code_end > kShortBuiltinCallsBoundary)
? (embedded_blob_code_end - kShortBuiltinCallsBoundary)
: 0;
Address region_end = embedded_blob_code_start + kShortBuiltinCallsBoundary;
return base::AddressRegion(region_start, region_end - region_start);
}
size_t Isolate::HashIsolateForEmbeddedBlob() {
DCHECK(builtins_.is_initialized());
DCHECK(Builtins::AllBuiltinsAreIsolateIndependent());
......@@ -3460,7 +3475,10 @@ void Isolate::CreateAndSetEmbeddedBlob() {
}
void Isolate::MaybeRemapEmbeddedBuiltinsIntoCodeRange() {
if (!is_short_builtin_calls_enabled() || !RequiresCodeRange()) return;
if (!is_short_builtin_calls_enabled() || V8_ENABLE_NEAR_CODE_RANGE_BOOL ||
!RequiresCodeRange()) {
return;
}
CHECK_NOT_NULL(embedded_blob_code_);
CHECK_NE(embedded_blob_code_size_, 0);
......@@ -3653,6 +3671,13 @@ bool Isolate::Init(SnapshotData* startup_snapshot_data,
is_short_builtin_calls_enabled_ = true;
}
}
if (V8_ENABLE_NEAR_CODE_RANGE_BOOL) {
// When short builtin calls are enabled via the near code range, the
// code range must be close (<2GB) to the embedded blob so that
// pc-relative calls can be used.
is_short_builtin_calls_enabled_ =
GetShortBuiltinsCallRegion().contains(heap_.code_region());
}
}
// Create LocalIsolate/LocalHeap for the main thread and set state to Running.
......
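The region arithmetic in GetShortBuiltinsCallRegion() above can be sanity-checked with a toy example: any code range contained in [max(blob_end - 2GB, 0), blob_start + 2GB) is within +/-2GB of every byte of the blob, which is what the contains() check during Isolate::Init relies on. A standalone sketch with a made-up blob placement:
#include <cinttypes>
#include <cstdint>
#include <cstdio>
int main() {
  const uintptr_t k2GB = uintptr_t{2} * 1024 * 1024 * 1024;
  uintptr_t blob_start = uintptr_t{0x555500000000};    // hypothetical base
  uintptr_t blob_end = blob_start + 2 * 1024 * 1024;   // ~2 MB of builtins
  uintptr_t region_start = blob_end > k2GB ? blob_end - k2GB : 0;
  uintptr_t region_end = blob_start + k2GB;
  std::printf("near-call region: [0x%" PRIxPTR ", 0x%" PRIxPTR ")\n",
              region_start, region_end);
  return 0;
}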
......@@ -1598,6 +1598,8 @@ class V8_EXPORT_PRIVATE Isolate final : private HiddenFactory {
return V8_SHORT_BUILTIN_CALLS_BOOL && is_short_builtin_calls_enabled_;
}
static base::AddressRegion GetShortBuiltinsCallRegion();
void set_array_buffer_allocator(v8::ArrayBuffer::Allocator* allocator) {
array_buffer_allocator_ = allocator;
}
......
......@@ -29,13 +29,47 @@ DEFINE_LAZY_LEAKY_OBJECT_GETTER(CodeRangeAddressHint, GetCodeRangeAddressHint)
void FunctionInStaticBinaryForAddressHint() {}
} // anonymous namespace
Address CodeRangeAddressHint::GetAddressHint(size_t code_range_size) {
Address CodeRangeAddressHint::GetAddressHint(size_t code_range_size,
size_t alignment) {
base::MutexGuard guard(&mutex_);
Address result = 0;
auto it = recently_freed_.find(code_range_size);
// No recently freed region has been found; try to provide a hint for
// placing a code region.
if (it == recently_freed_.end() || it->second.empty()) {
return FUNCTION_ADDR(&FunctionInStaticBinaryForAddressHint);
if (V8_ENABLE_NEAR_CODE_RANGE_BOOL) {
base::AddressRegion region = Isolate::GetShortBuiltinsCallRegion();
DCHECK_LT(region.begin(), region.end());
auto memory_ranges = base::OS::GetFreeMemoryRangesWithin(
region.begin(), region.end(), code_range_size, alignment);
if (!memory_ranges.empty()) {
result = memory_ranges.front().start;
CHECK(IsAligned(result, alignment));
return result;
}
}
return RoundUp(FUNCTION_ADDR(&FunctionInStaticBinaryForAddressHint),
alignment);
}
// Try to reuse near code range first.
if (V8_ENABLE_NEAR_CODE_RANGE_BOOL) {
base::AddressRegion region = Isolate::GetShortBuiltinsCallRegion();
auto& freed_regions_for_size = it->second;
for (auto it_freed = freed_regions_for_size.rbegin();
it_freed != freed_regions_for_size.rend(); ++it_freed) {
Address code_range_start = *it_freed;
if (region.contains(code_range_start, code_range_size)) {
CHECK(IsAligned(code_range_start, alignment));
freed_regions_for_size.erase((it_freed + 1).base());
return code_range_start;
}
}
}
Address result = it->second.back();
result = it->second.back();
CHECK(IsAligned(result, alignment));
it->second.pop_back();
return result;
}
......@@ -69,12 +103,15 @@ bool CodeRange::InitReservation(v8::PageAllocator* page_allocator,
VirtualMemoryCage::ReservationParams params;
params.page_allocator = page_allocator;
params.reservation_size = requested;
// base_alignment should be kAnyBaseAlignment when V8_ENABLE_NEAR_CODE_RANGE
// is enabled so that InitReservation does not break the alignment chosen in
// GetAddressHint().
params.base_alignment =
VirtualMemoryCage::ReservationParams::kAnyBaseAlignment;
params.base_bias_size = reserved_area;
params.page_size = MemoryChunk::kPageSize;
params.requested_start_hint =
GetCodeRangeAddressHint()->GetAddressHint(requested);
params.requested_start_hint = GetCodeRangeAddressHint()->GetAddressHint(
requested, page_allocator->AllocatePageSize());
if (!VirtualMemoryCage::InitReservation(params)) return false;
......
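A note on the alignment plumbing above: because base_alignment stays kAnyBaseAlignment, InitReservation only rounds the hint down to the allocation page size before using it, so GetAddressHint() must return a page-aligned address or the carefully chosen near-range hint would drift. The rounding helpers behave like this sketch (illustrative, power-of-two alignments only):
#include <cstdint>
constexpr uintptr_t RoundDownTo(uintptr_t x, uintptr_t align) {
  return x & ~(align - 1);  // align must be a power of two
}
constexpr uintptr_t RoundUpTo(uintptr_t x, uintptr_t align) {
  return RoundDownTo(x + align - 1, align);
}
static_assert(RoundUpTo(0x7f0000001234, 0x10000) == 0x7f0000010000,
              "rounds up to the next 64 KB boundary");
static_assert(RoundDownTo(0x7f0000001234, 0x10000) == 0x7f0000000000,
              "rounds down to the previous 64 KB boundary");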
......@@ -20,9 +20,14 @@ namespace internal {
// leaks (see crbug.com/870054).
class CodeRangeAddressHint {
public:
// Returns the most recently freed code range start address for the given
// size. If there is no such entry, then a random address is returned.
V8_EXPORT_PRIVATE Address GetAddressHint(size_t code_range_size);
// When near code range is enabled, an address within
// kShortBuiltinCallsBoundary of the embedded blob is returned if
// there is enough space. Otherwise a random address is returned.
// When near code range is disabled, returns the most recently freed code
// range start address for the given size. If there is no such entry, then a
// random address is returned.
V8_EXPORT_PRIVATE Address GetAddressHint(size_t code_range_size,
size_t alignment);
V8_EXPORT_PRIVATE void NotifyFreedCodeRange(Address code_range_start,
size_t code_range_size);
......
......@@ -358,10 +358,6 @@ bool VirtualMemoryCage::InitReservation(
IsAligned(params.base_bias_size, allocate_page_size)));
CHECK_LE(params.base_bias_size, params.reservation_size);
Address hint = RoundDown(params.requested_start_hint,
RoundUp(params.base_alignment, allocate_page_size)) -
RoundUp(params.base_bias_size, allocate_page_size);
if (!existing_reservation.is_empty()) {
CHECK_EQ(existing_reservation.size(), params.reservation_size);
CHECK(params.base_alignment == ReservationParams::kAnyBaseAlignment ||
......@@ -373,6 +369,9 @@ bool VirtualMemoryCage::InitReservation(
} else if (params.base_alignment == ReservationParams::kAnyBaseAlignment) {
// When the base doesn't need to be aligned, the virtual memory reservation
// fails only due to OOM.
Address hint =
RoundDown(params.requested_start_hint,
RoundUp(params.base_alignment, allocate_page_size));
VirtualMemory reservation(params.page_allocator, params.reservation_size,
reinterpret_cast<void*>(hint));
if (!reservation.IsReserved()) return false;
......@@ -384,6 +383,10 @@ bool VirtualMemoryCage::InitReservation(
// Otherwise, we need to try harder by first overreserving
// in hopes of finding a correctly aligned address within the larger
// reservation.
Address hint =
RoundDown(params.requested_start_hint,
RoundUp(params.base_alignment, allocate_page_size)) -
RoundUp(params.base_bias_size, allocate_page_size);
const int kMaxAttempts = 4;
for (int attempt = 0; attempt < kMaxAttempts; ++attempt) {
// Reserve a region of twice the size so that there is an aligned address
......
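The "try harder" branch above overreserves to find an aligned base inside a larger reservation; stripped of V8's retry loop and bias-page handling, the core trick looks roughly like this (simplified sketch; a real implementation would reserve OS pages and release the slack around the aligned base):
#include <cstdint>
#include <cstdlib>
// Reserve size + alignment bytes so that an aligned base with `size` bytes
// after it is guaranteed to exist somewhere inside the padded region.
void* ReserveAlignedSketch(size_t size, size_t alignment) {
  size_t padded = size + alignment;
  void* raw = std::malloc(padded);  // stand-in for an OS page reservation
  if (raw == nullptr) return nullptr;
  uintptr_t base = reinterpret_cast<uintptr_t>(raw) + alignment - 1;
  base &= ~(static_cast<uintptr_t>(alignment) - 1);
  // Real code would return [raw, base) and [base + size, raw + padded)
  // back to the OS instead of leaking them as this sketch does.
  return reinterpret_cast<void*>(base);
}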
......@@ -129,10 +129,11 @@ TEST_F(SpacesTest, WriteBarrierInYoungGenerationFromSpace) {
TEST_F(SpacesTest, CodeRangeAddressReuse) {
CodeRangeAddressHint hint;
const size_t kAnyBaseAlignment = 1;
// Create code ranges.
Address code_range1 = hint.GetAddressHint(100);
Address code_range2 = hint.GetAddressHint(200);
Address code_range3 = hint.GetAddressHint(100);
Address code_range1 = hint.GetAddressHint(100, kAnyBaseAlignment);
Address code_range2 = hint.GetAddressHint(200, kAnyBaseAlignment);
Address code_range3 = hint.GetAddressHint(100, kAnyBaseAlignment);
// Since the addresses are random, we cannot check that they are different.
......@@ -141,14 +142,14 @@ TEST_F(SpacesTest, CodeRangeAddressReuse) {
hint.NotifyFreedCodeRange(code_range2, 200);
// The next two code ranges should reuse the freed addresses.
Address code_range4 = hint.GetAddressHint(100);
Address code_range4 = hint.GetAddressHint(100, kAnyBaseAlignment);
EXPECT_EQ(code_range4, code_range1);
Address code_range5 = hint.GetAddressHint(200);
Address code_range5 = hint.GetAddressHint(200, kAnyBaseAlignment);
EXPECT_EQ(code_range5, code_range2);
// Free the third code range and check address reuse.
hint.NotifyFreedCodeRange(code_range3, 100);
Address code_range6 = hint.GetAddressHint(100);
Address code_range6 = hint.GetAddressHint(100, kAnyBaseAlignment);
EXPECT_EQ(code_range6, code_range3);
}
......