Commit c4d31fea authored by Jakob Gruber, committed by Commit Bot

[roots] Remove pseudo-smi stack limit roots

Stack limits were additionally maintained in pseudo-smi roots.
"Pseudo", because we stored the raw limit pointers there, just making
sure their values looked like smis by masking the least significant
bits.

This mechanism is no longer needed now that we can access the stack
limit external references as efficiently as the smi roots.

Bug: v8:9534
Change-Id: I37e78851c97aebc1012ea0e95075e56366a40a73
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/1745339
Commit-Queue: Jakob Gruber <jgruber@chromium.org>
Reviewed-by: Michael Starzinger <mstarzinger@chromium.org>
Cr-Commit-Position: refs/heads/master@{#63183}
parent d75392bc
......@@ -90,12 +90,24 @@ static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
namespace {
// Loads the "real" JS stack limit (the limit not modified by interrupt
// requests; see the stack-overflow-check comments at the call sites) into
// |destination|.  The limit cell is reached through kRootRegister at a
// fixed offset, so no external-reference relocation is embedded in the
// generated code.
void LoadRealStackLimit(MacroAssembler* masm, Register destination) {
// Root-register-relative addressing requires the root array to be set up.
DCHECK(masm->root_array_available());
Isolate* isolate = masm->isolate();
ExternalReference limit = ExternalReference::address_of_real_jslimit(isolate);
// The limit address must be reachable as an offset from kRootRegister.
DCHECK(TurboAssembler::IsAddressableThroughRootRegister(isolate, limit));
intptr_t offset =
TurboAssembler::RootRegisterOffsetForExternalReference(isolate, limit);
// Fail hard (release-mode CHECK) if the offset exceeds the 32-bit range.
CHECK(is_int32(offset));
__ ldr(destination, MemOperand(kRootRegister, offset));
}
void Generate_StackOverflowCheck(MacroAssembler* masm, Register num_args,
Register scratch, Label* stack_overflow) {
// Check the stack for overflow. We are not trying to catch
// interruptions (e.g. debug break and preemption) here, so the "real stack
// limit" is checked.
__ LoadRoot(scratch, RootIndex::kRealStackLimit);
LoadRealStackLimit(masm, scratch);
// Make scratch the space we have left. The stack might already be overflowed
// here which will cause scratch to become negative.
__ sub(scratch, sp, scratch);
......@@ -428,7 +440,8 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
// Check the stack for overflow. We are not trying to catch interruptions
// (i.e. debug break and preemption) here, so check the "real stack limit".
Label stack_overflow;
__ CompareRoot(sp, RootIndex::kRealStackLimit);
LoadRealStackLimit(masm, scratch);
__ cmp(sp, scratch);
__ b(lo, &stack_overflow);
// Push receiver.
......@@ -1116,7 +1129,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// Do a stack check to ensure we don't go over the limit.
Label ok;
__ sub(r9, sp, Operand(r4));
__ LoadRoot(r2, RootIndex::kRealStackLimit);
LoadRealStackLimit(masm, r2);
__ cmp(r9, Operand(r2));
__ b(hs, &ok);
__ CallRuntime(Runtime::kThrowStackOverflow);
......@@ -2089,7 +2102,7 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
// Compute the space we have left. The stack might already be overflowed
// here which will cause remaining_stack_size to become negative.
__ LoadRoot(remaining_stack_size, RootIndex::kRealStackLimit);
LoadRealStackLimit(masm, remaining_stack_size);
__ sub(remaining_stack_size, sp, remaining_stack_size);
// Check if the arguments will overflow the stack.
......
......@@ -89,6 +89,17 @@ static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
namespace {
// arm64 variant: loads the "real" JS stack limit into |destination| via a
// kRootRegister-relative load, avoiding an embedded external reference.
void LoadRealStackLimit(MacroAssembler* masm, Register destination) {
// Root-register-relative addressing requires the root array to be set up.
DCHECK(masm->root_array_available());
Isolate* isolate = masm->isolate();
ExternalReference limit = ExternalReference::address_of_real_jslimit(isolate);
// The limit address must be reachable as an offset from kRootRegister.
DCHECK(TurboAssembler::IsAddressableThroughRootRegister(isolate, limit));
intptr_t offset =
TurboAssembler::RootRegisterOffsetForExternalReference(isolate, limit);
__ Ldr(destination, MemOperand(kRootRegister, offset));
}
void Generate_StackOverflowCheck(MacroAssembler* masm, Register num_args,
Label* stack_overflow) {
UseScratchRegisterScope temps(masm);
......@@ -98,7 +109,7 @@ void Generate_StackOverflowCheck(MacroAssembler* masm, Register num_args,
// We are not trying to catch interruptions (e.g. debug break and
// preemption) here, so the "real stack limit" is checked.
__ LoadRoot(scratch, RootIndex::kRealStackLimit);
LoadRealStackLimit(masm, scratch);
// Make scratch the space we have left. The stack might already be overflowed
// here which will cause scratch to become negative.
__ Sub(scratch, sp, scratch);
......@@ -480,7 +491,8 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
// Check the stack for overflow. We are not trying to catch interruptions
// (i.e. debug break and preemption) here, so check the "real stack limit".
Label stack_overflow;
__ CompareRoot(sp, RootIndex::kRealStackLimit);
LoadRealStackLimit(masm, x10);
__ Cmp(sp, x10);
__ B(lo, &stack_overflow);
// Get number of arguments for generator function.
......@@ -1244,7 +1256,12 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// Do a stack check to ensure we don't go over the limit.
Label ok;
__ Sub(x10, sp, Operand(x11));
__ CompareRoot(x10, RootIndex::kRealStackLimit);
{
UseScratchRegisterScope temps(masm);
Register scratch = temps.AcquireX();
LoadRealStackLimit(masm, scratch);
__ Cmp(x10, scratch);
}
__ B(hs, &ok);
__ CallRuntime(Runtime::kThrowStackOverflow);
__ Bind(&ok);
......@@ -2490,7 +2507,7 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
// (i.e. debug break and preemption) here, so check the "real stack
// limit".
Label done;
__ LoadRoot(x10, RootIndex::kRealStackLimit);
LoadRealStackLimit(masm, x10);
// Make x10 the space we have left. The stack might already be overflowed
// here which will cause x10 to become negative.
__ Sub(x10, sp, x10);
......
......@@ -64,6 +64,18 @@ static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
namespace {
// x64 variant: returns a kRootRegister-relative memory operand addressing
// the Isolate's "real" JS stack limit, so callers can cmp/mov against it
// without a separate load instruction.
Operand RealStackLimitAsOperand(MacroAssembler* masm) {
// Root-register-relative addressing requires the root array to be set up.
DCHECK(masm->root_array_available());
Isolate* isolate = masm->isolate();
ExternalReference limit = ExternalReference::address_of_real_jslimit(isolate);
// The limit address must be reachable as an offset from kRootRegister.
DCHECK(TurboAssembler::IsAddressableThroughRootRegister(isolate, limit));
intptr_t offset =
TurboAssembler::RootRegisterOffsetForExternalReference(isolate, limit);
// x64 memory operands carry a 32-bit displacement; CHECK rather than truncate.
CHECK(is_int32(offset));
return Operand(kRootRegister, static_cast<int32_t>(offset));
}
void Generate_StackOverflowCheck(
MacroAssembler* masm, Register num_args, Register scratch,
Label* stack_overflow,
......@@ -71,7 +83,7 @@ void Generate_StackOverflowCheck(
// Check the stack for overflow. We are not trying to catch
// interruptions (e.g. debug break and preemption) here, so the "real stack
// limit" is checked.
__ LoadRoot(kScratchRegister, RootIndex::kRealStackLimit);
__ movq(kScratchRegister, RealStackLimitAsOperand(masm));
__ movq(scratch, rsp);
// Make scratch the space we have left. The stack might already be overflowed
// here which will cause scratch to become negative.
......@@ -735,7 +747,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
// Check the stack for overflow. We are not trying to catch interruptions
// (i.e. debug break and preemption) here, so check the "real stack limit".
Label stack_overflow;
__ CompareRoot(rsp, RootIndex::kRealStackLimit);
__ cmpq(rsp, RealStackLimitAsOperand(masm));
__ j(below, &stack_overflow);
// Pop return address.
......@@ -1134,7 +1146,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
Label ok;
__ movq(rax, rsp);
__ subq(rax, rcx);
__ CompareRoot(rax, RootIndex::kRealStackLimit);
__ cmpq(rax, RealStackLimitAsOperand(masm));
__ j(above_equal, &ok, Label::kNear);
__ CallRuntime(Runtime::kThrowStackOverflow);
__ bind(&ok);
......@@ -2339,9 +2351,10 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
__ shlq(rbx, Immediate(kSystemPointerSizeLog2));
__ movq(kScratchRegister, rsp);
__ subq(kScratchRegister, rbx);
// We are not trying to catch interruptions (i.e. debug break and
// preemption) here, so check the "real stack limit".
__ CompareRoot(kScratchRegister, RootIndex::kRealStackLimit);
__ cmpq(kScratchRegister, RealStackLimitAsOperand(masm));
__ j(above_equal, &done, Label::kNear);
{
FrameScope scope(masm, StackFrame::MANUAL);
......
......@@ -329,11 +329,17 @@ ExternalReference ExternalReference::allocation_sites_list_address(
}
ExternalReference ExternalReference::address_of_jslimit(Isolate* isolate) {
// NOTE(review): stripped diff — the unconditional return on the next line
// is the removed (old) body; the lines after it are its replacement, which
// additionally asserts root-register addressability.
return ExternalReference(isolate->stack_guard()->address_of_jslimit());
Address address = isolate->stack_guard()->address_of_jslimit();
// For efficient generated code, this should be root-register-addressable.
DCHECK(isolate->root_register_addressable_region().contains(address));
return ExternalReference(address);
}
ExternalReference ExternalReference::address_of_real_jslimit(Isolate* isolate) {
// NOTE(review): stripped diff — the unconditional return on the next line
// is the removed (old) body; the lines after it are its replacement, which
// additionally asserts root-register addressability.
return ExternalReference(isolate->stack_guard()->address_of_real_jslimit());
Address address = isolate->stack_guard()->address_of_real_jslimit();
// For efficient generated code, this should be root-register-addressable.
DCHECK(isolate->root_register_addressable_region().contains(address));
return ExternalReference(address);
}
ExternalReference ExternalReference::store_buffer_top(Isolate* isolate) {
......
......@@ -92,14 +92,15 @@ void TurboAssembler::CompareRoot(Register with, RootIndex index) {
}
// Compares |with| against the "real" JS stack limit.
void TurboAssembler::CompareRealStackLimit(Register with) {
// NOTE(review): stripped diff — the if/else below is the removed (old)
// implementation (root LoadRoot path plus external-reference fallback);
// the CHECK-based code after it is the replacement, which always
// addresses the limit through kRootRegister without a load.
if (root_array_available()) {
CompareRoot(with, RootIndex::kRealStackLimit);
} else {
DCHECK(!options().isolate_independent_code);
ExternalReference ref =
ExternalReference::address_of_real_jslimit(isolate());
cmp(with, Operand(ref.address(), RelocInfo::EXTERNAL_REFERENCE));
}
CHECK(root_array_available());  // Only used by builtins.
// Address through the root register. No load is needed.
ExternalReference limit =
ExternalReference::address_of_real_jslimit(isolate());
DCHECK(IsAddressableThroughRootRegister(isolate(), limit));
intptr_t offset = RootRegisterOffsetForExternalReference(isolate(), limit);
cmp(with, Operand(kRootRegister, offset));
}
void MacroAssembler::PushRoot(RootIndex index) {
......
......@@ -3356,9 +3356,6 @@ bool Isolate::Init(ReadOnlyDeserializer* read_only_deserializer,
// The initialization process does not handle memory exhaustion.
AlwaysAllocateScope always_allocate(this);
// Safe after setting Heap::isolate_, and initializing StackGuard
heap_.SetStackLimits();
#define ASSIGN_ELEMENT(CamelName, hacker_name) \
isolate_addresses_[IsolateAddressId::k##CamelName##Address] = \
reinterpret_cast<Address>(hacker_name##_address());
......@@ -3537,10 +3534,6 @@ bool Isolate::Init(ReadOnlyDeserializer* read_only_deserializer,
clear_pending_message();
clear_scheduled_exception();
// Deserializing may put strange things in the root array's copy of the
// stack guard.
heap_.SetStackLimits();
// Quiet the heap NaN if needed on target platform.
if (!create_heap_objects)
Assembler::QuietNaN(ReadOnlyRoots(this).nan_value());
......
......@@ -21,14 +21,12 @@ void StackGuard::set_interrupt_limits(const ExecutionAccess& lock) {
DCHECK_NOT_NULL(isolate_);
thread_local_.set_jslimit(kInterruptLimit);
thread_local_.set_climit(kInterruptLimit);
isolate_->heap()->SetStackLimits();
}
// Restores jslimit/climit from their "real" values, undoing any pending
// interrupt-request limit change.
void StackGuard::reset_limits(const ExecutionAccess& lock) {
DCHECK_NOT_NULL(isolate_);
thread_local_.set_jslimit(thread_local_.real_jslimit_);
thread_local_.set_climit(thread_local_.real_climit_);
// NOTE(review): stripped diff — the call below is removed by this commit;
// the limits are no longer mirrored into the roots table.
isolate_->heap()->SetStackLimits();
}
void StackGuard::SetStackLimit(uintptr_t limit) {
......@@ -54,7 +52,6 @@ void StackGuard::AdjustStackLimitForSimulator() {
uintptr_t jslimit = SimulatorStack::JsLimitFromCLimit(isolate_, climit);
if (thread_local_.jslimit() == thread_local_.real_jslimit_) {
thread_local_.set_jslimit(jslimit);
isolate_->heap()->SetStackLimits();
}
}
......@@ -181,23 +178,13 @@ int StackGuard::FetchAndClearInterrupts() {
// Serializes the per-thread stack-guard state into |to| and resets
// thread_local_ to its default-constructed state.
char* StackGuard::ArchiveStackGuard(char* to) {
ExecutionAccess access(isolate_);
MemCopy(to, reinterpret_cast<char*>(&thread_local_), sizeof(ThreadLocal));
// NOTE(review): stripped diff — the blank/TODO/SetStackLimits lines below
// are the removed (old) code; `thread_local_ = {};` is the replacement.
ThreadLocal blank;
// Set the stack limits using the old thread_local_.
// TODO(isolates): This was the old semantics of constructing a ThreadLocal
// (as the ctor called SetStackLimits, which looked at the
// current thread_local_ from StackGuard)-- but is this
// really what was intended?
isolate_->heap()->SetStackLimits();
thread_local_ = blank;
thread_local_ = {};
return to + sizeof(ThreadLocal);
}
// Restores the per-thread stack-guard state previously written by
// ArchiveStackGuard().
char* StackGuard::RestoreStackGuard(char* from) {
ExecutionAccess access(isolate_);
MemCopy(reinterpret_cast<char*>(&thread_local_), from, sizeof(ThreadLocal));
// NOTE(review): stripped diff — the call below is removed by this commit;
// the limits are no longer mirrored into the roots table.
isolate_->heap()->SetStackLimits();
return from + sizeof(ThreadLocal);
}
......@@ -216,8 +203,7 @@ void StackGuard::ThreadLocal::Clear() {
interrupt_flags_ = 0;
}
bool StackGuard::ThreadLocal::Initialize(Isolate* isolate) {
bool should_set_stack_limits = false;
void StackGuard::ThreadLocal::Initialize(Isolate* isolate) {
if (real_climit_ == kIllegalLimit) {
const uintptr_t kLimitSize = FLAG_stack_size * KB;
DCHECK_GT(GetCurrentStackPosition(), kLimitSize);
......@@ -226,20 +212,17 @@ bool StackGuard::ThreadLocal::Initialize(Isolate* isolate) {
set_jslimit(SimulatorStack::JsLimitFromCLimit(isolate, limit));
real_climit_ = limit;
set_climit(limit);
should_set_stack_limits = true;
}
interrupt_scopes_ = nullptr;
interrupt_flags_ = 0;
return should_set_stack_limits;
}
// Clears the per-thread stack-guard state.
void StackGuard::ClearThread(const ExecutionAccess& lock) {
thread_local_.Clear();
// NOTE(review): stripped diff — the call below is removed by this commit;
// no roots-table mirror of the limits remains.
isolate_->heap()->SetStackLimits();
}
void StackGuard::InitThread(const ExecutionAccess& lock) {
if (thread_local_.Initialize(isolate_)) isolate_->heap()->SetStackLimits();
thread_local_.Initialize(isolate_);
Isolate::PerIsolateThreadData* per_thread =
isolate_->FindOrAllocatePerThreadDataForThisThread();
uintptr_t stored_limit = per_thread->stack_limit();
......
......@@ -132,8 +132,7 @@ class V8_EXPORT_PRIVATE StackGuard final {
// Clear.
void Clear();
// Returns true if the heap's stack limits should be set, false if not.
bool Initialize(Isolate* isolate);
void Initialize(Isolate* isolate);
// The stack limit is split into a JavaScript and a C++ stack limit. These
// two are the same except when running on a simulator where the C++ and
......
......@@ -5089,25 +5089,6 @@ void Heap::InitializeHashSeed() {
0, reinterpret_cast<byte*>(&new_hash_seed), kInt64Size);
}
// NOTE(review): stripped diff — this entire function is deleted by this
// commit; the pseudo-smi stack-limit roots it maintained no longer exist.
void Heap::SetStackLimits() {
DCHECK_NOT_NULL(isolate_);
DCHECK(isolate_ == isolate());
// On 64 bit machines, pointers are generally out of range of Smis. We write
// something that looks like an out of range Smi to the GC.
// Set up the special root array entries containing the stack limits.
// These are actually addresses, but the tag makes the GC ignore it.
roots_table()[RootIndex::kStackLimit] =
(isolate_->stack_guard()->jslimit() & ~kSmiTagMask) | kSmiTag;
roots_table()[RootIndex::kRealStackLimit] =
(isolate_->stack_guard()->real_jslimit() & ~kSmiTagMask) | kSmiTag;
}
// NOTE(review): stripped diff — deleted by this commit; the stack-limit
// roots it reset (for reproducible snapshots) no longer exist.
void Heap::ClearStackLimits() {
roots_table()[RootIndex::kStackLimit] = kNullAddress;
roots_table()[RootIndex::kRealStackLimit] = kNullAddress;
}
int Heap::NextAllocationTimeout(int current_timeout) {
if (FLAG_random_gc_interval > 0) {
// If current timeout hasn't reached 0 the GC was caused by something
......
......@@ -722,15 +722,6 @@ class Heap {
V8_INLINE void SetMessageListeners(TemplateList value);
V8_INLINE void SetPendingOptimizeForTestBytecode(Object bytecode);
// Set the stack limit in the roots table. Some architectures generate
// code that looks here, because it is faster than loading from the static
// jslimit_/real_jslimit_ variable in the StackGuard.
void SetStackLimits();
// The stack limit is thread-dependent. To be able to reproduce the same
// snapshot blob, we need to reset it before serializing.
void ClearStackLimits();
void RegisterStrongRoots(FullObjectSlot start, FullObjectSlot end);
void UnregisterStrongRoots(FullObjectSlot start);
......
......@@ -273,8 +273,6 @@ class Symbol;
// Entries in this list are limited to Smis and are not visited during GC.
#define SMI_ROOT_LIST(V) \
V(Smi, stack_limit, StackLimit) \
V(Smi, real_stack_limit, RealStackLimit) \
V(Smi, last_script_id, LastScriptId) \
V(Smi, last_debugging_id, LastDebuggingId) \
/* To distinguish the function templates, so that we can find them in the */ \
......
......@@ -143,13 +143,9 @@ void StartupSerializer::SerializeStrongReferences() {
// No active or weak handles.
CHECK(isolate->handle_scope_implementer()->blocks()->empty());
// Visit smi roots.
// Clear the stack limits to make the snapshot reproducible.
// Reset it again afterwards.
isolate->heap()->ClearStackLimits();
// Visit smi roots and immortal immovables first to make sure they end up in
// the first page.
isolate->heap()->IterateSmiRoots(this);
isolate->heap()->SetStackLimits();
// First visit immortal immovables to make sure they end up in the first page.
isolate->heap()->IterateStrongRoots(this, VISIT_FOR_SERIALIZATION);
}
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment