Commit c4d31fea authored by Jakob Gruber, committed by Commit Bot

[roots] Remove pseudo-smi stack limit roots

Stack limits were additionally maintained in pseudo-smi roots.
"Pseudo", because we stored the raw limit pointers there, just making
sure their values looked like smis by masking the least significant
bits.
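
For illustration, a minimal sketch of that masking, mirroring the
Heap::SetStackLimits() code removed further down in this diff (the helper
name is ours; in V8, kSmiTag is 0 and kSmiTagMask is 1, so the operation
just clears the least significant bit):

  #include <cstdint>

  using Address = uintptr_t;
  constexpr Address kSmiTag = 0;      // Tag value marking a smi.
  constexpr Address kSmiTagMask = 1;  // Mask selecting the tag bit.

  // Clearing the tag bit makes a raw limit pointer look like a smi, so
  // the GC treats the root entry as a smi and ignores it rather than
  // following it as a heap pointer.
  Address MakePseudoSmiLimit(Address raw_limit) {
    return (raw_limit & ~kSmiTagMask) | kSmiTag;
  }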

This mechanism is no longer needed now that we can access the stack
limit external references as efficiently as the smi roots.
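
"As efficiently" because the stack limit fields live at a fixed,
root-register-addressable offset from the address kept in kRootRegister,
so a single [kRootRegister + offset] load or compare reaches them; the new
DCHECKs below assert this addressability. A rough sketch of the offset
computation (the standalone helper and parameter names are illustrative;
the real logic is TurboAssembler::RootRegisterOffsetForExternalReference(),
used by the new builtins code below):

  #include <cstdint>

  using Address = uintptr_t;

  // Distance from the isolate root (the value held in kRootRegister) to
  // the external reference, usable directly in a
  // MemOperand(kRootRegister, offset).
  intptr_t RootRegisterOffsetFor(Address reference_address,
                                 Address isolate_root) {
    return static_cast<intptr_t>(reference_address - isolate_root);
  }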

Bug: v8:9534
Change-Id: I37e78851c97aebc1012ea0e95075e56366a40a73
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/1745339
Commit-Queue: Jakob Gruber <jgruber@chromium.org>
Reviewed-by: Michael Starzinger <mstarzinger@chromium.org>
Cr-Commit-Position: refs/heads/master@{#63183}
parent d75392bc
@@ -90,12 +90,24 @@ static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
 namespace {

+void LoadRealStackLimit(MacroAssembler* masm, Register destination) {
+  DCHECK(masm->root_array_available());
+  Isolate* isolate = masm->isolate();
+  ExternalReference limit = ExternalReference::address_of_real_jslimit(isolate);
+  DCHECK(TurboAssembler::IsAddressableThroughRootRegister(isolate, limit));
+
+  intptr_t offset =
+      TurboAssembler::RootRegisterOffsetForExternalReference(isolate, limit);
+  CHECK(is_int32(offset));
+  __ ldr(destination, MemOperand(kRootRegister, offset));
+}
+
 void Generate_StackOverflowCheck(MacroAssembler* masm, Register num_args,
                                  Register scratch, Label* stack_overflow) {
   // Check the stack for overflow. We are not trying to catch
   // interruptions (e.g. debug break and preemption) here, so the "real stack
   // limit" is checked.
-  __ LoadRoot(scratch, RootIndex::kRealStackLimit);
+  LoadRealStackLimit(masm, scratch);
   // Make scratch the space we have left. The stack might already be overflowed
   // here which will cause scratch to become negative.
   __ sub(scratch, sp, scratch);
@@ -428,7 +440,8 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
   // Check the stack for overflow. We are not trying to catch interruptions
   // (i.e. debug break and preemption) here, so check the "real stack limit".
   Label stack_overflow;
-  __ CompareRoot(sp, RootIndex::kRealStackLimit);
+  LoadRealStackLimit(masm, scratch);
+  __ cmp(sp, scratch);
   __ b(lo, &stack_overflow);

   // Push receiver.
@@ -1116,7 +1129,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
   // Do a stack check to ensure we don't go over the limit.
   Label ok;
   __ sub(r9, sp, Operand(r4));
-  __ LoadRoot(r2, RootIndex::kRealStackLimit);
+  LoadRealStackLimit(masm, r2);
   __ cmp(r9, Operand(r2));
   __ b(hs, &ok);
   __ CallRuntime(Runtime::kThrowStackOverflow);
@@ -2089,7 +2102,7 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
   // Compute the space we have left. The stack might already be overflowed
   // here which will cause remaining_stack_size to become negative.
-  __ LoadRoot(remaining_stack_size, RootIndex::kRealStackLimit);
+  LoadRealStackLimit(masm, remaining_stack_size);
   __ sub(remaining_stack_size, sp, remaining_stack_size);

   // Check if the arguments will overflow the stack.
......
@@ -89,6 +89,17 @@ static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
 namespace {

+void LoadRealStackLimit(MacroAssembler* masm, Register destination) {
+  DCHECK(masm->root_array_available());
+  Isolate* isolate = masm->isolate();
+  ExternalReference limit = ExternalReference::address_of_real_jslimit(isolate);
+  DCHECK(TurboAssembler::IsAddressableThroughRootRegister(isolate, limit));
+
+  intptr_t offset =
+      TurboAssembler::RootRegisterOffsetForExternalReference(isolate, limit);
+  __ Ldr(destination, MemOperand(kRootRegister, offset));
+}
+
 void Generate_StackOverflowCheck(MacroAssembler* masm, Register num_args,
                                  Label* stack_overflow) {
   UseScratchRegisterScope temps(masm);
@@ -98,7 +109,7 @@ void Generate_StackOverflowCheck(MacroAssembler* masm, Register num_args,
   // We are not trying to catch interruptions (e.g. debug break and
   // preemption) here, so the "real stack limit" is checked.
-  __ LoadRoot(scratch, RootIndex::kRealStackLimit);
+  LoadRealStackLimit(masm, scratch);
   // Make scratch the space we have left. The stack might already be overflowed
   // here which will cause scratch to become negative.
   __ Sub(scratch, sp, scratch);
@@ -480,7 +491,8 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
   // Check the stack for overflow. We are not trying to catch interruptions
   // (i.e. debug break and preemption) here, so check the "real stack limit".
   Label stack_overflow;
-  __ CompareRoot(sp, RootIndex::kRealStackLimit);
+  LoadRealStackLimit(masm, x10);
+  __ Cmp(sp, x10);
   __ B(lo, &stack_overflow);

   // Get number of arguments for generator function.
@@ -1244,7 +1256,12 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
   // Do a stack check to ensure we don't go over the limit.
   Label ok;
   __ Sub(x10, sp, Operand(x11));
-  __ CompareRoot(x10, RootIndex::kRealStackLimit);
+  {
+    UseScratchRegisterScope temps(masm);
+    Register scratch = temps.AcquireX();
+    LoadRealStackLimit(masm, scratch);
+    __ Cmp(x10, scratch);
+  }
   __ B(hs, &ok);
   __ CallRuntime(Runtime::kThrowStackOverflow);
   __ Bind(&ok);
@@ -2490,7 +2507,7 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
   // (i.e. debug break and preemption) here, so check the "real stack
   // limit".
   Label done;
-  __ LoadRoot(x10, RootIndex::kRealStackLimit);
+  LoadRealStackLimit(masm, x10);
   // Make x10 the space we have left. The stack might already be overflowed
   // here which will cause x10 to become negative.
   __ Sub(x10, sp, x10);
......
@@ -64,6 +64,18 @@ static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
 namespace {

+Operand RealStackLimitAsOperand(MacroAssembler* masm) {
+  DCHECK(masm->root_array_available());
+  Isolate* isolate = masm->isolate();
+  ExternalReference limit = ExternalReference::address_of_real_jslimit(isolate);
+  DCHECK(TurboAssembler::IsAddressableThroughRootRegister(isolate, limit));
+
+  intptr_t offset =
+      TurboAssembler::RootRegisterOffsetForExternalReference(isolate, limit);
+  CHECK(is_int32(offset));
+  return Operand(kRootRegister, static_cast<int32_t>(offset));
+}
+
 void Generate_StackOverflowCheck(
     MacroAssembler* masm, Register num_args, Register scratch,
     Label* stack_overflow,
@@ -71,7 +83,7 @@ void Generate_StackOverflowCheck(
   // Check the stack for overflow. We are not trying to catch
   // interruptions (e.g. debug break and preemption) here, so the "real stack
   // limit" is checked.
-  __ LoadRoot(kScratchRegister, RootIndex::kRealStackLimit);
+  __ movq(kScratchRegister, RealStackLimitAsOperand(masm));
   __ movq(scratch, rsp);
   // Make scratch the space we have left. The stack might already be overflowed
   // here which will cause scratch to become negative.
@@ -735,7 +747,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
   // Check the stack for overflow. We are not trying to catch interruptions
   // (i.e. debug break and preemption) here, so check the "real stack limit".
   Label stack_overflow;
-  __ CompareRoot(rsp, RootIndex::kRealStackLimit);
+  __ cmpq(rsp, RealStackLimitAsOperand(masm));
   __ j(below, &stack_overflow);

   // Pop return address.
@@ -1134,7 +1146,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
   Label ok;
   __ movq(rax, rsp);
   __ subq(rax, rcx);
-  __ CompareRoot(rax, RootIndex::kRealStackLimit);
+  __ cmpq(rax, RealStackLimitAsOperand(masm));
   __ j(above_equal, &ok, Label::kNear);
   __ CallRuntime(Runtime::kThrowStackOverflow);
   __ bind(&ok);
@@ -2339,9 +2351,10 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
   __ shlq(rbx, Immediate(kSystemPointerSizeLog2));
   __ movq(kScratchRegister, rsp);
   __ subq(kScratchRegister, rbx);
+
   // We are not trying to catch interruptions (i.e. debug break and
   // preemption) here, so check the "real stack limit".
-  __ CompareRoot(kScratchRegister, RootIndex::kRealStackLimit);
+  __ cmpq(kScratchRegister, RealStackLimitAsOperand(masm));
   __ j(above_equal, &done, Label::kNear);
   {
     FrameScope scope(masm, StackFrame::MANUAL);
......
@@ -329,11 +329,17 @@ ExternalReference ExternalReference::allocation_sites_list_address(
 }

 ExternalReference ExternalReference::address_of_jslimit(Isolate* isolate) {
-  return ExternalReference(isolate->stack_guard()->address_of_jslimit());
+  Address address = isolate->stack_guard()->address_of_jslimit();
+  // For efficient generated code, this should be root-register-addressable.
+  DCHECK(isolate->root_register_addressable_region().contains(address));
+  return ExternalReference(address);
 }

 ExternalReference ExternalReference::address_of_real_jslimit(Isolate* isolate) {
-  return ExternalReference(isolate->stack_guard()->address_of_real_jslimit());
+  Address address = isolate->stack_guard()->address_of_real_jslimit();
+  // For efficient generated code, this should be root-register-addressable.
+  DCHECK(isolate->root_register_addressable_region().contains(address));
+  return ExternalReference(address);
 }

 ExternalReference ExternalReference::store_buffer_top(Isolate* isolate) {
......
@@ -92,14 +92,15 @@ void TurboAssembler::CompareRoot(Register with, RootIndex index) {
 }

 void TurboAssembler::CompareRealStackLimit(Register with) {
-  if (root_array_available()) {
-    CompareRoot(with, RootIndex::kRealStackLimit);
-  } else {
-    DCHECK(!options().isolate_independent_code);
-    ExternalReference ref =
-        ExternalReference::address_of_real_jslimit(isolate());
-    cmp(with, Operand(ref.address(), RelocInfo::EXTERNAL_REFERENCE));
-  }
+  CHECK(root_array_available());  // Only used by builtins.
+
+  // Address through the root register. No load is needed.
+  ExternalReference limit =
+      ExternalReference::address_of_real_jslimit(isolate());
+  DCHECK(IsAddressableThroughRootRegister(isolate(), limit));
+
+  intptr_t offset = RootRegisterOffsetForExternalReference(isolate(), limit);
+  cmp(with, Operand(kRootRegister, offset));
 }

 void MacroAssembler::PushRoot(RootIndex index) {
......
@@ -3356,9 +3356,6 @@ bool Isolate::Init(ReadOnlyDeserializer* read_only_deserializer,
   // The initialization process does not handle memory exhaustion.
   AlwaysAllocateScope always_allocate(this);

-  // Safe after setting Heap::isolate_, and initializing StackGuard
-  heap_.SetStackLimits();
-
 #define ASSIGN_ELEMENT(CamelName, hacker_name)                  \
   isolate_addresses_[IsolateAddressId::k##CamelName##Address] = \
       reinterpret_cast<Address>(hacker_name##_address());
@@ -3537,10 +3534,6 @@ bool Isolate::Init(ReadOnlyDeserializer* read_only_deserializer,
   clear_pending_message();
   clear_scheduled_exception();

-  // Deserializing may put strange things in the root array's copy of the
-  // stack guard.
-  heap_.SetStackLimits();
-
   // Quiet the heap NaN if needed on target platform.
   if (!create_heap_objects)
     Assembler::QuietNaN(ReadOnlyRoots(this).nan_value());
......
@@ -21,14 +21,12 @@ void StackGuard::set_interrupt_limits(const ExecutionAccess& lock) {
   DCHECK_NOT_NULL(isolate_);
   thread_local_.set_jslimit(kInterruptLimit);
   thread_local_.set_climit(kInterruptLimit);
-  isolate_->heap()->SetStackLimits();
 }

 void StackGuard::reset_limits(const ExecutionAccess& lock) {
   DCHECK_NOT_NULL(isolate_);
   thread_local_.set_jslimit(thread_local_.real_jslimit_);
   thread_local_.set_climit(thread_local_.real_climit_);
-  isolate_->heap()->SetStackLimits();
 }

 void StackGuard::SetStackLimit(uintptr_t limit) {
@@ -54,7 +52,6 @@ void StackGuard::AdjustStackLimitForSimulator() {
   uintptr_t jslimit = SimulatorStack::JsLimitFromCLimit(isolate_, climit);
   if (thread_local_.jslimit() == thread_local_.real_jslimit_) {
     thread_local_.set_jslimit(jslimit);
-    isolate_->heap()->SetStackLimits();
   }
 }

@@ -181,23 +178,13 @@ int StackGuard::FetchAndClearInterrupts() {
 char* StackGuard::ArchiveStackGuard(char* to) {
   ExecutionAccess access(isolate_);
   MemCopy(to, reinterpret_cast<char*>(&thread_local_), sizeof(ThreadLocal));
-  ThreadLocal blank;
-
-  // Set the stack limits using the old thread_local_.
-  // TODO(isolates): This was the old semantics of constructing a ThreadLocal
-  //                 (as the ctor called SetStackLimits, which looked at the
-  //                 current thread_local_ from StackGuard)-- but is this
-  //                 really what was intended?
-  isolate_->heap()->SetStackLimits();
-  thread_local_ = blank;
-
+  thread_local_ = {};
   return to + sizeof(ThreadLocal);
 }

 char* StackGuard::RestoreStackGuard(char* from) {
   ExecutionAccess access(isolate_);
   MemCopy(reinterpret_cast<char*>(&thread_local_), from, sizeof(ThreadLocal));
-  isolate_->heap()->SetStackLimits();
   return from + sizeof(ThreadLocal);
 }

@@ -216,8 +203,7 @@ void StackGuard::ThreadLocal::Clear() {
   interrupt_flags_ = 0;
 }

-bool StackGuard::ThreadLocal::Initialize(Isolate* isolate) {
-  bool should_set_stack_limits = false;
+void StackGuard::ThreadLocal::Initialize(Isolate* isolate) {
   if (real_climit_ == kIllegalLimit) {
     const uintptr_t kLimitSize = FLAG_stack_size * KB;
     DCHECK_GT(GetCurrentStackPosition(), kLimitSize);
@@ -226,20 +212,17 @@ bool StackGuard::ThreadLocal::Initialize(Isolate* isolate) {
     set_jslimit(SimulatorStack::JsLimitFromCLimit(isolate, limit));
     real_climit_ = limit;
     set_climit(limit);
-    should_set_stack_limits = true;
   }
   interrupt_scopes_ = nullptr;
   interrupt_flags_ = 0;
-  return should_set_stack_limits;
 }

 void StackGuard::ClearThread(const ExecutionAccess& lock) {
   thread_local_.Clear();
-  isolate_->heap()->SetStackLimits();
 }

 void StackGuard::InitThread(const ExecutionAccess& lock) {
-  if (thread_local_.Initialize(isolate_)) isolate_->heap()->SetStackLimits();
+  thread_local_.Initialize(isolate_);
   Isolate::PerIsolateThreadData* per_thread =
       isolate_->FindOrAllocatePerThreadDataForThisThread();
   uintptr_t stored_limit = per_thread->stack_limit();
......
@@ -132,8 +132,7 @@ class V8_EXPORT_PRIVATE StackGuard final {
     // Clear.
     void Clear();

-    // Returns true if the heap's stack limits should be set, false if not.
-    bool Initialize(Isolate* isolate);
+    void Initialize(Isolate* isolate);

     // The stack limit is split into a JavaScript and a C++ stack limit. These
     // two are the same except when running on a simulator where the C++ and
......
@@ -5089,25 +5089,6 @@ void Heap::InitializeHashSeed() {
       0, reinterpret_cast<byte*>(&new_hash_seed), kInt64Size);
 }

-void Heap::SetStackLimits() {
-  DCHECK_NOT_NULL(isolate_);
-  DCHECK(isolate_ == isolate());
-
-  // On 64 bit machines, pointers are generally out of range of Smis. We write
-  // something that looks like an out of range Smi to the GC.
-
-  // Set up the special root array entries containing the stack limits.
-  // These are actually addresses, but the tag makes the GC ignore it.
-  roots_table()[RootIndex::kStackLimit] =
-      (isolate_->stack_guard()->jslimit() & ~kSmiTagMask) | kSmiTag;
-  roots_table()[RootIndex::kRealStackLimit] =
-      (isolate_->stack_guard()->real_jslimit() & ~kSmiTagMask) | kSmiTag;
-}
-
-void Heap::ClearStackLimits() {
-  roots_table()[RootIndex::kStackLimit] = kNullAddress;
-  roots_table()[RootIndex::kRealStackLimit] = kNullAddress;
-}
-
 int Heap::NextAllocationTimeout(int current_timeout) {
   if (FLAG_random_gc_interval > 0) {
     // If current timeout hasn't reached 0 the GC was caused by something
......
@@ -722,15 +722,6 @@ class Heap {
   V8_INLINE void SetMessageListeners(TemplateList value);
   V8_INLINE void SetPendingOptimizeForTestBytecode(Object bytecode);

-  // Set the stack limit in the roots table. Some architectures generate
-  // code that looks here, because it is faster than loading from the static
-  // jslimit_/real_jslimit_ variable in the StackGuard.
-  void SetStackLimits();
-
-  // The stack limit is thread-dependent. To be able to reproduce the same
-  // snapshot blob, we need to reset it before serializing.
-  void ClearStackLimits();
-
   void RegisterStrongRoots(FullObjectSlot start, FullObjectSlot end);
   void UnregisterStrongRoots(FullObjectSlot start);
......
@@ -273,8 +273,6 @@ class Symbol;
 // Entries in this list are limited to Smis and are not visited during GC.
 #define SMI_ROOT_LIST(V)                                                       \
-  V(Smi, stack_limit, StackLimit)                                              \
-  V(Smi, real_stack_limit, RealStackLimit)                                     \
   V(Smi, last_script_id, LastScriptId)                                         \
   V(Smi, last_debugging_id, LastDebuggingId)                                   \
   /* To distinguish the function templates, so that we can find them in the */ \
......
@@ -143,13 +143,9 @@ void StartupSerializer::SerializeStrongReferences() {
   // No active or weak handles.
   CHECK(isolate->handle_scope_implementer()->blocks()->empty());

-  // Visit smi roots.
-  // Clear the stack limits to make the snapshot reproducible.
-  // Reset it again afterwards.
-  isolate->heap()->ClearStackLimits();
+  // Visit smi roots and immortal immovables first to make sure they end up in
+  // the first page.
   isolate->heap()->IterateSmiRoots(this);
-  isolate->heap()->SetStackLimits();
-  // First visit immortal immovables to make sure they end up in the first page.
   isolate->heap()->IterateStrongRoots(this, VISIT_FOR_SERIALIZATION);
 }
......