Commit ef24a565 authored by Jakob Gruber, committed by Commit Bot

[isolate-data] Move the StackGuard to IsolateData

IsolateData guarantees a fixed root-relative offset for its contents,
thus allowing more efficient code generation for accesses to these
addresses. The stack limit, located within the StackGuard, is used by
all stack checks in CSA.

This CL moves the StackGuard inside IsolateData to make such efficient
loads of the limit possible.
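
A minimal sketch of the addressing difference (the names isolate_root, kStackGuardOffset, and kJSLimitOffset below are placeholders for illustration, not the actual V8 identifiers):

#include <cstdint>

using Address = uintptr_t;

// Without a fixed root-relative offset, generated code must first
// materialize the absolute address of the stack limit (e.g. as an embedded
// 64-bit constant) and then load through it.
Address LoadLimitViaAbsoluteAddress(Address* limit_address) {
  return *limit_address;
}

// With the StackGuard embedded in IsolateData, the limit sits at a
// compile-time-constant offset from the root register, so a single
// [root + constant] load suffices.
Address LoadLimitRootRelative(Address isolate_root) {
  constexpr intptr_t kStackGuardOffset = 96;  // placeholder, not V8's value
  constexpr intptr_t kJSLimitOffset = 0;      // placeholder, not V8's value
  return *reinterpret_cast<Address*>(isolate_root + kStackGuardOffset +
                                     kJSLimitOffset);
}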

Bug: v8:9595,v8:9534
Change-Id: I9abe26b88952709c88bf625cc6c028497815a58c
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/1741648
Reviewed-by: Jakob Kummerow <jkummerow@chromium.org>
Commit-Queue: Jakob Gruber <jgruber@chromium.org>
Auto-Submit: Jakob Gruber <jgruber@chromium.org>
Cr-Commit-Position: refs/heads/master@{#63160}
parent 04a6f872
@@ -9,7 +9,7 @@
namespace v8 {
namespace internal {
-InterruptsScope::InterruptsScope(Isolate* isolate, int intercept_mask,
+InterruptsScope::InterruptsScope(Isolate* isolate, intptr_t intercept_mask,
Mode mode)
: stack_guard_(isolate->stack_guard()),
intercept_mask_(intercept_mask),
......
@@ -18,7 +18,7 @@ class InterruptsScope {
public:
enum Mode { kPostponeInterrupts, kRunInterrupts, kNoop };
-V8_EXPORT_PRIVATE InterruptsScope(Isolate* isolate, int intercept_mask,
+V8_EXPORT_PRIVATE InterruptsScope(Isolate* isolate, intptr_t intercept_mask,
Mode mode);
virtual ~InterruptsScope() {
@@ -33,8 +33,8 @@ class InterruptsScope {
private:
StackGuard* stack_guard_;
-int intercept_mask_;
-int intercepted_flags_;
+intptr_t intercept_mask_;
+intptr_t intercepted_flags_;
Mode mode_;
InterruptsScope* prev_;
......
@@ -8,6 +8,7 @@
#include "src/builtins/builtins.h"
#include "src/codegen/constants-arch.h"
#include "src/codegen/external-reference-table.h"
#include "src/execution/stack-guard.h"
#include "src/execution/thread-local-top.h"
#include "src/roots/roots.h"
#include "src/utils/utils.h"
@@ -27,7 +28,7 @@ class Isolate;
// register.
class IsolateData final {
public:
-IsolateData() = default;
+explicit IsolateData(Isolate* isolate) : stack_guard_(isolate) {}
static constexpr intptr_t kIsolateRootBias = kRootRegisterBias;
@@ -81,6 +82,7 @@ class IsolateData final {
// The FP and PC that are saved right before TurboAssembler::CallCFunction.
Address* fast_c_call_caller_fp_address() { return &fast_c_call_caller_fp_; }
Address* fast_c_call_caller_pc_address() { return &fast_c_call_caller_pc_; }
StackGuard* stack_guard() { return &stack_guard_; }
uint8_t* stack_is_iterable_address() { return &stack_is_iterable_; }
Address fast_c_call_caller_fp() { return fast_c_call_caller_fp_; }
Address fast_c_call_caller_pc() { return fast_c_call_caller_pc_; }
@@ -123,6 +125,7 @@
V(kVirtualCallTargetRegisterOffset, kSystemPointerSize) \
V(kFastCCallCallerFPOffset, kSystemPointerSize) \
V(kFastCCallCallerPCOffset, kSystemPointerSize) \
+V(kStackGuardOffset, StackGuard::kSizeInBytes) \
V(kStackIsIterableOffset, kUInt8Size) \
/* This padding aligns IsolateData size by 8 bytes. */ \
V(kPaddingOffset, \
@@ -175,6 +178,11 @@
// instruction in compiled code.
Address fast_c_call_caller_fp_ = kNullAddress;
Address fast_c_call_caller_pc_ = kNullAddress;
+// Fields related to the system and JS stack. In particular, this contains the
+// stack limit used by stack checks in generated code.
+StackGuard stack_guard_;
// Whether the SafeStackFrameIterator can successfully iterate the current
// stack. Only valid values are 0 or 1.
uint8_t stack_is_iterable_ = 1;
@@ -225,6 +233,7 @@ void IsolateData::AssertPredictableLayout() {
kFastCCallCallerFPOffset);
STATIC_ASSERT(offsetof(IsolateData, fast_c_call_caller_pc_) ==
kFastCCallCallerPCOffset);
+STATIC_ASSERT(offsetof(IsolateData, stack_guard_) == kStackGuardOffset);
STATIC_ASSERT(offsetof(IsolateData, stack_is_iterable_) ==
kStackIsIterableOffset);
STATIC_ASSERT(sizeof(IsolateData) == IsolateData::kSize);
......
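
The kStackGuardOffset entry and the offsetof STATIC_ASSERTs above rely on a common pattern: offsets and sizes that generated code depends on are hand-maintained compile-time constants, and offsetof/sizeof asserts keep them in sync with the real struct layout. A standalone sketch of that pattern, with illustrative names rather than V8's:

#include <cstddef>
#include <cstdint>

struct Guard {
  uintptr_t jslimit_ = 0;
  uintptr_t climit_ = 0;
  // Hand-maintained size so that other headers can use it in constant
  // expressions, e.g. when laying out an enclosing struct.
  static constexpr int kSizeInBytes = 2 * sizeof(uintptr_t);
};
static_assert(Guard::kSizeInBytes == sizeof(Guard),
              "update kSizeInBytes whenever Guard's fields change");

struct Data {
  void* roots_[16] = {};
  Guard guard_;
  // Offset that code generation can bake in as an immediate.
  static constexpr size_t kGuardOffset = 16 * sizeof(void*);
};
static_assert(offsetof(Data, guard_) == Data::kGuardOffset,
              "the hand-written offset drifted from the real layout");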
@@ -2885,9 +2885,9 @@ v8::PageAllocator* Isolate::page_allocator() {
}
Isolate::Isolate(std::unique_ptr<i::IsolateAllocator> isolate_allocator)
-: isolate_allocator_(std::move(isolate_allocator)),
+: isolate_data_(this),
+isolate_allocator_(std::move(isolate_allocator)),
id_(isolate_counter.fetch_add(1, std::memory_order_relaxed)),
-stack_guard_(this),
allocator_(FLAG_trace_zone_stats
? new VerboseAccountingAllocator(&heap_, 256 * KB)
: new AccountingAllocator()),
@@ -3380,7 +3380,7 @@ bool Isolate::Init(ReadOnlyDeserializer* read_only_deserializer,
// will ensure this too, but we don't have to use lockers if we are only
// using one thread.
ExecutionAccess lock(this);
-stack_guard_.InitThread(lock);
+stack_guard()->InitThread(lock);
}
// SetUp the object heap.
......
@@ -902,7 +902,7 @@ class Isolate final : private HiddenFactory {
DCHECK_NOT_NULL(logger_);
return logger_;
}
StackGuard* stack_guard() { return &stack_guard_; }
StackGuard* stack_guard() { return isolate_data()->stack_guard(); }
Heap* heap() { return &heap_; }
ReadOnlyHeap* read_only_heap() const { return read_only_heap_; }
static Isolate* FromHeap(Heap* heap) {
@@ -1678,7 +1678,6 @@ class Isolate final : private HiddenFactory {
std::shared_ptr<Counters> async_counters_;
base::RecursiveMutex break_access_;
Logger* logger_ = nullptr;
-StackGuard stack_guard_;
StubCache* load_stub_cache_ = nullptr;
StubCache* store_stub_cache_ = nullptr;
DeoptimizerData* deoptimizer_data_ = nullptr;
......
@@ -75,7 +75,8 @@ void StackGuard::PushInterruptsScope(InterruptsScope* scope) {
DCHECK_NE(scope->mode_, InterruptsScope::kNoop);
if (scope->mode_ == InterruptsScope::kPostponeInterrupts) {
// Intercept already requested interrupts.
-int intercepted = thread_local_.interrupt_flags_ & scope->intercept_mask_;
+intptr_t intercepted =
+    thread_local_.interrupt_flags_ & scope->intercept_mask_;
scope->intercepted_flags_ = intercepted;
thread_local_.interrupt_flags_ &= ~intercepted;
} else {
@@ -124,7 +125,7 @@ void StackGuard::PopInterruptsScope() {
bool StackGuard::CheckInterrupt(InterruptFlag flag) {
ExecutionAccess access(isolate_);
return thread_local_.interrupt_flags_ & flag;
return (thread_local_.interrupt_flags_ & flag) != 0;
}
void StackGuard::RequestInterrupt(InterruptFlag flag) {
@@ -160,7 +161,7 @@ int StackGuard::FetchAndClearInterrupts() {
ExecutionAccess access(isolate_);
int result = 0;
if (thread_local_.interrupt_flags_ & TERMINATE_EXECUTION) {
if ((thread_local_.interrupt_flags_ & TERMINATE_EXECUTION) != 0) {
// The TERMINATE_EXECUTION interrupt is special, since it terminates
// execution but should leave V8 in a resumable state. If it exists, we only
// fetch and clear that bit. On resume, V8 can continue processing other
@@ -169,7 +170,7 @@ int StackGuard::FetchAndClearInterrupts() {
thread_local_.interrupt_flags_ &= ~TERMINATE_EXECUTION;
if (!has_pending_interrupts(access)) reset_limits(access);
} else {
-result = thread_local_.interrupt_flags_;
+result = static_cast<int>(thread_local_.interrupt_flags_);
thread_local_.interrupt_flags_ = 0;
reset_limits(access);
}
......
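
The widening from int to intptr_t is presumably also why the bit tests above gained explicit != 0 comparisons and why the result is narrowed with static_cast<int>: converting intptr_t to int can lose bits, and converting it to bool happens implicitly, so spelling both out keeps the intent clear. A tiny standalone sketch of the two conversions:

#include <cstdint>

bool IsFlagSet(intptr_t flags, intptr_t flag) {
  // Explicit comparison instead of relying on the implicit
  // intptr_t -> bool conversion.
  return (flags & flag) != 0;
}

int ToLegacyInt(intptr_t flags) {
  // Explicit truncation: on 64-bit targets intptr_t is wider than int.
  return static_cast<int>(flags);
}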
@@ -7,6 +7,7 @@
#include "include/v8-internal.h"
#include "src/base/atomicops.h"
#include "src/common/globals.h"
namespace v8 {
namespace internal {
@@ -89,6 +90,8 @@ class V8_EXPORT_PRIVATE StackGuard final {
// stack overflow, then handle the interruption accordingly.
Object HandleInterrupts();
+static constexpr int kSizeInBytes = 7 * kSystemPointerSize;
private:
bool CheckInterrupt(InterruptFlag flag);
void RequestInterrupt(InterruptFlag flag);
@@ -165,7 +168,7 @@ class V8_EXPORT_PRIVATE StackGuard final {
}
InterruptsScope* interrupt_scopes_;
-int interrupt_flags_;
+intptr_t interrupt_flags_;
};
// TODO(isolates): Technically this could be calculated directly from a
@@ -180,6 +183,8 @@ class V8_EXPORT_PRIVATE StackGuard final {
DISALLOW_COPY_AND_ASSIGN(StackGuard);
};
+STATIC_ASSERT(StackGuard::kSizeInBytes == sizeof(StackGuard));
} // namespace internal
} // namespace v8
......