Commit a6f465d4 authored by Leszek Swirski, committed by Commit Bot

[compiler] Remove disallow scopes

TurboFan creates DisallowHeapAccess scopes, to prevent heap access in
the concurrent parts of the compiler. Then, for parts of the compiler
that do want to access the heap, it either creates Allow* scopes (which
should be avoided since they "punch a hole" in the Disallow* scopes), or
relies on a weakening of Handle::IsDereferenceAllowed that allows
handles owned by a LocalHeap to be dereferenced even if there is a
DisallowHandleDereference scope.
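
A minimal sketch of that pattern (illustrative only, not code from this
change; it assumes the usual assert-scope declarations are in scope):

  void SomeConcurrentCompilerPhase() {  // hypothetical phase name
    DisallowHeapAccess no_heap_access;  // forbids allocation and handle
                                        // dereference for this whole scope
    // ... work that must stay off the heap ...
    {
      // The "hole" mentioned above: handle dereference is re-allowed
      // only within this nested block.
      AllowHandleDereference allow_deref;
      // ... read through a handle ...
    }
  }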

This patch:

  a) Strengthens the implicit requirements around handle dereferencing
     to require a running heap on this thread (either the main-thread
     heap or an un-parked, un-safepointed LocalHeap); see the sketch
     after this list.
  b) Removes the overly strict Disallow scopes in TurboFan, relying
     instead on implicit requirements for allocation/handle
     dereferencing in off-thread code.
  c) Cleans up the "should_disallow_heap_access" predicate to be more
     explicit about what should be disallowed (e.g. property accesses
     can't be computed concurrently).
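
A condensed, illustrative version of requirement (a), based on the
DereferencePersistentHandle cctest further down (isolate, phs and ph are
set up exactly as in that test); this is a sketch, not code added by
this change:

  LocalHeap local_heap(isolate->heap(), ThreadKind::kBackground,
                       std::move(phs));
  {
    UnparkedScope scope(&local_heap);  // the LocalHeap is running here
    CHECK_EQ(42, ph->value());         // dereferencing ph is allowed
  }
  // While the LocalHeap is parked (outside the UnparkedScope),
  // HandleBase::IsDereferenceAllowed() reports the same dereference as
  // disallowed: "Cannot dereference handle owned by non-running local heap".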

Change-Id: Icb56b7764913ac17e2db197a70bb189af88a6978
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2554617
Commit-Queue: Leszek Swirski <leszeks@chromium.org>
Auto-Submit: Leszek Swirski <leszeks@chromium.org>
Reviewed-by: Dominik Inführ <dinfuehr@chromium.org>
Reviewed-by: Georg Neis <neis@chromium.org>
Reviewed-by: Santiago Aboy Solanes <solanes@chromium.org>
Cr-Commit-Position: refs/heads/master@{#71600}
parent 9bccbee4
@@ -119,8 +119,10 @@ OptimizedCompilationJob* OptimizingCompileDispatcher::NextInput(
   if (check_if_flushing) {
     if (mode_ == FLUSH) {
       UnparkedScope scope(local_isolate->heap());
-      AllowHandleDereference allow_handle_dereference;
+      local_isolate->heap()->AttachPersistentHandles(
+          job->compilation_info()->DetachPersistentHandles());
       DisposeCompilationJob(job, true);
+      local_isolate->heap()->DetachPersistentHandles();
       return nullptr;
     }
   }
......
@@ -855,7 +855,7 @@ void CodeGenerator::AssembleSourcePosition(SourcePosition source_position) {
         tasm()->isolate()->concurrent_recompilation_enabled()) {
       buffer << source_position;
     } else {
-      AllowHeapAllocation allocation;
+      AllowGarbageCollection allocation;
       AllowHandleAllocation handles;
       AllowHandleDereference deref;
       buffer << source_position.InliningStack(info);
......
@@ -8,6 +8,7 @@
 #include "src/base/platform/wrappers.h"
 #include "src/codegen/source-position-table.h"
 #include "src/codegen/tick-counter.h"
+#include "src/common/assert-scope.h"
 #include "src/compiler/access-builder.h"
 #include "src/compiler/bytecode-analysis.h"
 #include "src/compiler/compiler-source-position-table.h"
@@ -412,10 +413,6 @@ class BytecodeGraphBuilder {
   NativeContextRef native_context() const { return native_context_; }
   SharedFunctionInfoRef shared_info() const { return shared_info_; }

-  bool should_disallow_heap_access() const {
-    return broker_->is_concurrent_inlining();
-  }
-
 #define DECLARE_VISIT_BYTECODE(name, ...) void Visit##name();
   BYTECODE_LIST(DECLARE_VISIT_BYTECODE)
 #undef DECLARE_VISIT_BYTECODE
@@ -1018,7 +1015,7 @@ BytecodeGraphBuilder::BytecodeGraphBuilder(
       bytecode_analysis_(broker_->GetBytecodeAnalysis(
           bytecode_array().object(), osr_offset,
           flags & BytecodeGraphBuilderFlag::kAnalyzeEnvironmentLiveness,
-          should_disallow_heap_access()
+          broker->is_concurrent_inlining()
              ? SerializationPolicy::kAssumeSerialized
              : SerializationPolicy::kSerializeIfNeeded)),
      environment_(nullptr),
@@ -1175,7 +1172,6 @@ FeedbackSource BytecodeGraphBuilder::CreateFeedbackSource(FeedbackSlot slot) {
 }

 void BytecodeGraphBuilder::CreateGraph() {
-  DisallowHeapAccessIf disallow_heap_access(should_disallow_heap_access());
   SourcePositionTable::Scope pos_scope(source_positions_, start_position_);

   // Set up the basic structure of the graph. Outputs for {Start} are the formal
@@ -1508,7 +1504,8 @@ void BytecodeGraphBuilder::VisitBytecodes() {
     VisitSingleBytecode();
   }

-  if (!should_disallow_heap_access() && has_one_shot_bytecode) {
+  // TODO(leszeks): Increment usage counter on BG thread.
+  if (!FLAG_concurrent_inlining && has_one_shot_bytecode) {
     // (For concurrent inlining this is done in the serializer instead.)
     isolate()->CountUsage(
         v8::Isolate::UseCounterFeature::kOptimizedFunctionWithOneShotBytecode);
@@ -2246,7 +2243,6 @@ void BytecodeGraphBuilder::VisitCreateClosure() {
 }

 void BytecodeGraphBuilder::VisitCreateBlockContext() {
-  DisallowHeapAccessIf no_heap_access(should_disallow_heap_access());
   ScopeInfoRef scope_info(
       broker(), bytecode_iterator().GetConstantForIndexOperand(0, isolate()));
   const Operator* op = javascript()->CreateBlockContext(scope_info.object());
@@ -2255,7 +2251,6 @@ void BytecodeGraphBuilder::VisitCreateBlockContext() {
 }

 void BytecodeGraphBuilder::VisitCreateFunctionContext() {
-  DisallowHeapAccessIf no_heap_access(should_disallow_heap_access());
   ScopeInfoRef scope_info(
       broker(), bytecode_iterator().GetConstantForIndexOperand(0, isolate()));
   uint32_t slots = bytecode_iterator().GetUnsignedImmediateOperand(1);
@@ -2266,7 +2261,6 @@ void BytecodeGraphBuilder::VisitCreateFunctionContext() {
 }

 void BytecodeGraphBuilder::VisitCreateEvalContext() {
-  DisallowHeapAccessIf no_heap_access(should_disallow_heap_access());
   ScopeInfoRef scope_info(
       broker(), bytecode_iterator().GetConstantForIndexOperand(0, isolate()));
   uint32_t slots = bytecode_iterator().GetUnsignedImmediateOperand(1);
@@ -2277,7 +2271,6 @@ void BytecodeGraphBuilder::VisitCreateEvalContext() {
 }

 void BytecodeGraphBuilder::VisitCreateCatchContext() {
-  DisallowHeapAccessIf no_heap_access(should_disallow_heap_access());
   interpreter::Register reg = bytecode_iterator().GetRegisterOperand(0);
   Node* exception = environment()->LookupRegister(reg);
   ScopeInfoRef scope_info(
@@ -2289,7 +2282,6 @@ void BytecodeGraphBuilder::VisitCreateCatchContext() {
 }

 void BytecodeGraphBuilder::VisitCreateWithContext() {
-  DisallowHeapAccessIf no_heap_access(should_disallow_heap_access());
   Node* object =
       environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(0));
   ScopeInfoRef scope_info(
@@ -2409,7 +2401,6 @@ void BytecodeGraphBuilder::VisitCloneObject() {
 }

 void BytecodeGraphBuilder::VisitGetTemplateObject() {
-  DisallowHeapAccessIf no_heap_access(should_disallow_heap_access());
   FeedbackSource source =
       CreateFeedbackSource(bytecode_iterator().GetIndexOperand(1));
   TemplateObjectDescriptionRef description(
......
@@ -53,7 +53,7 @@ CommonOperatorReducer::CommonOperatorReducer(Editor* editor, Graph* graph,
 }

 Reduction CommonOperatorReducer::Reduce(Node* node) {
-  DisallowHeapAccess no_heap_access;
+  DisallowHeapAccessIf no_heap_access(!FLAG_turbo_direct_heap_access);
   switch (node->opcode()) {
     case IrOpcode::kBranch:
       return ReduceBranch(node);
......
@@ -63,7 +63,7 @@ ConstantFoldingReducer::ConstantFoldingReducer(Editor* editor, JSGraph* jsgraph,
 ConstantFoldingReducer::~ConstantFoldingReducer() = default;

 Reduction ConstantFoldingReducer::Reduce(Node* node) {
-  DisallowHeapAccess no_heap_access;
+  DisallowHeapAccessIf no_heap_access(!FLAG_turbo_direct_heap_access);
   if (!NodeProperties::IsConstant(node) && NodeProperties::IsTyped(node) &&
       node->op()->HasProperty(Operator::kEliminatable) &&
       node->opcode() != IrOpcode::kFinishRegion) {
......
@@ -46,7 +46,7 @@ Node* FindDeadInput(Node* node) {
 }  // namespace

 Reduction DeadCodeElimination::Reduce(Node* node) {
-  DisallowHeapAccess no_heap_access;
+  DisallowHeapAccessIf no_heap_access(!FLAG_turbo_direct_heap_access);
   switch (node->opcode()) {
     case IrOpcode::kEnd:
       return ReduceEnd(node);
......
@@ -139,6 +139,7 @@ class V8_EXPORT_PRIVATE ObjectRef {
   Handle<Object> object() const;

   bool equals(const ObjectRef& other) const;
+  bool ShouldHaveBeenSerialized() const;

   bool IsSmi() const;
   int AsSmi() const;
......
@@ -238,7 +238,6 @@ class V8_EXPORT_PRIVATE JSCallReducer final : public AdvancedReducer {
   SimplifiedOperatorBuilder* simplified() const;
   Flags flags() const { return flags_; }
   CompilationDependencies* dependencies() const { return dependencies_; }
-  bool should_disallow_heap_access() const;

   JSGraph* const jsgraph_;
   JSHeapBroker* const broker_;
......
@@ -55,7 +55,6 @@ const int kBlockContextAllocationLimit = 16;
 }  // namespace

 Reduction JSCreateLowering::Reduce(Node* node) {
-  DisallowHeapAccess disallow_heap_access;
   switch (node->opcode()) {
     case IrOpcode::kJSCreate:
       return ReduceJSCreate(node);
......
@@ -142,8 +142,6 @@ JSInliningHeuristic::Candidate JSInliningHeuristic::CollectFunctions(
 }

 Reduction JSInliningHeuristic::Reduce(Node* node) {
-  DisallowHeapAccessIf no_heap_acess(broker()->is_concurrent_inlining());
-
   if (!IrOpcode::IsInlineeOpcode(node->opcode())) return NoChange();

   if (total_inlined_bytecode_size_ >= FLAG_max_inlined_bytecode_size_absolute) {
@@ -246,8 +244,6 @@ Reduction JSInliningHeuristic::Reduce(Node* node) {
 }

 void JSInliningHeuristic::Finalize() {
-  DisallowHeapAccessIf no_heap_acess(broker()->is_concurrent_inlining());
-
   if (candidates_.empty()) return;  // Nothing to do without candidates.
   if (FLAG_trace_turbo_inlining) PrintCandidates();
......
@@ -53,10 +53,6 @@ bool HasOnlyJSArrayMaps(JSHeapBroker* broker,
 }  // namespace

-bool JSNativeContextSpecialization::should_disallow_heap_access() const {
-  return broker()->is_concurrent_inlining();
-}
-
 JSNativeContextSpecialization::JSNativeContextSpecialization(
     Editor* editor, JSGraph* jsgraph, JSHeapBroker* broker, Flags flags,
     CompilationDependencies* dependencies, Zone* zone, Zone* shared_zone)
@@ -73,8 +69,6 @@ JSNativeContextSpecialization::JSNativeContextSpecialization(
       type_cache_(TypeCache::Get()) {}

 Reduction JSNativeContextSpecialization::Reduce(Node* node) {
-  DisallowHeapAccessIf disallow_heap_access(should_disallow_heap_access());
-
   switch (node->opcode()) {
     case IrOpcode::kJSAdd:
       return ReduceJSAdd(node);
@@ -360,7 +354,8 @@ Reduction JSNativeContextSpecialization::ReduceJSGetSuperConstructor(
   }
   JSFunctionRef function = m.Ref(broker()).AsJSFunction();
   MapRef function_map = function.map();
-  if (should_disallow_heap_access() && !function_map.serialized_prototype()) {
+  if (function_map.ShouldHaveBeenSerialized() &&
+      !function_map.serialized_prototype()) {
     TRACE_BROKER_MISSING(broker(), "data for map " << function_map);
     return NoChange();
   }
@@ -411,7 +406,7 @@ Reduction JSNativeContextSpecialization::ReduceJSInstanceOf(Node* node) {
   MapRef receiver_map = receiver_ref.map();

   PropertyAccessInfo access_info = PropertyAccessInfo::Invalid(graph()->zone());
-  if (should_disallow_heap_access()) {
+  if (broker()->is_concurrent_inlining()) {
     access_info = broker()->GetPropertyAccessInfo(
         receiver_map,
         NameRef(broker(), isolate()->factory()->has_instance_symbol()),
@@ -545,7 +540,7 @@ JSNativeContextSpecialization::InferHasInPrototypeChain(
       all = false;
       break;
     }
-    if (should_disallow_heap_access() && !map.serialized_prototype()) {
+    if (map.ShouldHaveBeenSerialized() && !map.serialized_prototype()) {
       TRACE_BROKER_MISSING(broker(), "prototype data for map " << map);
       return kMayBeInPrototypeChain;
     }
@@ -624,7 +619,7 @@ Reduction JSNativeContextSpecialization::ReduceJSOrdinaryHasInstance(
     // OrdinaryHasInstance on bound functions turns into a recursive invocation
     // of the instanceof operator again.
     JSBoundFunctionRef function = m.Ref(broker()).AsJSBoundFunction();
-    if (should_disallow_heap_access() && !function.serialized()) {
+    if (function.ShouldHaveBeenSerialized() && !function.serialized()) {
       TRACE_BROKER_MISSING(broker(), "data for JSBoundFunction " << function);
       return NoChange();
     }
@@ -647,7 +642,7 @@ Reduction JSNativeContextSpecialization::ReduceJSOrdinaryHasInstance(
     // Optimize if we currently know the "prototype" property.
     JSFunctionRef function = m.Ref(broker()).AsJSFunction();
-    if (should_disallow_heap_access() && !function.serialized()) {
+    if (function.ShouldHaveBeenSerialized() && !function.serialized()) {
       TRACE_BROKER_MISSING(broker(), "data for JSFunction " << function);
       return NoChange();
     }
@@ -725,7 +720,7 @@ Reduction JSNativeContextSpecialization::ReduceJSResolvePromise(Node* node) {
   ZoneVector<PropertyAccessInfo> access_infos(graph()->zone());
   AccessInfoFactory access_info_factory(broker(), dependencies(),
                                         graph()->zone());
-  if (!should_disallow_heap_access()) {
+  if (!broker()->is_concurrent_inlining()) {
     access_info_factory.ComputePropertyAccessInfos(
         resolution_maps, factory()->then_string(), AccessMode::kLoad,
         &access_infos);
@@ -1091,7 +1086,7 @@ Reduction JSNativeContextSpecialization::ReduceMinimorphicPropertyAccess(
   MinimorphicLoadPropertyAccessInfo access_info =
       broker()->GetPropertyAccessInfo(
           feedback, source,
-          should_disallow_heap_access()
+          broker()->is_concurrent_inlining()
              ? SerializationPolicy::kAssumeSerialized
              : SerializationPolicy::kSerializeIfNeeded);
   if (access_info.IsInvalid()) return NoChange();
@@ -1195,7 +1190,7 @@ Reduction JSNativeContextSpecialization::ReduceNamedAccess(
     if (map.is_deprecated()) continue;
     PropertyAccessInfo access_info = broker()->GetPropertyAccessInfo(
         map, feedback.name(), access_mode, dependencies(),
-        should_disallow_heap_access()
+        broker()->is_concurrent_inlining()
            ? SerializationPolicy::kAssumeSerialized
            : SerializationPolicy::kSerializeIfNeeded);
     access_infos_for_feedback.push_back(access_info);
@@ -1468,7 +1463,7 @@ Reduction JSNativeContextSpecialization::ReduceJSLoadNamed(Node* node) {
       name.equals(ObjectRef(broker(), factory()->prototype_string()))) {
     // Optimize "prototype" property of functions.
     JSFunctionRef function = object.AsJSFunction();
-    if (should_disallow_heap_access() && !function.serialized()) {
+    if (function.ShouldHaveBeenSerialized() && !function.serialized()) {
       TRACE_BROKER_MISSING(broker(), "data for function " << function);
       return NoChange();
     }
@@ -1780,7 +1775,8 @@ Reduction JSNativeContextSpecialization::ReduceElementAccess(
   base::Optional<JSTypedArrayRef> typed_array =
       GetTypedArrayConstant(broker(), receiver);
   if (typed_array.has_value()) {
-    if (should_disallow_heap_access() && !typed_array->serialized()) {
+    if (typed_array->ShouldHaveBeenSerialized() &&
+        !typed_array->serialized()) {
       TRACE_BROKER_MISSING(broker(), "data for typed array " << *typed_array);
       return NoChange();
     }
@@ -1999,8 +1995,6 @@ Reduction JSNativeContextSpecialization::ReduceElementLoadFromHeapConstant(
 Reduction JSNativeContextSpecialization::ReducePropertyAccess(
     Node* node, Node* key, base::Optional<NameRef> static_name, Node* value,
     FeedbackSource const& source, AccessMode access_mode) {
-  DisallowHeapAccessIf disallow_heap_access(should_disallow_heap_access());
-
   DCHECK_EQ(key == nullptr, static_name.has_value());
   DCHECK(node->opcode() == IrOpcode::kJSLoadProperty ||
          node->opcode() == IrOpcode::kJSStoreProperty ||
......
@@ -261,7 +261,6 @@ class V8_EXPORT_PRIVATE JSNativeContextSpecialization final
   CompilationDependencies* dependencies() const { return dependencies_; }
   Zone* zone() const { return zone_; }
   Zone* shared_zone() const { return shared_zone_; }
-  bool should_disallow_heap_access() const;

   JSGraph* const jsgraph_;
   JSHeapBroker* const broker_;
......
@@ -2367,8 +2367,6 @@ Reduction JSTypedLowering::ReduceJSResolvePromise(Node* node) {
 }

 Reduction JSTypedLowering::Reduce(Node* node) {
-  DisallowHeapAccess no_heap_access;
-
   const IrOpcode::Value opcode = node->opcode();
   if (broker()->generate_full_feedback_collection() &&
       IrOpcode::IsFeedbackCollectingOpcode(opcode)) {
@@ -2480,7 +2478,6 @@ Reduction JSTypedLowering::Reduce(Node* node) {
   return NoChange();
 }

 Factory* JSTypedLowering::factory() const { return jsgraph()->factory(); }
......
@@ -41,7 +41,7 @@ SimplifiedOperatorReducer::~SimplifiedOperatorReducer() = default;
 Reduction SimplifiedOperatorReducer::Reduce(Node* node) {
-  DisallowHeapAccess no_heap_access;
+  DisallowHeapAccessIf no_heap_access(!FLAG_turbo_direct_heap_access);
   switch (node->opcode()) {
     case IrOpcode::kBooleanNot: {
       HeapObjectMatcher m(node->InputAt(0));
......
@@ -17,7 +17,7 @@ TypeNarrowingReducer::TypeNarrowingReducer(Editor* editor, JSGraph* jsgraph,
 TypeNarrowingReducer::~TypeNarrowingReducer() = default;

 Reduction TypeNarrowingReducer::Reduce(Node* node) {
-  DisallowHeapAccess no_heap_access;
+  DisallowHeapAccessIf no_heap_access(!FLAG_turbo_direct_heap_access);
   Type new_type = Type::Any();
......
@@ -34,7 +34,7 @@ TypedOptimization::TypedOptimization(Editor* editor,
 TypedOptimization::~TypedOptimization() = default;

 Reduction TypedOptimization::Reduce(Node* node) {
-  DisallowHeapAccess no_heap_access;
+  DisallowHeapAccessIf no_heap_access(!FLAG_turbo_direct_heap_access);
   switch (node->opcode()) {
     case IrOpcode::kConvertReceiver:
       return ReduceConvertReceiver(node);
......
@@ -44,20 +44,10 @@ bool HandleBase::IsDereferenceAllowed() const {
     return true;
   }
   if (isolate->IsBuiltinsTableHandleLocation(location_)) return true;
-  if (!AllowHandleDereference::IsAllowed()) return false;

-  if (FLAG_local_heaps) {
-    LocalHeap* local_heap = LocalHeap::Current();
-    if (local_heap == nullptr) {
-      // We are on the main thread and can thus load the LocalHeap from the
-      // Isolate itself. The DCHECK makes sure that we are not accidentally
-      // dereferencing a handle on a background thread without an active
-      // LocalHeap.
-      DCHECK_EQ(ThreadId::Current(), isolate->thread_id());
-      local_heap = isolate->main_thread_local_heap();
-      DCHECK_NOT_NULL(local_heap);
-    }
-
+  LocalHeap* local_heap = LocalHeap::Current();
+  if (FLAG_local_heaps && local_heap) {
     // Local heap can't access handles when parked
     if (!local_heap->IsHandleDereferenceAllowed()) {
       StdoutStream{} << "Cannot dereference handle owned by "
@@ -65,14 +55,20 @@ bool HandleBase::IsDereferenceAllowed() const {
       return false;
     }

-    if (local_heap->ContainsPersistentHandle(location_) ||
-        local_heap->ContainsLocalHandle(location_)) {
-      // The current thread owns the handle and thus can dereference it.
-      return true;
-    }
+    // The current thread owns the handle and thus can dereference it.
+    return local_heap->ContainsPersistentHandle(location_) ||
+           local_heap->ContainsLocalHandle(location_);
+  }
+  // If the local_heap is null, we're on the main thread -- if we were to check
+  // main thread HandleScopes here, we should additionally check the main-thread
+  // LocalHeap.
+  DCHECK_EQ(ThreadId::Current(), isolate->thread_id());
+  if (FLAG_local_heaps) {
+    DCHECK_NOT_NULL(isolate->main_thread_local_heap());
   }
-  return AllowHandleDereference::IsAllowed();
+
+  // TODO(leszeks): Check if the main thread owns this handle.
+  return true;
 }
 #endif
......
@@ -117,6 +117,10 @@
   'test-code-stub-assembler/PopAndReturnFromJSBuiltinWithStackParameters' : [FAIL],
   'test-code-stub-assembler/PopAndReturnFromTFCBuiltinWithStackParameters' : [FAIL],

+  # These tests are supposed to fail in a slow DCHECK, skip them otherwise.
+  'test-local-handles/DereferenceLocalHandleFailsWhenDisallowed': [SKIP, ['mode == debug', FAIL]],
+  'test-persistent-handles/DereferencePersistentHandleFailsWhenDisallowed': [SKIP, ['mode == debug', FAIL]],
+
   ############################################################################
   # Slow tests.
   'test-debug/CallFunctionInDebugger': [PASS, ['mode == debug', SLOW]],
@@ -129,7 +133,6 @@
 ##############################################################################
 ['mode == debug', {
-  # These tests are supposed to fail in a DCHECK.
   'test-persistent-handles/NewPersistentHandleFailsWhenParked': [FAIL],
   'test-persistent-handles/NewPersistentHandleFailsWhenParkedExplicit': [FAIL],
 }],
......
@@ -8,6 +8,7 @@
 #include "src/base/platform/condition-variable.h"
 #include "src/base/platform/mutex.h"
 #include "src/base/platform/semaphore.h"
+#include "src/common/assert-scope.h"
 #include "src/common/globals.h"
 #include "src/handles/handles-inl.h"
 #include "src/handles/local-handles-inl.h"
@@ -128,6 +129,29 @@ TEST(DereferenceLocalHandle) {
     LocalHandleScope scope(&local_heap);
     Handle<HeapNumber> local_number = handle(*ph, &local_heap);
     CHECK_EQ(42, local_number->value());
+  }
+}
+
+TEST(DereferenceLocalHandleFailsWhenDisallowed) {
+  heap::EnsureFlagLocalHeapsEnabled();
+  CcTest::InitializeVM();
+  Isolate* isolate = CcTest::i_isolate();
+
+  // Create a PersistentHandle to create the LocalHandle, and thus not have a
+  // HandleScope present to override the LocalHandleScope.
+  std::unique_ptr<PersistentHandles> phs = isolate->NewPersistentHandles();
+  Handle<HeapNumber> ph;
+  {
+    HandleScope handle_scope(isolate);
+    Handle<HeapNumber> number = isolate->factory()->NewHeapNumber(42.0);
+    ph = phs->NewHandle(number);
+  }
+  {
+    LocalHeap local_heap(isolate->heap(), ThreadKind::kBackground,
+                         std::move(phs));
+    UnparkedScope unparked_scope(&local_heap);
+    LocalHandleScope scope(&local_heap);
+    Handle<HeapNumber> local_number = handle(*ph, &local_heap);
     DisallowHandleDereference disallow_scope;
     CHECK_EQ(42, local_number->value());
   }
......
@@ -8,6 +8,7 @@
 #include "src/base/platform/condition-variable.h"
 #include "src/base/platform/mutex.h"
 #include "src/base/platform/semaphore.h"
+#include "src/common/assert-scope.h"
 #include "src/handles/handles-inl.h"
 #include "src/handles/local-handles-inl.h"
 #include "src/handles/persistent-handles.h"
@@ -134,6 +135,25 @@ TEST(DereferencePersistentHandle) {
                          std::move(phs));
     UnparkedScope scope(&local_heap);
     CHECK_EQ(42, ph->value());
+  }
+}
+
+TEST(DereferencePersistentHandleFailsWhenDisallowed) {
+  heap::EnsureFlagLocalHeapsEnabled();
+  CcTest::InitializeVM();
+  Isolate* isolate = CcTest::i_isolate();
+
+  std::unique_ptr<PersistentHandles> phs = isolate->NewPersistentHandles();
+  Handle<HeapNumber> ph;
+  {
+    HandleScope handle_scope(isolate);
+    Handle<HeapNumber> number = isolate->factory()->NewHeapNumber(42.0);
+    ph = phs->NewHandle(number);
+  }
+  {
+    LocalHeap local_heap(isolate->heap(), ThreadKind::kBackground,
+                         std::move(phs));
+    UnparkedScope scope(&local_heap);
     DisallowHandleDereference disallow_scope;
     CHECK_EQ(42, ph->value());
   }
......