Commit a6f465d4 authored by Leszek Swirski, committed by Commit Bot

[compiler] Remove disallow scopes

TurboFan creates DisallowHeapAccess scopes to prevent heap access in
the concurrent parts of the compiler. For the parts that do want to
access the heap, it then either creates Allow* scopes (which should be
avoided, since they "punch a hole" in the Disallow* scopes) or relies
on a weakening of Handle::IsDereferenceAllowed that lets handles owned
by a LocalHeap be dereferenced even inside a DisallowHandleDereference
scope.
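
For readers unfamiliar with V8's assert scopes, the mechanism described
above can be modeled in a few lines. This is a simplified stand-in for
src/common/assert-scope.h (which uses per-thread bitfields and several
scope types), not the real API:

  #include <cassert>

  thread_local bool heap_access_allowed = true;

  class DisallowHeapAccessModel {
   public:
    DisallowHeapAccessModel() : old_(heap_access_allowed) {
      heap_access_allowed = false;
    }
    ~DisallowHeapAccessModel() { heap_access_allowed = old_; }

   private:
    bool old_;
  };

  class AllowHeapAccessModel {
   public:
    AllowHeapAccessModel() : old_(heap_access_allowed) {
      heap_access_allowed = true;  // "punches a hole" in any enclosing Disallow
    }
    ~AllowHeapAccessModel() { heap_access_allowed = old_; }

   private:
    bool old_;
  };

  int main() {
    DisallowHeapAccessModel disallow;  // concurrent part of the compiler
    assert(!heap_access_allowed);
    {
      AllowHeapAccessModel allow;      // heap access legal again in this block
      assert(heap_access_allowed);
    }
    assert(!heap_access_allowed);      // the hole closes with the Allow scope
  }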

This patch:

  a) Strengthens the implicit requirements around handle dereferencing
     to require a running heap on this thread (either main-thread heap
     or an un-parked, un-safepointed LocalHeap).
  b) Removes the overly strict Disallow scopes in TurboFan, relying
     instead on implicit requirements for allocation/handle
     dereferencing in off-thread code.
  c) Cleans up the "should_disallow_heap_access" predicate to be more
     explicit about what should be disallowed (e.g. property accesses
     can't be computed concurrently).

Change-Id: Icb56b7764913ac17e2db197a70bb189af88a6978
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2554617
Commit-Queue: Leszek Swirski <leszeks@chromium.org>
Auto-Submit: Leszek Swirski <leszeks@chromium.org>
Reviewed-by: Dominik Inführ <dinfuehr@chromium.org>
Reviewed-by: Georg Neis <neis@chromium.org>
Reviewed-by: Santiago Aboy Solanes <solanes@chromium.org>
Cr-Commit-Position: refs/heads/master@{#71600}
parent 9bccbee4
......@@ -119,8 +119,10 @@ OptimizedCompilationJob* OptimizingCompileDispatcher::NextInput(
if (check_if_flushing) {
if (mode_ == FLUSH) {
UnparkedScope scope(local_isolate->heap());
AllowHandleDereference allow_handle_dereference;
local_isolate->heap()->AttachPersistentHandles(
job->compilation_info()->DetachPersistentHandles());
DisposeCompilationJob(job, true);
local_isolate->heap()->DetachPersistentHandles();
return nullptr;
}
}
......
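
The attach/detach calls added above follow from point (a) of the commit
message: DisposeCompilationJob dereferences handles owned by the flushed
job, so those handles must temporarily belong to the dispatcher thread's
LocalHeap (and the UnparkedScope satisfies the un-parked requirement). A
toy model of that ownership rule, with hypothetical names; V8's real
check is HandleBase::IsDereferenceAllowed, whose diff appears further
down:

  #include <cassert>
  #include <set>

  // Toy model: a handle may be dereferenced only by the heap that owns it.
  struct ToyLocalHeap {
    std::set<const int*> owned;  // locations of attached persistent handles
    void Attach(const int* loc) { owned.insert(loc); }
    void Detach(const int* loc) { owned.erase(loc); }
    bool MayDereference(const int* loc) const { return owned.count(loc) > 0; }
  };

  int main() {
    int slot = 42;  // stands in for a persistent handle location
    ToyLocalHeap local_heap;
    local_heap.Attach(&slot);                  // AttachPersistentHandles(...)
    assert(local_heap.MayDereference(&slot));  // dispose may now dereference
    local_heap.Detach(&slot);                  // DetachPersistentHandles()
    assert(!local_heap.MayDereference(&slot));
  }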
......@@ -855,7 +855,7 @@ void CodeGenerator::AssembleSourcePosition(SourcePosition source_position) {
tasm()->isolate()->concurrent_recompilation_enabled()) {
buffer << source_position;
} else {
AllowHeapAllocation allocation;
AllowGarbageCollection allocation;
AllowHandleAllocation handles;
AllowHandleDereference deref;
buffer << source_position.InliningStack(info);
......
......@@ -8,6 +8,7 @@
#include "src/base/platform/wrappers.h"
#include "src/codegen/source-position-table.h"
#include "src/codegen/tick-counter.h"
#include "src/common/assert-scope.h"
#include "src/compiler/access-builder.h"
#include "src/compiler/bytecode-analysis.h"
#include "src/compiler/compiler-source-position-table.h"
......@@ -412,10 +413,6 @@ class BytecodeGraphBuilder {
NativeContextRef native_context() const { return native_context_; }
SharedFunctionInfoRef shared_info() const { return shared_info_; }
bool should_disallow_heap_access() const {
return broker_->is_concurrent_inlining();
}
#define DECLARE_VISIT_BYTECODE(name, ...) void Visit##name();
BYTECODE_LIST(DECLARE_VISIT_BYTECODE)
#undef DECLARE_VISIT_BYTECODE
......@@ -1018,7 +1015,7 @@ BytecodeGraphBuilder::BytecodeGraphBuilder(
bytecode_analysis_(broker_->GetBytecodeAnalysis(
bytecode_array().object(), osr_offset,
flags & BytecodeGraphBuilderFlag::kAnalyzeEnvironmentLiveness,
should_disallow_heap_access()
broker->is_concurrent_inlining()
? SerializationPolicy::kAssumeSerialized
: SerializationPolicy::kSerializeIfNeeded)),
environment_(nullptr),
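
The broker->is_concurrent_inlining() ternary above recurs throughout the
patch: with concurrent inlining the compiler must not fall back to the
heap, so the broker assumes everything it needs was serialized up front
and reports anything missing rather than fetching it. A self-contained
sketch of that policy, with a hypothetical ToyRef standing in for the
broker's ref types:

  #include <iostream>
  #include <optional>

  enum class SerializationPolicy { kAssumeSerialized, kSerializeIfNeeded };

  // ToyRef stands in for broker ref types: data is either serialized
  // already or must be read from the heap (only legal off the concurrent
  // path).
  struct ToyRef {
    std::optional<int> serialized_data;

    int ReadFromHeap() const { return 42; }  // stands in for a heap read

    std::optional<int> Get(SerializationPolicy policy) const {
      if (serialized_data.has_value()) return serialized_data;
      if (policy == SerializationPolicy::kAssumeSerialized) {
        // Concurrent inlining: data should have been serialized up front,
        // so report it missing; the caller bails out (NoChange()).
        std::cerr << "broker missing data\n";
        return std::nullopt;
      }
      return ReadFromHeap();  // kSerializeIfNeeded: fall back to the heap
    }
  };

  int main() {
    ToyRef ref{std::nullopt};
    bool concurrent_inlining = true;
    SerializationPolicy policy = concurrent_inlining
                                     ? SerializationPolicy::kAssumeSerialized
                                     : SerializationPolicy::kSerializeIfNeeded;
    if (!ref.Get(policy).has_value()) return 0;  // i.e. NoChange()
    return 0;
  }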
......@@ -1175,7 +1172,6 @@ FeedbackSource BytecodeGraphBuilder::CreateFeedbackSource(FeedbackSlot slot) {
}
void BytecodeGraphBuilder::CreateGraph() {
DisallowHeapAccessIf disallow_heap_access(should_disallow_heap_access());
SourcePositionTable::Scope pos_scope(source_positions_, start_position_);
// Set up the basic structure of the graph. Outputs for {Start} are the formal
......@@ -1508,7 +1504,8 @@ void BytecodeGraphBuilder::VisitBytecodes() {
VisitSingleBytecode();
}
if (!should_disallow_heap_access() && has_one_shot_bytecode) {
// TODO(leszeks): Increment usage counter on BG thread.
if (!FLAG_concurrent_inlining && has_one_shot_bytecode) {
// (For concurrent inlining this is done in the serializer instead.)
isolate()->CountUsage(
v8::Isolate::UseCounterFeature::kOptimizedFunctionWithOneShotBytecode);
......@@ -2246,7 +2243,6 @@ void BytecodeGraphBuilder::VisitCreateClosure() {
}
void BytecodeGraphBuilder::VisitCreateBlockContext() {
DisallowHeapAccessIf no_heap_access(should_disallow_heap_access());
ScopeInfoRef scope_info(
broker(), bytecode_iterator().GetConstantForIndexOperand(0, isolate()));
const Operator* op = javascript()->CreateBlockContext(scope_info.object());
......@@ -2255,7 +2251,6 @@ void BytecodeGraphBuilder::VisitCreateBlockContext() {
}
void BytecodeGraphBuilder::VisitCreateFunctionContext() {
DisallowHeapAccessIf no_heap_access(should_disallow_heap_access());
ScopeInfoRef scope_info(
broker(), bytecode_iterator().GetConstantForIndexOperand(0, isolate()));
uint32_t slots = bytecode_iterator().GetUnsignedImmediateOperand(1);
......@@ -2266,7 +2261,6 @@ void BytecodeGraphBuilder::VisitCreateFunctionContext() {
}
void BytecodeGraphBuilder::VisitCreateEvalContext() {
DisallowHeapAccessIf no_heap_access(should_disallow_heap_access());
ScopeInfoRef scope_info(
broker(), bytecode_iterator().GetConstantForIndexOperand(0, isolate()));
uint32_t slots = bytecode_iterator().GetUnsignedImmediateOperand(1);
......@@ -2277,7 +2271,6 @@ void BytecodeGraphBuilder::VisitCreateEvalContext() {
}
void BytecodeGraphBuilder::VisitCreateCatchContext() {
DisallowHeapAccessIf no_heap_access(should_disallow_heap_access());
interpreter::Register reg = bytecode_iterator().GetRegisterOperand(0);
Node* exception = environment()->LookupRegister(reg);
ScopeInfoRef scope_info(
......@@ -2289,7 +2282,6 @@ void BytecodeGraphBuilder::VisitCreateCatchContext() {
}
void BytecodeGraphBuilder::VisitCreateWithContext() {
DisallowHeapAccessIf no_heap_access(should_disallow_heap_access());
Node* object =
environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(0));
ScopeInfoRef scope_info(
......@@ -2409,7 +2401,6 @@ void BytecodeGraphBuilder::VisitCloneObject() {
}
void BytecodeGraphBuilder::VisitGetTemplateObject() {
DisallowHeapAccessIf no_heap_access(should_disallow_heap_access());
FeedbackSource source =
CreateFeedbackSource(bytecode_iterator().GetIndexOperand(1));
TemplateObjectDescriptionRef description(
......
......@@ -53,7 +53,7 @@ CommonOperatorReducer::CommonOperatorReducer(Editor* editor, Graph* graph,
}
Reduction CommonOperatorReducer::Reduce(Node* node) {
DisallowHeapAccess no_heap_access;
DisallowHeapAccessIf no_heap_access(!FLAG_turbo_direct_heap_access);
switch (node->opcode()) {
case IrOpcode::kBranch:
return ReduceBranch(node);
......
......@@ -63,7 +63,7 @@ ConstantFoldingReducer::ConstantFoldingReducer(Editor* editor, JSGraph* jsgraph,
ConstantFoldingReducer::~ConstantFoldingReducer() = default;
Reduction ConstantFoldingReducer::Reduce(Node* node) {
DisallowHeapAccess no_heap_access;
DisallowHeapAccessIf no_heap_access(!FLAG_turbo_direct_heap_access);
if (!NodeProperties::IsConstant(node) && NodeProperties::IsTyped(node) &&
node->op()->HasProperty(Operator::kEliminatable) &&
node->opcode() != IrOpcode::kFinishRegion) {
......
......@@ -46,7 +46,7 @@ Node* FindDeadInput(Node* node) {
} // namespace
Reduction DeadCodeElimination::Reduce(Node* node) {
DisallowHeapAccess no_heap_access;
DisallowHeapAccessIf no_heap_access(!FLAG_turbo_direct_heap_access);
switch (node->opcode()) {
case IrOpcode::kEnd:
return ReduceEnd(node);
......
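
DisallowHeapAccessIf, substituted into the three reducers above, is the
conditional variant: it acts as a Disallow scope only when its argument
is true. With --turbo-direct-heap-access the reducers are allowed to
read the heap directly, so the scope becomes a no-op. A minimal model of
the conditional form (again a stand-in, not the real assert-scope.h
types):

  #include <cassert>
  #include <optional>

  thread_local bool heap_access_allowed = true;

  struct DisallowScope {
    bool old_ = heap_access_allowed;  // captured before the body runs
    DisallowScope() { heap_access_allowed = false; }
    ~DisallowScope() { heap_access_allowed = old_; }
  };

  // Engages the Disallow scope only when the condition holds.
  struct DisallowScopeIf {
    std::optional<DisallowScope> scope_;
    explicit DisallowScopeIf(bool condition) {
      if (condition) scope_.emplace();
    }
  };

  int main() {
    bool FLAG_turbo_direct_heap_access = true;
    DisallowScopeIf guard(!FLAG_turbo_direct_heap_access);
    // With direct heap access enabled, the reducer may still touch the heap.
    assert(heap_access_allowed);
  }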
......@@ -139,6 +139,7 @@ class V8_EXPORT_PRIVATE ObjectRef {
Handle<Object> object() const;
bool equals(const ObjectRef& other) const;
bool ShouldHaveBeenSerialized() const;
bool IsSmi() const;
int AsSmi() const;
......
This diff is collapsed.
......@@ -238,7 +238,6 @@ class V8_EXPORT_PRIVATE JSCallReducer final : public AdvancedReducer {
SimplifiedOperatorBuilder* simplified() const;
Flags flags() const { return flags_; }
CompilationDependencies* dependencies() const { return dependencies_; }
bool should_disallow_heap_access() const;
JSGraph* const jsgraph_;
JSHeapBroker* const broker_;
......
......@@ -55,7 +55,6 @@ const int kBlockContextAllocationLimit = 16;
} // namespace
Reduction JSCreateLowering::Reduce(Node* node) {
DisallowHeapAccess disallow_heap_access;
switch (node->opcode()) {
case IrOpcode::kJSCreate:
return ReduceJSCreate(node);
......
This diff is collapsed.
......@@ -142,8 +142,6 @@ JSInliningHeuristic::Candidate JSInliningHeuristic::CollectFunctions(
}
Reduction JSInliningHeuristic::Reduce(Node* node) {
DisallowHeapAccessIf no_heap_acess(broker()->is_concurrent_inlining());
if (!IrOpcode::IsInlineeOpcode(node->opcode())) return NoChange();
if (total_inlined_bytecode_size_ >= FLAG_max_inlined_bytecode_size_absolute) {
......@@ -246,8 +244,6 @@ Reduction JSInliningHeuristic::Reduce(Node* node) {
}
void JSInliningHeuristic::Finalize() {
DisallowHeapAccessIf no_heap_acess(broker()->is_concurrent_inlining());
if (candidates_.empty()) return; // Nothing to do without candidates.
if (FLAG_trace_turbo_inlining) PrintCandidates();
......
......@@ -53,10 +53,6 @@ bool HasOnlyJSArrayMaps(JSHeapBroker* broker,
} // namespace
bool JSNativeContextSpecialization::should_disallow_heap_access() const {
return broker()->is_concurrent_inlining();
}
JSNativeContextSpecialization::JSNativeContextSpecialization(
Editor* editor, JSGraph* jsgraph, JSHeapBroker* broker, Flags flags,
CompilationDependencies* dependencies, Zone* zone, Zone* shared_zone)
......@@ -73,8 +69,6 @@ JSNativeContextSpecialization::JSNativeContextSpecialization(
type_cache_(TypeCache::Get()) {}
Reduction JSNativeContextSpecialization::Reduce(Node* node) {
DisallowHeapAccessIf disallow_heap_access(should_disallow_heap_access());
switch (node->opcode()) {
case IrOpcode::kJSAdd:
return ReduceJSAdd(node);
......@@ -360,7 +354,8 @@ Reduction JSNativeContextSpecialization::ReduceJSGetSuperConstructor(
}
JSFunctionRef function = m.Ref(broker()).AsJSFunction();
MapRef function_map = function.map();
if (should_disallow_heap_access() && !function_map.serialized_prototype()) {
if (function_map.ShouldHaveBeenSerialized() &&
!function_map.serialized_prototype()) {
TRACE_BROKER_MISSING(broker(), "data for map " << function_map);
return NoChange();
}
......@@ -411,7 +406,7 @@ Reduction JSNativeContextSpecialization::ReduceJSInstanceOf(Node* node) {
MapRef receiver_map = receiver_ref.map();
PropertyAccessInfo access_info = PropertyAccessInfo::Invalid(graph()->zone());
if (should_disallow_heap_access()) {
if (broker()->is_concurrent_inlining()) {
access_info = broker()->GetPropertyAccessInfo(
receiver_map,
NameRef(broker(), isolate()->factory()->has_instance_symbol()),
......@@ -545,7 +540,7 @@ JSNativeContextSpecialization::InferHasInPrototypeChain(
all = false;
break;
}
if (should_disallow_heap_access() && !map.serialized_prototype()) {
if (map.ShouldHaveBeenSerialized() && !map.serialized_prototype()) {
TRACE_BROKER_MISSING(broker(), "prototype data for map " << map);
return kMayBeInPrototypeChain;
}
......@@ -624,7 +619,7 @@ Reduction JSNativeContextSpecialization::ReduceJSOrdinaryHasInstance(
// OrdinaryHasInstance on bound functions turns into a recursive invocation
// of the instanceof operator again.
JSBoundFunctionRef function = m.Ref(broker()).AsJSBoundFunction();
if (should_disallow_heap_access() && !function.serialized()) {
if (function.ShouldHaveBeenSerialized() && !function.serialized()) {
TRACE_BROKER_MISSING(broker(), "data for JSBoundFunction " << function);
return NoChange();
}
......@@ -647,7 +642,7 @@ Reduction JSNativeContextSpecialization::ReduceJSOrdinaryHasInstance(
// Optimize if we currently know the "prototype" property.
JSFunctionRef function = m.Ref(broker()).AsJSFunction();
if (should_disallow_heap_access() && !function.serialized()) {
if (function.ShouldHaveBeenSerialized() && !function.serialized()) {
TRACE_BROKER_MISSING(broker(), "data for JSFunction " << function);
return NoChange();
}
......@@ -725,7 +720,7 @@ Reduction JSNativeContextSpecialization::ReduceJSResolvePromise(Node* node) {
ZoneVector<PropertyAccessInfo> access_infos(graph()->zone());
AccessInfoFactory access_info_factory(broker(), dependencies(),
graph()->zone());
if (!should_disallow_heap_access()) {
if (!broker()->is_concurrent_inlining()) {
access_info_factory.ComputePropertyAccessInfos(
resolution_maps, factory()->then_string(), AccessMode::kLoad,
&access_infos);
......@@ -1091,7 +1086,7 @@ Reduction JSNativeContextSpecialization::ReduceMinimorphicPropertyAccess(
MinimorphicLoadPropertyAccessInfo access_info =
broker()->GetPropertyAccessInfo(
feedback, source,
should_disallow_heap_access()
broker()->is_concurrent_inlining()
? SerializationPolicy::kAssumeSerialized
: SerializationPolicy::kSerializeIfNeeded);
if (access_info.IsInvalid()) return NoChange();
......@@ -1195,7 +1190,7 @@ Reduction JSNativeContextSpecialization::ReduceNamedAccess(
if (map.is_deprecated()) continue;
PropertyAccessInfo access_info = broker()->GetPropertyAccessInfo(
map, feedback.name(), access_mode, dependencies(),
should_disallow_heap_access()
broker()->is_concurrent_inlining()
? SerializationPolicy::kAssumeSerialized
: SerializationPolicy::kSerializeIfNeeded);
access_infos_for_feedback.push_back(access_info);
......@@ -1468,7 +1463,7 @@ Reduction JSNativeContextSpecialization::ReduceJSLoadNamed(Node* node) {
name.equals(ObjectRef(broker(), factory()->prototype_string()))) {
// Optimize "prototype" property of functions.
JSFunctionRef function = object.AsJSFunction();
if (should_disallow_heap_access() && !function.serialized()) {
if (function.ShouldHaveBeenSerialized() && !function.serialized()) {
TRACE_BROKER_MISSING(broker(), "data for function " << function);
return NoChange();
}
......@@ -1780,7 +1775,8 @@ Reduction JSNativeContextSpecialization::ReduceElementAccess(
base::Optional<JSTypedArrayRef> typed_array =
GetTypedArrayConstant(broker(), receiver);
if (typed_array.has_value()) {
if (should_disallow_heap_access() && !typed_array->serialized()) {
if (typed_array->ShouldHaveBeenSerialized() &&
!typed_array->serialized()) {
TRACE_BROKER_MISSING(broker(), "data for typed array " << *typed_array);
return NoChange();
}
......@@ -1999,8 +1995,6 @@ Reduction JSNativeContextSpecialization::ReduceElementLoadFromHeapConstant(
Reduction JSNativeContextSpecialization::ReducePropertyAccess(
Node* node, Node* key, base::Optional<NameRef> static_name, Node* value,
FeedbackSource const& source, AccessMode access_mode) {
DisallowHeapAccessIf disallow_heap_access(should_disallow_heap_access());
DCHECK_EQ(key == nullptr, static_name.has_value());
DCHECK(node->opcode() == IrOpcode::kJSLoadProperty ||
node->opcode() == IrOpcode::kJSStoreProperty ||
......
......@@ -261,7 +261,6 @@ class V8_EXPORT_PRIVATE JSNativeContextSpecialization final
CompilationDependencies* dependencies() const { return dependencies_; }
Zone* zone() const { return zone_; }
Zone* shared_zone() const { return shared_zone_; }
bool should_disallow_heap_access() const;
JSGraph* const jsgraph_;
JSHeapBroker* const broker_;
......
......@@ -2367,8 +2367,6 @@ Reduction JSTypedLowering::ReduceJSResolvePromise(Node* node) {
}
Reduction JSTypedLowering::Reduce(Node* node) {
DisallowHeapAccess no_heap_access;
const IrOpcode::Value opcode = node->opcode();
if (broker()->generate_full_feedback_collection() &&
IrOpcode::IsFeedbackCollectingOpcode(opcode)) {
......@@ -2480,7 +2478,6 @@ Reduction JSTypedLowering::Reduce(Node* node) {
return NoChange();
}
Factory* JSTypedLowering::factory() const { return jsgraph()->factory(); }
......
......@@ -41,7 +41,7 @@ SimplifiedOperatorReducer::~SimplifiedOperatorReducer() = default;
Reduction SimplifiedOperatorReducer::Reduce(Node* node) {
DisallowHeapAccess no_heap_access;
DisallowHeapAccessIf no_heap_access(!FLAG_turbo_direct_heap_access);
switch (node->opcode()) {
case IrOpcode::kBooleanNot: {
HeapObjectMatcher m(node->InputAt(0));
......
......@@ -17,7 +17,7 @@ TypeNarrowingReducer::TypeNarrowingReducer(Editor* editor, JSGraph* jsgraph,
TypeNarrowingReducer::~TypeNarrowingReducer() = default;
Reduction TypeNarrowingReducer::Reduce(Node* node) {
DisallowHeapAccess no_heap_access;
DisallowHeapAccessIf no_heap_access(!FLAG_turbo_direct_heap_access);
Type new_type = Type::Any();
......
......@@ -34,7 +34,7 @@ TypedOptimization::TypedOptimization(Editor* editor,
TypedOptimization::~TypedOptimization() = default;
Reduction TypedOptimization::Reduce(Node* node) {
DisallowHeapAccess no_heap_access;
DisallowHeapAccessIf no_heap_access(!FLAG_turbo_direct_heap_access);
switch (node->opcode()) {
case IrOpcode::kConvertReceiver:
return ReduceConvertReceiver(node);
......
......@@ -44,20 +44,10 @@ bool HandleBase::IsDereferenceAllowed() const {
return true;
}
if (isolate->IsBuiltinsTableHandleLocation(location_)) return true;
if (!AllowHandleDereference::IsAllowed()) return false;
if (FLAG_local_heaps) {
LocalHeap* local_heap = LocalHeap::Current();
if (local_heap == nullptr) {
// We are on the main thread and can thus load the LocalHeap from the
// Isolate itself. The DCHECK makes sure that we are not accidentally
// dereferencing a handle on a background thread without an active
// LocalHeap.
DCHECK_EQ(ThreadId::Current(), isolate->thread_id());
local_heap = isolate->main_thread_local_heap();
DCHECK_NOT_NULL(local_heap);
}
LocalHeap* local_heap = LocalHeap::Current();
if (FLAG_local_heaps && local_heap) {
// Local heap can't access handles when parked
if (!local_heap->IsHandleDereferenceAllowed()) {
StdoutStream{} << "Cannot dereference handle owned by "
......@@ -65,14 +55,20 @@ bool HandleBase::IsDereferenceAllowed() const {
return false;
}
if (local_heap->ContainsPersistentHandle(location_) ||
local_heap->ContainsLocalHandle(location_)) {
// The current thread owns the handle and thus can dereference it.
return true;
}
// The current thread owns the handle and thus can dereference it.
return local_heap->ContainsPersistentHandle(location_) ||
local_heap->ContainsLocalHandle(location_);
}
// If the local_heap is null, we're on the main thread -- if we were to check
// main thread HandleScopes here, we should additionally check the main-thread
// LocalHeap.
DCHECK_EQ(ThreadId::Current(), isolate->thread_id());
if (FLAG_local_heaps) {
DCHECK_NOT_NULL(isolate->main_thread_local_heap());
}
return AllowHandleDereference::IsAllowed();
// TODO(leszeks): Check if the main thread owns this handle.
return true;
}
#endif
......
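
The handles.cc hunks above interleave removed and added lines because
the scraped diff lost its +/- markers. Pieced together from the context
lines and the new tests below, the post-patch body plausibly reads as
follows (a reconstruction, not verbatim source; the elided string and
the exact old/new split are inferred):

  bool HandleBase::IsDereferenceAllowed() const {
    // ... (earlier checks elided) ...
    if (isolate->IsBuiltinsTableHandleLocation(location_)) return true;
    if (!AllowHandleDereference::IsAllowed()) return false;

    LocalHeap* local_heap = LocalHeap::Current();
    if (FLAG_local_heaps && local_heap) {
      // Local heap can't access handles when parked
      if (!local_heap->IsHandleDereferenceAllowed()) {
        StdoutStream{} << "Cannot dereference handle owned by "
                       /* remainder elided in the hunk above */;
        return false;
      }

      // The current thread owns the handle and thus can dereference it.
      return local_heap->ContainsPersistentHandle(location_) ||
             local_heap->ContainsLocalHandle(location_);
    }

    // If the local_heap is null, we're on the main thread -- if we were to
    // check main thread HandleScopes here, we should additionally check the
    // main-thread LocalHeap.
    DCHECK_EQ(ThreadId::Current(), isolate->thread_id());
    if (FLAG_local_heaps) {
      DCHECK_NOT_NULL(isolate->main_thread_local_heap());
    }

    // TODO(leszeks): Check if the main thread owns this handle.
    return true;
  }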
......@@ -117,6 +117,10 @@
'test-code-stub-assembler/PopAndReturnFromJSBuiltinWithStackParameters' : [FAIL],
'test-code-stub-assembler/PopAndReturnFromTFCBuiltinWithStackParameters' : [FAIL],
# These tests are supposed to fail in a slow DCHECK, skip them otherwise.
'test-local-handles/DereferenceLocalHandleFailsWhenDisallowed': [SKIP, ['mode == debug', FAIL]],
'test-persistent-handles/DereferencePersistentHandleFailsWhenDisallowed': [SKIP, ['mode == debug', FAIL]],
############################################################################
# Slow tests.
'test-debug/CallFunctionInDebugger': [PASS, ['mode == debug', SLOW]],
......@@ -129,7 +133,6 @@
##############################################################################
['mode == debug', {
# These tests are supposed to fail in a DCHECK.
'test-persistent-handles/NewPersistentHandleFailsWhenParked': [FAIL],
'test-persistent-handles/NewPersistentHandleFailsWhenParkedExplicit': [FAIL],
}],
......
......@@ -8,6 +8,7 @@
#include "src/base/platform/condition-variable.h"
#include "src/base/platform/mutex.h"
#include "src/base/platform/semaphore.h"
#include "src/common/assert-scope.h"
#include "src/common/globals.h"
#include "src/handles/handles-inl.h"
#include "src/handles/local-handles-inl.h"
......@@ -128,6 +129,29 @@ TEST(DereferenceLocalHandle) {
LocalHandleScope scope(&local_heap);
Handle<HeapNumber> local_number = handle(*ph, &local_heap);
CHECK_EQ(42, local_number->value());
}
}
TEST(DereferenceLocalHandleFailsWhenDisallowed) {
heap::EnsureFlagLocalHeapsEnabled();
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
// Create a PersistentHandle to create the LocalHandle, and thus not have a
// HandleScope present to override the LocalHandleScope.
std::unique_ptr<PersistentHandles> phs = isolate->NewPersistentHandles();
Handle<HeapNumber> ph;
{
HandleScope handle_scope(isolate);
Handle<HeapNumber> number = isolate->factory()->NewHeapNumber(42.0);
ph = phs->NewHandle(number);
}
{
LocalHeap local_heap(isolate->heap(), ThreadKind::kBackground,
std::move(phs));
UnparkedScope unparked_scope(&local_heap);
LocalHandleScope scope(&local_heap);
Handle<HeapNumber> local_number = handle(*ph, &local_heap);
DisallowHandleDereference disallow_scope;
CHECK_EQ(42, local_number->value());
}
......
......@@ -8,6 +8,7 @@
#include "src/base/platform/condition-variable.h"
#include "src/base/platform/mutex.h"
#include "src/base/platform/semaphore.h"
#include "src/common/assert-scope.h"
#include "src/handles/handles-inl.h"
#include "src/handles/local-handles-inl.h"
#include "src/handles/persistent-handles.h"
......@@ -134,6 +135,25 @@ TEST(DereferencePersistentHandle) {
std::move(phs));
UnparkedScope scope(&local_heap);
CHECK_EQ(42, ph->value());
}
}
TEST(DereferencePersistentHandleFailsWhenDisallowed) {
heap::EnsureFlagLocalHeapsEnabled();
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
std::unique_ptr<PersistentHandles> phs = isolate->NewPersistentHandles();
Handle<HeapNumber> ph;
{
HandleScope handle_scope(isolate);
Handle<HeapNumber> number = isolate->factory()->NewHeapNumber(42.0);
ph = phs->NewHandle(number);
}
{
LocalHeap local_heap(isolate->heap(), ThreadKind::kBackground,
std::move(phs));
UnparkedScope scope(&local_heap);
DisallowHandleDereference disallow_scope;
CHECK_EQ(42, ph->value());
}
......