Commit 4f2f14f8 authored by Jakob Gruber, committed by Commit Bot

[compiler] Remove --turbo-direct-heap-access

On a per-job basis, --turbo-direct-heap-access should be equivalent to
whether concurrent inlining is enabled. We simplify the involved logic
by removing the flag and replacing all accesses to

- FLAG_turbo_direct_heap_access, and
- FLAG_concurrent_inlining

inside compiler/ with
OptimizedCompilationInfo::is_concurrent_inlining() (or derived values).
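
As a sketch of the replacement pattern (mirroring the hunks below), a
guard that used to read the global flag now consults the per-job
setting, typically via the JSHeapBroker:

  // Before:
  DisallowHeapAccessIf no_heap_access(!FLAG_turbo_direct_heap_access);
  // After:
  DisallowHeapAccessIf no_heap_access(!broker()->is_concurrent_inlining());

The per-job value itself is derived once in
OptimizedCompilationInfo::ConfigureFlags() from the code kind
(Turboprop), the OSR state, and --concurrent-inlining.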

Bug: v8:7790
Change-Id: I64818e0e1004dded08c784ef1c4bdfd2af990a59
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2843345
Commit-Queue: Jakob Gruber <jgruber@chromium.org>
Auto-Submit: Jakob Gruber <jgruber@chromium.org>
Reviewed-by: Georg Neis <neis@chromium.org>
Cr-Commit-Position: refs/heads/master@{#74166}
parent 0bc71bc9
@@ -23,12 +23,16 @@ namespace internal {
 OptimizedCompilationInfo::OptimizedCompilationInfo(
     Zone* zone, Isolate* isolate, Handle<SharedFunctionInfo> shared,
-    Handle<JSFunction> closure, CodeKind code_kind)
+    Handle<JSFunction> closure, CodeKind code_kind, BytecodeOffset osr_offset,
+    JavaScriptFrame* osr_frame)
     : code_kind_(code_kind),
+      osr_offset_(osr_offset),
+      osr_frame_(osr_frame),
       zone_(zone),
       optimization_id_(isolate->NextOptimizationId()) {
   DCHECK_EQ(*shared, closure->shared());
   DCHECK(shared->is_compiled());
+  DCHECK_IMPLIES(is_osr(), IsOptimizing());
   bytecode_array_ = handle(shared->GetBytecodeArray(isolate), isolate);
   shared_info_ = shared;
   closure_ = closure;
@@ -86,6 +90,10 @@ void OptimizedCompilationInfo::ConfigureFlags() {
   if (FLAG_untrusted_code_mitigations) set_untrusted_code_mitigations();
   if (FLAG_turbo_inline_js_wasm_calls) set_inline_js_wasm_calls();
+  if (!is_osr() && (IsTurboprop() || FLAG_concurrent_inlining)) {
+    set_concurrent_inlining();
+  }
   switch (code_kind_) {
     case CodeKind::TURBOFAN:
       if (FLAG_function_context_specialization) {
...
@@ -104,7 +104,15 @@ class V8_EXPORT_PRIVATE OptimizedCompilationInfo final {
   // Construct a compilation info for optimized compilation.
   OptimizedCompilationInfo(Zone* zone, Isolate* isolate,
                            Handle<SharedFunctionInfo> shared,
-                           Handle<JSFunction> closure, CodeKind code_kind);
+                           Handle<JSFunction> closure, CodeKind code_kind,
+                           BytecodeOffset osr_offset,
+                           JavaScriptFrame* osr_frame);
+  // For testing.
+  OptimizedCompilationInfo(Zone* zone, Isolate* isolate,
+                           Handle<SharedFunctionInfo> shared,
+                           Handle<JSFunction> closure, CodeKind code_kind)
+      : OptimizedCompilationInfo(zone, isolate, shared, closure, code_kind,
+                                 BytecodeOffset::None(), nullptr) {}
   // Construct a compilation info for stub compilation, Wasm, and testing.
   OptimizedCompilationInfo(Vector<const char> debug_name, Zone* zone,
                            CodeKind code_kind);
@@ -167,13 +175,6 @@ class V8_EXPORT_PRIVATE OptimizedCompilationInfo final {
   bool IsWasm() const { return code_kind() == CodeKind::WASM_FUNCTION; }
 #endif  // V8_ENABLE_WEBASSEMBLY
-  void SetOptimizingForOsr(BytecodeOffset osr_offset,
-                           JavaScriptFrame* osr_frame) {
-    DCHECK(IsOptimizing());
-    osr_offset_ = osr_offset;
-    osr_frame_ = osr_frame;
-  }
   void set_persistent_handles(
       std::unique_ptr<PersistentHandles> persistent_handles) {
     DCHECK_NULL(ph_);
@@ -292,7 +293,9 @@ class V8_EXPORT_PRIVATE OptimizedCompilationInfo final {
 #endif  // V8_ENABLE_WEBASSEMBLY
   // Entry point when compiling for OSR, {BytecodeOffset::None} otherwise.
-  BytecodeOffset osr_offset_ = BytecodeOffset::None();
+  const BytecodeOffset osr_offset_ = BytecodeOffset::None();
+  // The current OSR frame for specialization or {nullptr}.
+  JavaScriptFrame* const osr_frame_ = nullptr;
   // The zone from which the compilation pipeline working on this
   // OptimizedCompilationInfo allocates.
@@ -308,9 +311,6 @@ class V8_EXPORT_PRIVATE OptimizedCompilationInfo final {
   const int optimization_id_;
   unsigned inlined_bytecode_size_ = 0;
-  // The current OSR frame for specialization or {nullptr}.
-  JavaScriptFrame* osr_frame_ = nullptr;
   Vector<const char> debug_name_;
   std::unique_ptr<char[]> trace_turbo_filename_;
...
@@ -1570,7 +1570,7 @@ void BytecodeGraphBuilder::VisitBytecodes() {
   }
   // TODO(leszeks): Increment usage counter on BG thread.
-  if (!FLAG_concurrent_inlining && has_one_shot_bytecode) {
+  if (!broker()->is_concurrent_inlining() && has_one_shot_bytecode) {
     // (For concurrent inlining this is done in the serializer instead.)
     isolate()->CountUsage(
         v8::Isolate::UseCounterFeature::kOptimizedFunctionWithOneShotBytecode);
...
@@ -8,10 +8,11 @@
 #include "src/compiler/common-operator.h"
 #include "src/compiler/graph.h"
+#include "src/compiler/js-heap-broker.h"
 #include "src/compiler/machine-operator.h"
-#include "src/compiler/node.h"
 #include "src/compiler/node-matchers.h"
 #include "src/compiler/node-properties.h"
+#include "src/compiler/node.h"
 namespace v8 {
 namespace internal {
@@ -55,7 +56,8 @@ CommonOperatorReducer::CommonOperatorReducer(Editor* editor, Graph* graph,
 }
 Reduction CommonOperatorReducer::Reduce(Node* node) {
-  DisallowHeapAccessIf no_heap_access(!FLAG_turbo_direct_heap_access);
+  DisallowHeapAccessIf no_heap_access(broker() == nullptr ||
+                                      !broker()->is_concurrent_inlining());
   switch (node->opcode()) {
     case IrOpcode::kBranch:
       return ReduceBranch(node);
...
@@ -5,6 +5,7 @@
 #include "src/compiler/constant-folding-reducer.h"
 #include "src/compiler/js-graph.h"
+#include "src/compiler/js-heap-broker.h"
 #include "src/objects/objects-inl.h"
 namespace v8 {
@@ -63,7 +64,7 @@ ConstantFoldingReducer::ConstantFoldingReducer(Editor* editor, JSGraph* jsgraph,
 ConstantFoldingReducer::~ConstantFoldingReducer() = default;
 Reduction ConstantFoldingReducer::Reduce(Node* node) {
-  DisallowHeapAccessIf no_heap_access(!FLAG_turbo_direct_heap_access);
+  DisallowHeapAccessIf no_heap_access(!broker()->is_concurrent_inlining());
   if (!NodeProperties::IsConstant(node) && NodeProperties::IsTyped(node) &&
       node->op()->HasProperty(Operator::kEliminatable) &&
       node->opcode() != IrOpcode::kFinishRegion) {
...
@@ -16,12 +16,14 @@ namespace compiler {
 DeadCodeElimination::DeadCodeElimination(Editor* editor, Graph* graph,
                                          CommonOperatorBuilder* common,
-                                         Zone* temp_zone)
+                                         Zone* temp_zone,
+                                         bool is_concurrent_inlining)
     : AdvancedReducer(editor),
       graph_(graph),
       common_(common),
       dead_(graph->NewNode(common->Dead())),
-      zone_(temp_zone) {
+      zone_(temp_zone),
+      is_concurrent_inlining_(is_concurrent_inlining) {
   NodeProperties::SetType(dead_, Type::None());
 }
@@ -46,7 +48,7 @@ Node* FindDeadInput(Node* node) {
 }  // namespace
 Reduction DeadCodeElimination::Reduce(Node* node) {
-  DisallowHeapAccessIf no_heap_access(!FLAG_turbo_direct_heap_access);
+  DisallowHeapAccessIf no_heap_access(!is_concurrent_inlining_);
   switch (node->opcode()) {
     case IrOpcode::kEnd:
       return ReduceEnd(node);
...
@@ -40,7 +40,8 @@ class V8_EXPORT_PRIVATE DeadCodeElimination final
     : public NON_EXPORTED_BASE(AdvancedReducer) {
  public:
   DeadCodeElimination(Editor* editor, Graph* graph,
-                      CommonOperatorBuilder* common, Zone* temp_zone);
+                      CommonOperatorBuilder* common, Zone* temp_zone,
+                      bool is_concurrent_inlining);
   ~DeadCodeElimination() final = default;
   DeadCodeElimination(const DeadCodeElimination&) = delete;
   DeadCodeElimination& operator=(const DeadCodeElimination&) = delete;
@@ -78,6 +79,8 @@ class V8_EXPORT_PRIVATE DeadCodeElimination final
   CommonOperatorBuilder* const common_;
   Node* const dead_;
   Zone* zone_;
+  const bool is_concurrent_inlining_;
 };
 }  // namespace compiler
...
@@ -112,7 +112,7 @@ Reduction GraphReducer::Reduce(Node* const node) {
         if (FLAG_trace_turbo_reduction) {
           UnparkedScopeIfNeeded unparked(broker_);
           // TODO(neis): Disallow racy handle dereference once we stop
-          // supporting --no-local-heaps --no-turbo-direct-heap-access.
+          // supporting --no-local-heaps --no-concurrent-inlining.
           AllowHandleDereference allow_deref;
           StdoutStream{} << "- In-place update of #" << *node << " by reducer "
                          << (*i)->reducer_name() << std::endl;
@@ -125,7 +125,7 @@ Reduction GraphReducer::Reduce(Node* const node) {
         if (FLAG_trace_turbo_reduction) {
           UnparkedScopeIfNeeded unparked(broker_);
           // TODO(neis): Disallow racy handle dereference once we stop
-          // supporting --no-local-heaps --no-turbo-direct-heap-access.
+          // supporting --no-local-heaps --no-concurrent-inlining.
           AllowHandleDereference allow_deref;
           StdoutStream{} << "- Replacement of #" << *node << " with #"
                          << *(reduction.replacement()) << " by reducer "
...
@@ -56,7 +56,7 @@ HEAP_BROKER_NEVER_SERIALIZED_OBJECT_LIST(FORWARD_DECL)
 // mutable) HeapObject and the data is an instance of ObjectData. Its handle
 // must be persistent so that the GC can update it at a safepoint. Via this
 // handle, the object can be accessed concurrently to the main thread. To be
-// used the flag --turbo-direct-heap-access must be on.
+// used the flag --concurrent-inlining must be on.
 //
 // kUnserializedReadOnlyHeapObject: The underlying V8 object is a read-only
 // HeapObject and the data is an instance of ObjectData. For
@@ -100,7 +100,13 @@ class ObjectData : public ZoneObject {
  public:
   ObjectData(JSHeapBroker* broker, ObjectData** storage, Handle<Object> object,
              ObjectDataKind kind)
-      : object_(object), kind_(kind) {
+      : object_(object),
+        kind_(kind)
+#ifdef DEBUG
+        ,
+        broker_(broker)
+#endif  // DEBUG
+  {
     // This assignment ensures we don't end up inserting the same object
     // in an endless recursion.
     *storage = this;
@@ -154,11 +160,16 @@ class ObjectData : public ZoneObject {
 #ifdef DEBUG
   enum class Usage{kUnused, kOnlyIdentityUsed, kDataUsed};
   mutable Usage used_status = Usage::kUnused;
+  JSHeapBroker* broker() const { return broker_; }
 #endif  // DEBUG
  private:
   Handle<Object> const object_;
   ObjectDataKind const kind_;
+#ifdef DEBUG
+  JSHeapBroker* const broker_;  // For DCHECKs.
+#endif  // DEBUG
 };
 class HeapObjectData : public ObjectData {
@@ -255,7 +266,7 @@ FunctionTemplateInfoData::FunctionTemplateInfoData(
       c_function_(v8::ToCData<Address>(object->GetCFunction())),
       c_signature_(v8::ToCData<CFunctionInfo*>(object->GetCSignature())),
       known_receivers_(broker->zone()) {
-  DCHECK(!FLAG_turbo_direct_heap_access);
+  DCHECK(!broker->is_concurrent_inlining());
   auto function_template_info = Handle<FunctionTemplateInfo>::cast(object);
   is_signature_undefined_ =
       function_template_info->signature().IsUndefined(broker->isolate());
@@ -271,7 +282,7 @@ CallHandlerInfoData::CallHandlerInfoData(JSHeapBroker* broker,
                                          Handle<CallHandlerInfo> object)
     : HeapObjectData(broker, storage, object),
       callback_(v8::ToCData<Address>(object->callback())) {
-  DCHECK(!FLAG_turbo_direct_heap_access);
+  DCHECK(!broker->is_concurrent_inlining());
 }
 PropertyCellData::PropertyCellData(JSHeapBroker* broker, ObjectData** storage,
@@ -603,7 +614,7 @@ class ArrayBoilerplateDescriptionData : public HeapObjectData {
                                   Handle<ArrayBoilerplateDescription> object)
       : HeapObjectData(broker, storage, object),
         constants_elements_length_(object->constant_elements().length()) {
-    DCHECK(!FLAG_turbo_direct_heap_access);
+    DCHECK(!broker->is_concurrent_inlining());
   }
   int constants_elements_length() const { return constants_elements_length_; }
@@ -617,7 +628,7 @@ class ObjectBoilerplateDescriptionData : public HeapObjectData {
   ObjectBoilerplateDescriptionData(JSHeapBroker* broker, ObjectData** storage,
                                    Handle<ObjectBoilerplateDescription> object)
       : HeapObjectData(broker, storage, object), size_(object->size()) {
-    DCHECK(!FLAG_turbo_direct_heap_access);
+    DCHECK(!broker->is_concurrent_inlining());
   }
   int size() const { return size_; }
@@ -646,15 +657,15 @@ class JSBoundFunctionData : public JSObjectData {
   bool serialized() const { return serialized_; }
   ObjectData* bound_target_function() const {
-    DCHECK(!FLAG_turbo_direct_heap_access);
+    DCHECK(!broker()->is_concurrent_inlining());
     return bound_target_function_;
   }
   ObjectData* bound_this() const {
-    DCHECK(!FLAG_turbo_direct_heap_access);
+    DCHECK(!broker()->is_concurrent_inlining());
     return bound_this_;
   }
   ObjectData* bound_arguments() const {
-    DCHECK(!FLAG_turbo_direct_heap_access);
+    DCHECK(!broker()->is_concurrent_inlining());
     return bound_arguments_;
   }
@@ -701,7 +712,7 @@ class JSFunctionData : public JSObjectData {
   }
   ObjectData* code() const {
     DCHECK(serialized_code_and_feedback());
-    DCHECK(!FLAG_turbo_direct_heap_access);
+    DCHECK(!broker()->is_concurrent_inlining());
     return code_;
   }
   int initial_map_instance_size_with_min_slack() const {
@@ -867,7 +878,7 @@ class NameData : public HeapObjectData {
  public:
   NameData(JSHeapBroker* broker, ObjectData** storage, Handle<Name> object)
       : HeapObjectData(broker, storage, object) {
-    DCHECK(!FLAG_turbo_direct_heap_access);
+    DCHECK(!broker->is_concurrent_inlining());
   }
 };
@@ -902,7 +913,7 @@ class SymbolData : public NameData {
  public:
   SymbolData(JSHeapBroker* broker, ObjectData** storage, Handle<Symbol> object)
       : NameData(broker, storage, object) {
-    DCHECK(!FLAG_turbo_direct_heap_access);
+    DCHECK(!broker->is_concurrent_inlining());
   }
 };
@@ -915,7 +926,7 @@ StringData::StringData(JSHeapBroker* broker, ObjectData** storage,
       is_external_string_(object->IsExternalString()),
       is_seq_string_(object->IsSeqString()),
       chars_as_strings_(broker->zone()) {
-  DCHECK(!FLAG_turbo_direct_heap_access);
+  DCHECK(!broker->is_concurrent_inlining());
 }
 class InternalizedStringData : public StringData {
@@ -923,7 +934,7 @@ class InternalizedStringData : public StringData {
   InternalizedStringData(JSHeapBroker* broker, ObjectData** storage,
                          Handle<InternalizedString> object)
       : StringData(broker, storage, object) {
-    DCHECK(!FLAG_turbo_direct_heap_access);
+    DCHECK(!broker->is_concurrent_inlining());
   }
 };
@@ -1227,7 +1238,7 @@ class MapData : public HeapObjectData {
 AccessorInfoData::AccessorInfoData(JSHeapBroker* broker, ObjectData** storage,
                                    Handle<AccessorInfo> object)
     : HeapObjectData(broker, storage, object) {
-  DCHECK(!FLAG_turbo_direct_heap_access);
+  DCHECK(!broker->is_concurrent_inlining());
 }
 AllocationSiteData::AllocationSiteData(JSHeapBroker* broker,
@@ -1269,6 +1280,11 @@ void AllocationSiteData::SerializeBoilerplate(JSHeapBroker* broker) {
 HeapObjectData::HeapObjectData(JSHeapBroker* broker, ObjectData** storage,
                                Handle<HeapObject> object, ObjectDataKind kind)
     : ObjectData(broker, storage, object, kind),
+      // We have to use a raw cast below instead of AsMap() because of
+      // recursion. AsMap() would call IsMap(), which accesses the
+      // instance_type_ member. In the case of constructing the MapData for the
+      // meta map (whose map is itself), this member has not yet been
+      // initialized.
       map_(broker->GetOrCreateData(object->synchronized_map())) {
   CHECK_IMPLIES(kind == kSerializedHeapObject,
                 broker->mode() == JSHeapBroker::kSerializing);
@@ -1451,7 +1467,7 @@ void JSFunctionData::SerializeCodeAndFeedback(JSHeapBroker* broker) {
   DCHECK_NULL(feedback_cell_);
   DCHECK_NULL(feedback_vector_);
   DCHECK_NULL(code_);
-  if (!FLAG_turbo_direct_heap_access) {
+  if (!broker->is_concurrent_inlining()) {
     // This is conditionalized because Code objects are never serialized now.
     // We only need to represent the code object in serialized data when
     // we're unable to perform direct heap accesses.
@@ -1488,7 +1504,7 @@ class DescriptorArrayData : public HeapObjectData {
   DescriptorArrayData(JSHeapBroker* broker, ObjectData** storage,
                       Handle<DescriptorArray> object)
       : HeapObjectData(broker, storage, object), contents_(broker->zone()) {
-    DCHECK(!FLAG_turbo_direct_heap_access);
+    DCHECK(!broker->is_concurrent_inlining());
   }
   ObjectData* FindFieldOwner(InternalIndex descriptor_index) const {
@@ -1580,7 +1596,7 @@ FeedbackCellData::FeedbackCellData(JSHeapBroker* broker, ObjectData** storage,
       value_(object->value().IsFeedbackVector()
                  ? broker->GetOrCreateData(object->value())
                  : nullptr) {
-  DCHECK(!FLAG_turbo_direct_heap_access);
+  DCHECK(!broker->is_concurrent_inlining());
 }
 class FeedbackVectorData : public HeapObjectData {
@@ -1613,7 +1629,7 @@ FeedbackVectorData::FeedbackVectorData(JSHeapBroker* broker,
     : HeapObjectData(broker, storage, object),
       invocation_count_(object->invocation_count()),
       closure_feedback_cell_array_(broker->zone()) {
-  DCHECK(!FLAG_turbo_direct_heap_access);
+  DCHECK(!broker->is_concurrent_inlining());
 }
 ObjectData* FeedbackVectorData::GetClosureFeedbackCell(JSHeapBroker* broker,
@@ -1849,7 +1865,7 @@ JSArrayData::JSArrayData(JSHeapBroker* broker, ObjectData** storage,
     : JSObjectData(broker, storage, object), own_elements_(broker->zone()) {}
 void JSArrayData::Serialize(JSHeapBroker* broker) {
-  CHECK(!FLAG_turbo_direct_heap_access);
+  CHECK(!broker->is_concurrent_inlining());
   if (serialized_) return;
   serialized_ = true;
@@ -1907,7 +1923,7 @@ ScopeInfoData::ScopeInfoData(JSHeapBroker* broker, ObjectData** storage,
       has_context_extension_slot_(object->HasContextExtensionSlot()),
       has_outer_scope_info_(object->HasOuterScopeInfo()),
       outer_scope_info_(nullptr) {
-  DCHECK(!FLAG_turbo_direct_heap_access);
+  DCHECK(!broker->is_concurrent_inlining());
 }
 void ScopeInfoData::SerializeScopeInfoChain(JSHeapBroker* broker) {
@@ -2093,7 +2109,7 @@ class CellData : public HeapObjectData {
  public:
   CellData(JSHeapBroker* broker, ObjectData** storage, Handle<Cell> object)
       : HeapObjectData(broker, storage, object) {
-    DCHECK(!FLAG_turbo_direct_heap_access);
+    DCHECK(!broker->is_concurrent_inlining());
   }
 };
@@ -2184,7 +2200,7 @@ class TemplateObjectDescriptionData : public HeapObjectData {
   TemplateObjectDescriptionData(JSHeapBroker* broker, ObjectData** storage,
                                 Handle<TemplateObjectDescription> object)
       : HeapObjectData(broker, storage, object) {
-    DCHECK(!FLAG_turbo_direct_heap_access);
+    DCHECK(!broker->is_concurrent_inlining());
   }
 };
@@ -2196,7 +2212,7 @@ class CodeData : public HeapObjectData {
                                 !object->marked_for_deoptimization()
                             ? object->inlined_bytecode_size()
                             : 0) {
-    DCHECK(!FLAG_turbo_direct_heap_access);
+    DCHECK(!broker->is_concurrent_inlining());
   }
   unsigned inlined_bytecode_size() const { return inlined_bytecode_size_; }
@@ -2233,16 +2249,16 @@ HEAP_BROKER_POSSIBLY_BACKGROUND_SERIALIZED_OBJECT_LIST(DEFINE_AS)
 HEAP_BROKER_BACKGROUND_SERIALIZED_OBJECT_LIST(DEFINE_AS)
 #undef DEFINE_AS
-// TODO(solanes, v8:10866): Remove once FLAG_turbo_direct_heap_access is
+// TODO(solanes, v8:10866): Remove once broker()->is_concurrent_inlining() is
 // removed.
 // This macro defines the Asxxx methods for NeverSerialized objects, which
 // should only be used with direct heap access off.
 #define DEFINE_AS(Name) \
   Name##Data* ObjectData::As##Name() { \
-    DCHECK(!FLAG_turbo_direct_heap_access); \
+    DCHECK(!broker()->is_concurrent_inlining()); \
     CHECK(Is##Name()); \
     CHECK_EQ(kind_, kSerializedHeapObject); \
     return static_cast<Name##Data*>(this); \
   }
 HEAP_BROKER_NEVER_SERIALIZED_OBJECT_LIST(DEFINE_AS)
 #undef DEFINE_AS
@@ -2483,7 +2499,7 @@ void JSObjectData::SerializeRecursiveAsBoilerplate(JSHeapBroker* broker,
     map()->AsMap()->SerializeOwnDescriptors(broker);
   }
-  if (IsJSArray() && !FLAG_turbo_direct_heap_access) {
+  if (IsJSArray() && !broker->is_concurrent_inlining()) {
     AsJSArray()->Serialize(broker);
   }
 }
@@ -2671,14 +2687,14 @@ void JSHeapBroker::InitializeAndStartSerializing(
   SetTargetNativeContextRef(native_context);
   target_native_context().Serialize();
-  if (!FLAG_turbo_direct_heap_access) {
+  if (!is_concurrent_inlining()) {
     // Perform full native context serialization now if we can't do it later on
     // the background thread.
     target_native_context().SerializeOnBackground();
   }
   Factory* const f = isolate()->factory();
-  if (!FLAG_turbo_direct_heap_access) {
+  if (!is_concurrent_inlining()) {
     ObjectData* data;
     data = GetOrCreateData(f->array_buffer_detaching_protector());
     if (!data->should_access_heap()) data->AsPropertyCell()->Serialize(this);
@@ -2734,13 +2750,14 @@ ObjectData* JSHeapBroker::TryGetOrCreateData(
     entry = refs_->LookupOrInsert(object.address());
     object_data = zone()->New<ObjectData>(this, &(entry->value), object,
                                           kUnserializedReadOnlyHeapObject);
-// TODO(solanes, v8:10866): Remove the if/else in this macro once we remove the
-// FLAG_turbo_direct_heap_access.
+// TODO(solanes, v8:10866): Remove the `(mode() == kSerializing)` case in this
+// macro when all classes skip serialization. Same for the other macros if we
+// end up keeping them.
 #define CREATE_DATA_FOR_DIRECT_READ(name) \
   } \
   /* NOLINTNEXTLINE(readability/braces) */ \
   else if (object->Is##name()) { \
-    if (FLAG_turbo_direct_heap_access) { \
+    if (is_concurrent_inlining()) { \
       entry = refs_->LookupOrInsert(object.address()); \
       object_data = zone()->New<ObjectData>(this, &(entry->value), object, \
                                             kNeverSerializedHeapObject); \
@@ -2778,7 +2795,7 @@ ObjectData* JSHeapBroker::TryGetOrCreateData(
   } \
   /* NOLINTNEXTLINE(readability/braces) */ \
   else if (object->Is##name()) { \
-    if (FLAG_turbo_direct_heap_access) { \
+    if (is_concurrent_inlining()) { \
      entry = refs_->LookupOrInsert(object.address()); \
      object_data = zone()->New<name##Data>(this, &(entry->value), \
                                            Handle<name>::cast(object), \
@@ -3047,7 +3064,7 @@ void JSObjectRef::EnsureElementsTenured() {
 FieldIndex MapRef::GetFieldIndexFor(InternalIndex descriptor_index) const {
   CHECK_LT(descriptor_index.as_int(), NumberOfOwnDescriptors());
-  if (data_->should_access_heap() || FLAG_turbo_direct_heap_access) {
+  if (data_->should_access_heap() || broker()->is_concurrent_inlining()) {
     FieldIndex result = FieldIndex::ForDescriptor(*object(), descriptor_index);
     DCHECK(result.is_inobject());
     return result;
@@ -3089,7 +3106,7 @@ bool MapRef::IsPrimitiveMap() const {
 MapRef MapRef::FindFieldOwner(InternalIndex descriptor_index) const {
   CHECK_LT(descriptor_index.as_int(), NumberOfOwnDescriptors());
-  if (data_->should_access_heap() || FLAG_turbo_direct_heap_access) {
+  if (data_->should_access_heap() || broker()->is_concurrent_inlining()) {
     // TODO(solanes, v8:7790): Consider caching the result of the field owner on
     // the descriptor array. It would be useful for same map as well as any
     // other map sharing that descriptor array.
@@ -3244,19 +3261,19 @@ int BytecodeArrayRef::handler_table_size() const {
 // Like IF_ACCESS_FROM_HEAP[_C] but we also allow direct heap access for
 // kSerialized only for methods that we identified to be safe.
 #define IF_ACCESS_FROM_HEAP_WITH_FLAG(result, name) \
-  if (data_->should_access_heap() || FLAG_turbo_direct_heap_access) { \
+  if (data_->should_access_heap() || broker()->is_concurrent_inlining()) { \
     return result##Ref(broker(), \
                        broker()->CanonicalPersistentHandle(object()->name())); \
   }
 #define IF_ACCESS_FROM_HEAP_WITH_FLAG_C(name) \
-  if (data_->should_access_heap() || FLAG_turbo_direct_heap_access) { \
+  if (data_->should_access_heap() || broker()->is_concurrent_inlining()) { \
     return object()->name(); \
   }
 // Like BIMODAL_ACCESSOR[_C] except that we force a direct heap access if
-// FLAG_turbo_direct_heap_access is true (even for kSerialized). This is because
-// we identified the method to be safe to use direct heap access, but the
-// holder##Data class still needs to be serialized.
+// broker()->is_concurrent_inlining() is true (even for kSerialized). This is
+// because we identified the method to be safe to use direct heap access, but
+// the holder##Data class still needs to be serialized.
 #define BIMODAL_ACCESSOR_WITH_FLAG(holder, result, name) \
   result##Ref holder##Ref::name() const { \
     IF_ACCESS_FROM_HEAP_WITH_FLAG(result, name); \
@@ -3498,7 +3515,7 @@ BIMODAL_ACCESSOR(ScopeInfo, ScopeInfo, OuterScopeInfo)
 BIMODAL_ACCESSOR_C(SharedFunctionInfo, int, builtin_id)
 BytecodeArrayRef SharedFunctionInfoRef::GetBytecodeArray() const {
-  if (data_->should_access_heap() || FLAG_turbo_direct_heap_access) {
+  if (data_->should_access_heap() || broker()->is_concurrent_inlining()) {
     BytecodeArray bytecode_array;
     if (!broker()->IsMainThread()) {
       bytecode_array = object()->GetBytecodeArray(broker()->local_isolate());
@@ -3553,7 +3570,7 @@ base::Optional<ObjectRef> MapRef::GetStrongValue(
 }
 DescriptorArrayRef MapRef::instance_descriptors() const {
-  if (data_->should_access_heap() || FLAG_turbo_direct_heap_access) {
+  if (data_->should_access_heap() || broker()->is_concurrent_inlining()) {
     return DescriptorArrayRef(
         broker(),
         broker()->CanonicalPersistentHandle(
@@ -3572,7 +3589,7 @@ void MapRef::SerializeRootMap() {
 // TODO(solanes, v8:7790): Remove base::Optional from the return type when
 // deleting serialization.
 base::Optional<MapRef> MapRef::FindRootMap() const {
-  if (data_->should_access_heap() || FLAG_turbo_direct_heap_access) {
+  if (data_->should_access_heap() || broker()->is_concurrent_inlining()) {
     // TODO(solanes): Remove the TryGetOrCreateData part when Map is moved to
     // kNeverSerialized.
     ObjectData* root_map =
@@ -3594,7 +3611,7 @@ base::Optional<MapRef> MapRef::FindRootMap() const {
 }
 bool JSTypedArrayRef::is_on_heap() const {
-  if (data_->should_access_heap() || FLAG_turbo_direct_heap_access) {
+  if (data_->should_access_heap() || broker()->is_concurrent_inlining()) {
     // Safe to read concurrently because:
     // - host object seen by serializer.
     // - underlying field written 1. during initialization or 2. with
@@ -3606,7 +3623,7 @@ bool JSTypedArrayRef::is_on_heap() const {
 size_t JSTypedArrayRef::length() const {
   CHECK(!is_on_heap());
-  if (data_->should_access_heap() || FLAG_turbo_direct_heap_access) {
+  if (data_->should_access_heap() || broker()->is_concurrent_inlining()) {
     // Safe to read concurrently because:
     // - immutable after initialization.
     // - host object seen by serializer.
@@ -3617,7 +3634,7 @@ size_t JSTypedArrayRef::length() const {
 HeapObjectRef JSTypedArrayRef::buffer() const {
   CHECK(!is_on_heap());
-  if (data_->should_access_heap() || FLAG_turbo_direct_heap_access) {
+  if (data_->should_access_heap() || broker()->is_concurrent_inlining()) {
     // Safe to read concurrently because:
     // - immutable after initialization.
     // - host object seen by serializer.
@@ -3630,7 +3647,7 @@ HeapObjectRef JSTypedArrayRef::buffer() const {
 void* JSTypedArrayRef::data_ptr() const {
   CHECK(!is_on_heap());
-  if (data_->should_access_heap() || FLAG_turbo_direct_heap_access) {
+  if (data_->should_access_heap() || broker()->is_concurrent_inlining()) {
     // Safe to read concurrently because:
     // - host object seen by serializer.
     // - underlying field written 1. during initialization or 2. protected by
@@ -3839,7 +3856,7 @@ Maybe<double> ObjectRef::OddballToNumber() const {
 base::Optional<ObjectRef> JSObjectRef::GetOwnConstantElement(
     uint32_t index, SerializationPolicy policy) const {
-  if (data_->should_access_heap() || FLAG_turbo_direct_heap_access) {
+  if (data_->should_access_heap() || broker()->is_concurrent_inlining()) {
     // `elements` are currently still serialized as members of JSObjectRef.
     // TODO(jgruber,v8:7790): Once JSObject is no longer serialized, we must
    // guarantee consistency between `object`, `elements_kind` and `elements`
@@ -3917,7 +3934,7 @@ ObjectRef JSArrayRef::GetBoilerplateLength() const {
 }
 ObjectRef JSArrayRef::length_unsafe() const {
-  if (data_->should_access_heap() || FLAG_turbo_direct_heap_access) {
+  if (data_->should_access_heap() || broker()->is_concurrent_inlining()) {
     Object o = object()->length(broker()->isolate(), kRelaxedLoad);
     return ObjectRef{broker(), broker()->CanonicalPersistentHandle(o)};
   } else {
@@ -3928,7 +3945,7 @@ ObjectRef JSArrayRef::length_unsafe() const {
 base::Optional<ObjectRef> JSArrayRef::GetOwnCowElement(
     FixedArrayBaseRef elements_ref, uint32_t index,
     SerializationPolicy policy) const {
-  if (data_->should_access_heap() || FLAG_turbo_direct_heap_access) {
+  if (data_->should_access_heap() || broker()->is_concurrent_inlining()) {
     // `elements` are currently still serialized as members of JSObjectRef.
     // TODO(jgruber,v8:7790): Remove the elements equality DCHECK below once
     // JSObject is no longer serialized.
@@ -3971,7 +3988,7 @@ base::Optional<ObjectRef> JSArrayRef::GetOwnCowElement(
                      broker()->CanonicalPersistentHandle(result.value())};
   } else {
     DCHECK(!data_->should_access_heap());
-    DCHECK(!FLAG_turbo_direct_heap_access);
+    DCHECK(!broker()->is_concurrent_inlining());
     // Just to clarify that `elements_ref` is not used on this path.
     // GetOwnElement accesses the serialized `elements` field on its own.
@@ -3987,7 +4004,7 @@ base::Optional<ObjectRef> JSArrayRef::GetOwnCowElement(
 }
 base::Optional<CellRef> SourceTextModuleRef::GetCell(int cell_index) const {
-  if (data_->should_access_heap() || FLAG_turbo_direct_heap_access) {
+  if (data_->should_access_heap() || broker()->is_concurrent_inlining()) {
     return CellRef(broker(), broker()->CanonicalPersistentHandle(
                                  object()->GetCell(cell_index)));
   }
@@ -4369,7 +4386,7 @@ bool JSFunctionRef::serialized_code_and_feedback() const {
 }
 CodeRef JSFunctionRef::code() const {
-  if (data_->should_access_heap() || FLAG_turbo_direct_heap_access) {
+  if (data_->should_access_heap() || broker()->is_concurrent_inlining()) {
     return CodeRef(broker(), broker()->CanonicalPersistentHandle(
                                  object()->code(kAcquireLoad)));
   }
@@ -4492,7 +4509,7 @@ void NativeContextRef::SerializeOnBackground() {
 }
 void JSTypedArrayRef::Serialize() {
-  if (data_->should_access_heap() || FLAG_turbo_direct_heap_access) {
+  if (data_->should_access_heap() || broker()->is_concurrent_inlining()) {
     // Even if the typed array object itself is no longer serialized (besides
     // the JSObject parts), the `buffer` field still is and thus we need to
     // make sure to visit it.
@@ -4514,7 +4531,7 @@ bool JSTypedArrayRef::serialized() const {
 }
 bool JSTypedArrayRef::ShouldHaveBeenSerialized() const {
-  if (FLAG_turbo_direct_heap_access) return false;
+  if (broker()->is_concurrent_inlining()) return false;
   return ObjectRef::ShouldHaveBeenSerialized();
 }
...
@@ -67,8 +67,8 @@ enum class OddballType : uint8_t {
 // too. For example, it CANNOT contain FixedArrayBase if it doesn't contain
 // FixedDoubleArray, BytecodeArray and FixedArray.
 // DO NOT VIOLATE THESE TWO PROPERTIES!
-// Classes on this list will skip serialization when
-// FLAG_turbo_direct_heap_access is on. Otherwise, they might get serialized.
+// Classes on this list will skip serialization when --concurrent-inlining is
+// on. Otherwise, they might get serialized.
 #define HEAP_BROKER_NEVER_SERIALIZED_OBJECT_LIST(V) \
   /* Subtypes of FixedArray */ \
   V(ObjectBoilerplateDescription) \
...
@@ -57,7 +57,6 @@ JSHeapBroker::JSHeapBroker(Isolate* isolate, Zone* broker_zone,
   // immediately with a larger-capacity one. It doesn't seem to affect the
   // performance in a noticeable way though.
   TRACE(this, "Constructing heap broker");
-  DCHECK_IMPLIES(is_concurrent_inlining_, FLAG_turbo_direct_heap_access);
 }
 JSHeapBroker::~JSHeapBroker() { DCHECK_NULL(local_isolate_); }
@@ -99,7 +98,7 @@ void JSHeapBroker::AttachLocalIsolate(OptimizedCompilationInfo* info,
   local_isolate_->heap()->AttachPersistentHandles(
       info->DetachPersistentHandles());
-  if (FLAG_turbo_direct_heap_access) {
+  if (is_concurrent_inlining()) {
     // Ensure any serialization that happens on the background has been
     // performed.
     target_native_context().SerializeOnBackground();
...
@@ -139,8 +139,7 @@ class PipelineData {
   // For main entry point.
   PipelineData(ZoneStats* zone_stats, Isolate* isolate,
                OptimizedCompilationInfo* info,
-               PipelineStatistics* pipeline_statistics,
-               bool is_concurrent_inlining)
+               PipelineStatistics* pipeline_statistics)
       : isolate_(isolate),
         allocator_(isolate->allocator()),
         info_(info),
@@ -157,9 +156,9 @@ class PipelineData {
         instruction_zone_(instruction_zone_scope_.zone()),
         codegen_zone_scope_(zone_stats_, kCodegenZoneName),
         codegen_zone_(codegen_zone_scope_.zone()),
-        broker_(new JSHeapBroker(isolate_, info_->zone(),
-                                 info_->trace_heap_broker(),
-                                 is_concurrent_inlining, info->code_kind())),
+        broker_(new JSHeapBroker(
+            isolate_, info_->zone(), info_->trace_heap_broker(),
+            info_->concurrent_inlining(), info->code_kind())),
         register_allocation_zone_scope_(zone_stats_,
                                         kRegisterAllocationZoneName),
         register_allocation_zone_(register_allocation_zone_scope_.zone()),
@@ -1105,15 +1104,6 @@ class PipelineCompilationJob final : public OptimizedCompilationJob {
   Linkage* linkage_;
 };
-namespace {
-bool ShouldUseConcurrentInlining(CodeKind code_kind, bool is_osr) {
-  if (is_osr) return false;
-  return code_kind == CodeKind::TURBOPROP || FLAG_concurrent_inlining;
-}
-}  // namespace
 PipelineCompilationJob::PipelineCompilationJob(
     Isolate* isolate, Handle<SharedFunctionInfo> shared_info,
     Handle<JSFunction> function, BytecodeOffset osr_offset,
@@ -1126,17 +1116,14 @@ PipelineCompilationJob::PipelineCompilationJob(
                                    kPipelineCompilationJobZoneName),
       zone_stats_(function->GetIsolate()->allocator()),
       compilation_info_(&zone_, function->GetIsolate(), shared_info, function,
-                        code_kind),
+                        code_kind, osr_offset, osr_frame),
       pipeline_statistics_(CreatePipelineStatistics(
           handle(Script::cast(shared_info->script()), isolate),
          compilation_info(), function->GetIsolate(), &zone_stats_)),
       data_(&zone_stats_, function->GetIsolate(), compilation_info(),
-            pipeline_statistics_.get(),
-            ShouldUseConcurrentInlining(code_kind, !osr_offset.IsNone())),
+            pipeline_statistics_.get()),
       pipeline_(&data_),
-      linkage_(nullptr) {
-  compilation_info_.SetOptimizingForOsr(osr_offset, osr_frame);
-}
+      linkage_(nullptr) {}
 PipelineCompilationJob::~PipelineCompilationJob() = default;
@@ -1229,7 +1216,7 @@ PipelineCompilationJob::Status PipelineCompilationJob::PrepareJobImpl(
     }
   }
-  if (FLAG_turbo_direct_heap_access) {
+  if (compilation_info()->concurrent_inlining()) {
     isolate->heap()->PublishPendingAllocations();
   }
@@ -1376,7 +1363,8 @@ struct InliningPhase {
                                data->broker(), data->jsgraph()->Dead(),
                                data->observe_node_manager());
     DeadCodeElimination dead_code_elimination(&graph_reducer, data->graph(),
-                                              data->common(), temp_zone);
+                                              data->common(), temp_zone,
+                                              info->concurrent_inlining());
     CheckpointElimination checkpoint_elimination(&graph_reducer);
     CommonOperatorReducer common_reducer(&graph_reducer, data->graph(),
                                          data->broker(), data->common(),
@@ -1446,7 +1434,8 @@ struct WasmInliningPhase {
     GraphReducer graph_reducer(temp_zone, data->graph(), &info->tick_counter(),
                                data->broker(), data->jsgraph()->Dead());
     DeadCodeElimination dead_code_elimination(&graph_reducer, data->graph(),
-                                              data->common(), temp_zone);
+                                              data->common(), temp_zone,
+                                              info->concurrent_inlining());
     CommonOperatorReducer common_reducer(&graph_reducer, data->graph(),
                                          data->broker(), data->common(),
                                          data->machine(), temp_zone);
@@ -1579,8 +1568,9 @@ struct TypedLoweringPhase {
     GraphReducer graph_reducer(
         temp_zone, data->graph(), &data->info()->tick_counter(), data->broker(),
         data->jsgraph()->Dead(), data->observe_node_manager());
-    DeadCodeElimination dead_code_elimination(&graph_reducer, data->graph(),
-                                              data->common(), temp_zone);
+    DeadCodeElimination dead_code_elimination(
+        &graph_reducer, data->graph(), data->common(), temp_zone,
+        data->info()->concurrent_inlining());
     JSCreateLowering create_lowering(&graph_reducer, data->dependencies(),
                                      data->jsgraph(), data->broker(),
                                      temp_zone);
@@ -1766,8 +1756,9 @@ struct EarlyOptimizationPhase {
    GraphReducer graph_reducer(
        temp_zone, data->graph(), &data->info()->tick_counter(), data->broker(),
        data->jsgraph()->Dead(), data->observe_node_manager());
-    DeadCodeElimination dead_code_elimination(&graph_reducer, data->graph(),
-                                              data->common(), temp_zone);
+    DeadCodeElimination dead_code_elimination(
+        &graph_reducer, data->graph(), data->common(), temp_zone,
+        data->info()->concurrent_inlining());
     SimplifiedOperatorReducer simple_reducer(&graph_reducer, data->jsgraph(),
                                              data->broker());
     RedundancyElimination redundancy_elimination(&graph_reducer, temp_zone);
@@ -1848,8 +1839,9 @@ struct EffectControlLinearizationPhase {
                                  &data->info()->tick_counter(), data->broker(),
                                  data->jsgraph()->Dead(),
                                  data->observe_node_manager());
-      DeadCodeElimination dead_code_elimination(&graph_reducer, data->graph(),
-                                                data->common(), temp_zone);
+      DeadCodeElimination dead_code_elimination(
+          &graph_reducer, data->graph(), data->common(), temp_zone,
+          data->info()->concurrent_inlining());
       CommonOperatorReducer common_reducer(&graph_reducer, data->graph(),
                                            data->broker(), data->common(),
                                            data->machine(), temp_zone);
@@ -1887,8 +1879,9 @@ struct LoadEliminationPhase {
     BranchElimination branch_condition_elimination(&graph_reducer,
                                                    data->jsgraph(), temp_zone,
                                                    BranchElimination::kEARLY);
-    DeadCodeElimination dead_code_elimination(&graph_reducer, data->graph(),
-                                              data->common(), temp_zone);
+    DeadCodeElimination dead_code_elimination(
+        &graph_reducer, data->graph(), data->common(), temp_zone,
+        data->info()->concurrent_inlining());
     RedundancyElimination redundancy_elimination(&graph_reducer, temp_zone);
     LoadElimination load_elimination(&graph_reducer, data->jsgraph(),
                                      temp_zone);
@@ -1955,8 +1948,9 @@ struct LateOptimizationPhase {
data->jsgraph()->Dead(), data->observe_node_manager()); data->jsgraph()->Dead(), data->observe_node_manager());
BranchElimination branch_condition_elimination(&graph_reducer, BranchElimination branch_condition_elimination(&graph_reducer,
data->jsgraph(), temp_zone); data->jsgraph(), temp_zone);
DeadCodeElimination dead_code_elimination(&graph_reducer, data->graph(), DeadCodeElimination dead_code_elimination(
data->common(), temp_zone); &graph_reducer, data->graph(), data->common(), temp_zone,
data->info()->concurrent_inlining());
ValueNumberingReducer value_numbering(temp_zone, data->graph()->zone()); ValueNumberingReducer value_numbering(temp_zone, data->graph()->zone());
MachineOperatorReducer machine_reducer(&graph_reducer, data->jsgraph()); MachineOperatorReducer machine_reducer(&graph_reducer, data->jsgraph());
CommonOperatorReducer common_reducer(&graph_reducer, data->graph(), CommonOperatorReducer common_reducer(&graph_reducer, data->graph(),
...@@ -2070,8 +2064,9 @@ struct CsaEarlyOptimizationPhase { ...@@ -2070,8 +2064,9 @@ struct CsaEarlyOptimizationPhase {
allow_signalling_nan); allow_signalling_nan);
BranchElimination branch_condition_elimination(&graph_reducer, BranchElimination branch_condition_elimination(&graph_reducer,
data->jsgraph(), temp_zone); data->jsgraph(), temp_zone);
DeadCodeElimination dead_code_elimination(&graph_reducer, data->graph(), DeadCodeElimination dead_code_elimination(
data->common(), temp_zone); &graph_reducer, data->graph(), data->common(), temp_zone,
data->info()->concurrent_inlining());
CommonOperatorReducer common_reducer(&graph_reducer, data->graph(), CommonOperatorReducer common_reducer(&graph_reducer, data->graph(),
data->broker(), data->common(), data->broker(), data->common(),
data->machine(), temp_zone); data->machine(), temp_zone);
...@@ -2097,8 +2092,9 @@ struct CsaOptimizationPhase { ...@@ -2097,8 +2092,9 @@ struct CsaOptimizationPhase {
data->jsgraph()->Dead(), data->observe_node_manager()); data->jsgraph()->Dead(), data->observe_node_manager());
BranchElimination branch_condition_elimination(&graph_reducer, BranchElimination branch_condition_elimination(&graph_reducer,
data->jsgraph(), temp_zone); data->jsgraph(), temp_zone);
DeadCodeElimination dead_code_elimination(&graph_reducer, data->graph(), DeadCodeElimination dead_code_elimination(
data->common(), temp_zone); &graph_reducer, data->graph(), data->common(), temp_zone,
data->info()->concurrent_inlining());
MachineOperatorReducer machine_reducer(&graph_reducer, data->jsgraph(), MachineOperatorReducer machine_reducer(&graph_reducer, data->jsgraph(),
allow_signalling_nan); allow_signalling_nan);
CommonOperatorReducer common_reducer(&graph_reducer, data->graph(), CommonOperatorReducer common_reducer(&graph_reducer, data->graph(),
...@@ -3308,8 +3304,7 @@ MaybeHandle<Code> Pipeline::GenerateCodeForTesting( ...@@ -3308,8 +3304,7 @@ MaybeHandle<Code> Pipeline::GenerateCodeForTesting(
CreatePipelineStatistics(Handle<Script>::null(), info, isolate, CreatePipelineStatistics(Handle<Script>::null(), info, isolate,
&zone_stats)); &zone_stats));
PipelineData data(&zone_stats, isolate, info, pipeline_statistics.get(), PipelineData data(&zone_stats, isolate, info, pipeline_statistics.get());
i::FLAG_concurrent_inlining);
PipelineImpl pipeline(&data); PipelineImpl pipeline(&data);
Linkage linkage(Linkage::ComputeIncoming(data.instruction_zone(), info)); Linkage linkage(Linkage::ComputeIncoming(data.instruction_zone(), info));
......
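The pipeline.cc hunks above fold the OSR parameters and the concurrent-inlining decision into OptimizedCompilationInfo, so PipelineData no longer receives a separate boolean argument. A minimal standalone sketch of that shape, with illustrative stand-in types rather than the real V8 classes:

#include <iostream>

// Illustrative stand-in for OptimizedCompilationInfo; not V8's actual API.
struct CompilationConfigSketch {
  bool is_turboprop = false;
  bool flag_concurrent_inlining = false;
  bool is_osr = false;

  // The decision lives in one place instead of being threaded through as an
  // extra constructor argument.
  bool concurrent_inlining() const {
    return !is_osr && (is_turboprop || flag_concurrent_inlining);
  }
};

// Illustrative stand-in for PipelineData: it reads the bit from the config
// object rather than taking its own bool parameter.
struct PipelineDataSketch {
  explicit PipelineDataSketch(const CompilationConfigSketch* config)
      : config_(config) {}
  bool concurrent_inlining() const { return config_->concurrent_inlining(); }
  const CompilationConfigSketch* config_;
};

int main() {
  CompilationConfigSketch config;
  config.flag_concurrent_inlining = true;
  PipelineDataSketch data(&config);
  std::cout << std::boolalpha << data.concurrent_inlining() << "\n";  // true
}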
...@@ -1535,7 +1535,7 @@ void SerializerForBackgroundCompilation::VisitLdaConstant( ...@@ -1535,7 +1535,7 @@ void SerializerForBackgroundCompilation::VisitLdaConstant(
iterator->GetConstantForIndexOperand(0, broker()->isolate()); iterator->GetConstantForIndexOperand(0, broker()->isolate());
// TODO(v8:7790): FixedArrays still need to be serialized until they are // TODO(v8:7790): FixedArrays still need to be serialized until they are
// moved to kNeverSerialized. // moved to kNeverSerialized.
if (!FLAG_turbo_direct_heap_access || constant->IsFixedArray()) { if (!broker()->is_concurrent_inlining() || constant->IsFixedArray()) {
ObjectRef(broker(), constant); ObjectRef(broker(), constant);
} }
environment()->accumulator_hints() = Hints::SingleConstant(constant, zone()); environment()->accumulator_hints() = Hints::SingleConstant(constant, zone());
......
...@@ -5,6 +5,7 @@ ...@@ -5,6 +5,7 @@
#include "src/compiler/simplified-operator-reducer.h" #include "src/compiler/simplified-operator-reducer.h"
#include "src/compiler/js-graph.h" #include "src/compiler/js-graph.h"
#include "src/compiler/js-heap-broker.h"
#include "src/compiler/machine-operator.h" #include "src/compiler/machine-operator.h"
#include "src/compiler/node-matchers.h" #include "src/compiler/node-matchers.h"
#include "src/compiler/operator-properties.h" #include "src/compiler/operator-properties.h"
...@@ -41,7 +42,7 @@ SimplifiedOperatorReducer::~SimplifiedOperatorReducer() = default; ...@@ -41,7 +42,7 @@ SimplifiedOperatorReducer::~SimplifiedOperatorReducer() = default;
Reduction SimplifiedOperatorReducer::Reduce(Node* node) { Reduction SimplifiedOperatorReducer::Reduce(Node* node) {
DisallowHeapAccessIf no_heap_access(!FLAG_turbo_direct_heap_access); DisallowHeapAccessIf no_heap_access(!broker()->is_concurrent_inlining());
switch (node->opcode()) { switch (node->opcode()) {
case IrOpcode::kBooleanNot: { case IrOpcode::kBooleanNot: {
HeapObjectMatcher m(node->InputAt(0)); HeapObjectMatcher m(node->InputAt(0));
......
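The reducer hunks above (and the similar ones that follow) replace a read of a process-wide flag with a per-job query on the heap broker. A hedged, self-contained sketch of that pattern; BrokerSketch and ReducerSketch are illustrative names, not the real JSHeapBroker or SimplifiedOperatorReducer:

#include <iostream>

// Stand-in for JSHeapBroker, carrying the per-job concurrent-inlining bit.
class BrokerSketch {
 public:
  explicit BrokerSketch(bool concurrent_inlining)
      : concurrent_inlining_(concurrent_inlining) {}
  bool is_concurrent_inlining() const { return concurrent_inlining_; }

 private:
  const bool concurrent_inlining_;
};

// Stand-in for a reducer that keeps a broker pointer as a member so its
// Reduce() guard can consult the job's configuration.
class ReducerSketch {
 public:
  explicit ReducerSketch(const BrokerSketch* broker) : broker_(broker) {}

  // Mirrors the guard in the hunks above: direct heap access is only
  // permitted when this particular job runs with concurrent inlining,
  // instead of consulting a global flag.
  bool HeapAccessAllowed() const { return broker_->is_concurrent_inlining(); }

 private:
  const BrokerSketch* const broker_;
};

int main() {
  BrokerSketch broker(/*concurrent_inlining=*/true);
  ReducerSketch reducer(&broker);
  std::cout << std::boolalpha << reducer.HeapAccessAllowed() << "\n";  // true
}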
...@@ -5,6 +5,7 @@ ...@@ -5,6 +5,7 @@
#include "src/compiler/type-narrowing-reducer.h" #include "src/compiler/type-narrowing-reducer.h"
#include "src/compiler/js-graph.h" #include "src/compiler/js-graph.h"
#include "src/compiler/js-heap-broker.h"
namespace v8 { namespace v8 {
namespace internal { namespace internal {
...@@ -12,12 +13,15 @@ namespace compiler { ...@@ -12,12 +13,15 @@ namespace compiler {
TypeNarrowingReducer::TypeNarrowingReducer(Editor* editor, JSGraph* jsgraph, TypeNarrowingReducer::TypeNarrowingReducer(Editor* editor, JSGraph* jsgraph,
JSHeapBroker* broker) JSHeapBroker* broker)
: AdvancedReducer(editor), jsgraph_(jsgraph), op_typer_(broker, zone()) {} : AdvancedReducer(editor),
jsgraph_(jsgraph),
broker_(broker),
op_typer_(broker, zone()) {}
TypeNarrowingReducer::~TypeNarrowingReducer() = default; TypeNarrowingReducer::~TypeNarrowingReducer() = default;
Reduction TypeNarrowingReducer::Reduce(Node* node) { Reduction TypeNarrowingReducer::Reduce(Node* node) {
DisallowHeapAccessIf no_heap_access(!FLAG_turbo_direct_heap_access); DisallowHeapAccessIf no_heap_access(!broker_->is_concurrent_inlining());
Type new_type = Type::Any(); Type new_type = Type::Any();
......
...@@ -34,6 +34,7 @@ class V8_EXPORT_PRIVATE TypeNarrowingReducer final ...@@ -34,6 +34,7 @@ class V8_EXPORT_PRIVATE TypeNarrowingReducer final
Zone* zone() const; Zone* zone() const;
JSGraph* const jsgraph_; JSGraph* const jsgraph_;
const JSHeapBroker* const broker_;
OperationTyper op_typer_; OperationTyper op_typer_;
}; };
......
...@@ -34,7 +34,7 @@ TypedOptimization::TypedOptimization(Editor* editor, ...@@ -34,7 +34,7 @@ TypedOptimization::TypedOptimization(Editor* editor,
TypedOptimization::~TypedOptimization() = default; TypedOptimization::~TypedOptimization() = default;
Reduction TypedOptimization::Reduce(Node* node) { Reduction TypedOptimization::Reduce(Node* node) {
DisallowHeapAccessIf no_heap_access(!FLAG_turbo_direct_heap_access); DisallowHeapAccessIf no_heap_access(!broker()->is_concurrent_inlining());
switch (node->opcode()) { switch (node->opcode()) {
case IrOpcode::kConvertReceiver: case IrOpcode::kConvertReceiver:
return ReduceConvertReceiver(node); return ReduceConvertReceiver(node);
......
...@@ -589,7 +589,6 @@ DEFINE_BOOL(trace_generalization, false, "trace map generalization") ...@@ -589,7 +589,6 @@ DEFINE_BOOL(trace_generalization, false, "trace map generalization")
// Flags for TurboProp. // Flags for TurboProp.
DEFINE_BOOL(turboprop, false, "enable experimental turboprop mid-tier compiler") DEFINE_BOOL(turboprop, false, "enable experimental turboprop mid-tier compiler")
DEFINE_IMPLICATION(turboprop, turbo_direct_heap_access)
DEFINE_BOOL(turboprop_mid_tier_reg_alloc, true, DEFINE_BOOL(turboprop_mid_tier_reg_alloc, true,
"enable mid-tier register allocator for turboprop") "enable mid-tier register allocator for turboprop")
DEFINE_BOOL( DEFINE_BOOL(
...@@ -648,13 +647,10 @@ DEFINE_BOOL(concurrent_inlining, false, ...@@ -648,13 +647,10 @@ DEFINE_BOOL(concurrent_inlining, false,
"run optimizing compiler's inlining phase on a separate thread") "run optimizing compiler's inlining phase on a separate thread")
DEFINE_BOOL(stress_concurrent_inlining, false, DEFINE_BOOL(stress_concurrent_inlining, false,
"makes concurrent inlining more likely to trigger in tests") "makes concurrent inlining more likely to trigger in tests")
DEFINE_BOOL(turbo_direct_heap_access, false,
"access kNeverSerialized objects directly from the heap")
DEFINE_IMPLICATION(stress_concurrent_inlining, concurrent_inlining) DEFINE_IMPLICATION(stress_concurrent_inlining, concurrent_inlining)
DEFINE_NEG_IMPLICATION(stress_concurrent_inlining, lazy_feedback_allocation) DEFINE_NEG_IMPLICATION(stress_concurrent_inlining, lazy_feedback_allocation)
DEFINE_WEAK_VALUE_IMPLICATION(stress_concurrent_inlining, interrupt_budget, DEFINE_WEAK_VALUE_IMPLICATION(stress_concurrent_inlining, interrupt_budget,
15 * KB) 15 * KB)
DEFINE_IMPLICATION(concurrent_inlining, turbo_direct_heap_access)
DEFINE_BOOL( DEFINE_BOOL(
turbo_concurrent_get_property_access_info, false, turbo_concurrent_get_property_access_info, false,
"concurrently call GetPropertyAccessInfo (only with --concurrent-inlining)") "concurrently call GetPropertyAccessInfo (only with --concurrent-inlining)")
......
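After the flag-definitions.h hunk above, only the implications from --stress-concurrent-inlining remain. A tiny illustration of how those boolean implications compose; the struct and function are illustrative, not the V8 flag engine:

#include <iostream>

// Field names mirror the flags in the hunk above.
struct FlagsSketch {
  bool stress_concurrent_inlining = false;
  bool concurrent_inlining = false;
  bool lazy_feedback_allocation = true;
};

// Apply the implications that remain after this CL:
//   stress_concurrent_inlining  =>  concurrent_inlining
//   stress_concurrent_inlining  => !lazy_feedback_allocation
void ApplyImplications(FlagsSketch& f) {
  if (f.stress_concurrent_inlining) {
    f.concurrent_inlining = true;
    f.lazy_feedback_allocation = false;
  }
}

int main() {
  FlagsSketch f;
  f.stress_concurrent_inlining = true;
  ApplyImplications(f);
  std::cout << std::boolalpha << f.concurrent_inlining << " "
            << f.lazy_feedback_allocation << "\n";  // true false
}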
...@@ -88,8 +88,6 @@ class BackgroundCompilationThread final : public v8::base::Thread { ...@@ -88,8 +88,6 @@ class BackgroundCompilationThread final : public v8::base::Thread {
TEST(TestConcurrentSharedFunctionInfo) { TEST(TestConcurrentSharedFunctionInfo) {
FlagScope<bool> allow_natives_syntax(&i::FLAG_allow_natives_syntax, true); FlagScope<bool> allow_natives_syntax(&i::FLAG_allow_natives_syntax, true);
FlagScope<bool> concurrent_inlining(&i::FLAG_concurrent_inlining, true); FlagScope<bool> concurrent_inlining(&i::FLAG_concurrent_inlining, true);
FlagScope<bool> turbo_direct_heap_access(&i::FLAG_turbo_direct_heap_access,
true);
HandleAndZoneScope scope; HandleAndZoneScope scope;
Isolate* isolate = scope.main_isolate(); Isolate* isolate = scope.main_isolate();
......
...@@ -1269,7 +1269,6 @@ TEST(Regress10774) { ...@@ -1269,7 +1269,6 @@ TEST(Regress10774) {
i::FLAG_allow_natives_syntax = true; i::FLAG_allow_natives_syntax = true;
i::FLAG_turboprop = true; i::FLAG_turboprop = true;
i::FLAG_turbo_dynamic_map_checks = true; i::FLAG_turbo_dynamic_map_checks = true;
i::FLAG_turbo_direct_heap_access = true;
#ifdef VERIFY_HEAP #ifdef VERIFY_HEAP
i::FLAG_verify_heap = true; i::FLAG_verify_heap = true;
#endif #endif
......
...@@ -28,7 +28,8 @@ ...@@ -28,7 +28,8 @@
// Flags: --allow-natives-syntax // Flags: --allow-natives-syntax
// Flags: --concurrent-recompilation --block-concurrent-recompilation // Flags: --concurrent-recompilation --block-concurrent-recompilation
// Flags: --nostress-opt --no-always-opt // Flags: --nostress-opt --no-always-opt
// Flags: --no-turbo-direct-heap-access // Flags: --no-turboprop
// Flags: --no-concurrent-inlining
// Flags: --no-turbo-concurrent-get-property-access-info // Flags: --no-turbo-concurrent-get-property-access-info
// --nostress-opt is in place because this particular optimization // --nostress-opt is in place because this particular optimization
......
...@@ -24,7 +24,8 @@ class DeadCodeEliminationTest : public GraphTest { ...@@ -24,7 +24,8 @@ class DeadCodeEliminationTest : public GraphTest {
protected: protected:
Reduction Reduce(AdvancedReducer::Editor* editor, Node* node) { Reduction Reduce(AdvancedReducer::Editor* editor, Node* node) {
DeadCodeElimination reducer(editor, graph(), common(), zone()); DeadCodeElimination reducer(editor, graph(), common(), zone(),
FLAG_concurrent_inlining);
return reducer.Reduce(node); return reducer.Reduce(node);
} }
......
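The unit-test hunk above simply forwards FLAG_concurrent_inlining into the new constructor parameter. A hedged sketch of that shape; the real DeadCodeElimination also takes an editor, graph, common-operator builder, and zone, which are omitted here:

#include <cassert>

// Stand-in for the global flag consulted by the test fixture.
static bool FLAG_concurrent_inlining_sketch = false;

// Illustrative reducer showing only the newly added constructor bit.
class DeadCodeEliminationSketch {
 public:
  explicit DeadCodeEliminationSketch(bool is_concurrent_inlining)
      : is_concurrent_inlining_(is_concurrent_inlining) {}
  bool is_concurrent_inlining() const { return is_concurrent_inlining_; }

 private:
  const bool is_concurrent_inlining_;
};

int main() {
  // The fixture forwards the flag value, as in the hunk above.
  DeadCodeEliminationSketch reducer(FLAG_concurrent_inlining_sketch);
  assert(reducer.is_concurrent_inlining() == FLAG_concurrent_inlining_sketch);
  return 0;
}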
...@@ -59,9 +59,8 @@ INCOMPATIBLE_FLAGS_PER_VARIANT = { ...@@ -59,9 +59,8 @@ INCOMPATIBLE_FLAGS_PER_VARIANT = {
"slow_path": ["--no-force-slow-path"], "slow_path": ["--no-force-slow-path"],
"stress_concurrent_allocation": ["--single-threaded-gc", "--predictable"], "stress_concurrent_allocation": ["--single-threaded-gc", "--predictable"],
"stress_concurrent_inlining": ["--single-threaded", "--predictable", "stress_concurrent_inlining": ["--single-threaded", "--predictable",
"--no-turbo-direct-heap-access"], "--no-concurrent-inlining"],
"stress_incremental_marking": ["--no-stress-incremental-marking"], "stress_incremental_marking": ["--no-stress-incremental-marking"],
"future": ["--no-turbo-direct-heap-access"],
"stress_js_bg_compile_wasm_code_gc": ["--no-stress-background-compile"], "stress_js_bg_compile_wasm_code_gc": ["--no-stress-background-compile"],
"stress": ["--no-stress-opt", "--always-opt", "--no-always-opt", "--liftoff", "stress": ["--no-stress-opt", "--always-opt", "--no-always-opt", "--liftoff",
"--max-inlined-bytecode-size=*", "--max-inlined-bytecode-size=*",
...@@ -69,10 +68,8 @@ INCOMPATIBLE_FLAGS_PER_VARIANT = { ...@@ -69,10 +68,8 @@ INCOMPATIBLE_FLAGS_PER_VARIANT = {
"--wasm-generic-wrapper"], "--wasm-generic-wrapper"],
"sparkplug": ["--jitless", "--no-sparkplug" ], "sparkplug": ["--jitless", "--no-sparkplug" ],
"always_sparkplug": ["--jitless", "--no-sparkplug", "--no-always-sparkplug"], "always_sparkplug": ["--jitless", "--no-sparkplug", "--no-always-sparkplug"],
"turboprop": ["--interrupt-budget=*", "--no-turbo-direct-heap-access", "turboprop": ["--interrupt-budget=*", "--no-turboprop"],
"--no-turboprop"], "turboprop_as_toptier": ["--interrupt-budget=*", "--no-turboprop",
"turboprop_as_toptier": ["--interrupt-budget=*",
"--no-turbo-direct-heap-access", "--no-turboprop",
"--no-turboprop-as-toptier"], "--no-turboprop-as-toptier"],
"code_serializer": ["--cache=after-execute", "--cache=full-code-cache", "code_serializer": ["--cache=after-execute", "--cache=full-code-cache",
"--cache=none"], "--cache=none"],
...@@ -109,9 +106,9 @@ INCOMPATIBLE_FLAGS_PER_EXTRA_FLAG = { ...@@ -109,9 +106,9 @@ INCOMPATIBLE_FLAGS_PER_EXTRA_FLAG = {
"--no-enable-sse4-1": ["--enable-sse4-1"], "--no-enable-sse4-1": ["--enable-sse4-1"],
"--optimize-for-size": ["--max-semi-space-size=*"], "--optimize-for-size": ["--max-semi-space-size=*"],
"--stress_concurrent_allocation": ["--single-threaded-gc", "--predictable"], "--stress_concurrent_allocation": ["--single-threaded-gc", "--predictable"],
"--stress_concurrent_inlining": ["--single-threaded", "--predictable"], "--stress-concurrent-inlining":
INCOMPATIBLE_FLAGS_PER_VARIANT["stress_concurrent_inlining"],
"--stress-flush-bytecode": ["--no-stress-flush-bytecode"], "--stress-flush-bytecode": ["--no-stress-flush-bytecode"],
"--future": ["--no-turbo-direct-heap-access"],
"--stress-incremental-marking": INCOMPATIBLE_FLAGS_PER_VARIANT["stress_incremental_marking"], "--stress-incremental-marking": INCOMPATIBLE_FLAGS_PER_VARIANT["stress_incremental_marking"],
} }
......