Commit 4f2f14f8 authored by Jakob Gruber, committed by Commit Bot

[compiler] Remove --turbo-direct-heap-access

On a per-job basis, --turbo-direct-heap-access should be equal to
whether concurrent inlining is enabled. We simplify involved logic by
removing the flag, and replacing all access to

- FLAG_turbo_direct_heap_access, and
- FLAG_concurrent_inlining

inside compiler/ with
OptimizedCompilationInfo::is_concurrent_inlining() (or derived values).

Bug: v8:7790
Change-Id: I64818e0e1004dded08c784ef1c4bdfd2af990a59
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2843345
Commit-Queue: Jakob Gruber <jgruber@chromium.org>
Auto-Submit: Jakob Gruber <jgruber@chromium.org>
Reviewed-by: Georg Neis <neis@chromium.org>
Cr-Commit-Position: refs/heads/master@{#74166}
parent 0bc71bc9
......@@ -23,12 +23,16 @@ namespace internal {
OptimizedCompilationInfo::OptimizedCompilationInfo(
Zone* zone, Isolate* isolate, Handle<SharedFunctionInfo> shared,
Handle<JSFunction> closure, CodeKind code_kind)
Handle<JSFunction> closure, CodeKind code_kind, BytecodeOffset osr_offset,
JavaScriptFrame* osr_frame)
: code_kind_(code_kind),
osr_offset_(osr_offset),
osr_frame_(osr_frame),
zone_(zone),
optimization_id_(isolate->NextOptimizationId()) {
DCHECK_EQ(*shared, closure->shared());
DCHECK(shared->is_compiled());
DCHECK_IMPLIES(is_osr(), IsOptimizing());
bytecode_array_ = handle(shared->GetBytecodeArray(isolate), isolate);
shared_info_ = shared;
closure_ = closure;
......@@ -86,6 +90,10 @@ void OptimizedCompilationInfo::ConfigureFlags() {
if (FLAG_untrusted_code_mitigations) set_untrusted_code_mitigations();
if (FLAG_turbo_inline_js_wasm_calls) set_inline_js_wasm_calls();
if (!is_osr() && (IsTurboprop() || FLAG_concurrent_inlining)) {
set_concurrent_inlining();
}
switch (code_kind_) {
case CodeKind::TURBOFAN:
if (FLAG_function_context_specialization) {
......
......@@ -104,7 +104,15 @@ class V8_EXPORT_PRIVATE OptimizedCompilationInfo final {
// Construct a compilation info for optimized compilation.
OptimizedCompilationInfo(Zone* zone, Isolate* isolate,
Handle<SharedFunctionInfo> shared,
Handle<JSFunction> closure, CodeKind code_kind);
Handle<JSFunction> closure, CodeKind code_kind,
BytecodeOffset osr_offset,
JavaScriptFrame* osr_frame);
// For testing.
OptimizedCompilationInfo(Zone* zone, Isolate* isolate,
Handle<SharedFunctionInfo> shared,
Handle<JSFunction> closure, CodeKind code_kind)
: OptimizedCompilationInfo(zone, isolate, shared, closure, code_kind,
BytecodeOffset::None(), nullptr) {}
// Construct a compilation info for stub compilation, Wasm, and testing.
OptimizedCompilationInfo(Vector<const char> debug_name, Zone* zone,
CodeKind code_kind);
......@@ -167,13 +175,6 @@ class V8_EXPORT_PRIVATE OptimizedCompilationInfo final {
bool IsWasm() const { return code_kind() == CodeKind::WASM_FUNCTION; }
#endif // V8_ENABLE_WEBASSEMBLY
void SetOptimizingForOsr(BytecodeOffset osr_offset,
JavaScriptFrame* osr_frame) {
DCHECK(IsOptimizing());
osr_offset_ = osr_offset;
osr_frame_ = osr_frame;
}
void set_persistent_handles(
std::unique_ptr<PersistentHandles> persistent_handles) {
DCHECK_NULL(ph_);
......@@ -292,7 +293,9 @@ class V8_EXPORT_PRIVATE OptimizedCompilationInfo final {
#endif // V8_ENABLE_WEBASSEMBLY
// Entry point when compiling for OSR, {BytecodeOffset::None} otherwise.
BytecodeOffset osr_offset_ = BytecodeOffset::None();
const BytecodeOffset osr_offset_ = BytecodeOffset::None();
// The current OSR frame for specialization or {nullptr}.
JavaScriptFrame* const osr_frame_ = nullptr;
// The zone from which the compilation pipeline working on this
// OptimizedCompilationInfo allocates.
......@@ -308,9 +311,6 @@ class V8_EXPORT_PRIVATE OptimizedCompilationInfo final {
const int optimization_id_;
unsigned inlined_bytecode_size_ = 0;
// The current OSR frame for specialization or {nullptr}.
JavaScriptFrame* osr_frame_ = nullptr;
Vector<const char> debug_name_;
std::unique_ptr<char[]> trace_turbo_filename_;
......
......@@ -1570,7 +1570,7 @@ void BytecodeGraphBuilder::VisitBytecodes() {
}
// TODO(leszeks): Increment usage counter on BG thread.
if (!FLAG_concurrent_inlining && has_one_shot_bytecode) {
if (!broker()->is_concurrent_inlining() && has_one_shot_bytecode) {
// (For concurrent inlining this is done in the serializer instead.)
isolate()->CountUsage(
v8::Isolate::UseCounterFeature::kOptimizedFunctionWithOneShotBytecode);
......
......@@ -8,10 +8,11 @@
#include "src/compiler/common-operator.h"
#include "src/compiler/graph.h"
#include "src/compiler/js-heap-broker.h"
#include "src/compiler/machine-operator.h"
#include "src/compiler/node.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/node-properties.h"
#include "src/compiler/node.h"
namespace v8 {
namespace internal {
......@@ -55,7 +56,8 @@ CommonOperatorReducer::CommonOperatorReducer(Editor* editor, Graph* graph,
}
Reduction CommonOperatorReducer::Reduce(Node* node) {
DisallowHeapAccessIf no_heap_access(!FLAG_turbo_direct_heap_access);
DisallowHeapAccessIf no_heap_access(broker() == nullptr ||
!broker()->is_concurrent_inlining());
switch (node->opcode()) {
case IrOpcode::kBranch:
return ReduceBranch(node);
......
......@@ -5,6 +5,7 @@
#include "src/compiler/constant-folding-reducer.h"
#include "src/compiler/js-graph.h"
#include "src/compiler/js-heap-broker.h"
#include "src/objects/objects-inl.h"
namespace v8 {
......@@ -63,7 +64,7 @@ ConstantFoldingReducer::ConstantFoldingReducer(Editor* editor, JSGraph* jsgraph,
ConstantFoldingReducer::~ConstantFoldingReducer() = default;
Reduction ConstantFoldingReducer::Reduce(Node* node) {
DisallowHeapAccessIf no_heap_access(!FLAG_turbo_direct_heap_access);
DisallowHeapAccessIf no_heap_access(!broker()->is_concurrent_inlining());
if (!NodeProperties::IsConstant(node) && NodeProperties::IsTyped(node) &&
node->op()->HasProperty(Operator::kEliminatable) &&
node->opcode() != IrOpcode::kFinishRegion) {
......
......@@ -16,12 +16,14 @@ namespace compiler {
DeadCodeElimination::DeadCodeElimination(Editor* editor, Graph* graph,
CommonOperatorBuilder* common,
Zone* temp_zone)
Zone* temp_zone,
bool is_concurrent_inlining)
: AdvancedReducer(editor),
graph_(graph),
common_(common),
dead_(graph->NewNode(common->Dead())),
zone_(temp_zone) {
zone_(temp_zone),
is_concurrent_inlining_(is_concurrent_inlining) {
NodeProperties::SetType(dead_, Type::None());
}
......@@ -46,7 +48,7 @@ Node* FindDeadInput(Node* node) {
} // namespace
Reduction DeadCodeElimination::Reduce(Node* node) {
DisallowHeapAccessIf no_heap_access(!FLAG_turbo_direct_heap_access);
DisallowHeapAccessIf no_heap_access(!is_concurrent_inlining_);
switch (node->opcode()) {
case IrOpcode::kEnd:
return ReduceEnd(node);
......
......@@ -40,7 +40,8 @@ class V8_EXPORT_PRIVATE DeadCodeElimination final
: public NON_EXPORTED_BASE(AdvancedReducer) {
public:
DeadCodeElimination(Editor* editor, Graph* graph,
CommonOperatorBuilder* common, Zone* temp_zone);
CommonOperatorBuilder* common, Zone* temp_zone,
bool is_concurrent_inlining);
~DeadCodeElimination() final = default;
DeadCodeElimination(const DeadCodeElimination&) = delete;
DeadCodeElimination& operator=(const DeadCodeElimination&) = delete;
......@@ -78,6 +79,8 @@ class V8_EXPORT_PRIVATE DeadCodeElimination final
CommonOperatorBuilder* const common_;
Node* const dead_;
Zone* zone_;
const bool is_concurrent_inlining_;
};
} // namespace compiler
......
......@@ -112,7 +112,7 @@ Reduction GraphReducer::Reduce(Node* const node) {
if (FLAG_trace_turbo_reduction) {
UnparkedScopeIfNeeded unparked(broker_);
// TODO(neis): Disallow racy handle dereference once we stop
// supporting --no-local-heaps --no-turbo-direct-heap-access.
// supporting --no-local-heaps --no-concurrent-inlining.
AllowHandleDereference allow_deref;
StdoutStream{} << "- In-place update of #" << *node << " by reducer "
<< (*i)->reducer_name() << std::endl;
......@@ -125,7 +125,7 @@ Reduction GraphReducer::Reduce(Node* const node) {
if (FLAG_trace_turbo_reduction) {
UnparkedScopeIfNeeded unparked(broker_);
// TODO(neis): Disallow racy handle dereference once we stop
// supporting --no-local-heaps --no-turbo-direct-heap-access.
// supporting --no-local-heaps --no-concurrent-inlining.
AllowHandleDereference allow_deref;
StdoutStream{} << "- Replacement of #" << *node << " with #"
<< *(reduction.replacement()) << " by reducer "
......
......@@ -56,7 +56,7 @@ HEAP_BROKER_NEVER_SERIALIZED_OBJECT_LIST(FORWARD_DECL)
// mutable) HeapObject and the data is an instance of ObjectData. Its handle
// must be persistent so that the GC can update it at a safepoint. Via this
// handle, the object can be accessed concurrently to the main thread. To be
// used the flag --turbo-direct-heap-access must be on.
// used the flag --concurrent-inlining must be on.
//
// kUnserializedReadOnlyHeapObject: The underlying V8 object is a read-only
// HeapObject and the data is an instance of ObjectData. For
......@@ -100,7 +100,13 @@ class ObjectData : public ZoneObject {
public:
ObjectData(JSHeapBroker* broker, ObjectData** storage, Handle<Object> object,
ObjectDataKind kind)
: object_(object), kind_(kind) {
: object_(object),
kind_(kind)
#ifdef DEBUG
,
broker_(broker)
#endif // DEBUG
{
// This assignment ensures we don't end up inserting the same object
// in an endless recursion.
*storage = this;
......@@ -154,11 +160,16 @@ class ObjectData : public ZoneObject {
#ifdef DEBUG
enum class Usage{kUnused, kOnlyIdentityUsed, kDataUsed};
mutable Usage used_status = Usage::kUnused;
JSHeapBroker* broker() const { return broker_; }
#endif // DEBUG
private:
Handle<Object> const object_;
ObjectDataKind const kind_;
#ifdef DEBUG
JSHeapBroker* const broker_; // For DCHECKs.
#endif // DEBUG
};
class HeapObjectData : public ObjectData {
......@@ -255,7 +266,7 @@ FunctionTemplateInfoData::FunctionTemplateInfoData(
c_function_(v8::ToCData<Address>(object->GetCFunction())),
c_signature_(v8::ToCData<CFunctionInfo*>(object->GetCSignature())),
known_receivers_(broker->zone()) {
DCHECK(!FLAG_turbo_direct_heap_access);
DCHECK(!broker->is_concurrent_inlining());
auto function_template_info = Handle<FunctionTemplateInfo>::cast(object);
is_signature_undefined_ =
function_template_info->signature().IsUndefined(broker->isolate());
......@@ -271,7 +282,7 @@ CallHandlerInfoData::CallHandlerInfoData(JSHeapBroker* broker,
Handle<CallHandlerInfo> object)
: HeapObjectData(broker, storage, object),
callback_(v8::ToCData<Address>(object->callback())) {
DCHECK(!FLAG_turbo_direct_heap_access);
DCHECK(!broker->is_concurrent_inlining());
}
PropertyCellData::PropertyCellData(JSHeapBroker* broker, ObjectData** storage,
......@@ -603,7 +614,7 @@ class ArrayBoilerplateDescriptionData : public HeapObjectData {
Handle<ArrayBoilerplateDescription> object)
: HeapObjectData(broker, storage, object),
constants_elements_length_(object->constant_elements().length()) {
DCHECK(!FLAG_turbo_direct_heap_access);
DCHECK(!broker->is_concurrent_inlining());
}
int constants_elements_length() const { return constants_elements_length_; }
......@@ -617,7 +628,7 @@ class ObjectBoilerplateDescriptionData : public HeapObjectData {
ObjectBoilerplateDescriptionData(JSHeapBroker* broker, ObjectData** storage,
Handle<ObjectBoilerplateDescription> object)
: HeapObjectData(broker, storage, object), size_(object->size()) {
DCHECK(!FLAG_turbo_direct_heap_access);
DCHECK(!broker->is_concurrent_inlining());
}
int size() const { return size_; }
......@@ -646,15 +657,15 @@ class JSBoundFunctionData : public JSObjectData {
bool serialized() const { return serialized_; }
ObjectData* bound_target_function() const {
DCHECK(!FLAG_turbo_direct_heap_access);
DCHECK(!broker()->is_concurrent_inlining());
return bound_target_function_;
}
ObjectData* bound_this() const {
DCHECK(!FLAG_turbo_direct_heap_access);
DCHECK(!broker()->is_concurrent_inlining());
return bound_this_;
}
ObjectData* bound_arguments() const {
DCHECK(!FLAG_turbo_direct_heap_access);
DCHECK(!broker()->is_concurrent_inlining());
return bound_arguments_;
}
......@@ -701,7 +712,7 @@ class JSFunctionData : public JSObjectData {
}
ObjectData* code() const {
DCHECK(serialized_code_and_feedback());
DCHECK(!FLAG_turbo_direct_heap_access);
DCHECK(!broker()->is_concurrent_inlining());
return code_;
}
int initial_map_instance_size_with_min_slack() const {
......@@ -867,7 +878,7 @@ class NameData : public HeapObjectData {
public:
NameData(JSHeapBroker* broker, ObjectData** storage, Handle<Name> object)
: HeapObjectData(broker, storage, object) {
DCHECK(!FLAG_turbo_direct_heap_access);
DCHECK(!broker->is_concurrent_inlining());
}
};
......@@ -902,7 +913,7 @@ class SymbolData : public NameData {
public:
SymbolData(JSHeapBroker* broker, ObjectData** storage, Handle<Symbol> object)
: NameData(broker, storage, object) {
DCHECK(!FLAG_turbo_direct_heap_access);
DCHECK(!broker->is_concurrent_inlining());
}
};
......@@ -915,7 +926,7 @@ StringData::StringData(JSHeapBroker* broker, ObjectData** storage,
is_external_string_(object->IsExternalString()),
is_seq_string_(object->IsSeqString()),
chars_as_strings_(broker->zone()) {
DCHECK(!FLAG_turbo_direct_heap_access);
DCHECK(!broker->is_concurrent_inlining());
}
class InternalizedStringData : public StringData {
......@@ -923,7 +934,7 @@ class InternalizedStringData : public StringData {
InternalizedStringData(JSHeapBroker* broker, ObjectData** storage,
Handle<InternalizedString> object)
: StringData(broker, storage, object) {
DCHECK(!FLAG_turbo_direct_heap_access);
DCHECK(!broker->is_concurrent_inlining());
}
};
......@@ -1227,7 +1238,7 @@ class MapData : public HeapObjectData {
AccessorInfoData::AccessorInfoData(JSHeapBroker* broker, ObjectData** storage,
Handle<AccessorInfo> object)
: HeapObjectData(broker, storage, object) {
DCHECK(!FLAG_turbo_direct_heap_access);
DCHECK(!broker->is_concurrent_inlining());
}
AllocationSiteData::AllocationSiteData(JSHeapBroker* broker,
......@@ -1269,6 +1280,11 @@ void AllocationSiteData::SerializeBoilerplate(JSHeapBroker* broker) {
HeapObjectData::HeapObjectData(JSHeapBroker* broker, ObjectData** storage,
Handle<HeapObject> object, ObjectDataKind kind)
: ObjectData(broker, storage, object, kind),
// We have to use a raw cast below instead of AsMap() because of
// recursion. AsMap() would call IsMap(), which accesses the
// instance_type_ member. In the case of constructing the MapData for the
// meta map (whose map is itself), this member has not yet been
// initialized.
map_(broker->GetOrCreateData(object->synchronized_map())) {
CHECK_IMPLIES(kind == kSerializedHeapObject,
broker->mode() == JSHeapBroker::kSerializing);
......@@ -1451,7 +1467,7 @@ void JSFunctionData::SerializeCodeAndFeedback(JSHeapBroker* broker) {
DCHECK_NULL(feedback_cell_);
DCHECK_NULL(feedback_vector_);
DCHECK_NULL(code_);
if (!FLAG_turbo_direct_heap_access) {
if (!broker->is_concurrent_inlining()) {
// This is conditionalized because Code objects are never serialized now.
// We only need to represent the code object in serialized data when
// we're unable to perform direct heap accesses.
......@@ -1488,7 +1504,7 @@ class DescriptorArrayData : public HeapObjectData {
DescriptorArrayData(JSHeapBroker* broker, ObjectData** storage,
Handle<DescriptorArray> object)
: HeapObjectData(broker, storage, object), contents_(broker->zone()) {
DCHECK(!FLAG_turbo_direct_heap_access);
DCHECK(!broker->is_concurrent_inlining());
}
ObjectData* FindFieldOwner(InternalIndex descriptor_index) const {
......@@ -1580,7 +1596,7 @@ FeedbackCellData::FeedbackCellData(JSHeapBroker* broker, ObjectData** storage,
value_(object->value().IsFeedbackVector()
? broker->GetOrCreateData(object->value())
: nullptr) {
DCHECK(!FLAG_turbo_direct_heap_access);
DCHECK(!broker->is_concurrent_inlining());
}
class FeedbackVectorData : public HeapObjectData {
......@@ -1613,7 +1629,7 @@ FeedbackVectorData::FeedbackVectorData(JSHeapBroker* broker,
: HeapObjectData(broker, storage, object),
invocation_count_(object->invocation_count()),
closure_feedback_cell_array_(broker->zone()) {
DCHECK(!FLAG_turbo_direct_heap_access);
DCHECK(!broker->is_concurrent_inlining());
}
ObjectData* FeedbackVectorData::GetClosureFeedbackCell(JSHeapBroker* broker,
......@@ -1849,7 +1865,7 @@ JSArrayData::JSArrayData(JSHeapBroker* broker, ObjectData** storage,
: JSObjectData(broker, storage, object), own_elements_(broker->zone()) {}
void JSArrayData::Serialize(JSHeapBroker* broker) {
CHECK(!FLAG_turbo_direct_heap_access);
CHECK(!broker->is_concurrent_inlining());
if (serialized_) return;
serialized_ = true;
......@@ -1907,7 +1923,7 @@ ScopeInfoData::ScopeInfoData(JSHeapBroker* broker, ObjectData** storage,
has_context_extension_slot_(object->HasContextExtensionSlot()),
has_outer_scope_info_(object->HasOuterScopeInfo()),
outer_scope_info_(nullptr) {
DCHECK(!FLAG_turbo_direct_heap_access);
DCHECK(!broker->is_concurrent_inlining());
}
void ScopeInfoData::SerializeScopeInfoChain(JSHeapBroker* broker) {
......@@ -2093,7 +2109,7 @@ class CellData : public HeapObjectData {
public:
CellData(JSHeapBroker* broker, ObjectData** storage, Handle<Cell> object)
: HeapObjectData(broker, storage, object) {
DCHECK(!FLAG_turbo_direct_heap_access);
DCHECK(!broker->is_concurrent_inlining());
}
};
......@@ -2184,7 +2200,7 @@ class TemplateObjectDescriptionData : public HeapObjectData {
TemplateObjectDescriptionData(JSHeapBroker* broker, ObjectData** storage,
Handle<TemplateObjectDescription> object)
: HeapObjectData(broker, storage, object) {
DCHECK(!FLAG_turbo_direct_heap_access);
DCHECK(!broker->is_concurrent_inlining());
}
};
......@@ -2196,7 +2212,7 @@ class CodeData : public HeapObjectData {
!object->marked_for_deoptimization()
? object->inlined_bytecode_size()
: 0) {
DCHECK(!FLAG_turbo_direct_heap_access);
DCHECK(!broker->is_concurrent_inlining());
}
unsigned inlined_bytecode_size() const { return inlined_bytecode_size_; }
......@@ -2233,16 +2249,16 @@ HEAP_BROKER_POSSIBLY_BACKGROUND_SERIALIZED_OBJECT_LIST(DEFINE_AS)
HEAP_BROKER_BACKGROUND_SERIALIZED_OBJECT_LIST(DEFINE_AS)
#undef DEFINE_AS
// TODO(solanes, v8:10866): Remove once FLAG_turbo_direct_heap_access is
// TODO(solanes, v8:10866): Remove once broker()->is_concurrent_inlining() is
// removed.
// This macro defines the Asxxx methods for NeverSerialized objects, which
// should only be used with direct heap access off.
#define DEFINE_AS(Name) \
Name##Data* ObjectData::As##Name() { \
DCHECK(!FLAG_turbo_direct_heap_access); \
CHECK(Is##Name()); \
CHECK_EQ(kind_, kSerializedHeapObject); \
return static_cast<Name##Data*>(this); \
#define DEFINE_AS(Name) \
Name##Data* ObjectData::As##Name() { \
DCHECK(!broker()->is_concurrent_inlining()); \
CHECK(Is##Name()); \
CHECK_EQ(kind_, kSerializedHeapObject); \
return static_cast<Name##Data*>(this); \
}
HEAP_BROKER_NEVER_SERIALIZED_OBJECT_LIST(DEFINE_AS)
#undef DEFINE_AS
......@@ -2483,7 +2499,7 @@ void JSObjectData::SerializeRecursiveAsBoilerplate(JSHeapBroker* broker,
map()->AsMap()->SerializeOwnDescriptors(broker);
}
if (IsJSArray() && !FLAG_turbo_direct_heap_access) {
if (IsJSArray() && !broker->is_concurrent_inlining()) {
AsJSArray()->Serialize(broker);
}
}
......@@ -2671,14 +2687,14 @@ void JSHeapBroker::InitializeAndStartSerializing(
SetTargetNativeContextRef(native_context);
target_native_context().Serialize();
if (!FLAG_turbo_direct_heap_access) {
if (!is_concurrent_inlining()) {
// Perform full native context serialization now if we can't do it later on
// the background thread.
target_native_context().SerializeOnBackground();
}
Factory* const f = isolate()->factory();
if (!FLAG_turbo_direct_heap_access) {
if (!is_concurrent_inlining()) {
ObjectData* data;
data = GetOrCreateData(f->array_buffer_detaching_protector());
if (!data->should_access_heap()) data->AsPropertyCell()->Serialize(this);
......@@ -2734,13 +2750,14 @@ ObjectData* JSHeapBroker::TryGetOrCreateData(
entry = refs_->LookupOrInsert(object.address());
object_data = zone()->New<ObjectData>(this, &(entry->value), object,
kUnserializedReadOnlyHeapObject);
// TODO(solanes, v8:10866): Remove the if/else in this macro once we remove the
// FLAG_turbo_direct_heap_access.
// TODO(solanes, v8:10866): Remove the `(mode() == kSerializing)` case in this
// macro when all classes skip serialization. Same for the other macros if we
// end up keeping them.
#define CREATE_DATA_FOR_DIRECT_READ(name) \
} \
/* NOLINTNEXTLINE(readability/braces) */ \
else if (object->Is##name()) { \
if (FLAG_turbo_direct_heap_access) { \
if (is_concurrent_inlining()) { \
entry = refs_->LookupOrInsert(object.address()); \
object_data = zone()->New<ObjectData>(this, &(entry->value), object, \
kNeverSerializedHeapObject); \
......@@ -2778,7 +2795,7 @@ ObjectData* JSHeapBroker::TryGetOrCreateData(
} \
/* NOLINTNEXTLINE(readability/braces) */ \
else if (object->Is##name()) { \
if (FLAG_turbo_direct_heap_access) { \
if (is_concurrent_inlining()) { \
entry = refs_->LookupOrInsert(object.address()); \
object_data = zone()->New<name##Data>(this, &(entry->value), \
Handle<name>::cast(object), \
......@@ -3047,7 +3064,7 @@ void JSObjectRef::EnsureElementsTenured() {
FieldIndex MapRef::GetFieldIndexFor(InternalIndex descriptor_index) const {
CHECK_LT(descriptor_index.as_int(), NumberOfOwnDescriptors());
if (data_->should_access_heap() || FLAG_turbo_direct_heap_access) {
if (data_->should_access_heap() || broker()->is_concurrent_inlining()) {
FieldIndex result = FieldIndex::ForDescriptor(*object(), descriptor_index);
DCHECK(result.is_inobject());
return result;
......@@ -3089,7 +3106,7 @@ bool MapRef::IsPrimitiveMap() const {
MapRef MapRef::FindFieldOwner(InternalIndex descriptor_index) const {
CHECK_LT(descriptor_index.as_int(), NumberOfOwnDescriptors());
if (data_->should_access_heap() || FLAG_turbo_direct_heap_access) {
if (data_->should_access_heap() || broker()->is_concurrent_inlining()) {
// TODO(solanes, v8:7790): Consider caching the result of the field owner on
// the descriptor array. It would be useful for same map as well as any
// other map sharing that descriptor array.
......@@ -3244,19 +3261,19 @@ int BytecodeArrayRef::handler_table_size() const {
// Like IF_ACCESS_FROM_HEAP[_C] but we also allow direct heap access for
// kSerialized only for methods that we identified to be safe.
#define IF_ACCESS_FROM_HEAP_WITH_FLAG(result, name) \
if (data_->should_access_heap() || FLAG_turbo_direct_heap_access) { \
if (data_->should_access_heap() || broker()->is_concurrent_inlining()) { \
return result##Ref(broker(), \
broker()->CanonicalPersistentHandle(object()->name())); \
}
#define IF_ACCESS_FROM_HEAP_WITH_FLAG_C(name) \
if (data_->should_access_heap() || FLAG_turbo_direct_heap_access) { \
return object()->name(); \
#define IF_ACCESS_FROM_HEAP_WITH_FLAG_C(name) \
if (data_->should_access_heap() || broker()->is_concurrent_inlining()) { \
return object()->name(); \
}
// Like BIMODAL_ACCESSOR[_C] except that we force a direct heap access if
// FLAG_turbo_direct_heap_access is true (even for kSerialized). This is because
// we identified the method to be safe to use direct heap access, but the
// holder##Data class still needs to be serialized.
// broker()->is_concurrent_inlining() is true (even for kSerialized). This is
// because we identified the method to be safe to use direct heap access, but
// the holder##Data class still needs to be serialized.
#define BIMODAL_ACCESSOR_WITH_FLAG(holder, result, name) \
result##Ref holder##Ref::name() const { \
IF_ACCESS_FROM_HEAP_WITH_FLAG(result, name); \
......@@ -3498,7 +3515,7 @@ BIMODAL_ACCESSOR(ScopeInfo, ScopeInfo, OuterScopeInfo)
BIMODAL_ACCESSOR_C(SharedFunctionInfo, int, builtin_id)
BytecodeArrayRef SharedFunctionInfoRef::GetBytecodeArray() const {
if (data_->should_access_heap() || FLAG_turbo_direct_heap_access) {
if (data_->should_access_heap() || broker()->is_concurrent_inlining()) {
BytecodeArray bytecode_array;
if (!broker()->IsMainThread()) {
bytecode_array = object()->GetBytecodeArray(broker()->local_isolate());
......@@ -3553,7 +3570,7 @@ base::Optional<ObjectRef> MapRef::GetStrongValue(
}
DescriptorArrayRef MapRef::instance_descriptors() const {
if (data_->should_access_heap() || FLAG_turbo_direct_heap_access) {
if (data_->should_access_heap() || broker()->is_concurrent_inlining()) {
return DescriptorArrayRef(
broker(),
broker()->CanonicalPersistentHandle(
......@@ -3572,7 +3589,7 @@ void MapRef::SerializeRootMap() {
// TODO(solanes, v8:7790): Remove base::Optional from the return type when
// deleting serialization.
base::Optional<MapRef> MapRef::FindRootMap() const {
if (data_->should_access_heap() || FLAG_turbo_direct_heap_access) {
if (data_->should_access_heap() || broker()->is_concurrent_inlining()) {
// TODO(solanes): Remove the TryGetOrCreateData part when Map is moved to
// kNeverSerialized.
ObjectData* root_map =
......@@ -3594,7 +3611,7 @@ base::Optional<MapRef> MapRef::FindRootMap() const {
}
bool JSTypedArrayRef::is_on_heap() const {
if (data_->should_access_heap() || FLAG_turbo_direct_heap_access) {
if (data_->should_access_heap() || broker()->is_concurrent_inlining()) {
// Safe to read concurrently because:
// - host object seen by serializer.
// - underlying field written 1. during initialization or 2. with
......@@ -3606,7 +3623,7 @@ bool JSTypedArrayRef::is_on_heap() const {
size_t JSTypedArrayRef::length() const {
CHECK(!is_on_heap());
if (data_->should_access_heap() || FLAG_turbo_direct_heap_access) {
if (data_->should_access_heap() || broker()->is_concurrent_inlining()) {
// Safe to read concurrently because:
// - immutable after initialization.
// - host object seen by serializer.
......@@ -3617,7 +3634,7 @@ size_t JSTypedArrayRef::length() const {
HeapObjectRef JSTypedArrayRef::buffer() const {
CHECK(!is_on_heap());
if (data_->should_access_heap() || FLAG_turbo_direct_heap_access) {
if (data_->should_access_heap() || broker()->is_concurrent_inlining()) {
// Safe to read concurrently because:
// - immutable after initialization.
// - host object seen by serializer.
......@@ -3630,7 +3647,7 @@ HeapObjectRef JSTypedArrayRef::buffer() const {
void* JSTypedArrayRef::data_ptr() const {
CHECK(!is_on_heap());
if (data_->should_access_heap() || FLAG_turbo_direct_heap_access) {
if (data_->should_access_heap() || broker()->is_concurrent_inlining()) {
// Safe to read concurrently because:
// - host object seen by serializer.
// - underlying field written 1. during initialization or 2. protected by
......@@ -3839,7 +3856,7 @@ Maybe<double> ObjectRef::OddballToNumber() const {
base::Optional<ObjectRef> JSObjectRef::GetOwnConstantElement(
uint32_t index, SerializationPolicy policy) const {
if (data_->should_access_heap() || FLAG_turbo_direct_heap_access) {
if (data_->should_access_heap() || broker()->is_concurrent_inlining()) {
// `elements` are currently still serialized as members of JSObjectRef.
// TODO(jgruber,v8:7790): Once JSObject is no longer serialized, we must
// guarantee consistency between `object`, `elements_kind` and `elements`
......@@ -3917,7 +3934,7 @@ ObjectRef JSArrayRef::GetBoilerplateLength() const {
}
ObjectRef JSArrayRef::length_unsafe() const {
if (data_->should_access_heap() || FLAG_turbo_direct_heap_access) {
if (data_->should_access_heap() || broker()->is_concurrent_inlining()) {
Object o = object()->length(broker()->isolate(), kRelaxedLoad);
return ObjectRef{broker(), broker()->CanonicalPersistentHandle(o)};
} else {
......@@ -3928,7 +3945,7 @@ ObjectRef JSArrayRef::length_unsafe() const {
base::Optional<ObjectRef> JSArrayRef::GetOwnCowElement(
FixedArrayBaseRef elements_ref, uint32_t index,
SerializationPolicy policy) const {
if (data_->should_access_heap() || FLAG_turbo_direct_heap_access) {
if (data_->should_access_heap() || broker()->is_concurrent_inlining()) {
// `elements` are currently still serialized as members of JSObjectRef.
// TODO(jgruber,v8:7790): Remove the elements equality DCHECK below once
// JSObject is no longer serialized.
......@@ -3971,7 +3988,7 @@ base::Optional<ObjectRef> JSArrayRef::GetOwnCowElement(
broker()->CanonicalPersistentHandle(result.value())};
} else {
DCHECK(!data_->should_access_heap());
DCHECK(!FLAG_turbo_direct_heap_access);
DCHECK(!broker()->is_concurrent_inlining());
// Just to clarify that `elements_ref` is not used on this path.
// GetOwnElement accesses the serialized `elements` field on its own.
......@@ -3987,7 +4004,7 @@ base::Optional<ObjectRef> JSArrayRef::GetOwnCowElement(
}
base::Optional<CellRef> SourceTextModuleRef::GetCell(int cell_index) const {
if (data_->should_access_heap() || FLAG_turbo_direct_heap_access) {
if (data_->should_access_heap() || broker()->is_concurrent_inlining()) {
return CellRef(broker(), broker()->CanonicalPersistentHandle(
object()->GetCell(cell_index)));
}
......@@ -4369,7 +4386,7 @@ bool JSFunctionRef::serialized_code_and_feedback() const {
}
CodeRef JSFunctionRef::code() const {
if (data_->should_access_heap() || FLAG_turbo_direct_heap_access) {
if (data_->should_access_heap() || broker()->is_concurrent_inlining()) {
return CodeRef(broker(), broker()->CanonicalPersistentHandle(
object()->code(kAcquireLoad)));
}
......@@ -4492,7 +4509,7 @@ void NativeContextRef::SerializeOnBackground() {
}
void JSTypedArrayRef::Serialize() {
if (data_->should_access_heap() || FLAG_turbo_direct_heap_access) {
if (data_->should_access_heap() || broker()->is_concurrent_inlining()) {
// Even if the typed array object itself is no longer serialized (besides
// the JSObject parts), the `buffer` field still is and thus we need to
// make sure to visit it.
......@@ -4514,7 +4531,7 @@ bool JSTypedArrayRef::serialized() const {
}
bool JSTypedArrayRef::ShouldHaveBeenSerialized() const {
if (FLAG_turbo_direct_heap_access) return false;
if (broker()->is_concurrent_inlining()) return false;
return ObjectRef::ShouldHaveBeenSerialized();
}
......
......@@ -67,8 +67,8 @@ enum class OddballType : uint8_t {
// too. For example, it CANNOT contain FixedArrayBase if it doesn't contain
// FixedDoubleArray, BytecodeArray and FixedArray.
// DO NOT VIOLATE THESE TWO PROPERTIES!
// Classes on this list will skip serialization when
// FLAG_turbo_direct_heap_access is on. Otherwise, they might get serialized.
// Classes on this list will skip serialization when --concurrent-inlining is
// on. Otherwise, they might get serialized.
#define HEAP_BROKER_NEVER_SERIALIZED_OBJECT_LIST(V) \
/* Subtypes of FixedArray */ \
V(ObjectBoilerplateDescription) \
......
......@@ -57,7 +57,6 @@ JSHeapBroker::JSHeapBroker(Isolate* isolate, Zone* broker_zone,
// immediately with a larger-capacity one. It doesn't seem to affect the
// performance in a noticeable way though.
TRACE(this, "Constructing heap broker");
DCHECK_IMPLIES(is_concurrent_inlining_, FLAG_turbo_direct_heap_access);
}
JSHeapBroker::~JSHeapBroker() { DCHECK_NULL(local_isolate_); }
......@@ -99,7 +98,7 @@ void JSHeapBroker::AttachLocalIsolate(OptimizedCompilationInfo* info,
local_isolate_->heap()->AttachPersistentHandles(
info->DetachPersistentHandles());
if (FLAG_turbo_direct_heap_access) {
if (is_concurrent_inlining()) {
// Ensure any serialization that happens on the background has been
// performed.
target_native_context().SerializeOnBackground();
......
......@@ -139,8 +139,7 @@ class PipelineData {
// For main entry point.
PipelineData(ZoneStats* zone_stats, Isolate* isolate,
OptimizedCompilationInfo* info,
PipelineStatistics* pipeline_statistics,
bool is_concurrent_inlining)
PipelineStatistics* pipeline_statistics)
: isolate_(isolate),
allocator_(isolate->allocator()),
info_(info),
......@@ -157,9 +156,9 @@ class PipelineData {
instruction_zone_(instruction_zone_scope_.zone()),
codegen_zone_scope_(zone_stats_, kCodegenZoneName),
codegen_zone_(codegen_zone_scope_.zone()),
broker_(new JSHeapBroker(isolate_, info_->zone(),
info_->trace_heap_broker(),
is_concurrent_inlining, info->code_kind())),
broker_(new JSHeapBroker(
isolate_, info_->zone(), info_->trace_heap_broker(),
info_->concurrent_inlining(), info->code_kind())),
register_allocation_zone_scope_(zone_stats_,
kRegisterAllocationZoneName),
register_allocation_zone_(register_allocation_zone_scope_.zone()),
......@@ -1105,15 +1104,6 @@ class PipelineCompilationJob final : public OptimizedCompilationJob {
Linkage* linkage_;
};
namespace {
bool ShouldUseConcurrentInlining(CodeKind code_kind, bool is_osr) {
if (is_osr) return false;
return code_kind == CodeKind::TURBOPROP || FLAG_concurrent_inlining;
}
} // namespace
PipelineCompilationJob::PipelineCompilationJob(
Isolate* isolate, Handle<SharedFunctionInfo> shared_info,
Handle<JSFunction> function, BytecodeOffset osr_offset,
......@@ -1126,17 +1116,14 @@ PipelineCompilationJob::PipelineCompilationJob(
kPipelineCompilationJobZoneName),
zone_stats_(function->GetIsolate()->allocator()),
compilation_info_(&zone_, function->GetIsolate(), shared_info, function,
code_kind),
code_kind, osr_offset, osr_frame),
pipeline_statistics_(CreatePipelineStatistics(
handle(Script::cast(shared_info->script()), isolate),
compilation_info(), function->GetIsolate(), &zone_stats_)),
data_(&zone_stats_, function->GetIsolate(), compilation_info(),
pipeline_statistics_.get(),
ShouldUseConcurrentInlining(code_kind, !osr_offset.IsNone())),
pipeline_statistics_.get()),
pipeline_(&data_),
linkage_(nullptr) {
compilation_info_.SetOptimizingForOsr(osr_offset, osr_frame);
}
linkage_(nullptr) {}
PipelineCompilationJob::~PipelineCompilationJob() = default;
......@@ -1229,7 +1216,7 @@ PipelineCompilationJob::Status PipelineCompilationJob::PrepareJobImpl(
}
}
if (FLAG_turbo_direct_heap_access) {
if (compilation_info()->concurrent_inlining()) {
isolate->heap()->PublishPendingAllocations();
}
......@@ -1376,7 +1363,8 @@ struct InliningPhase {
data->broker(), data->jsgraph()->Dead(),
data->observe_node_manager());
DeadCodeElimination dead_code_elimination(&graph_reducer, data->graph(),
data->common(), temp_zone);
data->common(), temp_zone,
info->concurrent_inlining());
CheckpointElimination checkpoint_elimination(&graph_reducer);
CommonOperatorReducer common_reducer(&graph_reducer, data->graph(),
data->broker(), data->common(),
......@@ -1446,7 +1434,8 @@ struct WasmInliningPhase {
GraphReducer graph_reducer(temp_zone, data->graph(), &info->tick_counter(),
data->broker(), data->jsgraph()->Dead());
DeadCodeElimination dead_code_elimination(&graph_reducer, data->graph(),
data->common(), temp_zone);
data->common(), temp_zone,
info->concurrent_inlining());
CommonOperatorReducer common_reducer(&graph_reducer, data->graph(),
data->broker(), data->common(),
data->machine(), temp_zone);
......@@ -1579,8 +1568,9 @@ struct TypedLoweringPhase {
GraphReducer graph_reducer(
temp_zone, data->graph(), &data->info()->tick_counter(), data->broker(),
data->jsgraph()->Dead(), data->observe_node_manager());
DeadCodeElimination dead_code_elimination(&graph_reducer, data->graph(),
data->common(), temp_zone);
DeadCodeElimination dead_code_elimination(
&graph_reducer, data->graph(), data->common(), temp_zone,
data->info()->concurrent_inlining());
JSCreateLowering create_lowering(&graph_reducer, data->dependencies(),
data->jsgraph(), data->broker(),
temp_zone);
......@@ -1766,8 +1756,9 @@ struct EarlyOptimizationPhase {
GraphReducer graph_reducer(
temp_zone, data->graph(), &data->info()->tick_counter(), data->broker(),
data->jsgraph()->Dead(), data->observe_node_manager());
DeadCodeElimination dead_code_elimination(&graph_reducer, data->graph(),
data->common(), temp_zone);
DeadCodeElimination dead_code_elimination(
&graph_reducer, data->graph(), data->common(), temp_zone,
data->info()->concurrent_inlining());
SimplifiedOperatorReducer simple_reducer(&graph_reducer, data->jsgraph(),
data->broker());
RedundancyElimination redundancy_elimination(&graph_reducer, temp_zone);
......@@ -1848,8 +1839,9 @@ struct EffectControlLinearizationPhase {
&data->info()->tick_counter(), data->broker(),
data->jsgraph()->Dead(),
data->observe_node_manager());
DeadCodeElimination dead_code_elimination(&graph_reducer, data->graph(),
data->common(), temp_zone);
DeadCodeElimination dead_code_elimination(
&graph_reducer, data->graph(), data->common(), temp_zone,
data->info()->concurrent_inlining());
CommonOperatorReducer common_reducer(&graph_reducer, data->graph(),
data->broker(), data->common(),
data->machine(), temp_zone);
......@@ -1887,8 +1879,9 @@ struct LoadEliminationPhase {
BranchElimination branch_condition_elimination(&graph_reducer,
data->jsgraph(), temp_zone,
BranchElimination::kEARLY);
DeadCodeElimination dead_code_elimination(&graph_reducer, data->graph(),
data->common(), temp_zone);
DeadCodeElimination dead_code_elimination(
&graph_reducer, data->graph(), data->common(), temp_zone,
data->info()->concurrent_inlining());
RedundancyElimination redundancy_elimination(&graph_reducer, temp_zone);
LoadElimination load_elimination(&graph_reducer, data->jsgraph(),
temp_zone);
......@@ -1955,8 +1948,9 @@ struct LateOptimizationPhase {
data->jsgraph()->Dead(), data->observe_node_manager());
BranchElimination branch_condition_elimination(&graph_reducer,
data->jsgraph(), temp_zone);
DeadCodeElimination dead_code_elimination(&graph_reducer, data->graph(),
data->common(), temp_zone);
DeadCodeElimination dead_code_elimination(
&graph_reducer, data->graph(), data->common(), temp_zone,
data->info()->concurrent_inlining());
ValueNumberingReducer value_numbering(temp_zone, data->graph()->zone());
MachineOperatorReducer machine_reducer(&graph_reducer, data->jsgraph());
CommonOperatorReducer common_reducer(&graph_reducer, data->graph(),
......@@ -2070,8 +2064,9 @@ struct CsaEarlyOptimizationPhase {
allow_signalling_nan);
BranchElimination branch_condition_elimination(&graph_reducer,
data->jsgraph(), temp_zone);
DeadCodeElimination dead_code_elimination(&graph_reducer, data->graph(),
data->common(), temp_zone);
DeadCodeElimination dead_code_elimination(
&graph_reducer, data->graph(), data->common(), temp_zone,
data->info()->concurrent_inlining());
CommonOperatorReducer common_reducer(&graph_reducer, data->graph(),
data->broker(), data->common(),
data->machine(), temp_zone);
......@@ -2097,8 +2092,9 @@ struct CsaOptimizationPhase {
data->jsgraph()->Dead(), data->observe_node_manager());
BranchElimination branch_condition_elimination(&graph_reducer,
data->jsgraph(), temp_zone);
DeadCodeElimination dead_code_elimination(&graph_reducer, data->graph(),
data->common(), temp_zone);
DeadCodeElimination dead_code_elimination(
&graph_reducer, data->graph(), data->common(), temp_zone,
data->info()->concurrent_inlining());
MachineOperatorReducer machine_reducer(&graph_reducer, data->jsgraph(),
allow_signalling_nan);
CommonOperatorReducer common_reducer(&graph_reducer, data->graph(),
......@@ -3308,8 +3304,7 @@ MaybeHandle<Code> Pipeline::GenerateCodeForTesting(
CreatePipelineStatistics(Handle<Script>::null(), info, isolate,
&zone_stats));
PipelineData data(&zone_stats, isolate, info, pipeline_statistics.get(),
i::FLAG_concurrent_inlining);
PipelineData data(&zone_stats, isolate, info, pipeline_statistics.get());
PipelineImpl pipeline(&data);
Linkage linkage(Linkage::ComputeIncoming(data.instruction_zone(), info));
......
......@@ -1535,7 +1535,7 @@ void SerializerForBackgroundCompilation::VisitLdaConstant(
iterator->GetConstantForIndexOperand(0, broker()->isolate());
// TODO(v8:7790): FixedArrays still need to be serialized until they are
// moved to kNeverSerialized.
if (!FLAG_turbo_direct_heap_access || constant->IsFixedArray()) {
if (!broker()->is_concurrent_inlining() || constant->IsFixedArray()) {
ObjectRef(broker(), constant);
}
environment()->accumulator_hints() = Hints::SingleConstant(constant, zone());
......
......@@ -5,6 +5,7 @@
#include "src/compiler/simplified-operator-reducer.h"
#include "src/compiler/js-graph.h"
#include "src/compiler/js-heap-broker.h"
#include "src/compiler/machine-operator.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/operator-properties.h"
......@@ -41,7 +42,7 @@ SimplifiedOperatorReducer::~SimplifiedOperatorReducer() = default;
Reduction SimplifiedOperatorReducer::Reduce(Node* node) {
DisallowHeapAccessIf no_heap_access(!FLAG_turbo_direct_heap_access);
DisallowHeapAccessIf no_heap_access(!broker()->is_concurrent_inlining());
switch (node->opcode()) {
case IrOpcode::kBooleanNot: {
HeapObjectMatcher m(node->InputAt(0));
......
......@@ -5,6 +5,7 @@
#include "src/compiler/type-narrowing-reducer.h"
#include "src/compiler/js-graph.h"
#include "src/compiler/js-heap-broker.h"
namespace v8 {
namespace internal {
......@@ -12,12 +13,15 @@ namespace compiler {
TypeNarrowingReducer::TypeNarrowingReducer(Editor* editor, JSGraph* jsgraph,
JSHeapBroker* broker)
: AdvancedReducer(editor), jsgraph_(jsgraph), op_typer_(broker, zone()) {}
: AdvancedReducer(editor),
jsgraph_(jsgraph),
broker_(broker),
op_typer_(broker, zone()) {}
TypeNarrowingReducer::~TypeNarrowingReducer() = default;
Reduction TypeNarrowingReducer::Reduce(Node* node) {
DisallowHeapAccessIf no_heap_access(!FLAG_turbo_direct_heap_access);
DisallowHeapAccessIf no_heap_access(!broker_->is_concurrent_inlining());
Type new_type = Type::Any();
......
......@@ -34,6 +34,7 @@ class V8_EXPORT_PRIVATE TypeNarrowingReducer final
Zone* zone() const;
JSGraph* const jsgraph_;
const JSHeapBroker* const broker_;
OperationTyper op_typer_;
};
......
......@@ -34,7 +34,7 @@ TypedOptimization::TypedOptimization(Editor* editor,
TypedOptimization::~TypedOptimization() = default;
Reduction TypedOptimization::Reduce(Node* node) {
DisallowHeapAccessIf no_heap_access(!FLAG_turbo_direct_heap_access);
DisallowHeapAccessIf no_heap_access(!broker()->is_concurrent_inlining());
switch (node->opcode()) {
case IrOpcode::kConvertReceiver:
return ReduceConvertReceiver(node);
......
......@@ -589,7 +589,6 @@ DEFINE_BOOL(trace_generalization, false, "trace map generalization")
// Flags for TurboProp.
DEFINE_BOOL(turboprop, false, "enable experimental turboprop mid-tier compiler")
DEFINE_IMPLICATION(turboprop, turbo_direct_heap_access)
DEFINE_BOOL(turboprop_mid_tier_reg_alloc, true,
"enable mid-tier register allocator for turboprop")
DEFINE_BOOL(
......@@ -648,13 +647,10 @@ DEFINE_BOOL(concurrent_inlining, false,
"run optimizing compiler's inlining phase on a separate thread")
DEFINE_BOOL(stress_concurrent_inlining, false,
"makes concurrent inlining more likely to trigger in tests")
DEFINE_BOOL(turbo_direct_heap_access, false,
"access kNeverSerialized objects directly from the heap")
DEFINE_IMPLICATION(stress_concurrent_inlining, concurrent_inlining)
DEFINE_NEG_IMPLICATION(stress_concurrent_inlining, lazy_feedback_allocation)
DEFINE_WEAK_VALUE_IMPLICATION(stress_concurrent_inlining, interrupt_budget,
15 * KB)
DEFINE_IMPLICATION(concurrent_inlining, turbo_direct_heap_access)
DEFINE_BOOL(
turbo_concurrent_get_property_access_info, false,
"concurrently call GetPropertyAccessInfo (only with --concurrent-inlining)")
......
......@@ -88,8 +88,6 @@ class BackgroundCompilationThread final : public v8::base::Thread {
TEST(TestConcurrentSharedFunctionInfo) {
FlagScope<bool> allow_natives_syntax(&i::FLAG_allow_natives_syntax, true);
FlagScope<bool> concurrent_inlining(&i::FLAG_concurrent_inlining, true);
FlagScope<bool> turbo_direct_heap_access(&i::FLAG_turbo_direct_heap_access,
true);
HandleAndZoneScope scope;
Isolate* isolate = scope.main_isolate();
......
......@@ -1269,7 +1269,6 @@ TEST(Regress10774) {
i::FLAG_allow_natives_syntax = true;
i::FLAG_turboprop = true;
i::FLAG_turbo_dynamic_map_checks = true;
i::FLAG_turbo_direct_heap_access = true;
#ifdef VERIFY_HEAP
i::FLAG_verify_heap = true;
#endif
......
......@@ -28,7 +28,8 @@
// Flags: --allow-natives-syntax
// Flags: --concurrent-recompilation --block-concurrent-recompilation
// Flags: --nostress-opt --no-always-opt
// Flags: --no-turbo-direct-heap-access
// Flags: --no-turboprop
// Flags: --no-concurrent-inlining
// Flags: --no-turbo-concurrent-get-property-access-info
// --nostress-opt is in place because this particular optimization
......
......@@ -24,7 +24,8 @@ class DeadCodeEliminationTest : public GraphTest {
protected:
Reduction Reduce(AdvancedReducer::Editor* editor, Node* node) {
DeadCodeElimination reducer(editor, graph(), common(), zone());
DeadCodeElimination reducer(editor, graph(), common(), zone(),
FLAG_concurrent_inlining);
return reducer.Reduce(node);
}
......
......@@ -59,9 +59,8 @@ INCOMPATIBLE_FLAGS_PER_VARIANT = {
"slow_path": ["--no-force-slow-path"],
"stress_concurrent_allocation": ["--single-threaded-gc", "--predictable"],
"stress_concurrent_inlining": ["--single-threaded", "--predictable",
"--no-turbo-direct-heap-access"],
"--no-concurrent-inlining"],
"stress_incremental_marking": ["--no-stress-incremental-marking"],
"future": ["--no-turbo-direct-heap-access"],
"stress_js_bg_compile_wasm_code_gc": ["--no-stress-background-compile"],
"stress": ["--no-stress-opt", "--always-opt", "--no-always-opt", "--liftoff",
"--max-inlined-bytecode-size=*",
......@@ -69,10 +68,8 @@ INCOMPATIBLE_FLAGS_PER_VARIANT = {
"--wasm-generic-wrapper"],
"sparkplug": ["--jitless", "--no-sparkplug" ],
"always_sparkplug": ["--jitless", "--no-sparkplug", "--no-always-sparkplug"],
"turboprop": ["--interrupt-budget=*", "--no-turbo-direct-heap-access",
"--no-turboprop"],
"turboprop_as_toptier": ["--interrupt-budget=*",
"--no-turbo-direct-heap-access", "--no-turboprop",
"turboprop": ["--interrupt-budget=*", "--no-turboprop"],
"turboprop_as_toptier": ["--interrupt-budget=*", "--no-turboprop",
"--no-turboprop-as-toptier"],
"code_serializer": ["--cache=after-execute", "--cache=full-code-cache",
"--cache=none"],
......@@ -109,9 +106,9 @@ INCOMPATIBLE_FLAGS_PER_EXTRA_FLAG = {
"--no-enable-sse4-1": ["--enable-sse4-1"],
"--optimize-for-size": ["--max-semi-space-size=*"],
"--stress_concurrent_allocation": ["--single-threaded-gc", "--predictable"],
"--stress_concurrent_inlining": ["--single-threaded", "--predictable"],
"--stress-concurrent-inlining":
INCOMPATIBLE_FLAGS_PER_VARIANT["stress_concurrent_inlining"],
"--stress-flush-bytecode": ["--no-stress-flush-bytecode"],
"--future": ["--no-turbo-direct-heap-access"],
"--stress-incremental-marking": INCOMPATIBLE_FLAGS_PER_VARIANT["stress_incremental_marking"],
}
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment