Commit 4f2f14f8 authored by Jakob Gruber, committed by Commit Bot

[compiler] Remove --turbo-direct-heap-access

On a per-job basis, --turbo-direct-heap-access should be equal to
whether concurrent inlining is enabled. We simplify involved logic by
removing the flag, and replacing all access to

- FLAG_turbo_direct_heap_access, and
- FLAG_concurrent_inlining

inside compiler/ with
OptimizedCompilationInfo::is_concurrent_inlining() (or derived values).

Bug: v8:7790
Change-Id: I64818e0e1004dded08c784ef1c4bdfd2af990a59
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2843345
Commit-Queue: Jakob Gruber <jgruber@chromium.org>
Auto-Submit: Jakob Gruber <jgruber@chromium.org>
Reviewed-by: Georg Neis <neis@chromium.org>
Cr-Commit-Position: refs/heads/master@{#74166}
parent 0bc71bc9
......@@ -23,12 +23,16 @@ namespace internal {
OptimizedCompilationInfo::OptimizedCompilationInfo(
Zone* zone, Isolate* isolate, Handle<SharedFunctionInfo> shared,
Handle<JSFunction> closure, CodeKind code_kind)
Handle<JSFunction> closure, CodeKind code_kind, BytecodeOffset osr_offset,
JavaScriptFrame* osr_frame)
: code_kind_(code_kind),
osr_offset_(osr_offset),
osr_frame_(osr_frame),
zone_(zone),
optimization_id_(isolate->NextOptimizationId()) {
DCHECK_EQ(*shared, closure->shared());
DCHECK(shared->is_compiled());
DCHECK_IMPLIES(is_osr(), IsOptimizing());
bytecode_array_ = handle(shared->GetBytecodeArray(isolate), isolate);
shared_info_ = shared;
closure_ = closure;
......@@ -86,6 +90,10 @@ void OptimizedCompilationInfo::ConfigureFlags() {
if (FLAG_untrusted_code_mitigations) set_untrusted_code_mitigations();
if (FLAG_turbo_inline_js_wasm_calls) set_inline_js_wasm_calls();
if (!is_osr() && (IsTurboprop() || FLAG_concurrent_inlining)) {
set_concurrent_inlining();
}
switch (code_kind_) {
case CodeKind::TURBOFAN:
if (FLAG_function_context_specialization) {
......
......@@ -104,7 +104,15 @@ class V8_EXPORT_PRIVATE OptimizedCompilationInfo final {
// Construct a compilation info for optimized compilation.
OptimizedCompilationInfo(Zone* zone, Isolate* isolate,
Handle<SharedFunctionInfo> shared,
Handle<JSFunction> closure, CodeKind code_kind);
Handle<JSFunction> closure, CodeKind code_kind,
BytecodeOffset osr_offset,
JavaScriptFrame* osr_frame);
// For testing.
OptimizedCompilationInfo(Zone* zone, Isolate* isolate,
Handle<SharedFunctionInfo> shared,
Handle<JSFunction> closure, CodeKind code_kind)
: OptimizedCompilationInfo(zone, isolate, shared, closure, code_kind,
BytecodeOffset::None(), nullptr) {}
// Construct a compilation info for stub compilation, Wasm, and testing.
OptimizedCompilationInfo(Vector<const char> debug_name, Zone* zone,
CodeKind code_kind);
......@@ -167,13 +175,6 @@ class V8_EXPORT_PRIVATE OptimizedCompilationInfo final {
bool IsWasm() const { return code_kind() == CodeKind::WASM_FUNCTION; }
#endif // V8_ENABLE_WEBASSEMBLY
void SetOptimizingForOsr(BytecodeOffset osr_offset,
JavaScriptFrame* osr_frame) {
DCHECK(IsOptimizing());
osr_offset_ = osr_offset;
osr_frame_ = osr_frame;
}
void set_persistent_handles(
std::unique_ptr<PersistentHandles> persistent_handles) {
DCHECK_NULL(ph_);
......@@ -292,7 +293,9 @@ class V8_EXPORT_PRIVATE OptimizedCompilationInfo final {
#endif // V8_ENABLE_WEBASSEMBLY
// Entry point when compiling for OSR, {BytecodeOffset::None} otherwise.
BytecodeOffset osr_offset_ = BytecodeOffset::None();
const BytecodeOffset osr_offset_ = BytecodeOffset::None();
// The current OSR frame for specialization or {nullptr}.
JavaScriptFrame* const osr_frame_ = nullptr;
// The zone from which the compilation pipeline working on this
// OptimizedCompilationInfo allocates.
......@@ -308,9 +311,6 @@ class V8_EXPORT_PRIVATE OptimizedCompilationInfo final {
const int optimization_id_;
unsigned inlined_bytecode_size_ = 0;
// The current OSR frame for specialization or {nullptr}.
JavaScriptFrame* osr_frame_ = nullptr;
Vector<const char> debug_name_;
std::unique_ptr<char[]> trace_turbo_filename_;
......
......@@ -1570,7 +1570,7 @@ void BytecodeGraphBuilder::VisitBytecodes() {
}
// TODO(leszeks): Increment usage counter on BG thread.
if (!FLAG_concurrent_inlining && has_one_shot_bytecode) {
if (!broker()->is_concurrent_inlining() && has_one_shot_bytecode) {
// (For concurrent inlining this is done in the serializer instead.)
isolate()->CountUsage(
v8::Isolate::UseCounterFeature::kOptimizedFunctionWithOneShotBytecode);
......
......@@ -8,10 +8,11 @@
#include "src/compiler/common-operator.h"
#include "src/compiler/graph.h"
#include "src/compiler/js-heap-broker.h"
#include "src/compiler/machine-operator.h"
#include "src/compiler/node.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/node-properties.h"
#include "src/compiler/node.h"
namespace v8 {
namespace internal {
......@@ -55,7 +56,8 @@ CommonOperatorReducer::CommonOperatorReducer(Editor* editor, Graph* graph,
}
Reduction CommonOperatorReducer::Reduce(Node* node) {
DisallowHeapAccessIf no_heap_access(!FLAG_turbo_direct_heap_access);
DisallowHeapAccessIf no_heap_access(broker() == nullptr ||
!broker()->is_concurrent_inlining());
switch (node->opcode()) {
case IrOpcode::kBranch:
return ReduceBranch(node);
......
......@@ -5,6 +5,7 @@
#include "src/compiler/constant-folding-reducer.h"
#include "src/compiler/js-graph.h"
#include "src/compiler/js-heap-broker.h"
#include "src/objects/objects-inl.h"
namespace v8 {
......@@ -63,7 +64,7 @@ ConstantFoldingReducer::ConstantFoldingReducer(Editor* editor, JSGraph* jsgraph,
ConstantFoldingReducer::~ConstantFoldingReducer() = default;
Reduction ConstantFoldingReducer::Reduce(Node* node) {
DisallowHeapAccessIf no_heap_access(!FLAG_turbo_direct_heap_access);
DisallowHeapAccessIf no_heap_access(!broker()->is_concurrent_inlining());
if (!NodeProperties::IsConstant(node) && NodeProperties::IsTyped(node) &&
node->op()->HasProperty(Operator::kEliminatable) &&
node->opcode() != IrOpcode::kFinishRegion) {
......
......@@ -16,12 +16,14 @@ namespace compiler {
DeadCodeElimination::DeadCodeElimination(Editor* editor, Graph* graph,
CommonOperatorBuilder* common,
Zone* temp_zone)
Zone* temp_zone,
bool is_concurrent_inlining)
: AdvancedReducer(editor),
graph_(graph),
common_(common),
dead_(graph->NewNode(common->Dead())),
zone_(temp_zone) {
zone_(temp_zone),
is_concurrent_inlining_(is_concurrent_inlining) {
NodeProperties::SetType(dead_, Type::None());
}
......@@ -46,7 +48,7 @@ Node* FindDeadInput(Node* node) {
} // namespace
Reduction DeadCodeElimination::Reduce(Node* node) {
DisallowHeapAccessIf no_heap_access(!FLAG_turbo_direct_heap_access);
DisallowHeapAccessIf no_heap_access(!is_concurrent_inlining_);
switch (node->opcode()) {
case IrOpcode::kEnd:
return ReduceEnd(node);
......
......@@ -40,7 +40,8 @@ class V8_EXPORT_PRIVATE DeadCodeElimination final
: public NON_EXPORTED_BASE(AdvancedReducer) {
public:
DeadCodeElimination(Editor* editor, Graph* graph,
CommonOperatorBuilder* common, Zone* temp_zone);
CommonOperatorBuilder* common, Zone* temp_zone,
bool is_concurrent_inlining);
~DeadCodeElimination() final = default;
DeadCodeElimination(const DeadCodeElimination&) = delete;
DeadCodeElimination& operator=(const DeadCodeElimination&) = delete;
......@@ -78,6 +79,8 @@ class V8_EXPORT_PRIVATE DeadCodeElimination final
CommonOperatorBuilder* const common_;
Node* const dead_;
Zone* zone_;
const bool is_concurrent_inlining_;
};
} // namespace compiler
......
......@@ -112,7 +112,7 @@ Reduction GraphReducer::Reduce(Node* const node) {
if (FLAG_trace_turbo_reduction) {
UnparkedScopeIfNeeded unparked(broker_);
// TODO(neis): Disallow racy handle dereference once we stop
// supporting --no-local-heaps --no-turbo-direct-heap-access.
// supporting --no-local-heaps --no-concurrent-inlining.
AllowHandleDereference allow_deref;
StdoutStream{} << "- In-place update of #" << *node << " by reducer "
<< (*i)->reducer_name() << std::endl;
......@@ -125,7 +125,7 @@ Reduction GraphReducer::Reduce(Node* const node) {
if (FLAG_trace_turbo_reduction) {
UnparkedScopeIfNeeded unparked(broker_);
// TODO(neis): Disallow racy handle dereference once we stop
// supporting --no-local-heaps --no-turbo-direct-heap-access.
// supporting --no-local-heaps --no-concurrent-inlining.
AllowHandleDereference allow_deref;
StdoutStream{} << "- Replacement of #" << *node << " with #"
<< *(reduction.replacement()) << " by reducer "
......
This diff is collapsed.
......@@ -67,8 +67,8 @@ enum class OddballType : uint8_t {
// too. For example, it CANNOT contain FixedArrayBase if it doesn't contain
// FixedDoubleArray, BytecodeArray and FixedArray.
// DO NOT VIOLATE THESE TWO PROPERTIES!
// Classes on this list will skip serialization when
// FLAG_turbo_direct_heap_access is on. Otherwise, they might get serialized.
// Classes on this list will skip serialization when --concurrent-inlining is
// on. Otherwise, they might get serialized.
#define HEAP_BROKER_NEVER_SERIALIZED_OBJECT_LIST(V) \
/* Subtypes of FixedArray */ \
V(ObjectBoilerplateDescription) \
......
......@@ -57,7 +57,6 @@ JSHeapBroker::JSHeapBroker(Isolate* isolate, Zone* broker_zone,
// immediately with a larger-capacity one. It doesn't seem to affect the
// performance in a noticeable way though.
TRACE(this, "Constructing heap broker");
DCHECK_IMPLIES(is_concurrent_inlining_, FLAG_turbo_direct_heap_access);
}
JSHeapBroker::~JSHeapBroker() { DCHECK_NULL(local_isolate_); }
......@@ -99,7 +98,7 @@ void JSHeapBroker::AttachLocalIsolate(OptimizedCompilationInfo* info,
local_isolate_->heap()->AttachPersistentHandles(
info->DetachPersistentHandles());
if (FLAG_turbo_direct_heap_access) {
if (is_concurrent_inlining()) {
// Ensure any serialization that happens on the background has been
// performed.
target_native_context().SerializeOnBackground();
......
This diff is collapsed.
......@@ -1535,7 +1535,7 @@ void SerializerForBackgroundCompilation::VisitLdaConstant(
iterator->GetConstantForIndexOperand(0, broker()->isolate());
// TODO(v8:7790): FixedArrays still need to be serialized until they are
// moved to kNeverSerialized.
if (!FLAG_turbo_direct_heap_access || constant->IsFixedArray()) {
if (!broker()->is_concurrent_inlining() || constant->IsFixedArray()) {
ObjectRef(broker(), constant);
}
environment()->accumulator_hints() = Hints::SingleConstant(constant, zone());
......
......@@ -5,6 +5,7 @@
#include "src/compiler/simplified-operator-reducer.h"
#include "src/compiler/js-graph.h"
#include "src/compiler/js-heap-broker.h"
#include "src/compiler/machine-operator.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/operator-properties.h"
......@@ -41,7 +42,7 @@ SimplifiedOperatorReducer::~SimplifiedOperatorReducer() = default;
Reduction SimplifiedOperatorReducer::Reduce(Node* node) {
DisallowHeapAccessIf no_heap_access(!FLAG_turbo_direct_heap_access);
DisallowHeapAccessIf no_heap_access(!broker()->is_concurrent_inlining());
switch (node->opcode()) {
case IrOpcode::kBooleanNot: {
HeapObjectMatcher m(node->InputAt(0));
......
......@@ -5,6 +5,7 @@
#include "src/compiler/type-narrowing-reducer.h"
#include "src/compiler/js-graph.h"
#include "src/compiler/js-heap-broker.h"
namespace v8 {
namespace internal {
......@@ -12,12 +13,15 @@ namespace compiler {
TypeNarrowingReducer::TypeNarrowingReducer(Editor* editor, JSGraph* jsgraph,
JSHeapBroker* broker)
: AdvancedReducer(editor), jsgraph_(jsgraph), op_typer_(broker, zone()) {}
: AdvancedReducer(editor),
jsgraph_(jsgraph),
broker_(broker),
op_typer_(broker, zone()) {}
TypeNarrowingReducer::~TypeNarrowingReducer() = default;
Reduction TypeNarrowingReducer::Reduce(Node* node) {
DisallowHeapAccessIf no_heap_access(!FLAG_turbo_direct_heap_access);
DisallowHeapAccessIf no_heap_access(!broker_->is_concurrent_inlining());
Type new_type = Type::Any();
......
......@@ -34,6 +34,7 @@ class V8_EXPORT_PRIVATE TypeNarrowingReducer final
Zone* zone() const;
JSGraph* const jsgraph_;
const JSHeapBroker* const broker_;
OperationTyper op_typer_;
};
......
......@@ -34,7 +34,7 @@ TypedOptimization::TypedOptimization(Editor* editor,
TypedOptimization::~TypedOptimization() = default;
Reduction TypedOptimization::Reduce(Node* node) {
DisallowHeapAccessIf no_heap_access(!FLAG_turbo_direct_heap_access);
DisallowHeapAccessIf no_heap_access(!broker()->is_concurrent_inlining());
switch (node->opcode()) {
case IrOpcode::kConvertReceiver:
return ReduceConvertReceiver(node);
......
......@@ -589,7 +589,6 @@ DEFINE_BOOL(trace_generalization, false, "trace map generalization")
// Flags for TurboProp.
DEFINE_BOOL(turboprop, false, "enable experimental turboprop mid-tier compiler")
DEFINE_IMPLICATION(turboprop, turbo_direct_heap_access)
DEFINE_BOOL(turboprop_mid_tier_reg_alloc, true,
"enable mid-tier register allocator for turboprop")
DEFINE_BOOL(
......@@ -648,13 +647,10 @@ DEFINE_BOOL(concurrent_inlining, false,
"run optimizing compiler's inlining phase on a separate thread")
DEFINE_BOOL(stress_concurrent_inlining, false,
"makes concurrent inlining more likely to trigger in tests")
DEFINE_BOOL(turbo_direct_heap_access, false,
"access kNeverSerialized objects directly from the heap")
DEFINE_IMPLICATION(stress_concurrent_inlining, concurrent_inlining)
DEFINE_NEG_IMPLICATION(stress_concurrent_inlining, lazy_feedback_allocation)
DEFINE_WEAK_VALUE_IMPLICATION(stress_concurrent_inlining, interrupt_budget,
15 * KB)
DEFINE_IMPLICATION(concurrent_inlining, turbo_direct_heap_access)
DEFINE_BOOL(
turbo_concurrent_get_property_access_info, false,
"concurrently call GetPropertyAccessInfo (only with --concurrent-inlining)")
......
......@@ -88,8 +88,6 @@ class BackgroundCompilationThread final : public v8::base::Thread {
TEST(TestConcurrentSharedFunctionInfo) {
FlagScope<bool> allow_natives_syntax(&i::FLAG_allow_natives_syntax, true);
FlagScope<bool> concurrent_inlining(&i::FLAG_concurrent_inlining, true);
FlagScope<bool> turbo_direct_heap_access(&i::FLAG_turbo_direct_heap_access,
true);
HandleAndZoneScope scope;
Isolate* isolate = scope.main_isolate();
......
......@@ -1269,7 +1269,6 @@ TEST(Regress10774) {
i::FLAG_allow_natives_syntax = true;
i::FLAG_turboprop = true;
i::FLAG_turbo_dynamic_map_checks = true;
i::FLAG_turbo_direct_heap_access = true;
#ifdef VERIFY_HEAP
i::FLAG_verify_heap = true;
#endif
......
......@@ -28,7 +28,8 @@
// Flags: --allow-natives-syntax
// Flags: --concurrent-recompilation --block-concurrent-recompilation
// Flags: --nostress-opt --no-always-opt
// Flags: --no-turbo-direct-heap-access
// Flags: --no-turboprop
// Flags: --no-concurrent-inlining
// Flags: --no-turbo-concurrent-get-property-access-info
// --nostress-opt is in place because this particular optimization
......
......@@ -24,7 +24,8 @@ class DeadCodeEliminationTest : public GraphTest {
protected:
Reduction Reduce(AdvancedReducer::Editor* editor, Node* node) {
DeadCodeElimination reducer(editor, graph(), common(), zone());
DeadCodeElimination reducer(editor, graph(), common(), zone(),
FLAG_concurrent_inlining);
return reducer.Reduce(node);
}
......
......@@ -59,9 +59,8 @@ INCOMPATIBLE_FLAGS_PER_VARIANT = {
"slow_path": ["--no-force-slow-path"],
"stress_concurrent_allocation": ["--single-threaded-gc", "--predictable"],
"stress_concurrent_inlining": ["--single-threaded", "--predictable",
"--no-turbo-direct-heap-access"],
"--no-concurrent-inlining"],
"stress_incremental_marking": ["--no-stress-incremental-marking"],
"future": ["--no-turbo-direct-heap-access"],
"stress_js_bg_compile_wasm_code_gc": ["--no-stress-background-compile"],
"stress": ["--no-stress-opt", "--always-opt", "--no-always-opt", "--liftoff",
"--max-inlined-bytecode-size=*",
......@@ -69,10 +68,8 @@ INCOMPATIBLE_FLAGS_PER_VARIANT = {
"--wasm-generic-wrapper"],
"sparkplug": ["--jitless", "--no-sparkplug" ],
"always_sparkplug": ["--jitless", "--no-sparkplug", "--no-always-sparkplug"],
"turboprop": ["--interrupt-budget=*", "--no-turbo-direct-heap-access",
"--no-turboprop"],
"turboprop_as_toptier": ["--interrupt-budget=*",
"--no-turbo-direct-heap-access", "--no-turboprop",
"turboprop": ["--interrupt-budget=*", "--no-turboprop"],
"turboprop_as_toptier": ["--interrupt-budget=*", "--no-turboprop",
"--no-turboprop-as-toptier"],
"code_serializer": ["--cache=after-execute", "--cache=full-code-cache",
"--cache=none"],
......@@ -109,9 +106,9 @@ INCOMPATIBLE_FLAGS_PER_EXTRA_FLAG = {
"--no-enable-sse4-1": ["--enable-sse4-1"],
"--optimize-for-size": ["--max-semi-space-size=*"],
"--stress_concurrent_allocation": ["--single-threaded-gc", "--predictable"],
"--stress_concurrent_inlining": ["--single-threaded", "--predictable"],
"--stress-concurrent-inlining":
INCOMPATIBLE_FLAGS_PER_VARIANT["stress_concurrent_inlining"],
"--stress-flush-bytecode": ["--no-stress-flush-bytecode"],
"--future": ["--no-turbo-direct-heap-access"],
"--stress-incremental-marking": INCOMPATIBLE_FLAGS_PER_VARIANT["stress_incremental_marking"],
}
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment