Commit bd1cc7b0 authored by Jakob Gruber, committed by V8 LUCI CQ

[compiler] Remove support for --no-concurrent-inlining

Now that concurrent inlining is shipping on stable, remove support for
--no-concurrent-inlining.

Note that it's still possible to run Turbofan exclusively on the
main thread by passing --no-concurrent-recompilation.

Bug: v8:7790, v8:12142, chromium:1240585
Change-Id: I1943bbbcad7dea7e3a3c337c239f14f7d96c23cd
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3308798
Reviewed-by: Liviu Rau <liviurau@chromium.org>
Reviewed-by: Maya Lekova <mslekova@chromium.org>
Commit-Queue: Jakob Gruber <jgruber@chromium.org>
Cr-Commit-Position: refs/heads/main@{#78644}
parent 1f2acdba
......@@ -2600,8 +2600,6 @@ filegroup(
"src/compiler/js-graph.h",
"src/compiler/js-heap-broker.cc",
"src/compiler/js-heap-broker.h",
"src/compiler/js-heap-copy-reducer.cc",
"src/compiler/js-heap-copy-reducer.h",
"src/compiler/js-inlining.cc",
"src/compiler/js-inlining.h",
"src/compiler/js-inlining-heuristic.cc",
......
......@@ -2784,7 +2784,6 @@ v8_header_set("v8_internal_headers") {
"src/compiler/js-generic-lowering.h",
"src/compiler/js-graph.h",
"src/compiler/js-heap-broker.h",
"src/compiler/js-heap-copy-reducer.h",
"src/compiler/js-inlining-heuristic.h",
"src/compiler/js-inlining.h",
"src/compiler/js-intrinsic-lowering.h",
......@@ -3845,7 +3844,6 @@ v8_compiler_sources = [
"src/compiler/js-generic-lowering.cc",
"src/compiler/js-graph.cc",
"src/compiler/js-heap-broker.cc",
"src/compiler/js-heap-copy-reducer.cc",
"src/compiler/js-inlining-heuristic.cc",
"src/compiler/js-inlining.cc",
"src/compiler/js-intrinsic-lowering.cc",
......
......@@ -361,7 +361,6 @@
{'name': 'v8testing', 'variant': 'stress_instruction_scheduling'},
{'name': 'v8testing', 'variant': 'stress_concurrent_allocation'},
{'name': 'v8testing', 'variant': 'stress_concurrent_inlining'},
{'name': 'v8testing', 'variant': 'no_concurrent_inlining'},
],
},
'v8_linux64_dict_tracking_dbg_ng_triggered': {
......@@ -474,7 +473,6 @@
{'name': 'v8testing', 'variant': 'extra'},
{'name': 'v8testing', 'variant': 'no_lfa'},
{'name': 'v8testing', 'variant': 'stress_instruction_scheduling'},
{'name': 'v8testing', 'variant': 'no_concurrent_inlining'},
],
},
'v8_linux64_perfetto_dbg_ng_triggered': {
......@@ -520,7 +518,6 @@
{'name': 'v8testing', 'variant': 'no_lfa'},
{'name': 'v8testing', 'variant': 'slow_path'},
{'name': 'v8testing', 'variant': 'stress_instruction_scheduling'},
{'name': 'v8testing', 'variant': 'no_concurrent_inlining'},
],
},
'v8_linux64_tsan_rel_ng_triggered': {
......@@ -1153,7 +1150,6 @@
{'name': 'v8testing', 'variant': 'minor_mc'},
{'name': 'v8testing', 'variant': 'no_lfa'},
{'name': 'v8testing', 'variant': 'stress_instruction_scheduling'},
{'name': 'v8testing', 'variant': 'no_concurrent_inlining'},
# Noavx.
{
'name': 'mozilla',
......@@ -1215,7 +1211,6 @@
{'name': 'v8testing', 'variant': 'stress_instruction_scheduling'},
{'name': 'v8testing', 'variant': 'stress_concurrent_allocation'},
{'name': 'v8testing', 'variant': 'stress_concurrent_inlining'},
{'name': 'v8testing', 'variant': 'no_concurrent_inlining'},
# Noavx.
{
'name': 'mozilla',
......
......@@ -66,10 +66,6 @@ OptimizedCompilationInfo::OptimizedCompilationInfo(
void OptimizedCompilationInfo::ConfigureFlags() {
if (FLAG_turbo_inline_js_wasm_calls) set_inline_js_wasm_calls();
if (IsTurboprop() || FLAG_concurrent_inlining) {
set_concurrent_inlining();
}
switch (code_kind_) {
case CodeKind::TURBOFAN:
if (FLAG_function_context_specialization) {
......
......@@ -68,9 +68,8 @@ class V8_EXPORT_PRIVATE OptimizedCompilationInfo final {
V(TraceTurboAllocation, trace_turbo_allocation, 14) \
V(TraceHeapBroker, trace_heap_broker, 15) \
V(WasmRuntimeExceptionSupport, wasm_runtime_exception_support, 16) \
V(ConcurrentInlining, concurrent_inlining, 17) \
V(DiscardResultForTesting, discard_result_for_testing, 18) \
V(InlineJSWasmCalls, inline_js_wasm_calls, 19)
V(DiscardResultForTesting, discard_result_for_testing, 17) \
V(InlineJSWasmCalls, inline_js_wasm_calls, 18)
enum Flag {
#define DEF_ENUM(Camel, Lower, Bit) k##Camel = 1 << Bit,
......
......@@ -730,8 +730,7 @@ bool AccessInfoFactory::TryLoadPropertyDetails(
}
} else {
DescriptorArray descriptors = *map.instance_descriptors().object();
*index_out = descriptors.Search(*name.object(), *map.object(),
broker()->is_concurrent_inlining());
*index_out = descriptors.Search(*name.object(), *map.object(), true);
if (index_out->is_found()) {
*details_out = descriptors.GetDetails(*index_out);
}
......@@ -744,10 +743,8 @@ PropertyAccessInfo AccessInfoFactory::ComputePropertyAccessInfo(
MapRef map, NameRef name, AccessMode access_mode) const {
CHECK(name.IsUniqueName());
// Dictionary property const tracking is unsupported when concurrent inlining
// is enabled.
CHECK_IMPLIES(V8_DICT_PROPERTY_CONST_TRACKING_BOOL,
!broker()->is_concurrent_inlining());
// Dictionary property const tracking is unsupported with concurrent inlining.
CHECK(!V8_DICT_PROPERTY_CONST_TRACKING_BOOL);
JSHeapBroker::MapUpdaterGuardIfNeeded mumd_scope(broker());
......@@ -911,12 +908,6 @@ PropertyAccessInfo AccessInfoFactory::ComputePropertyAccessInfo(
}
// Walk up the prototype chain.
if (!broker()->is_concurrent_inlining()) {
if (!map.TrySerializePrototype(NotConcurrentInliningTag{broker()})) {
return Invalid();
}
}
// Load the map's prototype's map to guarantee that every time we use it,
// we use the same Map.
base::Optional<HeapObjectRef> prototype = map.prototype();
......@@ -1129,8 +1120,7 @@ PropertyAccessInfo AccessInfoFactory::LookupTransition(
PropertyAttributes attrs) const {
// Check if the {map} has a data transition with the given {name}.
Map transition =
TransitionsAccessor(isolate(), map.object(),
broker()->is_concurrent_inlining())
TransitionsAccessor(isolate(), map.object(), true)
.SearchTransition(*name.object(), PropertyKind::kData, attrs);
if (transition.is_null()) return Invalid();
......@@ -1202,11 +1192,6 @@ PropertyAccessInfo AccessInfoFactory::LookupTransition(
unrecorded_dependencies.push_back(
dependencies()->TransitionDependencyOffTheRecord(transition_map));
if (!broker()->is_concurrent_inlining()) {
transition_map.SerializeBackPointer(
NotConcurrentInliningTag{broker()}); // For BuildPropertyStore.
}
// Transitioning stores *may* store to const fields. The resulting
// DataConstant access infos can be distinguished from later, i.e. redundant,
// stores to the same constant field by the presence of a transition map.
......
......@@ -1112,9 +1112,6 @@ void CompilationDependencies::DependOnElementsKind(
void CompilationDependencies::DependOnOwnConstantElement(
const JSObjectRef& holder, uint32_t index, const ObjectRef& element) {
// Only valid if the holder can use direct reads, since validation uses
// GetOwnConstantElementFromHeap.
DCHECK(holder.should_access_heap() || broker_->is_concurrent_inlining());
RecordDependency(
zone_->New<OwnConstantElementDependency>(holder, index, element));
}
......@@ -1286,7 +1283,6 @@ void CompilationDependencies::DependOnElementsKinds(
void CompilationDependencies::DependOnConsistentJSFunctionView(
const JSFunctionRef& function) {
DCHECK(broker_->is_concurrent_inlining());
RecordDependency(zone_->New<ConsistentJSFunctionViewDependency>(function));
}
......
......@@ -50,8 +50,7 @@ namespace compiler {
// kNeverSerializedHeapObject: The underlying V8 object is a (potentially
// mutable) HeapObject and the data is an instance of ObjectData. Its handle
// must be persistent so that the GC can update it at a safepoint. Via this
// handle, the object can be accessed concurrently to the main thread. To be
// used the flag --concurrent-inlining must be on.
// handle, the object can be accessed concurrently to the main thread.
//
// kUnserializedReadOnlyHeapObject: The underlying V8 object is a read-only
// HeapObject and the data is an instance of ObjectData. For
......@@ -79,10 +78,6 @@ bool IsReadOnlyHeapObjectForCompiler(HeapObject object) {
} // namespace
NotConcurrentInliningTag::NotConcurrentInliningTag(JSHeapBroker* broker) {
CHECK(!broker->is_concurrent_inlining());
}
class ObjectData : public ZoneObject {
public:
ObjectData(JSHeapBroker* broker, ObjectData** storage, Handle<Object> object,
......@@ -286,77 +281,10 @@ class JSReceiverData : public HeapObjectData {
class JSObjectData : public JSReceiverData {
public:
JSObjectData(JSHeapBroker* broker, ObjectData** storage,
Handle<JSObject> object, ObjectDataKind kind);
// Recursive serialization of all reachable JSObjects.
bool SerializeAsBoilerplateRecursive(JSHeapBroker* broker,
NotConcurrentInliningTag,
int max_depth = kMaxFastLiteralDepth);
ObjectData* GetInobjectField(int property_index) const;
// Shallow serialization of {elements}.
void SerializeElements(JSHeapBroker* broker, NotConcurrentInliningTag);
bool serialized_elements() const { return serialized_elements_; }
ObjectData* elements() const;
ObjectData* raw_properties_or_hash() const { return raw_properties_or_hash_; }
void SerializeObjectCreateMap(JSHeapBroker* broker, NotConcurrentInliningTag);
// Can be nullptr.
ObjectData* object_create_map(JSHeapBroker* broker) const {
if (!serialized_object_create_map_) {
DCHECK_NULL(object_create_map_);
TRACE_MISSING(broker, "object_create_map on " << this);
}
return object_create_map_;
}
// This method is only used to assert our invariants.
bool cow_or_empty_elements_tenured() const;
bool has_extra_serialized_data() const {
return serialized_as_boilerplate_ || serialized_elements_ ||
serialized_object_create_map_;
}
private:
ObjectData* elements_ = nullptr;
ObjectData* raw_properties_or_hash_ = nullptr;
bool cow_or_empty_elements_tenured_ = false;
// The {serialized_as_boilerplate} flag is set when all recursively
// reachable JSObjects are serialized.
bool serialized_as_boilerplate_ = false;
bool serialized_elements_ = false;
ZoneVector<ObjectData*> inobject_fields_;
bool serialized_object_create_map_ = false;
ObjectData* object_create_map_ = nullptr;
Handle<JSObject> object, ObjectDataKind kind)
: JSReceiverData(broker, storage, object, kind) {}
};
void JSObjectData::SerializeObjectCreateMap(JSHeapBroker* broker,
NotConcurrentInliningTag) {
if (serialized_object_create_map_) return;
serialized_object_create_map_ = true;
TraceScope tracer(broker, this, "JSObjectData::SerializeObjectCreateMap");
Handle<JSObject> jsobject = Handle<JSObject>::cast(object());
if (jsobject->map().is_prototype_map()) {
Handle<Object> maybe_proto_info(jsobject->map().prototype_info(),
broker->isolate());
if (maybe_proto_info->IsPrototypeInfo()) {
auto proto_info = Handle<PrototypeInfo>::cast(maybe_proto_info);
if (proto_info->HasObjectCreateMap()) {
DCHECK_NULL(object_create_map_);
object_create_map_ =
broker->GetOrCreateData(proto_info->ObjectCreateMap());
}
}
}
}
namespace {
base::Optional<ObjectRef> GetOwnFastDataPropertyFromHeap(
......@@ -460,59 +388,13 @@ class JSTypedArrayData : public JSObjectData {
JSTypedArrayData(JSHeapBroker* broker, ObjectData** storage,
Handle<JSTypedArray> object, ObjectDataKind kind)
: JSObjectData(broker, storage, object, kind) {}
void Serialize(JSHeapBroker* broker, NotConcurrentInliningTag tag);
bool serialized() const { return serialized_; }
bool is_on_heap() const { return is_on_heap_; }
size_t length() const { return length_; }
void* data_ptr() const { return data_ptr_; }
ObjectData* buffer() const { return buffer_; }
private:
bool serialized_ = false;
bool is_on_heap_ = false;
size_t length_ = 0;
void* data_ptr_ = nullptr;
ObjectData* buffer_ = nullptr;
};
void JSTypedArrayData::Serialize(JSHeapBroker* broker,
NotConcurrentInliningTag) {
if (serialized_) return;
serialized_ = true;
TraceScope tracer(broker, this, "JSTypedArrayData::Serialize");
Handle<JSTypedArray> typed_array = Handle<JSTypedArray>::cast(object());
is_on_heap_ = typed_array->is_on_heap();
length_ = typed_array->length();
data_ptr_ = typed_array->DataPtr();
if (!is_on_heap()) {
DCHECK_NULL(buffer_);
buffer_ = broker->GetOrCreateData(typed_array->buffer());
}
}
class JSDataViewData : public JSObjectData {
public:
JSDataViewData(JSHeapBroker* broker, ObjectData** storage,
Handle<JSDataView> object, ObjectDataKind kind)
: JSObjectData(broker, storage, object, kind) {
DCHECK_EQ(kind, kBackgroundSerializedHeapObject);
if (!broker->is_concurrent_inlining()) {
byte_length_ = object->byte_length();
}
}
size_t byte_length() const {
return byte_length_;
}
private:
size_t byte_length_ = 0; // Only valid if not concurrent inlining.
: JSObjectData(broker, storage, object, kind) {}
};
class JSBoundFunctionData : public JSObjectData {
......@@ -641,100 +523,27 @@ class MapData : public HeapObjectData {
InstanceType instance_type() const { return instance_type_; }
int instance_size() const { return instance_size_; }
byte bit_field() const { return bit_field_; }
byte bit_field2() const { return bit_field2_; }
uint32_t bit_field3() const { return bit_field3_; }
bool can_be_deprecated() const { return can_be_deprecated_; }
bool can_transition() const { return can_transition_; }
int in_object_properties_start_in_words() const {
CHECK(InstanceTypeChecker::IsJSObject(instance_type()));
return in_object_properties_start_in_words_;
}
int in_object_properties() const {
CHECK(InstanceTypeChecker::IsJSObject(instance_type()));
return in_object_properties_;
}
int constructor_function_index() const { return constructor_function_index_; }
int NextFreePropertyIndex() const { return next_free_property_index_; }
int UnusedPropertyFields() const { return unused_property_fields_; }
bool supports_fast_array_iteration() const {
return supports_fast_array_iteration_;
}
bool supports_fast_array_resize() const {
return supports_fast_array_resize_;
}
bool is_abandoned_prototype_map() const {
return is_abandoned_prototype_map_;
}
void SerializeConstructor(JSHeapBroker* broker, NotConcurrentInliningTag tag);
ObjectData* GetConstructor() const {
CHECK(serialized_constructor_);
return constructor_;
}
void SerializeBackPointer(JSHeapBroker* broker, NotConcurrentInliningTag tag);
ObjectData* GetBackPointer() const {
CHECK(serialized_backpointer_);
return backpointer_;
}
bool TrySerializePrototype(JSHeapBroker* broker,
NotConcurrentInliningTag tag);
void SerializePrototype(JSHeapBroker* broker, NotConcurrentInliningTag tag) {
CHECK(TrySerializePrototype(broker, tag));
}
ObjectData* prototype() const {
DCHECK_EQ(serialized_prototype_, prototype_ != nullptr);
return prototype_;
}
void SerializeForElementStore(JSHeapBroker* broker,
NotConcurrentInliningTag tag);
bool has_extra_serialized_data() const {
return serialized_constructor_ || serialized_backpointer_ ||
serialized_prototype_ || serialized_for_element_store_;
}
private:
// The following fields should be const in principle, but construction
// requires locking the MapUpdater lock. For this reason, it's easier to
// initialize these inside the constructor body, not in the initializer list.
// This block of fields will always be serialized.
InstanceType instance_type_;
int instance_size_;
uint32_t bit_field3_;
int unused_property_fields_;
bool is_abandoned_prototype_map_;
int in_object_properties_;
// These fields will only serialized if we are not concurrent inlining.
byte bit_field_;
byte bit_field2_;
bool can_be_deprecated_;
bool can_transition_;
int in_object_properties_start_in_words_;
int constructor_function_index_;
int next_free_property_index_;
bool supports_fast_array_iteration_;
bool supports_fast_array_resize_;
// These extra fields still have to be serialized (e.g prototype_), since
// those classes have fields themselves which are not being directly read.
// This means that, for example, even though we can get the prototype itself
// with direct reads, some of its fields require serialization.
bool serialized_constructor_ = false;
ObjectData* constructor_ = nullptr;
bool serialized_backpointer_ = false;
ObjectData* backpointer_ = nullptr;
bool serialized_prototype_ = false;
ObjectData* prototype_ = nullptr;
bool serialized_for_element_store_ = false;
};
namespace {
......@@ -813,17 +622,6 @@ void JSFunctionData::Cache(JSHeapBroker* broker) {
initial_map_ref.instance_size();
}
CHECK_GT(initial_map_instance_size_with_min_slack_, 0);
if (!initial_map_->should_access_heap() &&
!broker->is_concurrent_inlining()) {
// TODO(neis): This is currently only needed for native_context's
// object_function, as used by GetObjectCreateMap. If no further use
// sites show up, we should move this into NativeContextData::Serialize.
initial_map_->SerializePrototype(broker,
NotConcurrentInliningTag{broker});
initial_map_->SerializeConstructor(broker,
NotConcurrentInliningTag{broker});
}
}
if (has_initial_map_) {
......@@ -925,7 +723,6 @@ bool JSFunctionData::IsConsistentWithHeapState(JSHeapBroker* broker) const {
}
bool JSFunctionRef::IsConsistentWithHeapState() const {
DCHECK(broker()->is_concurrent_inlining());
DCHECK(broker()->IsMainThread());
return data()->AsJSFunction()->IsConsistentWithHeapState(broker());
}
......@@ -1044,26 +841,6 @@ MapData::MapData(JSHeapBroker* broker, ObjectData** storage, Handle<Map> object,
is_abandoned_prototype_map_ = object->is_abandoned_prototype_map();
in_object_properties_ =
object->IsJSObjectMap() ? object->GetInObjectProperties() : 0;
// These fields are only needed to be serialized when not concurrent inlining
// and thus disabling direct reads.
if (!broker->is_concurrent_inlining()) {
bit_field_ = object->relaxed_bit_field();
bit_field2_ = object->bit_field2();
can_be_deprecated_ = object->NumberOfOwnDescriptors() > 0
? object->CanBeDeprecated()
: false;
can_transition_ = object->CanTransition();
in_object_properties_start_in_words_ =
object->IsJSObjectMap() ? object->GetInObjectPropertiesStartInWords()
: 0;
next_free_property_index_ = object->NextFreePropertyIndex();
constructor_function_index_ = object->IsPrimitiveMap()
? object->GetConstructorFunctionIndex()
: Map::kNoConstructorFunctionIndex;
supports_fast_array_iteration_ = SupportsFastArrayIteration(broker, object);
supports_fast_array_resize_ = SupportsFastArrayResize(broker, object);
}
}
class FixedArrayBaseData : public HeapObjectData {
......@@ -1094,40 +871,13 @@ class ScriptContextTableData : public FixedArrayData {
: FixedArrayData(broker, storage, object, kind) {}
};
JSObjectData::JSObjectData(JSHeapBroker* broker, ObjectData** storage,
Handle<JSObject> object, ObjectDataKind kind)
: JSReceiverData(broker, storage, object, kind),
inobject_fields_(broker->zone()) {}
class JSArrayData : public JSObjectData {
public:
JSArrayData(JSHeapBroker* broker, ObjectData** storage,
Handle<JSArray> object, ObjectDataKind kind)
: JSObjectData(broker, storage, object, kind) {}
void Serialize(JSHeapBroker* broker, NotConcurrentInliningTag tag);
ObjectData* length() const {
CHECK(serialized_);
return length_;
}
private:
bool serialized_ = false;
ObjectData* length_ = nullptr;
};
void JSArrayData::Serialize(JSHeapBroker* broker,
NotConcurrentInliningTag tag) {
if (serialized_) return;
serialized_ = true;
TraceScope tracer(broker, this, "JSArrayData::Serialize");
Handle<JSArray> jsarray = Handle<JSArray>::cast(object());
DCHECK_NULL(length_);
length_ = broker->GetOrCreateData(jsarray->length());
}
class JSGlobalObjectData : public JSObjectData {
public:
JSGlobalObjectData(JSHeapBroker* broker, ObjectData** storage,
......@@ -1164,169 +914,6 @@ HEAP_BROKER_OBJECT_LIST(DEFINE_IS)
HEAP_BROKER_BACKGROUND_SERIALIZED_OBJECT_LIST(DEFINE_AS)
#undef DEFINE_AS
ObjectData* JSObjectData::GetInobjectField(int property_index) const {
CHECK_LT(static_cast<size_t>(property_index), inobject_fields_.size());
return inobject_fields_[property_index];
}
bool JSObjectData::cow_or_empty_elements_tenured() const {
return cow_or_empty_elements_tenured_;
}
ObjectData* JSObjectData::elements() const {
CHECK(serialized_elements_);
return elements_;
}
void JSObjectData::SerializeElements(JSHeapBroker* broker,
NotConcurrentInliningTag) {
if (serialized_elements_) return;
serialized_elements_ = true;
TraceScope tracer(broker, this, "JSObjectData::SerializeElements");
Handle<JSObject> boilerplate = Handle<JSObject>::cast(object());
Handle<FixedArrayBase> elements_object(boilerplate->elements(),
broker->isolate());
DCHECK_NULL(elements_);
elements_ = broker->GetOrCreateData(elements_object);
DCHECK(elements_->IsFixedArrayBase());
}
void MapData::SerializeConstructor(JSHeapBroker* broker,
NotConcurrentInliningTag tag) {
if (serialized_constructor_) return;
serialized_constructor_ = true;
TraceScope tracer(broker, this, "MapData::SerializeConstructor");
Handle<Map> map = Handle<Map>::cast(object());
DCHECK(!map->IsContextMap());
DCHECK_NULL(constructor_);
constructor_ = broker->GetOrCreateData(map->GetConstructor());
}
void MapData::SerializeBackPointer(JSHeapBroker* broker,
NotConcurrentInliningTag tag) {
if (serialized_backpointer_) return;
serialized_backpointer_ = true;
TraceScope tracer(broker, this, "MapData::SerializeBackPointer");
Handle<Map> map = Handle<Map>::cast(object());
DCHECK_NULL(backpointer_);
DCHECK(!map->IsContextMap());
backpointer_ = broker->GetOrCreateData(map->GetBackPointer());
}
bool MapData::TrySerializePrototype(JSHeapBroker* broker,
NotConcurrentInliningTag tag) {
if (serialized_prototype_) return true;
TraceScope tracer(broker, this, "MapData::SerializePrototype");
Handle<Map> map = Handle<Map>::cast(object());
DCHECK_NULL(prototype_);
prototype_ = broker->TryGetOrCreateData(map->prototype());
if (prototype_ == nullptr) return false;
serialized_prototype_ = true;
return true;
}
bool JSObjectData::SerializeAsBoilerplateRecursive(JSHeapBroker* broker,
NotConcurrentInliningTag tag,
int max_depth) {
if (serialized_as_boilerplate_) return true;
// If serialization succeeds, we set this to true at the end.
TraceScope tracer(broker, this,
"JSObjectData::SerializeAsBoilerplateRecursive");
Handle<JSObject> boilerplate = Handle<JSObject>::cast(object());
DCHECK_GE(max_depth, 0);
if (max_depth == 0) return false;
// Serialize the elements.
Isolate* const isolate = broker->isolate();
Handle<FixedArrayBase> elements_object(boilerplate->elements(), isolate);
// Boilerplate objects should only be reachable from their allocation site,
// so it is safe to assume that the elements have not been serialized yet.
bool const empty_or_cow =
elements_object->length() == 0 ||
elements_object->map() == ReadOnlyRoots(isolate).fixed_cow_array_map();
if (empty_or_cow) {
cow_or_empty_elements_tenured_ = !ObjectInYoungGeneration(*elements_object);
}
raw_properties_or_hash_ =
broker->GetOrCreateData(boilerplate->raw_properties_or_hash());
serialized_elements_ = true;
elements_ = broker->GetOrCreateData(elements_object);
DCHECK(elements_->IsFixedArrayBase());
if (!boilerplate->HasFastProperties() ||
boilerplate->property_array().length() != 0) {
return false;
}
// Check the in-object properties.
inobject_fields_.clear();
Handle<DescriptorArray> descriptors(
boilerplate->map().instance_descriptors(isolate), isolate);
for (InternalIndex i : boilerplate->map().IterateOwnDescriptors()) {
PropertyDetails details = descriptors->GetDetails(i);
if (details.location() != PropertyLocation::kField) continue;
DCHECK_EQ(PropertyKind::kData, details.kind());
FieldIndex field_index = FieldIndex::ForDescriptor(boilerplate->map(), i);
// Make sure {field_index} agrees with {inobject_properties} on the index of
// this field.
DCHECK_EQ(field_index.property_index(),
static_cast<int>(inobject_fields_.size()));
Handle<Object> value(boilerplate->RawFastPropertyAt(field_index), isolate);
ObjectData* value_data = broker->GetOrCreateData(value);
inobject_fields_.push_back(value_data);
if (value_data->IsJSObject() && !value_data->should_access_heap()) {
if (!value_data->AsJSObject()->SerializeAsBoilerplateRecursive(
broker, tag, max_depth - 1))
return false;
}
}
TRACE(broker, "Copied " << inobject_fields_.size() << " in-object fields");
if (empty_or_cow || elements_->should_access_heap()) {
// No need to do anything here. Empty or copy-on-write elements
// do not need to be serialized because we only need to store the elements
// reference to the allocated object.
} else if (boilerplate->HasSmiOrObjectElements()) {
Handle<FixedArray> fast_elements =
Handle<FixedArray>::cast(elements_object);
int length = elements_object->length();
for (int i = 0; i < length; i++) {
Handle<Object> value(fast_elements->get(i), isolate);
if (value->IsJSObject()) {
ObjectData* value_data = broker->GetOrCreateData(value);
if (!value_data->should_access_heap()) {
if (!value_data->AsJSObject()->SerializeAsBoilerplateRecursive(
broker, tag, max_depth - 1)) {
return false;
}
}
}
}
} else {
if (!boilerplate->HasDoubleElements()) return false;
int const size = FixedDoubleArray::SizeFor(elements_object->length());
if (size > kMaxRegularHeapObjectSize) return false;
}
if (IsJSArray() && !broker->is_concurrent_inlining()) {
AsJSArray()->Serialize(broker, NotConcurrentInliningTag{broker});
}
serialized_as_boilerplate_ = true;
return true;
}
bool ObjectRef::equals(const ObjectRef& other) const {
return data_ == other.data_;
}
......@@ -1368,50 +955,6 @@ void JSHeapBroker::InitializeAndStartSerializing() {
CollectArrayAndObjectPrototypes();
SetTargetNativeContextRef(target_native_context().object());
if (!is_concurrent_inlining()) {
Factory* const f = isolate()->factory();
ObjectData* data;
data = GetOrCreateData(f->array_buffer_detaching_protector());
if (!data->should_access_heap()) {
data->AsPropertyCell()->Cache(this);
}
data = GetOrCreateData(f->array_constructor_protector());
if (!data->should_access_heap()) {
data->AsPropertyCell()->Cache(this);
}
data = GetOrCreateData(f->array_iterator_protector());
if (!data->should_access_heap()) {
data->AsPropertyCell()->Cache(this);
}
data = GetOrCreateData(f->array_species_protector());
if (!data->should_access_heap()) {
data->AsPropertyCell()->Cache(this);
}
data = GetOrCreateData(f->no_elements_protector());
if (!data->should_access_heap()) {
data->AsPropertyCell()->Cache(this);
}
data = GetOrCreateData(f->promise_hook_protector());
if (!data->should_access_heap()) {
data->AsPropertyCell()->Cache(this);
}
data = GetOrCreateData(f->promise_species_protector());
if (!data->should_access_heap()) {
data->AsPropertyCell()->Cache(this);
}
data = GetOrCreateData(f->promise_then_protector());
if (!data->should_access_heap()) {
data->AsPropertyCell()->Cache(this);
}
data = GetOrCreateData(f->string_length_protector());
if (!data->should_access_heap()) {
data->AsPropertyCell()->Cache(this);
}
GetOrCreateData(f->many_closures_cell());
GetOrCreateData(CodeFactory::CEntry(isolate(), 1, SaveFPRegsMode::kIgnore,
ArgvMode::kStack, true));
TRACE(this, "Finished serializing standard objects");
}
}
namespace {
......@@ -1551,29 +1094,6 @@ base::Optional<MapRef> MapRef::AsElementsKind(ElementsKind kind) const {
return MakeRefAssumeMemoryFence(broker(), maybe_result.value());
}
void MapRef::SerializeForElementStore(NotConcurrentInliningTag tag) {
if (data()->should_access_heap()) return;
CHECK_EQ(broker()->mode(), JSHeapBroker::kSerializing);
data()->AsMap()->SerializeForElementStore(broker(), tag);
}
void MapData::SerializeForElementStore(JSHeapBroker* broker,
NotConcurrentInliningTag tag) {
if (serialized_for_element_store_) return;
serialized_for_element_store_ = true;
TraceScope tracer(broker, this, "MapData::SerializeForElementStore");
// TODO(solanes, v8:7790): This should use MapData methods rather than
// constructing MapRefs, but it involves non-trivial refactoring and this
// method should go away anyway once the compiler is fully concurrent.
MapRef map(broker, this);
do {
map.SerializePrototype(tag);
map = map.prototype().value().map();
} while (map.IsJSObjectMap() && map.is_stable() &&
IsFastElementsKind(map.elements_kind()));
}
bool MapRef::HasOnlyStablePrototypesWithFastElements(
ZoneVector<MapRef>* prototype_maps) {
DCHECK_NOT_NULL(prototype_maps);
......@@ -1590,17 +1110,11 @@ bool MapRef::HasOnlyStablePrototypesWithFastElements(
}
bool MapRef::supports_fast_array_iteration() const {
if (data_->should_access_heap() || broker()->is_concurrent_inlining()) {
return SupportsFastArrayIteration(broker(), object());
}
return data()->AsMap()->supports_fast_array_iteration();
return SupportsFastArrayIteration(broker(), object());
}
bool MapRef::supports_fast_array_resize() const {
if (data_->should_access_heap() || broker()->is_concurrent_inlining()) {
return SupportsFastArrayResize(broker(), object());
}
return data()->AsMap()->supports_fast_array_resize();
return SupportsFastArrayResize(broker(), object());
}
namespace {
......@@ -1608,7 +1122,6 @@ namespace {
void RecordConsistentJSFunctionViewDependencyIfNeeded(
const JSHeapBroker* broker, const JSFunctionRef& ref, JSFunctionData* data,
JSFunctionData::UsedField used_field) {
if (!broker->is_concurrent_inlining()) return;
if (!data->has_any_used_field()) {
// Deduplicate dependencies.
broker->dependencies()->DependOnConsistentJSFunctionView(ref);
......@@ -1667,75 +1180,39 @@ FeedbackCellRef FeedbackVectorRef::GetClosureFeedbackCell(int index) const {
}
base::Optional<ObjectRef> JSObjectRef::raw_properties_or_hash() const {
if (data_->should_access_heap() || broker()->is_concurrent_inlining()) {
return TryMakeRef(broker(), object()->raw_properties_or_hash());
}
return ObjectRef(broker(), data()->AsJSObject()->raw_properties_or_hash());
return TryMakeRef(broker(), object()->raw_properties_or_hash());
}
base::Optional<ObjectRef> JSObjectRef::RawInobjectPropertyAt(
FieldIndex index) const {
CHECK(index.is_inobject());
if (data_->should_access_heap() || broker()->is_concurrent_inlining()) {
Handle<Object> value;
{
DisallowGarbageCollection no_gc;
PtrComprCageBase cage_base = broker()->cage_base();
Map current_map = object()->map(cage_base, kAcquireLoad);
// If the map changed in some prior GC epoch, our {index} could be
// outside the valid bounds of the cached map.
if (*map().object() != current_map) {
TRACE_BROKER_MISSING(broker(), "Map change detected in " << *this);
return {};
}
Handle<Object> value;
{
DisallowGarbageCollection no_gc;
PtrComprCageBase cage_base = broker()->cage_base();
Map current_map = object()->map(cage_base, kAcquireLoad);
base::Optional<Object> maybe_value =
object()->RawInobjectPropertyAt(cage_base, current_map, index);
if (!maybe_value.has_value()) {
TRACE_BROKER_MISSING(broker(),
"Unable to safely read property in " << *this);
return {};
}
value = broker()->CanonicalPersistentHandle(maybe_value.value());
// If the map changed in some prior GC epoch, our {index} could be
// outside the valid bounds of the cached map.
if (*map().object() != current_map) {
TRACE_BROKER_MISSING(broker(), "Map change detected in " << *this);
return {};
}
return TryMakeRef(broker(), value);
}
JSObjectData* object_data = data()->AsJSObject();
return ObjectRef(broker(),
object_data->GetInobjectField(index.property_index()));
}
void JSObjectRef::SerializeAsBoilerplateRecursive(
NotConcurrentInliningTag tag) {
if (data_->should_access_heap()) return;
CHECK_EQ(broker()->mode(), JSHeapBroker::kSerializing);
data()->AsJSObject()->SerializeAsBoilerplateRecursive(broker(), tag);
}
void AllocationSiteRef::SerializeRecursive(NotConcurrentInliningTag tag) {
DCHECK(data_->should_access_heap());
if (broker()->mode() == JSHeapBroker::kDisabled) return;
DCHECK_EQ(broker()->mode(), JSHeapBroker::kSerializing);
if (boilerplate().has_value()) {
boilerplate()->SerializeAsBoilerplateRecursive(tag);
}
if (nested_site().IsAllocationSite()) {
nested_site().AsAllocationSite().SerializeRecursive(tag);
base::Optional<Object> maybe_value =
object()->RawInobjectPropertyAt(cage_base, current_map, index);
if (!maybe_value.has_value()) {
TRACE_BROKER_MISSING(broker(),
"Unable to safely read property in " << *this);
return {};
}
value = broker()->CanonicalPersistentHandle(maybe_value.value());
}
}
void JSObjectRef::SerializeElements(NotConcurrentInliningTag tag) {
if (data_->should_access_heap()) return;
CHECK_EQ(broker()->mode(), JSHeapBroker::kSerializing);
data()->AsJSObject()->SerializeElements(broker(), tag);
return TryMakeRef(broker(), value);
}
// Returns true iff {elements} (a backing store belonging to this object) is
// not in the young generation, i.e. it has been tenured.
bool JSObjectRef::IsElementsTenured(const FixedArrayBaseRef& elements) {
  // Direct heap read is always safe here now that concurrent inlining is the
  // only mode; the old serialized-data path (and the unreachable duplicate
  // return after it) referenced the removed is_concurrent_inlining() API.
  return !ObjectInYoungGeneration(*elements.object());
}
FieldIndex MapRef::GetFieldIndexFor(InternalIndex descriptor_index) const {
......@@ -1746,10 +1223,7 @@ FieldIndex MapRef::GetFieldIndexFor(InternalIndex descriptor_index) const {
}
// Byte offset of the i-th in-object property, read directly from the heap
// Map. The previous serialized-data fallback (and the unreachable duplicate
// return below it) depended on the removed is_concurrent_inlining() flag.
int MapRef::GetInObjectPropertyOffset(int i) const {
  return object()->GetInObjectPropertyOffset(i);
}
PropertyDetails MapRef::GetPropertyDetails(
......@@ -1785,7 +1259,6 @@ MapRef MapRef::FindFieldOwner(InternalIndex descriptor_index) const {
base::Optional<ObjectRef> StringRef::GetCharAsStringOrUndefined(
uint32_t index) const {
DCHECK(data_->should_access_heap() || broker()->is_concurrent_inlining());
String maybe_char;
auto result = ConcurrentLookupIterator::TryGetOwnChar(
&maybe_char, broker()->isolate(), broker()->local_isolate(), *object(),
......@@ -1802,7 +1275,6 @@ base::Optional<ObjectRef> StringRef::GetCharAsStringOrUndefined(
}
// Only internalized and thin strings are "supported" string kinds; other
// kinds (e.g. cons strings) are presumably unsafe to inspect from the
// compiler thread -- see callers. The old `!is_concurrent_inlining()`
// early-out is gone along with that flag.
bool StringRef::SupportedStringKind() const {
  return IsInternalizedString() || object()->IsThinString();
}
......@@ -1927,31 +1399,14 @@ int BytecodeArrayRef::handler_table_size() const {
return BitField::decode(ObjectRef::data()->As##holder()->field()); \
}
// Like IF_ACCESS_FROM_HEAP but we also allow direct heap access for
// kBackgroundSerialized only for methods that we identified to be safe.
#define IF_ACCESS_FROM_HEAP_WITH_FLAG_C(name) \
if (data_->should_access_heap() || broker()->is_concurrent_inlining()) { \
return object()->name(); \
}
// Like BIMODAL_ACCESSOR except that we force a direct heap access if
// broker()->is_concurrent_inlining() is true (even for kBackgroundSerialized).
// This is because we identified the method to be safe to use direct heap
// access, but the holder##Data class still needs to be serialized.
#define BIMODAL_ACCESSOR_WITH_FLAG_C(holder, result, name) \
result holder##Ref::name() const { \
IF_ACCESS_FROM_HEAP_WITH_FLAG_C(name); \
return ObjectRef::data()->As##holder()->name(); \
}
#define BIMODAL_ACCESSOR_WITH_FLAG_B(holder, field, name, BitField) \
typename BitField::FieldType holder##Ref::name() const { \
IF_ACCESS_FROM_HEAP_WITH_FLAG_C(name); \
return BitField::decode(ObjectRef::data()->As##holder()->field()); \
}
// Defines holder##Ref::name() as a plain direct read from the heap object.
#define HEAP_ACCESSOR_C(holder, result, name) \
  result holder##Ref::name() const { return object()->name(); }

// Like HEAP_ACCESSOR_C, but the return type is derived from the given
// BitField (the accessor still just forwards to the heap object).
#define HEAP_ACCESSOR_B(holder, field, name, BitField) \
  typename BitField::FieldType holder##Ref::name() const { \
    return object()->name(); \
  }
// Reads the allocation site's nested_site field and wraps it in a ref,
// assuming the usual publication fence for heap-written fields.
ObjectRef AllocationSiteRef::nested_site() const {
  Object raw_nested = object()->nested_site();
  return MakeRefAssumeMemoryFence(broker(), raw_nested);
}
......@@ -1983,52 +1438,44 @@ uint64_t HeapNumberRef::value_as_bits() const {
}
// The bound target function of a JSBoundFunction.
JSReceiverRef JSBoundFunctionRef::bound_target_function() const {
  // The stale DCHECK referencing the removed is_concurrent_inlining() API is
  // dropped; direct heap access is now the only mode.
  // Immutable after initialization.
  return MakeRefAssumeMemoryFence(broker(), object()->bound_target_function());
}
// The receiver that was bound into this JSBoundFunction.
ObjectRef JSBoundFunctionRef::bound_this() const {
  // Stale DCHECK on the removed is_concurrent_inlining() API dropped.
  // Immutable after initialization.
  return MakeRefAssumeMemoryFence(broker(), object()->bound_this());
}
// The arguments that were bound into this JSBoundFunction.
FixedArrayRef JSBoundFunctionRef::bound_arguments() const {
  // Stale DCHECK on the removed is_concurrent_inlining() API dropped.
  // Immutable after initialization.
  return MakeRefAssumeMemoryFence(broker(), object()->bound_arguments());
}
// Immutable after initialization.
HEAP_ACCESSOR_C(JSDataView, size_t, byte_length)

// Map bit-field accessors: each is a plain direct heap read. The previous
// BIMODAL_ACCESSOR_WITH_FLAG_* definitions of the same accessors are removed
// -- keeping both would define every symbol twice (ODR violation), and the
// WITH_FLAG variants depended on the removed is_concurrent_inlining() API.
HEAP_ACCESSOR_B(Map, bit_field2, elements_kind, Map::Bits2::ElementsKindBits)
HEAP_ACCESSOR_B(Map, bit_field3, is_dictionary_map,
                Map::Bits3::IsDictionaryMapBit)
HEAP_ACCESSOR_B(Map, bit_field3, is_deprecated, Map::Bits3::IsDeprecatedBit)
HEAP_ACCESSOR_B(Map, bit_field3, NumberOfOwnDescriptors,
                Map::Bits3::NumberOfOwnDescriptorsBits)
HEAP_ACCESSOR_B(Map, bit_field3, is_migration_target,
                Map::Bits3::IsMigrationTargetBit)
HEAP_ACCESSOR_B(Map, bit_field, has_prototype_slot,
                Map::Bits1::HasPrototypeSlotBit)
HEAP_ACCESSOR_B(Map, bit_field, is_access_check_needed,
                Map::Bits1::IsAccessCheckNeededBit)
HEAP_ACCESSOR_B(Map, bit_field, is_callable, Map::Bits1::IsCallableBit)
HEAP_ACCESSOR_B(Map, bit_field, has_indexed_interceptor,
                Map::Bits1::HasIndexedInterceptorBit)
HEAP_ACCESSOR_B(Map, bit_field, is_constructor, Map::Bits1::IsConstructorBit)
HEAP_ACCESSOR_B(Map, bit_field, is_undetectable, Map::Bits1::IsUndetectableBit)
BIMODAL_ACCESSOR_C(Map, int, instance_size)
HEAP_ACCESSOR_C(Map, int, NextFreePropertyIndex)
BIMODAL_ACCESSOR_C(Map, int, UnusedPropertyFields)
HEAP_ACCESSOR_C(Map, InstanceType, instance_type)
BIMODAL_ACCESSOR_C(Map, bool, is_abandoned_prototype_map)
int ObjectBoilerplateDescriptionRef::size() const { return object()->size(); }
......@@ -2149,64 +1596,45 @@ DescriptorArrayRef MapRef::instance_descriptors() const {
}
// The map's prototype, or nullopt if a ref could not be created safely.
base::Optional<HeapObjectRef> MapRef::prototype() const {
  // Direct heap read is now the only mode; the old serialized prototype_data
  // fallback (and the unreachable duplicate return after it) referenced the
  // removed is_concurrent_inlining() API and broker-side MapData.
  return TryMakeRef(broker(), HeapObject::cast(object()->prototype()),
                    kAssumeMemoryFence);
}
// Walks back-pointers to the root map of this map's transition tree.
MapRef MapRef::FindRootMap() const {
  // Stale DCHECK on the removed is_concurrent_inlining() API dropped.
  // TODO(solanes, v8:7790): Consider caching the result of the root map.
  return MakeRefAssumeMemoryFence(broker(),
                                  object()->FindRootMap(broker()->isolate()));
}
// The map's constructor field.
ObjectRef MapRef::GetConstructor() const {
  // Direct heap read is now the only mode; the old data()->AsMap() path (left
  // here as unreachable duplicate code by the diff merge) is removed.
  // Immutable after initialization.
  return MakeRefAssumeMemoryFence(broker(), object()->GetConstructor());
}
// The map's back pointer (its parent in the transition tree).
HeapObjectRef MapRef::GetBackPointer() const {
  // Direct heap read is now the only mode; the old broker-side MapData path
  // (left as unreachable duplicate code by the diff merge) is removed.
  // Immutable after initialization.
  return MakeRefAssumeMemoryFence(broker(),
                                  HeapObject::cast(object()->GetBackPointer()));
}
// Whether the typed array's backing store is on the V8 heap.
bool JSTypedArrayRef::is_on_heap() const {
  // Stale DCHECK on the removed is_concurrent_inlining() API dropped.
  // Underlying field written 1. during initialization or 2. with release-store.
  return object()->is_on_heap(kAcquireLoad);
}
// Element count of an off-heap typed array. Callers must only use this for
// off-heap arrays (enforced by the CHECK).
size_t JSTypedArrayRef::length() const {
  // Stale DCHECK on the removed is_concurrent_inlining() API dropped.
  CHECK(!is_on_heap());
  // Immutable after initialization.
  return object()->length();
}
// The ArrayBuffer backing an off-heap typed array (CHECKed off-heap).
HeapObjectRef JSTypedArrayRef::buffer() const {
  // Stale DCHECK on the removed is_concurrent_inlining() API dropped.
  CHECK(!is_on_heap());
  // Immutable after initialization.
  return MakeRef<HeapObject>(broker(), object()->buffer());
}
void* JSTypedArrayRef::data_ptr() const {
DCHECK(data_->should_access_heap() || broker()->is_concurrent_inlining());
CHECK(!is_on_heap());
// Underlying field written 1. during initialization or 2. protected by the
// is_on_heap release/acquire semantics (external_pointer store happens-before
......@@ -2217,15 +1645,11 @@ void* JSTypedArrayRef::data_ptr() const {
}
// Whether in-object slack tracking is still running for this map; forwards
// directly to the heap Map. The old IF_ACCESS_FROM_HEAP_WITH_FLAG_C path and
// the serialized bit_field3 decode (left as unreachable duplicate code by the
// diff merge) are removed along with the is_concurrent_inlining() flag.
bool MapRef::IsInobjectSlackTrackingInProgress() const {
  return object()->IsInobjectSlackTrackingInProgress();
}
}
// Constructor-function index for primitive maps; forwards directly to the
// heap Map. The old macro/serialized-data path (including its
// CHECK(IsPrimitiveMap()), left as unreachable duplicate code by the diff
// merge) is removed along with the is_concurrent_inlining() flag.
int MapRef::constructor_function_index() const {
  return object()->GetConstructorFunctionIndex();
}
bool MapRef::is_stable() const {
......@@ -2233,20 +1657,12 @@ bool MapRef::is_stable() const {
return !Map::Bits3::IsUnstableBit::decode(data()->AsMap()->bit_field3());
}
// Whether this map can be deprecated; plain direct heap read. The diff merge
// left two definitions of this function (the old macro/serialized-data
// version plus the new one-liner), which is an ODR violation -- keep only
// the new direct-read form.
bool MapRef::CanBeDeprecated() const { return object()->CanBeDeprecated(); }
// Whether this map can have transitions; plain direct heap read. The diff
// merge left a duplicate old definition (macro + serialized-data body) above
// the new one-liner -- keep only the new direct-read form.
bool MapRef::CanTransition() const { return object()->CanTransition(); }
// Word offset at which in-object properties start; plain direct heap read.
// The old macro/serialized-data path (left as unreachable duplicate code by
// the diff merge) is removed along with the is_concurrent_inlining() flag.
int MapRef::GetInObjectPropertiesStartInWords() const {
  return object()->GetInObjectPropertiesStartInWords();
}
int MapRef::GetInObjectProperties() const {
......@@ -2384,7 +1800,6 @@ bool ObjectRef::should_access_heap() const {
base::Optional<ObjectRef> JSObjectRef::GetOwnConstantElement(
const FixedArrayBaseRef& elements_ref, uint32_t index,
CompilationDependencies* dependencies) const {
DCHECK(data_->should_access_heap() || broker()->is_concurrent_inlining());
base::Optional<Object> maybe_element = GetOwnConstantElementFromHeap(
*elements_ref.object(), map().elements_kind(), index);
if (!maybe_element.has_value()) return {};
......@@ -2399,7 +1814,6 @@ base::Optional<ObjectRef> JSObjectRef::GetOwnConstantElement(
base::Optional<Object> JSObjectRef::GetOwnConstantElementFromHeap(
FixedArrayBase elements, ElementsKind elements_kind, uint32_t index) const {
DCHECK(data_->should_access_heap() || broker()->is_concurrent_inlining());
DCHECK_LE(index, JSObject::kMaxElementIndex);
Handle<JSObject> holder = object();
......@@ -2444,7 +1858,6 @@ base::Optional<Object> JSObjectRef::GetOwnConstantElementFromHeap(
base::Optional<ObjectRef> JSObjectRef::GetOwnFastDataProperty(
Representation field_representation, FieldIndex index,
CompilationDependencies* dependencies) const {
DCHECK(data_->should_access_heap() || broker()->is_concurrent_inlining());
base::Optional<ObjectRef> result = GetOwnFastDataPropertyFromHeap(
broker(), *this, field_representation, index);
if (result.has_value()) {
......@@ -2456,7 +1869,6 @@ base::Optional<ObjectRef> JSObjectRef::GetOwnFastDataProperty(
base::Optional<ObjectRef> JSObjectRef::GetOwnDictionaryProperty(
InternalIndex index, CompilationDependencies* dependencies) const {
DCHECK(data_->should_access_heap() || broker()->is_concurrent_inlining());
CHECK(index.is_found());
base::Optional<ObjectRef> result =
GetOwnDictionaryPropertyFromHeap(broker(), object(), index);
......@@ -2475,17 +1887,12 @@ ObjectRef JSArrayRef::GetBoilerplateLength() const {
}
// Relaxed read of the array's length field. "Unsafe" because the value may
// be concurrently mutated; callers must tolerate stale values.
base::Optional<ObjectRef> JSArrayRef::length_unsafe() const {
  // Direct heap read is now the only mode; the old data()->AsJSArray()
  // fallback (left as unreachable duplicate code by the diff merge) is
  // removed along with the is_concurrent_inlining() flag.
  return TryMakeRef(broker(),
                    object()->length(broker()->isolate(), kRelaxedLoad));
}
base::Optional<ObjectRef> JSArrayRef::GetOwnCowElement(
FixedArrayBaseRef elements_ref, uint32_t index) const {
DCHECK(data_->should_access_heap() || broker()->is_concurrent_inlining());
// Note: we'd like to check `elements_ref == elements()` here, but due to
// concurrency this may not hold. The code below must be able to deal with
// concurrent `elements` modifications.
......@@ -2588,15 +1995,7 @@ base::Optional<JSObjectRef> AllocationSiteRef::boilerplate() const {
// Relaxed read of the elements backing store.
base::Optional<FixedArrayBaseRef> JSObjectRef::elements(
    RelaxedLoadTag tag) const {
  // Direct heap read is now the only mode; the old serialized_elements()
  // fallback (left as unreachable duplicate code by the diff merge) is
  // removed along with the is_concurrent_inlining() flag.
  return TryMakeRef(broker(), object()->elements(tag));
}
int FixedArrayBaseRef::length() const {
......@@ -2653,13 +2052,6 @@ bool NameRef::IsUniqueName() const {
return IsInternalizedString() || IsSymbol();
}
// Main-thread serialization hook for the non-concurrent-inlining
// configuration; obsolete once that mode is removed (see v8:7790).
void RegExpBoilerplateDescriptionRef::Serialize(NotConcurrentInliningTag) {
  // TODO(jgruber,v8:7790): Remove once member types are also never serialized.
  // Until then, we have to call these functions once on the main thread to
  // trigger serialization.
  data();
}
// Returns the handle stored on the broker-side data object for this ref.
Handle<Object> ObjectRef::object() const { return data_->object(); }
......@@ -2803,55 +2195,24 @@ ScopeInfoRef SharedFunctionInfoRef::scope_info() const {
return MakeRefAssumeMemoryFence(broker(), object()->scope_info(kAcquireLoad));
}
// Copies the object-create map into the broker-side JSObjectData.
// No-op when the ref reads directly from the heap.
void JSObjectRef::SerializeObjectCreateMap(NotConcurrentInliningTag tag) {
  if (data_->should_access_heap()) return;
  data()->AsJSObject()->SerializeObjectCreateMap(broker(), tag);
}
base::Optional<MapRef> JSObjectRef::GetObjectCreateMap() const {
if (data_->should_access_heap() || broker()->is_concurrent_inlining()) {
Handle<Map> map_handle = Handle<Map>::cast(map().object());
// Note: implemented as an acquire-load.
if (!map_handle->is_prototype_map()) return {};
Handle<Object> maybe_proto_info = broker()->CanonicalPersistentHandle(
map_handle->prototype_info(kAcquireLoad));
if (!maybe_proto_info->IsPrototypeInfo()) return {};
MaybeObject maybe_object_create_map =
Handle<PrototypeInfo>::cast(maybe_proto_info)
->object_create_map(kAcquireLoad);
if (!maybe_object_create_map->IsWeak()) return {};
return MapRef(broker(),
broker()->GetOrCreateData(
maybe_object_create_map->GetHeapObjectAssumeWeak(),
kAssumeMemoryFence));
}
ObjectData* map_data = data()->AsJSObject()->object_create_map(broker());
if (map_data == nullptr) return base::Optional<MapRef>();
if (map_data->should_access_heap()) {
return TryMakeRef(broker(), Handle<Map>::cast(map_data->object()));
}
return MapRef(broker(), map_data->AsMap());
}
Handle<Map> map_handle = Handle<Map>::cast(map().object());
// Note: implemented as an acquire-load.
if (!map_handle->is_prototype_map()) return {};
// Copies the map's back pointer into the broker-side MapData. No-op when
// the ref reads directly from the heap; otherwise the broker must be in
// serializing mode.
void MapRef::SerializeBackPointer(NotConcurrentInliningTag tag) {
  if (data_->should_access_heap()) return;
  CHECK_EQ(broker()->mode(), JSHeapBroker::kSerializing);
  data()->AsMap()->SerializeBackPointer(broker(), tag);
}
Handle<Object> maybe_proto_info = broker()->CanonicalPersistentHandle(
map_handle->prototype_info(kAcquireLoad));
if (!maybe_proto_info->IsPrototypeInfo()) return {};
// Attempts to serialize the map's prototype into the broker-side MapData.
// Trivially succeeds when direct heap access (or concurrent inlining) makes
// serialization unnecessary; otherwise requires serializing mode and
// reports whether the underlying serialization succeeded.
bool MapRef::TrySerializePrototype(NotConcurrentInliningTag tag) {
  if (data_->should_access_heap() || broker()->is_concurrent_inlining()) {
    return true;
  }
  CHECK_EQ(broker()->mode(), JSHeapBroker::kSerializing);
  return data()->AsMap()->TrySerializePrototype(broker(), tag);
}
MaybeObject maybe_object_create_map =
Handle<PrototypeInfo>::cast(maybe_proto_info)
->object_create_map(kAcquireLoad);
if (!maybe_object_create_map->IsWeak()) return {};
void MapRef::SerializePrototype(NotConcurrentInliningTag tag) {
CHECK(TrySerializePrototype(tag));
return MapRef(broker(),
broker()->GetOrCreateData(
maybe_object_create_map->GetHeapObjectAssumeWeak(),
kAssumeMemoryFence));
}
bool PropertyCellRef::Cache() const {
......@@ -2869,7 +2230,6 @@ bool NativeContextRef::GlobalIsDetached() const {
base::Optional<PropertyCellRef> JSGlobalObjectRef::GetPropertyCell(
NameRef const& name) const {
DCHECK(data_->should_access_heap() || broker()->is_concurrent_inlining());
base::Optional<PropertyCell> maybe_cell =
ConcurrentLookupIterator::TryGetPropertyCell(
broker()->isolate(), broker()->local_isolate_or_isolate(),
......@@ -2904,12 +2264,10 @@ unsigned CodeRef::GetInlinedBytecodeSize() const {
#undef BIMODAL_ACCESSOR
#undef BIMODAL_ACCESSOR_B
#undef BIMODAL_ACCESSOR_C
#undef BIMODAL_ACCESSOR_WITH_FLAG_B
#undef BIMODAL_ACCESSOR_WITH_FLAG_C
#undef HEAP_ACCESSOR_B
#undef HEAP_ACCESSOR_C
#undef IF_ACCESS_FROM_HEAP
#undef IF_ACCESS_FROM_HEAP_C
#undef IF_ACCESS_FROM_HEAP_WITH_FLAG_C
#undef TRACE
#undef TRACE_MISSING
......
......@@ -59,13 +59,6 @@ inline bool IsAnyStore(AccessMode mode) {
return mode == AccessMode::kStore || mode == AccessMode::kStoreInLiteral;
}
// Clarifies in function signatures that a method may only be called when
// concurrent inlining is disabled.
// Zero-state marker type: taking one as a parameter documents that a method
// may only be called when concurrent inlining is disabled. The constructor
// takes the broker, presumably to check its mode -- defined elsewhere.
class NotConcurrentInliningTag final {
 public:
  explicit NotConcurrentInliningTag(JSHeapBroker* broker);
};
enum class OddballType : uint8_t {
kNone, // Not an Oddball.
kBoolean, // True or False.
......@@ -424,13 +417,9 @@ class JSObjectRef : public JSReceiverRef {
// relaxed read. This is to ease the transition to unserialized (or
// background-serialized) elements.
base::Optional<FixedArrayBaseRef> elements(RelaxedLoadTag) const;
void SerializeElements(NotConcurrentInliningTag tag);
bool IsElementsTenured(const FixedArrayBaseRef& elements);
void SerializeObjectCreateMap(NotConcurrentInliningTag tag);
base::Optional<MapRef> GetObjectCreateMap() const;
void SerializeAsBoilerplateRecursive(NotConcurrentInliningTag tag);
};
class JSDataViewRef : public JSObjectRef {
......@@ -489,8 +478,6 @@ class RegExpBoilerplateDescriptionRef : public HeapObjectRef {
Handle<RegExpBoilerplateDescription> object() const;
void Serialize(NotConcurrentInliningTag tag);
FixedArrayRef data() const;
StringRef source() const;
int flags() const;
......@@ -577,8 +564,6 @@ class NativeContextRef : public ContextRef {
Handle<NativeContext> object() const;
void Serialize(NotConcurrentInliningTag tag);
#define DECL_ACCESSOR(type, name) type##Ref name() const;
BROKER_NATIVE_CONTEXT_FIELDS(DECL_ACCESSOR)
#undef DECL_ACCESSOR
......@@ -662,8 +647,6 @@ class AllocationSiteRef : public HeapObjectRef {
AllocationType GetAllocationType() const;
ObjectRef nested_site() const;
void SerializeRecursive(NotConcurrentInliningTag tag);
base::Optional<JSObjectRef> boilerplate() const;
ElementsKind GetElementsKind() const;
bool CanInlineCall() const;
......@@ -725,17 +708,10 @@ class V8_EXPORT_PRIVATE MapRef : public HeapObjectRef {
INSTANCE_TYPE_CHECKERS(DEF_TESTER)
#undef DEF_TESTER
void SerializeBackPointer(NotConcurrentInliningTag tag);
HeapObjectRef GetBackPointer() const;
void SerializePrototype(NotConcurrentInliningTag tag);
// TODO(neis): We should be able to remove TrySerializePrototype once
// concurrent-inlining is always on. Then we can also change the return type
// of prototype() back to HeapObjectRef.
bool TrySerializePrototype(NotConcurrentInliningTag tag);
base::Optional<HeapObjectRef> prototype() const;
void SerializeForElementStore(NotConcurrentInliningTag tag);
bool HasOnlyStablePrototypesWithFastElements(
ZoneVector<MapRef>* prototype_maps);
......
......@@ -43,8 +43,7 @@ void JSHeapBroker::IncrementTracingIndentation() { ++trace_indentation_; }
void JSHeapBroker::DecrementTracingIndentation() { --trace_indentation_; }
JSHeapBroker::JSHeapBroker(Isolate* isolate, Zone* broker_zone,
bool tracing_enabled, bool is_concurrent_inlining,
CodeKind code_kind)
bool tracing_enabled, CodeKind code_kind)
: isolate_(isolate),
#if V8_COMPRESS_POINTERS
cage_base_(isolate),
......@@ -55,7 +54,6 @@ JSHeapBroker::JSHeapBroker(Isolate* isolate, Zone* broker_zone,
root_index_map_(isolate),
array_and_object_prototypes_(zone()),
tracing_enabled_(tracing_enabled),
is_concurrent_inlining_(is_concurrent_inlining),
code_kind_(code_kind),
feedback_(zone()),
property_access_infos_(zone()),
......@@ -710,9 +708,6 @@ ProcessedFeedback const& JSHeapBroker::ReadFeedbackForArrayOrObjectLiteral(
AllocationSiteRef site =
MakeRefAssumeMemoryFence(this, AllocationSite::cast(object));
if (!is_concurrent_inlining() && site.PointsToLiteral()) {
site.SerializeRecursive(NotConcurrentInliningTag{this});
}
return *zone()->New<LiteralFeedback>(site, nexus.kind());
}
......@@ -728,9 +723,6 @@ ProcessedFeedback const& JSHeapBroker::ReadFeedbackForRegExpLiteral(
RegExpBoilerplateDescriptionRef boilerplate = MakeRefAssumeMemoryFence(
this, RegExpBoilerplateDescription::cast(object));
if (!is_concurrent_inlining()) {
boilerplate.Serialize(NotConcurrentInliningTag{this});
}
return *zone()->New<RegExpLiteralFeedback>(boilerplate, nexus.kind());
}
......@@ -971,12 +963,10 @@ PropertyAccessInfo JSHeapBroker::GetPropertyAccessInfo(
AccessInfoFactory factory(this, dependencies, zone());
PropertyAccessInfo access_info =
factory.ComputePropertyAccessInfo(map, name, access_mode);
if (is_concurrent_inlining_) {
TRACE(this, "Storing PropertyAccessInfo for "
<< access_mode << " of property " << name << " on map "
<< map);
property_access_infos_.insert({target, access_info});
}
TRACE(this, "Storing PropertyAccessInfo for "
<< access_mode << " of property " << name << " on map "
<< map);
property_access_infos_.insert({target, access_info});
return access_info;
}
......@@ -989,16 +979,16 @@ MinimorphicLoadPropertyAccessInfo JSHeapBroker::GetPropertyAccessInfo(
AccessInfoFactory factory(this, nullptr, zone());
MinimorphicLoadPropertyAccessInfo access_info =
factory.ComputePropertyAccessInfo(feedback);
if (is_concurrent_inlining_) {
// We can assume a memory fence on {source.vector} because in production,
// the vector has already passed the gc predicate. Unit tests create
// FeedbackSource objects directly from handles, but they run on
// the main thread.
TRACE(this, "Storing MinimorphicLoadPropertyAccessInfo for "
<< source.index() << " "
<< MakeRefAssumeMemoryFence<Object>(this, source.vector));
minimorphic_property_access_infos_.insert({source, access_info});
}
// We can assume a memory fence on {source.vector} because in production,
// the vector has already passed the gc predicate. Unit tests create
// FeedbackSource objects directly from handles, but they run on
// the main thread.
TRACE(this, "Storing MinimorphicLoadPropertyAccessInfo for "
<< source.index() << " "
<< MakeRefAssumeMemoryFence<Object>(this, source.vector));
minimorphic_property_access_infos_.insert({source, access_info});
return access_info;
}
......
......@@ -94,12 +94,12 @@ DEFINE_OPERATORS_FOR_FLAGS(GetOrCreateDataFlags)
class V8_EXPORT_PRIVATE JSHeapBroker {
public:
JSHeapBroker(Isolate* isolate, Zone* broker_zone, bool tracing_enabled,
bool is_concurrent_inlining, CodeKind code_kind);
CodeKind code_kind);
// For use only in tests, sets default values for some arguments. Avoids
// churn when new flags are added.
JSHeapBroker(Isolate* isolate, Zone* broker_zone)
: JSHeapBroker(isolate, broker_zone, FLAG_trace_heap_broker, false,
: JSHeapBroker(isolate, broker_zone, FLAG_trace_heap_broker,
CodeKind::TURBOFAN) {}
~JSHeapBroker();
......@@ -127,7 +127,6 @@ class V8_EXPORT_PRIVATE JSHeapBroker {
Zone* zone() const { return zone_; }
bool tracing_enabled() const { return tracing_enabled_; }
bool is_concurrent_inlining() const { return is_concurrent_inlining_; }
bool is_turboprop() const { return code_kind_ == CodeKind::TURBOPROP; }
NexusConfig feedback_nexus_config() const {
......@@ -436,7 +435,6 @@ class V8_EXPORT_PRIVATE JSHeapBroker {
array_and_object_prototypes_;
BrokerMode mode_ = kDisabled;
bool const tracing_enabled_;
bool const is_concurrent_inlining_;
CodeKind const code_kind_;
std::unique_ptr<PersistentHandles> ph_;
LocalIsolate* local_isolate_ = nullptr;
......
// Copyright 2018 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/compiler/js-heap-copy-reducer.h"
#include "src/compiler/common-operator.h"
#include "src/compiler/js-heap-broker.h"
#include "src/compiler/js-operator.h"
#include "src/compiler/node-properties.h"
#include "src/compiler/simplified-operator.h"
#include "src/heap/factory-inl.h"
#include "src/objects/map.h"
#include "src/objects/scope-info.h"
#include "src/objects/template-objects.h"
namespace v8 {
namespace internal {
namespace compiler {
// In the functions below, we call the ObjectRef (or subclass) constructor in
// order to trigger serialization if not yet done.
JSHeapCopyReducer::JSHeapCopyReducer(JSHeapBroker* broker) : broker_(broker) {}
JSHeapBroker* JSHeapCopyReducer::broker() { return broker_; }
// Inspects a single node and creates ObjectRefs (triggering broker-side
// serialization where still needed) for any heap data the node's operator
// embeds: heap constants, feedback slots, scope infos, maps and names.
// Always returns NoChange() -- the only effect is populating the broker.
Reduction JSHeapCopyReducer::Reduce(Node* node) {
  switch (node->opcode()) {
    case IrOpcode::kHeapConstant: {
      ObjectRef object = MakeRef(broker(), HeapConstantOf(node->op()));
      if (object.IsJSObject()) {
        object.AsJSObject().SerializeObjectCreateMap(
            NotConcurrentInliningTag{broker()});
      }
      break;
    }
    case IrOpcode::kJSCreateArray: {
      CreateArrayParametersOf(node->op()).site(broker());
      break;
    }
    case IrOpcode::kJSCreateArguments: {
      // The shared function info is reached through the frame state input.
      Node* const frame_state = NodeProperties::GetFrameStateInput(node);
      FrameStateInfo state_info = FrameStateInfoOf(frame_state->op());
      MakeRef(broker(), state_info.shared_info().ToHandleChecked());
      break;
    }
    case IrOpcode::kJSCreateBlockContext: {
      USE(ScopeInfoOf(broker(), node->op()));
      break;
    }
    case IrOpcode::kJSCreateBoundFunction: {
      CreateBoundFunctionParameters const& p =
          CreateBoundFunctionParametersOf(node->op());
      p.map(broker());
      break;
    }
    case IrOpcode::kJSCreateCatchContext: {
      USE(ScopeInfoOf(broker(), node->op()));
      break;
    }
    case IrOpcode::kJSCreateClosure: {
      CreateClosureParameters const& p = CreateClosureParametersOf(node->op());
      p.shared_info(broker());
      p.code(broker());
      break;
    }
    case IrOpcode::kJSCreateEmptyLiteralArray: {
      FeedbackParameter const& p = FeedbackParameterOf(node->op());
      if (p.feedback().IsValid()) {
        broker()->GetFeedbackForArrayOrObjectLiteral(p.feedback());
      }
      break;
    }
    /* Unary ops. */
    case IrOpcode::kJSBitwiseNot:
    case IrOpcode::kJSDecrement:
    case IrOpcode::kJSIncrement:
    case IrOpcode::kJSNegate: {
      FeedbackParameter const& p = FeedbackParameterOf(node->op());
      if (p.feedback().IsValid()) {
        // Unary ops are treated as binary ops with respect to feedback.
        broker()->GetFeedbackForBinaryOperation(p.feedback());
      }
      break;
    }
    /* Binary ops. */
    case IrOpcode::kJSAdd:
    case IrOpcode::kJSSubtract:
    case IrOpcode::kJSMultiply:
    case IrOpcode::kJSDivide:
    case IrOpcode::kJSModulus:
    case IrOpcode::kJSExponentiate:
    case IrOpcode::kJSBitwiseOr:
    case IrOpcode::kJSBitwiseXor:
    case IrOpcode::kJSBitwiseAnd:
    case IrOpcode::kJSShiftLeft:
    case IrOpcode::kJSShiftRight:
    case IrOpcode::kJSShiftRightLogical: {
      FeedbackParameter const& p = FeedbackParameterOf(node->op());
      if (p.feedback().IsValid()) {
        broker()->GetFeedbackForBinaryOperation(p.feedback());
      }
      break;
    }
    /* Compare ops. */
    case IrOpcode::kJSEqual:
    case IrOpcode::kJSGreaterThan:
    case IrOpcode::kJSGreaterThanOrEqual:
    case IrOpcode::kJSLessThan:
    case IrOpcode::kJSLessThanOrEqual:
    case IrOpcode::kJSStrictEqual: {
      FeedbackParameter const& p = FeedbackParameterOf(node->op());
      if (p.feedback().IsValid()) {
        broker()->GetFeedbackForCompareOperation(p.feedback());
      }
      break;
    }
    case IrOpcode::kJSCreateFunctionContext: {
      CreateFunctionContextParameters const& p =
          CreateFunctionContextParametersOf(node->op());
      p.scope_info(broker());
      break;
    }
    case IrOpcode::kJSCreateLiteralArray:
    case IrOpcode::kJSCreateLiteralObject: {
      CreateLiteralParameters const& p = CreateLiteralParametersOf(node->op());
      if (p.feedback().IsValid()) {
        broker()->GetFeedbackForArrayOrObjectLiteral(p.feedback());
      }
      break;
    }
    case IrOpcode::kJSCreateLiteralRegExp: {
      CreateLiteralParameters const& p = CreateLiteralParametersOf(node->op());
      if (p.feedback().IsValid()) {
        broker()->GetFeedbackForRegExpLiteral(p.feedback());
      }
      break;
    }
    case IrOpcode::kJSGetTemplateObject: {
      GetTemplateObjectParameters const& p =
          GetTemplateObjectParametersOf(node->op());
      p.shared(broker());
      p.description(broker());
      broker()->GetFeedbackForTemplateObject(p.feedback());
      break;
    }
    case IrOpcode::kJSCreateWithContext: {
      USE(ScopeInfoOf(broker(), node->op()));
      break;
    }
    case IrOpcode::kJSLoadNamed: {
      NamedAccess const& p = NamedAccessOf(node->op());
      NameRef name = p.name(broker());
      if (p.feedback().IsValid()) {
        broker()->GetFeedbackForPropertyAccess(p.feedback(), AccessMode::kLoad,
                                               name);
      }
      break;
    }
    case IrOpcode::kJSLoadNamedFromSuper: {
      NamedAccess const& p = NamedAccessOf(node->op());
      NameRef name = p.name(broker());
      if (p.feedback().IsValid()) {
        broker()->GetFeedbackForPropertyAccess(p.feedback(), AccessMode::kLoad,
                                               name);
      }
      break;
    }
    case IrOpcode::kJSStoreNamed: {
      NamedAccess const& p = NamedAccessOf(node->op());
      p.name(broker());
      break;
    }
    case IrOpcode::kStoreField:
    case IrOpcode::kLoadField: {
      // Field accesses may embed an optional map and/or name handle.
      FieldAccess access = FieldAccessOf(node->op());
      Handle<Map> map_handle;
      if (access.map.ToHandle(&map_handle)) {
        MakeRef(broker(), map_handle);
      }
      Handle<Name> name_handle;
      if (access.name.ToHandle(&name_handle)) {
        MakeRef(broker(), name_handle);
      }
      break;
    }
    case IrOpcode::kMapGuard: {
      ZoneHandleSet<Map> const& maps = MapGuardMapsOf(node->op());
      for (Handle<Map> map : maps) {
        MakeRef(broker(), map);
      }
      break;
    }
    case IrOpcode::kCheckMaps: {
      ZoneHandleSet<Map> const& maps = CheckMapsParametersOf(node->op()).maps();
      for (Handle<Map> map : maps) {
        MakeRef(broker(), map);
      }
      break;
    }
    case IrOpcode::kCompareMaps: {
      ZoneHandleSet<Map> const& maps = CompareMapsParametersOf(node->op());
      for (Handle<Map> map : maps) {
        MakeRef(broker(), map);
      }
      break;
    }
    case IrOpcode::kJSLoadProperty: {
      PropertyAccess const& p = PropertyAccessOf(node->op());
      AccessMode access_mode = AccessMode::kLoad;
      if (p.feedback().IsValid()) {
        broker()->GetFeedbackForPropertyAccess(p.feedback(), access_mode,
                                               base::nullopt);
      }
      break;
    }
    default:
      break;
  }
  return NoChange();
}
} // namespace compiler
} // namespace internal
} // namespace v8
// Copyright 2018 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_COMPILER_JS_HEAP_COPY_REDUCER_H_
#define V8_COMPILER_JS_HEAP_COPY_REDUCER_H_
#include "src/compiler/graph-reducer.h"
namespace v8 {
namespace internal {
namespace compiler {
class JSHeapBroker;
// The heap copy reducer makes sure that the relevant heap data referenced
// by handles embedded in the graph is copied to the heap broker.
// TODO(jarin) This is just a temporary solution until the graph uses only
// ObjetRef-derived reference to refer to the heap data.
class V8_EXPORT_PRIVATE JSHeapCopyReducer : public Reducer {
 public:
  // The broker is borrowed, not owned.
  explicit JSHeapCopyReducer(JSHeapBroker* broker);

  const char* reducer_name() const override { return "JSHeapCopyReducer"; }

  // Copies heap data referenced by {node} to the broker; returns NoChange()
  // (see the .cc) -- the graph itself is never modified.
  Reduction Reduce(Node* node) override;

 private:
  JSHeapBroker* broker();
  JSHeapBroker* broker_;  // Not owned.
};
} // namespace compiler
} // namespace internal
} // namespace v8
#endif // V8_COMPILER_JS_HEAP_COPY_REDUCER_H_
......@@ -533,24 +533,18 @@ Reduction JSInliner::ReduceJSCall(Node* node) {
// always hold true.
CHECK(shared_info->is_compiled());
if (info_->source_positions()) {
if (broker()->is_concurrent_inlining()) {
if (!shared_info->object()->AreSourcePositionsAvailable(
broker()->local_isolate_or_isolate())) {
// This case is expected to be very rare, since we generate source
// positions for all functions when debugging or profiling are turned
// on (see Isolate::NeedsDetailedOptimizedCodeLineInfo). Source
// positions should only be missing here if there is a race between 1)
// enabling/disabling the debugger/profiler, and 2) this compile job.
// In that case, we simply don't inline.
TRACE("Not inlining " << *shared_info << " into " << outer_shared_info
<< " because source positions are missing.");
return NoChange();
}
} else {
SharedFunctionInfo::EnsureSourcePositionsAvailable(isolate(),
shared_info->object());
}
if (info_->source_positions() &&
!shared_info->object()->AreSourcePositionsAvailable(
broker()->local_isolate_or_isolate())) {
// This case is expected to be very rare, since we generate source
// positions for all functions when debugging or profiling are turned
// on (see Isolate::NeedsDetailedOptimizedCodeLineInfo). Source
// positions should only be missing here if there is a race between 1)
// enabling/disabling the debugger/profiler, and 2) this compile job.
// In that case, we simply don't inline.
TRACE("Not inlining " << *shared_info << " into " << outer_shared_info
<< " because source positions are missing.");
return NoChange();
}
// Determine the target's feedback vector and its context.
......
......@@ -49,7 +49,6 @@
#include "src/compiler/js-create-lowering.h"
#include "src/compiler/js-generic-lowering.h"
#include "src/compiler/js-heap-broker.h"
#include "src/compiler/js-heap-copy-reducer.h"
#include "src/compiler/js-inlining-heuristic.h"
#include "src/compiler/js-intrinsic-lowering.h"
#include "src/compiler/js-native-context-specialization.h"
......@@ -157,9 +156,9 @@ class PipelineData {
instruction_zone_(instruction_zone_scope_.zone()),
codegen_zone_scope_(zone_stats_, kCodegenZoneName),
codegen_zone_(codegen_zone_scope_.zone()),
broker_(new JSHeapBroker(
isolate_, info_->zone(), info_->trace_heap_broker(),
info_->concurrent_inlining(), info->code_kind())),
broker_(new JSHeapBroker(isolate_, info_->zone(),
info_->trace_heap_broker(),
info->code_kind())),
register_allocation_zone_scope_(zone_stats_,
kRegisterAllocationZoneName),
register_allocation_zone_(register_allocation_zone_scope_.zone()),
......@@ -1200,17 +1199,8 @@ PipelineCompilationJob::Status PipelineCompilationJob::PrepareJobImpl(
pipeline_.InitializeHeapBroker();
if (!data_.broker()->is_concurrent_inlining()) {
if (!pipeline_.CreateGraph()) {
CHECK(!isolate->has_pending_exception());
return AbortOptimization(BailoutReason::kGraphBuildingFailed);
}
}
if (compilation_info()->concurrent_inlining()) {
// Serialization may have allocated.
isolate->heap()->PublishPendingAllocations();
}
// Serialization may have allocated.
isolate->heap()->PublishPendingAllocations();
return SUCCEEDED;
}
......@@ -1223,10 +1213,8 @@ PipelineCompilationJob::Status PipelineCompilationJob::ExecuteJobImpl(
LocalIsolateScope local_isolate_scope(data_.broker(), data_.info(),
local_isolate);
if (data_.broker()->is_concurrent_inlining()) {
if (!pipeline_.CreateGraph()) {
return AbortOptimization(BailoutReason::kGraphBuildingFailed);
}
if (!pipeline_.CreateGraph()) {
return AbortOptimization(BailoutReason::kGraphBuildingFailed);
}
// We selectively Unpark inside OptimizeGraph*.
......@@ -1514,24 +1502,6 @@ struct HeapBrokerInitializationPhase {
}
};
struct CopyMetadataForConcurrentCompilePhase {
DECL_MAIN_THREAD_PIPELINE_PHASE_CONSTANTS(SerializeMetadata)
void Run(PipelineData* data, Zone* temp_zone) {
GraphReducer graph_reducer(
temp_zone, data->graph(), &data->info()->tick_counter(), data->broker(),
data->jsgraph()->Dead(), data->observe_node_manager());
JSHeapCopyReducer heap_copy_reducer(data->broker());
AddReducer(data, &graph_reducer, &heap_copy_reducer);
graph_reducer.ReduceGraph();
// Some nodes that are no longer in the graph might still be in the cache.
NodeVector cached_nodes(temp_zone);
data->jsgraph()->GetCachedNodes(&cached_nodes);
for (Node* const node : cached_nodes) graph_reducer.ReduceNode(node);
}
};
struct TypedLoweringPhase {
DECL_PIPELINE_PHASE_CONSTANTS(TypedLowering)
......@@ -2693,10 +2663,8 @@ void PipelineImpl::InitializeHeapBroker() {
}
data->broker()->SetTargetNativeContextRef(data->native_context());
if (data->broker()->is_concurrent_inlining()) {
Run<HeapBrokerInitializationPhase>();
data->broker()->StopSerializing();
}
Run<HeapBrokerInitializationPhase>();
data->broker()->StopSerializing();
data->EndPhaseKind();
}
......@@ -2728,15 +2696,6 @@ bool PipelineImpl::CreateGraph() {
}
}
// Run the type-sensitive lowerings and optimizations on the graph.
{
if (!data->broker()->is_concurrent_inlining()) {
Run<HeapBrokerInitializationPhase>();
Run<CopyMetadataForConcurrentCompilePhase>();
data->broker()->StopSerializing();
}
}
data->EndPhaseKind();
return true;
......@@ -3391,20 +3350,12 @@ MaybeHandle<Code> Pipeline::GenerateCodeForTesting(
CanonicalHandleScope canonical(isolate, info);
info->ReopenHandlesInNewHandleScope(isolate);
pipeline.InitializeHeapBroker();
// Emulating the proper pipeline, we call CreateGraph on different places
// (i.e before or after creating a LocalIsolateScope) depending on
// is_concurrent_inlining.
if (!data.broker()->is_concurrent_inlining()) {
if (!pipeline.CreateGraph()) return MaybeHandle<Code>();
}
}
{
LocalIsolateScope local_isolate_scope(data.broker(), info,
isolate->main_thread_local_isolate());
if (data.broker()->is_concurrent_inlining()) {
if (!pipeline.CreateGraph()) return MaybeHandle<Code>();
}
if (!pipeline.CreateGraph()) return MaybeHandle<Code>();
// We selectively Unpark inside OptimizeGraph.
if (!pipeline.OptimizeGraph(&linkage)) return MaybeHandle<Code>();
......
......@@ -434,7 +434,6 @@ DEFINE_NEG_IMPLICATION(enable_third_party_heap, inline_new)
DEFINE_NEG_IMPLICATION(enable_third_party_heap, allocation_site_pretenuring)
DEFINE_NEG_IMPLICATION(enable_third_party_heap, turbo_allocation_folding)
DEFINE_NEG_IMPLICATION(enable_third_party_heap, concurrent_recompilation)
DEFINE_NEG_IMPLICATION(enable_third_party_heap, concurrent_inlining)
DEFINE_NEG_IMPLICATION(enable_third_party_heap, script_streaming)
DEFINE_NEG_IMPLICATION(enable_third_party_heap,
parallel_compile_tasks_for_eager_toplevel)
......@@ -520,7 +519,6 @@ DEFINE_WEAK_VALUE_IMPLICATION(future, write_protect_code_memory, false)
DEFINE_BOOL_READONLY(dict_property_const_tracking,
V8_DICT_PROPERTY_CONST_TRACKING_BOOL,
"Use const tracking on dictionary properties")
DEFINE_NEG_IMPLICATION(dict_property_const_tracking, concurrent_inlining)
DEFINE_NEG_IMPLICATION(dict_property_const_tracking, turboprop)
// Flags for jitless
......@@ -548,7 +546,6 @@ DEFINE_BOOL(assert_types, false,
"generate runtime type assertions to test the typer")
// TODO(tebbi): Support allocating types from background thread.
DEFINE_NEG_IMPLICATION(assert_types, concurrent_recompilation)
DEFINE_NEG_IMPLICATION(assert_types, concurrent_inlining)
DEFINE_BOOL(trace_compilation_dependencies, false, "trace code dependencies")
// Depend on --trace-deopt-verbose for reporting dependency invalidations.
......@@ -754,12 +751,13 @@ DEFINE_INT(concurrent_recompilation_queue_length, 8,
"the length of the concurrent compilation queue")
DEFINE_INT(concurrent_recompilation_delay, 0,
"artificial compilation delay in ms")
DEFINE_BOOL(concurrent_inlining, true,
"run optimizing compiler's inlining phase on a separate thread")
// TODO(v8:12142): Remove this flag once all references (chromium feature flag,
// finch trials, field trial configs) are gone.
DEFINE_BOOL(concurrent_inlining, true, "deprecated, does nothing")
DEFINE_BOOL(
stress_concurrent_inlining, false,
"create additional concurrent optimization jobs but throw away result")
DEFINE_IMPLICATION(stress_concurrent_inlining, concurrent_inlining)
DEFINE_IMPLICATION(stress_concurrent_inlining, concurrent_recompilation)
DEFINE_NEG_IMPLICATION(stress_concurrent_inlining, lazy_feedback_allocation)
DEFINE_WEAK_VALUE_IMPLICATION(stress_concurrent_inlining, interrupt_budget,
15 * KB)
......
......@@ -92,7 +92,6 @@ class BackgroundCompilationThread final : public v8::base::Thread {
TEST(TestConcurrentSharedFunctionInfo) {
FlagScope<bool> allow_natives_syntax(&i::FLAG_allow_natives_syntax, true);
FlagScope<bool> concurrent_inlining(&i::FLAG_concurrent_inlining, true);
HandleAndZoneScope scope;
Isolate* isolate = scope.main_isolate();
......
......@@ -6,7 +6,6 @@
#include "src/compiler/compilation-dependencies.h"
#include "src/compiler/js-graph.h"
#include "src/compiler/js-heap-broker.h"
#include "src/compiler/js-heap-copy-reducer.h"
#include "src/compiler/js-typed-lowering.h"
#include "src/compiler/machine-operator.h"
#include "src/compiler/node-properties.h"
......@@ -92,8 +91,6 @@ class JSTypedLoweringTester : public HandleAndZoneScope {
}
Node* reduce(Node* node) {
JSHeapCopyReducer heap_copy_reducer(&js_heap_broker);
CHECK(!heap_copy_reducer.Reduce(node).Changed());
JSGraph jsgraph(main_isolate(), &graph, &common, &javascript, &simplified,
&machine);
GraphReducer graph_reducer(main_zone(), &graph, &tick_counter,
......
// Copyright 2019 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//
// Flags: --allow-natives-syntax --opt --noalways-opt --noturboprop
// Flags: --allow-natives-syntax --concurrent-inlining
// Flags: --opt --noalways-opt --noturboprop
class C {};
const c = new C;
const getPrototypeOf = Object.getPrototypeOf;
......
......@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// Flags: --interrupt-budget=1024 --concurrent-inlining
// Flags: --interrupt-budget=1024
const v2 = {};
const v4 = {a:42};
......
......@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// Flags: --allow-natives-syntax --concurrent-inlining
// Flags: --allow-natives-syntax
(function() {
var use_symbol = {
......
......@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// Flags: --expose-gc --noincremental-marking --no-concurrent-inlining
// Flags: --expose-gc --noincremental-marking --no-concurrent-recompilation
let cleanup_called = false;
function cleanup(holdings) {
......
......@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// Flags: --allow-natives-syntax --concurrent-inlining --no-use-ic --super-ic
// Flags: --allow-natives-syntax --no-use-ic --super-ic
class A {
bar() { }
......
......@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//
// Flags: --allow-natives-syntax --concurrent-inlining
// Flags: --allow-natives-syntax
function bar(error) {
try {
......
......@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// Flags: --allow-natives-syntax --concurrent-inlining
// Flags: --allow-natives-syntax
function bar() {
arr = new Array(4);
......
......@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// Flags: --allow-natives-syntax --expose-gc --opt --no-concurrent-inlining
// Flags: --allow-natives-syntax --expose-gc --opt --no-concurrent-recompilation
// Flags: --no-stress-opt --no-always-opt --no-assert-types
// This weak ref is for checking whether the closure-allocated object o got
......
......@@ -2,8 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// Flags: --expose-gc --allow-natives-syntax
// Flags: --concurrent-inlining --function-context-specialization
// Flags: --expose-gc --allow-natives-syntax --function-context-specialization
function main() {
var obj = {};
......
......@@ -4,7 +4,6 @@
#include "test/unittests/compiler/graph-unittest.h"
#include "src/compiler/js-heap-copy-reducer.h"
#include "src/compiler/node-properties.h"
#include "src/heap/factory.h"
#include "src/objects/objects-inl.h" // TODO(everyone): Make typer.h IWYU compliant.
......
......@@ -8,7 +8,6 @@
#include "src/compiler/access-builder.h"
#include "src/compiler/compilation-dependencies.h"
#include "src/compiler/js-graph.h"
#include "src/compiler/js-heap-copy-reducer.h"
#include "src/compiler/js-operator.h"
#include "src/compiler/machine-operator.h"
#include "src/compiler/node-properties.h"
......@@ -46,8 +45,6 @@ class JSTypedLoweringTest : public TypedGraphTest {
protected:
Reduction Reduce(Node* node) {
JSHeapCopyReducer heap_copy_reducer(broker());
CHECK(!heap_copy_reducer.Reduce(node).Changed());
MachineOperatorBuilder machine(zone());
SimplifiedOperatorBuilder simplified(zone());
JSGraph jsgraph(isolate(), graph(), common(), javascript(), &simplified,
......
......@@ -17,8 +17,6 @@ ALL_VARIANT_FLAGS = {
"sparkplug": [["--sparkplug"]],
"always_sparkplug": [[ "--always-sparkplug", "--sparkplug"]],
"minor_mc": [["--minor-mc"]],
"no_concurrent_inlining": [["--no-concurrent-inlining",
"--no-stress-concurrent-inlining"]],
"no_lfa": [["--no-lazy-feedback-allocation"]],
# No optimization means disable all optimizations. OptimizeFunctionOnNextCall
# would not force optimization too. It turns into a Nop. Please see
......@@ -31,8 +29,7 @@ ALL_VARIANT_FLAGS = {
"stress": [["--stress-opt", "--no-liftoff", "--stress-lazy-source-positions",
"--no-wasm-generic-wrapper"]],
"stress_concurrent_allocation": [["--stress-concurrent-allocation"]],
"stress_concurrent_inlining": [["--stress-concurrent-inlining",
"--concurrent-inlining"]],
"stress_concurrent_inlining": [["--stress-concurrent-inlining"]],
"stress_js_bg_compile_wasm_code_gc": [["--stress-background-compile",
"--stress-wasm-code-gc"]],
"stress_incremental_marking": [["--stress-incremental-marking"]],
......@@ -62,7 +59,8 @@ INCOMPATIBLE_FLAGS_PER_VARIANT = {
"stress_concurrent_allocation": ["--single-threaded-gc", "--predictable"],
"stress_concurrent_inlining": ["--single-threaded", "--predictable",
"--turboprop", "--lazy-feedback-allocation",
"--assert-types"],
"--assert-types",
"--no-concurrent-recompilation"],
"turboprop": ["--stress_concurrent_inlining"],
# The fast API tests initialize an embedder object that never needs to be
# serialized to the snapshot, so we don't have a
......@@ -82,7 +80,7 @@ INCOMPATIBLE_FLAGS_PER_VARIANT = {
# There is a negative implication: --perf-prof disables
# --wasm-write-protect-code-memory.
"wasm_write_protect_code": ["--perf-prof"],
"assert_types": ["--concurrent-recompilation", "--concurrent-inlining", "--stress_concurrent_inlining", "--no-assert-types"],
"assert_types": ["--concurrent-recompilation", "--stress_concurrent_inlining", "--no-assert-types"],
}
# Flags that lead to a contradiction under certain build variables.
......@@ -100,7 +98,6 @@ INCOMPATIBLE_FLAGS_PER_BUILD_VARIABLE = {
"--stress-concurrent-allocation",
"--stress-concurrent-inlining"],
"dict_property_const_tracking": [
"--concurrent-inlining",
"--turboprop",
"--stress-concurrent-inlining"],
}
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment