Commit 204dc4aa authored by Jakob Gruber, committed by V8 LUCI CQ

[compiler] Remove SerializerForBackgroundCompilation

Based on a CL by mvstanton@.

Bug: v8:7790,v8:12030,v8:12031,v8:12041
Change-Id: I58b75bd96c724a99133bec7d3bd6cf4e0c9be6d4
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3059683
Reviewed-by: Jakob Kummerow <jkummerow@chromium.org>
Reviewed-by: Georg Neis <neis@chromium.org>
Commit-Queue: Jakob Gruber <jgruber@chromium.org>
Cr-Commit-Position: refs/heads/master@{#76055}
parent c8dd61cb
......@@ -2441,9 +2441,6 @@ filegroup(
"src/compiler/scheduler.h",
"src/compiler/select-lowering.cc",
"src/compiler/select-lowering.h",
"src/compiler/serializer-for-background-compilation.cc",
"src/compiler/serializer-for-background-compilation.h",
"src/compiler/serializer-hints.h",
"src/compiler/simplified-lowering.cc",
"src/compiler/simplified-lowering.h",
"src/compiler/simplified-operator.cc",
......
......@@ -2603,8 +2603,6 @@ v8_header_set("v8_internal_headers") {
"src/compiler/schedule.h",
"src/compiler/scheduler.h",
"src/compiler/select-lowering.h",
"src/compiler/serializer-for-background-compilation.h",
"src/compiler/serializer-hints.h",
"src/compiler/simplified-lowering.h",
"src/compiler/simplified-operator-reducer.h",
"src/compiler/simplified-operator.h",
......@@ -3611,7 +3609,6 @@ v8_compiler_sources = [
"src/compiler/schedule.cc",
"src/compiler/scheduler.cc",
"src/compiler/select-lowering.cc",
"src/compiler/serializer-for-background-compilation.cc",
"src/compiler/simplified-lowering.cc",
"src/compiler/simplified-operator-reducer.cc",
"src/compiler/simplified-operator.cc",
......
......@@ -4562,8 +4562,6 @@ void BuildGraphFromBytecode(JSHeapBroker* broker, Zone* local_zone,
BytecodeGraphBuilderFlags flags,
TickCounter* tick_counter,
ObserveNodeInfo const& observe_node_info) {
DCHECK(broker->IsSerializedForCompilation(
shared_info, feedback_cell.value()->AsFeedbackVector()));
BytecodeGraphBuilder builder(
broker, local_zone, broker->target_native_context(), shared_info,
feedback_cell, osr_offset, jsgraph, invocation_frequency,
......
......@@ -34,10 +34,6 @@ class V8_EXPORT_PRIVATE CompilationDependencies : public ZoneObject {
V8_WARN_UNUSED_RESULT bool Commit(Handle<Code> code);
// TODO(jgruber): Remove this method once GetPropertyAccessInfo no longer
// uses the two-phase approach between serialization and compilation.
void ClearForConcurrentGetPropertyAccessInfo() { dependencies_.clear(); }
// Return the initial map of {function} and record the assumption that it
// stays the initial map.
MapRef DependOnInitialMap(const JSFunctionRef& function);
......
......@@ -1660,10 +1660,7 @@ bool ObjectRef::equals(const ObjectRef& other) const {
data_->used_status = ObjectData::Usage::kOnlyIdentityUsed;
}
#endif // DEBUG
// TODO(jgruber): Consider going back to reference-equality on data_ once
// ObjectData objects are guaranteed to be canonicalized (see also:
// ClearReconstructibleData).
return data_->object().is_identical_to(other.data_->object());
return data_ == other.data_;
}
Isolate* ObjectRef::isolate() const { return broker()->isolate(); }
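With ClearReconstructibleData gone, ObjectData entries are no longer evicted and rebuilt, so each heap object maps to a single canonical ObjectData and ObjectRef::equals can compare the data_ pointers directly. The following is a minimal standalone sketch of that idea (RefCache and Data are illustrative stand-ins, not V8 types); it only shows why canonicalized entries make pointer equality sufficient:

#include <cassert>
#include <cstdint>
#include <unordered_map>

// Illustrative stand-in for ObjectData: one entry per distinct heap address.
struct Data { std::uintptr_t address; };

// Illustrative stand-in for the broker's refs_ map: canonicalizes Data per key.
class RefCache {
 public:
  Data* GetOrCreate(std::uintptr_t address) {
    auto it = entries_.find(address);
    if (it != entries_.end()) return &it->second;  // reuse the canonical entry
    return &entries_.emplace(address, Data{address}).first->second;
  }

 private:
  std::unordered_map<std::uintptr_t, Data> entries_;
};

int main() {
  RefCache cache;
  Data* a = cache.GetOrCreate(0x1234);
  Data* b = cache.GetOrCreate(0x1234);
  // Entries are never dropped and re-created, so pointer identity coincides
  // with identity of the wrapped object -- the simplification made by
  // returning data_ == other.data_ above.
  assert(a == b);
  return 0;
}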
......@@ -1783,11 +1780,11 @@ void JSHeapBroker::InitializeAndStartSerializing() {
CollectArrayAndObjectPrototypes();
Factory* const f = isolate()->factory();
SetTargetNativeContextRef(target_native_context().object());
if (!is_concurrent_inlining()) {
target_native_context().Serialize(NotConcurrentInliningTag{this});
Factory* const f = isolate()->factory();
ObjectData* data;
data = GetOrCreateData(f->array_buffer_detaching_protector());
if (!data->should_access_heap()) {
......@@ -1825,12 +1822,11 @@ void JSHeapBroker::InitializeAndStartSerializing() {
if (!data->should_access_heap()) {
data->AsPropertyCell()->Cache(this);
}
GetOrCreateData(f->many_closures_cell());
GetOrCreateData(CodeFactory::CEntry(isolate(), 1, SaveFPRegsMode::kIgnore,
ArgvMode::kStack, true));
TRACE(this, "Finished serializing standard objects");
}
GetOrCreateData(f->many_closures_cell());
GetOrCreateData(CodeFactory::CEntry(isolate(), 1, SaveFPRegsMode::kIgnore,
ArgvMode::kStack, true));
TRACE(this, "Finished serializing standard objects");
}
namespace {
......@@ -1871,25 +1867,6 @@ struct CreateDataFunctor<RefSerializationKind::kNeverSerialized, ObjectData,
} // namespace
void JSHeapBroker::ClearReconstructibleData() {
RefsMap::Entry* p = refs_->Start();
while (p != nullptr) {
Address key = p->key;
ObjectData* value = p->value;
p = refs_->Next(p);
if (value->IsMap() &&
value->kind() == ObjectDataKind::kBackgroundSerializedHeapObject) {
CHECK(!value->AsMap()->has_extra_serialized_data());
}
if (value->IsJSObject() &&
value->kind() == ObjectDataKind::kBackgroundSerializedHeapObject) {
CHECK(!value->AsJSObject()->has_extra_serialized_data());
}
// Can be reconstructed from the background thread.
CHECK_NOT_NULL(refs_->Remove(key));
}
}
ObjectData* JSHeapBroker::TryGetOrCreateData(Handle<Object> object,
GetOrCreateDataFlags flags) {
RefsMap::Entry* entry = refs_->Lookup(object.address());
......@@ -2078,9 +2055,8 @@ OddballType MapRef::oddball_type() const {
}
FeedbackCellRef FeedbackVectorRef::GetClosureFeedbackCell(int index) const {
// These should all be available because we request the cell for each
// CreateClosure bytecode.
return MakeRef(broker(), object()->closure_feedback_cell(index));
return MakeRefAssumeMemoryFence(broker(),
object()->closure_feedback_cell(index));
}
base::Optional<ObjectRef> JSObjectRef::raw_properties_or_hash() const {
......@@ -3303,11 +3279,7 @@ ScopeInfoRef SharedFunctionInfoRef::scope_info() const {
}
void JSObjectRef::SerializeObjectCreateMap(NotConcurrentInliningTag tag) {
if (data_->should_access_heap()) {
return;
}
CHECK_IMPLIES(!FLAG_turbo_concurrent_get_property_access_info,
broker()->mode() == JSHeapBroker::kSerializing);
if (data_->should_access_heap()) return;
data()->AsJSObject()->SerializeObjectCreateMap(broker(), tag);
}
......
......@@ -518,6 +518,7 @@ class ContextRef : public HeapObjectRef {
V(JSFunction, symbol_function) \
V(JSGlobalObject, global_object) \
V(JSGlobalProxy, global_proxy_object) \
V(JSObject, initial_array_prototype) \
V(JSObject, promise_prototype) \
V(Map, async_function_object_map) \
V(Map, block_context_map) \
......
......@@ -7993,10 +7993,7 @@ Reduction JSCallReducer::ReduceRegExpPrototypeTest(Node* node) {
access_infos.push_back(broker()->GetPropertyAccessInfo(
MakeRef(broker(), map),
MakeRef(broker(), isolate()->factory()->exec_string()),
AccessMode::kLoad, dependencies(),
broker()->is_concurrent_inlining()
? SerializationPolicy::kAssumeSerialized
: SerializationPolicy::kSerializeIfNeeded));
AccessMode::kLoad, dependencies()));
}
PropertyAccessInfo ai_exec =
......
......@@ -239,6 +239,7 @@ namespace {
// some cases - unlike the full builtin, the megamorphic builtin does fewer
// checks and does not collect feedback.
bool ShouldUseMegamorphicLoadBuiltin(FeedbackSource const& source,
base::Optional<NameRef> name,
JSHeapBroker* broker) {
if (broker->is_native_context_independent()) {
// The decision to use the megamorphic load builtin is made based on
......@@ -247,7 +248,8 @@ bool ShouldUseMegamorphicLoadBuiltin(FeedbackSource const& source,
return false;
}
ProcessedFeedback const& feedback = broker->GetFeedback(source);
ProcessedFeedback const& feedback =
broker->GetFeedbackForPropertyAccess(source, AccessMode::kLoad, name);
if (feedback.kind() == ProcessedFeedback::kElementAccess) {
return feedback.AsElementAccess().transition_groups().empty();
......@@ -263,6 +265,7 @@ bool ShouldUseMegamorphicLoadBuiltin(FeedbackSource const& source,
}
UNREACHABLE();
}
} // namespace
void JSGenericLowering::LowerJSHasProperty(Node* node) {
......@@ -290,14 +293,14 @@ void JSGenericLowering::LowerJSLoadProperty(Node* node) {
n->InsertInput(zone(), 2,
jsgraph()->TaggedIndexConstant(p.feedback().index()));
ReplaceWithBuiltinCall(
node, ShouldUseMegamorphicLoadBuiltin(p.feedback(), broker())
node, ShouldUseMegamorphicLoadBuiltin(p.feedback(), {}, broker())
? Builtin::kKeyedLoadICTrampoline_Megamorphic
: Builtin::kKeyedLoadICTrampoline);
} else {
n->InsertInput(zone(), 2,
jsgraph()->TaggedIndexConstant(p.feedback().index()));
ReplaceWithBuiltinCall(
node, ShouldUseMegamorphicLoadBuiltin(p.feedback(), broker())
node, ShouldUseMegamorphicLoadBuiltin(p.feedback(), {}, broker())
? Builtin::kKeyedLoadIC_Megamorphic
: Builtin::kKeyedLoadIC);
}
......@@ -319,7 +322,8 @@ void JSGenericLowering::LowerJSLoadNamed(Node* node) {
node->InsertInput(zone(), 2,
jsgraph()->TaggedIndexConstant(p.feedback().index()));
ReplaceWithBuiltinCall(
node, ShouldUseMegamorphicLoadBuiltin(p.feedback(), broker())
node, ShouldUseMegamorphicLoadBuiltin(
p.feedback(), MakeRef(broker(), p.name()), broker())
? Builtin::kLoadICTrampoline_Megamorphic
: Builtin::kLoadICTrampoline);
} else {
......@@ -327,7 +331,8 @@ void JSGenericLowering::LowerJSLoadNamed(Node* node) {
node->InsertInput(zone(), 2,
jsgraph()->TaggedIndexConstant(p.feedback().index()));
ReplaceWithBuiltinCall(
node, ShouldUseMegamorphicLoadBuiltin(p.feedback(), broker())
node, ShouldUseMegamorphicLoadBuiltin(
p.feedback(), MakeRef(broker(), p.name()), broker())
? Builtin::kLoadIC_Megamorphic
: Builtin::kLoadIC);
}
......
......@@ -54,8 +54,7 @@ JSHeapBroker::JSHeapBroker(Isolate* isolate, Zone* broker_zone,
feedback_(zone()),
property_access_infos_(zone()),
minimorphic_property_access_infos_(zone()),
typed_array_string_tags_(zone()),
serialized_functions_(zone()) {
typed_array_string_tags_(zone()) {
// Note that this initialization of {refs_} with the minimal initial capacity
// is redundant in the normal use case (concurrent compilation enabled,
// standard objects to be serialized), as the map is going to be replaced
......@@ -170,40 +169,6 @@ StringRef JSHeapBroker::GetTypedArrayStringTag(ElementsKind kind) {
}
}
bool JSHeapBroker::ShouldBeSerializedForCompilation(
const SharedFunctionInfoRef& shared, const FeedbackVectorRef& feedback,
const HintsVector& arguments) const {
if (serialized_functions_.size() >= kMaxSerializedFunctionsCacheSize) {
TRACE_BROKER_MISSING(this,
"opportunity - serialized functions cache is full.");
return false;
}
SerializedFunction function{shared, feedback};
auto matching_functions = serialized_functions_.equal_range(function);
return std::find_if(matching_functions.first, matching_functions.second,
[&arguments](const auto& entry) {
return entry.second == arguments;
}) == matching_functions.second;
}
void JSHeapBroker::SetSerializedForCompilation(
const SharedFunctionInfoRef& shared, const FeedbackVectorRef& feedback,
const HintsVector& arguments) {
SerializedFunction function{shared, feedback};
serialized_functions_.insert({function, arguments});
TRACE(this, "Set function " << shared << " with " << feedback
<< " as serialized for compilation");
}
bool JSHeapBroker::IsSerializedForCompilation(
const SharedFunctionInfoRef& shared,
const FeedbackVectorRef& feedback) const {
if (mode() == kDisabled) return true;
SerializedFunction function = {shared, feedback};
return serialized_functions_.find(function) != serialized_functions_.end();
}
bool JSHeapBroker::IsArrayOrObjectPrototype(const JSObjectRef& object) const {
return IsArrayOrObjectPrototype(object.object());
}
......@@ -468,7 +433,7 @@ bool ElementAccessFeedback::HasOnlyStringMaps(JSHeapBroker* broker) const {
MinimorphicLoadPropertyAccessFeedback::MinimorphicLoadPropertyAccessFeedback(
NameRef const& name, FeedbackSlotKind slot_kind, Handle<Object> handler,
ZoneVector<Handle<Map>> const& maps, bool has_migration_target_maps)
ZoneVector<MapRef> const& maps, bool has_migration_target_maps)
: ProcessedFeedback(kMinimorphicPropertyAccess, slot_kind),
name_(name),
handler_(handler),
......@@ -478,7 +443,7 @@ MinimorphicLoadPropertyAccessFeedback::MinimorphicLoadPropertyAccessFeedback(
}
NamedAccessFeedback::NamedAccessFeedback(NameRef const& name,
ZoneVector<Handle<Map>> const& maps,
ZoneVector<MapRef> const& maps,
FeedbackSlotKind slot_kind)
: ProcessedFeedback(kNamedAccess, slot_kind), name_(name), maps_(maps) {
DCHECK(IsLoadICKind(slot_kind) || IsStoreICKind(slot_kind) ||
......@@ -510,46 +475,24 @@ ProcessedFeedback const& JSHeapBroker::GetFeedback(
FeedbackSlotKind JSHeapBroker::GetFeedbackSlotKind(
FeedbackSource const& source) const {
if (is_concurrent_inlining_) {
ProcessedFeedback const& processed = GetFeedback(source);
return processed.slot_kind();
}
if (HasFeedback(source)) return GetFeedback(source).slot_kind();
FeedbackNexus nexus(source.vector, source.slot, feedback_nexus_config());
return nexus.kind();
}
bool JSHeapBroker::FeedbackIsInsufficient(FeedbackSource const& source) const {
return is_concurrent_inlining_ ? GetFeedback(source).IsInsufficient()
: FeedbackNexus(source.vector, source.slot,
feedback_nexus_config())
.IsUninitialized();
if (HasFeedback(source)) return GetFeedback(source).IsInsufficient();
return FeedbackNexus(source.vector, source.slot, feedback_nexus_config())
.IsUninitialized();
}
namespace {
// Update deprecated maps, drop unupdatable ones and abandoned prototype maps.
void FilterRelevantReceiverMaps(Isolate* isolate, MapHandles* maps) {
auto in = maps->begin();
auto out = in;
auto end = maps->end();
for (; in != end; ++in) {
Handle<Map> map = *in;
if (Map::TryUpdate(isolate, map).ToHandle(&map) &&
!map->is_abandoned_prototype_map()) {
DCHECK(!map->is_deprecated());
*out = map;
++out;
}
}
// Remove everything between the last valid map and the end of the vector.
maps->erase(out, end);
}
using MapRefAndHandler = std::pair<MapRef, MaybeObjectHandle>;
MaybeObjectHandle TryGetMinimorphicHandler(
std::vector<MapAndHandler> const& maps_and_handlers, FeedbackSlotKind kind,
Handle<NativeContext> native_context, bool is_turboprop) {
ZoneVector<MapRefAndHandler> const& maps_and_handlers,
FeedbackSlotKind kind, NativeContextRef const& native_context,
bool is_turboprop) {
if (!is_turboprop || !FLAG_turbo_dynamic_map_checks || !IsLoadICKind(kind)) {
return MaybeObjectHandle();
}
......@@ -560,14 +503,14 @@ MaybeObjectHandle TryGetMinimorphicHandler(
// polymorphic loads currently we don't inline the builtins even without
// dynamic map checks.
if (maps_and_handlers.size() == 1 &&
*maps_and_handlers[0].first ==
native_context->initial_array_prototype().map()) {
maps_and_handlers[0].first.equals(
native_context.initial_array_prototype().map())) {
return MaybeObjectHandle();
}
MaybeObjectHandle initial_handler;
for (MapAndHandler map_and_handler : maps_and_handlers) {
auto map = map_and_handler.first;
for (const MapRefAndHandler& map_and_handler : maps_and_handlers) {
MapRef map = map_and_handler.first;
MaybeObjectHandle handler = map_and_handler.second;
if (handler.is_null()) return MaybeObjectHandle();
DCHECK(!handler->IsCleared());
......@@ -577,7 +520,7 @@ MaybeObjectHandle TryGetMinimorphicHandler(
LoadHandler::Kind::kField) {
return MaybeObjectHandle();
}
CHECK(!map->IsJSGlobalProxyMap());
CHECK(!map.object()->IsJSGlobalProxyMap());
if (initial_handler.is_null()) {
initial_handler = handler;
} else if (!handler.is_identical_to(initial_handler)) {
......@@ -587,9 +530,9 @@ MaybeObjectHandle TryGetMinimorphicHandler(
return initial_handler;
}
bool HasMigrationTargets(const MapHandles& maps) {
for (Handle<Map> map : maps) {
if (map->is_migration_target()) return true;
bool HasMigrationTargets(const ZoneVector<MapRef>& maps) {
for (const MapRef& map : maps) {
if (map.is_migration_target()) return true;
}
return false;
}
......@@ -614,27 +557,35 @@ ProcessedFeedback const& JSHeapBroker::ReadFeedbackForPropertyAccess(
FeedbackSlotKind kind = nexus.kind();
if (!CanUseFeedback(nexus)) return NewInsufficientFeedback(kind);
std::vector<MapAndHandler> maps_and_handlers;
nexus.ExtractMapsAndFeedback(&maps_and_handlers);
MapHandles maps;
for (auto const& entry : maps_and_handlers) {
maps.push_back(entry.first);
ZoneVector<MapRefAndHandler> maps_and_handlers(zone());
ZoneVector<MapRef> maps(zone());
{
std::vector<MapAndHandler> maps_and_handlers_unfiltered;
nexus.ExtractMapsAndFeedback(&maps_and_handlers_unfiltered);
for (const MapAndHandler& map_and_handler : maps_and_handlers_unfiltered) {
MapRef map = MakeRefAssumeMemoryFence(this, *map_and_handler.first);
// May change concurrently at any time - must be guarded by a dependency
// if non-deprecation is important.
// TODO(jgruber): Consider replaying transitions on deprecated maps (see
// Map::TryUpdate).
if (map.is_deprecated()) continue;
if (map.is_abandoned_prototype_map()) continue;
maps_and_handlers.push_back({map, map_and_handler.second});
maps.push_back(map);
}
}
base::Optional<NameRef> name =
static_name.has_value() ? static_name : GetNameFeedback(nexus);
MaybeObjectHandle handler = TryGetMinimorphicHandler(
maps_and_handlers, kind, target_native_context().object(),
is_turboprop());
maps_and_handlers, kind, target_native_context(), is_turboprop());
if (!handler.is_null()) {
return *zone()->New<MinimorphicLoadPropertyAccessFeedback>(
*name, kind, handler.object(),
ZoneVector<Handle<Map>>(maps.begin(), maps.end(), zone()),
*name, kind, CanonicalPersistentHandle(handler.object()), maps,
HasMigrationTargets(maps));
}
FilterRelevantReceiverMaps(isolate(), &maps);
// If no maps were found for a non-megamorphic access, then our maps died
// and we should soft-deopt.
if (maps.empty() && nexus.ic_state() != MEGAMORPHIC) {
......@@ -644,8 +595,7 @@ ProcessedFeedback const& JSHeapBroker::ReadFeedbackForPropertyAccess(
if (name.has_value()) {
// We rely on this invariant in JSGenericLowering.
DCHECK_IMPLIES(maps.empty(), nexus.ic_state() == MEGAMORPHIC);
return *zone()->New<NamedAccessFeedback>(
*name, ZoneVector<Handle<Map>>(maps.begin(), maps.end(), zone()), kind);
return *zone()->New<NamedAccessFeedback>(*name, maps, kind);
} else if (nexus.GetKeyType() == ELEMENT && !maps.empty()) {
return ProcessFeedbackMapsForElementAccess(
maps, KeyedAccessMode::FromNexus(nexus), kind);
......@@ -661,7 +611,7 @@ ProcessedFeedback const& JSHeapBroker::ReadFeedbackForPropertyAccess(
ProcessedFeedback const& JSHeapBroker::ReadFeedbackForGlobalAccess(
FeedbackSource const& source) {
FeedbackNexus nexus(source.vector, source.slot);
FeedbackNexus nexus(source.vector, source.slot, feedback_nexus_config());
DCHECK(nexus.kind() == FeedbackSlotKind::kLoadGlobalInsideTypeof ||
nexus.kind() == FeedbackSlotKind::kLoadGlobalNotInsideTypeof ||
nexus.kind() == FeedbackSlotKind::kStoreGlobalSloppy ||
......@@ -671,42 +621,35 @@ ProcessedFeedback const& JSHeapBroker::ReadFeedbackForGlobalAccess(
return *zone()->New<GlobalAccessFeedback>(nexus.kind());
}
Handle<Object> feedback_value(nexus.GetFeedback()->GetHeapObjectOrSmi(),
isolate());
Handle<Object> feedback_value =
CanonicalPersistentHandle(nexus.GetFeedback()->GetHeapObjectOrSmi());
if (feedback_value->IsSmi()) {
// The wanted name belongs to a script-scope variable and the feedback
// tells us where to find its value.
int number = feedback_value->Number();
int const number = feedback_value->Number();
int const script_context_index =
FeedbackNexus::ContextIndexBits::decode(number);
int const context_slot_index = FeedbackNexus::SlotIndexBits::decode(number);
bool const immutable = FeedbackNexus::ImmutabilityBit::decode(number);
Handle<Context> context = ScriptContextTable::GetContext(
isolate(), target_native_context().script_context_table().object(),
script_context_index);
{
ObjectRef contents =
MakeRef(this, handle(context->get(context_slot_index), isolate()));
CHECK(!contents.equals(
MakeRef<Object>(this, isolate()->factory()->the_hole_value())));
}
ContextRef context_ref = MakeRef(this, context);
if (immutable) {
context_ref.get(context_slot_index);
}
return *zone()->New<GlobalAccessFeedback>(context_ref, context_slot_index,
immutable, nexus.kind());
ContextRef context = MakeRef(
this,
target_native_context().script_context_table().object()->get_context(
script_context_index));
ObjectRef contents = context.get(context_slot_index).value();
CHECK(!contents.equals(
MakeRef<Object>(this, isolate()->factory()->the_hole_value())));
return *zone()->New<GlobalAccessFeedback>(
context, context_slot_index,
FeedbackNexus::ImmutabilityBit::decode(number), nexus.kind());
}
CHECK(feedback_value->IsPropertyCell());
// The wanted name belongs (or did belong) to a property on the global
// object and the feedback is the cell holding its value.
PropertyCellRef cell =
MakeRef(this, Handle<PropertyCell>::cast(feedback_value));
MakeRef(this,
Handle<PropertyCell>::cast(feedback_value)->value(kAcquireLoad));
return *zone()->New<GlobalAccessFeedback>(cell, nexus.kind());
return *zone()->New<GlobalAccessFeedback>(
MakeRef(this, Handle<PropertyCell>::cast(feedback_value)), nexus.kind());
}
ProcessedFeedback const& JSHeapBroker::ReadFeedbackForBinaryOperation(
......@@ -746,7 +689,7 @@ ProcessedFeedback const& JSHeapBroker::ReadFeedbackForInstanceOf(
MaybeHandle<JSObject> maybe_constructor = nexus.GetConstructorFeedback();
Handle<JSObject> constructor;
if (maybe_constructor.ToHandle(&constructor)) {
optional_constructor = MakeRef(this, constructor);
optional_constructor = MakeRef(this, *constructor);
}
}
return *zone()->New<InstanceOfFeedback>(optional_constructor, nexus.kind());
......@@ -762,7 +705,8 @@ ProcessedFeedback const& JSHeapBroker::ReadFeedbackForArrayOrObjectLiteral(
return NewInsufficientFeedback(nexus.kind());
}
AllocationSiteRef site = MakeRef(this, AllocationSite::cast(object));
AllocationSiteRef site =
MakeRefAssumeMemoryFence(this, AllocationSite::cast(object));
if (!is_concurrent_inlining() && site.PointsToLiteral()) {
site.SerializeRecursive(NotConcurrentInliningTag{this});
}
......@@ -779,8 +723,8 @@ ProcessedFeedback const& JSHeapBroker::ReadFeedbackForRegExpLiteral(
return NewInsufficientFeedback(nexus.kind());
}
RegExpBoilerplateDescriptionRef boilerplate = MakeRef(
this, handle(RegExpBoilerplateDescription::cast(object), isolate()));
RegExpBoilerplateDescriptionRef boilerplate = MakeRefAssumeMemoryFence(
this, RegExpBoilerplateDescription::cast(object));
if (!is_concurrent_inlining()) {
boilerplate.Serialize(NotConcurrentInliningTag{this});
}
......@@ -797,7 +741,7 @@ ProcessedFeedback const& JSHeapBroker::ReadFeedbackForTemplateObject(
return NewInsufficientFeedback(nexus.kind());
}
JSArrayRef array = MakeRef(this, handle(JSArray::cast(object), isolate()));
JSArrayRef array = MakeRefAssumeMemoryFence(this, JSArray::cast(object));
return *zone()->New<TemplateObjectFeedback>(array, nexus.kind());
}
......@@ -811,11 +755,10 @@ ProcessedFeedback const& JSHeapBroker::ReadFeedbackForCall(
MaybeObject maybe_target = nexus.GetFeedback();
HeapObject target_object;
if (maybe_target->GetHeapObject(&target_object)) {
// TryMakeRef is used because the GC predicate may fail if the
// JSFunction was allocated too recently to be store-ordered.
target_ref = TryMakeRef(this, handle(target_object, isolate()));
target_ref = MakeRefAssumeMemoryFence(this, target_object);
}
}
float frequency = nexus.ComputeCallFrequency();
SpeculationMode mode = nexus.GetSpeculationMode();
CallFeedbackContent content = nexus.GetCallFeedbackContent();
......@@ -825,9 +768,7 @@ ProcessedFeedback const& JSHeapBroker::ReadFeedbackForCall(
BinaryOperationHint JSHeapBroker::GetFeedbackForBinaryOperation(
FeedbackSource const& source) {
ProcessedFeedback const& feedback =
is_concurrent_inlining_ ? GetFeedback(source)
: ProcessFeedbackForBinaryOperation(source);
ProcessedFeedback const& feedback = ProcessFeedbackForBinaryOperation(source);
return feedback.IsInsufficient() ? BinaryOperationHint::kNone
: feedback.AsBinaryOperation().value();
}
......@@ -835,67 +776,19 @@ BinaryOperationHint JSHeapBroker::GetFeedbackForBinaryOperation(
CompareOperationHint JSHeapBroker::GetFeedbackForCompareOperation(
FeedbackSource const& source) {
ProcessedFeedback const& feedback =
is_concurrent_inlining_ ? GetFeedback(source)
: ProcessFeedbackForCompareOperation(source);
ProcessFeedbackForCompareOperation(source);
return feedback.IsInsufficient() ? CompareOperationHint::kNone
: feedback.AsCompareOperation().value();
}
ForInHint JSHeapBroker::GetFeedbackForForIn(FeedbackSource const& source) {
ProcessedFeedback const& feedback = is_concurrent_inlining_
? GetFeedback(source)
: ProcessFeedbackForForIn(source);
ProcessedFeedback const& feedback = ProcessFeedbackForForIn(source);
return feedback.IsInsufficient() ? ForInHint::kNone
: feedback.AsForIn().value();
}
ProcessedFeedback const& JSHeapBroker::GetFeedbackForPropertyAccess(
FeedbackSource const& source, AccessMode mode,
base::Optional<NameRef> static_name) {
return is_concurrent_inlining_
? GetFeedback(source)
: ProcessFeedbackForPropertyAccess(source, mode, static_name);
}
ProcessedFeedback const& JSHeapBroker::GetFeedbackForInstanceOf(
FeedbackSource const& source) {
return is_concurrent_inlining_ ? GetFeedback(source)
: ProcessFeedbackForInstanceOf(source);
}
ProcessedFeedback const& JSHeapBroker::GetFeedbackForCall(
FeedbackSource const& source) {
return is_concurrent_inlining_ ? GetFeedback(source)
: ProcessFeedbackForCall(source);
}
ProcessedFeedback const& JSHeapBroker::GetFeedbackForGlobalAccess(
FeedbackSource const& source) {
return is_concurrent_inlining_ ? GetFeedback(source)
: ProcessFeedbackForGlobalAccess(source);
}
ProcessedFeedback const& JSHeapBroker::GetFeedbackForArrayOrObjectLiteral(
FeedbackSource const& source) {
return is_concurrent_inlining_
? GetFeedback(source)
: ProcessFeedbackForArrayOrObjectLiteral(source);
}
ProcessedFeedback const& JSHeapBroker::GetFeedbackForRegExpLiteral(
FeedbackSource const& source) {
return is_concurrent_inlining_ ? GetFeedback(source)
: ProcessFeedbackForRegExpLiteral(source);
}
ProcessedFeedback const& JSHeapBroker::GetFeedbackForTemplateObject(
FeedbackSource const& source) {
return is_concurrent_inlining_ ? GetFeedback(source)
: ProcessFeedbackForTemplateObject(source);
}
ProcessedFeedback const& JSHeapBroker::ProcessFeedbackForArrayOrObjectLiteral(
FeedbackSource const& source) {
if (HasFeedback(source)) return GetFeedback(source);
ProcessedFeedback const& feedback =
ReadFeedbackForArrayOrObjectLiteral(source);
......@@ -903,7 +796,7 @@ ProcessedFeedback const& JSHeapBroker::ProcessFeedbackForArrayOrObjectLiteral(
return feedback;
}
ProcessedFeedback const& JSHeapBroker::ProcessFeedbackForRegExpLiteral(
ProcessedFeedback const& JSHeapBroker::GetFeedbackForRegExpLiteral(
FeedbackSource const& source) {
if (HasFeedback(source)) return GetFeedback(source);
ProcessedFeedback const& feedback = ReadFeedbackForRegExpLiteral(source);
......@@ -911,7 +804,7 @@ ProcessedFeedback const& JSHeapBroker::ProcessFeedbackForRegExpLiteral(
return feedback;
}
ProcessedFeedback const& JSHeapBroker::ProcessFeedbackForTemplateObject(
ProcessedFeedback const& JSHeapBroker::GetFeedbackForTemplateObject(
FeedbackSource const& source) {
if (HasFeedback(source)) return GetFeedback(source);
ProcessedFeedback const& feedback = ReadFeedbackForTemplateObject(source);
......@@ -943,7 +836,7 @@ ProcessedFeedback const& JSHeapBroker::ProcessFeedbackForForIn(
return feedback;
}
ProcessedFeedback const& JSHeapBroker::ProcessFeedbackForPropertyAccess(
ProcessedFeedback const& JSHeapBroker::GetFeedbackForPropertyAccess(
FeedbackSource const& source, AccessMode mode,
base::Optional<NameRef> static_name) {
if (HasFeedback(source)) return GetFeedback(source);
......@@ -953,7 +846,7 @@ ProcessedFeedback const& JSHeapBroker::ProcessFeedbackForPropertyAccess(
return feedback;
}
ProcessedFeedback const& JSHeapBroker::ProcessFeedbackForInstanceOf(
ProcessedFeedback const& JSHeapBroker::GetFeedbackForInstanceOf(
FeedbackSource const& source) {
if (HasFeedback(source)) return GetFeedback(source);
ProcessedFeedback const& feedback = ReadFeedbackForInstanceOf(source);
......@@ -961,7 +854,7 @@ ProcessedFeedback const& JSHeapBroker::ProcessFeedbackForInstanceOf(
return feedback;
}
ProcessedFeedback const& JSHeapBroker::ProcessFeedbackForCall(
ProcessedFeedback const& JSHeapBroker::GetFeedbackForCall(
FeedbackSource const& source) {
if (HasFeedback(source)) return GetFeedback(source);
ProcessedFeedback const& feedback = ReadFeedbackForCall(source);
......@@ -969,7 +862,7 @@ ProcessedFeedback const& JSHeapBroker::ProcessFeedbackForCall(
return feedback;
}
ProcessedFeedback const& JSHeapBroker::ProcessFeedbackForGlobalAccess(
ProcessedFeedback const& JSHeapBroker::GetFeedbackForGlobalAccess(
FeedbackSource const& source) {
if (HasFeedback(source)) return GetFeedback(source);
ProcessedFeedback const& feedback = ReadFeedbackForGlobalAccess(source);
......@@ -978,23 +871,22 @@ ProcessedFeedback const& JSHeapBroker::ProcessFeedbackForGlobalAccess(
}
ElementAccessFeedback const& JSHeapBroker::ProcessFeedbackMapsForElementAccess(
MapHandles const& maps, KeyedAccessMode const& keyed_mode,
ZoneVector<MapRef>& maps, KeyedAccessMode const& keyed_mode,
FeedbackSlotKind slot_kind) {
DCHECK(!maps.empty());
// Collect possible transition targets.
MapHandles possible_transition_targets;
possible_transition_targets.reserve(maps.size());
for (Handle<Map> map : maps) {
MapRef map_ref = MakeRef(this, map);
for (MapRef& map : maps) {
if (!is_concurrent_inlining()) {
map_ref.SerializeRootMap(NotConcurrentInliningTag{this});
map.SerializeRootMap(NotConcurrentInliningTag{this});
}
if (CanInlineElementAccess(map_ref) &&
IsFastElementsKind(map->elements_kind()) &&
GetInitialFastElementsKind() != map->elements_kind()) {
possible_transition_targets.push_back(map);
if (CanInlineElementAccess(map) &&
IsFastElementsKind(map.elements_kind()) &&
GetInitialFastElementsKind() != map.elements_kind()) {
possible_transition_targets.push_back(map.object());
}
}
......@@ -1007,22 +899,11 @@ ElementAccessFeedback const& JSHeapBroker::ProcessFeedbackMapsForElementAccess(
ZoneMap<Handle<Map>, TransitionGroup, HandleLess> transition_groups(zone());
// Separate the actual receiver maps and the possible transition sources.
for (Handle<Map> map : maps) {
for (const MapRef& map : maps) {
// Don't generate elements kind transitions from stable maps.
Map transition_target = map->is_stable()
? Map()
: map->FindElementsKindTransitionedMap(
isolate(), possible_transition_targets);
if (transition_target.is_null()) {
TransitionGroup group(1, map, zone());
transition_groups.insert({map, group});
} else {
Handle<Map> target(transition_target, isolate());
TransitionGroup new_group(1, target, zone());
TransitionGroup& actual_group =
transition_groups.insert({target, new_group}).first->second;
actual_group.push_back(map);
}
// TODO(jgruber): Bring back elements kind transition generation.
TransitionGroup group(1, map.object(), zone());
transition_groups.insert({map.object(), group});
}
ElementAccessFeedback* result =
......@@ -1058,31 +939,22 @@ base::Optional<NameRef> JSHeapBroker::GetNameFeedback(
FeedbackNexus const& nexus) {
Name raw_name = nexus.GetName();
if (raw_name.is_null()) return base::nullopt;
return MakeRef(this, handle(raw_name, isolate()));
return MakeRef(this, raw_name);
}
PropertyAccessInfo JSHeapBroker::GetPropertyAccessInfo(
MapRef map, NameRef name, AccessMode access_mode,
CompilationDependencies* dependencies, SerializationPolicy policy) {
CompilationDependencies* dependencies) {
DCHECK_NOT_NULL(dependencies);
PropertyAccessTarget target({map, name, access_mode});
auto it = property_access_infos_.find(target);
if (it != property_access_infos_.end()) return it->second;
if (policy == SerializationPolicy::kAssumeSerialized &&
!FLAG_turbo_concurrent_get_property_access_info) {
TRACE_BROKER_MISSING(this, "PropertyAccessInfo for "
<< access_mode << " of property " << name
<< " on map " << map);
return PropertyAccessInfo::Invalid(zone());
}
CHECK_NOT_NULL(dependencies);
AccessInfoFactory factory(this, dependencies, zone());
PropertyAccessInfo access_info =
factory.ComputePropertyAccessInfo(map, name, access_mode);
if (is_concurrent_inlining_) {
CHECK_IMPLIES(!FLAG_turbo_concurrent_get_property_access_info,
mode() == kSerializing);
TRACE(this, "Storing PropertyAccessInfo for "
<< access_mode << " of property " << name << " on map "
<< map);
......@@ -1093,17 +965,10 @@ PropertyAccessInfo JSHeapBroker::GetPropertyAccessInfo(
MinimorphicLoadPropertyAccessInfo JSHeapBroker::GetPropertyAccessInfo(
MinimorphicLoadPropertyAccessFeedback const& feedback,
FeedbackSource const& source, SerializationPolicy policy) {
FeedbackSource const& source) {
auto it = minimorphic_property_access_infos_.find(source);
if (it != minimorphic_property_access_infos_.end()) return it->second;
if (policy == SerializationPolicy::kAssumeSerialized) {
TRACE_BROKER_MISSING(this, "MinimorphicLoadPropertyAccessInfo for slot "
<< source.index() << " "
<< MakeRef<Object>(this, source.vector));
return MinimorphicLoadPropertyAccessInfo::Invalid();
}
AccessInfoFactory factory(this, nullptr, zone());
MinimorphicLoadPropertyAccessInfo access_info =
factory.ComputePropertyAccessInfo(feedback);
......
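With the serializer gone, the former split between eager ProcessFeedbackFor* calls (serialization time) and pre-populated GetFeedback lookups (concurrent inlining) collapses into one lazy accessor per feedback kind: return the cached ProcessedFeedback if present, otherwise read the FeedbackNexus and cache the result. A simplified standalone sketch of that read-and-cache shape, using hypothetical names (Source, Feedback, Broker) rather than the real broker types:

#include <map>
#include <memory>
#include <utility>

// Hypothetical stand-ins for FeedbackSource / ProcessedFeedback.
struct Source { int vector_id; int slot; };
struct Feedback { bool insufficient; };

class Broker {
 public:
  // Mirrors the shape of GetFeedbackForCall and friends after this change:
  // consult the cache first, otherwise read once and remember the result.
  const Feedback& GetFeedbackForCall(const Source& src) {
    if (const Feedback* cached = Lookup(src)) return *cached;
    auto fresh = std::make_unique<Feedback>(ReadFeedbackForCall(src));
    const Feedback& result = *fresh;
    cache_.emplace(Key(src), std::move(fresh));
    return result;
  }

 private:
  static std::pair<int, int> Key(const Source& s) {
    return {s.vector_id, s.slot};
  }

  const Feedback* Lookup(const Source& src) const {
    auto it = cache_.find(Key(src));
    return it == cache_.end() ? nullptr : it->second.get();
  }

  // Stand-in for decoding the FeedbackNexus on whichever thread is running.
  Feedback ReadFeedbackForCall(const Source&) const { return Feedback{false}; }

  std::map<std::pair<int, int>, std::unique_ptr<Feedback>> cache_;
};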
......@@ -15,7 +15,6 @@
#include "src/compiler/heap-refs.h"
#include "src/compiler/processed-feedback.h"
#include "src/compiler/refs-map.h"
#include "src/compiler/serializer-hints.h"
#include "src/execution/local-isolate.h"
#include "src/handles/handles.h"
#include "src/handles/persistent-handles.h"
......@@ -132,10 +131,9 @@ class V8_EXPORT_PRIVATE JSHeapBroker {
bool is_turboprop() const { return code_kind_ == CodeKind::TURBOPROP; }
NexusConfig feedback_nexus_config() const {
// TODO(mvstanton): when the broker gathers feedback on the background
// thread, this should return a local NexusConfig object which points
// to the associated LocalHeap.
return NexusConfig::FromMainThread(isolate());
return IsMainThread() ? NexusConfig::FromMainThread(isolate())
: NexusConfig::FromBackgroundThread(
isolate(), local_isolate()->heap());
}
enum BrokerMode { kDisabled, kSerializing, kSerialized, kRetired };
......@@ -183,12 +181,11 @@ class V8_EXPORT_PRIVATE JSHeapBroker {
bool HasFeedback(FeedbackSource const& source) const;
void SetFeedback(FeedbackSource const& source,
ProcessedFeedback const* feedback);
ProcessedFeedback const& GetFeedback(FeedbackSource const& source) const;
FeedbackSlotKind GetFeedbackSlotKind(FeedbackSource const& source) const;
// TODO(neis): Move these into serializer when we're always in the background.
ElementAccessFeedback const& ProcessFeedbackMapsForElementAccess(
MapHandles const& maps, KeyedAccessMode const& keyed_mode,
ZoneVector<MapRef>& maps, KeyedAccessMode const& keyed_mode,
FeedbackSlotKind slot_kind);
// Binary, comparison and for-in hints can be fully expressed via
......@@ -216,71 +213,25 @@ class V8_EXPORT_PRIVATE JSHeapBroker {
ProcessedFeedback const& ProcessFeedbackForBinaryOperation(
FeedbackSource const& source);
ProcessedFeedback const& ProcessFeedbackForCall(FeedbackSource const& source);
ProcessedFeedback const& ProcessFeedbackForCompareOperation(
FeedbackSource const& source);
ProcessedFeedback const& ProcessFeedbackForForIn(
FeedbackSource const& source);
ProcessedFeedback const& ProcessFeedbackForGlobalAccess(
FeedbackSource const& source);
ProcessedFeedback const& ProcessFeedbackForInstanceOf(
FeedbackSource const& source);
ProcessedFeedback const& ProcessFeedbackForPropertyAccess(
FeedbackSource const& source, AccessMode mode,
base::Optional<NameRef> static_name);
ProcessedFeedback const& ProcessFeedbackForArrayOrObjectLiteral(
FeedbackSource const& source);
ProcessedFeedback const& ProcessFeedbackForRegExpLiteral(
FeedbackSource const& source);
ProcessedFeedback const& ProcessFeedbackForTemplateObject(
FeedbackSource const& source);
bool FeedbackIsInsufficient(FeedbackSource const& source) const;
base::Optional<NameRef> GetNameFeedback(FeedbackNexus const& nexus);
// If {policy} is {kAssumeSerialized} and the broker doesn't know about the
// combination of {map}, {name}, and {access_mode}, returns Invalid.
PropertyAccessInfo GetPropertyAccessInfo(
MapRef map, NameRef name, AccessMode access_mode,
CompilationDependencies* dependencies = nullptr,
SerializationPolicy policy = SerializationPolicy::kAssumeSerialized);
CompilationDependencies* dependencies);
MinimorphicLoadPropertyAccessInfo GetPropertyAccessInfo(
MinimorphicLoadPropertyAccessFeedback const& feedback,
FeedbackSource const& source,
SerializationPolicy policy = SerializationPolicy::kAssumeSerialized);
// Used to separate the problem of a concurrent GetPropertyAccessInfo (GPAI)
// from serialization. GPAI is currently called both during the serialization
// phase, and on the background thread. While some crucial objects (like
// JSObject) still must be serialized, we do the following:
// - Run GPAI during serialization to discover and serialize required objects.
// - After the serialization phase, clear cached property access infos.
// - On the background thread, rerun GPAI in a concurrent setting. The cache
// has been cleared, thus the actual logic runs again.
// Once all required object kinds no longer require serialization, this
// should be removed together with all GPAI calls during serialization.
void ClearCachedPropertyAccessInfos() {
CHECK(FLAG_turbo_concurrent_get_property_access_info);
property_access_infos_.clear();
}
// As above, clear cached ObjectData that can be reconstructed, i.e. is
// either never-serialized or background-serialized.
void ClearReconstructibleData();
FeedbackSource const& source);
StringRef GetTypedArrayStringTag(ElementsKind kind);
bool ShouldBeSerializedForCompilation(const SharedFunctionInfoRef& shared,
const FeedbackVectorRef& feedback,
const HintsVector& arguments) const;
void SetSerializedForCompilation(const SharedFunctionInfoRef& shared,
const FeedbackVectorRef& feedback,
const HintsVector& arguments);
bool IsSerializedForCompilation(const SharedFunctionInfoRef& shared,
const FeedbackVectorRef& feedback) const;
bool IsMainThread() const {
return local_isolate() == nullptr || local_isolate()->is_main_thread();
}
......@@ -420,6 +371,7 @@ class V8_EXPORT_PRIVATE JSHeapBroker {
friend class ObjectData;
friend class PropertyCellData;
ProcessedFeedback const& GetFeedback(FeedbackSource const& source) const;
bool CanUseFeedback(const FeedbackNexus& nexus) const;
const ProcessedFeedback& NewInsufficientFeedback(FeedbackSlotKind kind) const;
......@@ -507,22 +459,6 @@ class V8_EXPORT_PRIVATE JSHeapBroker {
ZoneVector<ObjectData*> typed_array_string_tags_;
struct SerializedFunction {
SharedFunctionInfoRef shared;
FeedbackVectorRef feedback;
bool operator<(const SerializedFunction& other) const {
if (shared.object().address() < other.shared.object().address()) {
return true;
}
if (shared.object().address() == other.shared.object().address()) {
return feedback.object().address() < other.feedback.object().address();
}
return false;
}
};
ZoneMultimap<SerializedFunction, HintsVector> serialized_functions_;
CompilationDependencies* dependencies_ = nullptr;
// The MapUpdater mutex is used in recursive patterns; for example,
......
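Since feedback is now read lazily, possibly from the compiler's background thread, feedback_nexus_config() picks a main-thread or background-thread NexusConfig based on IsMainThread(). A standalone sketch of that dispatch pattern (Config and Broker here are simplified stand-ins, not the real NexusConfig/LocalIsolate API):

#include <iostream>
#include <thread>

// Simplified stand-in for NexusConfig: records which access rules apply.
struct Config {
  bool main_thread;
  static Config FromMainThread() { return {true}; }
  static Config FromBackgroundThread() { return {false}; }
};

class Broker {
 public:
  explicit Broker(std::thread::id main_id) : main_thread_id_(main_id) {}

  bool IsMainThread() const {
    return std::this_thread::get_id() == main_thread_id_;
  }

  // Mirrors feedback_nexus_config(): choose the configuration matching the
  // thread doing the read instead of always assuming the main thread.
  Config feedback_nexus_config() const {
    return IsMainThread() ? Config::FromMainThread()
                          : Config::FromBackgroundThread();
  }

 private:
  std::thread::id main_thread_id_;
};

int main() {
  Broker broker(std::this_thread::get_id());
  std::cout << "main thread: " << broker.feedback_nexus_config().main_thread << "\n";
  std::thread worker([&] {
    std::cout << "background:  " << broker.feedback_nexus_config().main_thread << "\n";
  });
  worker.join();
  return 0;
}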
......@@ -70,7 +70,7 @@ Reduction JSHeapCopyReducer::Reduce(Node* node) {
case IrOpcode::kJSCreateEmptyLiteralArray: {
FeedbackParameter const& p = FeedbackParameterOf(node->op());
if (p.feedback().IsValid()) {
broker()->ProcessFeedbackForArrayOrObjectLiteral(p.feedback());
broker()->GetFeedbackForArrayOrObjectLiteral(p.feedback());
}
break;
}
......@@ -82,7 +82,7 @@ Reduction JSHeapCopyReducer::Reduce(Node* node) {
FeedbackParameter const& p = FeedbackParameterOf(node->op());
if (p.feedback().IsValid()) {
// Unary ops are treated as binary ops with respect to feedback.
broker()->ProcessFeedbackForBinaryOperation(p.feedback());
broker()->GetFeedbackForBinaryOperation(p.feedback());
}
break;
}
......@@ -101,7 +101,7 @@ Reduction JSHeapCopyReducer::Reduce(Node* node) {
case IrOpcode::kJSShiftRightLogical: {
FeedbackParameter const& p = FeedbackParameterOf(node->op());
if (p.feedback().IsValid()) {
broker()->ProcessFeedbackForBinaryOperation(p.feedback());
broker()->GetFeedbackForBinaryOperation(p.feedback());
}
break;
}
......@@ -114,7 +114,7 @@ Reduction JSHeapCopyReducer::Reduce(Node* node) {
case IrOpcode::kJSStrictEqual: {
FeedbackParameter const& p = FeedbackParameterOf(node->op());
if (p.feedback().IsValid()) {
broker()->ProcessFeedbackForCompareOperation(p.feedback());
broker()->GetFeedbackForCompareOperation(p.feedback());
}
break;
}
......@@ -128,14 +128,14 @@ Reduction JSHeapCopyReducer::Reduce(Node* node) {
case IrOpcode::kJSCreateLiteralObject: {
CreateLiteralParameters const& p = CreateLiteralParametersOf(node->op());
if (p.feedback().IsValid()) {
broker()->ProcessFeedbackForArrayOrObjectLiteral(p.feedback());
broker()->GetFeedbackForArrayOrObjectLiteral(p.feedback());
}
break;
}
case IrOpcode::kJSCreateLiteralRegExp: {
CreateLiteralParameters const& p = CreateLiteralParametersOf(node->op());
if (p.feedback().IsValid()) {
broker()->ProcessFeedbackForRegExpLiteral(p.feedback());
broker()->GetFeedbackForRegExpLiteral(p.feedback());
}
break;
}
......@@ -144,7 +144,7 @@ Reduction JSHeapCopyReducer::Reduce(Node* node) {
GetTemplateObjectParametersOf(node->op());
MakeRef(broker(), p.shared());
MakeRef(broker(), p.description());
broker()->ProcessFeedbackForTemplateObject(p.feedback());
broker()->GetFeedbackForTemplateObject(p.feedback());
break;
}
case IrOpcode::kJSCreateWithContext: {
......@@ -155,8 +155,8 @@ Reduction JSHeapCopyReducer::Reduce(Node* node) {
NamedAccess const& p = NamedAccessOf(node->op());
NameRef name = MakeRef(broker(), p.name());
if (p.feedback().IsValid()) {
broker()->ProcessFeedbackForPropertyAccess(p.feedback(),
AccessMode::kLoad, name);
broker()->GetFeedbackForPropertyAccess(p.feedback(), AccessMode::kLoad,
name);
}
break;
}
......@@ -164,8 +164,8 @@ Reduction JSHeapCopyReducer::Reduce(Node* node) {
NamedAccess const& p = NamedAccessOf(node->op());
NameRef name = MakeRef(broker(), p.name());
if (p.feedback().IsValid()) {
broker()->ProcessFeedbackForPropertyAccess(p.feedback(),
AccessMode::kLoad, name);
broker()->GetFeedbackForPropertyAccess(p.feedback(), AccessMode::kLoad,
name);
}
break;
}
......@@ -212,8 +212,8 @@ Reduction JSHeapCopyReducer::Reduce(Node* node) {
PropertyAccess const& p = PropertyAccessOf(node->op());
AccessMode access_mode = AccessMode::kLoad;
if (p.feedback().IsValid()) {
broker()->ProcessFeedbackForPropertyAccess(p.feedback(), access_mode,
base::nullopt);
broker()->GetFeedbackForPropertyAccess(p.feedback(), access_mode,
base::nullopt);
}
break;
}
......
......@@ -37,13 +37,6 @@ bool CanConsiderForInlining(JSHeapBroker* broker,
}
DCHECK(shared.HasBytecodeArray());
if (!broker->IsSerializedForCompilation(shared, feedback_vector)) {
TRACE_BROKER_MISSING(
broker, "data for " << shared << " (not serialized for compilation)");
TRACE("Cannot consider " << shared << " for inlining with "
<< feedback_vector << " (missing data)");
return false;
}
TRACE("Considering " << shared << " for inlining with " << feedback_vector);
return true;
}
......
......@@ -530,12 +530,6 @@ Reduction JSInliner::ReduceJSCall(Node* node) {
// Determine the target's feedback vector and its context.
Node* context;
FeedbackCellRef feedback_cell = DetermineCallContext(node, &context);
if (!broker()->IsSerializedForCompilation(*shared_info,
*feedback_cell.value())) {
TRACE("Not inlining " << *shared_info << " into " << outer_shared_info
<< " because it wasn't serialized for compilation.");
return NoChange();
}
TRACE("Inlining " << *shared_info << " into " << outer_shared_info
<< ((exception_target != nullptr) ? " (inside try-block)"
......
......@@ -410,10 +410,7 @@ Reduction JSNativeContextSpecialization::ReduceJSInstanceOf(Node* node) {
MapRef receiver_map = receiver_ref.map();
NameRef name = MakeRef(broker(), isolate()->factory()->has_instance_symbol());
PropertyAccessInfo access_info = broker()->GetPropertyAccessInfo(
receiver_map, name, AccessMode::kLoad, dependencies(),
broker()->is_concurrent_inlining()
? SerializationPolicy::kAssumeSerialized
: SerializationPolicy::kSerializeIfNeeded);
receiver_map, name, AccessMode::kLoad, dependencies());
// TODO(v8:11457) Support dictionary mode holders here.
if (access_info.IsInvalid() || access_info.HasDictionaryHolder()) {
......@@ -723,10 +720,7 @@ Reduction JSNativeContextSpecialization::ReduceJSResolvePromise(Node* node) {
access_infos.push_back(broker()->GetPropertyAccessInfo(
MakeRef(broker(), map),
MakeRef(broker(), isolate()->factory()->then_string()),
AccessMode::kLoad, dependencies(),
broker()->is_concurrent_inlining()
? SerializationPolicy::kAssumeSerialized
: SerializationPolicy::kSerializeIfNeeded));
AccessMode::kLoad, dependencies()));
}
PropertyAccessInfo access_info =
access_info_factory.FinalizePropertyAccessInfosAsOne(access_infos,
......@@ -1077,11 +1071,7 @@ Reduction JSNativeContextSpecialization::ReduceMinimorphicPropertyAccess(
}
MinimorphicLoadPropertyAccessInfo access_info =
broker()->GetPropertyAccessInfo(
feedback, source,
broker()->is_concurrent_inlining()
? SerializationPolicy::kAssumeSerialized
: SerializationPolicy::kSerializeIfNeeded);
broker()->GetPropertyAccessInfo(feedback, source);
if (access_info.IsInvalid()) return NoChange();
PropertyAccessBuilder access_builder(jsgraph(), broker(), nullptr);
......@@ -1091,8 +1081,8 @@ Reduction JSNativeContextSpecialization::ReduceMinimorphicPropertyAccess(
}
ZoneHandleSet<Map> maps;
for (Handle<Map> map : feedback.maps()) {
maps.insert(map, graph()->zone());
for (const MapRef& map : feedback.maps()) {
maps.insert(map.object(), graph()->zone());
}
effect = graph()->NewNode(
......@@ -1148,7 +1138,9 @@ Reduction JSNativeContextSpecialization::ReduceNamedAccess(
// Either infer maps from the graph or use the feedback.
ZoneVector<Handle<Map>> lookup_start_object_maps(zone());
if (!InferMaps(lookup_start_object, effect, &lookup_start_object_maps)) {
lookup_start_object_maps = feedback.maps();
for (const MapRef& map : feedback.maps()) {
lookup_start_object_maps.push_back(map.object());
}
}
RemoveImpossibleMaps(lookup_start_object, &lookup_start_object_maps);
......@@ -1180,10 +1172,7 @@ Reduction JSNativeContextSpecialization::ReduceNamedAccess(
MapRef map = MakeRef(broker(), map_handle);
if (map.is_deprecated()) continue;
PropertyAccessInfo access_info = broker()->GetPropertyAccessInfo(
map, feedback.name(), access_mode, dependencies(),
broker()->is_concurrent_inlining()
? SerializationPolicy::kAssumeSerialized
: SerializationPolicy::kSerializeIfNeeded);
map, feedback.name(), access_mode, dependencies());
access_infos_for_feedback.push_back(access_info);
}
......@@ -1636,6 +1625,7 @@ Reduction JSNativeContextSpecialization::ReduceElementAccessOnString(
}
namespace {
base::Optional<JSTypedArrayRef> GetTypedArrayConstant(JSHeapBroker* broker,
Node* receiver) {
HeapObjectMatcher m(receiver);
......@@ -1646,6 +1636,7 @@ base::Optional<JSTypedArrayRef> GetTypedArrayConstant(JSHeapBroker* broker,
if (typed_array.is_on_heap()) return base::nullopt;
return typed_array;
}
} // namespace
void JSNativeContextSpecialization::RemoveImpossibleMaps(
......@@ -1664,6 +1655,20 @@ void JSNativeContextSpecialization::RemoveImpossibleMaps(
}
}
void JSNativeContextSpecialization::RemoveImpossibleMaps(
Node* object, ZoneVector<MapRef>* maps) const {
base::Optional<MapRef> root_map = InferRootMap(object);
if (root_map.has_value() && !root_map->is_abandoned_prototype_map()) {
maps->erase(std::remove_if(maps->begin(), maps->end(),
[root_map](const MapRef& map) {
return map.is_abandoned_prototype_map() ||
(map.FindRootMap().has_value() &&
!map.FindRootMap()->equals(*root_map));
}),
maps->end());
}
}
// Possibly refine the feedback using inferred map information from the graph.
ElementAccessFeedback const&
JSNativeContextSpecialization::TryRefineElementAccessFeedback(
......
......@@ -213,6 +213,7 @@ class V8_EXPORT_PRIVATE JSNativeContextSpecialization final
bool CanTreatHoleAsUndefined(ZoneVector<MapRef> const& receiver_maps);
void RemoveImpossibleMaps(Node* object, ZoneVector<Handle<Map>>* maps) const;
void RemoveImpossibleMaps(Node* object, ZoneVector<MapRef>* maps) const;
ElementAccessFeedback const& TryRefineElementAccessFeedback(
ElementAccessFeedback const& feedback, Node* receiver,
......
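The new ZoneVector<MapRef> overload of RemoveImpossibleMaps shown above filters candidates in place with the erase/remove_if idiom, dropping abandoned prototype maps and maps whose root map differs from the root inferred for the object. A standalone sketch of the same filtering shape over plain values (FakeMap and the integer root ids are illustrative, not V8 types):

#include <algorithm>
#include <cassert>
#include <vector>

// Illustrative stand-in for MapRef: a root id plus an "abandoned" bit.
struct FakeMap {
  int root_id;
  bool abandoned_prototype;
};

// Keep only maps that could still describe the object, given its inferred root.
void RemoveImpossibleMaps(int inferred_root_id, std::vector<FakeMap>* maps) {
  maps->erase(std::remove_if(maps->begin(), maps->end(),
                             [inferred_root_id](const FakeMap& map) {
                               return map.abandoned_prototype ||
                                      map.root_id != inferred_root_id;
                             }),
              maps->end());
}

int main() {
  std::vector<FakeMap> maps = {{1, false}, {2, false}, {1, true}};
  RemoveImpossibleMaps(/*inferred_root_id=*/1, &maps);
  assert(maps.size() == 1 && maps[0].root_id == 1);
  return 0;
}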
......@@ -69,7 +69,6 @@
#include "src/compiler/schedule.h"
#include "src/compiler/scheduler.h"
#include "src/compiler/select-lowering.h"
#include "src/compiler/serializer-for-background-compilation.h"
#include "src/compiler/simplified-lowering.h"
#include "src/compiler/simplified-operator-reducer.h"
#include "src/compiler/simplified-operator.h"
......@@ -675,8 +674,8 @@ class PipelineImpl final {
template <typename Phase, typename... Args>
void Run(Args&&... args);
// Step A.1. Serialize the data needed for the compilation front-end.
void Serialize();
// Step A.1. Initialize the heap broker.
void InitializeHeapBroker();
// Step A.2. Run the graph creation and initial optimization passes.
bool CreateGraph();
......@@ -1204,10 +1203,11 @@ PipelineCompilationJob::Status PipelineCompilationJob::PrepareJobImpl(
if (compilation_info()->is_osr()) data_.InitializeOsrHelper();
// Serialize() and CreateGraph() may already use IsPendingAllocation.
// InitializeHeapBroker() and CreateGraph() may already use
// IsPendingAllocation.
isolate->heap()->PublishPendingAllocations();
pipeline_.Serialize();
pipeline_.InitializeHeapBroker();
if (!data_.broker()->is_concurrent_inlining()) {
if (!pipeline_.CreateGraph()) {
......@@ -1539,42 +1539,6 @@ struct CopyMetadataForConcurrentCompilePhase {
}
};
struct SerializationPhase {
DECL_MAIN_THREAD_PIPELINE_PHASE_CONSTANTS(Serialization)
void Run(PipelineData* data, Zone* temp_zone) {
SerializerForBackgroundCompilationFlags flags;
if (data->info()->bailout_on_uninitialized()) {
flags |= SerializerForBackgroundCompilationFlag::kBailoutOnUninitialized;
}
if (data->info()->source_positions()) {
flags |= SerializerForBackgroundCompilationFlag::kCollectSourcePositions;
}
if (data->info()->analyze_environment_liveness()) {
flags |=
SerializerForBackgroundCompilationFlag::kAnalyzeEnvironmentLiveness;
}
if (data->info()->inlining()) {
flags |= SerializerForBackgroundCompilationFlag::kEnableTurboInlining;
}
RunSerializerForBackgroundCompilation(
data->zone_stats(), data->broker(), data->dependencies(),
data->info()->closure(), flags, data->info()->osr_offset());
if (data->specialization_context().IsJust()) {
MakeRef(data->broker(),
data->specialization_context().FromJust().context);
}
if (FLAG_stress_concurrent_inlining) {
if (FLAG_turbo_concurrent_get_property_access_info) {
data->broker()->ClearCachedPropertyAccessInfos();
data->dependencies()->ClearForConcurrentGetPropertyAccessInfo();
}
// Force re-serialization from the background thread.
data->broker()->ClearReconstructibleData();
}
}
};
struct TypedLoweringPhase {
DECL_PIPELINE_PHASE_CONSTANTS(TypedLowering)
......@@ -2657,8 +2621,8 @@ void PipelineImpl::RunPrintAndVerify(const char* phase, bool untyped) {
}
}
void PipelineImpl::Serialize() {
PipelineData* data = this->data_;
void PipelineImpl::InitializeHeapBroker() {
PipelineData* data = data_;
data->BeginPhaseKind("V8.TFBrokerInitAndSerialization");
......@@ -2682,7 +2646,6 @@ void PipelineImpl::Serialize() {
data->broker()->SetTargetNativeContextRef(data->native_context());
if (data->broker()->is_concurrent_inlining()) {
Run<HeapBrokerInitializationPhase>();
Run<SerializationPhase>();
data->broker()->StopSerializing();
}
data->EndPhaseKind();
......@@ -3353,7 +3316,7 @@ MaybeHandle<Code> Pipeline::GenerateCodeForTesting(
CompilationHandleScope compilation_scope(isolate, info);
CanonicalHandleScope canonical(isolate, info);
info->ReopenHandlesInNewHandleScope(isolate);
pipeline.Serialize();
pipeline.InitializeHeapBroker();
// Emulating the proper pipeline, we call CreateGraph on different places
// (i.e before or after creating a LocalIsolateScope) depending on
// is_concurrent_inlining.
......
......@@ -162,15 +162,15 @@ class ElementAccessFeedback : public ProcessedFeedback {
class NamedAccessFeedback : public ProcessedFeedback {
public:
NamedAccessFeedback(NameRef const& name, ZoneVector<Handle<Map>> const& maps,
NamedAccessFeedback(NameRef const& name, ZoneVector<MapRef> const& maps,
FeedbackSlotKind slot_kind);
NameRef const& name() const { return name_; }
ZoneVector<Handle<Map>> const& maps() const { return maps_; }
ZoneVector<MapRef> const& maps() const { return maps_; }
private:
NameRef const name_;
ZoneVector<Handle<Map>> const maps_;
ZoneVector<MapRef> const maps_;
};
class MinimorphicLoadPropertyAccessFeedback : public ProcessedFeedback {
......@@ -178,19 +178,19 @@ class MinimorphicLoadPropertyAccessFeedback : public ProcessedFeedback {
MinimorphicLoadPropertyAccessFeedback(NameRef const& name,
FeedbackSlotKind slot_kind,
Handle<Object> handler,
ZoneVector<Handle<Map>> const& maps,
ZoneVector<MapRef> const& maps,
bool has_migration_target_maps);
NameRef const& name() const { return name_; }
bool is_monomorphic() const { return maps_.size() == 1; }
Handle<Object> handler() const { return handler_; }
ZoneVector<Handle<Map>> const& maps() const { return maps_; }
ZoneVector<MapRef> const& maps() const { return maps_; }
bool has_migration_target_maps() const { return has_migration_target_maps_; }
private:
NameRef const name_;
Handle<Object> const handler_;
ZoneVector<Handle<Map>> const maps_;
ZoneVector<MapRef> const maps_;
bool const has_migration_target_maps_;
};
......
This source diff could not be displayed because it is too large.
// Copyright 2018 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_COMPILER_SERIALIZER_FOR_BACKGROUND_COMPILATION_H_
#define V8_COMPILER_SERIALIZER_FOR_BACKGROUND_COMPILATION_H_
#include "src/handles/handles.h"
namespace v8 {
namespace internal {
class BytecodeOffset;
class Zone;
namespace compiler {
class CompilationDependencies;
class JSHeapBroker;
class ZoneStats;
enum class SerializerForBackgroundCompilationFlag : uint8_t {
kBailoutOnUninitialized = 1 << 0,
kCollectSourcePositions = 1 << 1,
kAnalyzeEnvironmentLiveness = 1 << 2,
kEnableTurboInlining = 1 << 3,
};
using SerializerForBackgroundCompilationFlags =
base::Flags<SerializerForBackgroundCompilationFlag>;
void RunSerializerForBackgroundCompilation(
ZoneStats* zone_stats, JSHeapBroker* broker,
CompilationDependencies* dependencies, Handle<JSFunction> closure,
SerializerForBackgroundCompilationFlags flags, BytecodeOffset osr_offset);
} // namespace compiler
} // namespace internal
} // namespace v8
#endif // V8_COMPILER_SERIALIZER_FOR_BACKGROUND_COMPILATION_H_
// Copyright 2019 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// This file defines the hints classes gathered temporarily by the
// SerializerForBackgroundCompilation while it's analysing the bytecode
// and copying the necessary data to the JSHeapBroker for further usage
// by the reducers that run on the background thread.
#ifndef V8_COMPILER_SERIALIZER_HINTS_H_
#define V8_COMPILER_SERIALIZER_HINTS_H_
#include "src/compiler/functional-list.h"
#include "src/handles/handles.h"
#include "src/zone/zone-containers.h"
namespace v8 {
namespace internal {
class Context;
class Object;
class Map;
namespace compiler {
template <typename T, typename EqualTo>
class FunctionalSet {
public:
void Add(T const& elem, Zone* zone) {
for (auto const& l : data_) {
if (equal_to(l, elem)) return;
}
data_.PushFront(elem, zone);
}
void Union(FunctionalSet<T, EqualTo> other, Zone* zone) {
if (!data_.TriviallyEquals(other.data_)) {
// Choose the larger side as tail.
if (data_.Size() < other.data_.Size()) std::swap(data_, other.data_);
for (auto const& elem : other.data_) Add(elem, zone);
}
}
bool IsEmpty() const { return data_.begin() == data_.end(); }
// Warning: quadratic time complexity.
bool Includes(FunctionalSet<T, EqualTo> const& other) const {
return std::all_of(other.begin(), other.end(), [&](T const& other_elem) {
return std::any_of(this->begin(), this->end(), [&](T const& this_elem) {
return equal_to(this_elem, other_elem);
});
});
}
bool operator==(const FunctionalSet<T, EqualTo>& other) const {
return this->data_.TriviallyEquals(other.data_) ||
(this->data_.Size() == other.data_.Size() && this->Includes(other) &&
other.Includes(*this));
}
bool operator!=(const FunctionalSet<T, EqualTo>& other) const {
return !(*this == other);
}
size_t Size() const { return data_.Size(); }
using iterator = typename FunctionalList<T>::iterator;
iterator begin() const { return data_.begin(); }
iterator end() const { return data_.end(); }
private:
static EqualTo equal_to;
FunctionalList<T> data_;
};
template <typename T, typename EqualTo>
EqualTo FunctionalSet<T, EqualTo>::equal_to;
struct VirtualContext {
unsigned int distance;
Handle<Context> context;
VirtualContext(unsigned int distance_in, Handle<Context> context_in)
: distance(distance_in), context(context_in) {
CHECK_GT(distance, 0);
}
bool operator==(const VirtualContext& other) const {
return context.equals(other.context) && distance == other.distance;
}
};
class VirtualClosure;
struct VirtualBoundFunction;
using ConstantsSet = FunctionalSet<Handle<Object>, Handle<Object>::equal_to>;
using VirtualContextsSet =
FunctionalSet<VirtualContext, std::equal_to<VirtualContext>>;
using MapsSet = FunctionalSet<Handle<Map>, Handle<Map>::equal_to>;
using VirtualClosuresSet =
FunctionalSet<VirtualClosure, std::equal_to<VirtualClosure>>;
using VirtualBoundFunctionsSet =
FunctionalSet<VirtualBoundFunction, std::equal_to<VirtualBoundFunction>>;
struct HintsImpl;
class JSHeapBroker;
class Hints {
public:
Hints() = default; // Empty.
static Hints SingleConstant(Handle<Object> constant, Zone* zone);
static Hints SingleMap(Handle<Map> map, Zone* zone);
// For inspection only.
ConstantsSet constants() const;
MapsSet maps() const;
VirtualClosuresSet virtual_closures() const;
VirtualContextsSet virtual_contexts() const;
VirtualBoundFunctionsSet virtual_bound_functions() const;
bool IsEmpty() const;
bool operator==(Hints const& other) const;
bool operator!=(Hints const& other) const;
#ifdef ENABLE_SLOW_DCHECKS
bool Includes(Hints const& other) const;
#endif
Hints Copy(Zone* zone) const; // Shallow.
Hints CopyToParentZone(Zone* zone, JSHeapBroker* broker) const; // Deep.
// As an optimization, empty hints can be represented as {impl_} being
// {nullptr}, i.e., as not having allocated a {HintsImpl} object. As a
// consequence, some operations need to force allocation prior to doing their
// job. In particular, backpropagation from a child serialization
// can only work if the hints were already allocated in the parent zone.
bool IsAllocated() const { return impl_ != nullptr; }
void EnsureShareable(Zone* zone) { EnsureAllocated(zone, false); }
// Make {this} an alias of {other}.
void Reset(Hints* other, Zone* zone);
void Merge(Hints const& other, Zone* zone, JSHeapBroker* broker);
// Destructive updates: if the hints are shared by several registers,
// then the following updates will be seen by all of them:
void AddConstant(Handle<Object> constant, Zone* zone, JSHeapBroker* broker);
void AddMap(Handle<Map> map, Zone* zone, JSHeapBroker* broker,
bool check_zone_equality = true);
void AddVirtualClosure(VirtualClosure const& virtual_closure, Zone* zone,
JSHeapBroker* broker);
void AddVirtualContext(VirtualContext const& virtual_context, Zone* zone,
JSHeapBroker* broker);
void AddVirtualBoundFunction(VirtualBoundFunction const& bound_function,
Zone* zone, JSHeapBroker* broker);
void Add(Hints const& other, Zone* zone, JSHeapBroker* broker);
private:
friend std::ostream& operator<<(std::ostream&, const Hints& hints);
HintsImpl* impl_ = nullptr;
void EnsureAllocated(Zone* zone, bool check_zone_equality = true);
// Helper for Add and Merge.
bool Union(Hints const& other);
static const size_t kMaxHintsSize = 50;
static_assert(kMaxHintsSize >= 1, "must allow for at least one hint");
};
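// ---------------------------------------------------------------------------
// Editor's sketch (not part of the original header): Hints stores its data in
// a lazily allocated, zone-owned HintsImpl. An empty Hints simply has
// impl_ == nullptr; the first mutation allocates, and Reset() makes several
// registers alias one impl so that the "destructive" Add* updates above are
// observed through every alias. The self-contained program below illustrates
// that pattern with a shared_ptr-backed LazyHints type; LazyHints and Impl
// are hypothetical names, not the V8 classes.
#include <cstddef>
#include <cstdio>
#include <memory>
#include <set>

class LazyHints {
 public:
  bool IsAllocated() const { return impl_ != nullptr; }
  bool IsEmpty() const { return impl_ == nullptr || impl_->constants.empty(); }

  // Destructive update: visible through every alias of the same impl.
  void AddConstant(int constant) {
    EnsureAllocated();
    impl_->constants.insert(constant);
  }

  // Make {this} an alias of {other}, in the spirit of Hints::Reset().
  void Reset(LazyHints* other) {
    other->EnsureAllocated();
    impl_ = other->impl_;
  }

  std::size_t Size() const { return impl_ ? impl_->constants.size() : 0; }

 private:
  struct Impl {
    std::set<int> constants;
  };
  // Allocation is deferred until the first mutation, as with HintsImpl.
  void EnsureAllocated() {
    if (!impl_) impl_ = std::make_shared<Impl>();
  }
  std::shared_ptr<Impl> impl_;  // The real class holds a zone-allocated raw ptr.
};

int main() {
  LazyHints a;        // empty: no impl allocated yet
  LazyHints b;
  b.Reset(&a);        // forces allocation in {a}, then aliases it
  a.AddConstant(42);  // destructive update ...
  std::printf("a_size=%zu b_size=%zu\n", a.Size(), b.Size());
  // ... prints a_size=1 b_size=1: the update is visible through {b} as well.
  return 0;
}
// ---------------------------------------------------------------------------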
using HintsVector = ZoneVector<Hints>;
} // namespace compiler
} // namespace internal
} // namespace v8
#endif // V8_COMPILER_SERIALIZER_HINTS_H_
......@@ -706,9 +706,6 @@ DEFINE_IMPLICATION(stress_concurrent_inlining, concurrent_inlining)
DEFINE_NEG_IMPLICATION(stress_concurrent_inlining, lazy_feedback_allocation)
DEFINE_WEAK_VALUE_IMPLICATION(stress_concurrent_inlining, interrupt_budget,
15 * KB)
DEFINE_BOOL(
turbo_concurrent_get_property_access_info, true,
"concurrently call GetPropertyAccessInfo (only with --concurrent-inlining)")
DEFINE_INT(max_serializer_nesting, 25,
"maximum levels for nesting child serializers")
DEFINE_WEAK_IMPLICATION(future, concurrent_inlining)
......
......@@ -216,8 +216,12 @@ Handle<Smi> StoreHandler::StoreSlow(Isolate* isolate,
}
Handle<Smi> StoreHandler::StoreProxy(Isolate* isolate) {
return handle(StoreProxy(), isolate);
}
Smi StoreHandler::StoreProxy() {
int config = KindBits::encode(kProxy);
return handle(Smi::FromInt(config), isolate);
return Smi::FromInt(config);
}
Handle<Smi> StoreHandler::StoreField(Isolate* isolate, Kind kind,
......
......@@ -354,6 +354,7 @@ class StoreHandler final : public DataHandler {
// Creates a Smi-handler for storing a property on a proxy.
static inline Handle<Smi> StoreProxy(Isolate* isolate);
static inline Smi StoreProxy();
// Decodes the KeyedAccessStoreMode from a {handler}.
static KeyedAccessStoreMode GetKeyedAccessStoreMode(MaybeObject handler);
......
......@@ -1244,14 +1244,14 @@ KeyedAccessStoreMode FeedbackNexus::GetKeyedAccessStoreMode() const {
continue;
} else {
Code code = FromCodeT(CodeT::cast(data_handler->smi_handler()));
handler = handle(code, vector().GetIsolate());
handler = config()->NewHandle(code);
}
} else if (maybe_code_handler.object()->IsSmi()) {
// Skip for Proxy Handlers.
if (*(maybe_code_handler.object()) ==
*StoreHandler::StoreProxy(GetIsolate()))
if (*maybe_code_handler.object() == StoreHandler::StoreProxy()) {
continue;
}
// Decode the KeyedAccessStoreMode information from the Handler.
mode = StoreHandler::GetKeyedAccessStoreMode(*maybe_code_handler);
if (mode != STANDARD_STORE) return mode;
......@@ -1260,7 +1260,7 @@ KeyedAccessStoreMode FeedbackNexus::GetKeyedAccessStoreMode() const {
// Element store without prototype chain check.
if (V8_EXTERNAL_CODE_SPACE_BOOL) {
Code code = FromCodeT(CodeT::cast(*maybe_code_handler.object()));
handler = handle(code, vector().GetIsolate());
handler = config()->NewHandle(code);
} else {
handler = Handle<Code>::cast(maybe_code_handler.object());
}
......
......@@ -89,8 +89,6 @@ v8_source_set("cctest_sources") {
"compiler/function-tester.cc",
"compiler/function-tester.h",
"compiler/node-observer-tester.h",
"compiler/serializer-tester.cc",
"compiler/serializer-tester.h",
"compiler/test-basic-block-profiler.cc",
"compiler/test-branch-combine.cc",
"compiler/test-calls-with-arraylike-or-spread.cc",
......
......@@ -136,6 +136,10 @@
'test-strings/Traverse': [PASS, HEAVY],
'test-swiss-name-dictionary-csa/DeleteAtBoundaries': [PASS, HEAVY],
'test-swiss-name-dictionary-csa/SameH2': [PASS, HEAVY],
# TODO(v8:12030): Fix lazy source positions in combination with concurrent
# inlining.
'test-cpu-profiler/DetailedSourcePositionAPI_Inlining': [SKIP],
}], # ALWAYS
##############################################################################
......@@ -611,7 +615,6 @@
# Tests that generate code at runtime.
'codegen-tester/*': [SKIP],
'serializer-tester/*': [SKIP],
'test-accessor-assembler/*': [SKIP],
'test-assembler-*': [SKIP],
'test-basic-block-profiler/*': [SKIP],
......@@ -686,17 +689,6 @@
##############################################################################
['variant == jitless', {
# https://crbug.com/v8/7777
'serializer-tester/SerializeCallAnyReceiver': [SKIP],
'serializer-tester/SerializeCallArguments': [SKIP],
'serializer-tester/SerializeCallProperty': [SKIP],
'serializer-tester/SerializeCallProperty2': [SKIP],
'serializer-tester/SerializeCallUndefinedReceiver': [SKIP],
'serializer-tester/SerializeCallUndefinedReceiver2': [SKIP],
'serializer-tester/SerializeCallWithSpread': [SKIP],
'serializer-tester/SerializeConstruct': [SKIP],
'serializer-tester/SerializeConstructWithSpread': [SKIP],
'serializer-tester/SerializeInlinedClosure': [SKIP],
'serializer-tester/SerializeInlinedFunction': [SKIP],
'test-cpu-profiler/TickLinesOptimized': [SKIP],
'test-heap/TestOptimizeAfterBytecodeFlushingCandidate': [SKIP],
'test-js-to-wasm/*': [SKIP],
......@@ -742,8 +734,6 @@
# Turboprop doesn't use call feedback and hence doesn't inline even if
# the inlining flag is explicitly set.
'test-cpu-profiler/DetailedSourcePositionAPI_Inlining': [SKIP],
'serializer-tester/BoundFunctionArguments': [SKIP],
'serializer-tester/BoundFunctionTarget': [SKIP],
'test-calls-with-arraylike-or-spread/*': [SKIP],
'test-js-to-wasm/*': [SKIP],
}], # variant == turboprop or variant == turboprop_as_toptier
......@@ -819,28 +809,7 @@
'test-invalidated-slots/InvalidatedSlotsNoInvalidatedRanges': [SKIP],
'test-invalidated-slots/InvalidatedSlotsResetObjectRegression': [SKIP],
'test-invalidated-slots/InvalidatedSlotsSomeInvalidatedRanges': [SKIP],
# Requires --concurrent_inlining / --finalize_streaming_on_background:
'serializer-tester/ArrowFunctionInlined': [SKIP],
'serializer-tester/BoundFunctionArguments': [SKIP],
'serializer-tester/BoundFunctionResult': [SKIP],
'serializer-tester/BoundFunctionTarget': [SKIP],
'serializer-tester/MergeJumpTargetEnvironment': [SKIP],
'serializer-tester/MultipleFunctionCalls': [SKIP],
'serializer-tester/SerializeCallAnyReceiver': [SKIP],
'serializer-tester/SerializeCallArguments': [SKIP],
'serializer-tester/SerializeCallProperty': [SKIP],
'serializer-tester/SerializeCallProperty2': [SKIP],
'serializer-tester/SerializeCallUndefinedReceiver': [SKIP],
'serializer-tester/SerializeCallUndefinedReceiver2': [SKIP],
'serializer-tester/SerializeCallWithSpread': [SKIP],
'serializer-tester/SerializeConditionalJump': [SKIP],
'serializer-tester/SerializeConstruct': [SKIP],
'serializer-tester/SerializeConstructSuper': [SKIP],
'serializer-tester/SerializeConstructWithSpread': [SKIP],
'serializer-tester/SerializeEmptyFunction': [SKIP],
'serializer-tester/SerializeInlinedClosure': [SKIP],
'serializer-tester/SerializeInlinedFunction': [SKIP],
'serializer-tester/SerializeUnconditionalJump': [SKIP],
# Requires --finalize_streaming_on_background:
'test-concurrent-allocation/ConcurrentAllocationWhileMainThreadIsParked': [SKIP],
'test-concurrent-allocation/ConcurrentAllocationWhileMainThreadParksAndUnparks': [SKIP],
'test-concurrent-allocation/ConcurrentAllocationWhileMainThreadRunsWithSafepoints': [SKIP],
......
// Copyright 2019 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// Serializer tests don't make sense in lite mode, as it doesn't gather
// IC feedback.
#ifndef V8_LITE_MODE
#include "test/cctest/compiler/serializer-tester.h"
#include "src/api/api-inl.h"
#include "src/codegen/optimized-compilation-info.h"
#include "src/compiler/serializer-for-background-compilation.h"
#include "src/compiler/zone-stats.h"
#include "src/zone/zone.h"
namespace v8 {
namespace internal {
namespace compiler {
SerializerTester::SerializerTester(const char* global_source,
const char* local_source)
: canonical_(main_isolate()) {
// The tests only make sense in the context of concurrent compilation.
FLAG_concurrent_inlining = true;
// The tests don't make sense when optimizations are turned off.
FLAG_opt = true;
// We need IC feedback to feed the serializer.
FLAG_use_ic = true;
// We need manual control over when a given function is optimized.
FLAG_always_opt = false;
// We need allocation of executable memory for the compilation.
FLAG_jitless = false;
FLAG_allow_natives_syntax = true;
FlagList::EnforceFlagImplications();
CompileRun(global_source);
std::string function_string = "(function() { ";
function_string += local_source;
function_string += " })();";
Handle<JSFunction> function = Handle<JSFunction>::cast(v8::Utils::OpenHandle(
*v8::Local<v8::Function>::Cast(CompileRun(function_string.c_str()))));
uint32_t flags = i::OptimizedCompilationInfo::kInlining;
Optimize(function, main_zone(), main_isolate(), flags, &broker_);
// Update handle to the corresponding serialized Handle in the broker.
function =
broker_->FindCanonicalPersistentHandleForTesting<JSFunction>(*function);
function_ = MakeRef(broker(), function);
DCHECK_NOT_NULL(broker_->dependencies());
}
TEST(SerializeEmptyFunction) {
SerializerTester tester(
"", "function f() {}; %EnsureFeedbackVectorForFunction(f); return f;");
JSFunctionRef function = tester.function();
CHECK(tester.broker()->IsSerializedForCompilation(
function.shared(tester.broker()->dependencies()),
function.feedback_vector(tester.broker()->dependencies())));
}
// This helper function allows for testing whether an inlinee candidate
// was properly serialized. It expects that the top-level function (that is
// run through the SerializerTester) will return its inlinee candidate.
void CheckForSerializedInlinee(const char* global_source,
const char* local_source, int argc = 0,
Handle<Object> argv[] = {}) {
SerializerTester tester(global_source, local_source);
JSFunctionRef f = tester.function();
CHECK(tester.broker()->IsSerializedForCompilation(
f.shared(tester.broker()->dependencies()),
f.feedback_vector(tester.broker()->dependencies())));
MaybeHandle<Object> g_obj = Execution::Call(
tester.isolate(), tester.function().object(),
tester.isolate()->factory()->undefined_value(), argc, argv);
Handle<Object> g;
CHECK(g_obj.ToHandle(&g));
CHECK_WITH_MSG(
g->IsJSFunction(),
"The return value of the outer function must be a function too");
Handle<JSFunction> g_func = Handle<JSFunction>::cast(g);
// Look up corresponding serialized Handles in the broker.
Handle<SharedFunctionInfo> sfi(
tester.broker()
->FindCanonicalPersistentHandleForTesting<SharedFunctionInfo>(
g_func->shared()));
SharedFunctionInfoRef g_sfi = MakeRef(tester.broker(), sfi);
Handle<FeedbackVector> fv(
tester.broker()->FindCanonicalPersistentHandleForTesting<FeedbackVector>(
g_func->feedback_vector()));
FeedbackVectorRef g_fv = MakeRef(tester.broker(), fv);
CHECK(tester.broker()->IsSerializedForCompilation(g_sfi, g_fv));
}
TEST(SerializeInlinedClosure) {
CheckForSerializedInlinee("",
"function f() {"
" function g(){ return g; }"
" %EnsureFeedbackVectorForFunction(g);"
" return g();"
"};"
"%EnsureFeedbackVectorForFunction(f);"
"f(); return f;");
}
TEST(SerializeInlinedFunction) {
CheckForSerializedInlinee("",
"function g() {};"
"%EnsureFeedbackVectorForFunction(g);"
"function f() {"
" g(); return g;"
"};"
"%EnsureFeedbackVectorForFunction(f);"
"f(); return f;");
}
TEST(SerializeCallUndefinedReceiver) {
CheckForSerializedInlinee("",
"function g(a,b,c) {};"
"%EnsureFeedbackVectorForFunction(g);"
"function f() {"
" g(1,2,3); return g;"
"};"
"%EnsureFeedbackVectorForFunction(f);"
"f(); return f;");
}
TEST(SerializeCallUndefinedReceiver2) {
CheckForSerializedInlinee("",
"function g(a,b) {};"
"%EnsureFeedbackVectorForFunction(g);"
"function f() {"
" g(1,2); return g;"
"};"
"%EnsureFeedbackVectorForFunction(f);"
"f(); return f;");
}
TEST(SerializeCallProperty) {
CheckForSerializedInlinee("",
"let obj = {"
" g: function g(a,b,c) {}"
"};"
"%EnsureFeedbackVectorForFunction(obj.g);"
"function f() {"
" obj.g(1,2,3); return obj.g;"
"};"
"%EnsureFeedbackVectorForFunction(f);"
"f(); return f;");
}
TEST(SerializeCallProperty2) {
CheckForSerializedInlinee("",
"let obj = {"
" g: function g(a,b) {}"
"};"
"%EnsureFeedbackVectorForFunction(obj.g);"
"function f() {"
" obj.g(1,2); return obj.g;"
"};"
"%EnsureFeedbackVectorForFunction(f);"
"f(); return f;");
}
TEST(SerializeCallAnyReceiver) {
CheckForSerializedInlinee("",
"let obj = {"
" g: function g() {}"
"};"
"%EnsureFeedbackVectorForFunction(obj.g);"
"function f() {"
" with(obj) {"
" g(); return g;"
" };"
"};"
"%EnsureFeedbackVectorForFunction(f);"
"f(); return f;");
}
TEST(SerializeCallWithSpread) {
CheckForSerializedInlinee("",
"function g(args) {};"
"%EnsureFeedbackVectorForFunction(g);"
"const arr = [1,2,3];"
"function f() {"
" g(...arr); return g;"
"};"
"%EnsureFeedbackVectorForFunction(f);"
"f(); return f;");
}
// The following test causes the CallIC of `g` to become megamorphic, which
// lets us test whether argument hints (`callee` in this example) are forwarded
// and the inlining candidate `j` is serialized correctly.
TEST(SerializeCallArguments) {
CheckForSerializedInlinee("",
"function g(callee) { callee(); };"
"function h() {};"
"function i() {};"
"%EnsureFeedbackVectorForFunction(g);"
"g(h); g(i);"
"function f() {"
" function j() {};"
" g(j);"
" return j;"
"};"
"%EnsureFeedbackVectorForFunction(f);"
"var j = f();"
"%EnsureFeedbackVectorForFunction(j);"
"f(); return f;");
}
TEST(SerializeConstruct) {
CheckForSerializedInlinee("",
"function g() {};"
"%EnsureFeedbackVectorForFunction(g);"
"function f() {"
" new g(); return g;"
"};"
"%EnsureFeedbackVectorForFunction(f);"
"f(); return f;");
}
TEST(SerializeConstructWithSpread) {
CheckForSerializedInlinee("",
"function g(a, b, c) {};"
"%EnsureFeedbackVectorForFunction(g);"
"const arr = [1, 2];"
"function f() {"
" new g(0, ...arr); return g;"
"};"
"%EnsureFeedbackVectorForFunction(f);"
"f(); return f;");
}
TEST(SerializeConstructSuper) {
CheckForSerializedInlinee("",
"class A {};"
"class B extends A { constructor() { super(); } };"
"%EnsureFeedbackVectorForFunction(A);"
"%EnsureFeedbackVectorForFunction(B);"
"function f() {"
" new B(); return A;"
"};"
"%EnsureFeedbackVectorForFunction(f);"
"f(); return f;");
}
TEST(SerializeConditionalJump) {
CheckForSerializedInlinee("",
"function g(callee) { callee(); };"
"function h() {};"
"function i() {};"
"%EnsureFeedbackVectorForFunction(g);"
"let a = true;"
"g(h); g(i);"
"function f() {"
" function q() {};"
" if (a) g(q);"
" return q;"
"};"
"%EnsureFeedbackVectorForFunction(f);"
"var q = f();"
"%EnsureFeedbackVectorForFunction(q);"
"f(); return f;");
}
TEST(SerializeUnconditionalJump) {
CheckForSerializedInlinee("",
"function g(callee) { callee(); };"
"function h() {};"
"function i() {};"
"%EnsureFeedbackVectorForFunction(g);"
"%EnsureFeedbackVectorForFunction(h);"
"%EnsureFeedbackVectorForFunction(i);"
"let a = false;"
"g(h); g(i);"
"function f() {"
" function p() {};"
" function q() {};"
" if (a) q();"
" else g(p);"
" return p;"
"};"
"%EnsureFeedbackVectorForFunction(f);"
"var p = f();"
"%EnsureFeedbackVectorForFunction(p);"
"f(); return f;");
}
TEST(MergeJumpTargetEnvironment) {
CheckForSerializedInlinee(
"",
"function f() {"
" let g;"
" while (true) {"
" if (g === undefined) {g = ()=>1; break;} else {g = ()=>2; break};"
" };"
" g(); return g;"
"};"
"%EnsureFeedbackVectorForFunction(f);"
"%EnsureFeedbackVectorForFunction(f());"
"f(); return f;"); // Two calls to f to make g() megamorhpic.
}
TEST(BoundFunctionTarget) {
const char* global = "function apply1(foo, arg) { return foo(arg); };";
CheckForSerializedInlinee(
global,
"%EnsureFeedbackVectorForFunction(apply1);"
"function test() {"
" const lambda = (a) => a;"
" %EnsureFeedbackVectorForFunction(lambda);"
" let bound = apply1.bind(null, lambda).bind(null, 42);"
" %TurbofanStaticAssert(bound() == 42); return apply1;"
"};"
"%EnsureFeedbackVectorForFunction(test);"
"test(); return test;");
}
TEST(BoundFunctionArguments) {
const char* global = "function apply2(foo, arg) { return foo(arg); };";
CheckForSerializedInlinee(
global,
"%EnsureFeedbackVectorForFunction(apply2);"
"function test() {"
" const lambda = (a) => a;"
" %EnsureFeedbackVectorForFunction(lambda);"
" let bound = apply2.bind(null, lambda).bind(null, 42);"
" %TurbofanStaticAssert(bound() == 42); return lambda;"
"};"
"%EnsureFeedbackVectorForFunction(test);"
"test(); return test;");
}
TEST(ArrowFunctionInlined) {
// The loop is to ensure there is a feedback vector for the arrow function
// {b}.
CheckForSerializedInlinee("",
"function foo() {"
" let b = x => x * x;"
" let a = [1, 2, 3].map(b);"
" return b;"
"}"
"%EnsureFeedbackVectorForFunction(foo);"
"for (let i = 0; i < 100; ++i) foo();"
"return foo;");
}
TEST(BoundFunctionResult) {
CheckForSerializedInlinee(
"",
"function id(x) { return x }"
"function foo() { id.bind(undefined, 42)(); return id; }"
"%PrepareFunctionForOptimization(foo);"
"%PrepareFunctionForOptimization(id);"
"foo();"
"foo();"
"%OptimizeFunctionOnNextCall(foo);"
"foo(); return foo;");
}
TEST(MultipleFunctionCalls) {
CheckForSerializedInlinee(
"",
"function inc(x) { return ++x; }"
"function dec(x) { return --x; }"
"function apply(f, x) { return f(x); }"
"function foo() { apply(inc, 42); apply(dec, 42); return dec; }"
"%PrepareFunctionForOptimization(inc);"
"%PrepareFunctionForOptimization(dec);"
"%PrepareFunctionForOptimization(apply);"
"%PrepareFunctionForOptimization(foo);"
"foo();"
"foo();"
"%OptimizeFunctionOnNextCall(foo);"
"foo(); return foo;");
}
} // namespace compiler
} // namespace internal
} // namespace v8
#endif // V8_LITE_MODE
......@@ -27,7 +27,7 @@
// Flags: --allow-natives-syntax
// Flags: --concurrent-recompilation --block-concurrent-recompilation
// Flags: --no-always-opt --no-turbo-concurrent-get-property-access-info
// Flags: --no-always-opt
if (!%IsConcurrentRecompilationSupported()) {
print("Concurrent recompilation is disabled. Skipping this test.");
......
......@@ -27,7 +27,7 @@
// Flags: --allow-natives-syntax --no-always-opt
// Flags: --concurrent-recompilation --block-concurrent-recompilation
// Flags: --no-always-opt --no-turbo-concurrent-get-property-access-info
// Flags: --no-always-opt
if (!%IsConcurrentRecompilationSupported()) {
print("Concurrent recompilation is disabled. Skipping this test.");
......
......@@ -29,8 +29,6 @@
// Flags: --concurrent-recompilation --block-concurrent-recompilation
// Flags: --nostress-opt --no-always-opt
// Flags: --no-turboprop
// Flags: --no-concurrent-inlining
// Flags: --no-turbo-concurrent-get-property-access-info
// --nostress-opt is in place because this particular optimization
// (guaranteeing that the Array prototype chain has no elements) is
......
......@@ -3,9 +3,7 @@
// found in the LICENSE file.
//
// Flags: --allow-natives-syntax --opt --no-always-opt
// Flags: --no-stress-flush-code
// Flags: --block-concurrent-recompilation
// Flags: --no-turbo-concurrent-get-property-access-info
// Flags: --no-stress-flush-code --block-concurrent-recompilation
//
// Tests tracking of constness of properties stored in dictionary
// mode prototypes.
......
......@@ -208,6 +208,23 @@
# dispatcher *without* aborting existing jobs.
'interrupt-budget-override': [PASS,FAIL],
'never-optimize': [PASS,FAIL],
# TODO(v8:12041): Reimplement an %UnblockConcurrentRecompilation pattern that
# works with concurrent compilation (i.e. without main-thread serialization).
'compiler/concurrent-invalidate-transition-map': [SKIP],
'compiler/concurrent-proto-change': [SKIP],
'compiler/regress-905555': [SKIP],
'compiler/regress-905555-2': [SKIP],
'concurrent-initial-prototype-change-1': [SKIP],
'const-dict-tracking': [SKIP],
# TODO(v8:12031): Reimplement elements kind transitions when concurrent
# inlining is enabled.
'compiler/call-with-arraylike-or-spread-4': [SKIP],
'elements-kind': [SKIP],
'elements-transition-hoisting': [SKIP],
'regress/regress-7254': [SKIP],
'regress/regress-7510': [SKIP],
}], # ALWAYS
##############################################################################
......