Commit be699045 authored by Mike Stanton, committed by Commit Bot

[TurboFan] Mark Code object as never serialized

Code objects are exposed through JSFunction and SharedFunctionInfo.
If they are builtins, we don't have to worry about background threads
seeing partially initialized code objects. If they are optimized code
objects, we may. Background threads read the code fields with
AcquireLoad semantics. The fields are set on the main thread with
ReleaseStore semantics when appropriate.

Special care is taken when setting an optimized code object in a closure
in the interpreter entry stub. Since the MacroAssembler doesn't support
ReleaseStore semantics, this CL ensures that the optimized code object
is stored with those semantics in the feedback vector, where the
interpreter entry stub finds it.

Bug: v8:7790
Change-Id: I41ecedfe0e9d1ad5091cbe9a97f66c66ca9e07dd
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2676633
Commit-Queue: Michael Stanton <mvstanton@chromium.org>
Reviewed-by: Ulan Degenbaev <ulan@chromium.org>
Reviewed-by: Santiago Aboy Solanes <solanes@chromium.org>
Reviewed-by: Leszek Swirski <leszeks@chromium.org>
Reviewed-by: Georg Neis <neis@chromium.org>
Cr-Commit-Position: refs/heads/master@{#72869}
parent 6253320c
...@@ -1151,6 +1151,10 @@ Handle<Code> ContinuationForConcurrentOptimization( ...@@ -1151,6 +1151,10 @@ Handle<Code> ContinuationForConcurrentOptimization(
// code. // code.
if (!function->HasAttachedOptimizedCode()) { if (!function->HasAttachedOptimizedCode()) {
DCHECK(function->feedback_vector().has_optimized_code()); DCHECK(function->feedback_vector().has_optimized_code());
// Release store isn't required here because it was done on store
// into the feedback vector.
STATIC_ASSERT(
FeedbackVector::kFeedbackVectorMaybeOptimizedCodeIsStoreRelease);
function->set_code(function->feedback_vector().optimized_code()); function->set_code(function->feedback_vector().optimized_code());
} }
return handle(function->code(), isolate); return handle(function->code(), isolate);
...@@ -1967,7 +1971,7 @@ bool Compiler::Compile(Isolate* isolate, Handle<JSFunction> function, ...@@ -1967,7 +1971,7 @@ bool Compiler::Compile(Isolate* isolate, Handle<JSFunction> function,
} }
// Install code on closure. // Install code on closure.
function->set_code(*code); function->set_code(*code, kReleaseStore);
// Install a feedback vector if necessary. // Install a feedback vector if necessary.
if (code->kind() == CodeKind::BASELINE) { if (code->kind() == CodeKind::BASELINE) {
...@@ -2069,7 +2073,7 @@ bool Compiler::CompileOptimized(Isolate* isolate, Handle<JSFunction> function, ...@@ -2069,7 +2073,7 @@ bool Compiler::CompileOptimized(Isolate* isolate, Handle<JSFunction> function,
} }
if (!CodeKindIsNativeContextIndependentJSFunction(code_kind)) { if (!CodeKindIsNativeContextIndependentJSFunction(code_kind)) {
function->set_code(*code); function->set_code(*code, kReleaseStore);
} }
// Check postconditions on success. // Check postconditions on success.
...@@ -3225,7 +3229,8 @@ bool Compiler::FinalizeOptimizedCompilationJob(OptimizedCompilationJob* job, ...@@ -3225,7 +3229,8 @@ bool Compiler::FinalizeOptimizedCompilationJob(OptimizedCompilationJob* job,
InsertCodeIntoCompilationCache(isolate, compilation_info); InsertCodeIntoCompilationCache(isolate, compilation_info);
CompilerTracer::TraceCompletedJob(isolate, compilation_info); CompilerTracer::TraceCompletedJob(isolate, compilation_info);
if (should_install_code_on_function) { if (should_install_code_on_function) {
compilation_info->closure()->set_code(*compilation_info->code()); compilation_info->closure()->set_code(*compilation_info->code(),
kReleaseStore);
} }
return CompilationJob::SUCCEEDED; return CompilationJob::SUCCEEDED;
} }
...@@ -3233,7 +3238,7 @@ bool Compiler::FinalizeOptimizedCompilationJob(OptimizedCompilationJob* job, ...@@ -3233,7 +3238,7 @@ bool Compiler::FinalizeOptimizedCompilationJob(OptimizedCompilationJob* job,
DCHECK_EQ(job->state(), CompilationJob::State::kFailed); DCHECK_EQ(job->state(), CompilationJob::State::kFailed);
CompilerTracer::TraceAbortedJob(isolate, compilation_info); CompilerTracer::TraceAbortedJob(isolate, compilation_info);
compilation_info->closure()->set_code(shared->GetCode()); compilation_info->closure()->set_code(shared->GetCode(), kReleaseStore);
// Clear the InOptimizationQueue marker, if it exists. // Clear the InOptimizationQueue marker, if it exists.
if (!CodeKindIsNativeContextIndependentJSFunction(code_kind) && if (!CodeKindIsNativeContextIndependentJSFunction(code_kind) &&
compilation_info->closure()->IsInOptimizationQueue()) { compilation_info->closure()->IsInOptimizationQueue()) {
...@@ -3267,6 +3272,11 @@ void Compiler::PostInstantiation(Handle<JSFunction> function) { ...@@ -3267,6 +3272,11 @@ void Compiler::PostInstantiation(Handle<JSFunction> function) {
// Caching of optimized code enabled and optimized code found. // Caching of optimized code enabled and optimized code found.
DCHECK(!code.marked_for_deoptimization()); DCHECK(!code.marked_for_deoptimization());
DCHECK(function->shared().is_compiled()); DCHECK(function->shared().is_compiled());
// We don't need a release store because the optimized code was
// stored with release semantics into the vector
STATIC_ASSERT(
FeedbackVector::kFeedbackVectorMaybeOptimizedCodeIsStoreRelease);
function->set_code(code); function->set_code(code);
} }
} }
......
...@@ -27,7 +27,7 @@ void DisposeCompilationJob(OptimizedCompilationJob* job, ...@@ -27,7 +27,7 @@ void DisposeCompilationJob(OptimizedCompilationJob* job,
bool restore_function_code) { bool restore_function_code) {
if (restore_function_code) { if (restore_function_code) {
Handle<JSFunction> function = job->compilation_info()->closure(); Handle<JSFunction> function = job->compilation_info()->closure();
function->set_code(function->shared().GetCode()); function->set_code(function->shared().GetCode(), kReleaseStore);
if (function->IsInOptimizationQueue()) { if (function->IsInOptimizationQueue()) {
function->ClearOptimizationMarker(); function->ClearOptimizationMarker();
} }
......
...@@ -82,6 +82,7 @@ enum class OddballType : uint8_t { ...@@ -82,6 +82,7 @@ enum class OddballType : uint8_t {
V(ArrayBoilerplateDescription) \ V(ArrayBoilerplateDescription) \
V(CallHandlerInfo) \ V(CallHandlerInfo) \
V(Cell) \ V(Cell) \
V(Code) \
V(FeedbackCell) \ V(FeedbackCell) \
V(FeedbackVector) \ V(FeedbackVector) \
V(RegExpBoilerplateDescription) \ V(RegExpBoilerplateDescription) \
...@@ -133,7 +134,6 @@ enum class OddballType : uint8_t { ...@@ -133,7 +134,6 @@ enum class OddballType : uint8_t {
V(JSObject) \ V(JSObject) \
/* Subtypes of HeapObject */ \ /* Subtypes of HeapObject */ \
V(AllocationSite) \ V(AllocationSite) \
V(Code) \
V(DescriptorArray) \ V(DescriptorArray) \
V(FixedArrayBase) \ V(FixedArrayBase) \
V(FunctionTemplateInfo) \ V(FunctionTemplateInfo) \
...@@ -389,7 +389,6 @@ class V8_EXPORT_PRIVATE JSFunctionRef : public JSObjectRef { ...@@ -389,7 +389,6 @@ class V8_EXPORT_PRIVATE JSFunctionRef : public JSObjectRef {
bool has_feedback_vector() const; bool has_feedback_vector() const;
bool has_initial_map() const; bool has_initial_map() const;
bool has_prototype() const; bool has_prototype() const;
bool HasAttachedOptimizedCode() const;
bool PrototypeRequiresRuntimeLookup() const; bool PrototypeRequiresRuntimeLookup() const;
void Serialize(); void Serialize();
......
...@@ -682,7 +682,6 @@ class JSFunctionData : public JSObjectData { ...@@ -682,7 +682,6 @@ class JSFunctionData : public JSObjectData {
bool has_feedback_vector() const { return has_feedback_vector_; } bool has_feedback_vector() const { return has_feedback_vector_; }
bool has_initial_map() const { return has_initial_map_; } bool has_initial_map() const { return has_initial_map_; }
bool has_prototype() const { return has_prototype_; } bool has_prototype() const { return has_prototype_; }
bool HasAttachedOptimizedCode() const { return has_attached_optimized_code_; }
bool PrototypeRequiresRuntimeLookup() const { bool PrototypeRequiresRuntimeLookup() const {
return PrototypeRequiresRuntimeLookup_; return PrototypeRequiresRuntimeLookup_;
} }
...@@ -710,6 +709,7 @@ class JSFunctionData : public JSObjectData { ...@@ -710,6 +709,7 @@ class JSFunctionData : public JSObjectData {
} }
ObjectData* code() const { ObjectData* code() const {
DCHECK(serialized_code_and_feedback()); DCHECK(serialized_code_and_feedback());
DCHECK(!FLAG_turbo_direct_heap_access);
return code_; return code_;
} }
int initial_map_instance_size_with_min_slack() const { int initial_map_instance_size_with_min_slack() const {
...@@ -721,7 +721,6 @@ class JSFunctionData : public JSObjectData { ...@@ -721,7 +721,6 @@ class JSFunctionData : public JSObjectData {
bool has_feedback_vector_; bool has_feedback_vector_;
bool has_initial_map_; bool has_initial_map_;
bool has_prototype_; bool has_prototype_;
bool has_attached_optimized_code_;
bool PrototypeRequiresRuntimeLookup_; bool PrototypeRequiresRuntimeLookup_;
bool serialized_ = false; bool serialized_ = false;
...@@ -1346,7 +1345,6 @@ JSFunctionData::JSFunctionData(JSHeapBroker* broker, ObjectData** storage, ...@@ -1346,7 +1345,6 @@ JSFunctionData::JSFunctionData(JSHeapBroker* broker, ObjectData** storage,
has_initial_map_(object->has_prototype_slot() && has_initial_map_(object->has_prototype_slot() &&
object->has_initial_map()), object->has_initial_map()),
has_prototype_(object->has_prototype_slot() && object->has_prototype()), has_prototype_(object->has_prototype_slot() && object->has_prototype()),
has_attached_optimized_code_(object->HasAttachedOptimizedCode()),
PrototypeRequiresRuntimeLookup_( PrototypeRequiresRuntimeLookup_(
object->PrototypeRequiresRuntimeLookup()) {} object->PrototypeRequiresRuntimeLookup()) {}
...@@ -1400,7 +1398,12 @@ void JSFunctionData::SerializeCodeAndFeedback(JSHeapBroker* broker) { ...@@ -1400,7 +1398,12 @@ void JSFunctionData::SerializeCodeAndFeedback(JSHeapBroker* broker) {
DCHECK_NULL(feedback_cell_); DCHECK_NULL(feedback_cell_);
DCHECK_NULL(feedback_vector_); DCHECK_NULL(feedback_vector_);
DCHECK_NULL(code_); DCHECK_NULL(code_);
code_ = broker->GetOrCreateData(function->code()); if (!FLAG_turbo_direct_heap_access) {
// This is conditionalized because Code objects are never serialized now.
// We only need to represent the code object in serialized data when
// we're unable to perform direct heap accesses.
code_ = broker->GetOrCreateData(function->code(kAcquireLoad));
}
feedback_cell_ = broker->GetOrCreateData(function->raw_feedback_cell()); feedback_cell_ = broker->GetOrCreateData(function->raw_feedback_cell());
feedback_vector_ = has_feedback_vector() feedback_vector_ = has_feedback_vector()
? broker->GetOrCreateData(function->feedback_vector()) ? broker->GetOrCreateData(function->feedback_vector())
...@@ -2160,7 +2163,9 @@ class CodeData : public HeapObjectData { ...@@ -2160,7 +2163,9 @@ class CodeData : public HeapObjectData {
public: public:
CodeData(JSHeapBroker* broker, ObjectData** storage, Handle<Code> object) CodeData(JSHeapBroker* broker, ObjectData** storage, Handle<Code> object)
: HeapObjectData(broker, storage, object), : HeapObjectData(broker, storage, object),
inlined_bytecode_size_(object->inlined_bytecode_size()) {} inlined_bytecode_size_(object->inlined_bytecode_size()) {
DCHECK(!FLAG_turbo_direct_heap_access);
}
unsigned inlined_bytecode_size() const { return inlined_bytecode_size_; } unsigned inlined_bytecode_size() const { return inlined_bytecode_size_; }
...@@ -3485,7 +3490,6 @@ BIMODAL_ACCESSOR_C(JSDataView, size_t, byte_length) ...@@ -3485,7 +3490,6 @@ BIMODAL_ACCESSOR_C(JSDataView, size_t, byte_length)
BIMODAL_ACCESSOR_C(JSFunction, bool, has_feedback_vector) BIMODAL_ACCESSOR_C(JSFunction, bool, has_feedback_vector)
BIMODAL_ACCESSOR_C(JSFunction, bool, has_initial_map) BIMODAL_ACCESSOR_C(JSFunction, bool, has_initial_map)
BIMODAL_ACCESSOR_C(JSFunction, bool, has_prototype) BIMODAL_ACCESSOR_C(JSFunction, bool, has_prototype)
BIMODAL_ACCESSOR_C(JSFunction, bool, HasAttachedOptimizedCode)
BIMODAL_ACCESSOR_C(JSFunction, bool, PrototypeRequiresRuntimeLookup) BIMODAL_ACCESSOR_C(JSFunction, bool, PrototypeRequiresRuntimeLookup)
BIMODAL_ACCESSOR(JSFunction, Context, context) BIMODAL_ACCESSOR(JSFunction, Context, context)
BIMODAL_ACCESSOR(JSFunction, NativeContext, native_context) BIMODAL_ACCESSOR(JSFunction, NativeContext, native_context)
...@@ -3494,7 +3498,6 @@ BIMODAL_ACCESSOR(JSFunction, Object, prototype) ...@@ -3494,7 +3498,6 @@ BIMODAL_ACCESSOR(JSFunction, Object, prototype)
BIMODAL_ACCESSOR(JSFunction, SharedFunctionInfo, shared) BIMODAL_ACCESSOR(JSFunction, SharedFunctionInfo, shared)
BIMODAL_ACCESSOR(JSFunction, FeedbackCell, raw_feedback_cell) BIMODAL_ACCESSOR(JSFunction, FeedbackCell, raw_feedback_cell)
BIMODAL_ACCESSOR(JSFunction, FeedbackVector, feedback_vector) BIMODAL_ACCESSOR(JSFunction, FeedbackVector, feedback_vector)
BIMODAL_ACCESSOR(JSFunction, Code, code)
BIMODAL_ACCESSOR_C(JSGlobalObject, bool, IsDetached) BIMODAL_ACCESSOR_C(JSGlobalObject, bool, IsDetached)
...@@ -4391,6 +4394,15 @@ bool JSFunctionRef::serialized_code_and_feedback() const { ...@@ -4391,6 +4394,15 @@ bool JSFunctionRef::serialized_code_and_feedback() const {
return data()->AsJSFunction()->serialized_code_and_feedback(); return data()->AsJSFunction()->serialized_code_and_feedback();
} }
CodeRef JSFunctionRef::code() const {
if (data_->should_access_heap() || FLAG_turbo_direct_heap_access) {
return CodeRef(broker(), broker()->CanonicalPersistentHandle(
object()->code(kAcquireLoad)));
}
return CodeRef(broker(), ObjectRef::data()->AsJSFunction()->code());
}
void SharedFunctionInfoRef::SerializeFunctionTemplateInfo() { void SharedFunctionInfoRef::SerializeFunctionTemplateInfo() {
if (data_->should_access_heap()) return; if (data_->should_access_heap()) return;
CHECK_EQ(broker()->mode(), JSHeapBroker::kSerializing); CHECK_EQ(broker()->mode(), JSHeapBroker::kSerializing);
......
...@@ -204,11 +204,9 @@ Reduction JSInliningHeuristic::Reduce(Node* node) { ...@@ -204,11 +204,9 @@ Reduction JSInliningHeuristic::Reduce(Node* node) {
unsigned inlined_bytecode_size = 0; unsigned inlined_bytecode_size = 0;
if (candidate.functions[i].has_value()) { if (candidate.functions[i].has_value()) {
JSFunctionRef function = candidate.functions[i].value(); JSFunctionRef function = candidate.functions[i].value();
if (function.HasAttachedOptimizedCode()) {
inlined_bytecode_size = function.code().inlined_bytecode_size(); inlined_bytecode_size = function.code().inlined_bytecode_size();
candidate.total_size += inlined_bytecode_size; candidate.total_size += inlined_bytecode_size;
} }
}
candidate_is_small = candidate_is_small && candidate_is_small = candidate_is_small &&
IsSmall(bytecode.length() + inlined_bytecode_size); IsSmall(bytecode.length() + inlined_bytecode_size);
} }
...@@ -791,9 +789,11 @@ void JSInliningHeuristic::PrintCandidates() { ...@@ -791,9 +789,11 @@ void JSInliningHeuristic::PrintCandidates() {
os << ", bytecode size: " << candidate.bytecode[i]->length(); os << ", bytecode size: " << candidate.bytecode[i]->length();
if (candidate.functions[i].has_value()) { if (candidate.functions[i].has_value()) {
JSFunctionRef function = candidate.functions[i].value(); JSFunctionRef function = candidate.functions[i].value();
if (function.HasAttachedOptimizedCode()) { unsigned inlined_bytecode_size =
function.code().inlined_bytecode_size();
if (inlined_bytecode_size > 0) {
os << ", existing opt code's inlined bytecode size: " os << ", existing opt code's inlined bytecode size: "
<< function.code().inlined_bytecode_size(); << inlined_bytecode_size;
} }
} }
} else { } else {
......
...@@ -1191,7 +1191,7 @@ void LiveEdit::PatchScript(Isolate* isolate, Handle<Script> script, ...@@ -1191,7 +1191,7 @@ void LiveEdit::PatchScript(Isolate* isolate, Handle<Script> script,
isolate->compilation_cache()->Remove(sfi); isolate->compilation_cache()->Remove(sfi);
for (auto& js_function : data->js_functions) { for (auto& js_function : data->js_functions) {
js_function->set_shared(*new_sfi); js_function->set_shared(*new_sfi);
js_function->set_code(js_function->shared().GetCode()); js_function->set_code(js_function->shared().GetCode(), kReleaseStore);
js_function->set_raw_feedback_cell( js_function->set_raw_feedback_cell(
*isolate->factory()->many_closures_cell()); *isolate->factory()->many_closures_cell());
......
...@@ -432,8 +432,8 @@ Handle<FeedbackVector> Factory::NewFeedbackVector( ...@@ -432,8 +432,8 @@ Handle<FeedbackVector> Factory::NewFeedbackVector(
*feedback_vector_map()); *feedback_vector_map());
Handle<FeedbackVector> vector(FeedbackVector::cast(result), isolate()); Handle<FeedbackVector> vector(FeedbackVector::cast(result), isolate());
vector->set_shared_function_info(*shared); vector->set_shared_function_info(*shared);
vector->set_maybe_optimized_code( vector->set_maybe_optimized_code(HeapObjectReference::ClearedValue(isolate()),
HeapObjectReference::ClearedValue(isolate())); kReleaseStore);
vector->set_length(length); vector->set_length(length);
vector->set_invocation_count(0); vector->set_invocation_count(0);
vector->set_profiler_ticks(0); vector->set_profiler_ticks(0);
...@@ -3458,7 +3458,7 @@ Handle<JSFunction> Factory::JSFunctionBuilder::BuildRaw(Handle<Code> code) { ...@@ -3458,7 +3458,7 @@ Handle<JSFunction> Factory::JSFunctionBuilder::BuildRaw(Handle<Code> code) {
function->set_shared(*sfi_); function->set_shared(*sfi_);
function->set_context(*context_); function->set_context(*context_);
function->set_raw_feedback_cell(*feedback_cell); function->set_raw_feedback_cell(*feedback_cell);
function->set_code(*code); function->set_code(*code, kReleaseStore);
if (map->has_prototype_slot()) { if (map->has_prototype_slot()) {
function->set_prototype_or_initial_map( function->set_prototype_or_initial_map(
ReadOnlyRoots(isolate).the_hole_value()); ReadOnlyRoots(isolate).the_hole_value());
......
...@@ -38,6 +38,9 @@ INT32_ACCESSORS(FeedbackMetadata, slot_count, kSlotCountOffset) ...@@ -38,6 +38,9 @@ INT32_ACCESSORS(FeedbackMetadata, slot_count, kSlotCountOffset)
INT32_ACCESSORS(FeedbackMetadata, create_closure_slot_count, INT32_ACCESSORS(FeedbackMetadata, create_closure_slot_count,
kCreateClosureSlotCountOffset) kCreateClosureSlotCountOffset)
RELEASE_ACQUIRE_WEAK_ACCESSORS(FeedbackVector, maybe_optimized_code,
kMaybeOptimizedCodeOffset)
int32_t FeedbackMetadata::synchronized_slot_count() const { int32_t FeedbackMetadata::synchronized_slot_count() const {
return base::Acquire_Load( return base::Acquire_Load(
reinterpret_cast<const base::Atomic32*>(field_address(kSlotCountOffset))); reinterpret_cast<const base::Atomic32*>(field_address(kSlotCountOffset)));
...@@ -113,7 +116,7 @@ FeedbackMetadata FeedbackVector::metadata() const { ...@@ -113,7 +116,7 @@ FeedbackMetadata FeedbackVector::metadata() const {
void FeedbackVector::clear_invocation_count() { set_invocation_count(0); } void FeedbackVector::clear_invocation_count() { set_invocation_count(0); }
Code FeedbackVector::optimized_code() const { Code FeedbackVector::optimized_code() const {
MaybeObject slot = maybe_optimized_code(); MaybeObject slot = maybe_optimized_code(kAcquireLoad);
DCHECK(slot->IsWeakOrCleared()); DCHECK(slot->IsWeakOrCleared());
HeapObject heap_object; HeapObject heap_object;
Code code = Code code =
...@@ -145,7 +148,7 @@ OptimizationTier FeedbackVector::optimization_tier() const { ...@@ -145,7 +148,7 @@ OptimizationTier FeedbackVector::optimization_tier() const {
// It is possible that the optimization tier bits aren't updated when the code // It is possible that the optimization tier bits aren't updated when the code
// was cleared due to a GC. // was cleared due to a GC.
DCHECK_IMPLIES(tier == OptimizationTier::kNone, DCHECK_IMPLIES(tier == OptimizationTier::kNone,
maybe_optimized_code()->IsCleared()); maybe_optimized_code(kAcquireLoad)->IsCleared());
return tier; return tier;
} }
......
...@@ -394,7 +394,8 @@ void FeedbackVector::SetOptimizedCode(Handle<FeedbackVector> vector, ...@@ -394,7 +394,8 @@ void FeedbackVector::SetOptimizedCode(Handle<FeedbackVector> vector,
// re-mark the function for non-concurrent optimization after an OSR. We // re-mark the function for non-concurrent optimization after an OSR. We
// should avoid these cases and also check that marker isn't // should avoid these cases and also check that marker isn't
// kCompileOptimized or kCompileOptimizedConcurrent. // kCompileOptimized or kCompileOptimizedConcurrent.
vector->set_maybe_optimized_code(HeapObjectReference::Weak(*code)); vector->set_maybe_optimized_code(HeapObjectReference::Weak(*code),
kReleaseStore);
int32_t state = vector->flags(); int32_t state = vector->flags();
state = OptimizationTierBits::update(state, GetTierForCodeKind(code->kind())); state = OptimizationTierBits::update(state, GetTierForCodeKind(code->kind()));
state = OptimizationMarkerBits::update(state, OptimizationMarker::kNone); state = OptimizationMarkerBits::update(state, OptimizationMarker::kNone);
...@@ -404,7 +405,8 @@ void FeedbackVector::SetOptimizedCode(Handle<FeedbackVector> vector, ...@@ -404,7 +405,8 @@ void FeedbackVector::SetOptimizedCode(Handle<FeedbackVector> vector,
void FeedbackVector::ClearOptimizedCode() { void FeedbackVector::ClearOptimizedCode() {
DCHECK(has_optimized_code()); DCHECK(has_optimized_code());
DCHECK_NE(optimization_tier(), OptimizationTier::kNone); DCHECK_NE(optimization_tier(), OptimizationTier::kNone);
set_maybe_optimized_code(HeapObjectReference::ClearedValue(GetIsolate())); set_maybe_optimized_code(HeapObjectReference::ClearedValue(GetIsolate()),
kReleaseStore);
ClearOptimizationTier(); ClearOptimizationTier();
} }
...@@ -435,7 +437,7 @@ void FeedbackVector::InitializeOptimizationState() { ...@@ -435,7 +437,7 @@ void FeedbackVector::InitializeOptimizationState() {
void FeedbackVector::EvictOptimizedCodeMarkedForDeoptimization( void FeedbackVector::EvictOptimizedCodeMarkedForDeoptimization(
SharedFunctionInfo shared, const char* reason) { SharedFunctionInfo shared, const char* reason) {
MaybeObject slot = maybe_optimized_code(); MaybeObject slot = maybe_optimized_code(kAcquireLoad);
if (slot->IsCleared()) { if (slot->IsCleared()) {
ClearOptimizationTier(); ClearOptimizationTier();
return; return;
......
...@@ -198,6 +198,11 @@ class FeedbackVector ...@@ -198,6 +198,11 @@ class FeedbackVector
STATIC_ASSERT(OptimizationTier::kLastOptimizationTier < STATIC_ASSERT(OptimizationTier::kLastOptimizationTier <
OptimizationTierBits::kMax); OptimizationTierBits::kMax);
static const bool kFeedbackVectorMaybeOptimizedCodeIsStoreRelease = true;
using TorqueGeneratedFeedbackVector<FeedbackVector,
HeapObject>::maybe_optimized_code;
DECL_RELEASE_ACQUIRE_WEAK_ACCESSORS(maybe_optimized_code)
static constexpr uint32_t kHasCompileOptimizedOrLogFirstExecutionMarker = static constexpr uint32_t kHasCompileOptimizedOrLogFirstExecutionMarker =
kNoneOrInOptimizationQueueMask << OptimizationMarkerBits::kShift; kNoneOrInOptimizationQueueMask << OptimizationMarkerBits::kShift;
static constexpr uint32_t kHasNoTopTierCodeOrCompileOptimizedMarkerMask = static constexpr uint32_t kHasNoTopTierCodeOrCompileOptimizedMarkerMask =
......
...@@ -52,7 +52,7 @@ void JSFunction::ClearOptimizationMarker() { ...@@ -52,7 +52,7 @@ void JSFunction::ClearOptimizationMarker() {
} }
bool JSFunction::ChecksOptimizationMarker() { bool JSFunction::ChecksOptimizationMarker() {
return code().checks_optimization_marker(); return code(kAcquireLoad).checks_optimization_marker();
} }
bool JSFunction::IsMarkedForOptimization() { bool JSFunction::IsMarkedForOptimization() {
...@@ -118,7 +118,7 @@ AbstractCode JSFunction::abstract_code(LocalIsolate* isolate) { ...@@ -118,7 +118,7 @@ AbstractCode JSFunction::abstract_code(LocalIsolate* isolate) {
if (ActiveTierIsIgnition()) { if (ActiveTierIsIgnition()) {
return AbstractCode::cast(shared().GetBytecodeArray(isolate)); return AbstractCode::cast(shared().GetBytecodeArray(isolate));
} else { } else {
return AbstractCode::cast(code()); return AbstractCode::cast(code(kAcquireLoad));
} }
} }
...@@ -136,10 +136,7 @@ void JSFunction::set_code(Code value) { ...@@ -136,10 +136,7 @@ void JSFunction::set_code(Code value) {
#endif #endif
} }
void JSFunction::set_code_no_write_barrier(Code value) { RELEASE_ACQUIRE_ACCESSORS(JSFunction, code, Code, kCodeOffset)
DCHECK(!ObjectInYoungGeneration(value));
RELAXED_WRITE_FIELD(*this, kCodeOffset, value);
}
// TODO(ishell): Why relaxed read but release store? // TODO(ishell): Why relaxed read but release store?
DEF_GETTER(JSFunction, shared, SharedFunctionInfo) { DEF_GETTER(JSFunction, shared, SharedFunctionInfo) {
...@@ -255,7 +252,7 @@ DEF_GETTER(JSFunction, prototype, Object) { ...@@ -255,7 +252,7 @@ DEF_GETTER(JSFunction, prototype, Object) {
} }
bool JSFunction::is_compiled() const { bool JSFunction::is_compiled() const {
return code().builtin_index() != Builtins::kCompileLazy && return code(kAcquireLoad).builtin_index() != Builtins::kCompileLazy &&
shared().is_compiled(); shared().is_compiled();
} }
......
...@@ -24,13 +24,14 @@ CodeKinds JSFunction::GetAttachedCodeKinds() const { ...@@ -24,13 +24,14 @@ CodeKinds JSFunction::GetAttachedCodeKinds() const {
// Note: There's a special case when bytecode has been aged away. After // Note: There's a special case when bytecode has been aged away. After
// flushing the bytecode, the JSFunction will still have the interpreter // flushing the bytecode, the JSFunction will still have the interpreter
// entry trampoline attached, but the bytecode is no longer available. // entry trampoline attached, but the bytecode is no longer available.
if (code().is_interpreter_trampoline_builtin()) { Code code = this->code(kAcquireLoad);
if (code.is_interpreter_trampoline_builtin()) {
result |= CodeKindFlag::INTERPRETED_FUNCTION; result |= CodeKindFlag::INTERPRETED_FUNCTION;
} }
const CodeKind kind = code().kind(); const CodeKind kind = code.kind();
if (!CodeKindIsOptimizedJSFunction(kind) || if (!CodeKindIsOptimizedJSFunction(kind) ||
code().marked_for_deoptimization()) { code.marked_for_deoptimization()) {
DCHECK_EQ((result & ~kJSFunctionCodeKindsMask), 0); DCHECK_EQ((result & ~kJSFunctionCodeKindsMask), 0);
return result; return result;
} }
...@@ -117,12 +118,14 @@ bool HighestTierOf(CodeKinds kinds, CodeKind* highest_tier) { ...@@ -117,12 +118,14 @@ bool HighestTierOf(CodeKinds kinds, CodeKind* highest_tier) {
bool JSFunction::ActiveTierIsIgnition() const { bool JSFunction::ActiveTierIsIgnition() const {
if (!shared().HasBytecodeArray()) return false; if (!shared().HasBytecodeArray()) return false;
bool result = (GetActiveTier() == CodeKind::INTERPRETED_FUNCTION); bool result = (GetActiveTier() == CodeKind::INTERPRETED_FUNCTION);
DCHECK_IMPLIES(result, #ifdef DEBUG
code().is_interpreter_trampoline_builtin() || Code code = this->code(kAcquireLoad);
(CodeKindIsOptimizedJSFunction(code().kind()) && DCHECK_IMPLIES(result, code.is_interpreter_trampoline_builtin() ||
code().marked_for_deoptimization()) || (CodeKindIsOptimizedJSFunction(code.kind()) &&
(code().builtin_index() == Builtins::kCompileLazy && code.marked_for_deoptimization()) ||
(code.builtin_index() == Builtins::kCompileLazy &&
shared().IsInterpreted())); shared().IsInterpreted()));
#endif // DEBUG
return result; return result;
} }
......
...@@ -82,9 +82,13 @@ class JSFunction : public JSFunctionOrBoundFunction { ...@@ -82,9 +82,13 @@ class JSFunction : public JSFunctionOrBoundFunction {
// when the function is invoked, e.g. foo() or new foo(). See // when the function is invoked, e.g. foo() or new foo(). See
// [[Call]] and [[Construct]] description in ECMA-262, section // [[Call]] and [[Construct]] description in ECMA-262, section
// 8.6.2, page 27. // 8.6.2, page 27.
// Release/Acquire accessors are used when storing a newly-created
// optimized code object, or when reading from the background thread.
// Storing a builtin doesn't require release semantics because these objects
// are fully initialized.
inline Code code() const; inline Code code() const;
inline void set_code(Code code); inline void set_code(Code code);
inline void set_code_no_write_barrier(Code code); DECL_RELEASE_ACQUIRE_ACCESSORS(code, Code)
// Get the abstract code associated with the function, which will either be // Get the abstract code associated with the function, which will either be
// a Code object or a BytecodeArray. // a Code object or a BytecodeArray.
......
...@@ -42,7 +42,8 @@ RELEASE_ACQUIRE_ACCESSORS(Map, instance_descriptors, DescriptorArray, ...@@ -42,7 +42,8 @@ RELEASE_ACQUIRE_ACCESSORS(Map, instance_descriptors, DescriptorArray,
// We need to use release-store and acquire-load accessor pairs to ensure // We need to use release-store and acquire-load accessor pairs to ensure
// that the concurrent marking thread observes initializing stores of the // that the concurrent marking thread observes initializing stores of the
// layout descriptor. // layout descriptor.
SYNCHRONIZED_WEAK_ACCESSORS(Map, raw_transitions, WEAK_ACCESSORS(Map, raw_transitions, kTransitionsOrPrototypeInfoOffset)
RELEASE_ACQUIRE_WEAK_ACCESSORS(Map, raw_transitions,
kTransitionsOrPrototypeInfoOffset) kTransitionsOrPrototypeInfoOffset)
ACCESSORS_CHECKED2(Map, prototype, HeapObject, kPrototypeOffset, true, ACCESSORS_CHECKED2(Map, prototype, HeapObject, kPrototypeOffset, true,
......
...@@ -425,6 +425,7 @@ class Map : public HeapObject { ...@@ -425,6 +425,7 @@ class Map : public HeapObject {
// Don't call set_raw_transitions() directly to overwrite transitions, use // Don't call set_raw_transitions() directly to overwrite transitions, use
// the TransitionArray::ReplaceTransitions() wrapper instead! // the TransitionArray::ReplaceTransitions() wrapper instead!
DECL_ACCESSORS(raw_transitions, MaybeObject) DECL_ACCESSORS(raw_transitions, MaybeObject)
DECL_RELEASE_ACQUIRE_WEAK_ACCESSORS(raw_transitions)
// [prototype_info]: Per-prototype metadata. Aliased with transitions // [prototype_info]: Per-prototype metadata. Aliased with transitions
// (which prototype maps don't have). // (which prototype maps don't have).
DECL_ACCESSORS(prototype_info, Object) DECL_ACCESSORS(prototype_info, Object)
......
...@@ -131,6 +131,10 @@ ...@@ -131,6 +131,10 @@
DECL_ACQUIRE_GETTER(name, type) \ DECL_ACQUIRE_GETTER(name, type) \
DECL_RELEASE_SETTER(name, type) DECL_RELEASE_SETTER(name, type)
#define DECL_RELEASE_ACQUIRE_WEAK_ACCESSORS(name) \
DECL_ACQUIRE_GETTER(name, MaybeObject) \
DECL_RELEASE_SETTER(name, MaybeObject)
#define DECL_CAST(Type) \ #define DECL_CAST(Type) \
V8_INLINE static Type cast(Object object); \ V8_INLINE static Type cast(Object object); \
V8_INLINE static Type unchecked_cast(Object object) { \ V8_INLINE static Type unchecked_cast(Object object) { \
...@@ -278,26 +282,32 @@ ...@@ -278,26 +282,32 @@
#define WEAK_ACCESSORS(holder, name, offset) \ #define WEAK_ACCESSORS(holder, name, offset) \
WEAK_ACCESSORS_CHECKED(holder, name, offset, true) WEAK_ACCESSORS_CHECKED(holder, name, offset, true)
#define SYNCHRONIZED_WEAK_ACCESSORS_CHECKED2(holder, name, offset, \ #define RELEASE_ACQUIRE_WEAK_ACCESSORS_CHECKED2(holder, name, offset, \
get_condition, set_condition) \ get_condition, set_condition) \
DEF_GETTER(holder, name, MaybeObject) { \ MaybeObject holder::name(AcquireLoadTag tag) const { \
IsolateRoot isolate = GetIsolateForPtrCompr(*this); \
return holder::name(isolate, tag); \
} \
MaybeObject holder::name(IsolateRoot isolate, AcquireLoadTag) const { \
MaybeObject value = \ MaybeObject value = \
TaggedField<MaybeObject, offset>::Acquire_Load(isolate, *this); \ TaggedField<MaybeObject, offset>::Acquire_Load(isolate, *this); \
DCHECK(get_condition); \ DCHECK(get_condition); \
return value; \ return value; \
} \ } \
void holder::set_##name(MaybeObject value, WriteBarrierMode mode) { \ void holder::set_##name(MaybeObject value, ReleaseStoreTag, \
WriteBarrierMode mode) { \
DCHECK(set_condition); \ DCHECK(set_condition); \
TaggedField<MaybeObject, offset>::Release_Store(*this, value); \ TaggedField<MaybeObject, offset>::Release_Store(*this, value); \
CONDITIONAL_WEAK_WRITE_BARRIER(*this, offset, value, mode); \ CONDITIONAL_WEAK_WRITE_BARRIER(*this, offset, value, mode); \
} }
#define SYNCHRONIZED_WEAK_ACCESSORS_CHECKED(holder, name, offset, condition) \ #define RELEASE_ACQUIRE_WEAK_ACCESSORS_CHECKED(holder, name, offset, \
SYNCHRONIZED_WEAK_ACCESSORS_CHECKED2(holder, name, offset, condition, \ condition) \
RELEASE_ACQUIRE_WEAK_ACCESSORS_CHECKED2(holder, name, offset, condition, \
condition) condition)
#define SYNCHRONIZED_WEAK_ACCESSORS(holder, name, offset) \ #define RELEASE_ACQUIRE_WEAK_ACCESSORS(holder, name, offset) \
SYNCHRONIZED_WEAK_ACCESSORS_CHECKED(holder, name, offset, true) RELEASE_ACQUIRE_WEAK_ACCESSORS_CHECKED(holder, name, offset, true)
// Getter that returns a Smi as an int and writes an int as a Smi. // Getter that returns a Smi as an int and writes an int as a Smi.
#define SMI_ACCESSORS_CHECKED(holder, name, offset, condition) \ #define SMI_ACCESSORS_CHECKED(holder, name, offset, condition) \
......
...@@ -217,7 +217,7 @@ void TransitionsAccessor::Reload() { ...@@ -217,7 +217,7 @@ void TransitionsAccessor::Reload() {
int TransitionsAccessor::Capacity() { return transitions().Capacity(); } int TransitionsAccessor::Capacity() { return transitions().Capacity(); }
void TransitionsAccessor::Initialize() { void TransitionsAccessor::Initialize() {
raw_transitions_ = map_.raw_transitions(isolate_); raw_transitions_ = map_.raw_transitions(isolate_, kAcquireLoad);
HeapObject heap_object; HeapObject heap_object;
if (raw_transitions_->IsSmi() || raw_transitions_->IsCleared()) { if (raw_transitions_->IsSmi() || raw_transitions_->IsCleared()) {
encoding_ = kUninitialized; encoding_ = kUninitialized;
......
...@@ -429,13 +429,14 @@ void TransitionsAccessor::SetMigrationTarget(Map migration_target) { ...@@ -429,13 +429,14 @@ void TransitionsAccessor::SetMigrationTarget(Map migration_target) {
// sake. // sake.
if (encoding() != kUninitialized) return; if (encoding() != kUninitialized) return;
DCHECK(map_.is_deprecated()); DCHECK(map_.is_deprecated());
map_.set_raw_transitions(MaybeObject::FromObject(migration_target)); map_.set_raw_transitions(MaybeObject::FromObject(migration_target),
kReleaseStore);
MarkNeedsReload(); MarkNeedsReload();
} }
Map TransitionsAccessor::GetMigrationTarget() { Map TransitionsAccessor::GetMigrationTarget() {
if (encoding() == kMigrationTarget) { if (encoding() == kMigrationTarget) {
return map_.raw_transitions()->cast<Map>(); return map_.raw_transitions(kAcquireLoad)->cast<Map>();
} }
return Map(); return Map();
} }
...@@ -449,7 +450,7 @@ void TransitionsAccessor::ReplaceTransitions(MaybeObject new_transitions) { ...@@ -449,7 +450,7 @@ void TransitionsAccessor::ReplaceTransitions(MaybeObject new_transitions) {
DCHECK(old_transitions != new_transitions->GetHeapObjectAssumeStrong()); DCHECK(old_transitions != new_transitions->GetHeapObjectAssumeStrong());
#endif #endif
} }
map_.set_raw_transitions(new_transitions); map_.set_raw_transitions(new_transitions, kReleaseStore);
MarkNeedsReload(); MarkNeedsReload();
} }
......
...@@ -72,7 +72,7 @@ void TryInstallNCICode(Isolate* isolate, Handle<JSFunction> function, ...@@ -72,7 +72,7 @@ void TryInstallNCICode(Isolate* isolate, Handle<JSFunction> function,
Handle<Code> code; Handle<Code> code;
if (sfi->TryGetCachedCode(isolate).ToHandle(&code)) { if (sfi->TryGetCachedCode(isolate).ToHandle(&code)) {
function->set_code(*code); function->set_code(*code, kReleaseStore);
JSFunction::EnsureFeedbackVector(function, is_compiled_scope); JSFunction::EnsureFeedbackVector(function, is_compiled_scope);
if (FLAG_trace_turbo_nci) CompilationCacheCode::TraceHit(sfi, code); if (FLAG_trace_turbo_nci) CompilationCacheCode::TraceHit(sfi, code);
} }
...@@ -428,7 +428,7 @@ RUNTIME_FUNCTION(Runtime_CompileForOnStackReplacement) { ...@@ -428,7 +428,7 @@ RUNTIME_FUNCTION(Runtime_CompileForOnStackReplacement) {
} }
if (!function->HasAttachedOptimizedCode()) { if (!function->HasAttachedOptimizedCode()) {
function->set_code(function->shared().GetCode()); function->set_code(function->shared().GetCode(), kReleaseStore);
} }
return Object(); return Object();
} }
......
...@@ -176,7 +176,9 @@ void ContextSerializer::SerializeObjectImpl(Handle<HeapObject> obj) { ...@@ -176,7 +176,9 @@ void ContextSerializer::SerializeObjectImpl(Handle<HeapObject> obj) {
// serialize optimized code anyway. // serialize optimized code anyway.
Handle<JSFunction> closure = Handle<JSFunction>::cast(obj); Handle<JSFunction> closure = Handle<JSFunction>::cast(obj);
closure->ResetIfBytecodeFlushed(); closure->ResetIfBytecodeFlushed();
if (closure->is_compiled()) closure->set_code(closure->shared().GetCode()); if (closure->is_compiled()) {
closure->set_code(closure->shared().GetCode(), kReleaseStore);
}
} }
CheckRehashability(*obj); CheckRehashability(*obj);
......
...@@ -283,7 +283,7 @@ i::Handle<i::JSFunction> Optimize( ...@@ -283,7 +283,7 @@ i::Handle<i::JSFunction> Optimize(
i::compiler::Pipeline::GenerateCodeForTesting(&info, isolate, out_broker) i::compiler::Pipeline::GenerateCodeForTesting(&info, isolate, out_broker)
.ToHandleChecked(); .ToHandleChecked();
info.native_context().AddOptimizedCode(*code); info.native_context().AddOptimizedCode(*code);
function->set_code(*code); function->set_code(*code, v8::kReleaseStore);
return function; return function;
} }
......
...@@ -45,7 +45,7 @@ FunctionTester::FunctionTester(Handle<Code> code, int param_count) ...@@ -45,7 +45,7 @@ FunctionTester::FunctionTester(Handle<Code> code, int param_count)
flags_(0) { flags_(0) {
CHECK(!code.is_null()); CHECK(!code.is_null());
Compile(function); Compile(function);
function->set_code(*code); function->set_code(*code, kReleaseStore);
} }
FunctionTester::FunctionTester(Handle<Code> code) : FunctionTester(code, 0) {} FunctionTester::FunctionTester(Handle<Code> code) : FunctionTester(code, 0) {}
...@@ -158,7 +158,7 @@ Handle<JSFunction> FunctionTester::CompileGraph(Graph* graph) { ...@@ -158,7 +158,7 @@ Handle<JSFunction> FunctionTester::CompileGraph(Graph* graph) {
Pipeline::GenerateCodeForTesting(&info, isolate, call_descriptor, graph, Pipeline::GenerateCodeForTesting(&info, isolate, call_descriptor, graph,
AssemblerOptions::Default(isolate)) AssemblerOptions::Default(isolate))
.ToHandleChecked(); .ToHandleChecked();
function->set_code(*code); function->set_code(*code, kReleaseStore);
return function; return function;
} }
......
...@@ -130,7 +130,7 @@ TEST(TestConcurrentSharedFunctionInfo) { ...@@ -130,7 +130,7 @@ TEST(TestConcurrentSharedFunctionInfo) {
OptimizedCompilationInfo f_info(&zone, isolate, f_sfi, f, CodeKind::TURBOFAN); OptimizedCompilationInfo f_info(&zone, isolate, f_sfi, f, CodeKind::TURBOFAN);
Handle<Code> f_code = Handle<Code> f_code =
Pipeline::GenerateCodeForTesting(&f_info, isolate).ToHandleChecked(); Pipeline::GenerateCodeForTesting(&f_info, isolate).ToHandleChecked();
f->set_code(*f_code); f->set_code(*f_code, kReleaseStore);
IsCompiledScope compiled_scope_f(*f_sfi, isolate); IsCompiledScope compiled_scope_f(*f_sfi, isolate);
JSFunction::EnsureFeedbackVector(f, &compiled_scope_f); JSFunction::EnsureFeedbackVector(f, &compiled_scope_f);
......
...@@ -135,7 +135,7 @@ class BytecodeGraphTester { ...@@ -135,7 +135,7 @@ class BytecodeGraphTester {
Handle<Code> code = Handle<Code> code =
Pipeline::GenerateCodeForTesting(&compilation_info, isolate_) Pipeline::GenerateCodeForTesting(&compilation_info, isolate_)
.ToHandleChecked(); .ToHandleChecked();
function->set_code(*code); function->set_code(*code, kReleaseStore);
return function; return function;
} }
......
...@@ -4072,7 +4072,8 @@ TEST(WeakReference) { ...@@ -4072,7 +4072,8 @@ TEST(WeakReference) {
.Build(); .Build();
CHECK(code->IsCode()); CHECK(code->IsCode());
fv->set_maybe_optimized_code(i::HeapObjectReference::Weak(*code)); fv->set_maybe_optimized_code(i::HeapObjectReference::Weak(*code),
v8::kReleaseStore);
fv->set_flags(i::FeedbackVector::OptimizationTierBits::encode( fv->set_flags(i::FeedbackVector::OptimizationTierBits::encode(
i::OptimizationTier::kTopTier) | i::OptimizationTier::kTopTier) |
i::FeedbackVector::OptimizationMarkerBits::encode( i::FeedbackVector::OptimizationMarkerBits::encode(
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment