Commit be699045 authored by Mike Stanton, committed by Commit Bot

[TurboFan] Mark Code object as never serialized

Code objects are exposed through JSFunction and SharedFunctionInfo.
If they are builtins, we don't have to worry about background threads
seeing partially initialized code objects. If they are optimized code
objects, we may. Background threads read the code fields with
AcquireLoad semantics. The fields are set on the main thread with
ReleaseStore semantics when appropriate.

Special care is taken when setting an optimized code object in a closure
in the interpreter entry stub. Since the MacroAssembler doesn't support
ReleaseStore semantics, this CL ensures that the optimized code object
is stored with those semantics in the feedback vector, where the
interpreter entry stub finds it.

Bug: v8:7790
Change-Id: I41ecedfe0e9d1ad5091cbe9a97f66c66ca9e07dd
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2676633
Commit-Queue: Michael Stanton <mvstanton@chromium.org>
Reviewed-by: Ulan Degenbaev <ulan@chromium.org>
Reviewed-by: Santiago Aboy Solanes <solanes@chromium.org>
Reviewed-by: Leszek Swirski <leszeks@chromium.org>
Reviewed-by: Georg Neis <neis@chromium.org>
Cr-Commit-Position: refs/heads/master@{#72869}
parent 6253320c
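Editor's note: the message above describes a release/acquire publication pattern. The main thread fully initializes the optimized Code object and publishes it with a release store; a background thread that reads the pointer with an acquire load is then guaranteed to also observe the initialized fields. Below is a minimal, self-contained sketch of that pattern in plain C++ (using std::atomic rather than V8's tagged-field accessors; FakeCode and its field are hypothetical stand-ins, not V8 types):

```cpp
#include <atomic>
#include <cassert>
#include <thread>

struct FakeCode {
  int inlined_bytecode_size = 0;
};

std::atomic<FakeCode*> published{nullptr};

int main() {
  std::thread background([] {
    FakeCode* code;
    // Acquire load: pairs with the release store performed by the main thread.
    while ((code = published.load(std::memory_order_acquire)) == nullptr) {
    }
    // Safe: the initializing write below happens-before this read.
    assert(code->inlined_bytecode_size == 42);
  });

  FakeCode* code = new FakeCode();
  code->inlined_bytecode_size = 42;  // Fully initialize before publishing.
  // Release store: makes the initialization visible to acquire readers.
  published.store(code, std::memory_order_release);

  background.join();
  delete code;
  return 0;
}
```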
@@ -1151,6 +1151,10 @@ Handle<Code> ContinuationForConcurrentOptimization(
// code.
if (!function->HasAttachedOptimizedCode()) {
DCHECK(function->feedback_vector().has_optimized_code());
// Release store isn't required here because it was done on store
// into the feedback vector.
STATIC_ASSERT(
FeedbackVector::kFeedbackVectorMaybeOptimizedCodeIsStoreRelease);
function->set_code(function->feedback_vector().optimized_code());
}
return handle(function->code(), isolate);
@@ -1967,7 +1971,7 @@ bool Compiler::Compile(Isolate* isolate, Handle<JSFunction> function,
}
// Install code on closure.
function->set_code(*code);
function->set_code(*code, kReleaseStore);
// Install a feedback vector if necessary.
if (code->kind() == CodeKind::BASELINE) {
@@ -2069,7 +2073,7 @@ bool Compiler::CompileOptimized(Isolate* isolate, Handle<JSFunction> function,
}
if (!CodeKindIsNativeContextIndependentJSFunction(code_kind)) {
function->set_code(*code);
function->set_code(*code, kReleaseStore);
}
// Check postconditions on success.
@@ -3225,7 +3229,8 @@ bool Compiler::FinalizeOptimizedCompilationJob(OptimizedCompilationJob* job,
InsertCodeIntoCompilationCache(isolate, compilation_info);
CompilerTracer::TraceCompletedJob(isolate, compilation_info);
if (should_install_code_on_function) {
compilation_info->closure()->set_code(*compilation_info->code());
compilation_info->closure()->set_code(*compilation_info->code(),
kReleaseStore);
}
return CompilationJob::SUCCEEDED;
}
@@ -3233,7 +3238,7 @@ bool Compiler::FinalizeOptimizedCompilationJob(OptimizedCompilationJob* job,
DCHECK_EQ(job->state(), CompilationJob::State::kFailed);
CompilerTracer::TraceAbortedJob(isolate, compilation_info);
compilation_info->closure()->set_code(shared->GetCode());
compilation_info->closure()->set_code(shared->GetCode(), kReleaseStore);
// Clear the InOptimizationQueue marker, if it exists.
if (!CodeKindIsNativeContextIndependentJSFunction(code_kind) &&
compilation_info->closure()->IsInOptimizationQueue()) {
@@ -3267,6 +3272,11 @@ void Compiler::PostInstantiation(Handle<JSFunction> function) {
// Caching of optimized code enabled and optimized code found.
DCHECK(!code.marked_for_deoptimization());
DCHECK(function->shared().is_compiled());
// We don't need a release store because the optimized code was
// stored with release semantics into the vector
STATIC_ASSERT(
FeedbackVector::kFeedbackVectorMaybeOptimizedCodeIsStoreRelease);
function->set_code(code);
}
}
@@ -27,7 +27,7 @@ void DisposeCompilationJob(OptimizedCompilationJob* job,
bool restore_function_code) {
if (restore_function_code) {
Handle<JSFunction> function = job->compilation_info()->closure();
function->set_code(function->shared().GetCode());
function->set_code(function->shared().GetCode(), kReleaseStore);
if (function->IsInOptimizationQueue()) {
function->ClearOptimizationMarker();
}
@@ -82,6 +82,7 @@ enum class OddballType : uint8_t {
V(ArrayBoilerplateDescription) \
V(CallHandlerInfo) \
V(Cell) \
V(Code) \
V(FeedbackCell) \
V(FeedbackVector) \
V(RegExpBoilerplateDescription) \
@@ -133,7 +134,6 @@ enum class OddballType : uint8_t {
V(JSObject) \
/* Subtypes of HeapObject */ \
V(AllocationSite) \
V(Code) \
V(DescriptorArray) \
V(FixedArrayBase) \
V(FunctionTemplateInfo) \
@@ -389,7 +389,6 @@ class V8_EXPORT_PRIVATE JSFunctionRef : public JSObjectRef {
bool has_feedback_vector() const;
bool has_initial_map() const;
bool has_prototype() const;
bool HasAttachedOptimizedCode() const;
bool PrototypeRequiresRuntimeLookup() const;
void Serialize();
@@ -682,7 +682,6 @@ class JSFunctionData : public JSObjectData {
bool has_feedback_vector() const { return has_feedback_vector_; }
bool has_initial_map() const { return has_initial_map_; }
bool has_prototype() const { return has_prototype_; }
bool HasAttachedOptimizedCode() const { return has_attached_optimized_code_; }
bool PrototypeRequiresRuntimeLookup() const {
return PrototypeRequiresRuntimeLookup_;
}
@@ -710,6 +709,7 @@
}
ObjectData* code() const {
DCHECK(serialized_code_and_feedback());
DCHECK(!FLAG_turbo_direct_heap_access);
return code_;
}
int initial_map_instance_size_with_min_slack() const {
@@ -721,7 +721,6 @@
bool has_feedback_vector_;
bool has_initial_map_;
bool has_prototype_;
bool has_attached_optimized_code_;
bool PrototypeRequiresRuntimeLookup_;
bool serialized_ = false;
@@ -1346,7 +1345,6 @@ JSFunctionData::JSFunctionData(JSHeapBroker* broker, ObjectData** storage,
has_initial_map_(object->has_prototype_slot() &&
object->has_initial_map()),
has_prototype_(object->has_prototype_slot() && object->has_prototype()),
has_attached_optimized_code_(object->HasAttachedOptimizedCode()),
PrototypeRequiresRuntimeLookup_(
object->PrototypeRequiresRuntimeLookup()) {}
@@ -1400,7 +1398,12 @@ void JSFunctionData::SerializeCodeAndFeedback(JSHeapBroker* broker) {
DCHECK_NULL(feedback_cell_);
DCHECK_NULL(feedback_vector_);
DCHECK_NULL(code_);
code_ = broker->GetOrCreateData(function->code());
if (!FLAG_turbo_direct_heap_access) {
// This is conditionalized because Code objects are never serialized now.
// We only need to represent the code object in serialized data when
// we're unable to perform direct heap accesses.
code_ = broker->GetOrCreateData(function->code(kAcquireLoad));
}
feedback_cell_ = broker->GetOrCreateData(function->raw_feedback_cell());
feedback_vector_ = has_feedback_vector()
? broker->GetOrCreateData(function->feedback_vector())
@@ -2160,7 +2163,9 @@ class CodeData : public HeapObjectData {
public:
CodeData(JSHeapBroker* broker, ObjectData** storage, Handle<Code> object)
: HeapObjectData(broker, storage, object),
inlined_bytecode_size_(object->inlined_bytecode_size()) {}
inlined_bytecode_size_(object->inlined_bytecode_size()) {
DCHECK(!FLAG_turbo_direct_heap_access);
}
unsigned inlined_bytecode_size() const { return inlined_bytecode_size_; }
@@ -3485,7 +3490,6 @@ BIMODAL_ACCESSOR_C(JSDataView, size_t, byte_length)
BIMODAL_ACCESSOR_C(JSFunction, bool, has_feedback_vector)
BIMODAL_ACCESSOR_C(JSFunction, bool, has_initial_map)
BIMODAL_ACCESSOR_C(JSFunction, bool, has_prototype)
BIMODAL_ACCESSOR_C(JSFunction, bool, HasAttachedOptimizedCode)
BIMODAL_ACCESSOR_C(JSFunction, bool, PrototypeRequiresRuntimeLookup)
BIMODAL_ACCESSOR(JSFunction, Context, context)
BIMODAL_ACCESSOR(JSFunction, NativeContext, native_context)
@@ -3494,7 +3498,6 @@ BIMODAL_ACCESSOR(JSFunction, Object, prototype)
BIMODAL_ACCESSOR(JSFunction, SharedFunctionInfo, shared)
BIMODAL_ACCESSOR(JSFunction, FeedbackCell, raw_feedback_cell)
BIMODAL_ACCESSOR(JSFunction, FeedbackVector, feedback_vector)
BIMODAL_ACCESSOR(JSFunction, Code, code)
BIMODAL_ACCESSOR_C(JSGlobalObject, bool, IsDetached)
@@ -4391,6 +4394,15 @@ bool JSFunctionRef::serialized_code_and_feedback() const {
return data()->AsJSFunction()->serialized_code_and_feedback();
}
CodeRef JSFunctionRef::code() const {
if (data_->should_access_heap() || FLAG_turbo_direct_heap_access) {
return CodeRef(broker(), broker()->CanonicalPersistentHandle(
object()->code(kAcquireLoad)));
}
return CodeRef(broker(), ObjectRef::data()->AsJSFunction()->code());
}
void SharedFunctionInfoRef::SerializeFunctionTemplateInfo() {
if (data_->should_access_heap()) return;
CHECK_EQ(broker()->mode(), JSHeapBroker::kSerializing);
@@ -204,10 +204,8 @@ Reduction JSInliningHeuristic::Reduce(Node* node) {
unsigned inlined_bytecode_size = 0;
if (candidate.functions[i].has_value()) {
JSFunctionRef function = candidate.functions[i].value();
if (function.HasAttachedOptimizedCode()) {
inlined_bytecode_size = function.code().inlined_bytecode_size();
candidate.total_size += inlined_bytecode_size;
}
inlined_bytecode_size = function.code().inlined_bytecode_size();
candidate.total_size += inlined_bytecode_size;
}
candidate_is_small = candidate_is_small &&
IsSmall(bytecode.length() + inlined_bytecode_size);
@@ -791,9 +789,11 @@ void JSInliningHeuristic::PrintCandidates() {
os << ", bytecode size: " << candidate.bytecode[i]->length();
if (candidate.functions[i].has_value()) {
JSFunctionRef function = candidate.functions[i].value();
if (function.HasAttachedOptimizedCode()) {
unsigned inlined_bytecode_size =
function.code().inlined_bytecode_size();
if (inlined_bytecode_size > 0) {
os << ", existing opt code's inlined bytecode size: "
<< function.code().inlined_bytecode_size();
<< inlined_bytecode_size;
}
}
} else {
@@ -1191,7 +1191,7 @@ void LiveEdit::PatchScript(Isolate* isolate, Handle<Script> script,
isolate->compilation_cache()->Remove(sfi);
for (auto& js_function : data->js_functions) {
js_function->set_shared(*new_sfi);
js_function->set_code(js_function->shared().GetCode());
js_function->set_code(js_function->shared().GetCode(), kReleaseStore);
js_function->set_raw_feedback_cell(
*isolate->factory()->many_closures_cell());
@@ -432,8 +432,8 @@ Handle<FeedbackVector> Factory::NewFeedbackVector(
*feedback_vector_map());
Handle<FeedbackVector> vector(FeedbackVector::cast(result), isolate());
vector->set_shared_function_info(*shared);
vector->set_maybe_optimized_code(
HeapObjectReference::ClearedValue(isolate()));
vector->set_maybe_optimized_code(HeapObjectReference::ClearedValue(isolate()),
kReleaseStore);
vector->set_length(length);
vector->set_invocation_count(0);
vector->set_profiler_ticks(0);
@@ -3458,7 +3458,7 @@ Handle<JSFunction> Factory::JSFunctionBuilder::BuildRaw(Handle<Code> code) {
function->set_shared(*sfi_);
function->set_context(*context_);
function->set_raw_feedback_cell(*feedback_cell);
function->set_code(*code);
function->set_code(*code, kReleaseStore);
if (map->has_prototype_slot()) {
function->set_prototype_or_initial_map(
ReadOnlyRoots(isolate).the_hole_value());
@@ -38,6 +38,9 @@ INT32_ACCESSORS(FeedbackMetadata, slot_count, kSlotCountOffset)
INT32_ACCESSORS(FeedbackMetadata, create_closure_slot_count,
kCreateClosureSlotCountOffset)
RELEASE_ACQUIRE_WEAK_ACCESSORS(FeedbackVector, maybe_optimized_code,
kMaybeOptimizedCodeOffset)
int32_t FeedbackMetadata::synchronized_slot_count() const {
return base::Acquire_Load(
reinterpret_cast<const base::Atomic32*>(field_address(kSlotCountOffset)));
@@ -113,7 +116,7 @@ FeedbackMetadata FeedbackVector::metadata() const {
void FeedbackVector::clear_invocation_count() { set_invocation_count(0); }
Code FeedbackVector::optimized_code() const {
MaybeObject slot = maybe_optimized_code();
MaybeObject slot = maybe_optimized_code(kAcquireLoad);
DCHECK(slot->IsWeakOrCleared());
HeapObject heap_object;
Code code =
@@ -145,7 +148,7 @@ OptimizationTier FeedbackVector::optimization_tier() const {
// It is possible that the optimization tier bits aren't updated when the code
// was cleared due to a GC.
DCHECK_IMPLIES(tier == OptimizationTier::kNone,
maybe_optimized_code()->IsCleared());
maybe_optimized_code(kAcquireLoad)->IsCleared());
return tier;
}
@@ -394,7 +394,8 @@ void FeedbackVector::SetOptimizedCode(Handle<FeedbackVector> vector,
// re-mark the function for non-concurrent optimization after an OSR. We
// should avoid these cases and also check that marker isn't
// kCompileOptimized or kCompileOptimizedConcurrent.
vector->set_maybe_optimized_code(HeapObjectReference::Weak(*code));
vector->set_maybe_optimized_code(HeapObjectReference::Weak(*code),
kReleaseStore);
int32_t state = vector->flags();
state = OptimizationTierBits::update(state, GetTierForCodeKind(code->kind()));
state = OptimizationMarkerBits::update(state, OptimizationMarker::kNone);
@@ -404,7 +405,8 @@ void FeedbackVector::SetOptimizedCode(Handle<FeedbackVector> vector,
void FeedbackVector::ClearOptimizedCode() {
DCHECK(has_optimized_code());
DCHECK_NE(optimization_tier(), OptimizationTier::kNone);
set_maybe_optimized_code(HeapObjectReference::ClearedValue(GetIsolate()));
set_maybe_optimized_code(HeapObjectReference::ClearedValue(GetIsolate()),
kReleaseStore);
ClearOptimizationTier();
}
@@ -435,7 +437,7 @@ void FeedbackVector::InitializeOptimizationState() {
void FeedbackVector::EvictOptimizedCodeMarkedForDeoptimization(
SharedFunctionInfo shared, const char* reason) {
MaybeObject slot = maybe_optimized_code();
MaybeObject slot = maybe_optimized_code(kAcquireLoad);
if (slot->IsCleared()) {
ClearOptimizationTier();
return;
@@ -198,6 +198,11 @@ class FeedbackVector
STATIC_ASSERT(OptimizationTier::kLastOptimizationTier <
OptimizationTierBits::kMax);
static const bool kFeedbackVectorMaybeOptimizedCodeIsStoreRelease = true;
using TorqueGeneratedFeedbackVector<FeedbackVector,
HeapObject>::maybe_optimized_code;
DECL_RELEASE_ACQUIRE_WEAK_ACCESSORS(maybe_optimized_code)
static constexpr uint32_t kHasCompileOptimizedOrLogFirstExecutionMarker =
kNoneOrInOptimizationQueueMask << OptimizationMarkerBits::kShift;
static constexpr uint32_t kHasNoTopTierCodeOrCompileOptimizedMarkerMask =
@@ -52,7 +52,7 @@ void JSFunction::ClearOptimizationMarker() {
}
bool JSFunction::ChecksOptimizationMarker() {
return code().checks_optimization_marker();
return code(kAcquireLoad).checks_optimization_marker();
}
bool JSFunction::IsMarkedForOptimization() {
@@ -118,7 +118,7 @@ AbstractCode JSFunction::abstract_code(LocalIsolate* isolate) {
if (ActiveTierIsIgnition()) {
return AbstractCode::cast(shared().GetBytecodeArray(isolate));
} else {
return AbstractCode::cast(code());
return AbstractCode::cast(code(kAcquireLoad));
}
}
@@ -136,10 +136,7 @@ void JSFunction::set_code(Code value) {
#endif
}
void JSFunction::set_code_no_write_barrier(Code value) {
DCHECK(!ObjectInYoungGeneration(value));
RELAXED_WRITE_FIELD(*this, kCodeOffset, value);
}
RELEASE_ACQUIRE_ACCESSORS(JSFunction, code, Code, kCodeOffset)
// TODO(ishell): Why relaxed read but release store?
DEF_GETTER(JSFunction, shared, SharedFunctionInfo) {
@@ -255,7 +252,7 @@ DEF_GETTER(JSFunction, prototype, Object) {
}
bool JSFunction::is_compiled() const {
return code().builtin_index() != Builtins::kCompileLazy &&
return code(kAcquireLoad).builtin_index() != Builtins::kCompileLazy &&
shared().is_compiled();
}
@@ -24,13 +24,14 @@ CodeKinds JSFunction::GetAttachedCodeKinds() const {
// Note: There's a special case when bytecode has been aged away. After
// flushing the bytecode, the JSFunction will still have the interpreter
// entry trampoline attached, but the bytecode is no longer available.
if (code().is_interpreter_trampoline_builtin()) {
Code code = this->code(kAcquireLoad);
if (code.is_interpreter_trampoline_builtin()) {
result |= CodeKindFlag::INTERPRETED_FUNCTION;
}
const CodeKind kind = code().kind();
const CodeKind kind = code.kind();
if (!CodeKindIsOptimizedJSFunction(kind) ||
code().marked_for_deoptimization()) {
code.marked_for_deoptimization()) {
DCHECK_EQ((result & ~kJSFunctionCodeKindsMask), 0);
return result;
}
@@ -117,12 +118,14 @@ bool HighestTierOf(CodeKinds kinds, CodeKind* highest_tier) {
bool JSFunction::ActiveTierIsIgnition() const {
if (!shared().HasBytecodeArray()) return false;
bool result = (GetActiveTier() == CodeKind::INTERPRETED_FUNCTION);
DCHECK_IMPLIES(result,
code().is_interpreter_trampoline_builtin() ||
(CodeKindIsOptimizedJSFunction(code().kind()) &&
code().marked_for_deoptimization()) ||
(code().builtin_index() == Builtins::kCompileLazy &&
shared().IsInterpreted()));
#ifdef DEBUG
Code code = this->code(kAcquireLoad);
DCHECK_IMPLIES(result, code.is_interpreter_trampoline_builtin() ||
(CodeKindIsOptimizedJSFunction(code.kind()) &&
code.marked_for_deoptimization()) ||
(code.builtin_index() == Builtins::kCompileLazy &&
shared().IsInterpreted()));
#endif // DEBUG
return result;
}
@@ -82,9 +82,13 @@ class JSFunction : public JSFunctionOrBoundFunction {
// when the function is invoked, e.g. foo() or new foo(). See
// [[Call]] and [[Construct]] description in ECMA-262, section
// 8.6.2, page 27.
// Release/Acquire accessors are used when storing a newly-created
// optimized code object, or when reading from the background thread.
// Storing a builtin doesn't require release semantics because these objects
// are fully initialized.
inline Code code() const;
inline void set_code(Code code);
inline void set_code_no_write_barrier(Code code);
DECL_RELEASE_ACQUIRE_ACCESSORS(code, Code)
// Get the abstract code associated with the function, which will either be
// a Code object or a BytecodeArray.
@@ -42,8 +42,9 @@ RELEASE_ACQUIRE_ACCESSORS(Map, instance_descriptors, DescriptorArray,
// We need to use release-store and acquire-load accessor pairs to ensure
// that the concurrent marking thread observes initializing stores of the
// layout descriptor.
SYNCHRONIZED_WEAK_ACCESSORS(Map, raw_transitions,
kTransitionsOrPrototypeInfoOffset)
WEAK_ACCESSORS(Map, raw_transitions, kTransitionsOrPrototypeInfoOffset)
RELEASE_ACQUIRE_WEAK_ACCESSORS(Map, raw_transitions,
kTransitionsOrPrototypeInfoOffset)
ACCESSORS_CHECKED2(Map, prototype, HeapObject, kPrototypeOffset, true,
value.IsNull() || value.IsJSReceiver())
@@ -425,6 +425,7 @@ class Map : public HeapObject {
// Don't call set_raw_transitions() directly to overwrite transitions, use
// the TransitionArray::ReplaceTransitions() wrapper instead!
DECL_ACCESSORS(raw_transitions, MaybeObject)
DECL_RELEASE_ACQUIRE_WEAK_ACCESSORS(raw_transitions)
// [prototype_info]: Per-prototype metadata. Aliased with transitions
// (which prototype maps don't have).
DECL_ACCESSORS(prototype_info, Object)
@@ -131,6 +131,10 @@
DECL_ACQUIRE_GETTER(name, type) \
DECL_RELEASE_SETTER(name, type)
#define DECL_RELEASE_ACQUIRE_WEAK_ACCESSORS(name) \
DECL_ACQUIRE_GETTER(name, MaybeObject) \
DECL_RELEASE_SETTER(name, MaybeObject)
#define DECL_CAST(Type) \
V8_INLINE static Type cast(Object object); \
V8_INLINE static Type unchecked_cast(Object object) { \
@@ -278,26 +282,32 @@
#define WEAK_ACCESSORS(holder, name, offset) \
WEAK_ACCESSORS_CHECKED(holder, name, offset, true)
#define SYNCHRONIZED_WEAK_ACCESSORS_CHECKED2(holder, name, offset, \
get_condition, set_condition) \
DEF_GETTER(holder, name, MaybeObject) { \
MaybeObject value = \
TaggedField<MaybeObject, offset>::Acquire_Load(isolate, *this); \
DCHECK(get_condition); \
return value; \
} \
void holder::set_##name(MaybeObject value, WriteBarrierMode mode) { \
DCHECK(set_condition); \
TaggedField<MaybeObject, offset>::Release_Store(*this, value); \
CONDITIONAL_WEAK_WRITE_BARRIER(*this, offset, value, mode); \
#define RELEASE_ACQUIRE_WEAK_ACCESSORS_CHECKED2(holder, name, offset, \
get_condition, set_condition) \
MaybeObject holder::name(AcquireLoadTag tag) const { \
IsolateRoot isolate = GetIsolateForPtrCompr(*this); \
return holder::name(isolate, tag); \
} \
MaybeObject holder::name(IsolateRoot isolate, AcquireLoadTag) const { \
MaybeObject value = \
TaggedField<MaybeObject, offset>::Acquire_Load(isolate, *this); \
DCHECK(get_condition); \
return value; \
} \
void holder::set_##name(MaybeObject value, ReleaseStoreTag, \
WriteBarrierMode mode) { \
DCHECK(set_condition); \
TaggedField<MaybeObject, offset>::Release_Store(*this, value); \
CONDITIONAL_WEAK_WRITE_BARRIER(*this, offset, value, mode); \
}
#define SYNCHRONIZED_WEAK_ACCESSORS_CHECKED(holder, name, offset, condition) \
SYNCHRONIZED_WEAK_ACCESSORS_CHECKED2(holder, name, offset, condition, \
condition)
#define RELEASE_ACQUIRE_WEAK_ACCESSORS_CHECKED(holder, name, offset, \
condition) \
RELEASE_ACQUIRE_WEAK_ACCESSORS_CHECKED2(holder, name, offset, condition, \
condition)
#define SYNCHRONIZED_WEAK_ACCESSORS(holder, name, offset) \
SYNCHRONIZED_WEAK_ACCESSORS_CHECKED(holder, name, offset, true)
#define RELEASE_ACQUIRE_WEAK_ACCESSORS(holder, name, offset) \
RELEASE_ACQUIRE_WEAK_ACCESSORS_CHECKED(holder, name, offset, true)
// Getter that returns a Smi as an int and writes an int as a Smi.
#define SMI_ACCESSORS_CHECKED(holder, name, offset, condition) \
@@ -217,7 +217,7 @@ void TransitionsAccessor::Reload() {
int TransitionsAccessor::Capacity() { return transitions().Capacity(); }
void TransitionsAccessor::Initialize() {
raw_transitions_ = map_.raw_transitions(isolate_);
raw_transitions_ = map_.raw_transitions(isolate_, kAcquireLoad);
HeapObject heap_object;
if (raw_transitions_->IsSmi() || raw_transitions_->IsCleared()) {
encoding_ = kUninitialized;
@@ -429,13 +429,14 @@ void TransitionsAccessor::SetMigrationTarget(Map migration_target) {
// sake.
if (encoding() != kUninitialized) return;
DCHECK(map_.is_deprecated());
map_.set_raw_transitions(MaybeObject::FromObject(migration_target));
map_.set_raw_transitions(MaybeObject::FromObject(migration_target),
kReleaseStore);
MarkNeedsReload();
}
Map TransitionsAccessor::GetMigrationTarget() {
if (encoding() == kMigrationTarget) {
return map_.raw_transitions()->cast<Map>();
return map_.raw_transitions(kAcquireLoad)->cast<Map>();
}
return Map();
}
@@ -449,7 +450,7 @@ void TransitionsAccessor::ReplaceTransitions(MaybeObject new_transitions) {
DCHECK(old_transitions != new_transitions->GetHeapObjectAssumeStrong());
#endif
}
map_.set_raw_transitions(new_transitions);
map_.set_raw_transitions(new_transitions, kReleaseStore);
MarkNeedsReload();
}
@@ -72,7 +72,7 @@ void TryInstallNCICode(Isolate* isolate, Handle<JSFunction> function,
Handle<Code> code;
if (sfi->TryGetCachedCode(isolate).ToHandle(&code)) {
function->set_code(*code);
function->set_code(*code, kReleaseStore);
JSFunction::EnsureFeedbackVector(function, is_compiled_scope);
if (FLAG_trace_turbo_nci) CompilationCacheCode::TraceHit(sfi, code);
}
@@ -428,7 +428,7 @@ RUNTIME_FUNCTION(Runtime_CompileForOnStackReplacement) {
}
if (!function->HasAttachedOptimizedCode()) {
function->set_code(function->shared().GetCode());
function->set_code(function->shared().GetCode(), kReleaseStore);
}
return Object();
}
@@ -176,7 +176,9 @@ void ContextSerializer::SerializeObjectImpl(Handle<HeapObject> obj) {
// serialize optimized code anyway.
Handle<JSFunction> closure = Handle<JSFunction>::cast(obj);
closure->ResetIfBytecodeFlushed();
if (closure->is_compiled()) closure->set_code(closure->shared().GetCode());
if (closure->is_compiled()) {
closure->set_code(closure->shared().GetCode(), kReleaseStore);
}
}
CheckRehashability(*obj);
@@ -283,7 +283,7 @@ i::Handle<i::JSFunction> Optimize(
i::compiler::Pipeline::GenerateCodeForTesting(&info, isolate, out_broker)
.ToHandleChecked();
info.native_context().AddOptimizedCode(*code);
function->set_code(*code);
function->set_code(*code, v8::kReleaseStore);
return function;
}
@@ -45,7 +45,7 @@ FunctionTester::FunctionTester(Handle<Code> code, int param_count)
flags_(0) {
CHECK(!code.is_null());
Compile(function);
function->set_code(*code);
function->set_code(*code, kReleaseStore);
}
FunctionTester::FunctionTester(Handle<Code> code) : FunctionTester(code, 0) {}
@@ -158,7 +158,7 @@ Handle<JSFunction> FunctionTester::CompileGraph(Graph* graph) {
Pipeline::GenerateCodeForTesting(&info, isolate, call_descriptor, graph,
AssemblerOptions::Default(isolate))
.ToHandleChecked();
function->set_code(*code);
function->set_code(*code, kReleaseStore);
return function;
}
@@ -130,7 +130,7 @@ TEST(TestConcurrentSharedFunctionInfo) {
OptimizedCompilationInfo f_info(&zone, isolate, f_sfi, f, CodeKind::TURBOFAN);
Handle<Code> f_code =
Pipeline::GenerateCodeForTesting(&f_info, isolate).ToHandleChecked();
f->set_code(*f_code);
f->set_code(*f_code, kReleaseStore);
IsCompiledScope compiled_scope_f(*f_sfi, isolate);
JSFunction::EnsureFeedbackVector(f, &compiled_scope_f);
@@ -135,7 +135,7 @@ class BytecodeGraphTester {
Handle<Code> code =
Pipeline::GenerateCodeForTesting(&compilation_info, isolate_)
.ToHandleChecked();
function->set_code(*code);
function->set_code(*code, kReleaseStore);
return function;
}
@@ -4072,7 +4072,8 @@ TEST(WeakReference) {
.Build();
CHECK(code->IsCode());
fv->set_maybe_optimized_code(i::HeapObjectReference::Weak(*code));
fv->set_maybe_optimized_code(i::HeapObjectReference::Weak(*code),
v8::kReleaseStore);
fv->set_flags(i::FeedbackVector::OptimizationTierBits::encode(
i::OptimizationTier::kTopTier) |
i::FeedbackVector::OptimizationMarkerBits::encode(