Commit a7e5504d authored by Jaroslav Sevcik, committed by Commit Bot

[cleanup] Stop using the now-unused deopt_count from feedback vector.

Unfortunately, we still have to keep the field because GCMole and Torque
do not support platform-specific padding well
(see http://crbug.com/v8/9287).

Bug: v8:9183
Change-Id: I2210be4b8174c97bc82145605f9b862aac3bdc37
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/1624791
Reviewed-by: Michael Starzinger <mstarzinger@chromium.org>
Commit-Queue: Jaroslav Sevcik <jarin@chromium.org>
Cr-Commit-Position: refs/heads/master@{#61802}
parent aa1b81b6
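
Note on the layout constraint behind this change: the 4-byte slot that used to hold deopt_count is renamed to explicit padding rather than dropped, because on platforms with 8-byte object alignment the header would otherwise end at an unaligned size, while Torque and GCMole cannot yet express padding that exists only on some platforms. A minimal standalone sketch of that constraint (illustrative names and sizes, not the actual V8 header, which also contains several tagged-pointer fields):

#include <cstdint>

// Standalone sketch, not V8 code: it only models the four 32-bit header
// slots touched by this commit.
constexpr int kObjectAlignmentSketch = 8;  // assumed 64-bit object alignment

struct FeedbackVectorHeaderSketch {
  int32_t length;
  int32_t invocation_count;
  int32_t profiler_ticks;
  uint32_t padding;  // formerly deopt_count; keeps the size a multiple of 8
};

static_assert(sizeof(FeedbackVectorHeaderSketch) % kObjectAlignmentSketch == 0,
              "header must stay a multiple of the object alignment");
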
......@@ -1277,7 +1277,10 @@ extern class FeedbackVector extends HeapObject {
length: int32;
invocation_count: int32;
profiler_ticks: int32;
deopt_count: int32;
// TODO(v8:9287) The padding is not necessary on platforms with 4-byte
// tagged pointers; we should make it conditional. However, platform-specific
// padding interacts badly with GCMole, so we need to address that first.
padding: uint32;
}
extern class FeedbackCell extends Struct {
......
......@@ -429,7 +429,6 @@ void Deoptimizer::DeoptimizeFunction(JSFunction function, Code code) {
function.feedback_vector().EvictOptimizedCodeMarkedForDeoptimization(
function.shared(), "unlinking code marked for deopt");
if (!code.deopt_already_counted()) {
function.feedback_vector().increment_deopt_count();
code.set_deopt_already_counted(true);
}
DeoptimizeMarkedCodeForContext(function.context().native_context());
......@@ -493,19 +492,10 @@ Deoptimizer::Deoptimizer(Isolate* isolate, JSFunction function,
DCHECK(AllowHeapAllocation::IsAllowed());
disallow_heap_allocation_ = new DisallowHeapAllocation();
#endif // DEBUG
if (compiled_code_.kind() != Code::OPTIMIZED_FUNCTION ||
!compiled_code_.deopt_already_counted()) {
// If the function is optimized, and we haven't counted that deopt yet, then
// increment the function's deopt count so that we can avoid optimising
// functions that deopt too often.
if (deopt_kind_ == DeoptimizeKind::kSoft) {
// Soft deopts shouldn't count against the overall deoptimization count
// that can eventually lead to disabling optimization for a function.
isolate->counters()->soft_deopts_executed()->Increment();
} else if (!function.is_null()) {
function.feedback_vector().increment_deopt_count();
}
if ((compiled_code_.kind() != Code::OPTIMIZED_FUNCTION ||
!compiled_code_.deopt_already_counted()) &&
deopt_kind_ == DeoptimizeKind::kSoft) {
isolate->counters()->soft_deopts_executed()->Increment();
}
if (compiled_code_.kind() == Code::OPTIMIZED_FUNCTION) {
compiled_code_.set_deopt_already_counted(true);
......
......@@ -525,7 +525,7 @@ Handle<FeedbackVector> Factory::NewFeedbackVector(
vector->set_length(length);
vector->set_invocation_count(0);
vector->set_profiler_ticks(0);
vector->set_deopt_count(0);
vector->clear_padding();
vector->set_closure_feedback_cell_array(*closure_feedback_cell_array);
// TODO(leszeks): Initialize based on the feedback metadata.
......
......@@ -108,7 +108,13 @@ ACCESSORS(FeedbackVector, closure_feedback_cell_array, ClosureFeedbackCellArray,
INT32_ACCESSORS(FeedbackVector, length, kLengthOffset)
INT32_ACCESSORS(FeedbackVector, invocation_count, kInvocationCountOffset)
INT32_ACCESSORS(FeedbackVector, profiler_ticks, kProfilerTicksOffset)
INT32_ACCESSORS(FeedbackVector, deopt_count, kDeoptCountOffset)
void FeedbackVector::clear_padding() {
if (FIELD_SIZE(kPaddingOffset) == 0) return;
DCHECK_EQ(4, FIELD_SIZE(kPaddingOffset));
memset(reinterpret_cast<void*>(address() + kPaddingOffset), 0,
FIELD_SIZE(kPaddingOffset));
}
bool FeedbackVector::is_empty() const { return length() == 0; }
......@@ -118,13 +124,6 @@ FeedbackMetadata FeedbackVector::metadata() const {
void FeedbackVector::clear_invocation_count() { set_invocation_count(0); }
void FeedbackVector::increment_deopt_count() {
int count = deopt_count();
if (count < std::numeric_limits<int32_t>::max()) {
set_deopt_count(count + 1);
}
}
Code FeedbackVector::optimized_code() const {
MaybeObject slot = optimized_code_weak_or_smi();
DCHECK(slot->IsSmi() || slot->IsWeakOrCleared());
......
......@@ -246,7 +246,6 @@ Handle<FeedbackVector> FeedbackVector::New(
: OptimizationMarker::kNone)));
DCHECK_EQ(vector->invocation_count(), 0);
DCHECK_EQ(vector->profiler_ticks(), 0);
DCHECK_EQ(vector->deopt_count(), 0);
// Ensure we can skip the write barrier
Handle<Object> uninitialized_sentinel = UninitializedSentinel(isolate);
......@@ -368,7 +367,6 @@ void FeedbackVector::EvictOptimizedCodeMarkedForDeoptimization(
PrintF("]\n");
}
if (!code.deopt_already_counted()) {
increment_deopt_count();
code.set_deopt_already_counted(true);
}
ClearOptimizedCode();
......
......@@ -204,15 +204,14 @@ class FeedbackVector : public HeapObject {
// [invocation_count]: The number of times this function has been invoked.
DECL_INT32_ACCESSORS(invocation_count)
// [invocation_count]: The number of times this function has been seen by the
// [profiler_ticks]: The number of times this function has been seen by the
// runtime profiler.
DECL_INT32_ACCESSORS(profiler_ticks)
// [deopt_count]: The number of times this function has deoptimized.
DECL_INT32_ACCESSORS(deopt_count)
// Initialize the padding if necessary.
inline void clear_padding();
inline void clear_invocation_count();
inline void increment_deopt_count();
inline Code optimized_code() const;
inline OptimizationMarker optimization_marker() const;
......@@ -315,9 +314,10 @@ class FeedbackVector : public HeapObject {
DEFINE_FIELD_OFFSET_CONSTANTS(HeapObject::kHeaderSize,
TORQUE_GENERATED_FEEDBACK_VECTOR_FIELDS)
static constexpr int kUnalignedHeaderSize = kSize;
static const int kHeaderSize =
RoundUp<kObjectAlignment>(int{kUnalignedHeaderSize});
static const int kHeaderSize = kSize;
static_assert(kSize % kObjectAlignment == 0,
"Header must be padded for alignment");
static const int kFeedbackSlotsOffset = kHeaderSize;
class BodyDescriptor;
......
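
The feedback-vector.h hunk above replaces the RoundUp-based header size with the Torque-generated size plus a compile-time check: now that the padding field is part of the generated layout, kSize is already aligned, so rounding is no longer needed. A hedged sketch of the before/after, using a local stand-in for V8's RoundUp helper and made-up sizes:

#include <cstddef>

constexpr std::size_t kAlignmentSketch = 8;

// Local stand-in for V8's RoundUp<> template, not the real helper.
constexpr std::size_t RoundUpSketch(std::size_t n) {
  return (n + kAlignmentSketch - 1) & ~(kAlignmentSketch - 1);
}

// Old style: the generated header size might be unaligned, so round it up.
constexpr std::size_t kUnalignedHeaderSizeSketch = 28;  // made-up value
constexpr std::size_t kOldHeaderSizeSketch =
    RoundUpSketch(kUnalignedHeaderSizeSketch);

// New style: the explicit padding slot makes the generated size aligned
// already, so it is used directly and the invariant is asserted instead.
constexpr std::size_t kGeneratedSizeSketch = 32;  // made-up value
constexpr std::size_t kNewHeaderSizeSketch = kGeneratedSizeSketch;
static_assert(kGeneratedSizeSketch % kAlignmentSketch == 0,
              "Header must be padded for alignment");
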
......@@ -572,15 +572,6 @@ RUNTIME_FUNCTION(Runtime_UnblockConcurrentRecompilation) {
return ReadOnlyRoots(isolate).undefined_value();
}
RUNTIME_FUNCTION(Runtime_GetDeoptCount) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
// Functions without a feedback vector have never deoptimized.
if (!function->has_feedback_vector()) return Smi::kZero;
return Smi::FromInt(function->feedback_vector().deopt_count());
}
static void ReturnThis(const v8::FunctionCallbackInfo<v8::Value>& args) {
args.GetReturnValue().Set(args.This());
}
......
......@@ -444,7 +444,6 @@ namespace internal {
F(DisassembleFunction, 1, 1) \
F(FreezeWasmLazyCompilation, 1, 1) \
F(GetCallable, 0, 1) \
F(GetDeoptCount, 1, 1) \
F(GetInitializerFunction, 1, 1) \
F(GetOptimizationStatus, -1, 1) \
F(GetUndetectable, 0, 1) \
......
......@@ -48,4 +48,4 @@ g();
g();
%OptimizeFunctionOnNextCall(g);
g();
assertTrue(%GetDeoptCount(g) > 0);
assertUnoptimized(g);
......@@ -16,8 +16,6 @@ function foo(i, deopt = false) {
}
%PrepareFunctionForOptimization(foo);
assertEquals(0, %GetDeoptCount(foo));
%PrepareFunctionForOptimization(foo);
foo(10);
foo(10);
......@@ -25,9 +23,7 @@ foo(10);
foo(10);
assertOptimized(foo);
assertEquals(0, %GetDeoptCount(foo));
foo(10, true);
assertUnoptimized(foo);
assertEquals(1, %GetDeoptCount(foo));
......@@ -17,16 +17,13 @@ function foo(i, deopt = false) {
%PrepareFunctionForOptimization(foo);
%PrepareFunctionForOptimization(foo);
assertEquals(0, %GetDeoptCount(foo));
foo(10);
foo(10);
%OptimizeFunctionOnNextCall(foo);
foo(10);
assertOptimized(foo);
assertEquals(0, %GetDeoptCount(foo));
foo(10, true);
assertUnoptimized(foo);
assertEquals(1, %GetDeoptCount(foo));
......@@ -16,8 +16,6 @@ function foo(i, deopt = false, deoptobj = null) {
}
}
assertEquals(0, %GetDeoptCount(foo));
%PrepareFunctionForOptimization(foo);
foo(10);
foo(10);
......@@ -25,10 +23,7 @@ foo(10);
foo(10);
assertOptimized(foo);
assertEquals(0, %GetDeoptCount(foo));
foo(10, true, { bar: function(){} });
assertUnoptimized(foo);
// Soft deopts don't count to the deopt count.
assertEquals(0, %GetDeoptCount(foo));
......@@ -9,23 +9,18 @@
function foo() {}
assertEquals(0, %GetDeoptCount(foo));
foo();
foo();
%OptimizeFunctionOnNextCall(foo);
foo();
assertOptimized(foo);
assertEquals(0, %GetDeoptCount(foo));
// Unlink the function.
%DeoptimizeFunction(foo);
assertUnoptimized(foo);
assertEquals(1, %GetDeoptCount(foo));
foo();
assertUnoptimized(foo);
assertEquals(1, %GetDeoptCount(foo));
......@@ -2,15 +2,16 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// Flags: --allow-natives-syntax
// Flags: --allow-natives-syntax --opt
function foo()
{
return 1 in [0];
}
%PrepareFunctionForOptimization(foo);
foo();
foo();
%OptimizeFunctionOnNextCall(foo);
foo();
assertEquals(0, %GetDeoptCount(foo));
assertOptimized(foo);