Commit de9b0a07 authored by Jaideep Bajwa, committed by Commit Bot

PPC/s390: [objects] Make feedback vector a first-class object

Port 37680d65

Original Commit Message:

    Instead of having feedback vector as a subtype of FixedArray with
    reserved slots, make it a first-class variable-sized object with a
    fixed-size header. This allows us to compress counters to ints in the
    header, rather than forcing them to be Smis.

R=leszeks@chromium.org, joransiu@ca.ibm.com, jyan@ca.ibm.com, michael_dawson@ca.ibm.com
BUG=
LOG=N

Change-Id: Ia835942de292c4e4b802e34672f1e8bf8a2491c7
Reviewed-on: https://chromium-review.googlesource.com/590168
Reviewed-by: Junliang Yan <jyan@ca.ibm.com>
Commit-Queue: Jaideep Bajwa <bjaideep@ca.ibm.com>
Cr-Commit-Position: refs/heads/master@{#46954}
parent eaec875f
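
All of the hunks below make the same substitution on both architectures: byte offsets that used to be derived from FixedArray slot indices (index * kPointerSize plus the array header size) become named offset constants on the new fixed-size FeedbackVector header. A minimal sketch of the two addressing schemes follows; the constant values are placeholders for illustration, the real layout is defined by FeedbackVector in the main commit (37680d65):

    // Placeholder values, not V8's actual layout.
    constexpr int kPointerSize = 8;
    constexpr int kHeaderSize = 16;             // FixedArray-style header
    constexpr int kInvocationCountIndex = 1;    // old: reserved slot index
    constexpr int kInvocationCountOffset = 24;  // new: named header field

    // Old scheme: the counter lives in a reserved FixedArray slot, so its
    // byte offset must be recomputed from the slot index at every use site.
    int OldOffset() {
      return kInvocationCountIndex * kPointerSize + kHeaderSize;
    }

    // New scheme: the counter is a fixed field of the FeedbackVector header,
    // addressed by a single compile-time constant.
    int NewOffset() { return kInvocationCountOffset; }
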
@@ -1024,11 +1024,9 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
   Register closure = r4;
   Register optimized_code_entry = scratch1;
 
-  const int kOptimizedCodeCellOffset =
-      FeedbackVector::kOptimizedCodeIndex * kPointerSize +
-      FeedbackVector::kHeaderSize;
-  __ LoadP(optimized_code_entry,
-           FieldMemOperand(feedback_vector, kOptimizedCodeCellOffset));
+  __ LoadP(
+      optimized_code_entry,
+      FieldMemOperand(feedback_vector, FeedbackVector::kOptimizedCodeOffset));
 
   // Check if the code entry is a Smi. If yes, we interpret it as an
   // optimisation marker. Otherwise, interpret it as a weak cell to a code
@@ -1169,16 +1167,12 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
   __ bne(&switch_to_different_code_kind);
 
   // Increment invocation count for the function.
-  __ LoadP(
-      r8, FieldMemOperand(feedback_vector,
-                          FeedbackVector::kInvocationCountIndex * kPointerSize +
-                              FeedbackVector::kHeaderSize));
-  __ AddSmiLiteral(r8, r8, Smi::FromInt(1), r0);
+  __ LoadP(r8, FieldMemOperand(feedback_vector,
+                               FeedbackVector::kInvocationCountOffset));
+  __ addi(r8, r8, Operand(1));
   __ StoreP(
       r8,
-      FieldMemOperand(feedback_vector,
-                      FeedbackVector::kInvocationCountIndex * kPointerSize +
-                          FeedbackVector::kHeaderSize),
+      FieldMemOperand(feedback_vector, FeedbackVector::kInvocationCountOffset),
       r0);
 
   // Check function data field is actually a BytecodeArray object.
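Note the change of increment instruction in the trampoline hunk above: AddSmiLiteral becomes a plain addi (and AddP in the s390 hunk below) because the invocation count is no longer a tagged Smi. A rough sketch of why the two adds differ, assuming a 64-bit layout in which a Smi keeps its payload in the upper 32 bits of the word (no pointer compression):

    #include <cstdint>

    constexpr int kSmiShift = 32;  // assumption: 64-bit V8, full-word Smis

    // Old: the counter was a Smi, so "count + 1" had to add a tagged 1,
    // i.e. 1 << kSmiShift, which is what AddSmiLiteral(.., Smi::FromInt(1))
    // amounted to.
    std::uint64_t IncrementSmi(std::uint64_t tagged_count) {
      return tagged_count + (std::uint64_t{1} << kSmiShift);
    }

    // New: the counter is a raw integer field in the header, so an ordinary
    // add-immediate suffices.
    std::uint64_t IncrementRaw(std::uint64_t raw_count) {
      return raw_count + 1;
    }
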
@@ -1024,11 +1024,9 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
   Register closure = r3;
   Register optimized_code_entry = scratch1;
 
-  const int kOptimizedCodeCellOffset =
-      FeedbackVector::kOptimizedCodeIndex * kPointerSize +
-      FeedbackVector::kHeaderSize;
-  __ LoadP(optimized_code_entry,
-           FieldMemOperand(feedback_vector, kOptimizedCodeCellOffset));
+  __ LoadP(
+      optimized_code_entry,
+      FieldMemOperand(feedback_vector, FeedbackVector::kOptimizedCodeOffset));
 
   // Check if the code entry is a Smi. If yes, we interpret it as an
   // optimisation marker. Otherwise, interpret it as a weak cell to a code
@@ -1166,15 +1164,11 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
   __ bne(&switch_to_different_code_kind);
 
   // Increment invocation count for the function.
-  __ LoadP(
-      r1, FieldMemOperand(feedback_vector,
-                          FeedbackVector::kInvocationCountIndex * kPointerSize +
-                              FeedbackVector::kHeaderSize));
-  __ AddSmiLiteral(r1, r1, Smi::FromInt(1), r0);
-  __ StoreP(
-      r1, FieldMemOperand(feedback_vector,
-                          FeedbackVector::kInvocationCountIndex * kPointerSize +
-                              FeedbackVector::kHeaderSize));
+  __ LoadP(r1, FieldMemOperand(feedback_vector,
+                               FeedbackVector::kInvocationCountOffset));
+  __ AddP(r1, r1, Operand(1));
+  __ StoreP(r1, FieldMemOperand(feedback_vector,
+                                FeedbackVector::kInvocationCountOffset));
 
   // Check function data field is actually a BytecodeArray object.
   if (FLAG_debug_code) {
@@ -139,13 +139,9 @@ void FullCodeGenerator::Generate() {
     Comment cmnt(masm_, "[ Increment invocation count");
     __ LoadP(r7, FieldMemOperand(r4, JSFunction::kFeedbackVectorOffset));
     __ LoadP(r7, FieldMemOperand(r7, Cell::kValueOffset));
-    __ LoadP(r8, FieldMemOperand(
-                     r7, FeedbackVector::kInvocationCountIndex * kPointerSize +
-                             FeedbackVector::kHeaderSize));
-    __ AddSmiLiteral(r8, r8, Smi::FromInt(1), r0);
-    __ StoreP(r8, FieldMemOperand(
-                      r7, FeedbackVector::kInvocationCountIndex * kPointerSize +
-                              FeedbackVector::kHeaderSize),
+    __ LoadP(r8, FieldMemOperand(r7, FeedbackVector::kInvocationCountOffset));
+    __ addi(r8, r8, Operand(1));
+    __ StoreP(r8, FieldMemOperand(r7, FeedbackVector::kInvocationCountOffset),
               r0);
   }
@@ -1001,10 +997,12 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
   // We need to filter the key, record slow-path here.
   int const vector_index = SmiFromSlot(slot)->value();
-  __ EmitLoadFeedbackVector(r3);
+  __ EmitLoadFeedbackVector(r6);
   __ mov(r5, Operand(FeedbackVector::MegamorphicSentinel(isolate())));
-  __ StoreP(
-      r5, FieldMemOperand(r3, FixedArray::OffsetOfElementAt(vector_index)), r0);
+  __ StoreP(r5,
+            FieldMemOperand(r6, FeedbackVector::kFeedbackSlotsOffset +
+                                    vector_index * kPointerSize),
+            r0);
 
   // Convert the entry to a string or (smi) 0 if it isn't a property
   // any more. If the property has been removed while iterating, we
@@ -139,13 +139,9 @@ void FullCodeGenerator::Generate() {
     Comment cmnt(masm_, "[ Increment invocation count");
     __ LoadP(r6, FieldMemOperand(r3, JSFunction::kFeedbackVectorOffset));
     __ LoadP(r6, FieldMemOperand(r6, Cell::kValueOffset));
-    __ LoadP(r1, FieldMemOperand(
-                     r6, FeedbackVector::kInvocationCountIndex * kPointerSize +
-                             FeedbackVector::kHeaderSize));
-    __ AddSmiLiteral(r1, r1, Smi::FromInt(1), r0);
-    __ StoreP(r1, FieldMemOperand(
-                      r6, FeedbackVector::kInvocationCountIndex * kPointerSize +
-                              FeedbackVector::kHeaderSize));
+    __ LoadP(r1, FieldMemOperand(r6, FeedbackVector::kInvocationCountOffset));
+    __ AddP(r1, r1, Operand(1));
+    __ StoreP(r1, FieldMemOperand(r6, FeedbackVector::kInvocationCountOffset));
   }
   {
@@ -972,8 +968,10 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
   int const vector_index = SmiFromSlot(slot)->value();
   __ EmitLoadFeedbackVector(r2);
   __ mov(r4, Operand(FeedbackVector::MegamorphicSentinel(isolate())));
-  __ StoreP(
-      r4, FieldMemOperand(r2, FixedArray::OffsetOfElementAt(vector_index)), r0);
+  __ StoreP(r4,
+            FieldMemOperand(r2, FeedbackVector::kFeedbackSlotsOffset +
+                                    vector_index * kPointerSize),
+            r0);
 
   // Convert the entry to a string or (smi) 0 if it isn't a property
   // any more. If the property has been removed while iterating, we
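The two VisitForInStatement hunks retarget the megamorphic-sentinel store from FixedArray element addressing to the slots region of the new object. A sketch of the offset computation before and after, again with illustrative constants rather than V8's real values:

    // Illustrative constants only; the real layout comes from FeedbackVector.
    constexpr int kPointerSize = 8;
    constexpr int kFixedArrayHeaderSize = 16;  // basis of OffsetOfElementAt
    constexpr int kFeedbackSlotsOffset = 32;   // where slots start

    // Old: feedback slots were plain FixedArray elements.
    int OldSlotOffset(int vector_index) {
      return kFixedArrayHeaderSize + vector_index * kPointerSize;
    }

    // New: slots follow the fixed-size FeedbackVector header, so the same
    // index arithmetic is simply rebased onto kFeedbackSlotsOffset.
    int NewSlotOffset(int vector_index) {
      return kFeedbackSlotsOffset + vector_index * kPointerSize;
    }
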
@@ -1227,12 +1227,12 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) {
   DCHECK_EQ(*FeedbackVector::UninitializedSentinel(masm->isolate()),
             masm->isolate()->heap()->uninitialized_symbol());
 
-  const int count_offset = FixedArray::kHeaderSize + kPointerSize;
+  const int count_offset = FeedbackVector::kFeedbackSlotsOffset + kPointerSize;
 
   // Load the cache state into r8.
   __ SmiToPtrArrayOffset(r8, r6);
   __ add(r8, r5, r8);
-  __ LoadP(r8, FieldMemOperand(r8, FixedArray::kHeaderSize));
+  __ LoadP(r8, FieldMemOperand(r8, FeedbackVector::kFeedbackSlotsOffset));
 
   // A monomorphic cache hit or an already megamorphic state: invoke the
   // function without changing the state.
@@ -1280,7 +1280,7 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) {
   __ SmiToPtrArrayOffset(r8, r6);
   __ add(r8, r5, r8);
   __ LoadRoot(ip, Heap::kmegamorphic_symbolRootIndex);
-  __ StoreP(ip, FieldMemOperand(r8, FixedArray::kHeaderSize), r0);
+  __ StoreP(ip, FieldMemOperand(r8, FeedbackVector::kFeedbackSlotsOffset), r0);
   __ jmp(&done);
 
   // An uninitialized cache is patched with the function
@@ -1333,7 +1333,7 @@ void CallConstructStub::Generate(MacroAssembler* masm) {
   __ SmiToPtrArrayOffset(r8, r6);
   __ add(r8, r5, r8);
   // Put the AllocationSite from the feedback vector into r5, or undefined.
-  __ LoadP(r5, FieldMemOperand(r8, FixedArray::kHeaderSize));
+  __ LoadP(r5, FieldMemOperand(r8, FeedbackVector::kFeedbackSlotsOffset));
   __ LoadP(r8, FieldMemOperand(r5, AllocationSite::kMapOffset));
   __ CompareRoot(r8, Heap::kAllocationSiteMapRootIndex);
   if (CpuFeatures::IsSupported(ISELECT)) {
@@ -1217,12 +1217,12 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) {
   DCHECK_EQ(*FeedbackVector::UninitializedSentinel(masm->isolate()),
             masm->isolate()->heap()->uninitialized_symbol());
 
-  const int count_offset = FixedArray::kHeaderSize + kPointerSize;
+  const int count_offset = FeedbackVector::kFeedbackSlotsOffset + kPointerSize;
 
   // Load the cache state into r7.
   __ SmiToPtrArrayOffset(r7, r5);
   __ AddP(r7, r4, r7);
-  __ LoadP(r7, FieldMemOperand(r7, FixedArray::kHeaderSize));
+  __ LoadP(r7, FieldMemOperand(r7, FeedbackVector::kFeedbackSlotsOffset));
 
   // A monomorphic cache hit or an already megamorphic state: invoke the
   // function without changing the state.
@@ -1270,7 +1270,7 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) {
   __ SmiToPtrArrayOffset(r7, r5);
   __ AddP(r7, r4, r7);
   __ LoadRoot(ip, Heap::kmegamorphic_symbolRootIndex);
-  __ StoreP(ip, FieldMemOperand(r7, FixedArray::kHeaderSize), r0);
+  __ StoreP(ip, FieldMemOperand(r7, FeedbackVector::kFeedbackSlotsOffset), r0);
   __ jmp(&done);
 
   // An uninitialized cache is patched with the function
@@ -1322,7 +1322,7 @@ void CallConstructStub::Generate(MacroAssembler* masm) {
   __ SmiToPtrArrayOffset(r7, r5);
   __ AddP(r7, r4, r7);
   // Put the AllocationSite from the feedback vector into r4, or undefined.
-  __ LoadP(r4, FieldMemOperand(r7, FixedArray::kHeaderSize));
+  __ LoadP(r4, FieldMemOperand(r7, FeedbackVector::kFeedbackSlotsOffset));
   __ LoadP(r7, FieldMemOperand(r4, AllocationSite::kMapOffset));
   __ CompareRoot(r7, Heap::kAllocationSiteMapRootIndex);
   Label feedback_register_initialized;
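In the GenerateRecordCallTarget and CallConstructStub hunks, count_offset keeps its "+ kPointerSize" term: a call IC still occupies two consecutive slots, with the call count one pointer after the cached target. A sketch of the addressing these stubs perform, ignoring the heap-object tag adjustment that FieldMemOperand applies, and with placeholder constants:

    #include <cstdint>

    constexpr int kPointerSize = 8;
    constexpr int kFeedbackSlotsOffset = 32;  // placeholder, not V8's value

    // SmiToPtrArrayOffset turns a Smi-tagged slot index into a byte offset;
    // adding it to the vector base points at the IC's first slot. The cached
    // target then sits at kFeedbackSlotsOffset from that address.
    std::uintptr_t TargetSlotAddress(std::uintptr_t vector_base,
                                     int slot_index) {
      return vector_base + slot_index * kPointerSize + kFeedbackSlotsOffset;
    }

    // The call count occupies the very next slot, hence "+ kPointerSize".
    std::uintptr_t CountSlotAddress(std::uintptr_t vector_base,
                                    int slot_index) {
      return TargetSlotAddress(vector_base, slot_index) + kPointerSize;
    }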