Commit de9b0a07 authored by Jaideep Bajwa, committed by Commit Bot

PPC/s390: [objects] Make feedback vector a first-class object

Port 37680d65

Original Commit Message:

    Instead of having feedback vector as a subtype of FixedArray with
    reserved slots, make it a first-class variable-sized object with a
    fixed-size header. This allows us to compress counters to ints in the
    header, rather than forcing them to be Smis.
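
For reference, a minimal sketch of what this restructuring means for offset computation, assuming a 64-bit build; the FixedArray header size, the index value, and the new header offset below are placeholders chosen for illustration, not V8's real layout:

```cpp
#include <cstdio>

constexpr int kPointerSize = 8;  // assuming a 64-bit build

// Before: the feedback vector is a FixedArray, so the invocation count lives
// in a reserved element (a Smi) and its offset is derived from an array index.
constexpr int kFixedArrayHeaderSize = 2 * kPointerSize;  // map + length (assumed)
constexpr int kInvocationCountIndex = 1;                 // hypothetical index
constexpr int kOldInvocationCountOffset =
    kInvocationCountIndex * kPointerSize + kFixedArrayHeaderSize;

// After: FeedbackVector has its own fixed-size header, so the count is a named
// field that can hold a raw integer, and generated code increments it with a
// plain add (addi on PPC, AddP on s390) instead of AddSmiLiteral.
constexpr int kNewInvocationCountOffset = 3 * kPointerSize;  // placeholder value

int main() {
  std::printf("old offset: %d, new offset: %d\n", kOldInvocationCountOffset,
              kNewInvocationCountOffset);
}
```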

R=leszeks@chromium.org, joransiu@ca.ibm.com, jyan@ca.ibm.com, michael_dawson@ca.ibm.com
BUG=
LOG=N

Change-Id: Ia835942de292c4e4b802e34672f1e8bf8a2491c7
Reviewed-on: https://chromium-review.googlesource.com/590168
Reviewed-by: Junliang Yan <jyan@ca.ibm.com>
Commit-Queue: Jaideep Bajwa <bjaideep@ca.ibm.com>
Cr-Commit-Position: refs/heads/master@{#46954}
parent eaec875f
@@ -1024,11 +1024,9 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
   Register closure = r4;
   Register optimized_code_entry = scratch1;
-  const int kOptimizedCodeCellOffset =
-      FeedbackVector::kOptimizedCodeIndex * kPointerSize +
-      FeedbackVector::kHeaderSize;
-  __ LoadP(optimized_code_entry,
-           FieldMemOperand(feedback_vector, kOptimizedCodeCellOffset));
+  __ LoadP(
+      optimized_code_entry,
+      FieldMemOperand(feedback_vector, FeedbackVector::kOptimizedCodeOffset));
   // Check if the code entry is a Smi. If yes, we interpret it as an
   // optimisation marker. Otherwise, interpret is as a weak cell to a code
@@ -1169,16 +1167,12 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
   __ bne(&switch_to_different_code_kind);
   // Increment invocation count for the function.
-  __ LoadP(
-      r8, FieldMemOperand(feedback_vector,
-                          FeedbackVector::kInvocationCountIndex * kPointerSize +
-                              FeedbackVector::kHeaderSize));
-  __ AddSmiLiteral(r8, r8, Smi::FromInt(1), r0);
-  __ StoreP(
-      r8,
-      FieldMemOperand(feedback_vector,
-                      FeedbackVector::kInvocationCountIndex * kPointerSize +
-                          FeedbackVector::kHeaderSize),
-      r0);
+  __ LoadP(r8, FieldMemOperand(feedback_vector,
+                               FeedbackVector::kInvocationCountOffset));
+  __ addi(r8, r8, Operand(1));
+  __ StoreP(
+      r8,
+      FieldMemOperand(feedback_vector, FeedbackVector::kInvocationCountOffset),
+      r0);
   // Check function data field is actually a BytecodeArray object.
@@ -1024,11 +1024,9 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
   Register closure = r3;
   Register optimized_code_entry = scratch1;
-  const int kOptimizedCodeCellOffset =
-      FeedbackVector::kOptimizedCodeIndex * kPointerSize +
-      FeedbackVector::kHeaderSize;
-  __ LoadP(optimized_code_entry,
-           FieldMemOperand(feedback_vector, kOptimizedCodeCellOffset));
+  __ LoadP(
+      optimized_code_entry,
+      FieldMemOperand(feedback_vector, FeedbackVector::kOptimizedCodeOffset));
   // Check if the code entry is a Smi. If yes, we interpret it as an
   // optimisation marker. Otherwise, interpret is as a weak cell to a code
@@ -1166,15 +1164,11 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
   __ bne(&switch_to_different_code_kind);
   // Increment invocation count for the function.
-  __ LoadP(
-      r1, FieldMemOperand(feedback_vector,
-                          FeedbackVector::kInvocationCountIndex * kPointerSize +
-                              FeedbackVector::kHeaderSize));
-  __ AddSmiLiteral(r1, r1, Smi::FromInt(1), r0);
-  __ StoreP(
-      r1, FieldMemOperand(feedback_vector,
-                          FeedbackVector::kInvocationCountIndex * kPointerSize +
-                              FeedbackVector::kHeaderSize));
+  __ LoadP(r1, FieldMemOperand(feedback_vector,
+                               FeedbackVector::kInvocationCountOffset));
+  __ AddP(r1, r1, Operand(1));
+  __ StoreP(r1, FieldMemOperand(feedback_vector,
+                                FeedbackVector::kInvocationCountOffset));
   // Check function data field is actually a BytecodeArray object.
   if (FLAG_debug_code) {
@@ -139,13 +139,9 @@ void FullCodeGenerator::Generate() {
     Comment cmnt(masm_, "[ Increment invocation count");
     __ LoadP(r7, FieldMemOperand(r4, JSFunction::kFeedbackVectorOffset));
     __ LoadP(r7, FieldMemOperand(r7, Cell::kValueOffset));
-    __ LoadP(r8, FieldMemOperand(
-                     r7, FeedbackVector::kInvocationCountIndex * kPointerSize +
-                             FeedbackVector::kHeaderSize));
-    __ AddSmiLiteral(r8, r8, Smi::FromInt(1), r0);
-    __ StoreP(r8, FieldMemOperand(
-                      r7, FeedbackVector::kInvocationCountIndex * kPointerSize +
-                              FeedbackVector::kHeaderSize),
-              r0);
+    __ LoadP(r8, FieldMemOperand(r7, FeedbackVector::kInvocationCountOffset));
+    __ addi(r8, r8, Operand(1));
+    __ StoreP(r8, FieldMemOperand(r7, FeedbackVector::kInvocationCountOffset),
+              r0);
   }
@@ -1001,10 +997,12 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
   // We need to filter the key, record slow-path here.
   int const vector_index = SmiFromSlot(slot)->value();
-  __ EmitLoadFeedbackVector(r3);
+  __ EmitLoadFeedbackVector(r6);
   __ mov(r5, Operand(FeedbackVector::MegamorphicSentinel(isolate())));
-  __ StoreP(
-      r5, FieldMemOperand(r3, FixedArray::OffsetOfElementAt(vector_index)), r0);
+  __ StoreP(r5,
+            FieldMemOperand(r6, FeedbackVector::kFeedbackSlotsOffset +
+                                    vector_index * kPointerSize),
+            r0);
   // Convert the entry to a string or (smi) 0 if it isn't a property
   // any more. If the property has been removed while iterating, we
@@ -139,13 +139,9 @@ void FullCodeGenerator::Generate() {
     Comment cmnt(masm_, "[ Increment invocation count");
     __ LoadP(r6, FieldMemOperand(r3, JSFunction::kFeedbackVectorOffset));
     __ LoadP(r6, FieldMemOperand(r6, Cell::kValueOffset));
-    __ LoadP(r1, FieldMemOperand(
-                     r6, FeedbackVector::kInvocationCountIndex * kPointerSize +
-                             FeedbackVector::kHeaderSize));
-    __ AddSmiLiteral(r1, r1, Smi::FromInt(1), r0);
-    __ StoreP(r1, FieldMemOperand(
-                      r6, FeedbackVector::kInvocationCountIndex * kPointerSize +
-                              FeedbackVector::kHeaderSize));
+    __ LoadP(r1, FieldMemOperand(r6, FeedbackVector::kInvocationCountOffset));
+    __ AddP(r1, r1, Operand(1));
+    __ StoreP(r1, FieldMemOperand(r6, FeedbackVector::kInvocationCountOffset));
   }
   {
@@ -972,8 +968,10 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
   int const vector_index = SmiFromSlot(slot)->value();
   __ EmitLoadFeedbackVector(r2);
   __ mov(r4, Operand(FeedbackVector::MegamorphicSentinel(isolate())));
-  __ StoreP(
-      r4, FieldMemOperand(r2, FixedArray::OffsetOfElementAt(vector_index)), r0);
+  __ StoreP(r4,
+            FieldMemOperand(r2, FeedbackVector::kFeedbackSlotsOffset +
+                                    vector_index * kPointerSize),
+            r0);
   // Convert the entry to a string or (smi) 0 if it isn't a property
   // any more. If the property has been removed while iterating, we
@@ -1227,12 +1227,12 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) {
   DCHECK_EQ(*FeedbackVector::UninitializedSentinel(masm->isolate()),
             masm->isolate()->heap()->uninitialized_symbol());
-  const int count_offset = FixedArray::kHeaderSize + kPointerSize;
+  const int count_offset = FeedbackVector::kFeedbackSlotsOffset + kPointerSize;
   // Load the cache state into r8.
   __ SmiToPtrArrayOffset(r8, r6);
   __ add(r8, r5, r8);
-  __ LoadP(r8, FieldMemOperand(r8, FixedArray::kHeaderSize));
+  __ LoadP(r8, FieldMemOperand(r8, FeedbackVector::kFeedbackSlotsOffset));
   // A monomorphic cache hit or an already megamorphic state: invoke the
   // function without changing the state.
@@ -1280,7 +1280,7 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) {
   __ SmiToPtrArrayOffset(r8, r6);
   __ add(r8, r5, r8);
   __ LoadRoot(ip, Heap::kmegamorphic_symbolRootIndex);
-  __ StoreP(ip, FieldMemOperand(r8, FixedArray::kHeaderSize), r0);
+  __ StoreP(ip, FieldMemOperand(r8, FeedbackVector::kFeedbackSlotsOffset), r0);
   __ jmp(&done);
   // An uninitialized cache is patched with the function
@@ -1333,7 +1333,7 @@ void CallConstructStub::Generate(MacroAssembler* masm) {
   __ SmiToPtrArrayOffset(r8, r6);
   __ add(r8, r5, r8);
   // Put the AllocationSite from the feedback vector into r5, or undefined.
-  __ LoadP(r5, FieldMemOperand(r8, FixedArray::kHeaderSize));
+  __ LoadP(r5, FieldMemOperand(r8, FeedbackVector::kFeedbackSlotsOffset));
   __ LoadP(r8, FieldMemOperand(r5, AllocationSite::kMapOffset));
   __ CompareRoot(r8, Heap::kAllocationSiteMapRootIndex);
   if (CpuFeatures::IsSupported(ISELECT)) {
@@ -1217,12 +1217,12 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) {
   DCHECK_EQ(*FeedbackVector::UninitializedSentinel(masm->isolate()),
             masm->isolate()->heap()->uninitialized_symbol());
-  const int count_offset = FixedArray::kHeaderSize + kPointerSize;
+  const int count_offset = FeedbackVector::kFeedbackSlotsOffset + kPointerSize;
   // Load the cache state into r7.
   __ SmiToPtrArrayOffset(r7, r5);
   __ AddP(r7, r4, r7);
-  __ LoadP(r7, FieldMemOperand(r7, FixedArray::kHeaderSize));
+  __ LoadP(r7, FieldMemOperand(r7, FeedbackVector::kFeedbackSlotsOffset));
   // A monomorphic cache hit or an already megamorphic state: invoke the
   // function without changing the state.
@@ -1270,7 +1270,7 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) {
   __ SmiToPtrArrayOffset(r7, r5);
   __ AddP(r7, r4, r7);
   __ LoadRoot(ip, Heap::kmegamorphic_symbolRootIndex);
-  __ StoreP(ip, FieldMemOperand(r7, FixedArray::kHeaderSize), r0);
+  __ StoreP(ip, FieldMemOperand(r7, FeedbackVector::kFeedbackSlotsOffset), r0);
   __ jmp(&done);
   // An uninitialized cache is patched with the function
@@ -1322,7 +1322,7 @@ void CallConstructStub::Generate(MacroAssembler* masm) {
   __ SmiToPtrArrayOffset(r7, r5);
   __ AddP(r7, r4, r7);
   // Put the AllocationSite from the feedback vector into r4, or undefined.
-  __ LoadP(r4, FieldMemOperand(r7, FixedArray::kHeaderSize));
+  __ LoadP(r4, FieldMemOperand(r7, FeedbackVector::kFeedbackSlotsOffset));
   __ LoadP(r7, FieldMemOperand(r4, AllocationSite::kMapOffset));
   __ CompareRoot(r7, Heap::kAllocationSiteMapRootIndex);
   Label feedback_register_initialized;