Commit 1b191a5d authored by mbrandy, committed by Commit bot

PPC: Reland Vector ICs: platform support for vector-based stores.

Port 40fbed06

Original commit message:
    The last changes for vector store functionality are in 3 areas:

    1) The new vector [keyed] store code stubs - implementation.
    2) IC and handler compiler adjustments
    3) Odds and ends. A change in ast.cc, a test update, a small Oracle fix.

R=mvstanton@chromium.org, joransiu@ca.ibm.com, jyan@ca.ibm.com, michael_dawson@ca.ibm.com, dstence@us.ibm.com
BUG=

Review URL: https://codereview.chromium.org/1330883002

Cr-Commit-Position: refs/heads/master@{#30657}
parent 31a9396e
@@ -31,7 +31,7 @@ Register* PropertyAccessCompiler::store_calling_convention() {
// receiver, name, scratch1, scratch2, scratch3.
Register receiver = StoreDescriptor::ReceiverRegister();
Register name = StoreDescriptor::NameRegister();
DCHECK(r6.is(StoreTransitionDescriptor::MapRegister()));
DCHECK(FLAG_vector_stores || r6.is(StoreTransitionDescriptor::MapRegister()));
static Register registers[] = {receiver, name, r6, r7, r8};
return registers;
}
@@ -305,25 +305,35 @@ void PropertyHandlerCompiler::GenerateApiAccessorCall(
}
static void StoreIC_PushArgs(MacroAssembler* masm) {
if (FLAG_vector_stores) {
__ Push(StoreDescriptor::ReceiverRegister(),
StoreDescriptor::NameRegister(), StoreDescriptor::ValueRegister(),
VectorStoreICDescriptor::SlotRegister(),
VectorStoreICDescriptor::VectorRegister());
} else {
__ Push(StoreDescriptor::ReceiverRegister(),
StoreDescriptor::NameRegister(), StoreDescriptor::ValueRegister());
}
}
void NamedStoreHandlerCompiler::GenerateSlow(MacroAssembler* masm) {
// Push receiver, key and value for runtime call.
__ Push(StoreDescriptor::ReceiverRegister(), StoreDescriptor::NameRegister(),
StoreDescriptor::ValueRegister());
StoreIC_PushArgs(masm);
// The slow case calls into the runtime to complete the store without causing
// an IC miss that would otherwise cause a transition to the generic stub.
__ TailCallRuntime(Runtime::kStoreIC_Slow, 3, 1);
__ TailCallRuntime(Runtime::kStoreIC_Slow, FLAG_vector_stores ? 5 : 3, 1);
}
void ElementHandlerCompiler::GenerateStoreSlow(MacroAssembler* masm) {
// Push receiver, key and value for runtime call.
__ Push(StoreDescriptor::ReceiverRegister(), StoreDescriptor::NameRegister(),
StoreDescriptor::ValueRegister());
StoreIC_PushArgs(masm);
// The slow case calls into the runtime to complete the store without causing
// an IC miss that would otherwise cause a transition to the generic stub.
__ TailCallRuntime(Runtime::kKeyedStoreIC_Slow, 3, 1);
__ TailCallRuntime(Runtime::kKeyedStoreIC_Slow, FLAG_vector_stores ? 5 : 3,
1);
}
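As an aside, a minimal stand-alone sketch (not V8 code; the helper name is hypothetical) of the argument-count convention both slow paths above rely on: with --vector-stores the IC additionally pushes the feedback slot and vector, so the runtime tail calls take five arguments instead of three.

#include <cassert>

// Hypothetical helper mirroring the "FLAG_vector_stores ? 5 : 3" operand used
// by both TailCallRuntime sites above. The base arguments are
// (receiver, name, value); vector stores also push (slot, vector).
constexpr int StoreSlowArgumentCount(bool vector_stores) {
  return vector_stores ? 5 : 3;
}

int main() {
  assert(StoreSlowArgumentCount(false) == 3);  // legacy store IC convention
  assert(StoreSlowArgumentCount(true) == 5);   // vector store IC convention
  return 0;
}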
@@ -565,6 +575,7 @@ void NamedStoreHandlerCompiler::FrontendFooter(Handle<Name> name, Label* miss) {
Label success;
__ b(&success);
GenerateRestoreName(miss, name);
if (IC::ICUseVector(kind())) PopVectorAndSlot();
TailCallBuiltin(masm(), MissBuiltin(kind()));
__ bind(&success);
}
@@ -112,7 +112,10 @@ Handle<Code> PropertyICCompiler::CompileKeyedStorePolymorphic(
Label next_map;
__ bne(&next_map);
Handle<WeakCell> cell = Map::WeakCellForMap(transitioned_maps->at(i));
__ LoadWeakValue(transition_map(), cell, &miss);
Register transition_map = scratch1();
DCHECK(!FLAG_vector_stores &&
transition_map.is(StoreTransitionDescriptor::MapRegister()));
__ LoadWeakValue(transition_map, cell, &miss);
__ Jump(handler_stubs->at(i), RelocInfo::CODE_TARGET, al);
__ bind(&next_map);
}
@@ -711,7 +711,7 @@ void KeyedStoreIC::GenerateMegamorphic(MacroAssembler* masm,
// change the IC from any downstream misses, a dummy vector can be used.
Register vector = VectorStoreICDescriptor::VectorRegister();
Register slot = VectorStoreICDescriptor::SlotRegister();
DCHECK(!AreAliased(vector, slot, r6, r7, r8, r9));
DCHECK(!AreAliased(vector, slot, r8, r9, r10, r11));
Handle<TypeFeedbackVector> dummy_vector =
TypeFeedbackVector::DummyVector(masm->isolate());
int slot_index = dummy_vector->GetIndex(
@@ -723,7 +723,7 @@ void KeyedStoreIC::GenerateMegamorphic(MacroAssembler* masm,
Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
Code::ComputeHandlerFlags(Code::STORE_IC));
masm->isolate()->stub_cache()->GenerateProbe(masm, Code::STORE_IC, flags,
receiver, key, r6, r7, r8, r9);
receiver, key, r8, r9, r10, r11);
// Cache miss.
__ b(&miss);
@@ -806,20 +806,22 @@ void StoreIC::GenerateNormal(MacroAssembler* masm) {
Register receiver = StoreDescriptor::ReceiverRegister();
Register name = StoreDescriptor::NameRegister();
Register value = StoreDescriptor::ValueRegister();
Register dictionary = r6;
Register dictionary = r8;
DCHECK(receiver.is(r4));
DCHECK(name.is(r5));
DCHECK(value.is(r3));
DCHECK(VectorStoreICDescriptor::VectorRegister().is(r6));
DCHECK(VectorStoreICDescriptor::SlotRegister().is(r7));
__ LoadP(dictionary, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
GenerateDictionaryStore(masm, &miss, dictionary, name, value, r7, r8);
GenerateDictionaryStore(masm, &miss, dictionary, name, value, r9, r10);
Counters* counters = masm->isolate()->counters();
__ IncrementCounter(counters->store_normal_hit(), 1, r7, r8);
__ IncrementCounter(counters->store_normal_hit(), 1, r9, r10);
__ Ret();
__ bind(&miss);
__ IncrementCounter(counters->store_normal_miss(), 1, r7, r8);
__ IncrementCounter(counters->store_normal_miss(), 1, r9, r10);
GenerateMiss(masm);
}
@@ -137,8 +137,14 @@ void StubCache::GenerateProbe(MacroAssembler* masm, Code::Kind ic_kind,
// extra3 don't conflict with the vector and slot registers, which need
// to be preserved for a handler call or miss.
if (IC::ICUseVector(ic_kind)) {
Register vector = LoadWithVectorDescriptor::VectorRegister();
Register slot = LoadWithVectorDescriptor::SlotRegister();
Register vector, slot;
if (ic_kind == Code::STORE_IC || ic_kind == Code::KEYED_STORE_IC) {
vector = VectorStoreICDescriptor::VectorRegister();
slot = VectorStoreICDescriptor::SlotRegister();
} else {
vector = LoadWithVectorDescriptor::VectorRegister();
slot = LoadWithVectorDescriptor::SlotRegister();
}
DCHECK(!AreAliased(vector, slot, scratch, extra, extra2, extra3));
}
#endif
@@ -4757,11 +4757,52 @@ void VectorStoreICStub::GenerateForTrampoline(MacroAssembler* masm) {
void VectorStoreICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
Label miss;
Register receiver = VectorStoreICDescriptor::ReceiverRegister(); // r4
Register key = VectorStoreICDescriptor::NameRegister(); // r5
Register vector = VectorStoreICDescriptor::VectorRegister(); // r6
Register slot = VectorStoreICDescriptor::SlotRegister(); // r7
DCHECK(VectorStoreICDescriptor::ValueRegister().is(r3)); // r3
Register feedback = r8;
Register receiver_map = r9;
Register scratch1 = r10;
__ SmiToPtrArrayOffset(r0, slot);
__ add(feedback, vector, r0);
__ LoadP(feedback, FieldMemOperand(feedback, FixedArray::kHeaderSize));
// Try to quickly handle the monomorphic case without knowing for sure
// if we have a weak cell in feedback. We do know it's safe to look
// at WeakCell::kValueOffset.
Label try_array, load_smi_map, compare_map;
Label not_array, miss;
HandleMonomorphicCase(masm, receiver, receiver_map, feedback, vector, slot,
scratch1, &compare_map, &load_smi_map, &try_array);
// Is it a fixed array?
__ bind(&try_array);
__ LoadP(scratch1, FieldMemOperand(feedback, HeapObject::kMapOffset));
__ CompareRoot(scratch1, Heap::kFixedArrayMapRootIndex);
__ bne(&not_array);
Register scratch2 = r11;
HandleArrayCases(masm, feedback, receiver_map, scratch1, scratch2, true,
&miss);
__ bind(&not_array);
__ CompareRoot(feedback, Heap::kmegamorphic_symbolRootIndex);
__ bne(&miss);
Code::Flags code_flags = Code::RemoveTypeAndHolderFromFlags(
Code::ComputeHandlerFlags(Code::STORE_IC));
masm->isolate()->stub_cache()->GenerateProbe(
masm, Code::STORE_IC, code_flags, receiver, key, feedback, receiver_map,
scratch1, scratch2);
// TODO(mvstanton): Implement.
__ bind(&miss);
StoreIC::GenerateMiss(masm);
__ bind(&load_smi_map);
__ LoadRoot(receiver_map, Heap::kHeapNumberMapRootIndex);
__ b(&compare_map);
}
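For orientation, here is a stand-alone C++ sketch of the dispatch order the stub above implements on a feedback slot: monomorphic weak-cell hit, then a fixed array of map/handler pairs, then the megamorphic sentinel (stub-cache probe), otherwise a miss. The types and names are hypothetical models, not V8's object layout or API.

#include <vector>

struct Map {};
struct Code {};

struct MonomorphicFeedback { const Map* map; const Code* handler; };
struct PolymorphicEntry { const Map* map; const Code* handler; };

enum class SlotKind { kMonomorphic, kPolymorphic, kMegamorphic, kUninitialized };

struct FeedbackSlot {
  SlotKind kind = SlotKind::kUninitialized;
  MonomorphicFeedback mono{};          // models the weak cell + its handler
  std::vector<PolymorphicEntry> poly;  // models the fixed array of map/handler pairs
};

// Mirrors the control flow of VectorStoreICStub::GenerateImpl: try the
// monomorphic case, then the array case, then the megamorphic stub-cache
// probe; nullptr stands for "miss" (StoreIC::GenerateMiss).
const Code* DispatchStore(const FeedbackSlot& slot, const Map* receiver_map,
                          const Code* (*probe_stub_cache)(const Map*)) {
  switch (slot.kind) {
    case SlotKind::kMonomorphic:              // HandleMonomorphicCase
      return slot.mono.map == receiver_map ? slot.mono.handler : nullptr;
    case SlotKind::kPolymorphic:              // "Is it a fixed array?"
      for (const PolymorphicEntry& e : slot.poly)
        if (e.map == receiver_map) return e.handler;
      return nullptr;                         // miss
    case SlotKind::kMegamorphic:              // kmegamorphic_symbolRootIndex
      return probe_stub_cache(receiver_map);  // stub_cache()->GenerateProbe
    default:
      return nullptr;                         // miss
  }
}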
@@ -4775,12 +4816,135 @@ void VectorKeyedStoreICStub::GenerateForTrampoline(MacroAssembler* masm) {
}
static void HandlePolymorphicStoreCase(MacroAssembler* masm, Register feedback,
Register receiver_map, Register scratch1,
Register scratch2, Label* miss) {
// feedback initially contains the feedback array
Label next_loop, prepare_next;
Label start_polymorphic;
Label transition_call;
Register cached_map = scratch1;
Register too_far = scratch2;
Register pointer_reg = feedback;
__ LoadP(too_far, FieldMemOperand(feedback, FixedArray::kLengthOffset));
// +-----+------+------+-----+-----+-----+ ... ----+
// | map | len  | wm0  | wt0 | h0  | wm1 |   hN    |
// +-----+------+------+-----+-----+-----+ ... ----+
//                 0      1     2            len-1
//                 ^                            ^
//                 |                            |
//            pointer_reg                    too_far
//            aka feedback                   scratch2
// also need receiver_map
// use cached_map (scratch1) to look in the weak map values.
__ SmiToPtrArrayOffset(r0, too_far);
__ add(too_far, feedback, r0);
__ addi(too_far, too_far, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
__ addi(pointer_reg, feedback,
Operand(FixedArray::OffsetOfElementAt(0) - kHeapObjectTag));
__ bind(&next_loop);
__ LoadP(cached_map, MemOperand(pointer_reg));
__ LoadP(cached_map, FieldMemOperand(cached_map, WeakCell::kValueOffset));
__ cmp(receiver_map, cached_map);
__ bne(&prepare_next);
// Is it a transitioning store?
__ LoadP(too_far, MemOperand(pointer_reg, kPointerSize));
__ CompareRoot(too_far, Heap::kUndefinedValueRootIndex);
__ bne(&transition_call);
__ LoadP(pointer_reg, MemOperand(pointer_reg, kPointerSize * 2));
__ addi(ip, pointer_reg, Operand(Code::kHeaderSize - kHeapObjectTag));
__ Jump(ip);
__ bind(&transition_call);
__ LoadP(too_far, FieldMemOperand(too_far, WeakCell::kValueOffset));
__ JumpIfSmi(too_far, miss);
__ LoadP(receiver_map, MemOperand(pointer_reg, kPointerSize * 2));
// Load the map into the correct register.
DCHECK(feedback.is(VectorStoreTransitionDescriptor::MapRegister()));
__ mr(feedback, too_far);
__ addi(ip, receiver_map, Operand(Code::kHeaderSize - kHeapObjectTag));
__ Jump(ip);
__ bind(&prepare_next);
__ addi(pointer_reg, pointer_reg, Operand(kPointerSize * 3));
__ cmpl(pointer_reg, too_far);
__ blt(&next_loop);
// We exhausted our array of map handler pairs.
__ b(miss);
}
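To make the loop above easier to follow, here is a stand-alone C++ sketch of the same walk over the (weak map, transition, handler) triples pictured in the diagram. The types are hypothetical stand-ins, not V8's heap layout; nullptr models an undefined or cleared weak value.

#include <vector>

struct Map {};
struct Code {};

// One triple from the keyed-store feedback array diagrammed above:
// wmN (weak receiver map), wtN (weak transition map or undefined), hN (handler).
struct StoreEntry {
  const Map* receiver_map;    // nullptr models a cleared weak cell
  const Map* transition_map;  // nullptr models the undefined, non-transitioning case
  const Code* handler;
};

struct StoreDispatch {
  const Code* handler;        // nullptr => miss
  const Map* transition_map;  // non-null only for a transitioning store
};

// Mirrors HandlePolymorphicStoreCase: walk the triples until too_far, compare
// the receiver map, and classify a hit as a plain store (jump to the handler)
// or a transitioning store (the transition map must reach the handler too).
StoreDispatch FindKeyedStoreHandler(const std::vector<StoreEntry>& feedback,
                                    const Map* receiver_map) {
  for (const StoreEntry& e : feedback) {
    if (e.receiver_map != receiver_map) continue;  // prepare_next / next_loop
    if (e.transition_map == nullptr) return {e.handler, nullptr};  // plain store
    return {e.handler, e.transition_map};          // transition_call path
  }
  return {nullptr, nullptr};                       // exhausted the array: miss
}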
void VectorKeyedStoreICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
Label miss;
Register receiver = VectorStoreICDescriptor::ReceiverRegister(); // r4
Register key = VectorStoreICDescriptor::NameRegister(); // r5
Register vector = VectorStoreICDescriptor::VectorRegister(); // r6
Register slot = VectorStoreICDescriptor::SlotRegister(); // r7
DCHECK(VectorStoreICDescriptor::ValueRegister().is(r3)); // r3
Register feedback = r8;
Register receiver_map = r9;
Register scratch1 = r10;
__ SmiToPtrArrayOffset(r0, slot);
__ add(feedback, vector, r0);
__ LoadP(feedback, FieldMemOperand(feedback, FixedArray::kHeaderSize));
// Try to quickly handle the monomorphic case without knowing for sure
// if we have a weak cell in feedback. We do know it's safe to look
// at WeakCell::kValueOffset.
Label try_array, load_smi_map, compare_map;
Label not_array, miss;
HandleMonomorphicCase(masm, receiver, receiver_map, feedback, vector, slot,
scratch1, &compare_map, &load_smi_map, &try_array);
__ bind(&try_array);
// Is it a fixed array?
__ LoadP(scratch1, FieldMemOperand(feedback, HeapObject::kMapOffset));
__ CompareRoot(scratch1, Heap::kFixedArrayMapRootIndex);
__ bne(&not_array);
// We have a polymorphic element handler.
Label polymorphic, try_poly_name;
__ bind(&polymorphic);
Register scratch2 = r11;
HandlePolymorphicStoreCase(masm, feedback, receiver_map, scratch1, scratch2,
&miss);
__ bind(&not_array);
// Is it generic?
__ CompareRoot(feedback, Heap::kmegamorphic_symbolRootIndex);
__ bne(&try_poly_name);
Handle<Code> megamorphic_stub =
KeyedStoreIC::ChooseMegamorphicStub(masm->isolate(), GetExtraICState());
__ Jump(megamorphic_stub, RelocInfo::CODE_TARGET);
__ bind(&try_poly_name);
// We might have a name in feedback, and a fixed array in the next slot.
__ cmp(key, feedback);
__ bne(&miss);
// If the name comparison succeeded, we know we have a fixed array with
// at least one map/handler pair.
__ SmiToPtrArrayOffset(r0, slot);
__ add(feedback, vector, r0);
__ LoadP(feedback,
FieldMemOperand(feedback, FixedArray::kHeaderSize + kPointerSize));
HandleArrayCases(masm, feedback, receiver_map, scratch1, scratch2, false,
&miss);
// TODO(mvstanton): Implement.
__ bind(&miss);
KeyedStoreIC::GenerateMiss(masm);
__ bind(&load_smi_map);
__ LoadRoot(receiver_map, Heap::kHeapNumberMapRootIndex);
__ b(&compare_map);
}
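One step distinguishes this keyed variant from VectorStoreICStub above: when the feedback is neither an array nor the megamorphic sentinel, it may be a cached property name, and the map/handler array for that name then lives in the next vector slot. A small sketch of that try_poly_name lookup, with hypothetical types rather than V8's API:

#include <string>
#include <vector>

struct Map {};
struct Code {};
struct Name { std::string value; };

struct MapHandlerPair { const Map* map; const Code* handler; };

// Models one keyed-store feedback slot plus its follow-up slot, as used by the
// try_poly_name path: feedback[slot] may hold a cached name, and
// feedback[slot + 1] then holds the map/handler array for that name.
struct KeyedFeedback {
  const Name* cached_name = nullptr;               // nullptr => no name cached
  std::vector<MapHandlerPair> next_slot_handlers;  // "fixed array in the next slot"
};

// Mirrors the tail of VectorKeyedStoreICStub::GenerateImpl: if the key equals
// the cached name, reload feedback from slot + 1 and search its map/handler
// pairs; any other outcome is a miss (KeyedStoreIC::GenerateMiss).
const Code* TryPolyName(const KeyedFeedback& feedback, const Name& key,
                        const Map* receiver_map) {
  if (feedback.cached_name == nullptr || feedback.cached_name->value != key.value)
    return nullptr;                                            // miss
  for (const MapHandlerPair& p : feedback.next_slot_handlers)  // HandleArrayCases
    if (p.map == receiver_map) return p.handler;
  return nullptr;                                              // miss
}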