Commit 4ad22295 authored by mbrandy's avatar mbrandy Committed by Commit bot

PPC: [stubs] Optimize LoadGlobalViaContextStub and StoreGlobalViaContextStub.

Port d6ee366d

Original commit message:
    This is the initial round of optimizations for the
    LoadGlobalViaContextStub and StoreGlobalViaContextStub, basically
    turning them into platform code stubs to avoid the Crankshaft overhead
    in the fast case, and making the runtime interface cheaper.

R=bmeurer@chromium.org, dstence@us.ibm.com, michael_dawson@ca.ibm.com
BUG=chromium:510694
LOG=n

Review URL: https://codereview.chromium.org/1261473002

Cr-Commit-Position: refs/heads/master@{#29867}
parent e01f34fa
......@@ -1378,17 +1378,19 @@ void FullCodeGenerator::EmitGlobalVariableLoad(VariableProxy* proxy,
DCHECK(var->index() > 0);
DCHECK(var->IsStaticGlobalObjectProperty());
// Each var occupies two slots in the context: for reads and writes.
int slot_index = var->index();
int depth = scope()->ContextChainLength(var->scope());
__ mov(LoadGlobalViaContextDescriptor::DepthRegister(),
Operand(Smi::FromInt(depth)));
__ mov(LoadGlobalViaContextDescriptor::SlotRegister(),
Operand(Smi::FromInt(slot_index)));
__ mov(LoadGlobalViaContextDescriptor::NameRegister(),
Operand(var->name()));
LoadGlobalViaContextStub stub(isolate(), depth);
__ CallStub(&stub);
const int slot = var->index();
const int depth = scope()->ContextChainLength(var->scope());
if (depth <= LoadGlobalViaContextStub::kMaximumDepth) {
__ mov(LoadGlobalViaContextDescriptor::SlotRegister(), Operand(slot));
__ mov(LoadGlobalViaContextDescriptor::NameRegister(),
Operand(var->name()));
LoadGlobalViaContextStub stub(isolate(), depth);
__ CallStub(&stub);
} else {
__ Push(Smi::FromInt(slot));
__ Push(var->name());
__ CallRuntime(Runtime::kLoadGlobalViaContext, 2);
}
} else {
__ LoadP(LoadDescriptor::ReceiverRegister(), GlobalObjectOperand());
__ mov(LoadDescriptor::NameRegister(), Operand(var->name()));
......@@ -2714,18 +2716,24 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
DCHECK(var->index() > 0);
DCHECK(var->IsStaticGlobalObjectProperty());
// Each var occupies two slots in the context: for reads and writes.
int slot_index = var->index() + 1;
int depth = scope()->ContextChainLength(var->scope());
__ mov(StoreGlobalViaContextDescriptor::DepthRegister(),
Operand(Smi::FromInt(depth)));
__ mov(StoreGlobalViaContextDescriptor::SlotRegister(),
Operand(Smi::FromInt(slot_index)));
__ mov(StoreGlobalViaContextDescriptor::NameRegister(),
Operand(var->name()));
DCHECK(StoreGlobalViaContextDescriptor::ValueRegister().is(r3));
StoreGlobalViaContextStub stub(isolate(), depth, language_mode());
__ CallStub(&stub);
const int slot = var->index() + 1;
const int depth = scope()->ContextChainLength(var->scope());
if (depth <= StoreGlobalViaContextStub::kMaximumDepth) {
__ mov(StoreGlobalViaContextDescriptor::SlotRegister(), Operand(slot));
__ mov(StoreGlobalViaContextDescriptor::NameRegister(),
Operand(var->name()));
DCHECK(StoreGlobalViaContextDescriptor::ValueRegister().is(r3));
StoreGlobalViaContextStub stub(isolate(), depth, language_mode());
__ CallStub(&stub);
} else {
__ Push(Smi::FromInt(slot));
__ Push(var->name());
__ push(r3);
__ CallRuntime(is_strict(language_mode())
? Runtime::kStoreGlobalViaContext_Strict
: Runtime::kStoreGlobalViaContext_Sloppy,
3);
}
} else if (var->mode() == LET && op != Token::INIT_LET) {
// Non-initializing assignment to let variable needs a write barrier.
DCHECK(!var->IsLookupSlot());
......
......@@ -4208,7 +4208,7 @@ void NameDictionaryLookupStub::GeneratePositiveLookup(
__ srwi(scratch2, scratch2, Operand(Name::kHashShift));
__ and_(scratch2, scratch1, scratch2);
// Scale the index by multiplying by the element size.
// Scale the index by multiplying by the entry size.
STATIC_ASSERT(NameDictionary::kEntrySize == 3);
// scratch2 = scratch2 * 3.
__ ShiftLeftImm(ip, scratch2, Operand(1));
......@@ -5313,6 +5313,162 @@ void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
}
void LoadGlobalViaContextStub::Generate(MacroAssembler* masm) {
  // Fast path for loading a global variable that lives in a PropertyCell of
  // the script context.  Returns the cell value in r3 unless it is the_hole,
  // in which case the load is deferred to the runtime.
  // Register assignments follow LoadGlobalViaContextDescriptor.
  Register context = cp;
  Register result = r3;  // also the PPC return-value register
  Register slot = r5;    // untagged slot index (SlotRegister)
  Register name = r6;    // variable name (NameRegister)
  Label slow_case;

  // Go up the context chain to the script context.  depth() is a stub key,
  // known at stub-generation time, so this walk is fully unrolled.
  for (int i = 0; i < depth(); ++i) {
    __ LoadP(result, ContextOperand(context, Context::PREVIOUS_INDEX));
    context = result;
  }

  // Load the PropertyCell value at the specified slot.
  __ ShiftLeftImm(result, slot, Operand(kPointerSizeLog2));
  __ add(result, context, result);
  // Single-argument ContextOperand uses the default index 0, i.e. the slot
  // that |result| now points at.
  __ LoadP(result, ContextOperand(result));
  __ LoadP(result, FieldMemOperand(result, PropertyCell::kValueOffset));

  // If the result is not the_hole, return. Otherwise, handle in the runtime.
  __ CompareRoot(result, Heap::kTheHoleValueRootIndex);
  __ Ret(ne);

  // Fallback to runtime.  Reached only by falling through from the hole
  // check above; the label is never branched to explicitly.
  __ bind(&slow_case);
  __ SmiTag(slot);  // the runtime expects a Smi slot index
  __ Push(slot, name);
  __ TailCallRuntime(Runtime::kLoadGlobalViaContext, 2, 1);
}
void StoreGlobalViaContextStub::Generate(MacroAssembler* masm) {
  // Fast path for storing |value| into a global variable's PropertyCell in
  // the script context.  Handled inline: mutable-data cells, re-storing the
  // value a cell already holds, and constant-type cells where old and new
  // values are both smis or both heap objects with the same map.  Everything
  // else tail-calls the runtime.
  // Register assignments follow StoreGlobalViaContextDescriptor.
  Register value = r3;
  Register slot = r5;
  Register name = r6;
  Register cell = r4;
  Register cell_details = r7;
  Register cell_value = r8;
  Register cell_value_map = r9;
  Register scratch = r10;
  Register context = cp;
  Register context_temp = cell;  // cell (r4) is free until after the walk
  Label fast_heapobject_case, fast_smi_case, slow_case;

  if (FLAG_debug_code) {
    // the_hole is never stored through this stub (relied upon below).
    __ CompareRoot(value, Heap::kTheHoleValueRootIndex);
    __ Check(ne, kUnexpectedValue);
    __ AssertName(name);
  }

  // Go up the context chain to the script context (depth() is a stub key,
  // so the walk is unrolled).
  for (int i = 0; i < depth(); i++) {
    __ LoadP(context_temp, ContextOperand(context, Context::PREVIOUS_INDEX));
    context = context_temp;
  }

  // Load the PropertyCell at the specified slot.
  __ ShiftLeftImm(cell, slot, Operand(kPointerSizeLog2));
  __ add(cell, context, cell);
  __ LoadP(cell, ContextOperand(cell));

  // Load PropertyDetails for the cell (actually only the cell_type and kind).
  __ LoadP(cell_details, FieldMemOperand(cell, PropertyCell::kDetailsOffset));
  __ SmiUntag(cell_details);
  __ andi(cell_details, cell_details,
          Operand(PropertyDetails::PropertyCellTypeField::kMask |
                  PropertyDetails::KindField::kMask));

  // Check if PropertyCell holds mutable data.
  Label not_mutable_data;
  __ cmpi(cell_details, Operand(PropertyDetails::PropertyCellTypeField::encode(
                                    PropertyCellType::kMutable) |
                                PropertyDetails::KindField::encode(kData)));
  __ bne(&not_mutable_data);
  // Mutable cell: smis need no write barrier, heap objects do.
  __ JumpIfSmi(value, &fast_smi_case);

  __ bind(&fast_heapobject_case);
  __ StoreP(value, FieldMemOperand(cell, PropertyCell::kValueOffset), r0);
  // RecordWriteField clobbers the value register, so we copy it before the
  // call.  cell_details (r7) is no longer read past this point, so r7 can
  // hold the copy.
  __ mr(r7, value);
  __ RecordWriteField(cell, PropertyCell::kValueOffset, r7, scratch,
                      kLRHasNotBeenSaved, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
  __ Ret();

  __ bind(&not_mutable_data);
  // Check if PropertyCell value matches the new value (relevant for Constant,
  // ConstantType and Undefined cells).
  Label not_same_value;
  __ LoadP(cell_value, FieldMemOperand(cell, PropertyCell::kValueOffset));
  __ cmp(cell_value, value);
  __ bne(&not_same_value);

  if (FLAG_debug_code) {
    Label done;
    // This can only be true for Constant, ConstantType and Undefined cells,
    // because we never store the_hole via this stub.
    __ cmpi(cell_details,
            Operand(PropertyDetails::PropertyCellTypeField::encode(
                        PropertyCellType::kConstant) |
                    PropertyDetails::KindField::encode(kData)));
    __ beq(&done);
    __ cmpi(cell_details,
            Operand(PropertyDetails::PropertyCellTypeField::encode(
                        PropertyCellType::kConstantType) |
                    PropertyDetails::KindField::encode(kData)));
    __ beq(&done);
    __ cmpi(cell_details,
            Operand(PropertyDetails::PropertyCellTypeField::encode(
                        PropertyCellType::kUndefined) |
                    PropertyDetails::KindField::encode(kData)));
    __ Check(eq, kUnexpectedValue);
    __ bind(&done);
  }
  // Storing the value the cell already holds: nothing to do.
  __ Ret();

  __ bind(&not_same_value);
  // Check if PropertyCell contains data with constant type.
  __ cmpi(cell_details, Operand(PropertyDetails::PropertyCellTypeField::encode(
                                    PropertyCellType::kConstantType) |
                                PropertyDetails::KindField::encode(kData)));
  __ bne(&slow_case);

  // Now either both old and new values must be smis or both must be heap
  // objects with same map.
  Label value_is_heap_object;
  __ JumpIfNotSmi(value, &value_is_heap_object);
  __ JumpIfNotSmi(cell_value, &slow_case);
  // Old and new values are smis, no need for a write barrier here.
  __ bind(&fast_smi_case);
  __ StoreP(value, FieldMemOperand(cell, PropertyCell::kValueOffset), r0);
  __ Ret();

  __ bind(&value_is_heap_object);
  __ JumpIfSmi(cell_value, &slow_case);

  // Both are heap objects: the fast store is allowed only when their maps
  // agree (the cell's constant type is preserved).
  __ LoadP(cell_value_map, FieldMemOperand(cell_value, HeapObject::kMapOffset));
  __ LoadP(scratch, FieldMemOperand(value, HeapObject::kMapOffset));
  __ cmp(cell_value_map, scratch);
  __ beq(&fast_heapobject_case);

  // Fallback to runtime.
  __ bind(&slow_case);
  __ SmiTag(slot);  // the runtime expects a Smi slot index
  __ Push(slot, name, value);
  __ TailCallRuntime(is_strict(language_mode())
                         ? Runtime::kStoreGlobalViaContext_Strict
                         : Runtime::kStoreGlobalViaContext_Sloppy,
                     3, 1);
}
// Returns the signed distance in bytes from |ref1| to |ref0|.
static int AddressOffset(ExternalReference ref0, ExternalReference ref1) {
  return static_cast<int>(ref0.address() - ref1.address());
}
......
......@@ -36,12 +36,10 @@ const Register VectorStoreICDescriptor::VectorRegister() { return r6; }
const Register StoreTransitionDescriptor::MapRegister() { return r6; }

// Calling-convention registers for the Load/StoreGlobalViaContext stubs on
// PPC.  Note: r3 doubles as the PPC return-value register, so the store
// stub's value comes in (and the load stub's result goes out) in r3.
const Register LoadGlobalViaContextDescriptor::DepthRegister() { return r4; }
const Register LoadGlobalViaContextDescriptor::SlotRegister() { return r5; }
const Register LoadGlobalViaContextDescriptor::NameRegister() { return r6; }
const Register StoreGlobalViaContextDescriptor::DepthRegister() { return r4; }
const Register StoreGlobalViaContextDescriptor::SlotRegister() { return r5; }
const Register StoreGlobalViaContextDescriptor::NameRegister() { return r6; }
const Register StoreGlobalViaContextDescriptor::ValueRegister() { return r3; }
......
......@@ -3064,16 +3064,20 @@ void LCodeGen::DoLoadGlobalViaContext(LLoadGlobalViaContext* instr) {
DCHECK(ToRegister(instr->context()).is(cp));
DCHECK(ToRegister(instr->result()).is(r3));
__ mov(LoadGlobalViaContextDescriptor::DepthRegister(),
Operand(Smi::FromInt(instr->depth())));
__ mov(LoadGlobalViaContextDescriptor::SlotRegister(),
Operand(Smi::FromInt(instr->slot_index())));
__ mov(LoadGlobalViaContextDescriptor::NameRegister(),
Operand(instr->name()));
Handle<Code> stub =
CodeFactory::LoadGlobalViaContext(isolate(), instr->depth()).code();
CallCode(stub, RelocInfo::CODE_TARGET, instr);
int const slot = instr->slot_index();
int const depth = instr->depth();
if (depth <= LoadGlobalViaContextStub::kMaximumDepth) {
__ mov(LoadGlobalViaContextDescriptor::SlotRegister(), Operand(slot));
__ mov(LoadGlobalViaContextDescriptor::NameRegister(),
Operand(instr->name()));
Handle<Code> stub =
CodeFactory::LoadGlobalViaContext(isolate(), depth).code();
CallCode(stub, RelocInfo::CODE_TARGET, instr);
} else {
__ Push(Smi::FromInt(slot));
__ Push(instr->name());
__ CallRuntime(Runtime::kLoadGlobalViaContext, 2);
}
}
......@@ -4485,17 +4489,24 @@ void LCodeGen::DoStoreGlobalViaContext(LStoreGlobalViaContext* instr) {
DCHECK(ToRegister(instr->value())
.is(StoreGlobalViaContextDescriptor::ValueRegister()));
__ mov(StoreGlobalViaContextDescriptor::DepthRegister(),
Operand(Smi::FromInt(instr->depth())));
__ mov(StoreGlobalViaContextDescriptor::SlotRegister(),
Operand(Smi::FromInt(instr->slot_index())));
__ mov(StoreGlobalViaContextDescriptor::NameRegister(),
Operand(instr->name()));
Handle<Code> stub =
CodeFactory::StoreGlobalViaContext(isolate(), instr->depth(),
instr->language_mode()).code();
CallCode(stub, RelocInfo::CODE_TARGET, instr);
int const slot = instr->slot_index();
int const depth = instr->depth();
if (depth <= StoreGlobalViaContextStub::kMaximumDepth) {
__ mov(StoreGlobalViaContextDescriptor::SlotRegister(), Operand(slot));
__ mov(StoreGlobalViaContextDescriptor::NameRegister(),
Operand(instr->name()));
Handle<Code> stub = CodeFactory::StoreGlobalViaContext(
isolate(), depth, instr->language_mode()).code();
CallCode(stub, RelocInfo::CODE_TARGET, instr);
} else {
__ Push(Smi::FromInt(slot));
__ Push(instr->name());
__ push(StoreGlobalViaContextDescriptor::ValueRegister());
__ CallRuntime(is_strict(instr->language_mode())
? Runtime::kStoreGlobalViaContext_Strict
: Runtime::kStoreGlobalViaContext_Sloppy,
3);
}
}
......
......@@ -213,7 +213,7 @@ class MacroAssembler : public Assembler {
// |object| is the object being stored into, |value| is the object being
// stored. value and scratch registers are clobbered by the operation.
// The offset is the offset from the start of the object, not the offset from
// the tagged HeapObject pointer. For use with FieldOperand(reg, off).
// the tagged HeapObject pointer. For use with FieldMemOperand(reg, off).
void RecordWriteField(
Register object, int offset, Register value, Register scratch,
LinkRegisterStatus lr_status, SaveFPRegsMode save_fp,
......@@ -1549,7 +1549,7 @@ class CodePatcher {
// -----------------------------------------------------------------------------
// Static helper functions.
// Returns a MemOperand addressing slot |index| of the given context register.
// The default index of 0 lets callers that have already computed the slot
// address (e.g. via ShiftLeftImm/add) dereference it directly.
// NOTE(review): the scrape showed both the old and the new signature lines
// back to back; only the new default-argument form is kept here.
inline MemOperand ContextOperand(Register context, int index = 0) {
  return MemOperand(context, Context::SlotOffset(index));
}
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment