Commit 4c698991 authored by chunyang.dai's avatar chunyang.dai Committed by Commit bot

X87: [stubs] Optimize LoadGlobalViaContextStub and StoreGlobalViaContextStub.

Port d6ee366d (r29834).

original commit message:

    This is the initial round of optimizations for the
    LoadGlobalViaContextStub and StoreGlobalViaContextStub, basically
    turning them into platform code stubs to avoid the Crankshaft overhead
    in the fast case, and making the runtime interface cheaper.

BUG=

Review URL: https://codereview.chromium.org/1258513003

Cr-Commit-Position: refs/heads/master@{#29839}
parent c4cd117e
...@@ -4776,6 +4776,161 @@ void InternalArrayConstructorStub::Generate(MacroAssembler* masm) { ...@@ -4776,6 +4776,161 @@ void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
} }
// Fast path for loading a global variable that lives in a script context:
// walks up the context chain a statically known number of steps (depth()),
// loads the PropertyCell stored at the requested slot and returns its value.
// Falls back to %LoadGlobalViaContext when the cell holds the_hole.
//
// Incoming register contract (see LoadGlobalViaContextDescriptor):
//   esi (context_reg) - current context
//   ebx (slot_reg)    - slot index, untagged (SmiTagged only for the
//                       runtime fallback below)
//   ecx (name_reg)    - variable name, only needed by the runtime fallback
//   eax (result_reg)  - result; also used as scratch while walking the chain
void LoadGlobalViaContextStub::Generate(MacroAssembler* masm) {
  Register context_reg = esi;
  Register slot_reg = ebx;
  Register name_reg = ecx;
  Register result_reg = eax;
  Label slow_case;

  // Go up context chain to the script context.
  for (int i = 0; i < depth(); ++i) {
    __ mov(result_reg, ContextOperand(context_reg, Context::PREVIOUS_INDEX));
    context_reg = result_reg;
  }

  // Load the PropertyCell value at the specified slot.
  __ mov(result_reg, ContextOperand(context_reg, slot_reg));
  __ mov(result_reg, FieldOperand(result_reg, PropertyCell::kValueOffset));

  // Check that value is not the_hole.
  __ CompareRoot(result_reg, Heap::kTheHoleValueRootIndex);
  __ j(equal, &slow_case, Label::kNear);
  __ Ret();

  // Fallback to the runtime.
  __ bind(&slow_case);
  __ SmiTag(slot_reg);  // The runtime expects the slot index as a Smi.
  // Shuffle the stack so the return address stays on top of the two
  // runtime arguments (slot, name) for the tail call.
  __ Pop(result_reg);  // Pop return address.
  __ Push(slot_reg);
  __ Push(name_reg);
  __ Push(result_reg);  // Push return address.
  __ TailCallRuntime(Runtime::kLoadGlobalViaContext, 2, 1);
}
// Fast path for storing to a global variable in a script context: walks up
// the context chain by the statically known depth(), loads the PropertyCell
// at the requested slot and stores the value when the cell's PropertyDetails
// permit it without changing the cell type:
//   - kMutable data cells: always storable (with write barrier for heap
//     objects).
//   - Cells whose current value equals the new value: the store is a no-op.
//   - kConstantType data cells: additionally allow smi -> smi, or heap
//     object -> heap object with the same map.
// Everything else falls back to the runtime.
//
// Incoming register contract (see StoreGlobalViaContextDescriptor):
//   esi - context, ebx - slot index (untagged), ecx - name, eax - value.
//   edi and edx are used as scratch for the cell and its details.
void StoreGlobalViaContextStub::Generate(MacroAssembler* masm) {
  Register context_reg = esi;
  Register slot_reg = ebx;
  Register name_reg = ecx;
  Register value_reg = eax;
  Register cell_reg = edi;
  Register cell_details_reg = edx;
  Label fast_heapobject_case, fast_smi_case, slow_case;

  if (FLAG_debug_code) {
    // the_hole must never be stored through this stub; the cell-type check
    // further down relies on that invariant.
    __ CompareRoot(value_reg, Heap::kTheHoleValueRootIndex);
    __ Check(not_equal, kUnexpectedValue);
    __ AssertName(name_reg);
  }

  // Go up context chain to the script context.
  for (int i = 0; i < depth(); ++i) {
    __ mov(cell_reg, ContextOperand(context_reg, Context::PREVIOUS_INDEX));
    context_reg = cell_reg;
  }

  // Load the PropertyCell at the specified slot.
  __ mov(cell_reg, ContextOperand(context_reg, slot_reg));

  // Load PropertyDetails for the cell (actually only the cell_type and kind).
  __ mov(cell_details_reg,
         FieldOperand(cell_reg, PropertyCell::kDetailsOffset));
  __ SmiUntag(cell_details_reg);
  // Mask away everything but cell_type and kind so the equality compares
  // below can use a single immediate each.
  __ and_(cell_details_reg,
          Immediate(PropertyDetails::PropertyCellTypeField::kMask |
                    PropertyDetails::KindField::kMask));

  // Check if PropertyCell holds mutable data.
  Label not_mutable_data;
  __ cmp(cell_details_reg,
         Immediate(PropertyDetails::PropertyCellTypeField::encode(
                       PropertyCellType::kMutable) |
                   PropertyDetails::KindField::encode(kData)));
  __ j(not_equal, &not_mutable_data);
  __ JumpIfSmi(value_reg, &fast_smi_case);
  __ bind(&fast_heapobject_case);
  __ mov(FieldOperand(cell_reg, PropertyCell::kValueOffset), value_reg);
  // The details are no longer needed; hand cell_details_reg to
  // RecordWriteField as a scratch register.
  __ RecordWriteField(cell_reg, PropertyCell::kValueOffset, value_reg,
                      cell_details_reg, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
  // RecordWriteField clobbers the value register, so we need to reload.
  __ mov(value_reg, FieldOperand(cell_reg, PropertyCell::kValueOffset));
  __ Ret();

  __ bind(&not_mutable_data);
  // Check if PropertyCell value matches the new value (relevant for Constant,
  // ConstantType and Undefined cells).
  Label not_same_value;
  __ cmp(value_reg, FieldOperand(cell_reg, PropertyCell::kValueOffset));
  __ j(not_equal, &not_same_value,
       FLAG_debug_code ? Label::kFar : Label::kNear);
  if (FLAG_debug_code) {
    Label done;
    // This can only be true for Constant, ConstantType and Undefined cells,
    // because we never store the_hole via this stub.
    __ cmp(cell_details_reg,
           Immediate(PropertyDetails::PropertyCellTypeField::encode(
                         PropertyCellType::kConstant) |
                     PropertyDetails::KindField::encode(kData)));
    __ j(equal, &done);
    __ cmp(cell_details_reg,
           Immediate(PropertyDetails::PropertyCellTypeField::encode(
                         PropertyCellType::kConstantType) |
                     PropertyDetails::KindField::encode(kData)));
    __ j(equal, &done);
    __ cmp(cell_details_reg,
           Immediate(PropertyDetails::PropertyCellTypeField::encode(
                         PropertyCellType::kUndefined) |
                     PropertyDetails::KindField::encode(kData)));
    __ Check(equal, kUnexpectedValue);
    __ bind(&done);
  }
  // Storing the identical value back is a no-op; just return.
  __ Ret();

  __ bind(&not_same_value);
  // Check if PropertyCell contains data with constant type.
  __ cmp(cell_details_reg,
         Immediate(PropertyDetails::PropertyCellTypeField::encode(
                       PropertyCellType::kConstantType) |
                   PropertyDetails::KindField::encode(kData)));
  __ j(not_equal, &slow_case, Label::kNear);

  // Now either both old and new values must be SMIs or both must be heap
  // objects with same map.
  Label value_is_heap_object;
  // The details have been consumed; reuse the register for the cell's
  // current value.
  Register cell_value_reg = cell_details_reg;
  __ mov(cell_value_reg, FieldOperand(cell_reg, PropertyCell::kValueOffset));
  __ JumpIfNotSmi(value_reg, &value_is_heap_object, Label::kNear);
  __ JumpIfNotSmi(cell_value_reg, &slow_case, Label::kNear);
  // Old and new values are SMIs, no need for a write barrier here.
  __ bind(&fast_smi_case);
  __ mov(FieldOperand(cell_reg, PropertyCell::kValueOffset), value_reg);
  __ Ret();

  __ bind(&value_is_heap_object);
  __ JumpIfSmi(cell_value_reg, &slow_case, Label::kNear);
  Register cell_value_map_reg = cell_value_reg;
  __ mov(cell_value_map_reg,
         FieldOperand(cell_value_reg, HeapObject::kMapOffset));
  __ cmp(cell_value_map_reg, FieldOperand(value_reg, HeapObject::kMapOffset));
  // Same map as the current value: safe to store with a write barrier.
  __ j(equal, &fast_heapobject_case);

  // Fallback to the runtime.
  __ bind(&slow_case);
  __ SmiTag(slot_reg);  // The runtime expects the slot index as a Smi.
  // Shuffle the stack so the return address stays on top of the three
  // runtime arguments (slot, name, value) for the tail call.
  __ Pop(cell_reg);  // Pop return address.
  __ Push(slot_reg);
  __ Push(name_reg);
  __ Push(value_reg);
  __ Push(cell_reg);  // Push return address.
  __ TailCallRuntime(is_strict(language_mode())
                         ? Runtime::kStoreGlobalViaContext_Strict
                         : Runtime::kStoreGlobalViaContext_Sloppy,
                     3, 1);
}
// Generates an Operand for saving parameters after PrepareCallApiFunction. // Generates an Operand for saving parameters after PrepareCallApiFunction.
static Operand ApiParameterOperand(int index) { static Operand ApiParameterOperand(int index) {
return Operand(esp, index * kPointerSize); return Operand(esp, index * kPointerSize);
......
...@@ -1334,15 +1334,18 @@ void FullCodeGenerator::EmitGlobalVariableLoad(VariableProxy* proxy, ...@@ -1334,15 +1334,18 @@ void FullCodeGenerator::EmitGlobalVariableLoad(VariableProxy* proxy,
DCHECK(var->index() > 0); DCHECK(var->index() > 0);
DCHECK(var->IsStaticGlobalObjectProperty()); DCHECK(var->IsStaticGlobalObjectProperty());
// Each var occupies two slots in the context: for reads and writes. // Each var occupies two slots in the context: for reads and writes.
int slot_index = var->index(); int const slot = var->index();
int depth = scope()->ContextChainLength(var->scope()); int const depth = scope()->ContextChainLength(var->scope());
__ mov(LoadGlobalViaContextDescriptor::DepthRegister(), if (depth <= LoadGlobalViaContextStub::kMaximumDepth) {
Immediate(Smi::FromInt(depth))); __ Move(LoadGlobalViaContextDescriptor::SlotRegister(), Immediate(slot));
__ mov(LoadGlobalViaContextDescriptor::SlotRegister(), __ mov(LoadGlobalViaContextDescriptor::NameRegister(), var->name());
Immediate(Smi::FromInt(slot_index))); LoadGlobalViaContextStub stub(isolate(), depth);
__ mov(LoadGlobalViaContextDescriptor::NameRegister(), var->name()); __ CallStub(&stub);
LoadGlobalViaContextStub stub(isolate(), depth); } else {
__ CallStub(&stub); __ Push(Smi::FromInt(slot));
__ Push(var->name());
__ CallRuntime(Runtime::kLoadGlobalViaContext, 2);
}
} else { } else {
__ mov(LoadDescriptor::ReceiverRegister(), GlobalObjectOperand()); __ mov(LoadDescriptor::ReceiverRegister(), GlobalObjectOperand());
...@@ -2611,16 +2614,23 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op, ...@@ -2611,16 +2614,23 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
DCHECK(var->index() > 0); DCHECK(var->index() > 0);
DCHECK(var->IsStaticGlobalObjectProperty()); DCHECK(var->IsStaticGlobalObjectProperty());
// Each var occupies two slots in the context: for reads and writes. // Each var occupies two slots in the context: for reads and writes.
int slot_index = var->index() + 1; int const slot = var->index() + 1;
int depth = scope()->ContextChainLength(var->scope()); int const depth = scope()->ContextChainLength(var->scope());
__ mov(StoreGlobalViaContextDescriptor::DepthRegister(), if (depth <= StoreGlobalViaContextStub::kMaximumDepth) {
Immediate(Smi::FromInt(depth))); __ Move(StoreGlobalViaContextDescriptor::SlotRegister(), Immediate(slot));
__ mov(StoreGlobalViaContextDescriptor::SlotRegister(), __ mov(StoreGlobalViaContextDescriptor::NameRegister(), var->name());
Immediate(Smi::FromInt(slot_index))); DCHECK(StoreGlobalViaContextDescriptor::ValueRegister().is(eax));
__ mov(StoreGlobalViaContextDescriptor::NameRegister(), var->name()); StoreGlobalViaContextStub stub(isolate(), depth, language_mode());
DCHECK(StoreGlobalViaContextDescriptor::ValueRegister().is(eax)); __ CallStub(&stub);
StoreGlobalViaContextStub stub(isolate(), depth, language_mode()); } else {
__ CallStub(&stub); __ Push(Smi::FromInt(slot));
__ Push(var->name());
__ Push(eax);
__ CallRuntime(is_strict(language_mode())
? Runtime::kStoreGlobalViaContext_Strict
: Runtime::kStoreGlobalViaContext_Sloppy,
3);
}
} else if (var->mode() == LET && op != Token::INIT_LET) { } else if (var->mode() == LET && op != Token::INIT_LET) {
// Non-initializing assignment to let variable needs a write barrier. // Non-initializing assignment to let variable needs a write barrier.
......
...@@ -37,12 +37,10 @@ const Register StoreTransitionDescriptor::MapRegister() { ...@@ -37,12 +37,10 @@ const Register StoreTransitionDescriptor::MapRegister() {
} }
const Register LoadGlobalViaContextDescriptor::DepthRegister() { return edx; }
const Register LoadGlobalViaContextDescriptor::SlotRegister() { return ebx; } const Register LoadGlobalViaContextDescriptor::SlotRegister() { return ebx; }
const Register LoadGlobalViaContextDescriptor::NameRegister() { return ecx; } const Register LoadGlobalViaContextDescriptor::NameRegister() { return ecx; }
const Register StoreGlobalViaContextDescriptor::DepthRegister() { return edx; }
const Register StoreGlobalViaContextDescriptor::SlotRegister() { return ebx; } const Register StoreGlobalViaContextDescriptor::SlotRegister() { return ebx; }
const Register StoreGlobalViaContextDescriptor::NameRegister() { return ecx; } const Register StoreGlobalViaContextDescriptor::NameRegister() { return ecx; }
const Register StoreGlobalViaContextDescriptor::ValueRegister() { return eax; } const Register StoreGlobalViaContextDescriptor::ValueRegister() { return eax; }
......
...@@ -3150,15 +3150,19 @@ void LCodeGen::DoLoadGlobalViaContext(LLoadGlobalViaContext* instr) { ...@@ -3150,15 +3150,19 @@ void LCodeGen::DoLoadGlobalViaContext(LLoadGlobalViaContext* instr) {
DCHECK(ToRegister(instr->context()).is(esi)); DCHECK(ToRegister(instr->context()).is(esi));
DCHECK(ToRegister(instr->result()).is(eax)); DCHECK(ToRegister(instr->result()).is(eax));
__ mov(LoadGlobalViaContextDescriptor::DepthRegister(), int const slot = instr->slot_index();
Immediate(Smi::FromInt(instr->depth()))); int const depth = instr->depth();
__ mov(LoadGlobalViaContextDescriptor::SlotRegister(), if (depth <= LoadGlobalViaContextStub::kMaximumDepth) {
Immediate(Smi::FromInt(instr->slot_index()))); __ mov(LoadGlobalViaContextDescriptor::SlotRegister(), slot);
__ mov(LoadGlobalViaContextDescriptor::NameRegister(), instr->name()); __ mov(LoadGlobalViaContextDescriptor::NameRegister(), instr->name());
Handle<Code> stub =
Handle<Code> stub = CodeFactory::LoadGlobalViaContext(isolate(), depth).code();
CodeFactory::LoadGlobalViaContext(isolate(), instr->depth()).code(); CallCode(stub, RelocInfo::CODE_TARGET, instr);
CallCode(stub, RelocInfo::CODE_TARGET, instr); } else {
__ Push(Smi::FromInt(slot));
__ Push(instr->name());
__ CallRuntime(Runtime::kLoadGlobalViaContext, 2);
}
} }
...@@ -4546,16 +4550,24 @@ void LCodeGen::DoStoreGlobalViaContext(LStoreGlobalViaContext* instr) { ...@@ -4546,16 +4550,24 @@ void LCodeGen::DoStoreGlobalViaContext(LStoreGlobalViaContext* instr) {
DCHECK(ToRegister(instr->value()) DCHECK(ToRegister(instr->value())
.is(StoreGlobalViaContextDescriptor::ValueRegister())); .is(StoreGlobalViaContextDescriptor::ValueRegister()));
__ mov(StoreGlobalViaContextDescriptor::DepthRegister(), int const slot = instr->slot_index();
Immediate(Smi::FromInt(instr->depth()))); int const depth = instr->depth();
__ mov(StoreGlobalViaContextDescriptor::SlotRegister(), if (depth <= StoreGlobalViaContextStub::kMaximumDepth) {
Immediate(Smi::FromInt(instr->slot_index()))); __ mov(StoreGlobalViaContextDescriptor::SlotRegister(), slot);
__ mov(StoreGlobalViaContextDescriptor::NameRegister(), instr->name()); __ mov(StoreGlobalViaContextDescriptor::NameRegister(), instr->name());
Handle<Code> stub = CodeFactory::StoreGlobalViaContext(
Handle<Code> stub = isolate(), depth, instr->language_mode())
CodeFactory::StoreGlobalViaContext(isolate(), instr->depth(), .code();
instr->language_mode()).code(); CallCode(stub, RelocInfo::CODE_TARGET, instr);
CallCode(stub, RelocInfo::CODE_TARGET, instr); } else {
__ Push(Smi::FromInt(slot));
__ Push(instr->name());
__ Push(StoreGlobalViaContextDescriptor::ValueRegister());
__ CallRuntime(is_strict(instr->language_mode())
? Runtime::kStoreGlobalViaContext_Strict
: Runtime::kStoreGlobalViaContext_Sloppy,
3);
}
} }
......
...@@ -1022,6 +1022,11 @@ inline Operand ContextOperand(Register context, int index) { ...@@ -1022,6 +1022,11 @@ inline Operand ContextOperand(Register context, int index) {
} }
// Builds a memory operand for a context slot whose (untagged) index is held
// in a register: context + index * kPointerSize + offset of slot 0.
inline Operand ContextOperand(Register context, Register index) {
  const int first_slot_offset = Context::SlotOffset(0);
  return Operand(context, index, times_pointer_size, first_slot_offset);
}
inline Operand GlobalObjectOperand() { inline Operand GlobalObjectOperand() {
return ContextOperand(esi, Context::GLOBAL_OBJECT_INDEX); return ContextOperand(esi, Context::GLOBAL_OBJECT_INDEX);
} }
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment