Commit 44ab0e15 authored by palfia@homejinni.com

MIPS: Convert FastNewClosureStub into hydrogen.

Port r16356 (0a0ea300)

BUG=

Review URL: https://codereview.chromium.org/23618007

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@16371 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
parent a7be0472
@@ -39,6 +39,17 @@ namespace v8 {
 namespace internal {
 
 
+void FastNewClosureStub::InitializeInterfaceDescriptor(
+    Isolate* isolate,
+    CodeStubInterfaceDescriptor* descriptor) {
+  static Register registers[] = { a2 };
+  descriptor->register_param_count_ = 1;
+  descriptor->register_params_ = registers;
+  descriptor->deoptimization_handler_ =
+      Runtime::FunctionForId(Runtime::kNewClosureFromStubFailure)->entry;
+}
+
+
 void ToNumberStub::InitializeInterfaceDescriptor(
     Isolate* isolate,
     CodeStubInterfaceDescriptor* descriptor) {
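With this descriptor in place, the stub receives its single argument, the SharedFunctionInfo, in a2 instead of on the stack, and a stub failure deoptimizes into Runtime::kNewClosureFromStubFailure. A minimal call-site sketch (it mirrors the full-codegen and Lithium call-site changes further down; language_mode, is_generator, and shared_info are stand-in names for values the caller already has):

    // Load the one register parameter named by the descriptor and call the
    // stub; the old push/pop of the function info is gone.
    FastNewClosureStub stub(language_mode, is_generator);
    __ li(a2, Operand(shared_info));  // a2 == registers[0] from the descriptor
    __ CallStub(&stub);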
@@ -310,134 +321,6 @@ void HydrogenCodeStub::GenerateLightweightMiss(MacroAssembler* masm) {
 }
 
 
-void FastNewClosureStub::Generate(MacroAssembler* masm) {
-  // Create a new closure from the given function info in new
-  // space. Set the context to the current context in cp.
-  Counters* counters = masm->isolate()->counters();
-
-  Label gc;
-
-  // Pop the function info from the stack.
-  __ pop(a3);
-
-  // Attempt to allocate a new JSFunction in new space.
-  __ Allocate(JSFunction::kSize, v0, a1, a2, &gc, TAG_OBJECT);
-
-  __ IncrementCounter(counters->fast_new_closure_total(), 1, t2, t3);
-
-  int map_index = Context::FunctionMapIndex(language_mode_, is_generator_);
-
-  // Compute the function map in the current native context and set that
-  // as the map of the allocated object.
-  __ lw(a2, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
-  __ lw(a2, FieldMemOperand(a2, GlobalObject::kNativeContextOffset));
-  __ lw(t1, MemOperand(a2, Context::SlotOffset(map_index)));
-  __ sw(t1, FieldMemOperand(v0, HeapObject::kMapOffset));
-
-  // Initialize the rest of the function. We don't have to update the
-  // write barrier because the allocated object is in new space.
-  __ LoadRoot(a1, Heap::kEmptyFixedArrayRootIndex);
-  __ LoadRoot(t1, Heap::kTheHoleValueRootIndex);
-  __ sw(a1, FieldMemOperand(v0, JSObject::kPropertiesOffset));
-  __ sw(a1, FieldMemOperand(v0, JSObject::kElementsOffset));
-  __ sw(t1, FieldMemOperand(v0, JSFunction::kPrototypeOrInitialMapOffset));
-  __ sw(a3, FieldMemOperand(v0, JSFunction::kSharedFunctionInfoOffset));
-  __ sw(cp, FieldMemOperand(v0, JSFunction::kContextOffset));
-  __ sw(a1, FieldMemOperand(v0, JSFunction::kLiteralsOffset));
-
-  // Initialize the code pointer in the function to be the one
-  // found in the shared function info object.
-  // But first check if there is an optimized version for our context.
-  Label check_optimized;
-  Label install_unoptimized;
-  if (FLAG_cache_optimized_code) {
-    __ lw(a1,
-          FieldMemOperand(a3, SharedFunctionInfo::kOptimizedCodeMapOffset));
-    __ And(at, a1, a1);
-    __ Branch(&check_optimized, ne, at, Operand(zero_reg));
-  }
-
-  __ bind(&install_unoptimized);
-  __ LoadRoot(t0, Heap::kUndefinedValueRootIndex);
-  __ sw(t0, FieldMemOperand(v0, JSFunction::kNextFunctionLinkOffset));
-  __ lw(a3, FieldMemOperand(a3, SharedFunctionInfo::kCodeOffset));
-  __ Addu(a3, a3, Operand(Code::kHeaderSize - kHeapObjectTag));
-
-  // Return result. The argument function info has been popped already.
-  __ Ret(USE_DELAY_SLOT);
-  __ sw(a3, FieldMemOperand(v0, JSFunction::kCodeEntryOffset));
-
-  __ bind(&check_optimized);
-
-  __ IncrementCounter(counters->fast_new_closure_try_optimized(), 1, t2, t3);
-
-  // a2 holds the native context, a1 points to a fixed array of 3-element
-  // entries (native context, optimized code, literals).
-  // The optimized code map must never be empty, so check the first elements.
-  Label install_optimized;
-  // Speculatively move the code object into t0.
-  __ lw(t0, FieldMemOperand(a1, SharedFunctionInfo::kFirstCodeSlot));
-  __ lw(t1, FieldMemOperand(a1, SharedFunctionInfo::kFirstContextSlot));
-  __ Branch(&install_optimized, eq, a2, Operand(t1));
-
-  // Iterate through the rest of the map backwards. t0 holds an index as a Smi.
-  Label loop;
-  __ lw(t0, FieldMemOperand(a1, FixedArray::kLengthOffset));
-  __ bind(&loop);
-  // Do not double-check the first entry.
-  __ Branch(&install_unoptimized, eq, t0,
-            Operand(Smi::FromInt(SharedFunctionInfo::kSecondEntryIndex)));
-  __ Subu(t0, t0, Operand(Smi::FromInt(SharedFunctionInfo::kEntryLength)));
-  __ Addu(t1, a1, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
-  __ sll(at, t0, kPointerSizeLog2 - kSmiTagSize);
-  __ Addu(t1, t1, Operand(at));
-  __ lw(t1, MemOperand(t1));
-  __ Branch(&loop, ne, a2, Operand(t1));
-  // Hit: fetch the optimized code.
-  __ Addu(t1, a1, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
-  __ sll(at, t0, kPointerSizeLog2 - kSmiTagSize);
-  __ Addu(t1, t1, Operand(at));
-  __ Addu(t1, t1, Operand(kPointerSize));
-  __ lw(t0, MemOperand(t1));
-
-  __ bind(&install_optimized);
-  __ IncrementCounter(counters->fast_new_closure_install_optimized(),
-                      1, t2, t3);
-
-  // TODO(fschneider): Idea: store proper code pointers in the map and either
-  // unmangle them on marking or do nothing as the whole map is discarded on
-  // major GC anyway.
-  __ Addu(t0, t0, Operand(Code::kHeaderSize - kHeapObjectTag));
-  __ sw(t0, FieldMemOperand(v0, JSFunction::kCodeEntryOffset));
-
-  // Now link the function into the list of optimized functions.
-  __ lw(t0, ContextOperand(a2, Context::OPTIMIZED_FUNCTIONS_LIST));
-
-  __ sw(t0, FieldMemOperand(v0, JSFunction::kNextFunctionLinkOffset));
-  // No need for a write barrier as the JSFunction (v0) is in new space.
-
-  __ sw(v0, ContextOperand(a2, Context::OPTIMIZED_FUNCTIONS_LIST));
-  // Copy the JSFunction (v0) into t0 before issuing the write barrier, as
-  // it clobbers all the registers passed.
-  __ mov(t0, v0);
-  __ RecordWriteContextSlot(
-      a2,
-      Context::SlotOffset(Context::OPTIMIZED_FUNCTIONS_LIST),
-      t0,
-      a1,
-      kRAHasNotBeenSaved,
-      kDontSaveFPRegs);
-
-  // Return result. The argument function info has been popped already.
-  __ Ret();
-
-  // Create a new closure through the slower runtime call.
-  __ bind(&gc);
-  __ LoadRoot(t0, Heap::kFalseValueRootIndex);
-  __ Push(cp, a3, t0);
-  __ TailCallRuntime(Runtime::kNewClosure, 3, 1);
-}
-
-
 void FastNewContextStub::Generate(MacroAssembler* masm) {
   // Try to allocate the context in new space.
   Label gc;
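The deleted fast path above did two things in hand-written assembly: allocate the JSFunction in new space, and probe the SharedFunctionInfo's optimized code map for a code object compiled for the current native context. The probe loop corresponds roughly to this C++-style sketch (a reading aid only; the Smi tagging of the index and the array header offsets are elided, and the {context, code, literals} entry layout is taken from the comments above):

    // The first entry is checked speculatively before the loop
    // (kFirstContextSlot / kFirstCodeSlot), so the loop skips it.
    int i = map->length();
    while (i != SharedFunctionInfo::kSecondEntryIndex) {
      i -= SharedFunctionInfo::kEntryLength;
      if (map->get(i) == native_context) {
        code = Code::cast(map->get(i + 1));  // code slot follows the context slot
        break;                               // hit: install the optimized code
      }
    }
    // Falling out of the loop without a hit installs the unoptimized code
    // from the SharedFunctionInfo instead.

After this commit the same logic is emitted from a shared Hydrogen graph (see the ported r16356), so each architecture only supplies the interface descriptor above and the small Lithium pieces below.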
@@ -1333,8 +1333,7 @@ void FullCodeGenerator::EmitNewClosure(Handle<SharedFunctionInfo> info,
       scope()->is_function_scope() &&
       info->num_literals() == 0) {
     FastNewClosureStub stub(info->language_mode(), info->is_generator());
-    __ li(a0, Operand(info));
-    __ push(a0);
+    __ li(a2, Operand(info));
     __ CallStub(&stub);
   } else {
     __ li(a0, Operand(info));
@@ -4067,6 +4067,16 @@ void LCodeGen::DoCallRuntime(LCallRuntime* instr) {
 }
 
 
+void LCodeGen::DoStoreCodeEntry(LStoreCodeEntry* instr) {
+  Register function = ToRegister(instr->function());
+  Register code_object = ToRegister(instr->code_object());
+  __ Addu(code_object, code_object,
+          Operand(Code::kHeaderSize - kHeapObjectTag));
+  __ sw(code_object,
+        FieldMemOperand(function, JSFunction::kCodeEntryOffset));
+}
+
+
 void LCodeGen::DoInnerAllocatedObject(LInnerAllocatedObject* instr) {
   Register result = ToRegister(instr->result());
   Register base = ToRegister(instr->base_object());
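DoStoreCodeEntry stores the raw entry address of a Code object into the JSFunction's code-entry field. The single Addu performs the untagging and the header skip in one step; as a worked sketch of the arithmetic (using the fact that kHeapObjectTag == 1 in V8):

    // code_object (tagged)  == code_start + kHeapObjectTag
    // entry address         == code_start + Code::kHeaderSize
    // hence: entry == code_object + (Code::kHeaderSize - kHeapObjectTag)

Because the Addu overwrites code_object in place, the Lithium builder below deliberately requests it as a temp register.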
@@ -5407,8 +5417,7 @@ void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
   if (!pretenure && instr->hydrogen()->has_no_literals()) {
     FastNewClosureStub stub(instr->hydrogen()->language_mode(),
                             instr->hydrogen()->is_generator());
-    __ li(a1, Operand(instr->hydrogen()->shared_info()));
-    __ push(a1);
+    __ li(a2, Operand(instr->hydrogen()->shared_info()));
     CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
   } else {
     __ li(a2, Operand(instr->hydrogen()->shared_info()));
@@ -265,6 +265,14 @@ void LTypeofIsAndBranch::PrintDataTo(StringStream* stream) {
 }
 
 
+void LStoreCodeEntry::PrintDataTo(StringStream* stream) {
+  stream->Add(" = ");
+  function()->PrintTo(stream);
+  stream->Add(".code_entry = ");
+  code_object()->PrintTo(stream);
+}
+
+
 void LInnerAllocatedObject::PrintDataTo(StringStream* stream) {
   stream->Add(" = ");
   base_object()->PrintTo(stream);
@@ -1079,6 +1087,14 @@ LInstruction* LChunkBuilder::DoPushArgument(HPushArgument* instr) {
 }
 
 
+LInstruction* LChunkBuilder::DoStoreCodeEntry(
+    HStoreCodeEntry* store_code_entry) {
+  LOperand* function = UseRegister(store_code_entry->function());
+  LOperand* code_object = UseTempRegister(store_code_entry->code_object());
+  return new(zone()) LStoreCodeEntry(function, code_object);
+}
+
+
 LInstruction* LChunkBuilder::DoInnerAllocatedObject(
     HInnerAllocatedObject* inner_object) {
   LOperand* base_object = UseRegisterAtStart(inner_object->base_object());
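The operand policies here line up with the code generated above: function is only read, so a plain UseRegister suffices, while code_object is clobbered by the in-place Addu, so it is requested with UseTempRegister and surfaces as temps_[0] in the LStoreCodeEntry class declared below.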
@@ -161,6 +161,7 @@ class LCodeGen;
   V(SmiTag)                                     \
   V(SmiUntag)                                   \
   V(StackCheck)                                 \
+  V(StoreCodeEntry)                             \
   V(StoreContextSlot)                           \
   V(StoreGlobalCell)                            \
   V(StoreGlobalGeneric)                         \
@@ -1731,7 +1732,24 @@ class LDrop V8_FINAL : public LTemplateInstruction<0, 0, 0> {
 };
 
 
-class LInnerAllocatedObject V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LStoreCodeEntry V8_FINAL: public LTemplateInstruction<0, 1, 1> {
+ public:
+  LStoreCodeEntry(LOperand* function, LOperand* code_object) {
+    inputs_[0] = function;
+    temps_[0] = code_object;
+  }
+
+  LOperand* function() { return inputs_[0]; }
+  LOperand* code_object() { return temps_[0]; }
+
+  virtual void PrintDataTo(StringStream* stream);
+
+  DECLARE_CONCRETE_INSTRUCTION(StoreCodeEntry, "store-code-entry")
+  DECLARE_HYDROGEN_ACCESSOR(StoreCodeEntry)
+};
+
+
+class LInnerAllocatedObject V8_FINAL: public LTemplateInstruction<1, 1, 0> {
  public:
   explicit LInnerAllocatedObject(LOperand* base_object) {
     inputs_[0] = base_object;
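For orientation, the LTemplateInstruction parameters encode <results, inputs, temps>, which is why the two classes in this hunk read the way they do; a brief summary (register names as used above):

    // LStoreCodeEntry:        0 results, 1 input (function), 1 temp (code_object)
    // LInnerAllocatedObject:  1 result,  1 input (base_object), 0 temps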