Convert FastNewClosureStub into hydrogen.

BUG=
R=mstarzinger@chromium.org

Review URL: https://codereview.chromium.org/22562002

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@16356 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
@@ -38,6 +38,17 @@ namespace v8 {
namespace internal {
void FastNewClosureStub::InitializeInterfaceDescriptor(
Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
static Register registers[] = { r2 };
descriptor->register_param_count_ = 1;
descriptor->register_params_ = registers;
descriptor->deoptimization_handler_ =
Runtime::FunctionForId(Runtime::kNewClosureFromStubFailure)->entry;
}
void ToNumberStub::InitializeInterfaceDescriptor(
Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
@@ -309,134 +320,6 @@ void HydrogenCodeStub::GenerateLightweightMiss(MacroAssembler* masm) {
}
void FastNewClosureStub::Generate(MacroAssembler* masm) {
// Create a new closure from the given function info in new
// space. Set the context to the current context in cp.
Counters* counters = masm->isolate()->counters();
Label gc;
// Pop the function info from the stack.
__ pop(r3);
// Attempt to allocate new JSFunction in new space.
__ Allocate(JSFunction::kSize, r0, r1, r2, &gc, TAG_OBJECT);
__ IncrementCounter(counters->fast_new_closure_total(), 1, r6, r7);
int map_index = Context::FunctionMapIndex(language_mode_, is_generator_);
// Compute the function map in the current native context and set that
// as the map of the allocated object.
__ ldr(r2, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
__ ldr(r2, FieldMemOperand(r2, GlobalObject::kNativeContextOffset));
__ ldr(r5, MemOperand(r2, Context::SlotOffset(map_index)));
__ str(r5, FieldMemOperand(r0, HeapObject::kMapOffset));
// Initialize the rest of the function. We don't have to update the
// write barrier because the allocated object is in new space.
__ LoadRoot(r1, Heap::kEmptyFixedArrayRootIndex);
__ LoadRoot(r5, Heap::kTheHoleValueRootIndex);
__ str(r1, FieldMemOperand(r0, JSObject::kPropertiesOffset));
__ str(r1, FieldMemOperand(r0, JSObject::kElementsOffset));
__ str(r5, FieldMemOperand(r0, JSFunction::kPrototypeOrInitialMapOffset));
__ str(r3, FieldMemOperand(r0, JSFunction::kSharedFunctionInfoOffset));
__ str(cp, FieldMemOperand(r0, JSFunction::kContextOffset));
__ str(r1, FieldMemOperand(r0, JSFunction::kLiteralsOffset));
// Initialize the code pointer in the function to be the one
// found in the shared function info object.
// But first check if there is an optimized version for our context.
Label check_optimized;
Label install_unoptimized;
if (FLAG_cache_optimized_code) {
__ ldr(r1,
FieldMemOperand(r3, SharedFunctionInfo::kOptimizedCodeMapOffset));
__ tst(r1, r1);
__ b(ne, &check_optimized);
}
__ bind(&install_unoptimized);
__ LoadRoot(r4, Heap::kUndefinedValueRootIndex);
__ str(r4, FieldMemOperand(r0, JSFunction::kNextFunctionLinkOffset));
__ ldr(r3, FieldMemOperand(r3, SharedFunctionInfo::kCodeOffset));
__ add(r3, r3, Operand(Code::kHeaderSize - kHeapObjectTag));
__ str(r3, FieldMemOperand(r0, JSFunction::kCodeEntryOffset));
// Return result. The argument function info has been popped already.
__ Ret();
__ bind(&check_optimized);
__ IncrementCounter(counters->fast_new_closure_try_optimized(), 1, r6, r7);
// r2 holds native context, r1 points to fixed array of 3-element entries
// (native context, optimized code, literals).
// The optimized code map must never be empty, so check the first entry.
Label install_optimized;
// Speculatively move code object into r4.
__ ldr(r4, FieldMemOperand(r1, SharedFunctionInfo::kFirstCodeSlot));
__ ldr(r5, FieldMemOperand(r1, SharedFunctionInfo::kFirstContextSlot));
__ cmp(r2, r5);
__ b(eq, &install_optimized);
// Iterate through the rest of the map backwards. r4 holds an index as a Smi.
Label loop;
__ ldr(r4, FieldMemOperand(r1, FixedArray::kLengthOffset));
__ bind(&loop);
// Do not double-check the first entry.
__ cmp(r4, Operand(Smi::FromInt(SharedFunctionInfo::kSecondEntryIndex)));
__ b(eq, &install_unoptimized);
__ sub(r4, r4, Operand(Smi::FromInt(SharedFunctionInfo::kEntryLength)));
__ add(r5, r1, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
__ add(r5, r5, Operand::PointerOffsetFromSmiKey(r4));
__ ldr(r5, MemOperand(r5));
__ cmp(r2, r5);
__ b(ne, &loop);
// Hit: fetch the optimized code.
__ add(r5, r1, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
__ add(r5, r5, Operand::PointerOffsetFromSmiKey(r4));
__ add(r5, r5, Operand(kPointerSize));
__ ldr(r4, MemOperand(r5));
__ bind(&install_optimized);
__ IncrementCounter(counters->fast_new_closure_install_optimized(),
1, r6, r7);
// TODO(fschneider): Idea: store proper code pointers in the map and either
// unmangle them on marking or do nothing as the whole map is discarded on
// major GC anyway.
__ add(r4, r4, Operand(Code::kHeaderSize - kHeapObjectTag));
__ str(r4, FieldMemOperand(r0, JSFunction::kCodeEntryOffset));
// Now link a function into a list of optimized functions.
__ ldr(r4, ContextOperand(r2, Context::OPTIMIZED_FUNCTIONS_LIST));
__ str(r4, FieldMemOperand(r0, JSFunction::kNextFunctionLinkOffset));
// No need for write barrier as JSFunction (r0) is in new space.
__ str(r0, ContextOperand(r2, Context::OPTIMIZED_FUNCTIONS_LIST));
// Copy JSFunction (r0) into r4 before issuing the write barrier, as
// it clobbers all the registers passed.
__ mov(r4, r0);
__ RecordWriteContextSlot(
r2,
Context::SlotOffset(Context::OPTIMIZED_FUNCTIONS_LIST),
r4,
r1,
kLRHasNotBeenSaved,
kDontSaveFPRegs);
// Return result. The argument function info has been popped already.
__ Ret();
// Create a new closure through the slower runtime call.
__ bind(&gc);
__ LoadRoot(r4, Heap::kFalseValueRootIndex);
__ Push(cp, r3, r4);
__ TailCallRuntime(Runtime::kNewClosure, 3, 1);
}
void FastNewContextStub::Generate(MacroAssembler* masm) {
// Try to allocate the context in new space.
Label gc;
......
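The deleted ARM loop above indexes the code map with a Smi key: Operand::PointerOffsetFromSmiKey turns the tagged index into a byte offset with a single shift. A standalone sketch of that arithmetic, assuming the 32-bit constants (kSmiTagSize = 1, kPointerSize = 4, so kPointerSizeLog2 = 2):

#include <cassert>

int main() {
  const int kSmiTagSize = 1;    // 32-bit V8: a Smi stores value << 1
  const int kPointerSizeLog2 = 2;
  const int kPointerSize = 1 << kPointerSizeLog2;
  for (int value = 0; value < 16; ++value) {
    int smi = value << kSmiTagSize;  // tagged index, as kept in r4
    // PointerOffsetFromSmiKey: shift the Smi by the remaining amount.
    assert(value * kPointerSize == smi << (kPointerSizeLog2 - kSmiTagSize));
  }
  return 0;
}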
@@ -1330,8 +1330,7 @@ void FullCodeGenerator::EmitNewClosure(Handle<SharedFunctionInfo> info,
scope()->is_function_scope() &&
info->num_literals() == 0) {
FastNewClosureStub stub(info->language_mode(), info->is_generator());
__ mov(r0, Operand(info));
__ push(r0);
__ mov(r2, Operand(info));
__ CallStub(&stub);
} else {
__ mov(r0, Operand(info));
......
@@ -260,6 +260,14 @@ void LTypeofIsAndBranch::PrintDataTo(StringStream* stream) {
}
void LStoreCodeEntry::PrintDataTo(StringStream* stream) {
stream->Add(" = ");
function()->PrintTo(stream);
stream->Add(".code_entry = ");
code_object()->PrintTo(stream);
}
void LInnerAllocatedObject::PrintDataTo(StringStream* stream) {
stream->Add(" = ");
base_object()->PrintTo(stream);
@@ -1079,6 +1087,14 @@ LInstruction* LChunkBuilder::DoPushArgument(HPushArgument* instr) {
}
LInstruction* LChunkBuilder::DoStoreCodeEntry(
HStoreCodeEntry* store_code_entry) {
LOperand* function = UseRegister(store_code_entry->function());
LOperand* code_object = UseTempRegister(store_code_entry->code_object());
return new(zone()) LStoreCodeEntry(function, code_object);
}
LInstruction* LChunkBuilder::DoInnerAllocatedObject(
HInnerAllocatedObject* inner_object) {
LOperand* base_object = UseRegisterAtStart(inner_object->base_object());
......
@@ -162,6 +162,7 @@ class LCodeGen;
V(SmiTag) \
V(SmiUntag) \
V(StackCheck) \
V(StoreCodeEntry) \
V(StoreContextSlot) \
V(StoreGlobalCell) \
V(StoreGlobalGeneric) \
@@ -1753,7 +1754,24 @@ class LDrop V8_FINAL : public LTemplateInstruction<0, 0, 0> {
};
class LInnerAllocatedObject V8_FINAL : public LTemplateInstruction<1, 1, 0> {
class LStoreCodeEntry V8_FINAL: public LTemplateInstruction<0, 1, 1> {
public:
LStoreCodeEntry(LOperand* function, LOperand* code_object) {
inputs_[0] = function;
temps_[0] = code_object;
}
LOperand* function() { return inputs_[0]; }
LOperand* code_object() { return temps_[0]; }
virtual void PrintDataTo(StringStream* stream);
DECLARE_CONCRETE_INSTRUCTION(StoreCodeEntry, "store-code-entry")
DECLARE_HYDROGEN_ACCESSOR(StoreCodeEntry)
};
class LInnerAllocatedObject V8_FINAL: public LTemplateInstruction<1, 1, 0> {
public:
explicit LInnerAllocatedObject(LOperand* base_object) {
inputs_[0] = base_object;
......
@@ -4146,6 +4146,15 @@ void LCodeGen::DoCallRuntime(LCallRuntime* instr) {
}
void LCodeGen::DoStoreCodeEntry(LStoreCodeEntry* instr) {
Register function = ToRegister(instr->function());
Register code_object = ToRegister(instr->code_object());
__ add(code_object, code_object, Operand(Code::kHeaderSize - kHeapObjectTag));
__ str(code_object,
FieldMemOperand(function, JSFunction::kCodeEntryOffset));
}
void LCodeGen::DoInnerAllocatedObject(LInnerAllocatedObject* instr) {
Register result = ToRegister(instr->result());
Register base = ToRegister(instr->base_object());
@@ -5417,8 +5426,7 @@ void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
if (!pretenure && instr->hydrogen()->has_no_literals()) {
FastNewClosureStub stub(instr->hydrogen()->language_mode(),
instr->hydrogen()->is_generator());
__ mov(r1, Operand(instr->hydrogen()->shared_info()));
__ push(r1);
__ mov(r2, Operand(instr->hydrogen()->shared_info()));
CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
} else {
__ mov(r2, Operand(instr->hydrogen()->shared_info()));
......
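Note that the ARM DoStoreCodeEntry above adds Code::kHeaderSize - kHeapObjectTag to the tagged Code pointer; the ia32/x64 versions later in this CL get the same effect from lea with a FieldOperand, which folds the -kHeapObjectTag into the displacement. A quick standalone check of that arithmetic, with illustrative constants (kHeapObjectTag = 1, Code::kHeaderSize = 32):

#include <cassert>
#include <cstdint>

int main() {
  const uintptr_t kHeapObjectTag = 1;    // illustrative tagging scheme
  const uintptr_t kCodeHeaderSize = 32;  // stand-in for Code::kHeaderSize
  uintptr_t code_start = 0x1000;                   // untagged object start
  uintptr_t tagged = code_start + kHeapObjectTag;  // tagged Code pointer
  uintptr_t entry = tagged + kCodeHeaderSize - kHeapObjectTag;
  assert(entry == code_start + kCodeHeaderSize);   // first instruction
  return 0;
}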
@@ -112,6 +112,13 @@ class CodeStubGraphBuilderBase : public HGraphBuilder {
HValue* BuildInternalArrayConstructor(ElementsKind kind,
ArgumentClass argument_class);
void BuildInstallOptimizedCode(HValue* js_function, HValue* native_context,
HValue* code_object);
void BuildInstallCode(HValue* js_function, HValue* shared_info);
void BuildInstallFromOptimizedCodeMap(HValue* js_function,
HValue* shared_info,
HValue* native_context);
private:
HValue* BuildArraySingleArgumentConstructor(JSArrayBuilder* builder);
HValue* BuildArrayNArgumentsConstructor(JSArrayBuilder* builder,
@@ -904,4 +911,194 @@ Handle<Code> ElementsTransitionAndStoreStub::GenerateCode() {
}
void CodeStubGraphBuilderBase::BuildInstallOptimizedCode(
HValue* js_function,
HValue* native_context,
HValue* code_object) {
Counters* counters = isolate()->counters();
AddIncrementCounter(counters->fast_new_closure_install_optimized(),
context());
// TODO(fschneider): Idea: store proper code pointers in the optimized code
// map and either unmangle them on marking or do nothing as the whole map is
// discarded on major GC anyway.
Add<HStoreCodeEntry>(js_function, code_object);
// Now link a function into a list of optimized functions.
HValue* optimized_functions_list = Add<HLoadNamedField>(native_context,
HObjectAccess::ForContextSlot(Context::OPTIMIZED_FUNCTIONS_LIST));
Add<HStoreNamedField>(js_function,
HObjectAccess::ForNextFunctionLinkPointer(),
optimized_functions_list);
// This store is the only one that should have a write barrier.
Add<HStoreNamedField>(native_context,
HObjectAccess::ForContextSlot(Context::OPTIMIZED_FUNCTIONS_LIST),
js_function);
}
void CodeStubGraphBuilderBase::BuildInstallCode(HValue* js_function,
HValue* shared_info) {
Add<HStoreNamedField>(js_function,
HObjectAccess::ForNextFunctionLinkPointer(),
graph()->GetConstantUndefined());
HValue* code_object = Add<HLoadNamedField>(shared_info,
HObjectAccess::ForCodeOffset());
Add<HStoreCodeEntry>(js_function, code_object);
}
void CodeStubGraphBuilderBase::BuildInstallFromOptimizedCodeMap(
HValue* js_function,
HValue* shared_info,
HValue* native_context) {
Counters* counters = isolate()->counters();
IfBuilder is_optimized(this);
HInstruction* optimized_map = Add<HLoadNamedField>(shared_info,
HObjectAccess::ForOptimizedCodeMap());
HValue* null_constant = Add<HConstant>(0);
is_optimized.If<HCompareObjectEqAndBranch>(optimized_map, null_constant);
is_optimized.Then();
{
BuildInstallCode(js_function, shared_info);
}
is_optimized.Else();
{
AddIncrementCounter(counters->fast_new_closure_try_optimized(), context());
// optimized_map points to a fixed array of 3-element entries
// (native context, optimized code, literals).
// The map must never be empty, so check the first entry.
HValue* first_context_slot = Add<HLoadNamedField>(optimized_map,
HObjectAccess::ForFirstContextSlot());
IfBuilder already_in(this);
already_in.If<HCompareObjectEqAndBranch>(native_context,
first_context_slot);
already_in.Then();
{
HValue* code_object = Add<HLoadNamedField>(optimized_map,
HObjectAccess::ForFirstCodeSlot());
BuildInstallOptimizedCode(js_function, native_context, code_object);
}
already_in.Else();
{
HValue* shared_function_entry_length =
Add<HConstant>(SharedFunctionInfo::kEntryLength);
LoopBuilder loop_builder(this,
context(),
LoopBuilder::kPostDecrement,
shared_function_entry_length);
HValue* array_length = Add<HLoadNamedField>(optimized_map,
HObjectAccess::ForFixedArrayLength());
HValue* key = loop_builder.BeginBody(array_length,
graph()->GetConstant0(),
Token::GT);
{
// Iterate through the rest of the map backwards.
// Do not double-check the first entry.
HValue* second_entry_index =
Add<HConstant>(SharedFunctionInfo::kSecondEntryIndex);
IfBuilder restore_check(this);
restore_check.If<HCompareNumericAndBranch>(key, second_entry_index,
Token::EQ);
restore_check.Then();
{
// Store the unoptimized code
BuildInstallCode(js_function, shared_info);
loop_builder.Break();
}
restore_check.Else();
{
HValue* keyed_minus = AddInstruction(HSub::New(zone(), context(), key,
shared_function_entry_length));
HInstruction* keyed_lookup = Add<HLoadKeyed>(optimized_map,
keyed_minus, static_cast<HValue*>(NULL), FAST_ELEMENTS);
IfBuilder done_check(this);
done_check.If<HCompareObjectEqAndBranch>(native_context,
keyed_lookup);
done_check.Then();
{
// Hit: fetch the optimized code.
HValue* keyed_plus = AddInstruction(HAdd::New(zone(), context(),
keyed_minus, graph()->GetConstant1()));
HValue* code_object = Add<HLoadKeyed>(optimized_map,
keyed_plus, static_cast<HValue*>(NULL), FAST_ELEMENTS);
BuildInstallOptimizedCode(js_function, native_context, code_object);
// Fall out of the loop
loop_builder.Break();
}
done_check.Else();
done_check.End();
}
restore_check.End();
}
loop_builder.EndBody();
}
already_in.End();
}
is_optimized.End();
}
template<>
HValue* CodeStubGraphBuilder<FastNewClosureStub>::BuildCodeStub() {
Counters* counters = isolate()->counters();
Factory* factory = isolate()->factory();
HInstruction* empty_fixed_array =
Add<HConstant>(factory->empty_fixed_array());
HValue* shared_info = GetParameter(0);
// Create a new closure from the given function info in new space
HValue* size = Add<HConstant>(JSFunction::kSize);
HInstruction* js_function = Add<HAllocate>(size, HType::JSObject(),
NOT_TENURED, JS_FUNCTION_TYPE);
AddIncrementCounter(counters->fast_new_closure_total(), context());
int map_index = Context::FunctionMapIndex(casted_stub()->language_mode(),
casted_stub()->is_generator());
// Compute the function map in the current native context and set that
// as the map of the allocated object.
HInstruction* native_context = BuildGetNativeContext();
HInstruction* map_slot_value = Add<HLoadNamedField>(native_context,
HObjectAccess::ForContextSlot(map_index));
Add<HStoreNamedField>(js_function, HObjectAccess::ForMap(), map_slot_value);
// Initialize the rest of the function.
Add<HStoreNamedField>(js_function, HObjectAccess::ForPropertiesPointer(),
empty_fixed_array);
Add<HStoreNamedField>(js_function, HObjectAccess::ForElementsPointer(),
empty_fixed_array);
Add<HStoreNamedField>(js_function, HObjectAccess::ForLiteralsPointer(),
empty_fixed_array);
Add<HStoreNamedField>(js_function, HObjectAccess::ForPrototypeOrInitialMap(),
graph()->GetConstantHole());
Add<HStoreNamedField>(js_function,
HObjectAccess::ForSharedFunctionInfoPointer(),
shared_info);
Add<HStoreNamedField>(js_function, HObjectAccess::ForFunctionContextPointer(),
context());
// Initialize the code pointer in the function to be the one
// found in the shared function info object.
// But first check if there is an optimized version for our context.
if (FLAG_cache_optimized_code) {
BuildInstallFromOptimizedCodeMap(js_function, shared_info, native_context);
} else {
BuildInstallCode(js_function, shared_info);
}
return js_function;
}
Handle<Code> FastNewClosureStub::GenerateCode() {
return DoGenerateCode(this);
}
} } // namespace v8::internal
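Both the deleted platform stubs and BuildInstallFromOptimizedCodeMap above walk the same structure: a flat FixedArray of 3-element entries (native context, optimized code, literals), scanned backwards, falling back to the SharedFunctionInfo's unoptimized code on a miss. A minimal standalone model of that selection, using plain pointers for V8's tagged values and ignoring the map's leading bookkeeping slots:

#include <cstddef>
#include <cstdio>
#include <vector>

struct Entry {  // one 3-element optimized-code-map entry
  const void* native_context;
  const void* code;
  const void* literals;
};

// Backwards scan over the map; a miss takes the install_unoptimized path.
const void* SelectCode(const std::vector<Entry>& map,
                       const void* context,
                       const void* unoptimized_code) {
  for (std::size_t i = map.size(); i-- > 0; ) {
    if (map[i].native_context == context) return map[i].code;
  }
  return unoptimized_code;
}

int main() {
  int ctx_a, ctx_b, code_a, unopt;
  std::vector<Entry> map = {{&ctx_a, &code_a, nullptr}};
  std::printf("%d\n", SelectCode(map, &ctx_b, &unopt) == &unopt);   // miss
  std::printf("%d\n", SelectCode(map, &ctx_a, &unopt) == &code_a);  // hit
  return 0;
}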
@@ -759,6 +759,12 @@ void ArrayConstructorStubBase::InstallDescriptors(Isolate* isolate) {
}
void FastNewClosureStub::InstallDescriptors(Isolate* isolate) {
FastNewClosureStub stub(STRICT_MODE, false);
InstallDescriptor(isolate, &stub);
}
ArrayConstructorStub::ArrayConstructorStub(Isolate* isolate)
: argument_count_(ANY) {
ArrayConstructorStubBase::GenerateStubsAheadOfTime(isolate);
......
@@ -489,20 +489,29 @@ class ToNumberStub: public HydrogenCodeStub {
};
class FastNewClosureStub : public PlatformCodeStub {
class FastNewClosureStub : public HydrogenCodeStub {
public:
explicit FastNewClosureStub(LanguageMode language_mode, bool is_generator)
: language_mode_(language_mode),
is_generator_(is_generator) { }
void Generate(MacroAssembler* masm);
virtual Handle<Code> GenerateCode();
virtual void InitializeInterfaceDescriptor(
Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor);
static void InstallDescriptors(Isolate* isolate);
LanguageMode language_mode() const { return language_mode_; }
bool is_generator() const { return is_generator_; }
private:
class StrictModeBits: public BitField<bool, 0, 1> {};
class IsGeneratorBits: public BitField<bool, 1, 1> {};
Major MajorKey() { return FastNewClosure; }
int MinorKey() {
int NotMissMinorKey() {
return StrictModeBits::encode(language_mode_ != CLASSIC_MODE) |
IsGeneratorBits::encode(is_generator_);
}
......
@@ -4045,6 +4045,15 @@ HObjectAccess HObjectAccess::ForJSObjectOffset(int offset,
}
HObjectAccess HObjectAccess::ForContextSlot(int index) {
ASSERT(index >= 0);
Portion portion = kInobject;
int offset = Context::kHeaderSize + index * kPointerSize;
ASSERT_EQ(offset, Context::SlotOffset(index) + kHeapObjectTag);
return HObjectAccess(portion, offset, Representation::Tagged());
}
HObjectAccess HObjectAccess::ForJSArrayOffset(int offset) {
ASSERT(offset >= 0);
Portion portion = kInobject;
......
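ForContextSlot records its offset in field terms (relative to the object start, for use through FieldMemOperand and FieldOperand), while Context::SlotOffset is pre-adjusted by -kHeapObjectTag so it can be applied directly to the tagged context register; the ASSERT_EQ checks that the two conventions agree. A standalone sketch with illustrative 32-bit constants (kPointerSize = 4, kHeapObjectTag = 1, and an assumed two-word FixedArray header):

#include <cassert>

int main() {
  const int kPointerSize = 4;                // illustrative 32-bit values
  const int kHeapObjectTag = 1;
  const int kHeaderSize = 2 * kPointerSize;  // assumed: map + length words

  for (int index = 0; index < 8; ++index) {
    int field_offset = kHeaderSize + index * kPointerSize;  // ForContextSlot
    int slot_offset = field_offset - kHeapObjectTag;        // Context::SlotOffset
    assert(field_offset == slot_offset + kHeapObjectTag);   // the ASSERT_EQ
  }
  return 0;
}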
@@ -164,6 +164,7 @@ class LChunkBuilder;
V(Shr) \
V(Simulate) \
V(StackCheck) \
V(StoreCodeEntry) \
V(StoreContextSlot) \
V(StoreGlobalCell) \
V(StoreGlobalGeneric) \
@@ -5200,7 +5201,33 @@ class HAllocate V8_FINAL : public HTemplateInstruction<2> {
};
class HInnerAllocatedObject V8_FINAL : public HTemplateInstruction<1> {
class HStoreCodeEntry V8_FINAL: public HTemplateInstruction<2> {
public:
static HStoreCodeEntry* New(Zone* zone,
HValue* context,
HValue* function,
HValue* code) {
return new(zone) HStoreCodeEntry(function, code);
}
virtual Representation RequiredInputRepresentation(int index) {
return Representation::Tagged();
}
HValue* function() { return OperandAt(0); }
HValue* code_object() { return OperandAt(1); }
DECLARE_CONCRETE_INSTRUCTION(StoreCodeEntry)
private:
HStoreCodeEntry(HValue* function, HValue* code) {
SetOperandAt(0, function);
SetOperandAt(1, code);
}
};
class HInnerAllocatedObject V8_FINAL: public HTemplateInstruction<1> {
public:
static HInnerAllocatedObject* New(Zone* zone,
HValue* context,
@@ -5509,6 +5536,14 @@ class HObjectAccess V8_FINAL {
return HObjectAccess(kElementsPointer, JSObject::kElementsOffset);
}
static HObjectAccess ForLiteralsPointer() {
return HObjectAccess(kInobject, JSFunction::kLiteralsOffset);
}
static HObjectAccess ForNextFunctionLinkPointer() {
return HObjectAccess(kInobject, JSFunction::kNextFunctionLinkOffset);
}
static HObjectAccess ForArrayLength(ElementsKind elements_kind) {
return HObjectAccess(
kArrayLengths,
@@ -5553,6 +5588,35 @@
return HObjectAccess(kInobject, JSFunction::kPrototypeOrInitialMapOffset);
}
static HObjectAccess ForSharedFunctionInfoPointer() {
return HObjectAccess(kInobject, JSFunction::kSharedFunctionInfoOffset);
}
static HObjectAccess ForCodeEntryPointer() {
return HObjectAccess(kInobject, JSFunction::kCodeEntryOffset);
}
static HObjectAccess ForCodeOffset() {
return HObjectAccess(kInobject, SharedFunctionInfo::kCodeOffset);
}
static HObjectAccess ForFirstCodeSlot() {
return HObjectAccess(kInobject, SharedFunctionInfo::kFirstCodeSlot);
}
static HObjectAccess ForFirstContextSlot() {
return HObjectAccess(kInobject, SharedFunctionInfo::kFirstContextSlot);
}
static HObjectAccess ForOptimizedCodeMap() {
return HObjectAccess(kInobject,
SharedFunctionInfo::kOptimizedCodeMapOffset);
}
static HObjectAccess ForFunctionContextPointer() {
return HObjectAccess(kInobject, JSFunction::kContextOffset);
}
static HObjectAccess ForMap() {
return HObjectAccess(kMaps, JSObject::kMapOffset);
}
@@ -5583,6 +5647,8 @@
// Create an access to an in-object property in a JSArray.
static HObjectAccess ForJSArrayOffset(int offset);
static HObjectAccess ForContextSlot(int index);
// Create an access to the backing store of an object.
static HObjectAccess ForBackingStoreOffset(int offset,
Representation representation = Representation::Tagged());
......
@@ -828,7 +828,6 @@ void HGraphBuilder::IfBuilder::Else() {
ASSERT(!captured_);
ASSERT(!finished_);
last_true_block_ = builder_->current_block();
ASSERT(first_true_block_ == NULL || !last_true_block_->IsFinished());
builder_->set_current_block(first_false_block_);
did_else_ = true;
}
@@ -864,9 +863,11 @@ void HGraphBuilder::IfBuilder::End() {
if (!did_else_) {
last_true_block_ = builder_->current_block();
}
if (first_true_block_ == NULL) {
if (last_true_block_ == NULL || last_true_block_->IsFinished()) {
ASSERT(did_else_);
// Return on true. Nothing to do, just continue the false block.
} else if (first_false_block_ == NULL) {
} else if (first_false_block_ == NULL ||
(did_else_ && builder_->current_block()->IsFinished())) {
// Deopt on false. Nothing to do except switching to the true block.
builder_->set_current_block(last_true_block_);
} else {
@@ -906,6 +907,24 @@ HGraphBuilder::LoopBuilder::LoopBuilder(HGraphBuilder* builder,
header_block_ = builder->CreateLoopHeaderBlock();
body_block_ = NULL;
exit_block_ = NULL;
exit_trampoline_block_ = NULL;
increment_amount_ = builder_->graph()->GetConstant1();
}
HGraphBuilder::LoopBuilder::LoopBuilder(HGraphBuilder* builder,
HValue* context,
LoopBuilder::Direction direction,
HValue* increment_amount)
: builder_(builder),
context_(context),
direction_(direction),
finished_(false) {
header_block_ = builder->CreateLoopHeaderBlock();
body_block_ = NULL;
exit_block_ = NULL;
exit_trampoline_block_ = NULL;
increment_amount_ = increment_amount;
}
@@ -921,12 +940,14 @@ HValue* HGraphBuilder::LoopBuilder::BeginBody(
HEnvironment* body_env = env->Copy();
HEnvironment* exit_env = env->Copy();
body_block_ = builder_->CreateBasicBlock(body_env);
exit_block_ = builder_->CreateBasicBlock(exit_env);
// Remove the phi from the expression stack
body_env->Pop();
exit_env->Pop();
body_block_ = builder_->CreateBasicBlock(body_env);
exit_block_ = builder_->CreateBasicBlock(exit_env);
builder_->set_current_block(header_block_);
env->Pop();
HCompareNumericAndBranch* compare =
new(zone()) HCompareNumericAndBranch(phi_, terminating, token);
compare->SetSuccessorAt(0, body_block_);
@@ -950,15 +971,26 @@ HValue* HGraphBuilder::LoopBuilder::BeginBody(
}
void HGraphBuilder::LoopBuilder::Break() {
if (exit_trampoline_block_ == NULL) {
// It's the first time we've seen a break.
HEnvironment* env = exit_block_->last_environment()->Copy();
exit_trampoline_block_ = builder_->CreateBasicBlock(env);
exit_block_->GotoNoSimulate(exit_trampoline_block_);
}
builder_->current_block()->GotoNoSimulate(exit_trampoline_block_);
}
void HGraphBuilder::LoopBuilder::EndBody() {
ASSERT(!finished_);
if (direction_ == kPostIncrement || direction_ == kPostDecrement) {
HValue* one = builder_->graph()->GetConstant1();
if (direction_ == kPostIncrement) {
increment_ = HAdd::New(zone(), context_, phi_, one);
increment_ = HAdd::New(zone(), context_, phi_, increment_amount_);
} else {
increment_ = HSub::New(zone(), context_, phi_, one);
increment_ = HSub::New(zone(), context_, phi_, increment_amount_);
}
increment_->ClearFlag(HValue::kCanOverflow);
builder_->AddInstruction(increment_);
@@ -970,9 +1002,11 @@ void HGraphBuilder::LoopBuilder::EndBody() {
last_block->GotoNoSimulate(header_block_);
header_block_->loop_information()->RegisterBackEdge(last_block);
builder_->set_current_block(exit_block_);
// Pop the phi from the expression stack
builder_->environment()->Pop();
if (exit_trampoline_block_ != NULL) {
builder_->set_current_block(exit_trampoline_block_);
} else {
builder_->set_current_block(exit_block_);
}
finished_ = true;
}
......
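The LoopBuilder changes above exist for the code-map scan: the new constructor takes a custom increment amount (kEntryLength rather than 1), and Break() routes every early exit through one shared trampoline block. In plain control-flow terms, the loop BuildInstallFromOptimizedCodeMap emits behaves like this sketch (entry indices are illustrative):

#include <cstdio>

int main() {
  const int kEntryLength = 3;       // slots per code-map entry
  const int kSecondEntryIndex = 4;  // illustrative bookkeeping boundary
  const int length = 10;            // FixedArray length of the map

  // kPostDecrement: test key > 0, run the body, then step by kEntryLength.
  for (int key = length; key > 0; key -= kEntryLength) {
    if (key == kSecondEntryIndex) {
      std::puts("miss: install the unoptimized code");  // Break()
      break;
    }
    // ...compare the context at key - kEntryLength against the native
    // context; on a hit, install the cached code and Break() as well...
  }
  return 0;
}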
@@ -1412,6 +1412,11 @@ class HGraphBuilder {
LoopBuilder(HGraphBuilder* builder,
HValue* context,
Direction direction);
LoopBuilder(HGraphBuilder* builder,
HValue* context,
Direction direction,
HValue* increment_amount);
~LoopBuilder() {
ASSERT(finished_);
}
@@ -1420,6 +1425,9 @@
HValue* initial,
HValue* terminating,
Token::Value token);
void Break();
void EndBody();
private:
@@ -1427,11 +1435,13 @@
HGraphBuilder* builder_;
HValue* context_;
HValue* increment_amount_;
HInstruction* increment_;
HPhi* phi_;
HBasicBlock* header_block_;
HBasicBlock* body_block_;
HBasicBlock* exit_block_;
HBasicBlock* exit_trampoline_block_;
Direction direction_;
bool finished_;
};
......
@@ -43,6 +43,17 @@ namespace v8 {
namespace internal {
void FastNewClosureStub::InitializeInterfaceDescriptor(
Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
static Register registers[] = { ebx };
descriptor->register_param_count_ = 1;
descriptor->register_params_ = registers;
descriptor->deoptimization_handler_ =
Runtime::FunctionForId(Runtime::kNewClosureFromStubFailure)->entry;
}
void ToNumberStub::InitializeInterfaceDescriptor(
Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
@@ -299,133 +310,6 @@ void HydrogenCodeStub::GenerateLightweightMiss(MacroAssembler* masm) {
}
void FastNewClosureStub::Generate(MacroAssembler* masm) {
// Create a new closure from the given function info in new
// space. Set the context to the current context in esi.
Counters* counters = masm->isolate()->counters();
Label gc;
__ Allocate(JSFunction::kSize, eax, ebx, ecx, &gc, TAG_OBJECT);
__ IncrementCounter(counters->fast_new_closure_total(), 1);
// Get the function info from the stack.
__ mov(edx, Operand(esp, 1 * kPointerSize));
int map_index = Context::FunctionMapIndex(language_mode_, is_generator_);
// Compute the function map in the current native context and set that
// as the map of the allocated object.
__ mov(ecx, Operand(esi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
__ mov(ecx, FieldOperand(ecx, GlobalObject::kNativeContextOffset));
__ mov(ebx, Operand(ecx, Context::SlotOffset(map_index)));
__ mov(FieldOperand(eax, JSObject::kMapOffset), ebx);
// Initialize the rest of the function. We don't have to update the
// write barrier because the allocated object is in new space.
Factory* factory = masm->isolate()->factory();
__ mov(ebx, Immediate(factory->empty_fixed_array()));
__ mov(FieldOperand(eax, JSObject::kPropertiesOffset), ebx);
__ mov(FieldOperand(eax, JSObject::kElementsOffset), ebx);
__ mov(FieldOperand(eax, JSFunction::kPrototypeOrInitialMapOffset),
Immediate(factory->the_hole_value()));
__ mov(FieldOperand(eax, JSFunction::kSharedFunctionInfoOffset), edx);
__ mov(FieldOperand(eax, JSFunction::kContextOffset), esi);
__ mov(FieldOperand(eax, JSFunction::kLiteralsOffset), ebx);
// Initialize the code pointer in the function to be the one
// found in the shared function info object.
// But first check if there is an optimized version for our context.
Label check_optimized;
Label install_unoptimized;
if (FLAG_cache_optimized_code) {
__ mov(ebx, FieldOperand(edx, SharedFunctionInfo::kOptimizedCodeMapOffset));
__ test(ebx, ebx);
__ j(not_zero, &check_optimized, Label::kNear);
}
__ bind(&install_unoptimized);
__ mov(FieldOperand(eax, JSFunction::kNextFunctionLinkOffset),
Immediate(factory->undefined_value()));
__ mov(edx, FieldOperand(edx, SharedFunctionInfo::kCodeOffset));
__ lea(edx, FieldOperand(edx, Code::kHeaderSize));
__ mov(FieldOperand(eax, JSFunction::kCodeEntryOffset), edx);
// Return and remove the on-stack parameter.
__ ret(1 * kPointerSize);
__ bind(&check_optimized);
__ IncrementCounter(counters->fast_new_closure_try_optimized(), 1);
// ecx holds native context, ebx points to fixed array of 3-element entries
// (native context, optimized code, literals).
// The map must never be empty, so check the first entry.
Label install_optimized;
// Speculatively move code object into edx.
__ mov(edx, FieldOperand(ebx, SharedFunctionInfo::kFirstCodeSlot));
__ cmp(ecx, FieldOperand(ebx, SharedFunctionInfo::kFirstContextSlot));
__ j(equal, &install_optimized);
// Iterate through the rest of the map backwards. edx holds an index as a Smi.
Label loop;
Label restore;
__ mov(edx, FieldOperand(ebx, FixedArray::kLengthOffset));
__ bind(&loop);
// Do not double-check the first entry.
__ cmp(edx, Immediate(Smi::FromInt(SharedFunctionInfo::kSecondEntryIndex)));
__ j(equal, &restore);
__ sub(edx, Immediate(Smi::FromInt(SharedFunctionInfo::kEntryLength)));
__ cmp(ecx, CodeGenerator::FixedArrayElementOperand(ebx, edx, 0));
__ j(not_equal, &loop, Label::kNear);
// Hit: fetch the optimized code.
__ mov(edx, CodeGenerator::FixedArrayElementOperand(ebx, edx, 1));
__ bind(&install_optimized);
__ IncrementCounter(counters->fast_new_closure_install_optimized(), 1);
// TODO(fschneider): Idea: store proper code pointers in the optimized code
// map and either unmangle them on marking or do nothing as the whole map is
// discarded on major GC anyway.
__ lea(edx, FieldOperand(edx, Code::kHeaderSize));
__ mov(FieldOperand(eax, JSFunction::kCodeEntryOffset), edx);
// Now link a function into a list of optimized functions.
__ mov(edx, ContextOperand(ecx, Context::OPTIMIZED_FUNCTIONS_LIST));
__ mov(FieldOperand(eax, JSFunction::kNextFunctionLinkOffset), edx);
// No need for write barrier as JSFunction (eax) is in the new space.
__ mov(ContextOperand(ecx, Context::OPTIMIZED_FUNCTIONS_LIST), eax);
// Store JSFunction (eax) into edx before issuing write barrier as
// it clobbers all the registers passed.
__ mov(edx, eax);
__ RecordWriteContextSlot(
ecx,
Context::SlotOffset(Context::OPTIMIZED_FUNCTIONS_LIST),
edx,
ebx,
kDontSaveFPRegs);
// Return and remove the on-stack parameter.
__ ret(1 * kPointerSize);
__ bind(&restore);
// Restore SharedFunctionInfo into edx.
__ mov(edx, Operand(esp, 1 * kPointerSize));
__ jmp(&install_unoptimized);
// Create a new closure through the slower runtime call.
__ bind(&gc);
__ pop(ecx); // Temporarily remove return address.
__ pop(edx);
__ push(esi);
__ push(edx);
__ push(Immediate(factory->false_value()));
__ push(ecx); // Restore return address.
__ TailCallRuntime(Runtime::kNewClosure, 3, 1);
}
void FastNewContextStub::Generate(MacroAssembler* masm) {
// Try to allocate the context in new space.
Label gc;
......
@@ -1268,7 +1268,7 @@ void FullCodeGenerator::EmitNewClosure(Handle<SharedFunctionInfo> info,
scope()->is_function_scope() &&
info->num_literals() == 0) {
FastNewClosureStub stub(info->language_mode(), info->is_generator());
__ push(Immediate(info));
__ mov(ebx, Immediate(info));
__ CallStub(&stub);
} else {
__ push(esi);
......
@@ -4349,6 +4349,14 @@ void LCodeGen::DoCallRuntime(LCallRuntime* instr) {
}
void LCodeGen::DoStoreCodeEntry(LStoreCodeEntry* instr) {
Register function = ToRegister(instr->function());
Register code_object = ToRegister(instr->code_object());
__ lea(code_object, FieldOperand(code_object, Code::kHeaderSize));
__ mov(FieldOperand(function, JSFunction::kCodeEntryOffset), code_object);
}
void LCodeGen::DoInnerAllocatedObject(LInnerAllocatedObject* instr) {
Register result = ToRegister(instr->result());
Register base = ToRegister(instr->base_object());
@@ -6209,7 +6217,7 @@ void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
if (!pretenure && instr->hydrogen()->has_no_literals()) {
FastNewClosureStub stub(instr->hydrogen()->language_mode(),
instr->hydrogen()->is_generator());
__ push(Immediate(instr->hydrogen()->shared_info()));
__ mov(ebx, Immediate(instr->hydrogen()->shared_info()));
CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
} else {
__ push(esi);
......
@@ -290,6 +290,14 @@ void LTypeofIsAndBranch::PrintDataTo(StringStream* stream) {
}
void LStoreCodeEntry::PrintDataTo(StringStream* stream) {
stream->Add(" = ");
function()->PrintTo(stream);
stream->Add(".code_entry = ");
code_object()->PrintTo(stream);
}
void LInnerAllocatedObject::PrintDataTo(StringStream* stream) {
stream->Add(" = ");
base_object()->PrintTo(stream);
@@ -1150,6 +1158,14 @@ LInstruction* LChunkBuilder::DoPushArgument(HPushArgument* instr) {
}
LInstruction* LChunkBuilder::DoStoreCodeEntry(
HStoreCodeEntry* store_code_entry) {
LOperand* function = UseRegister(store_code_entry->function());
LOperand* code_object = UseTempRegister(store_code_entry->code_object());
return new(zone()) LStoreCodeEntry(function, code_object);
}
LInstruction* LChunkBuilder::DoInnerAllocatedObject(
HInnerAllocatedObject* inner_object) {
LOperand* base_object = UseRegisterAtStart(inner_object->base_object());
......
@@ -161,6 +161,7 @@ class LCodeGen;
V(SmiTag) \
V(SmiUntag) \
V(StackCheck) \
V(StoreCodeEntry) \
V(StoreContextSlot) \
V(StoreGlobalCell) \
V(StoreGlobalGeneric) \
@@ -1777,7 +1778,24 @@ class LDrop V8_FINAL : public LTemplateInstruction<0, 0, 0> {
};
class LInnerAllocatedObject V8_FINAL : public LTemplateInstruction<1, 1, 0> {
class LStoreCodeEntry V8_FINAL: public LTemplateInstruction<0, 1, 1> {
public:
LStoreCodeEntry(LOperand* function, LOperand* code_object) {
inputs_[0] = function;
temps_[0] = code_object;
}
LOperand* function() { return inputs_[0]; }
LOperand* code_object() { return temps_[0]; }
virtual void PrintDataTo(StringStream* stream);
DECLARE_CONCRETE_INSTRUCTION(StoreCodeEntry, "store-code-entry")
DECLARE_HYDROGEN_ACCESSOR(StoreCodeEntry)
};
class LInnerAllocatedObject V8_FINAL: public LTemplateInstruction<1, 1, 0> {
public:
explicit LInnerAllocatedObject(LOperand* base_object) {
inputs_[0] = base_object;
......
@@ -2325,6 +2325,7 @@ bool Isolate::Init(Deserializer* des) {
ToBooleanStub::InitializeForIsolate(this);
ArrayConstructorStubBase::InstallDescriptors(this);
InternalArrayConstructorStubBase::InstallDescriptors(this);
FastNewClosureStub::InstallDescriptors(this);
}
if (FLAG_concurrent_recompilation) optimizing_compiler_thread_.Start();
......
@@ -7951,6 +7951,20 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_NewStrictArgumentsFast) {
}
RUNTIME_FUNCTION(MaybeObject*, Runtime_NewClosureFromStubFailure) {
HandleScope scope(isolate);
ASSERT(args.length() == 1);
CONVERT_ARG_HANDLE_CHECKED(SharedFunctionInfo, shared, 0);
Handle<Context> context(isolate->context());
PretenureFlag pretenure_flag = NOT_TENURED;
Handle<JSFunction> result =
isolate->factory()->NewFunctionFromSharedFunctionInfo(shared,
context,
pretenure_flag);
return *result;
}
RUNTIME_FUNCTION(MaybeObject*, Runtime_NewClosure) {
HandleScope scope(isolate);
ASSERT(args.length() == 3);
......
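Runtime_NewClosureFromStubFailure above is the deoptimization_handler_ the new interface descriptors point at: when the Hydrogen stub's new-space allocation fails, the miss path re-enters the runtime with only the SharedFunctionInfo (the context comes from the isolate, and pretenuring is fixed at NOT_TENURED, matching the false flag the removed stubs pushed for Runtime::kNewClosure). A rough sketch of that fast-path/fallback shape, with stand-in types in place of V8's:

#include <optional>

struct SharedFunctionInfo {};
struct JSFunction {};

// Stand-in for the Hydrogen stub body: fails when new space is full.
std::optional<JSFunction*> TryFastNewClosure(SharedFunctionInfo*) {
  return std::nullopt;  // pretend the allocation failed
}

// Stand-in for Runtime_NewClosureFromStubFailure: allocates through the
// factory, so it can trigger GC and always succeed.
JSFunction* NewClosureFromStubFailure(SharedFunctionInfo* /*shared*/) {
  static JSFunction function;
  return &function;
}

JSFunction* NewClosure(SharedFunctionInfo* shared) {
  if (auto fast = TryFastNewClosure(shared)) return *fast;  // fast path
  return NewClosureFromStubFailure(shared);                 // miss handler
}

int main() { return NewClosure(nullptr) != nullptr ? 0 : 1; }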
@@ -395,6 +395,7 @@ namespace internal {
\
/* Statements */ \
F(NewClosure, 3, 1) \
F(NewClosureFromStubFailure, 1, 1) \
F(NewObject, 1, 1) \
F(NewObjectFromBound, 1, 1) \
F(FinalizeInstanceSize, 1, 1) \
......
@@ -39,6 +39,17 @@ namespace v8 {
namespace internal {
void FastNewClosureStub::InitializeInterfaceDescriptor(
Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
static Register registers[] = { rbx };
descriptor->register_param_count_ = 1;
descriptor->register_params_ = registers;
descriptor->deoptimization_handler_ =
Runtime::FunctionForId(Runtime::kNewClosureFromStubFailure)->entry;
}
void ToNumberStub::InitializeInterfaceDescriptor(
Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
@@ -295,141 +306,6 @@ void HydrogenCodeStub::GenerateLightweightMiss(MacroAssembler* masm) {
}
void FastNewClosureStub::Generate(MacroAssembler* masm) {
// Create a new closure from the given function info in new
// space. Set the context to the current context in rsi.
Counters* counters = masm->isolate()->counters();
Label gc;
__ Allocate(JSFunction::kSize, rax, rbx, rcx, &gc, TAG_OBJECT);
__ IncrementCounter(counters->fast_new_closure_total(), 1);
// Get the function info from the stack.
StackArgumentsAccessor args(rsp, 1, ARGUMENTS_DONT_CONTAIN_RECEIVER);
__ movq(rdx, args.GetArgumentOperand(0));
int map_index = Context::FunctionMapIndex(language_mode_, is_generator_);
// Compute the function map in the current native context and set that
// as the map of the allocated object.
__ movq(rcx, Operand(rsi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
__ movq(rcx, FieldOperand(rcx, GlobalObject::kNativeContextOffset));
__ movq(rbx, Operand(rcx, Context::SlotOffset(map_index)));
__ movq(FieldOperand(rax, JSObject::kMapOffset), rbx);
// Initialize the rest of the function. We don't have to update the
// write barrier because the allocated object is in new space.
__ LoadRoot(rbx, Heap::kEmptyFixedArrayRootIndex);
__ LoadRoot(r8, Heap::kTheHoleValueRootIndex);
__ LoadRoot(rdi, Heap::kUndefinedValueRootIndex);
__ movq(FieldOperand(rax, JSObject::kPropertiesOffset), rbx);
__ movq(FieldOperand(rax, JSObject::kElementsOffset), rbx);
__ movq(FieldOperand(rax, JSFunction::kPrototypeOrInitialMapOffset), r8);
__ movq(FieldOperand(rax, JSFunction::kSharedFunctionInfoOffset), rdx);
__ movq(FieldOperand(rax, JSFunction::kContextOffset), rsi);
__ movq(FieldOperand(rax, JSFunction::kLiteralsOffset), rbx);
// Initialize the code pointer in the function to be the one
// found in the shared function info object.
// But first check if there is an optimized version for our context.
Label check_optimized;
Label install_unoptimized;
if (FLAG_cache_optimized_code) {
__ movq(rbx,
FieldOperand(rdx, SharedFunctionInfo::kOptimizedCodeMapOffset));
__ testq(rbx, rbx);
__ j(not_zero, &check_optimized, Label::kNear);
}
__ bind(&install_unoptimized);
__ movq(FieldOperand(rax, JSFunction::kNextFunctionLinkOffset),
rdi); // Initialize with undefined.
__ movq(rdx, FieldOperand(rdx, SharedFunctionInfo::kCodeOffset));
__ lea(rdx, FieldOperand(rdx, Code::kHeaderSize));
__ movq(FieldOperand(rax, JSFunction::kCodeEntryOffset), rdx);
// Return and remove the on-stack parameter.
__ ret(1 * kPointerSize);
__ bind(&check_optimized);
__ IncrementCounter(counters->fast_new_closure_try_optimized(), 1);
// rcx holds native context, rbx points to fixed array of 3-element entries
// (native context, optimized code, literals).
// The optimized code map must never be empty, so check the first entry.
Label install_optimized;
// Speculatively move code object into rdx.
__ movq(rdx, FieldOperand(rbx, SharedFunctionInfo::kFirstCodeSlot));
__ cmpq(rcx, FieldOperand(rbx, SharedFunctionInfo::kFirstContextSlot));
__ j(equal, &install_optimized);
// Iterate through the rest of the map backwards. rdx holds an index.
Label loop;
Label restore;
__ movq(rdx, FieldOperand(rbx, FixedArray::kLengthOffset));
__ SmiToInteger32(rdx, rdx);
__ bind(&loop);
// Do not double-check the first entry.
__ cmpq(rdx, Immediate(SharedFunctionInfo::kSecondEntryIndex));
__ j(equal, &restore);
__ subq(rdx, Immediate(SharedFunctionInfo::kEntryLength));
__ cmpq(rcx, FieldOperand(rbx,
rdx,
times_pointer_size,
FixedArray::kHeaderSize));
__ j(not_equal, &loop, Label::kNear);
// Hit: fetch the optimized code.
__ movq(rdx, FieldOperand(rbx,
rdx,
times_pointer_size,
FixedArray::kHeaderSize + 1 * kPointerSize));
__ bind(&install_optimized);
__ IncrementCounter(counters->fast_new_closure_install_optimized(), 1);
// TODO(fschneider): Idea: store proper code pointers in the map and either
// unmangle them on marking or do nothing as the whole map is discarded on
// major GC anyway.
__ lea(rdx, FieldOperand(rdx, Code::kHeaderSize));
__ movq(FieldOperand(rax, JSFunction::kCodeEntryOffset), rdx);
// Now link a function into a list of optimized functions.
__ movq(rdx, ContextOperand(rcx, Context::OPTIMIZED_FUNCTIONS_LIST));
__ movq(FieldOperand(rax, JSFunction::kNextFunctionLinkOffset), rdx);
// No need for write barrier as JSFunction (rax) is in the new space.
__ movq(ContextOperand(rcx, Context::OPTIMIZED_FUNCTIONS_LIST), rax);
// Store JSFunction (rax) into rdx before issuing write barrier as
// it clobbers all the registers passed.
__ movq(rdx, rax);
__ RecordWriteContextSlot(
rcx,
Context::SlotOffset(Context::OPTIMIZED_FUNCTIONS_LIST),
rdx,
rbx,
kDontSaveFPRegs);
// Return and remove the on-stack parameter.
__ ret(1 * kPointerSize);
__ bind(&restore);
__ movq(rdx, args.GetArgumentOperand(0));
__ jmp(&install_unoptimized);
// Create a new closure through the slower runtime call.
__ bind(&gc);
__ PopReturnAddressTo(rcx);
__ pop(rdx);
__ push(rsi);
__ push(rdx);
__ PushRoot(Heap::kFalseValueRootIndex);
__ PushReturnAddressFrom(rcx);
__ TailCallRuntime(Runtime::kNewClosure, 3, 1);
}
void FastNewContextStub::Generate(MacroAssembler* masm) {
// Try to allocate the context in new space.
Label gc;
......
@@ -1292,7 +1292,7 @@ void FullCodeGenerator::EmitNewClosure(Handle<SharedFunctionInfo> info,
scope()->is_function_scope() &&
info->num_literals() == 0) {
FastNewClosureStub stub(info->language_mode(), info->is_generator());
__ Push(info);
__ Move(rbx, info);
__ CallStub(&stub);
} else {
__ push(rsi);
......
@@ -3906,6 +3906,14 @@ void LCodeGen::DoCallRuntime(LCallRuntime* instr) {
}
void LCodeGen::DoStoreCodeEntry(LStoreCodeEntry* instr) {
Register function = ToRegister(instr->function());
Register code_object = ToRegister(instr->code_object());
__ lea(code_object, FieldOperand(code_object, Code::kHeaderSize));
__ movq(FieldOperand(function, JSFunction::kCodeEntryOffset), code_object);
}
void LCodeGen::DoInnerAllocatedObject(LInnerAllocatedObject* instr) {
Register result = ToRegister(instr->result());
Register base = ToRegister(instr->base_object());
@@ -5167,7 +5175,7 @@ void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
if (!pretenure && instr->hydrogen()->has_no_literals()) {
FastNewClosureStub stub(instr->hydrogen()->language_mode(),
instr->hydrogen()->is_generator());
__ Push(instr->hydrogen()->shared_info());
__ Move(rbx, instr->hydrogen()->shared_info());
CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
} else {
__ push(rsi);
......
@@ -263,6 +263,14 @@ void LTypeofIsAndBranch::PrintDataTo(StringStream* stream) {
}
void LStoreCodeEntry::PrintDataTo(StringStream* stream) {
stream->Add(" = ");
function()->PrintTo(stream);
stream->Add(".code_entry = ");
code_object()->PrintTo(stream);
}
void LInnerAllocatedObject::PrintDataTo(StringStream* stream) {
stream->Add(" = ");
base_object()->PrintTo(stream);
@@ -1083,6 +1091,14 @@ LInstruction* LChunkBuilder::DoPushArgument(HPushArgument* instr) {
}
LInstruction* LChunkBuilder::DoStoreCodeEntry(
HStoreCodeEntry* store_code_entry) {
LOperand* function = UseRegister(store_code_entry->function());
LOperand* code_object = UseTempRegister(store_code_entry->code_object());
return new(zone()) LStoreCodeEntry(function, code_object);
}
LInstruction* LChunkBuilder::DoInnerAllocatedObject(
HInnerAllocatedObject* inner_object) {
LOperand* base_object = UseRegisterAtStart(inner_object->base_object());
......
@@ -160,6 +160,7 @@ class LCodeGen;
V(SmiTag) \
V(SmiUntag) \
V(StackCheck) \
V(StoreCodeEntry) \
V(StoreContextSlot) \
V(StoreGlobalCell) \
V(StoreGlobalGeneric) \
@@ -1693,7 +1694,24 @@ class LDrop V8_FINAL : public LTemplateInstruction<0, 0, 0> {
};
class LInnerAllocatedObject V8_FINAL : public LTemplateInstruction<1, 1, 0> {
class LStoreCodeEntry V8_FINAL: public LTemplateInstruction<0, 1, 1> {
public:
LStoreCodeEntry(LOperand* function, LOperand* code_object) {
inputs_[0] = function;
temps_[0] = code_object;
}
LOperand* function() { return inputs_[0]; }
LOperand* code_object() { return temps_[0]; }
virtual void PrintDataTo(StringStream* stream);
DECLARE_CONCRETE_INSTRUCTION(StoreCodeEntry, "store-code-entry")
DECLARE_HYDROGEN_ACCESSOR(StoreCodeEntry)
};
class LInnerAllocatedObject V8_FINAL: public LTemplateInstruction<1, 1, 0> {
public:
explicit LInnerAllocatedObject(LOperand* base_object) {
inputs_[0] = base_object;
......