Commit 62f3e48d authored by whesse@chromium.org

Refactor Reference so that SetValue and GetValue pop the reference state.

Review URL: http://codereview.chromium.org/487017

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@3720 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
parent 80d71c64
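
A minimal sketch of the resulting lifecycle, using only names that appear
in the patch below (illustrative, not code from the change itself):

    // State transitions of a Reference after this change:
    //   ILLEGAL --LoadReference--> SLOT | NAMED | KEYED
    //   SLOT | NAMED | KEYED --GetValue / SetValue / UnloadReference--> UNLOADED
    //
    // GetValue leaves the reference loaded only when the Reference was
    // constructed with persist_after_get = true (compound assignments), and
    // ~Reference() now asserts is_unloaded() || is_illegal().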
// Copyright 2006-2009 the V8 project authors. All rights reserved.
// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -605,14 +605,19 @@ void CodeGenerator::LoadTypeofExpression(Expression* expr) {
}
Reference::Reference(CodeGenerator* cgen, Expression* expression)
: cgen_(cgen), expression_(expression), type_(ILLEGAL) {
Reference::Reference(CodeGenerator* cgen,
Expression* expression,
bool persist_after_get)
: cgen_(cgen),
expression_(expression),
type_(ILLEGAL),
persist_after_get_(persist_after_get) {
cgen->LoadReference(this);
}
Reference::~Reference() {
cgen_->UnloadReference(this);
ASSERT(is_unloaded() || is_illegal());
}
@@ -661,6 +666,7 @@ void CodeGenerator::UnloadReference(Reference* ref) {
frame_->Drop(size);
frame_->EmitPush(r0);
}
ref->set_unloaded();
}
@@ -1244,8 +1250,6 @@ void CodeGenerator::VisitDeclaration(Declaration* node) {
Reference target(this, node->proxy());
LoadAndSpill(val);
target.SetValue(NOT_CONST_INIT);
// The reference is removed from the stack (preserving TOS) when
// it goes out of scope.
}
// Get rid of the assigned value (declarations are statements).
frame_->Drop();
@@ -1932,25 +1936,17 @@ void CodeGenerator::VisitForInStatement(ForInStatement* node) {
if (each.size() > 0) {
__ ldr(r0, frame_->ElementAt(each.size()));
frame_->EmitPush(r0);
}
// If the reference was to a slot we rely on the convenient property
// that it doesn't matter whether a value (eg, r3 pushed above) is
// right on top of or right underneath a zero-sized reference.
each.SetValue(NOT_CONST_INIT);
if (each.size() > 0) {
// It's safe to pop the value lying on top of the reference before
// unloading the reference itself (which preserves the top of stack,
// ie, now the topmost value of the non-zero sized reference), since
// we will discard the top of stack after unloading the reference
// anyway.
frame_->EmitPop(r0);
each.SetValue(NOT_CONST_INIT);
frame_->Drop(2);
} else {
// If the reference was to a slot we rely on the convenient property
// that it doesn't matter whether a value (eg, r3 pushed above) is
// right on top of or right underneath a zero-sized reference.
each.SetValue(NOT_CONST_INIT);
frame_->Drop();
}
}
}
// Discard the i'th entry pushed above or else the remainder of the
// reference, whichever is currently on top of the stack.
frame_->Drop();
// Body.
CheckStack(); // TODO(1222600): ignore if body contains calls.
VisitAndSpill(node->body());
@@ -2844,7 +2840,7 @@ void CodeGenerator::VisitAssignment(Assignment* node) {
VirtualFrame::SpilledScope spilled_scope;
Comment cmnt(masm_, "[ Assignment");
{ Reference target(this, node->target());
{ Reference target(this, node->target(), node->is_compound());
if (target.is_illegal()) {
// Fool the virtual frame into thinking that we left the assignment's
// value on the frame.
@@ -2859,8 +2855,7 @@ void CodeGenerator::VisitAssignment(Assignment* node) {
node->op() == Token::INIT_CONST) {
LoadAndSpill(node->value());
} else {
// +=, *= and similar binary assignments.
} else { // Assignment is a compound assignment.
// Get the old value of the lhs.
target.GetValueAndSpill();
Literal* literal = node->value()->AsLiteral();
@@ -2881,13 +2876,12 @@ void CodeGenerator::VisitAssignment(Assignment* node) {
frame_->EmitPush(r0);
}
}
Variable* var = node->target()->AsVariableProxy()->AsVariable();
if (var != NULL &&
(var->mode() == Variable::CONST) &&
node->op() != Token::INIT_VAR && node->op() != Token::INIT_CONST) {
// Assignment ignored - leave the value on the stack.
UnloadReference(&target);
} else {
CodeForSourcePosition(node->position());
if (node->op() == Token::INIT_CONST) {
@@ -3097,16 +3091,20 @@ void CodeGenerator::VisitCall(Call* node) {
// JavaScript example: 'array[index](1, 2, 3)'
// -------------------------------------------
// Load the function to call from the property through a reference.
Reference ref(this, property);
ref.GetValueAndSpill(); // receiver
// Pass receiver to called function.
LoadAndSpill(property->obj());
LoadAndSpill(property->key());
EmitKeyedLoad(false);
frame_->Drop(); // key
// Put the function below the receiver.
if (property->is_synthetic()) {
// Use the global receiver.
frame_->Drop();
frame_->EmitPush(r0);
LoadGlobalReceiver(r0);
} else {
__ ldr(r0, frame_->ElementAt(ref.size()));
frame_->EmitPush(r0);
frame_->EmitPop(r1); // receiver
frame_->EmitPush(r0); // function
frame_->EmitPush(r1); // receiver
}
// Call the function.
@@ -3807,7 +3805,9 @@ void CodeGenerator::VisitCountOperation(CountOperation* node) {
frame_->EmitPush(r0);
}
{ Reference target(this, node->expression());
// A constant reference is not saved to, so the reference is not a
// compound assignment reference.
{ Reference target(this, node->expression(), !is_const);
if (target.is_illegal()) {
// Spoof the virtual frame to have the expected height (one higher
// than on entry).
@@ -4268,6 +4268,16 @@ void CodeGenerator::VisitCompareOperation(CompareOperation* node) {
}
void CodeGenerator::EmitKeyedLoad(bool is_global) {
Comment cmnt(masm_, "[ Load from keyed Property");
Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
RelocInfo::Mode rmode = is_global
? RelocInfo::CODE_TARGET_CONTEXT
: RelocInfo::CODE_TARGET;
frame_->CallCodeObject(ic, rmode, 0);
}
#ifdef DEBUG
bool CodeGenerator::HasValidEntryRegisters() { return true; }
#endif
@@ -4334,23 +4344,21 @@ void Reference::GetValue() {
case KEYED: {
// TODO(181): Implement inlined version of array indexing once
// loop nesting is properly tracked on ARM.
VirtualFrame* frame = cgen_->frame();
Comment cmnt(masm, "[ Load from keyed Property");
ASSERT(property != NULL);
Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
Variable* var = expression_->AsVariableProxy()->AsVariable();
ASSERT(var == NULL || var->is_global());
RelocInfo::Mode rmode = (var == NULL)
? RelocInfo::CODE_TARGET
: RelocInfo::CODE_TARGET_CONTEXT;
frame->CallCodeObject(ic, rmode, 0);
frame->EmitPush(r0);
cgen_->EmitKeyedLoad(var != NULL);
cgen_->frame()->EmitPush(r0);
break;
}
default:
UNREACHABLE();
}
if (!persist_after_get_) {
cgen_->UnloadReference(this);
}
}
@@ -4412,6 +4420,7 @@ void Reference::SetValue(InitState init_state) {
default:
UNREACHABLE();
}
cgen_->UnloadReference(this);
}
// Copyright 2006-2008 the V8 project authors. All rights reserved.
// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -43,57 +43,69 @@ enum TypeofState { INSIDE_TYPEOF, NOT_INSIDE_TYPEOF };
// -------------------------------------------------------------------------
// Reference support
// A reference is a C++ stack-allocated object that keeps an ECMA
// reference on the execution stack while in scope. For variables
// the reference is empty, indicating that it isn't necessary to
// store state on the stack for keeping track of references to those.
// For properties, we keep either one (named) or two (indexed) values
// on the execution stack to represent the reference.
// A reference is a C++ stack-allocated object that puts a
// reference on the virtual frame. The reference may be consumed
// by GetValue, TakeValue, SetValue, and Codegen::UnloadReference.
// When the lifetime (scope) of a valid reference ends, it must have
// been consumed, and be in state UNLOADED.
class Reference BASE_EMBEDDED {
public:
// The values of the types are important; see size().
enum Type { ILLEGAL = -1, SLOT = 0, NAMED = 1, KEYED = 2 };
Reference(CodeGenerator* cgen, Expression* expression);
enum Type { UNLOADED = -2, ILLEGAL = -1, SLOT = 0, NAMED = 1, KEYED = 2 };
Reference(CodeGenerator* cgen,
Expression* expression,
bool persist_after_get = false);
~Reference();
Expression* expression() const { return expression_; }
Type type() const { return type_; }
void set_type(Type value) {
ASSERT(type_ == ILLEGAL);
ASSERT_EQ(ILLEGAL, type_);
type_ = value;
}
void set_unloaded() {
ASSERT_NE(ILLEGAL, type_);
ASSERT_NE(UNLOADED, type_);
type_ = UNLOADED;
}
// The size the reference takes up on the stack.
int size() const { return (type_ == ILLEGAL) ? 0 : type_; }
int size() const {
return (type_ < SLOT) ? 0 : type_;
}
bool is_illegal() const { return type_ == ILLEGAL; }
bool is_slot() const { return type_ == SLOT; }
bool is_property() const { return type_ == NAMED || type_ == KEYED; }
bool is_unloaded() const { return type_ == UNLOADED; }
// Return the name. Only valid for named property references.
Handle<String> GetName();
// Generate code to push the value of the reference on top of the
// expression stack. The reference is expected to be already on top of
// the expression stack, and it is left in place with its value above it.
// the expression stack, and it is consumed by the call unless the
// reference is for a compound assignment.
// If the reference is not consumed, it is left in place under its value.
void GetValue();
// Generate code to push the value of a reference on top of the expression
// stack and then spill the stack frame. This function is used temporarily
// while the code generator is being transformed.
// Generate code to pop a reference, push the value of the reference,
// and then spill the stack frame.
inline void GetValueAndSpill();
// Generate code to store the value on top of the expression stack in the
// reference. The reference is expected to be immediately below the value
// on the expression stack. The stored value is left in place (with the
// reference intact below it) to support chained assignments.
// on the expression stack. The value is stored in the location specified
// by the reference, and is left on top of the stack, after the reference
// is popped from beneath it (unloaded).
void SetValue(InitState init_state);
private:
CodeGenerator* cgen_;
Expression* expression_;
Type type_;
// Keep the reference on the stack after get, so it can be used by set later.
bool persist_after_get_;
};
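// A minimal usage sketch of the two modes (illustrative; cgen and expr are
// stand-ins, not names from this patch):
//
//   { Reference ref(cgen, expr);            // persist_after_get == false
//     ref.GetValue();                       // value pushed, ref unloaded
//   }                                       // ~Reference: is_unloaded()
//
//   { Reference target(cgen, expr, true);   // persist_after_get == true
//     target.GetValue();                    // old value pushed, ref kept
//     // ...combine with the right-hand side value...
//     target.SetValue(NOT_CONST_INIT);      // stores, then unloads
//   }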
@@ -274,6 +286,9 @@ class CodeGenerator: public AstVisitor {
void LoadFromSlot(Slot* slot, TypeofState typeof_state);
// Store the value on top of the stack to a slot.
void StoreToSlot(Slot* slot, InitState init_state);
// Load a keyed property, leaving it in r0. The receiver and key are
// passed on the stack, and remain there.
void EmitKeyedLoad(bool is_global);
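// Illustrative call pattern, mirroring the VisitCall change in this patch:
//   LoadAndSpill(property->obj());   // push receiver
//   LoadAndSpill(property->key());   // push key
//   EmitKeyedLoad(false);            // result in r0; receiver and key stay
//   frame_->Drop();                  // discard the key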
void LoadFromGlobalSlotCheckExtensions(Slot* slot,
TypeofState typeof_state,
// Copyright 2006-2009 the V8 project authors. All rights reserved.
// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -639,15 +639,22 @@ Result CodeGenerator::StoreArgumentsObject(bool initial) {
return frame_->Pop();
}
//------------------------------------------------------------------------------
// CodeGenerator implementation of variables, lookups, and stores.
Reference::Reference(CodeGenerator* cgen, Expression* expression)
: cgen_(cgen), expression_(expression), type_(ILLEGAL) {
Reference::Reference(CodeGenerator* cgen,
Expression* expression,
bool persist_after_get)
: cgen_(cgen),
expression_(expression),
type_(ILLEGAL),
persist_after_get_(persist_after_get) {
cgen->LoadReference(this);
}
Reference::~Reference() {
cgen_->UnloadReference(this);
ASSERT(is_unloaded() || is_illegal());
}
@@ -697,6 +704,7 @@ void CodeGenerator::UnloadReference(Reference* ref) {
// Pop a reference from the stack while preserving TOS.
Comment cmnt(masm_, "[ UnloadReference");
frame_->Nip(ref->size());
ref->set_unloaded();
}
@@ -2297,20 +2305,29 @@ void CodeGenerator::CallWithArguments(ZoneList<Expression*>* args,
}
void CodeGenerator::CallApplyLazy(Property* apply,
void CodeGenerator::CallApplyLazy(Expression* applicand,
Expression* receiver,
VariableProxy* arguments,
int position) {
// An optimized implementation of expressions of the form
// x.apply(y, arguments).
// If the arguments object of the scope has not been allocated,
// and x.apply is Function.prototype.apply, this optimization
// just copies y and the arguments of the current function on the
// stack, as receiver and arguments, and calls x.
// In the implementation comments, we call x the applicand
// and y the receiver.
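// For illustration only, a hypothetical input this fast path targets (not
// part of this change):
//   function delegate() { return f.apply(this, arguments); }
// If 'arguments' was never materialized, the generated code copies the
// caller's receiver and parameters directly and invokes f, skipping the
// allocation of an arguments object.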
ASSERT(ArgumentsMode() == LAZY_ARGUMENTS_ALLOCATION);
ASSERT(arguments->IsArguments());
JumpTarget slow, done;
// Load the apply function onto the stack. This will usually
// Load applicand.apply onto the stack. This will usually
// give us a megamorphic load site. Not super, but it works.
Reference ref(this, apply);
ref.GetValue();
ASSERT(ref.type() == Reference::NAMED);
Load(applicand);
Handle<String> name = Factory::LookupAsciiSymbol("apply");
frame()->Push(name);
Result answer = frame()->CallLoadIC(RelocInfo::CODE_TARGET);
__ nop();
frame()->Push(&answer);
// Load the receiver and the existing arguments object onto the
// expression stack. Avoid allocating the arguments object here.
@@ -2320,6 +2337,11 @@ void CodeGenerator::CallApplyLazy(Property* apply,
// Emit the source position information after having loaded the
// receiver and the arguments.
CodeForSourcePosition(position);
// Contents of frame at this point:
// Frame[0]: arguments object of the current function or the hole.
// Frame[1]: receiver
// Frame[2]: applicand.apply
// Frame[3]: applicand.
// Check if the arguments object has been lazily allocated
// already. If so, just use that instead of copying the arguments
@@ -2327,143 +2349,151 @@ void CodeGenerator::CallApplyLazy(Property* apply,
// named 'arguments' has been introduced.
frame_->Dup();
Result probe = frame_->Pop();
bool try_lazy = true;
if (probe.is_constant()) {
try_lazy = probe.handle()->IsTheHole();
} else {
__ cmp(Operand(probe.reg()), Immediate(Factory::the_hole_value()));
probe.Unuse();
slow.Branch(not_equal);
}
if (try_lazy) {
JumpTarget build_args;
// Get rid of the arguments object probe.
frame_->Drop();
{ VirtualFrame::SpilledScope spilled_scope;
Label slow, done;
bool try_lazy = true;
if (probe.is_constant()) {
try_lazy = probe.handle()->IsTheHole();
} else {
__ cmp(Operand(probe.reg()), Immediate(Factory::the_hole_value()));
probe.Unuse();
__ j(not_equal, &slow);
}
// Before messing with the execution stack, we sync all
// elements. This is bound to happen anyway because we're
// about to call a function.
frame_->SyncRange(0, frame_->element_count() - 1);
if (try_lazy) {
Label build_args;
// Get rid of the arguments object probe.
frame_->Drop(); // Can be called on a spilled frame.
// Stack now has 3 elements on it.
// Contents of stack at this point:
// esp[0]: receiver
// esp[1]: applicand.apply
// esp[2]: applicand.
// Check that the receiver really is a JavaScript object.
{ frame_->PushElementAt(0);
Result receiver = frame_->Pop();
receiver.ToRegister();
__ test(receiver.reg(), Immediate(kSmiTagMask));
build_args.Branch(zero);
Result tmp = allocator_->Allocate();
// Check that the receiver really is a JavaScript object.
__ mov(eax, Operand(esp, 0));
__ test(eax, Immediate(kSmiTagMask));
__ j(zero, &build_args);
// We allow all JSObjects including JSFunctions. As long as
// JS_FUNCTION_TYPE is the last instance type and it is right
// after LAST_JS_OBJECT_TYPE, we do not have to check the upper
// bound.
ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
ASSERT(JS_FUNCTION_TYPE == LAST_JS_OBJECT_TYPE + 1);
__ CmpObjectType(receiver.reg(), FIRST_JS_OBJECT_TYPE, tmp.reg());
build_args.Branch(less);
}
// Verify that we're invoking Function.prototype.apply.
{ frame_->PushElementAt(1);
Result apply = frame_->Pop();
apply.ToRegister();
__ test(apply.reg(), Immediate(kSmiTagMask));
build_args.Branch(zero);
Result tmp = allocator_->Allocate();
__ CmpObjectType(apply.reg(), JS_FUNCTION_TYPE, tmp.reg());
build_args.Branch(not_equal);
__ mov(tmp.reg(),
FieldOperand(apply.reg(), JSFunction::kSharedFunctionInfoOffset));
__ CmpObjectType(eax, FIRST_JS_OBJECT_TYPE, ecx);
__ j(below, &build_args);
// Check that applicand.apply is Function.prototype.apply.
__ mov(eax, Operand(esp, kPointerSize));
__ test(eax, Immediate(kSmiTagMask));
__ j(zero, &build_args);
__ CmpObjectType(eax, JS_FUNCTION_TYPE, ecx);
__ j(not_equal, &build_args);
__ mov(ecx, FieldOperand(eax, JSFunction::kSharedFunctionInfoOffset));
Handle<Code> apply_code(Builtins::builtin(Builtins::FunctionApply));
__ cmp(FieldOperand(tmp.reg(), SharedFunctionInfo::kCodeOffset),
__ cmp(FieldOperand(ecx, SharedFunctionInfo::kCodeOffset),
Immediate(apply_code));
build_args.Branch(not_equal);
}
// Get the function receiver from the stack. Check that it
// really is a function.
__ mov(edi, Operand(esp, 2 * kPointerSize));
__ test(edi, Immediate(kSmiTagMask));
build_args.Branch(zero);
__ CmpObjectType(edi, JS_FUNCTION_TYPE, ecx);
build_args.Branch(not_equal);
// Copy the arguments to this function possibly from the
// adaptor frame below it.
Label invoke, adapted;
__ mov(edx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
__ mov(ecx, Operand(edx, StandardFrameConstants::kContextOffset));
__ cmp(Operand(ecx),
Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
__ j(equal, &adapted);
// No arguments adaptor frame. Copy fixed number of arguments.
__ mov(eax, Immediate(scope_->num_parameters()));
for (int i = 0; i < scope_->num_parameters(); i++) {
__ push(frame_->ParameterAt(i));
}
__ jmp(&invoke);
// Arguments adaptor frame present. Copy arguments from there, but
// avoid copying too many arguments to avoid stack overflows.
__ bind(&adapted);
static const uint32_t kArgumentsLimit = 1 * KB;
__ mov(eax, Operand(edx, ArgumentsAdaptorFrameConstants::kLengthOffset));
__ SmiUntag(eax);
__ mov(ecx, Operand(eax));
__ cmp(eax, kArgumentsLimit);
build_args.Branch(above);
// Loop through the arguments pushing them onto the execution
// stack. We don't inform the virtual frame of the push, so we don't
// have to worry about getting rid of the elements from the virtual
// frame.
Label loop;
__ bind(&loop);
__ test(ecx, Operand(ecx));
__ j(zero, &invoke);
__ push(Operand(edx, ecx, times_4, 1 * kPointerSize));
__ dec(ecx);
__ jmp(&loop);
// Invoke the function. The virtual frame knows about the receiver
// so make sure to forget that explicitly.
__ bind(&invoke);
ParameterCount actual(eax);
__ InvokeFunction(edi, actual, CALL_FUNCTION);
frame_->Forget(1);
Result result = allocator()->Allocate(eax);
frame_->SetElementAt(0, &result);
done.Jump();
// Slow-case: Allocate the arguments object since we know it isn't
// there, and fall-through to the slow-case where we call
// Function.prototype.apply.
build_args.Bind();
Result arguments_object = StoreArgumentsObject(false);
frame_->Push(&arguments_object);
slow.Bind();
}
__ j(not_equal, &build_args);
// Check that applicand is a function.
__ mov(edi, Operand(esp, 2 * kPointerSize));
__ test(edi, Immediate(kSmiTagMask));
__ j(zero, &build_args);
__ CmpObjectType(edi, JS_FUNCTION_TYPE, ecx);
__ j(not_equal, &build_args);
// Copy the arguments to this function possibly from the
// adaptor frame below it.
Label invoke, adapted;
__ mov(edx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
__ mov(ecx, Operand(edx, StandardFrameConstants::kContextOffset));
__ cmp(Operand(ecx),
Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
__ j(equal, &adapted);
// No arguments adaptor frame. Copy fixed number of arguments.
__ mov(eax, Immediate(scope_->num_parameters()));
for (int i = 0; i < scope_->num_parameters(); i++) {
__ push(frame_->ParameterAt(i));
}
__ jmp(&invoke);
// Arguments adaptor frame present. Copy arguments from there, but
// avoid copying too many arguments to avoid stack overflows.
__ bind(&adapted);
static const uint32_t kArgumentsLimit = 1 * KB;
__ mov(eax, Operand(edx, ArgumentsAdaptorFrameConstants::kLengthOffset));
__ SmiUntag(eax);
__ mov(ecx, Operand(eax));
__ cmp(eax, kArgumentsLimit);
__ j(above, &build_args);
// Loop through the arguments pushing them onto the execution
// stack. We don't inform the virtual frame of the push, so we don't
// have to worry about getting rid of the elements from the virtual
// frame.
Label loop;
// ecx is a small non-negative integer, due to the test above.
__ test(ecx, Operand(ecx));
__ j(zero, &invoke);
__ bind(&loop);
__ push(Operand(edx, ecx, times_pointer_size, 1 * kPointerSize));
__ dec(ecx);
__ j(not_zero, &loop);
// Invoke the function.
__ bind(&invoke);
ParameterCount actual(eax);
__ InvokeFunction(edi, actual, CALL_FUNCTION);
// Drop applicand.apply and applicand from the stack, and push
// the result of the function call, but leave the spilled frame
// unchanged, with 3 elements, so it is correct when we compile the
// slow-case code.
__ add(Operand(esp), Immediate(2 * kPointerSize));
__ push(eax);
// Stack now has 1 element:
// esp[0]: result
__ jmp(&done);
// Flip the apply function and the function to call on the stack, so
// the function looks like the receiver of the apply call. This way,
// the generic Function.prototype.apply implementation can deal with
// the call like it usually does.
Result a2 = frame_->Pop();
Result a1 = frame_->Pop();
Result ap = frame_->Pop();
Result fn = frame_->Pop();
frame_->Push(&ap);
frame_->Push(&fn);
frame_->Push(&a1);
frame_->Push(&a2);
CallFunctionStub call_function(2, NOT_IN_LOOP, NO_CALL_FUNCTION_FLAGS);
Result res = frame_->CallStub(&call_function, 3);
frame_->Push(&res);
// All done. Restore context register after call.
if (try_lazy) done.Bind();
// Slow-case: Allocate the arguments object since we know it isn't
// there, and fall-through to the slow-case where we call
// applicand.apply.
__ bind(&build_args);
// Stack now has 3 elements, because we have jumped here from a point where:
// esp[0]: receiver
// esp[1]: applicand.apply
// esp[2]: applicand.
// StoreArgumentsObject requires a correct frame, and may modify it.
Result arguments_object = StoreArgumentsObject(false);
frame_->SpillAll();
arguments_object.ToRegister();
frame_->EmitPush(arguments_object.reg());
arguments_object.Unuse();
// Stack and frame now have 4 elements.
__ bind(&slow);
}
// Generic computation of x.apply(y, args) with no special optimization.
// Flip applicand.apply and applicand on the stack, so
// applicand looks like the receiver of the applicand.apply call.
// Then process it as a normal function call.
__ mov(eax, Operand(esp, 3 * kPointerSize));
__ mov(ebx, Operand(esp, 2 * kPointerSize));
__ mov(Operand(esp, 2 * kPointerSize), eax);
__ mov(Operand(esp, 3 * kPointerSize), ebx);
CallFunctionStub call_function(2, NOT_IN_LOOP, NO_CALL_FUNCTION_FLAGS);
Result res = frame_->CallStub(&call_function, 3);
// The function and its two arguments have been dropped.
frame_->Drop(1); // Drop the receiver as well.
res.ToRegister();
frame_->EmitPush(res.reg());
// Stack now has 1 element:
// esp[0]: result
if (try_lazy) __ bind(&done);
} // End of spilled scope.
// Restore the context register after a call.
frame_->RestoreContextRegister();
}
@@ -3503,17 +3533,13 @@ void CodeGenerator::VisitForInStatement(ForInStatement* node) {
if (!each.is_illegal()) {
if (each.size() > 0) {
frame_->EmitPush(frame_->ElementAt(each.size()));
}
// If the reference was to a slot we rely on the convenient property
// that it doesn't matter whether a value (eg, ebx pushed above) is
// right on top of or right underneath a zero-sized reference.
each.SetValue(NOT_CONST_INIT);
if (each.size() > 0) {
// It's safe to pop the value lying on top of the reference before
// unloading the reference itself (which preserves the top of stack,
// ie, now the topmost value of the non-zero sized reference), since
// we will discard the top of stack after unloading the reference
// anyway.
each.SetValue(NOT_CONST_INIT);
frame_->Drop(2);
} else {
// If the reference was to a slot we rely on the convenient property
// that it doesn't matter whether a value (eg, ebx pushed above) is
// right on top of or right underneath a zero-sized reference.
each.SetValue(NOT_CONST_INIT);
frame_->Drop();
}
}
@@ -3521,10 +3547,6 @@ void CodeGenerator::VisitForInStatement(ForInStatement* node) {
// Unloading a reference may leave the frame in an unspilled state.
frame_->SpillAll();
// Discard the i'th entry pushed above or else the remainder of the
// reference, whichever is currently on top of the stack.
frame_->Drop();
// Body.
CheckStack(); // TODO(1222600): ignore if body contains calls.
VisitAndSpill(node->body());
@@ -4574,9 +4596,12 @@ void CodeGenerator::VisitCatchExtensionObject(CatchExtensionObject* node) {
void CodeGenerator::VisitAssignment(Assignment* node) {
#ifdef DEBUG
int original_height = frame_->height();
#endif
Comment cmnt(masm_, "[ Assignment");
{ Reference target(this, node->target());
{ Reference target(this, node->target(), node->is_compound());
if (target.is_illegal()) {
// Fool the virtual frame into thinking that we left the assignment's
// value on the frame.
@@ -4598,12 +4623,27 @@ void CodeGenerator::VisitAssignment(Assignment* node) {
frame_->PushElementAt(target.size() - 1);
Result ignored = frame_->CallRuntime(Runtime::kToSlowProperties, 1);
}
if (node->ends_initialization_block()) {
// Add an extra copy of the receiver to the frame, so that it can be
// converted back to fast case after the assignment.
ASSERT(target.type() == Reference::NAMED ||
target.type() == Reference::KEYED);
if (target.type() == Reference::NAMED) {
frame_->Dup();
// Dup target receiver on stack.
} else {
ASSERT(target.type() == Reference::KEYED);
Result temp = frame_->Pop();
frame_->Dup();
frame_->Push(&temp);
}
}
if (node->op() == Token::ASSIGN ||
node->op() == Token::INIT_VAR ||
node->op() == Token::INIT_CONST) {
Load(node->value());
} else {
} else { // Assignment is a compound assignment.
Literal* literal = node->value()->AsLiteral();
bool overwrite_value =
(node->value()->AsBinaryOperation() != NULL &&
@@ -4629,6 +4669,7 @@ void CodeGenerator::VisitAssignment(Assignment* node) {
var->mode() == Variable::CONST &&
node->op() != Token::INIT_VAR && node->op() != Token::INIT_CONST) {
// Assignment ignored - leave the value on the stack.
UnloadReference(&target);
} else {
CodeForSourcePosition(node->position());
if (node->op() == Token::INIT_CONST) {
@@ -4640,17 +4681,20 @@ void CodeGenerator::VisitAssignment(Assignment* node) {
target.SetValue(NOT_CONST_INIT);
}
if (node->ends_initialization_block()) {
ASSERT(target.type() == Reference::NAMED ||
target.type() == Reference::KEYED);
ASSERT(target.type() == Reference::UNLOADED);
// End of initialization block. Revert to fast case. The
// argument to the runtime call is the receiver, which is the
// first value pushed as part of the reference, which is below
// the lhs value.
frame_->PushElementAt(target.size());
// argument to the runtime call is the extra copy of the receiver,
// which is below the value of the assignment.
// Swap the receiver and the value of the assignment expression.
Result lhs = frame_->Pop();
Result receiver = frame_->Pop();
frame_->Push(&lhs);
frame_->Push(&receiver);
Result ignored = frame_->CallRuntime(Runtime::kToFastProperties, 1);
}
}
}
ASSERT(frame_->height() == original_height + 1);
}
@@ -4813,7 +4857,7 @@ void CodeGenerator::VisitCall(Call* node) {
args->at(1)->AsVariableProxy()->IsArguments()) {
// Use the optimized Function.prototype.apply that avoids
// allocating lazily allocated arguments objects.
CallApplyLazy(property,
CallApplyLazy(property->obj(),
args->at(0),
args->at(1)->AsVariableProxy(),
node->position());
@@ -4846,16 +4890,21 @@ void CodeGenerator::VisitCall(Call* node) {
// -------------------------------------------
// Load the function to call from the property through a reference.
Reference ref(this, property);
ref.GetValue();
// Pass receiver to called function.
if (property->is_synthetic()) {
Reference ref(this, property);
ref.GetValue();
// Use global object as receiver.
LoadGlobalReceiver();
} else {
// The reference's size is non-negative.
frame_->PushElementAt(ref.size());
Load(property->obj());
Load(property->key());
Result function = EmitKeyedLoad(false);
frame_->Drop(); // Key.
Result receiver = frame_->Pop();
frame_->Push(&function);
frame_->Push(&receiver);
}
// Call the function.
@@ -5766,7 +5815,9 @@ void CodeGenerator::VisitCountOperation(CountOperation* node) {
// value will be in the frame to be spilled.
if (is_postfix) frame_->Push(Smi::FromInt(0));
{ Reference target(this, node->expression());
// A constant reference is not saved to, so the reference is not a
// compound assignment reference.
{ Reference target(this, node->expression(), !is_const);
if (target.is_illegal()) {
// Spoof the virtual frame to have the expected height (one higher
// than on entry).
@@ -6369,6 +6420,114 @@ void DeferredReferenceSetKeyedValue::Generate() {
}
Result CodeGenerator::EmitKeyedLoad(bool is_global) {
Comment cmnt(masm_, "[ Load from keyed Property");
// Inline array load code if inside of a loop. We do not know
// the receiver map yet, so we initially generate the code with
// a check against an invalid map. In the inline cache code, we
// patch the map check if appropriate.
if (loop_nesting() > 0) {
Comment cmnt(masm_, "[ Inlined load from keyed Property");
Result key = frame_->Pop();
Result receiver = frame_->Pop();
key.ToRegister();
receiver.ToRegister();
// Use a fresh temporary to load the elements without destroying
// the receiver which is needed for the deferred slow case.
Result elements = allocator()->Allocate();
ASSERT(elements.is_valid());
// Use a fresh temporary for the index and later the loaded
// value.
Result index = allocator()->Allocate();
ASSERT(index.is_valid());
DeferredReferenceGetKeyedValue* deferred =
new DeferredReferenceGetKeyedValue(index.reg(),
receiver.reg(),
key.reg(),
is_global);
// Check that the receiver is not a smi (only needed if this
// is not a load from the global context) and that it has the
// expected map.
if (!is_global) {
__ test(receiver.reg(), Immediate(kSmiTagMask));
deferred->Branch(zero);
}
// Initially, use an invalid map. The map is patched in the IC
// initialization code.
__ bind(deferred->patch_site());
// Use masm-> here instead of the double underscore macro since extra
// coverage code can interfere with the patching.
masm_->cmp(FieldOperand(receiver.reg(), HeapObject::kMapOffset),
Immediate(Factory::null_value()));
deferred->Branch(not_equal);
// Check that the key is a smi.
__ test(key.reg(), Immediate(kSmiTagMask));
deferred->Branch(not_zero);
// Get the elements array from the receiver and check that it
// is not a dictionary.
__ mov(elements.reg(),
FieldOperand(receiver.reg(), JSObject::kElementsOffset));
__ cmp(FieldOperand(elements.reg(), HeapObject::kMapOffset),
Immediate(Factory::fixed_array_map()));
deferred->Branch(not_equal);
// Shift the key to get the actual index value and check that
// it is within bounds.
__ mov(index.reg(), key.reg());
__ SmiUntag(index.reg());
__ cmp(index.reg(),
FieldOperand(elements.reg(), FixedArray::kLengthOffset));
deferred->Branch(above_equal);
// Load and check that the result is not the hole. We could
// reuse the index or elements register for the value.
//
// TODO(206): Consider whether it makes sense to try some
// heuristic about which register to reuse. For example, if
// one is eax, then we can reuse that one because the value
// coming from the deferred code will be in eax.
Result value = index;
__ mov(value.reg(), Operand(elements.reg(),
index.reg(),
times_4,
FixedArray::kHeaderSize - kHeapObjectTag));
elements.Unuse();
index.Unuse();
__ cmp(Operand(value.reg()), Immediate(Factory::the_hole_value()));
deferred->Branch(equal);
__ IncrementCounter(&Counters::keyed_load_inline, 1);
deferred->BindExit();
// Restore the receiver and key to the frame and push the
// result on top of it.
frame_->Push(&receiver);
frame_->Push(&key);
return value;
} else {
Comment cmnt(masm_, "[ Load from keyed Property");
RelocInfo::Mode mode = is_global
? RelocInfo::CODE_TARGET_CONTEXT
: RelocInfo::CODE_TARGET;
Result answer = frame_->CallKeyedLoadIC(mode);
// Make sure that we do not have a test instruction after the
// call. A test instruction after the call is used to
// indicate that we have generated an inline version of the
// keyed load. The explicit nop instruction is here because
// the push that follows might be peep-hole optimized away.
__ nop();
return answer;
}
}
#undef __
#define __ ACCESS_MASM(masm)
@@ -6481,121 +6640,21 @@ void Reference::GetValue() {
}
case KEYED: {
Comment cmnt(masm, "[ Load from keyed Property");
Variable* var = expression_->AsVariableProxy()->AsVariable();
bool is_global = var != NULL;
ASSERT(!is_global || var->is_global());
// Inline array load code if inside of a loop. We do not know
// the receiver map yet, so we initially generate the code with
// a check against an invalid map. In the inline cache code, we
// patch the map check if appropriate.
if (cgen_->loop_nesting() > 0) {
Comment cmnt(masm, "[ Inlined load from keyed Property");
Result key = cgen_->frame()->Pop();
Result receiver = cgen_->frame()->Pop();
key.ToRegister();
receiver.ToRegister();
// Use a fresh temporary to load the elements without destroying
// the receiver which is needed for the deferred slow case.
Result elements = cgen_->allocator()->Allocate();
ASSERT(elements.is_valid());
// Use a fresh temporary for the index and later the loaded
// value.
Result index = cgen_->allocator()->Allocate();
ASSERT(index.is_valid());
DeferredReferenceGetKeyedValue* deferred =
new DeferredReferenceGetKeyedValue(index.reg(),
receiver.reg(),
key.reg(),
is_global);
// Check that the receiver is not a smi (only needed if this
// is not a load from the global context) and that it has the
// expected map.
if (!is_global) {
__ test(receiver.reg(), Immediate(kSmiTagMask));
deferred->Branch(zero);
}
// Initially, use an invalid map. The map is patched in the IC
// initialization code.
__ bind(deferred->patch_site());
// Use masm-> here instead of the double underscore macro since extra
// coverage code can interfere with the patching.
masm->cmp(FieldOperand(receiver.reg(), HeapObject::kMapOffset),
Immediate(Factory::null_value()));
deferred->Branch(not_equal);
// Check that the key is a smi.
__ test(key.reg(), Immediate(kSmiTagMask));
deferred->Branch(not_zero);
// Get the elements array from the receiver and check that it
// is not a dictionary.
__ mov(elements.reg(),
FieldOperand(receiver.reg(), JSObject::kElementsOffset));
__ cmp(FieldOperand(elements.reg(), HeapObject::kMapOffset),
Immediate(Factory::fixed_array_map()));
deferred->Branch(not_equal);
// Shift the key to get the actual index value and check that
// it is within bounds.
__ mov(index.reg(), key.reg());
__ SmiUntag(index.reg());
__ cmp(index.reg(),
FieldOperand(elements.reg(), FixedArray::kLengthOffset));
deferred->Branch(above_equal);
// Load and check that the result is not the hole. We could
// reuse the index or elements register for the value.
//
// TODO(206): Consider whether it makes sense to try some
// heuristic about which register to reuse. For example, if
// one is eax, then we can reuse that one because the value
// coming from the deferred code will be in eax.
Result value = index;
__ mov(value.reg(), Operand(elements.reg(),
index.reg(),
times_4,
FixedArray::kHeaderSize - kHeapObjectTag));
elements.Unuse();
index.Unuse();
__ cmp(Operand(value.reg()), Immediate(Factory::the_hole_value()));
deferred->Branch(equal);
__ IncrementCounter(&Counters::keyed_load_inline, 1);
deferred->BindExit();
// Restore the receiver and key to the frame and push the
// result on top of it.
cgen_->frame()->Push(&receiver);
cgen_->frame()->Push(&key);
cgen_->frame()->Push(&value);
} else {
Comment cmnt(masm, "[ Load from keyed Property");
RelocInfo::Mode mode = is_global
? RelocInfo::CODE_TARGET_CONTEXT
: RelocInfo::CODE_TARGET;
Result answer = cgen_->frame()->CallKeyedLoadIC(mode);
// Make sure that we do not have a test instruction after the
// call. A test instruction after the call is used to
// indicate that we have generated an inline version of the
// keyed load. The explicit nop instruction is here because
// the push that follows might be peep-hole optimized away.
__ nop();
cgen_->frame()->Push(&answer);
}
Result value = cgen_->EmitKeyedLoad(is_global);
cgen_->frame()->Push(&value);
break;
}
default:
UNREACHABLE();
}
if (!persist_after_get_) {
cgen_->UnloadReference(this);
}
}
@@ -6629,6 +6688,9 @@ void Reference::TakeValue() {
ASSERT(slot->type() == Slot::LOCAL);
cgen_->frame()->TakeLocalAt(slot->index());
}
ASSERT(persist_after_get_);
// Do not unload the reference, because it is used in SetValue.
}
@@ -6758,6 +6820,7 @@ void Reference::SetValue(InitState init_state) {
default:
UNREACHABLE();
}
cgen_->UnloadReference(this);
}
// Copyright 2006-2008 the V8 project authors. All rights reserved.
// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -43,57 +43,70 @@ enum TypeofState { INSIDE_TYPEOF, NOT_INSIDE_TYPEOF };
// -------------------------------------------------------------------------
// Reference support
// A reference is a C++ stack-allocated object that keeps an ECMA
// reference on the execution stack while in scope. For variables
// the reference is empty, indicating that it isn't necessary to
// store state on the stack for keeping track of references to those.
// For properties, we keep either one (named) or two (indexed) values
// on the execution stack to represent the reference.
// A reference is a C++ stack-allocated object that puts a
// reference on the virtual frame. The reference may be consumed
// by GetValue, TakeValue, SetValue, and Codegen::UnloadReference.
// When the lifetime (scope) of a valid reference ends, it must have
// been consumed, and be in state UNLOADED.
class Reference BASE_EMBEDDED {
public:
// The values of the types are important; see size().
enum Type { ILLEGAL = -1, SLOT = 0, NAMED = 1, KEYED = 2 };
Reference(CodeGenerator* cgen, Expression* expression);
enum Type { UNLOADED = -2, ILLEGAL = -1, SLOT = 0, NAMED = 1, KEYED = 2 };
Reference(CodeGenerator* cgen,
Expression* expression,
bool persist_after_get = false);
~Reference();
Expression* expression() const { return expression_; }
Type type() const { return type_; }
void set_type(Type value) {
ASSERT(type_ == ILLEGAL);
ASSERT_EQ(ILLEGAL, type_);
type_ = value;
}
void set_unloaded() {
ASSERT_NE(ILLEGAL, type_);
ASSERT_NE(UNLOADED, type_);
type_ = UNLOADED;
}
// The size the reference takes up on the stack.
int size() const { return (type_ == ILLEGAL) ? 0 : type_; }
int size() const {
return (type_ < SLOT) ? 0 : type_;
}
bool is_illegal() const { return type_ == ILLEGAL; }
bool is_slot() const { return type_ == SLOT; }
bool is_property() const { return type_ == NAMED || type_ == KEYED; }
bool is_unloaded() const { return type_ == UNLOADED; }
// Return the name. Only valid for named property references.
Handle<String> GetName();
// Generate code to push the value of the reference on top of the
// expression stack. The reference is expected to be already on top of
// the expression stack, and it is left in place with its value above it.
// the expression stack, and it is consumed by the call unless the
// reference is for a compound assignment.
// If the reference is not consumed, it is left in place under its value.
void GetValue();
// Like GetValue except that the slot is expected to be written to before
// being read from again. Thae value of the reference may be invalidated,
// being read from again. The value of the reference may be invalidated,
// causing subsequent attempts to read it to fail.
void TakeValue();
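// Illustrative: in a compound assignment to a local slot, TakeValue() can
// replace GetValue(), since SetValue() will overwrite the slot anyway; the
// reference stays loaded (TakeValue asserts persist_after_get_).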
// Generate code to store the value on top of the expression stack in the
// reference. The reference is expected to be immediately below the value
// on the expression stack. The stored value is left in place (with the
// reference intact below it) to support chained assignments.
// on the expression stack. The value is stored in the location specified
// by the reference, and is left on top of the stack, after the reference
// is popped from beneath it (unloaded).
void SetValue(InitState init_state);
private:
CodeGenerator* cgen_;
Expression* expression_;
Type type_;
// Keep the reference on the stack after get, so it can be used by set later.
bool persist_after_get_;
};
@@ -420,6 +433,11 @@ class CodeGenerator: public AstVisitor {
// value in place.
void StoreToSlot(Slot* slot, InitState init_state);
// Load a property of an object, returning it in a Result.
// The object and the property name are passed on the stack, and
// not changed.
Result EmitKeyedLoad(bool is_global);
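// Illustrative call pattern, mirroring Reference::GetValue in this patch:
//   ...receiver and key already on the frame...
//   Result value = EmitKeyedLoad(is_global);
//   frame_->Push(&value);  // receiver and key remain below the result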
// Special code for typeof expressions: Unfortunately, we must
// be careful when loading the expression in 'typeof'
// expressions. We are not allowed to throw reference errors for
@@ -479,10 +497,10 @@ class CodeGenerator: public AstVisitor {
CallFunctionFlags flags,
int position);
// Use an optimized version of Function.prototype.apply that avoids
// allocating the arguments object and just copies the arguments
// from the stack.
void CallApplyLazy(Property* apply,
// An optimized implementation of expressions of the form
// x.apply(y, arguments). We call x the applicand and y the receiver.
// The optimization avoids allocating an arguments object if possible.
void CallApplyLazy(Expression* applicand,
Expression* receiver,
VariableProxy* arguments,
int position);
// Copyright 2009 the V8 project authors. All rights reserved.
// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -651,20 +651,29 @@ void DeferredReferenceSetKeyedValue::Generate() {
}
void CodeGenerator::CallApplyLazy(Property* apply,
void CodeGenerator::CallApplyLazy(Expression* applicand,
Expression* receiver,
VariableProxy* arguments,
int position) {
// An optimized implementation of expressions of the form
// x.apply(y, arguments).
// If the arguments object of the scope has not been allocated,
// and x.apply is Function.prototype.apply, this optimization
// just copies y and the arguments of the current function on the
// stack, as receiver and arguments, and calls x.
// In the implementation comments, we call x the applicand
// and y the receiver.
ASSERT(ArgumentsMode() == LAZY_ARGUMENTS_ALLOCATION);
ASSERT(arguments->IsArguments());
JumpTarget slow, done;
// Load the apply function onto the stack. This will usually
// Load applicand.apply onto the stack. This will usually
// give us a megamorphic load site. Not super, but it works.
Reference ref(this, apply);
ref.GetValue();
ASSERT(ref.type() == Reference::NAMED);
Load(applicand);
Handle<String> name = Factory::LookupAsciiSymbol("apply");
frame()->Push(name);
Result answer = frame()->CallLoadIC(RelocInfo::CODE_TARGET);
__ nop();
frame()->Push(&answer);
// Load the receiver and the existing arguments object onto the
// expression stack. Avoid allocating the arguments object here.
@@ -674,6 +683,11 @@ void CodeGenerator::CallApplyLazy(Property* apply,
// Emit the source position information after having loaded the
// receiver and the arguments.
CodeForSourcePosition(position);
// Contents of frame at this point:
// Frame[0]: arguments object of the current function or the hole.
// Frame[1]: receiver
// Frame[2]: applicand.apply
// Frame[3]: applicand.
// Check if the arguments object has been lazily allocated
// already. If so, just use that instead of copying the arguments
@@ -681,143 +695,149 @@ void CodeGenerator::CallApplyLazy(Property* apply,
// named 'arguments' has been introduced.
frame_->Dup();
Result probe = frame_->Pop();
bool try_lazy = true;
if (probe.is_constant()) {
try_lazy = probe.handle()->IsTheHole();
} else {
__ CompareRoot(probe.reg(), Heap::kTheHoleValueRootIndex);
probe.Unuse();
slow.Branch(not_equal);
}
if (try_lazy) {
JumpTarget build_args;
// Get rid of the arguments object probe.
frame_->Drop();
// Before messing with the execution stack, we sync all
// elements. This is bound to happen anyway because we're
// about to call a function.
frame_->SyncRange(0, frame_->element_count() - 1);
{ VirtualFrame::SpilledScope spilled_scope;
Label slow, done;
bool try_lazy = true;
if (probe.is_constant()) {
try_lazy = probe.handle()->IsTheHole();
} else {
__ CompareRoot(probe.reg(), Heap::kTheHoleValueRootIndex);
probe.Unuse();
__ j(not_equal, &slow);
}
// Check that the receiver really is a JavaScript object.
{
frame_->PushElementAt(0);
Result receiver = frame_->Pop();
receiver.ToRegister();
Condition is_smi = masm_->CheckSmi(receiver.reg());
build_args.Branch(is_smi);
if (try_lazy) {
Label build_args;
// Get rid of the arguments object probe.
frame_->Drop(); // Can be called on a spilled frame.
// Stack now has 3 elements on it.
// Contents of stack at this point:
// rsp[0]: receiver
// rsp[1]: applicand.apply
// rsp[2]: applicand.
// Check that the receiver really is a JavaScript object.
__ movq(rax, Operand(rsp, 0));
Condition is_smi = masm_->CheckSmi(rax);
__ j(is_smi, &build_args);
// We allow all JSObjects including JSFunctions. As long as
// JS_FUNCTION_TYPE is the last instance type and it is right
// after LAST_JS_OBJECT_TYPE, we do not have to check the upper
// bound.
ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
ASSERT(JS_FUNCTION_TYPE == LAST_JS_OBJECT_TYPE + 1);
__ CmpObjectType(receiver.reg(), FIRST_JS_OBJECT_TYPE, kScratchRegister);
build_args.Branch(below);
}
// Verify that we're invoking Function.prototype.apply.
{
frame_->PushElementAt(1);
Result apply = frame_->Pop();
apply.ToRegister();
Condition is_smi = masm_->CheckSmi(apply.reg());
build_args.Branch(is_smi);
Result tmp = allocator_->Allocate();
__ CmpObjectType(apply.reg(), JS_FUNCTION_TYPE, tmp.reg());
build_args.Branch(not_equal);
__ movq(tmp.reg(),
FieldOperand(apply.reg(), JSFunction::kSharedFunctionInfoOffset));
__ CmpObjectType(rax, FIRST_JS_OBJECT_TYPE, rcx);
__ j(below, &build_args);
// Check that applicand.apply is Function.prototype.apply.
__ movq(rax, Operand(rsp, kPointerSize));
is_smi = masm_->CheckSmi(rax);
__ j(is_smi, &build_args);
__ CmpObjectType(rax, JS_FUNCTION_TYPE, rcx);
__ j(not_equal, &build_args);
__ movq(rax, FieldOperand(rax, JSFunction::kSharedFunctionInfoOffset));
Handle<Code> apply_code(Builtins::builtin(Builtins::FunctionApply));
__ Cmp(FieldOperand(tmp.reg(), SharedFunctionInfo::kCodeOffset),
apply_code);
build_args.Branch(not_equal);
}
__ Cmp(FieldOperand(rax, SharedFunctionInfo::kCodeOffset), apply_code);
__ j(not_equal, &build_args);
// Check that applicand is a function.
__ movq(rdi, Operand(rsp, 2 * kPointerSize));
is_smi = masm_->CheckSmi(rdi);
__ j(is_smi, &build_args);
__ CmpObjectType(rdi, JS_FUNCTION_TYPE, rcx);
__ j(not_equal, &build_args);
// Copy the arguments to this function possibly from the
// adaptor frame below it.
Label invoke, adapted;
__ movq(rdx, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
__ SmiCompare(Operand(rdx, StandardFrameConstants::kContextOffset),
Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
__ j(equal, &adapted);
// No arguments adaptor frame. Copy fixed number of arguments.
__ movq(rax, Immediate(scope_->num_parameters()));
for (int i = 0; i < scope_->num_parameters(); i++) {
__ push(frame_->ParameterAt(i));
}
__ jmp(&invoke);
// Arguments adaptor frame present. Copy arguments from there, but
// avoid copying too many arguments to avoid stack overflows.
__ bind(&adapted);
static const uint32_t kArgumentsLimit = 1 * KB;
__ movq(rax, Operand(rdx, ArgumentsAdaptorFrameConstants::kLengthOffset));
__ SmiToInteger32(rax, rax);
__ movq(rcx, rax);
__ cmpq(rax, Immediate(kArgumentsLimit));
__ j(above, &build_args);
// Loop through the arguments pushing them onto the execution
// stack. We don't inform the virtual frame of the push, so we don't
// have to worry about getting rid of the elements from the virtual
// frame.
Label loop;
// rcx is a small non-negative integer, due to the test above.
__ testl(rcx, rcx);
__ j(zero, &invoke);
__ bind(&loop);
__ push(Operand(rdx, rcx, times_pointer_size, 1 * kPointerSize));
__ decl(rcx);
__ j(not_zero, &loop);
// Invoke the function.
__ bind(&invoke);
ParameterCount actual(rax);
__ InvokeFunction(rdi, actual, CALL_FUNCTION);
// Drop applicand.apply and applicand from the stack, and push
// the result of the function call, but leave the spilled frame
// unchanged, with 3 elements, so it is correct when we compile the
// slow-case code.
__ addq(rsp, Immediate(2 * kPointerSize));
__ push(rax);
// Stack now has 1 element:
// rsp[0]: result
__ jmp(&done);
// Get the function receiver from the stack. Check that it
// really is a function.
__ movq(rdi, Operand(rsp, 2 * kPointerSize));
Condition is_smi = masm_->CheckSmi(rdi);
build_args.Branch(is_smi);
__ CmpObjectType(rdi, JS_FUNCTION_TYPE, rcx);
build_args.Branch(not_equal);
// Copy the arguments to this function possibly from the
// adaptor frame below it.
Label invoke, adapted;
__ movq(rdx, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
__ SmiCompare(Operand(rdx, StandardFrameConstants::kContextOffset),
Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
__ j(equal, &adapted);
// No arguments adaptor frame. Copy fixed number of arguments.
__ movq(rax, Immediate(scope_->num_parameters()));
for (int i = 0; i < scope_->num_parameters(); i++) {
__ push(frame_->ParameterAt(i));
// Slow-case: Allocate the arguments object since we know it isn't
// there, and fall-through to the slow-case where we call
// applicand.apply.
__ bind(&build_args);
// Stack now has 3 elements, because we have jumped here from a point where:
// rsp[0]: receiver
// rsp[1]: applicand.apply
// rsp[2]: applicand.
// StoreArgumentsObject requires a correct frame, and may modify it.
Result arguments_object = StoreArgumentsObject(false);
frame_->SpillAll();
arguments_object.ToRegister();
frame_->EmitPush(arguments_object.reg());
arguments_object.Unuse();
// Stack and frame now have 4 elements.
__ bind(&slow);
}
__ jmp(&invoke);
// Arguments adaptor frame present. Copy arguments from there, but
// avoid copying too many arguments to avoid stack overflows.
__ bind(&adapted);
static const uint32_t kArgumentsLimit = 1 * KB;
__ movq(rax, Operand(rdx, ArgumentsAdaptorFrameConstants::kLengthOffset));
__ SmiToInteger32(rax, rax);
__ movq(rcx, rax);
__ cmpq(rax, Immediate(kArgumentsLimit));
build_args.Branch(above);
// Loop through the arguments pushing them onto the execution
// stack. We don't inform the virtual frame of the push, so we don't
// have to worry about getting rid of the elements from the virtual
// frame.
Label loop;
__ testl(rcx, rcx);
__ j(zero, &invoke);
__ bind(&loop);
__ push(Operand(rdx, rcx, times_pointer_size, 1 * kPointerSize));
__ decl(rcx);
__ j(not_zero, &loop);
// Invoke the function. The virtual frame knows about the receiver
// so make sure to forget that explicitly.
__ bind(&invoke);
ParameterCount actual(rax);
__ InvokeFunction(rdi, actual, CALL_FUNCTION);
frame_->Forget(1);
Result result = allocator()->Allocate(rax);
frame_->SetElementAt(0, &result);
done.Jump();
// Slow-case: Allocate the arguments object since we know it isn't
// there, and fall-through to the slow-case where we call
// Function.prototype.apply.
build_args.Bind();
Result arguments_object = StoreArgumentsObject(false);
frame_->Push(&arguments_object);
slow.Bind();
}
// Flip the apply function and the function to call on the stack, so
// the function looks like the receiver of the apply call. This way,
// the generic Function.prototype.apply implementation can deal with
// the call like it usually does.
Result a2 = frame_->Pop();
Result a1 = frame_->Pop();
Result ap = frame_->Pop();
Result fn = frame_->Pop();
frame_->Push(&ap);
frame_->Push(&fn);
frame_->Push(&a1);
frame_->Push(&a2);
CallFunctionStub call_function(2, NOT_IN_LOOP, NO_CALL_FUNCTION_FLAGS);
Result res = frame_->CallStub(&call_function, 3);
frame_->Push(&res);
// All done. Restore context register after call.
if (try_lazy) done.Bind();
// Generic computation of x.apply(y, args) with no special optimization.
// Flip applicand.apply and applicand on the stack, so
// applicand looks like the receiver of the applicand.apply call.
// Then process it as a normal function call.
__ movq(rax, Operand(rsp, 3 * kPointerSize));
__ movq(rbx, Operand(rsp, 2 * kPointerSize));
__ movq(Operand(rsp, 2 * kPointerSize), rax);
__ movq(Operand(rsp, 3 * kPointerSize), rbx);
CallFunctionStub call_function(2, NOT_IN_LOOP, NO_CALL_FUNCTION_FLAGS);
Result res = frame_->CallStub(&call_function, 3);
// The function and its two arguments have been dropped.
frame_->Drop(1); // Drop the receiver as well.
res.ToRegister();
frame_->EmitPush(res.reg());
// Stack now has 1 element:
// rsp[0]: result
if (try_lazy) __ bind(&done);
} // End of spilled scope.
// Restore the context register after a call.
frame_->RestoreContextRegister();
}
@@ -1814,28 +1834,20 @@ void CodeGenerator::VisitForInStatement(ForInStatement* node) {
if (!each.is_illegal()) {
if (each.size() > 0) {
frame_->EmitPush(frame_->ElementAt(each.size()));
}
// If the reference was to a slot we rely on the convenient property
// that it doesn't matter whether a value (eg, ebx pushed above) is
// right on top of or right underneath a zero-sized reference.
each.SetValue(NOT_CONST_INIT);
if (each.size() > 0) {
// It's safe to pop the value lying on top of the reference before
// unloading the reference itself (which preserves the top of stack,
// ie, now the topmost value of the non-zero sized reference), since
// we will discard the top of stack after unloading the reference
// anyway.
frame_->Drop();
each.SetValue(NOT_CONST_INIT);
frame_->Drop(2); // Drop the original and the copy of the element.
} else {
// If the reference has size zero then we can use the value below
// the reference as if it were above the reference, instead of pushing
// a new copy of it above the reference.
each.SetValue(NOT_CONST_INIT);
frame_->Drop(); // Drop the original of the element.
}
}
}
// Unloading a reference may leave the frame in an unspilled state.
frame_->SpillAll();
// Discard the i'th entry pushed above or else the remainder of the
// reference, whichever is currently on top of the stack.
frame_->Drop();
// Body.
CheckStack(); // TODO(1222600): ignore if body contains calls.
VisitAndSpill(node->body());
@@ -2546,7 +2558,7 @@ void CodeGenerator::VisitCatchExtensionObject(CatchExtensionObject* node) {
void CodeGenerator::VisitAssignment(Assignment* node) {
Comment cmnt(masm_, "[ Assignment");
{ Reference target(this, node->target());
{ Reference target(this, node->target(), node->is_compound());
if (target.is_illegal()) {
// Fool the virtual frame into thinking that we left the assignment's
// value on the frame.
@@ -2568,12 +2580,27 @@ void CodeGenerator::VisitAssignment(Assignment* node) {
frame_->PushElementAt(target.size() - 1);
Result ignored = frame_->CallRuntime(Runtime::kToSlowProperties, 1);
}
if (node->ends_initialization_block()) {
// Add an extra copy of the receiver to the frame, so that it can be
// converted back to fast case after the assignment.
ASSERT(target.type() == Reference::NAMED ||
target.type() == Reference::KEYED);
if (target.type() == Reference::NAMED) {
frame_->Dup();
// Dup target receiver on stack.
} else {
ASSERT(target.type() == Reference::KEYED);
Result temp = frame_->Pop();
frame_->Dup();
frame_->Push(&temp);
}
}
if (node->op() == Token::ASSIGN ||
node->op() == Token::INIT_VAR ||
node->op() == Token::INIT_CONST) {
Load(node->value());
} else {
} else { // Assignment is a compound assignment.
Literal* literal = node->value()->AsLiteral();
bool overwrite_value =
(node->value()->AsBinaryOperation() != NULL &&
@@ -2599,6 +2626,7 @@ void CodeGenerator::VisitAssignment(Assignment* node) {
var->mode() == Variable::CONST &&
node->op() != Token::INIT_VAR && node->op() != Token::INIT_CONST) {
// Assignment ignored - leave the value on the stack.
UnloadReference(&target);
} else {
CodeForSourcePosition(node->position());
if (node->op() == Token::INIT_CONST) {
@@ -2610,13 +2638,15 @@ void CodeGenerator::VisitAssignment(Assignment* node) {
target.SetValue(NOT_CONST_INIT);
}
if (node->ends_initialization_block()) {
ASSERT(target.type() == Reference::NAMED ||
target.type() == Reference::KEYED);
ASSERT(target.type() == Reference::UNLOADED);
// End of initialization block. Revert to fast case. The
// argument to the runtime call is the receiver, which is the
// first value pushed as part of the reference, which is below
// the lhs value.
frame_->PushElementAt(target.size());
// argument to the runtime call is the extra copy of the receiver,
// which is below the value of the assignment.
// Swap the receiver and the value of the assignment expression.
Result lhs = frame_->Pop();
Result receiver = frame_->Pop();
frame_->Push(&lhs);
frame_->Push(&receiver);
Result ignored = frame_->CallRuntime(Runtime::kToFastProperties, 1);
}
}
......@@ -2784,7 +2814,7 @@ void CodeGenerator::VisitCall(Call* node) {
args->at(1)->AsVariableProxy()->IsArguments()) {
// Use the optimized Function.prototype.apply that avoids
// allocating lazily allocated arguments objects.
CallApplyLazy(property,
CallApplyLazy(property->obj(),
args->at(0),
args->at(1)->AsVariableProxy(),
node->position());
......@@ -2816,16 +2846,24 @@ void CodeGenerator::VisitCall(Call* node) {
// -------------------------------------------
// Load the function to call from the property through a reference.
Reference ref(this, property);
ref.GetValue();
// Pass receiver to called function.
if (property->is_synthetic()) {
Reference ref(this, property, false);
ref.GetValue();
// Use global object as receiver.
LoadGlobalReceiver();
} else {
// The reference's size is non-negative.
frame_->PushElementAt(ref.size());
Reference ref(this, property, false);
ASSERT(ref.size() == 2);
Result key = frame_->Pop();
frame_->Dup(); // Duplicate the receiver.
frame_->Push(&key);
ref.GetValue();
// Top of frame contains function to call, with duplicate copy of
// receiver below it. Swap them.
Result function = frame_->Pop();
Result receiver = frame_->Pop();
frame_->Push(&function);
frame_->Push(&receiver);
}
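The three frame shuffles above are easy to lose track of. A compilable toy walk-through of the same traffic (a plain vector standing in for V8's VirtualFrame; all names invented for illustration):

#include <iostream>
#include <string>
#include <utility>
#include <vector>

int main() {
  // The frame grows to the right; back() is the top of stack (TOS).
  std::vector<std::string> frame = {"receiver", "key"};

  // Pop the key, duplicate the receiver, push the key back.
  std::string key = frame.back();
  frame.pop_back();
  std::string receiver = frame.back();
  frame.push_back(receiver);  // Dup
  frame.push_back(key);
  // frame: receiver, receiver, key

  // ref.GetValue() consumes the KEYED reference (receiver and key)
  // and pushes the loaded function in their place.
  frame.pop_back();
  frame.pop_back();
  frame.push_back("function");
  // frame: receiver, function

  // Swap so the function to call sits under the surviving receiver.
  std::swap(frame[frame.size() - 1], frame[frame.size() - 2]);
  // frame: function, receiver

  for (const std::string& slot : frame) std::cout << slot << '\n';
}

The duplicated receiver is what lets GetValue consume the reference (persist_after_get is false here) without losing the receiver still needed for the call.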
// Call the function.
......@@ -3164,7 +3202,9 @@ void CodeGenerator::VisitCountOperation(CountOperation* node) {
// value will be in the frame to be spilled.
if (is_postfix) frame_->Push(Smi::FromInt(0));
{ Reference target(this, node->expression());
// A constant reference is not saved to, so the reference is not a
// compound assignment reference.
{ Reference target(this, node->expression(), !is_const);
if (target.is_illegal()) {
// Spoof the virtual frame to have the expected height (one higher
// than on entry).
......@@ -4253,14 +4293,19 @@ bool CodeGenerator::IsUnsafeSmi(Handle<Object> value) {
//------------------------------------------------------------------------------
// CodeGenerator implementation of variables, lookups, and stores.
Reference::Reference(CodeGenerator* cgen, Expression* expression)
: cgen_(cgen), expression_(expression), type_(ILLEGAL) {
Reference::Reference(CodeGenerator* cgen,
Expression* expression,
bool persist_after_get)
: cgen_(cgen),
expression_(expression),
type_(ILLEGAL),
persist_after_get_(persist_after_get) {
cgen->LoadReference(this);
}
Reference::~Reference() {
cgen_->UnloadReference(this);
ASSERT(is_unloaded() || is_illegal());
}
......@@ -4310,6 +4355,7 @@ void CodeGenerator::UnloadReference(Reference* ref) {
// Pop a reference from the stack while preserving TOS.
Comment cmnt(masm_, "[ UnloadReference");
frame_->Nip(ref->size());
ref->set_unloaded();
}
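A stand-alone sketch of the Nip operation used here, with the frame modelled as a plain vector (the real VirtualFrame also tracks registers and constants, so this is only an illustration):

#include <cassert>
#include <string>
#include <vector>

// Remove the n slots directly below the top of stack, preserving TOS.
// With n == ref->size() this pops a NAMED (1) or KEYED (2) reference
// out from under the value loaded or stored through it.
template <typename T>
void Nip(std::vector<T>& stack, int n) {
  assert(static_cast<int>(stack.size()) > n);
  T tos = stack.back();                // preserve the top of stack
  stack.resize(stack.size() - n - 1);  // drop TOS and the n slots below
  stack.push_back(tos);                // restore the preserved value
}

int main() {
  std::vector<std::string> frame = {"receiver", "key", "value"};
  Nip(frame, 2);  // unload a KEYED reference from under its value
  assert(frame.size() == 1 && frame[0] == "value");
}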
......@@ -5656,6 +5702,120 @@ Result CodeGenerator::LikelySmiBinaryOperation(Token::Value op,
}
Result CodeGenerator::EmitKeyedLoad(bool is_global) {
Comment cmnt(masm_, "[ Load from keyed Property");
// Inline array load code if inside of a loop. We do not know
// the receiver map yet, so we initially generate the code with
// a check against an invalid map. In the inline cache code, we
// patch the map check if appropriate.
if (loop_nesting() > 0) {
Comment cmnt(masm_, "[ Inlined load from keyed Property");
Result key = frame_->Pop();
Result receiver = frame_->Pop();
key.ToRegister();
receiver.ToRegister();
// Use a fresh temporary to load the elements without destroying
// the receiver which is needed for the deferred slow case.
Result elements = allocator()->Allocate();
ASSERT(elements.is_valid());
// Use a fresh temporary for the index and later the loaded
// value.
Result index = allocator()->Allocate();
ASSERT(index.is_valid());
DeferredReferenceGetKeyedValue* deferred =
new DeferredReferenceGetKeyedValue(index.reg(),
receiver.reg(),
key.reg(),
is_global);
// Check that the receiver is not a smi (only needed if this
// is not a load from the global context) and that it has the
// expected map.
if (!is_global) {
__ JumpIfSmi(receiver.reg(), deferred->entry_label());
}
// Initially, use an invalid map. The map is patched in the IC
// initialization code.
__ bind(deferred->patch_site());
// Use masm-> here instead of the double underscore macro since extra
// coverage code can interfere with the patching. Do not use
// root array to load null_value, since it must be patched with
// the expected receiver map.
masm_->movq(kScratchRegister, Factory::null_value(),
RelocInfo::EMBEDDED_OBJECT);
masm_->cmpq(FieldOperand(receiver.reg(), HeapObject::kMapOffset),
kScratchRegister);
deferred->Branch(not_equal);
// Check that the key is a non-negative smi.
__ JumpIfNotPositiveSmi(key.reg(), deferred->entry_label());
// Get the elements array from the receiver and check that it
// is not a dictionary.
__ movq(elements.reg(),
FieldOperand(receiver.reg(), JSObject::kElementsOffset));
__ Cmp(FieldOperand(elements.reg(), HeapObject::kMapOffset),
Factory::fixed_array_map());
deferred->Branch(not_equal);
// Shift the key to get the actual index value and check that
// it is within bounds.
__ SmiToInteger32(index.reg(), key.reg());
__ cmpl(index.reg(),
FieldOperand(elements.reg(), FixedArray::kLengthOffset));
deferred->Branch(above_equal);
// The index register holds the un-smi-tagged key. It has been
// zero-extended to 64 bits, so it can be used directly as an index in the
// operand below.
// Load and check that the result is not the hole. We could
// reuse the index or elements register for the value.
//
// TODO(206): Consider whether it makes sense to try some
// heuristic about which register to reuse. For example, if
// one is rax, then we can reuse that one because the value
// coming from the deferred code will be in rax.
Result value = index;
__ movq(value.reg(),
Operand(elements.reg(),
index.reg(),
times_pointer_size,
FixedArray::kHeaderSize - kHeapObjectTag));
elements.Unuse();
index.Unuse();
__ CompareRoot(value.reg(), Heap::kTheHoleValueRootIndex);
deferred->Branch(equal);
__ IncrementCounter(&Counters::keyed_load_inline, 1);
deferred->BindExit();
// Restore the receiver and key to the frame and push the
// result on top of it.
frame_->Push(&receiver);
frame_->Push(&key);
return value;
} else {
Comment cmnt(masm_, "[ Load from keyed Property");
RelocInfo::Mode mode = is_global
? RelocInfo::CODE_TARGET_CONTEXT
: RelocInfo::CODE_TARGET;
Result answer = frame_->CallKeyedLoadIC(mode);
// Make sure that we do not have a test instruction after the
// call. A test instruction after the call is used to
// indicate that we have generated an inline version of the
// keyed load. The explicit nop instruction is here because
// the push that follows might be peep-hole optimized away.
__ nop();
return answer;
}
}
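Stripped of code generation, map patching, and register allocation, the guards on this fast path are a short chain of checks. A self-contained sketch in ordinary C++ (smi tagging reduced to a one-bit shift and the hole modelled as a sentinel; none of these names are V8 API):

#include <cstdint>
#include <optional>
#include <vector>

using Tagged = intptr_t;
constexpr Tagged kTheHole = -1;  // stand-in for the hole value

// Smis are modelled with the low bit clear and the payload shifted by
// one; anything else would be a heap pointer in the real VM.
bool IsNonNegativeSmi(Tagged v) { return (v & 1) == 0 && v >= 0; }
int32_t SmiToInteger32(Tagged v) { return static_cast<int32_t>(v >> 1); }

// Returns the element when every guard passes; nullopt corresponds to
// jumping to the deferred slow case, i.e. the inline cache call.
std::optional<Tagged> KeyedLoadFastPath(const std::vector<Tagged>& elements,
                                        Tagged key) {
  if (!IsNonNegativeSmi(key)) return std::nullopt;      // key guard
  int32_t index = SmiToInteger32(key);                  // untag the key
  if (index >= static_cast<int32_t>(elements.size())) {
    return std::nullopt;                                // bounds guard
  }
  Tagged value = elements[index];
  if (value == kTheHole) return std::nullopt;           // hole guard
  return value;                                         // fast-path hit
}

The real code has additional guards this sketch omits: the smi and patched-map checks on the receiver, and the fixed-array map check on the elements object.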
#undef __
#define __ ACCESS_MASM(masm)
......@@ -5785,119 +5945,18 @@ void Reference::GetValue() {
bool is_global = var != NULL;
ASSERT(!is_global || var->is_global());
// Inline array load code if inside of a loop. We do not know
// the receiver map yet, so we initially generate the code with
// a check against an invalid map. In the inline cache code, we
// patch the map check if appropriate.
if (cgen_->loop_nesting() > 0) {
Comment cmnt(masm, "[ Inlined load from keyed Property");
Result key = cgen_->frame()->Pop();
Result receiver = cgen_->frame()->Pop();
key.ToRegister();
receiver.ToRegister();
// Use a fresh temporary to load the elements without destroying
// the receiver which is needed for the deferred slow case.
Result elements = cgen_->allocator()->Allocate();
ASSERT(elements.is_valid());
// Use a fresh temporary for the index and later the loaded
// value.
Result index = cgen_->allocator()->Allocate();
ASSERT(index.is_valid());
DeferredReferenceGetKeyedValue* deferred =
new DeferredReferenceGetKeyedValue(index.reg(),
receiver.reg(),
key.reg(),
is_global);
// Check that the receiver is not a smi (only needed if this
// is not a load from the global context) and that it has the
// expected map.
if (!is_global) {
__ JumpIfSmi(receiver.reg(), deferred->entry_label());
}
// Initially, use an invalid map. The map is patched in the IC
// initialization code.
__ bind(deferred->patch_site());
// Use masm-> here instead of the double underscore macro since extra
// coverage code can interfere with the patching.
masm->movq(kScratchRegister, Factory::null_value(),
RelocInfo::EMBEDDED_OBJECT);
masm->cmpq(FieldOperand(receiver.reg(), HeapObject::kMapOffset),
kScratchRegister);
deferred->Branch(not_equal);
// Check that the key is a non-negative smi.
__ JumpIfNotPositiveSmi(key.reg(), deferred->entry_label());
// Get the elements array from the receiver and check that it
// is not a dictionary.
__ movq(elements.reg(),
FieldOperand(receiver.reg(), JSObject::kElementsOffset));
__ Cmp(FieldOperand(elements.reg(), HeapObject::kMapOffset),
Factory::fixed_array_map());
deferred->Branch(not_equal);
// Shift the key to get the actual index value and check that
// it is within bounds.
__ SmiToInteger32(index.reg(), key.reg());
__ cmpl(index.reg(),
FieldOperand(elements.reg(), FixedArray::kLengthOffset));
deferred->Branch(above_equal);
// The index register holds the un-smi-tagged key. It has been
// zero-extended to 64 bits, so it can be used directly as an index in the
// operand below.
// Load and check that the result is not the hole. We could
// reuse the index or elements register for the value.
//
// TODO(206): Consider whether it makes sense to try some
// heuristic about which register to reuse. For example, if
// one is rax, then we can reuse that one because the value
// coming from the deferred code will be in rax.
Result value = index;
__ movq(value.reg(),
Operand(elements.reg(),
index.reg(),
times_pointer_size,
FixedArray::kHeaderSize - kHeapObjectTag));
elements.Unuse();
index.Unuse();
__ CompareRoot(value.reg(), Heap::kTheHoleValueRootIndex);
deferred->Branch(equal);
__ IncrementCounter(&Counters::keyed_load_inline, 1);
deferred->BindExit();
// Restore the receiver and key to the frame and push the
// result on top of it.
cgen_->frame()->Push(&receiver);
cgen_->frame()->Push(&key);
cgen_->frame()->Push(&value);
} else {
Comment cmnt(masm, "[ Load from keyed Property");
RelocInfo::Mode mode = is_global
? RelocInfo::CODE_TARGET_CONTEXT
: RelocInfo::CODE_TARGET;
Result answer = cgen_->frame()->CallKeyedLoadIC(mode);
// Make sure that we do not have a test instruction after the
// call. A test instruction after the call is used to
// indicate that we have generated an inline version of the
// keyed load. The explicit nop instruction is here because
// the push that follows might be peep-hole optimized away.
__ nop();
cgen_->frame()->Push(&answer);
}
Result value = cgen_->EmitKeyedLoad(is_global);
cgen_->frame()->Push(&value);
break;
}
default:
UNREACHABLE();
}
if (!persist_after_get_) {
cgen_->UnloadReference(this);
}
}
......@@ -5934,6 +5993,9 @@ void Reference::TakeValue() {
ASSERT(slot->type() == Slot::LOCAL);
cgen_->frame()->TakeLocalAt(slot->index());
}
ASSERT(persist_after_get_);
// Do not unload the reference, because it is used in SetValue.
}
......@@ -6062,6 +6124,7 @@ void Reference::SetValue(InitState init_state) {
default:
UNREACHABLE();
}
cgen_->UnloadReference(this);
}
......
// Copyright 2009 the V8 project authors. All rights reserved.
// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
......@@ -43,57 +43,70 @@ enum TypeofState { INSIDE_TYPEOF, NOT_INSIDE_TYPEOF };
// -------------------------------------------------------------------------
// Reference support
// A reference is a C++ stack-allocated object that keeps an ECMA
// reference on the execution stack while in scope. For variables
// the reference is empty, indicating that it isn't necessary to
// store state on the stack for keeping track of references to those.
// For properties, we keep either one (named) or two (indexed) values
// on the execution stack to represent the reference.
// A reference is a C++ stack-allocated object that puts a
// reference on the virtual frame. The reference may be consumed
// by GetValue, TakeValue, SetValue, and CodeGenerator::UnloadReference.
// When the lifetime (scope) of a valid reference ends, it must have
// been consumed, and be in state UNLOADED.
class Reference BASE_EMBEDDED {
public:
// The values of the types are important, see size().
enum Type { ILLEGAL = -1, SLOT = 0, NAMED = 1, KEYED = 2 };
Reference(CodeGenerator* cgen, Expression* expression);
enum Type { UNLOADED = -2, ILLEGAL = -1, SLOT = 0, NAMED = 1, KEYED = 2 };
Reference(CodeGenerator* cgen,
Expression* expression,
bool persist_after_get = false);
~Reference();
Expression* expression() const { return expression_; }
Type type() const { return type_; }
void set_type(Type value) {
ASSERT(type_ == ILLEGAL);
ASSERT_EQ(ILLEGAL, type_);
type_ = value;
}
void set_unloaded() {
ASSERT_NE(ILLEGAL, type_);
ASSERT_NE(UNLOADED, type_);
type_ = UNLOADED;
}
// The size the reference takes up on the stack.
int size() const { return (type_ == ILLEGAL) ? 0 : type_; }
int size() const {
return (type_ < SLOT) ? 0 : type_;
}
bool is_illegal() const { return type_ == ILLEGAL; }
bool is_slot() const { return type_ == SLOT; }
bool is_property() const { return type_ == NAMED || type_ == KEYED; }
bool is_unloaded() const { return type_ == UNLOADED; }
// Return the name. Only valid for named property references.
Handle<String> GetName();
// Generate code to push the value of the reference on top of the
// expression stack. The reference is expected to be already on top of
// the expression stack, and it is left in place with its value above it.
// the expression stack, and it is consumed by the call unless the
// reference is for a compound assignment.
// If the reference is not consumed, it is left in place under its value.
void GetValue();
// Like GetValue except that the slot is expected to be written to before
// being read from again. Thae value of the reference may be invalidated,
// being read from again. The value of the reference may be invalidated,
// causing subsequent attempts to read it to fail.
void TakeValue();
// Generate code to store the value on top of the expression stack in the
// reference. The reference is expected to be immediately below the value
// on the expression stack. The stored value is left in place (with the
// reference intact below it) to support chained assignments.
// on the expression stack. The value is stored in the location specified
// by the reference, and is left on top of the stack, after the reference
// is popped from beneath it (unloaded).
void SetValue(InitState init_state);
private:
CodeGenerator* cgen_;
Expression* expression_;
Type type_;
bool persist_after_get_;
};
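The class comment above describes a small state machine. A compilable toy version (ToyReference is invented for illustration, not the real class) shows how persist_after_get interacts with the destructor assertion:

#include <cassert>

class ToyReference {
 public:
  enum Type { UNLOADED = -2, ILLEGAL = -1, SLOT = 0, NAMED = 1, KEYED = 2 };

  ToyReference(Type type, bool persist_after_get = false)
      : type_(type), persist_after_get_(persist_after_get) {}

  // Mirrors Reference::~Reference(): a valid reference must have been
  // consumed by the time it goes out of scope.
  ~ToyReference() { assert(type_ == UNLOADED || type_ == ILLEGAL); }

  int size() const { return type_ < SLOT ? 0 : type_; }

  void GetValue() {
    // ...load the value through the reference onto the frame...
    if (!persist_after_get_) set_unloaded();  // reads normally consume
  }

  void SetValue() {
    // ...store the frame's top value through the reference...
    set_unloaded();  // writes always consume the reference
  }

 private:
  void set_unloaded() {
    assert(type_ != ILLEGAL && type_ != UNLOADED);
    type_ = UNLOADED;
  }

  Type type_;
  bool persist_after_get_;
};

int main() {
  // A compound assignment reads and then writes through one reference.
  ToyReference target(ToyReference::KEYED, /*persist_after_get=*/true);
  target.GetValue();  // persists: the write below still needs it
  target.SetValue();  // consumed here, so the destructor assert holds
}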
......@@ -422,6 +435,11 @@ class CodeGenerator: public AstVisitor {
// value in place.
void StoreToSlot(Slot* slot, InitState init_state);
// Load a property of an object, returning it in a Result.
// The object and the property name are passed on the stack, and
// not changed.
Result EmitKeyedLoad(bool is_global);
// Special code for typeof expressions: Unfortunately, we must
// be careful when loading the expression in 'typeof'
// expressions. We are not allowed to throw reference errors for
......@@ -478,10 +496,10 @@ class CodeGenerator: public AstVisitor {
CallFunctionFlags flags,
int position);
// Use an optimized version of Function.prototype.apply that avoids
// allocating the arguments object and just copies the arguments
// from the stack.
void CallApplyLazy(Property* apply,
// An optimized implementation of expressions of the form
// x.apply(y, arguments). We call x the applicand and y the receiver.
// The optimization avoids allocating an arguments object if possible.
void CallApplyLazy(Expression* applicand,
Expression* receiver,
VariableProxy* arguments,
int position);
......