Simplify the processing of deferred code in the code generator. Our

deferred code snippets are highly stylized.  They always make a call
to a stub or the runtime and then return.  This change takes advantage
of that.

Creating a deferred code object now captures a snapshot of the
registers in the virtual frame.  The registers are automatically saved
on entry to the deferred code and restored on exit.

The clients of deferred code must ensure that there is no change to
the registers in the virtual frame (e.g., by allocating registers,
which can cause spilling) or to the stack pointer.  That is currently
the case.

As a separate change, I will add either code to verify this constraint
or else code to forbid any frame effect.

The deferred code itself does not use the virtual frame or register
allocator (or even the code generator).  It is raw macro assembler
code.
Review URL: http://codereview.chromium.org/118226

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@2112 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
parent 1e55c821
// Copyright 2009 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef V8_ARM_CODEGEN_ARM_INL_H_
#define V8_ARM_CODEGEN_ARM_INL_H_
namespace v8 {
namespace internal {
#define __ ACCESS_MASM(masm_)
// Platform-specific inline functions.
// Emit an unconditional jump to the deferred code's entry point.
void DeferredCode::Jump() {
  __ jmp(&entry_label_);
}
// Emit a conditional branch to the deferred code's entry point,
// taken when condition cc holds.
void DeferredCode::Branch(Condition cc) {
  __ b(cc, &entry_label_);
}
#undef __
} } // namespace v8::internal
#endif // V8_ARM_CODEGEN_ARM_INL_H_
...@@ -41,6 +41,34 @@ namespace internal { ...@@ -41,6 +41,34 @@ namespace internal {
#define __ ACCESS_MASM(masm_) #define __ ACCESS_MASM(masm_)
// -------------------------------------------------------------------------
// Platform-specific DeferredCode functions.
// Spill the frame's registers so the deferred code may clobber them.
// Each registers_[i] entry is one of: kIgnore (register not in the
// frame), kPush (save on the stack), or a frame-pointer-relative
// offset, possibly tagged with kSyncedFlag when the value is already
// in memory and needs no save.
void DeferredCode::SaveRegisters() {
  int reg_index = 0;
  while (reg_index < RegisterAllocator::kNumRegisters) {
    const int action = registers_[reg_index];
    if (action == kPush) {
      __ push(RegisterAllocator::ToRegister(reg_index));
    } else if (action != kIgnore && (action & kSyncedFlag) == 0) {
      // Unsynced frame slot: write the register back to its home
      // location relative to the frame pointer.
      __ str(RegisterAllocator::ToRegister(reg_index), MemOperand(fp, action));
    }
    reg_index++;
  }
}
// Reload the registers spilled by SaveRegisters.  Pops must mirror
// the pushes in reverse, hence the descending iteration order.
void DeferredCode::RestoreRegisters() {
  int reg_index = RegisterAllocator::kNumRegisters;
  while (--reg_index >= 0) {
    int action = registers_[reg_index];
    if (action == kPush) {
      __ pop(RegisterAllocator::ToRegister(reg_index));
    } else if (action != kIgnore) {
      // Strip the synced tag to recover the frame-pointer-relative
      // offset, then reload the register from the frame slot.
      action &= ~kSyncedFlag;
      __ ldr(RegisterAllocator::ToRegister(reg_index), MemOperand(fp, action));
    }
  }
}
// ------------------------------------------------------------------------- // -------------------------------------------------------------------------
// CodeGenState implementation. // CodeGenState implementation.
...@@ -776,23 +804,14 @@ class DeferredInlineSmiOperation: public DeferredCode { ...@@ -776,23 +804,14 @@ class DeferredInlineSmiOperation: public DeferredCode {
}; };
#undef __
#define __ ACCESS_MASM(masm)
void DeferredInlineSmiOperation::Generate() { void DeferredInlineSmiOperation::Generate() {
MacroAssembler* masm = cgen()->masm();
enter()->Bind();
VirtualFrame::SpilledScope spilled_scope;
switch (op_) { switch (op_) {
case Token::ADD: { case Token::ADD: {
// Revert optimistic add.
if (reversed_) { if (reversed_) {
// revert optimistic add
__ sub(r0, r0, Operand(Smi::FromInt(value_))); __ sub(r0, r0, Operand(Smi::FromInt(value_)));
__ mov(r1, Operand(Smi::FromInt(value_))); __ mov(r1, Operand(Smi::FromInt(value_)));
} else { } else {
// revert optimistic add
__ sub(r1, r0, Operand(Smi::FromInt(value_))); __ sub(r1, r0, Operand(Smi::FromInt(value_)));
__ mov(r0, Operand(Smi::FromInt(value_))); __ mov(r0, Operand(Smi::FromInt(value_)));
} }
...@@ -800,8 +819,8 @@ void DeferredInlineSmiOperation::Generate() { ...@@ -800,8 +819,8 @@ void DeferredInlineSmiOperation::Generate() {
} }
case Token::SUB: { case Token::SUB: {
// Revert optimistic sub.
if (reversed_) { if (reversed_) {
// revert optimistic sub
__ rsb(r0, r0, Operand(Smi::FromInt(value_))); __ rsb(r0, r0, Operand(Smi::FromInt(value_)));
__ mov(r1, Operand(Smi::FromInt(value_))); __ mov(r1, Operand(Smi::FromInt(value_)));
} else { } else {
...@@ -830,31 +849,22 @@ void DeferredInlineSmiOperation::Generate() { ...@@ -830,31 +849,22 @@ void DeferredInlineSmiOperation::Generate() {
__ mov(r1, Operand(r0)); __ mov(r1, Operand(r0));
__ mov(r0, Operand(Smi::FromInt(value_))); __ mov(r0, Operand(Smi::FromInt(value_)));
} else { } else {
UNREACHABLE(); // should have been handled in SmiOperation UNREACHABLE(); // Should have been handled in SmiOperation.
} }
break; break;
} }
default: default:
// other cases should have been handled before this point. // Other cases should have been handled before this point.
UNREACHABLE(); UNREACHABLE();
break; break;
} }
GenericBinaryOpStub igostub(op_, overwrite_mode_); GenericBinaryOpStub stub(op_, overwrite_mode_);
Result arg0 = cgen()->allocator()->Allocate(r1); __ CallStub(&stub);
ASSERT(arg0.is_valid());
Result arg1 = cgen()->allocator()->Allocate(r0);
ASSERT(arg1.is_valid());
cgen()->frame()->CallStub(&igostub, &arg0, &arg1);
exit_.Jump();
} }
#undef __
#define __ ACCESS_MASM(masm_)
void CodeGenerator::SmiOperation(Token::Value op, void CodeGenerator::SmiOperation(Token::Value op,
Handle<Object> value, Handle<Object> value,
bool reversed, bool reversed,
...@@ -877,28 +887,28 @@ void CodeGenerator::SmiOperation(Token::Value op, ...@@ -877,28 +887,28 @@ void CodeGenerator::SmiOperation(Token::Value op,
switch (op) { switch (op) {
case Token::ADD: { case Token::ADD: {
DeferredCode* deferred = DeferredCode* deferred =
new DeferredInlineSmiOperation(op, int_value, reversed, mode); new DeferredInlineSmiOperation(op, int_value, reversed, mode);
__ add(r0, r0, Operand(value), SetCC); __ add(r0, r0, Operand(value), SetCC);
deferred->enter()->Branch(vs); deferred->Branch(vs);
__ tst(r0, Operand(kSmiTagMask)); __ tst(r0, Operand(kSmiTagMask));
deferred->enter()->Branch(ne); deferred->Branch(ne);
deferred->BindExit(); deferred->BindExit();
break; break;
} }
case Token::SUB: { case Token::SUB: {
DeferredCode* deferred = DeferredCode* deferred =
new DeferredInlineSmiOperation(op, int_value, reversed, mode); new DeferredInlineSmiOperation(op, int_value, reversed, mode);
if (!reversed) { if (reversed) {
__ sub(r0, r0, Operand(value), SetCC);
} else {
__ rsb(r0, r0, Operand(value), SetCC); __ rsb(r0, r0, Operand(value), SetCC);
} else {
__ sub(r0, r0, Operand(value), SetCC);
} }
deferred->enter()->Branch(vs); deferred->Branch(vs);
__ tst(r0, Operand(kSmiTagMask)); __ tst(r0, Operand(kSmiTagMask));
deferred->enter()->Branch(ne); deferred->Branch(ne);
deferred->BindExit(); deferred->BindExit();
break; break;
} }
...@@ -909,7 +919,7 @@ void CodeGenerator::SmiOperation(Token::Value op, ...@@ -909,7 +919,7 @@ void CodeGenerator::SmiOperation(Token::Value op,
DeferredCode* deferred = DeferredCode* deferred =
new DeferredInlineSmiOperation(op, int_value, reversed, mode); new DeferredInlineSmiOperation(op, int_value, reversed, mode);
__ tst(r0, Operand(kSmiTagMask)); __ tst(r0, Operand(kSmiTagMask));
deferred->enter()->Branch(ne); deferred->Branch(ne);
switch (op) { switch (op) {
case Token::BIT_OR: __ orr(r0, r0, Operand(value)); break; case Token::BIT_OR: __ orr(r0, r0, Operand(value)); break;
case Token::BIT_XOR: __ eor(r0, r0, Operand(value)); break; case Token::BIT_XOR: __ eor(r0, r0, Operand(value)); break;
...@@ -934,14 +944,14 @@ void CodeGenerator::SmiOperation(Token::Value op, ...@@ -934,14 +944,14 @@ void CodeGenerator::SmiOperation(Token::Value op,
DeferredCode* deferred = DeferredCode* deferred =
new DeferredInlineSmiOperation(op, shift_value, false, mode); new DeferredInlineSmiOperation(op, shift_value, false, mode);
__ tst(r0, Operand(kSmiTagMask)); __ tst(r0, Operand(kSmiTagMask));
deferred->enter()->Branch(ne); deferred->Branch(ne);
__ mov(r2, Operand(r0, ASR, kSmiTagSize)); // remove tags __ mov(r2, Operand(r0, ASR, kSmiTagSize)); // remove tags
switch (op) { switch (op) {
case Token::SHL: { case Token::SHL: {
__ mov(r2, Operand(r2, LSL, shift_value)); __ mov(r2, Operand(r2, LSL, shift_value));
// check that the *unsigned* result fits in a smi // check that the *unsigned* result fits in a smi
__ add(r3, r2, Operand(0x40000000), SetCC); __ add(r3, r2, Operand(0x40000000), SetCC);
deferred->enter()->Branch(mi); deferred->Branch(mi);
break; break;
} }
case Token::SHR: { case Token::SHR: {
...@@ -956,7 +966,7 @@ void CodeGenerator::SmiOperation(Token::Value op, ...@@ -956,7 +966,7 @@ void CodeGenerator::SmiOperation(Token::Value op,
// smi tagging these two cases can only happen with shifts // smi tagging these two cases can only happen with shifts
// by 0 or 1 when handed a valid smi // by 0 or 1 when handed a valid smi
__ and_(r3, r2, Operand(0xc0000000), SetCC); __ and_(r3, r2, Operand(0xc0000000), SetCC);
deferred->enter()->Branch(ne); deferred->Branch(ne);
break; break;
} }
case Token::SAR: { case Token::SAR: {
...@@ -2670,40 +2680,25 @@ class DeferredObjectLiteral: public DeferredCode { ...@@ -2670,40 +2680,25 @@ class DeferredObjectLiteral: public DeferredCode {
}; };
#undef __
#define __ ACCESS_MASM(masm)
void DeferredObjectLiteral::Generate() { void DeferredObjectLiteral::Generate() {
MacroAssembler* masm = cgen()->masm();
// Argument is passed in r1. // Argument is passed in r1.
enter()->Bind();
VirtualFrame::SpilledScope spilled_scope;
// If the entry is undefined we call the runtime system to compute // If the entry is undefined we call the runtime system to compute
// the literal. // the literal.
VirtualFrame* frame = cgen()->frame();
// Literal array (0). // Literal array (0).
frame->EmitPush(r1); __ push(r1);
// Literal index (1). // Literal index (1).
__ mov(r0, Operand(Smi::FromInt(node_->literal_index()))); __ mov(r0, Operand(Smi::FromInt(node_->literal_index())));
frame->EmitPush(r0); __ push(r0);
// Constant properties (2). // Constant properties (2).
__ mov(r0, Operand(node_->constant_properties())); __ mov(r0, Operand(node_->constant_properties()));
frame->EmitPush(r0); __ push(r0);
Result boilerplate = __ CallRuntime(Runtime::kCreateObjectLiteralBoilerplate, 3);
frame->CallRuntime(Runtime::kCreateObjectLiteralBoilerplate, 3); __ mov(r2, Operand(r0));
__ mov(r2, Operand(boilerplate.reg()));
// Result is returned in r2. // Result is returned in r2.
exit_.Jump();
} }
#undef __
#define __ ACCESS_MASM(masm_)
void CodeGenerator::VisitObjectLiteral(ObjectLiteral* node) { void CodeGenerator::VisitObjectLiteral(ObjectLiteral* node) {
#ifdef DEBUG #ifdef DEBUG
int original_height = frame_->height(); int original_height = frame_->height();
...@@ -2729,7 +2724,7 @@ void CodeGenerator::VisitObjectLiteral(ObjectLiteral* node) { ...@@ -2729,7 +2724,7 @@ void CodeGenerator::VisitObjectLiteral(ObjectLiteral* node) {
// Check whether we need to materialize the object literal boilerplate. // Check whether we need to materialize the object literal boilerplate.
// If so, jump to the deferred code. // If so, jump to the deferred code.
__ cmp(r2, Operand(Factory::undefined_value())); __ cmp(r2, Operand(Factory::undefined_value()));
deferred->enter()->Branch(eq); deferred->Branch(eq);
deferred->BindExit(); deferred->BindExit();
// Push the object literal boilerplate. // Push the object literal boilerplate.
...@@ -2807,40 +2802,25 @@ class DeferredArrayLiteral: public DeferredCode { ...@@ -2807,40 +2802,25 @@ class DeferredArrayLiteral: public DeferredCode {
}; };
#undef __
#define __ ACCESS_MASM(masm)
void DeferredArrayLiteral::Generate() { void DeferredArrayLiteral::Generate() {
MacroAssembler* masm = cgen()->masm();
// Argument is passed in r1. // Argument is passed in r1.
enter()->Bind();
VirtualFrame::SpilledScope spilled_scope;
// If the entry is undefined we call the runtime system to computed // If the entry is undefined we call the runtime system to computed
// the literal. // the literal.
VirtualFrame* frame = cgen()->frame();
// Literal array (0). // Literal array (0).
frame->EmitPush(r1); __ push(r1);
// Literal index (1). // Literal index (1).
__ mov(r0, Operand(Smi::FromInt(node_->literal_index()))); __ mov(r0, Operand(Smi::FromInt(node_->literal_index())));
frame->EmitPush(r0); __ push(r0);
// Constant properties (2). // Constant properties (2).
__ mov(r0, Operand(node_->literals())); __ mov(r0, Operand(node_->literals()));
frame->EmitPush(r0); __ push(r0);
Result boilerplate = __ CallRuntime(Runtime::kCreateArrayLiteralBoilerplate, 3);
frame->CallRuntime(Runtime::kCreateArrayLiteralBoilerplate, 3); __ mov(r2, Operand(r0));
__ mov(r2, Operand(boilerplate.reg()));
// Result is returned in r2. // Result is returned in r2.
exit_.Jump();
} }
#undef __
#define __ ACCESS_MASM(masm_)
void CodeGenerator::VisitArrayLiteral(ArrayLiteral* node) { void CodeGenerator::VisitArrayLiteral(ArrayLiteral* node) {
#ifdef DEBUG #ifdef DEBUG
int original_height = frame_->height(); int original_height = frame_->height();
...@@ -2866,7 +2846,7 @@ void CodeGenerator::VisitArrayLiteral(ArrayLiteral* node) { ...@@ -2866,7 +2846,7 @@ void CodeGenerator::VisitArrayLiteral(ArrayLiteral* node) {
// Check whether we need to materialize the object literal boilerplate. // Check whether we need to materialize the object literal boilerplate.
// If so, jump to the deferred code. // If so, jump to the deferred code.
__ cmp(r2, Operand(Factory::undefined_value())); __ cmp(r2, Operand(Factory::undefined_value()));
deferred->enter()->Branch(eq); deferred->Branch(eq);
deferred->BindExit(); deferred->BindExit();
// Push the object literal boilerplate. // Push the object literal boilerplate.
......
...@@ -525,6 +525,8 @@ class VirtualFrame : public ZoneObject { ...@@ -525,6 +525,8 @@ class VirtualFrame : public ZoneObject {
bool Equals(VirtualFrame* other); bool Equals(VirtualFrame* other);
// Classes that need raw access to the elements_ array.
friend class DeferredCode;
friend class JumpTarget; friend class JumpTarget;
}; };
......
...@@ -32,27 +32,21 @@ ...@@ -32,27 +32,21 @@
#include "codegen.h" #include "codegen.h"
#include "register-allocator-inl.h" #include "register-allocator-inl.h"
namespace v8 { #if V8_TARGET_ARCH_IA32
namespace internal { #include "ia32/codegen-ia32-inl.h"
#elif V8_TARGET_ARCH_X64
#include "x64/codegen-x64-inl.h"
void DeferredCode::SetEntryFrame(Result* arg) { #elif V8_TARGET_ARCH_ARM
ASSERT(cgen()->has_valid_frame()); #include "arm/codegen-arm-inl.h"
cgen()->frame()->Push(arg); #else
enter()->set_entry_frame(new VirtualFrame(cgen()->frame())); #error Unsupported target architecture.
*arg = cgen()->frame()->Pop(); #endif
}
void DeferredCode::SetEntryFrame(Result* arg0, Result* arg1) { namespace v8 {
ASSERT(cgen()->has_valid_frame()); namespace internal {
cgen()->frame()->Push(arg0);
cgen()->frame()->Push(arg1);
enter()->set_entry_frame(new VirtualFrame(cgen()->frame()));
*arg1 = cgen()->frame()->Pop();
*arg0 = cgen()->frame()->Pop();
}
#define __ ACCESS_MASM(masm_)
// ----------------------------------------------------------------------------- // -----------------------------------------------------------------------------
// Support for "structured" code comments. // Support for "structured" code comments.
...@@ -64,15 +58,12 @@ void DeferredCode::SetEntryFrame(Result* arg0, Result* arg1) { ...@@ -64,15 +58,12 @@ void DeferredCode::SetEntryFrame(Result* arg0, Result* arg1) {
class Comment BASE_EMBEDDED { class Comment BASE_EMBEDDED {
public: public:
Comment(MacroAssembler* masm, const char* msg) Comment(MacroAssembler* masm, const char* msg) : masm_(masm), msg_(msg) {
: masm_(masm), __ RecordComment(msg);
msg_(msg) {
masm_->RecordComment(msg);
} }
~Comment() { ~Comment() {
if (msg_[0] == '[') if (msg_[0] == '[') __ RecordComment("]");
masm_->RecordComment("]");
} }
private: private:
...@@ -89,6 +80,8 @@ class Comment BASE_EMBEDDED { ...@@ -89,6 +80,8 @@ class Comment BASE_EMBEDDED {
#endif // DEBUG #endif // DEBUG
#undef __
} } // namespace v8::internal } } // namespace v8::internal
......
...@@ -45,33 +45,54 @@ namespace internal { ...@@ -45,33 +45,54 @@ namespace internal {
CodeGenerator* CodeGeneratorScope::top_ = NULL; CodeGenerator* CodeGeneratorScope::top_ = NULL;
DeferredCode::DeferredCode() : exit_(JumpTarget::BIDIRECTIONAL) { DeferredCode::DeferredCode()
MacroAssembler* masm = cgen()->masm(); : masm_(CodeGeneratorScope::Current()->masm()),
statement_position_ = masm->current_statement_position(); statement_position_(masm_->current_statement_position()),
position_ = masm->current_position(); position_(masm_->current_position()) {
ASSERT(statement_position_ != RelocInfo::kNoPosition); ASSERT(statement_position_ != RelocInfo::kNoPosition);
ASSERT(position_ != RelocInfo::kNoPosition); ASSERT(position_ != RelocInfo::kNoPosition);
cgen()->AddDeferred(this); CodeGeneratorScope::Current()->AddDeferred(this);
#ifdef DEBUG #ifdef DEBUG
comment_ = ""; comment_ = "";
#endif #endif
// Copy the register locations from the code generator's frame.
// These are the registers that will be spilled on entry to the
// deferred code and restored on exit.
VirtualFrame* frame = CodeGeneratorScope::Current()->frame();
int sp_offset = frame->fp_relative(frame->stack_pointer_);
for (int i = 0; i < RegisterAllocator::kNumRegisters; i++) {
int loc = frame->register_location(i);
if (loc == VirtualFrame::kIllegalIndex) {
registers_[i] = kIgnore;
} else if (frame->elements_[loc].is_synced()) {
// Needs to be restored on exit but not saved on entry.
registers_[i] = frame->fp_relative(loc) | kSyncedFlag;
} else {
int offset = frame->fp_relative(loc);
registers_[i] = (offset < sp_offset) ? kPush : offset;
}
}
} }
void CodeGenerator::ProcessDeferred() { void CodeGenerator::ProcessDeferred() {
while (!deferred_.is_empty()) { while (!deferred_.is_empty()) {
DeferredCode* code = deferred_.RemoveLast(); DeferredCode* code = deferred_.RemoveLast();
MacroAssembler* masm = code->cgen()->masm(); ASSERT(masm_ == code->masm());
// Record position of deferred code stub. // Record position of deferred code stub.
masm->RecordStatementPosition(code->statement_position()); masm_->RecordStatementPosition(code->statement_position());
if (code->position() != RelocInfo::kNoPosition) { if (code->position() != RelocInfo::kNoPosition) {
masm->RecordPosition(code->position()); masm_->RecordPosition(code->position());
} }
// Generate the code. // Generate the code.
Comment cmnt(masm, code->comment()); Comment cmnt(masm_, code->comment());
masm_->bind(code->entry_label());
code->SaveRegisters();
code->Generate(); code->Generate();
ASSERT(code->enter()->is_bound()); code->RestoreRegisters();
masm_->jmp(code->exit_label());
} }
} }
......
...@@ -125,29 +125,14 @@ class DeferredCode: public ZoneObject { ...@@ -125,29 +125,14 @@ class DeferredCode: public ZoneObject {
virtual void Generate() = 0; virtual void Generate() = 0;
CodeGenerator* cgen() const { return CodeGeneratorScope::Current(); } MacroAssembler* masm() { return masm_; }
// Set the virtual frame for entry to the deferred code as a
// snapshot of the code generator's current frame (plus additional
// results). This is optional, but should be done before branching
// or jumping to the deferred code.
inline void SetEntryFrame(Result* arg);
inline void SetEntryFrame(Result* arg0, Result* arg1);
JumpTarget* enter() { return &enter_; }
void BindExit() { exit_.Bind(0); }
void BindExit(Result* result) { exit_.Bind(result, 1); }
void BindExit(Result* result0, Result* result1) {
exit_.Bind(result0, result1, 2);
}
void BindExit(Result* result0, Result* result1, Result* result2) {
exit_.Bind(result0, result1, result2, 3);
}
int statement_position() const { return statement_position_; } int statement_position() const { return statement_position_; }
int position() const { return position_; } int position() const { return position_; }
Label* entry_label() { return &entry_label_; }
Label* exit_label() { return &exit_label_; }
#ifdef DEBUG #ifdef DEBUG
void set_comment(const char* comment) { comment_ = comment; } void set_comment(const char* comment) { comment_ = comment; }
const char* comment() const { return comment_; } const char* comment() const { return comment_; }
...@@ -156,13 +141,35 @@ class DeferredCode: public ZoneObject { ...@@ -156,13 +141,35 @@ class DeferredCode: public ZoneObject {
const char* comment() const { return ""; } const char* comment() const { return ""; }
#endif #endif
inline void Jump();
inline void Branch(Condition cc);
void BindExit() { masm_->bind(&exit_label_); }
void SaveRegisters();
void RestoreRegisters();
protected: protected:
JumpTarget enter_; MacroAssembler* masm_;
JumpTarget exit_;
private: private:
// Constants indicating special actions. They should not be multiples
// of kPointerSize so they will not collide with valid offsets from
// the frame pointer.
static const int kIgnore = -1;
static const int kPush = 1;
// This flag is ored with a valid offset from the frame pointer, so
// it should fit in the low zero bits of a valid offset.
static const int kSyncedFlag = 2;
int statement_position_; int statement_position_;
int position_; int position_;
Label entry_label_;
Label exit_label_;
int registers_[RegisterAllocator::kNumRegisters];
#ifdef DEBUG #ifdef DEBUG
const char* comment_; const char* comment_;
#endif #endif
......
// Copyright 2009 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef V8_IA32_CODEGEN_IA32_INL_H_
#define V8_IA32_CODEGEN_IA32_INL_H_
namespace v8 {
namespace internal {
#define __ ACCESS_MASM(masm_)
// Platform-specific inline functions.
// Emit an unconditional jump to the deferred code's entry point.
void DeferredCode::Jump() {
  __ jmp(&entry_label_);
}
// Emit a conditional jump to the deferred code's entry point, taken
// when condition cc holds.
void DeferredCode::Branch(Condition cc) {
  __ j(cc, &entry_label_);
}
#undef __
} } // namespace v8::internal
#endif // V8_IA32_CODEGEN_IA32_INL_H_
...@@ -41,6 +41,35 @@ namespace internal { ...@@ -41,6 +41,35 @@ namespace internal {
#define __ ACCESS_MASM(masm_) #define __ ACCESS_MASM(masm_)
// -------------------------------------------------------------------------
// Platform-specific DeferredCode functions.
// Spill the frame's registers so the deferred code may clobber them.
// Each registers_[i] entry is one of: kIgnore (register not in the
// frame), kPush (save on the stack), or a frame-pointer-relative
// offset, possibly tagged with kSyncedFlag when the value is already
// in memory and needs no save.
void DeferredCode::SaveRegisters() {
  int reg_index = 0;
  while (reg_index < RegisterAllocator::kNumRegisters) {
    const int action = registers_[reg_index];
    if (action == kPush) {
      __ push(RegisterAllocator::ToRegister(reg_index));
    } else if (action != kIgnore && (action & kSyncedFlag) == 0) {
      // Unsynced frame slot: write the register back to its home
      // location relative to ebp.
      __ mov(Operand(ebp, action), RegisterAllocator::ToRegister(reg_index));
    }
    reg_index++;
  }
}
// Reload the registers spilled by SaveRegisters.  Pops must mirror
// the pushes in reverse, hence the descending iteration order.
void DeferredCode::RestoreRegisters() {
  int reg_index = RegisterAllocator::kNumRegisters;
  while (--reg_index >= 0) {
    int action = registers_[reg_index];
    if (action == kPush) {
      __ pop(RegisterAllocator::ToRegister(reg_index));
    } else if (action != kIgnore) {
      // Strip the synced tag to recover the frame-pointer-relative
      // offset, then reload the register from the frame slot.
      action &= ~kSyncedFlag;
      __ mov(RegisterAllocator::ToRegister(reg_index), Operand(ebp, action));
    }
  }
}
// ------------------------------------------------------------------------- // -------------------------------------------------------------------------
// CodeGenState implementation. // CodeGenState implementation.
...@@ -73,7 +102,8 @@ CodeGenState::~CodeGenState() { ...@@ -73,7 +102,8 @@ CodeGenState::~CodeGenState() {
// ------------------------------------------------------------------------- // -------------------------------------------------------------------------
// CodeGenerator implementation // CodeGenerator implementation
CodeGenerator::CodeGenerator(int buffer_size, Handle<Script> script, CodeGenerator::CodeGenerator(int buffer_size,
Handle<Script> script,
bool is_eval) bool is_eval)
: is_eval_(is_eval), : is_eval_(is_eval),
script_(script), script_(script),
...@@ -779,8 +809,12 @@ const char* GenericBinaryOpStub::GetName() { ...@@ -779,8 +809,12 @@ const char* GenericBinaryOpStub::GetName() {
// Call the specialized stub for a binary operation. // Call the specialized stub for a binary operation.
class DeferredInlineBinaryOperation: public DeferredCode { class DeferredInlineBinaryOperation: public DeferredCode {
public: public:
DeferredInlineBinaryOperation(Token::Value op, OverwriteMode mode) DeferredInlineBinaryOperation(Token::Value op,
: op_(op), mode_(mode) { Register dst,
Register left,
Register right,
OverwriteMode mode)
: op_(op), dst_(dst), left_(left), right_(right), mode_(mode) {
set_comment("[ DeferredInlineBinaryOperation"); set_comment("[ DeferredInlineBinaryOperation");
} }
...@@ -788,19 +822,19 @@ class DeferredInlineBinaryOperation: public DeferredCode { ...@@ -788,19 +822,19 @@ class DeferredInlineBinaryOperation: public DeferredCode {
private: private:
Token::Value op_; Token::Value op_;
Register dst_;
Register left_;
Register right_;
OverwriteMode mode_; OverwriteMode mode_;
}; };
void DeferredInlineBinaryOperation::Generate() { void DeferredInlineBinaryOperation::Generate() {
Result left; __ push(left_);
Result right; __ push(right_);
enter()->Bind(&left, &right);
cgen()->frame()->Push(&left);
cgen()->frame()->Push(&right);
GenericBinaryOpStub stub(op_, mode_, SMI_CODE_INLINED); GenericBinaryOpStub stub(op_, mode_, SMI_CODE_INLINED);
Result answer = cgen()->frame()->CallStub(&stub, 2); __ CallStub(&stub);
exit_.Jump(&answer); if (!dst_.is(eax)) __ mov(dst_, eax);
} }
...@@ -996,15 +1030,12 @@ bool CodeGenerator::FoldConstantSmis(Token::Value op, int left, int right) { ...@@ -996,15 +1030,12 @@ bool CodeGenerator::FoldConstantSmis(Token::Value op, int left, int right) {
} }
// Implements a binary operation using a deferred code object and some
// inline code to operate on smis quickly.
void CodeGenerator::LikelySmiBinaryOperation(Token::Value op, void CodeGenerator::LikelySmiBinaryOperation(Token::Value op,
Result* left, Result* left,
Result* right, Result* right,
OverwriteMode overwrite_mode) { OverwriteMode overwrite_mode) {
// Implements a binary operation using a deferred code object and some
// inline code to operate on smis quickly.
DeferredInlineBinaryOperation* deferred =
new DeferredInlineBinaryOperation(op, overwrite_mode);
// Special handling of div and mod because they use fixed registers. // Special handling of div and mod because they use fixed registers.
if (op == Token::DIV || op == Token::MOD) { if (op == Token::DIV || op == Token::MOD) {
// We need eax as the quotient register, edx as the remainder // We need eax as the quotient register, edx as the remainder
...@@ -1069,30 +1100,34 @@ void CodeGenerator::LikelySmiBinaryOperation(Token::Value op, ...@@ -1069,30 +1100,34 @@ void CodeGenerator::LikelySmiBinaryOperation(Token::Value op,
left->ToRegister(); left->ToRegister();
right->ToRegister(); right->ToRegister();
frame_->Spill(quotient.reg()); frame_->Spill(eax);
frame_->Spill(remainder.reg()); frame_->Spill(edx);
// Check that left and right are smi tagged. // Check that left and right are smi tagged.
DeferredInlineBinaryOperation* deferred =
new DeferredInlineBinaryOperation(op,
(op == Token::DIV) ? eax : edx,
left->reg(),
right->reg(),
overwrite_mode);
if (left->reg().is(right->reg())) { if (left->reg().is(right->reg())) {
__ test(left->reg(), Immediate(kSmiTagMask)); __ test(left->reg(), Immediate(kSmiTagMask));
} else { } else {
// Use the quotient register as a scratch for the tag check. // Use the quotient register as a scratch for the tag check.
if (!left_is_in_eax) __ mov(quotient.reg(), left->reg()); if (!left_is_in_eax) __ mov(eax, left->reg());
left_is_in_eax = false; left_is_in_eax = false; // About to destroy the value in eax.
__ or_(quotient.reg(), Operand(right->reg())); __ or_(eax, Operand(right->reg()));
ASSERT(kSmiTag == 0); // Adjust test if not the case. ASSERT(kSmiTag == 0); // Adjust test if not the case.
__ test(quotient.reg(), Immediate(kSmiTagMask)); __ test(eax, Immediate(kSmiTagMask));
} }
deferred->SetEntryFrame(left, right); deferred->Branch(not_zero);
deferred->enter()->Branch(not_zero, left, right);
if (!left_is_in_eax) __ mov(quotient.reg(), left->reg());
if (!left_is_in_eax) __ mov(eax, left->reg());
// Sign extend eax into edx:eax. // Sign extend eax into edx:eax.
__ cdq(); __ cdq();
// Check for 0 divisor. // Check for 0 divisor.
__ test(right->reg(), Operand(right->reg())); __ test(right->reg(), Operand(right->reg()));
deferred->enter()->Branch(zero, left, right); deferred->Branch(zero);
// Divide edx:eax by the right operand. // Divide edx:eax by the right operand.
__ idiv(right->reg()); __ idiv(right->reg());
...@@ -1106,42 +1141,39 @@ void CodeGenerator::LikelySmiBinaryOperation(Token::Value op, ...@@ -1106,42 +1141,39 @@ void CodeGenerator::LikelySmiBinaryOperation(Token::Value op,
__ test(left->reg(), Operand(left->reg())); __ test(left->reg(), Operand(left->reg()));
__ j(not_zero, &non_zero_result); __ j(not_zero, &non_zero_result);
__ test(right->reg(), Operand(right->reg())); __ test(right->reg(), Operand(right->reg()));
deferred->enter()->Branch(negative, left, right); deferred->Branch(negative);
__ bind(&non_zero_result); __ bind(&non_zero_result);
// Check for the corner case of dividing the most negative smi by // Check for the corner case of dividing the most negative smi by
// -1. We cannot use the overflow flag, since it is not set by // -1. We cannot use the overflow flag, since it is not set by
// idiv instruction. // idiv instruction.
ASSERT(kSmiTag == 0 && kSmiTagSize == 1); ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
__ cmp(quotient.reg(), 0x40000000); __ cmp(eax, 0x40000000);
deferred->enter()->Branch(equal, left, right); deferred->Branch(equal);
// Check that the remainder is zero. // Check that the remainder is zero.
__ test(remainder.reg(), Operand(remainder.reg())); __ test(edx, Operand(edx));
remainder.Unuse(); deferred->Branch(not_zero);
deferred->enter()->Branch(not_zero, left, right);
left->Unuse();
right->Unuse();
// Tag the result and store it in the quotient register. // Tag the result and store it in the quotient register.
ASSERT(kSmiTagSize == times_2); // adjust code if not the case ASSERT(kSmiTagSize == times_2); // adjust code if not the case
__ lea(quotient.reg(), __ lea(eax, Operand(eax, eax, times_1, kSmiTag));
Operand(quotient.reg(), quotient.reg(), times_1, kSmiTag)); deferred->BindExit();
deferred->BindExit(&quotient); left->Unuse();
right->Unuse();
frame_->Push(&quotient); frame_->Push(&quotient);
} else { } else {
ASSERT(op == Token::MOD); ASSERT(op == Token::MOD);
quotient.Unuse();
// Check for a negative zero result. If the result is zero, and // Check for a negative zero result. If the result is zero, and
// the dividend is negative, return a floating point negative // the dividend is negative, return a floating point negative
// zero. The frame is unchanged in this block, so local control // zero. The frame is unchanged in this block, so local control
// flow can use a Label rather than a JumpTarget. // flow can use a Label rather than a JumpTarget.
Label non_zero_result; Label non_zero_result;
__ test(remainder.reg(), Operand(remainder.reg())); __ test(edx, Operand(edx));
__ j(not_zero, &non_zero_result, taken); __ j(not_zero, &non_zero_result, taken);
__ test(left->reg(), Operand(left->reg())); __ test(left->reg(), Operand(left->reg()));
deferred->enter()->Branch(negative, left, right); deferred->Branch(negative);
__ bind(&non_zero_result);
deferred->BindExit();
left->Unuse(); left->Unuse();
right->Unuse(); right->Unuse();
__ bind(&non_zero_result);
deferred->BindExit(&remainder);
frame_->Push(&remainder); frame_->Push(&remainder);
} }
return; return;
...@@ -1169,10 +1201,16 @@ void CodeGenerator::LikelySmiBinaryOperation(Token::Value op, ...@@ -1169,10 +1201,16 @@ void CodeGenerator::LikelySmiBinaryOperation(Token::Value op,
ASSERT(answer.is_valid()); ASSERT(answer.is_valid());
// Check that both operands are smis using the answer register as a // Check that both operands are smis using the answer register as a
// temporary. // temporary.
DeferredInlineBinaryOperation* deferred =
new DeferredInlineBinaryOperation(op,
answer.reg(),
left->reg(),
ecx,
overwrite_mode);
__ mov(answer.reg(), left->reg()); __ mov(answer.reg(), left->reg());
__ or_(answer.reg(), Operand(ecx)); __ or_(answer.reg(), Operand(ecx));
__ test(answer.reg(), Immediate(kSmiTagMask)); __ test(answer.reg(), Immediate(kSmiTagMask));
deferred->enter()->Branch(not_zero, left, right); deferred->Branch(not_zero);
// Untag both operands. // Untag both operands.
__ mov(answer.reg(), left->reg()); __ mov(answer.reg(), left->reg());
...@@ -1185,7 +1223,7 @@ void CodeGenerator::LikelySmiBinaryOperation(Token::Value op, ...@@ -1185,7 +1223,7 @@ void CodeGenerator::LikelySmiBinaryOperation(Token::Value op,
// No checks of result necessary // No checks of result necessary
break; break;
case Token::SHR: { case Token::SHR: {
JumpTarget result_ok; Label result_ok;
__ shr(answer.reg()); __ shr(answer.reg());
// Check that the *unsigned* result fits in a smi. Neither of // Check that the *unsigned* result fits in a smi. Neither of
// the two high-order bits can be set: // the two high-order bits can be set:
...@@ -1198,37 +1236,35 @@ void CodeGenerator::LikelySmiBinaryOperation(Token::Value op, ...@@ -1198,37 +1236,35 @@ void CodeGenerator::LikelySmiBinaryOperation(Token::Value op,
// case. The low bit of the left argument may be lost, but only // case. The low bit of the left argument may be lost, but only
// in a case where it is dropped anyway. // in a case where it is dropped anyway.
__ test(answer.reg(), Immediate(0xc0000000)); __ test(answer.reg(), Immediate(0xc0000000));
result_ok.Branch(zero, &answer); __ j(zero, &result_ok);
ASSERT(kSmiTag == 0); ASSERT(kSmiTag == 0);
__ shl(ecx, kSmiTagSize); __ shl(ecx, kSmiTagSize);
answer.Unuse(); deferred->Jump();
deferred->enter()->Jump(left, right); __ bind(&result_ok);
result_ok.Bind(&answer);
break; break;
} }
case Token::SHL: { case Token::SHL: {
JumpTarget result_ok; Label result_ok;
__ shl(answer.reg()); __ shl(answer.reg());
// Check that the *signed* result fits in a smi. // Check that the *signed* result fits in a smi.
__ cmp(answer.reg(), 0xc0000000); __ cmp(answer.reg(), 0xc0000000);
result_ok.Branch(positive, &answer); __ j(positive, &result_ok);
ASSERT(kSmiTag == 0); ASSERT(kSmiTag == 0);
__ shl(ecx, kSmiTagSize); __ shl(ecx, kSmiTagSize);
answer.Unuse(); deferred->Jump();
deferred->enter()->Jump(left, right); __ bind(&result_ok);
result_ok.Bind(&answer);
break; break;
} }
default: default:
UNREACHABLE(); UNREACHABLE();
} }
left->Unuse();
right->Unuse();
// Smi-tag the result in answer. // Smi-tag the result in answer.
ASSERT(kSmiTagSize == 1); // Adjust code if not the case. ASSERT(kSmiTagSize == 1); // Adjust code if not the case.
__ lea(answer.reg(), __ lea(answer.reg(),
Operand(answer.reg(), answer.reg(), times_1, kSmiTag)); Operand(answer.reg(), answer.reg(), times_1, kSmiTag));
deferred->BindExit(&answer); deferred->BindExit();
left->Unuse();
right->Unuse();
frame_->Push(&answer); frame_->Push(&answer);
return; return;
} }
...@@ -1240,9 +1276,15 @@ void CodeGenerator::LikelySmiBinaryOperation(Token::Value op, ...@@ -1240,9 +1276,15 @@ void CodeGenerator::LikelySmiBinaryOperation(Token::Value op,
// registers containing left and right are not modified so they don't // registers containing left and right are not modified so they don't
// need to be spilled in the fast case. // need to be spilled in the fast case.
Result answer = allocator_->Allocate(); Result answer = allocator_->Allocate();
ASSERT(answer.is_valid()); ASSERT(answer.is_valid());
// Perform the smi tag check. // Perform the smi tag check.
DeferredInlineBinaryOperation* deferred =
new DeferredInlineBinaryOperation(op,
answer.reg(),
left->reg(),
right->reg(),
overwrite_mode);
if (left->reg().is(right->reg())) { if (left->reg().is(right->reg())) {
__ test(left->reg(), Immediate(kSmiTagMask)); __ test(left->reg(), Immediate(kSmiTagMask));
} else { } else {
...@@ -1251,27 +1293,20 @@ void CodeGenerator::LikelySmiBinaryOperation(Token::Value op, ...@@ -1251,27 +1293,20 @@ void CodeGenerator::LikelySmiBinaryOperation(Token::Value op,
ASSERT(kSmiTag == 0); // Adjust test if not the case. ASSERT(kSmiTag == 0); // Adjust test if not the case.
__ test(answer.reg(), Immediate(kSmiTagMask)); __ test(answer.reg(), Immediate(kSmiTagMask));
} }
deferred->Branch(not_zero);
__ mov(answer.reg(), left->reg());
switch (op) { switch (op) {
case Token::ADD: case Token::ADD:
deferred->SetEntryFrame(left, right);
deferred->enter()->Branch(not_zero, left, right, not_taken);
__ mov(answer.reg(), left->reg());
__ add(answer.reg(), Operand(right->reg())); // Add optimistically. __ add(answer.reg(), Operand(right->reg())); // Add optimistically.
deferred->enter()->Branch(overflow, left, right, not_taken); deferred->Branch(overflow);
break; break;
case Token::SUB: case Token::SUB:
deferred->SetEntryFrame(left, right);
deferred->enter()->Branch(not_zero, left, right, not_taken);
__ mov(answer.reg(), left->reg());
__ sub(answer.reg(), Operand(right->reg())); // Subtract optimistically. __ sub(answer.reg(), Operand(right->reg())); // Subtract optimistically.
deferred->enter()->Branch(overflow, left, right, not_taken); deferred->Branch(overflow);
break; break;
case Token::MUL: { case Token::MUL: {
deferred->SetEntryFrame(left, right);
deferred->enter()->Branch(not_zero, left, right, not_taken);
__ mov(answer.reg(), left->reg());
// If the smi tag is 0 we can just leave the tag on one operand. // If the smi tag is 0 we can just leave the tag on one operand.
ASSERT(kSmiTag == 0); // Adjust code below if not the case. ASSERT(kSmiTag == 0); // Adjust code below if not the case.
// Remove smi tag from the left operand (but keep sign). // Remove smi tag from the left operand (but keep sign).
...@@ -1280,7 +1315,7 @@ void CodeGenerator::LikelySmiBinaryOperation(Token::Value op, ...@@ -1280,7 +1315,7 @@ void CodeGenerator::LikelySmiBinaryOperation(Token::Value op,
// Do multiplication of smis, leaving result in answer. // Do multiplication of smis, leaving result in answer.
__ imul(answer.reg(), Operand(right->reg())); __ imul(answer.reg(), Operand(right->reg()));
// Go slow on overflows. // Go slow on overflows.
deferred->enter()->Branch(overflow, left, right, not_taken); deferred->Branch(overflow);
// Check for negative zero result. If product is zero, and one // Check for negative zero result. If product is zero, and one
// argument is negative, go to slow case. The frame is unchanged // argument is negative, go to slow case. The frame is unchanged
// in this block, so local control flow can use a Label rather // in this block, so local control flow can use a Label rather
...@@ -1290,27 +1325,21 @@ void CodeGenerator::LikelySmiBinaryOperation(Token::Value op, ...@@ -1290,27 +1325,21 @@ void CodeGenerator::LikelySmiBinaryOperation(Token::Value op,
__ j(not_zero, &non_zero_result, taken); __ j(not_zero, &non_zero_result, taken);
__ mov(answer.reg(), left->reg()); __ mov(answer.reg(), left->reg());
__ or_(answer.reg(), Operand(right->reg())); __ or_(answer.reg(), Operand(right->reg()));
deferred->enter()->Branch(negative, left, right, not_taken); deferred->Branch(negative);
__ xor_(answer.reg(), Operand(answer.reg())); // Positive 0 is correct. __ xor_(answer.reg(), Operand(answer.reg())); // Positive 0 is correct.
__ bind(&non_zero_result); __ bind(&non_zero_result);
break; break;
} }
case Token::BIT_OR: case Token::BIT_OR:
deferred->enter()->Branch(not_zero, left, right, not_taken);
__ mov(answer.reg(), left->reg());
__ or_(answer.reg(), Operand(right->reg())); __ or_(answer.reg(), Operand(right->reg()));
break; break;
case Token::BIT_AND: case Token::BIT_AND:
deferred->enter()->Branch(not_zero, left, right, not_taken);
__ mov(answer.reg(), left->reg());
__ and_(answer.reg(), Operand(right->reg())); __ and_(answer.reg(), Operand(right->reg()));
break; break;
case Token::BIT_XOR: case Token::BIT_XOR:
deferred->enter()->Branch(not_zero, left, right, not_taken);
__ mov(answer.reg(), left->reg());
__ xor_(answer.reg(), Operand(right->reg())); __ xor_(answer.reg(), Operand(right->reg()));
break; break;
...@@ -1318,19 +1347,25 @@ void CodeGenerator::LikelySmiBinaryOperation(Token::Value op, ...@@ -1318,19 +1347,25 @@ void CodeGenerator::LikelySmiBinaryOperation(Token::Value op,
UNREACHABLE(); UNREACHABLE();
break; break;
} }
deferred->BindExit();
left->Unuse(); left->Unuse();
right->Unuse(); right->Unuse();
deferred->BindExit(&answer);
frame_->Push(&answer); frame_->Push(&answer);
} }
// Call the appropriate binary operation stub to compute src op value
// and leave the result in dst.
class DeferredInlineSmiOperation: public DeferredCode { class DeferredInlineSmiOperation: public DeferredCode {
public: public:
DeferredInlineSmiOperation(Token::Value op, DeferredInlineSmiOperation(Token::Value op,
Register dst,
Register src,
Smi* value, Smi* value,
OverwriteMode overwrite_mode) OverwriteMode overwrite_mode)
: op_(op), : op_(op),
dst_(dst),
src_(src),
value_(value), value_(value),
overwrite_mode_(overwrite_mode) { overwrite_mode_(overwrite_mode) {
set_comment("[ DeferredInlineSmiOperation"); set_comment("[ DeferredInlineSmiOperation");
...@@ -1340,29 +1375,35 @@ class DeferredInlineSmiOperation: public DeferredCode { ...@@ -1340,29 +1375,35 @@ class DeferredInlineSmiOperation: public DeferredCode {
private: private:
Token::Value op_; Token::Value op_;
Register dst_;
Register src_;
Smi* value_; Smi* value_;
OverwriteMode overwrite_mode_; OverwriteMode overwrite_mode_;
}; };
void DeferredInlineSmiOperation::Generate() { void DeferredInlineSmiOperation::Generate() {
Result left; __ push(src_);
enter()->Bind(&left); __ push(Immediate(value_));
cgen()->frame()->Push(&left); GenericBinaryOpStub stub(op_, overwrite_mode_, SMI_CODE_INLINED);
cgen()->frame()->Push(value_); __ CallStub(&stub);
GenericBinaryOpStub igostub(op_, overwrite_mode_, SMI_CODE_INLINED); if (!dst_.is(eax)) __ mov(dst_, eax);
Result answer = cgen()->frame()->CallStub(&igostub, 2);
exit_.Jump(&answer);
} }
// Call the appropriate binary operation stub to compute value op src
// and leave the result in dst.
class DeferredInlineSmiOperationReversed: public DeferredCode { class DeferredInlineSmiOperationReversed: public DeferredCode {
public: public:
DeferredInlineSmiOperationReversed(Token::Value op, DeferredInlineSmiOperationReversed(Token::Value op,
Register dst,
Smi* value, Smi* value,
Register src,
OverwriteMode overwrite_mode) OverwriteMode overwrite_mode)
: op_(op), : op_(op),
dst_(dst),
value_(value), value_(value),
src_(src),
overwrite_mode_(overwrite_mode) { overwrite_mode_(overwrite_mode) {
set_comment("[ DeferredInlineSmiOperationReversed"); set_comment("[ DeferredInlineSmiOperationReversed");
} }
...@@ -1371,152 +1412,116 @@ class DeferredInlineSmiOperationReversed: public DeferredCode { ...@@ -1371,152 +1412,116 @@ class DeferredInlineSmiOperationReversed: public DeferredCode {
private: private:
Token::Value op_; Token::Value op_;
Register dst_;
Smi* value_; Smi* value_;
Register src_;
OverwriteMode overwrite_mode_; OverwriteMode overwrite_mode_;
}; };
void DeferredInlineSmiOperationReversed::Generate() { void DeferredInlineSmiOperationReversed::Generate() {
Result right; __ push(Immediate(value_));
enter()->Bind(&right); __ push(src_);
cgen()->frame()->Push(value_);
cgen()->frame()->Push(&right);
GenericBinaryOpStub igostub(op_, overwrite_mode_, SMI_CODE_INLINED); GenericBinaryOpStub igostub(op_, overwrite_mode_, SMI_CODE_INLINED);
Result answer = cgen()->frame()->CallStub(&igostub, 2); __ CallStub(&igostub);
exit_.Jump(&answer); if (!dst_.is(eax)) __ mov(dst_, eax);
} }
// The result of src + value is in dst. It either overflowed or was not
// smi tagged. Undo the speculative addition and call the appropriate
// specialized stub for add. The result is left in dst.
class DeferredInlineSmiAdd: public DeferredCode { class DeferredInlineSmiAdd: public DeferredCode {
public: public:
DeferredInlineSmiAdd(Smi* value, DeferredInlineSmiAdd(Register dst,
Smi* value,
OverwriteMode overwrite_mode) OverwriteMode overwrite_mode)
: value_(value), : dst_(dst), value_(value), overwrite_mode_(overwrite_mode) {
overwrite_mode_(overwrite_mode) {
set_comment("[ DeferredInlineSmiAdd"); set_comment("[ DeferredInlineSmiAdd");
} }
virtual void Generate(); virtual void Generate();
private: private:
Register dst_;
Smi* value_; Smi* value_;
OverwriteMode overwrite_mode_; OverwriteMode overwrite_mode_;
}; };
void DeferredInlineSmiAdd::Generate() {
// Undo the optimistic add operation and call the shared stub.
__ sub(Operand(dst_), Immediate(value_));
__ push(dst_);
__ push(Immediate(value_));
GenericBinaryOpStub igostub(Token::ADD, overwrite_mode_, SMI_CODE_INLINED);
__ CallStub(&igostub);
if (!dst_.is(eax)) __ mov(dst_, eax);
}
// The result of value + src is in dst. It either overflowed or was not
// smi tagged. Undo the speculative addition and call the appropriate
// specialized stub for add. The result is left in dst.
class DeferredInlineSmiAddReversed: public DeferredCode { class DeferredInlineSmiAddReversed: public DeferredCode {
public: public:
DeferredInlineSmiAddReversed(Smi* value, DeferredInlineSmiAddReversed(Register dst,
Smi* value,
OverwriteMode overwrite_mode) OverwriteMode overwrite_mode)
: value_(value), : dst_(dst), value_(value), overwrite_mode_(overwrite_mode) {
overwrite_mode_(overwrite_mode) {
set_comment("[ DeferredInlineSmiAddReversed"); set_comment("[ DeferredInlineSmiAddReversed");
} }
virtual void Generate(); virtual void Generate();
private: private:
Register dst_;
Smi* value_; Smi* value_;
OverwriteMode overwrite_mode_; OverwriteMode overwrite_mode_;
}; };
class DeferredInlineSmiSub: public DeferredCode {
public:
DeferredInlineSmiSub(Smi* value,
OverwriteMode overwrite_mode)
: value_(value),
overwrite_mode_(overwrite_mode) {
set_comment("[ DeferredInlineSmiSub");
}
virtual void Generate();
private:
Smi* value_;
OverwriteMode overwrite_mode_;
};
#undef __
#define __ ACCESS_MASM(cgen()->masm())
void DeferredInlineSmiAdd::Generate() {
// Undo the optimistic add operation and call the shared stub.
Result left; // Initially left + value_.
enter()->Bind(&left);
left.ToRegister();
cgen()->frame()->Spill(left.reg());
__ sub(Operand(left.reg()), Immediate(value_));
cgen()->frame()->Push(&left);
cgen()->frame()->Push(value_);
GenericBinaryOpStub igostub(Token::ADD, overwrite_mode_, SMI_CODE_INLINED);
Result answer = cgen()->frame()->CallStub(&igostub, 2);
exit_.Jump(&answer);
}
void DeferredInlineSmiAddReversed::Generate() { void DeferredInlineSmiAddReversed::Generate() {
// Undo the optimistic add operation and call the shared stub. // Undo the optimistic add operation and call the shared stub.
Result right; // Initially value_ + right. __ sub(Operand(dst_), Immediate(value_));
enter()->Bind(&right); __ push(Immediate(value_));
right.ToRegister(); __ push(dst_);
cgen()->frame()->Spill(right.reg());
__ sub(Operand(right.reg()), Immediate(value_));
cgen()->frame()->Push(value_);
cgen()->frame()->Push(&right);
GenericBinaryOpStub igostub(Token::ADD, overwrite_mode_, SMI_CODE_INLINED); GenericBinaryOpStub igostub(Token::ADD, overwrite_mode_, SMI_CODE_INLINED);
Result answer = cgen()->frame()->CallStub(&igostub, 2); __ CallStub(&igostub);
exit_.Jump(&answer); if (!dst_.is(eax)) __ mov(dst_, eax);
}
void DeferredInlineSmiSub::Generate() {
// Undo the optimistic sub operation and call the shared stub.
Result left; // Initially left - value_.
enter()->Bind(&left);
left.ToRegister();
cgen()->frame()->Spill(left.reg());
__ add(Operand(left.reg()), Immediate(value_));
cgen()->frame()->Push(&left);
cgen()->frame()->Push(value_);
GenericBinaryOpStub igostub(Token::SUB, overwrite_mode_, SMI_CODE_INLINED);
Result answer = cgen()->frame()->CallStub(&igostub, 2);
exit_.Jump(&answer);
} }
#undef __ // The result of src - value is in dst. It either overflowed or was not
#define __ ACCESS_MASM(masm_) // smi tagged. Undo the speculative subtraction and call the
// appropriate specialized stub for subtract. The result is left in
// dst.
class DeferredInlineSmiSubReversed: public DeferredCode { class DeferredInlineSmiSub: public DeferredCode {
public: public:
DeferredInlineSmiSubReversed(Smi* value, DeferredInlineSmiSub(Register dst,
OverwriteMode overwrite_mode) Smi* value,
: value_(value), OverwriteMode overwrite_mode)
overwrite_mode_(overwrite_mode) { : dst_(dst), value_(value), overwrite_mode_(overwrite_mode) {
set_comment("[ DeferredInlineSmiSubReversed"); set_comment("[ DeferredInlineSmiSub");
} }
virtual void Generate(); virtual void Generate();
private: private:
Register dst_;
Smi* value_; Smi* value_;
OverwriteMode overwrite_mode_; OverwriteMode overwrite_mode_;
}; };
void DeferredInlineSmiSubReversed::Generate() { void DeferredInlineSmiSub::Generate() {
// Call the shared stub. // Undo the optimistic sub operation and call the shared stub.
Result right; __ add(Operand(dst_), Immediate(value_));
enter()->Bind(&right); __ push(dst_);
cgen()->frame()->Push(value_); __ push(Immediate(value_));
cgen()->frame()->Push(&right);
GenericBinaryOpStub igostub(Token::SUB, overwrite_mode_, SMI_CODE_INLINED); GenericBinaryOpStub igostub(Token::SUB, overwrite_mode_, SMI_CODE_INLINED);
Result answer = cgen()->frame()->CallStub(&igostub, 2); __ CallStub(&igostub);
exit_.Jump(&answer); if (!dst_.is(eax)) __ mov(dst_, eax);
} }
...@@ -1554,19 +1559,24 @@ void CodeGenerator::ConstantSmiBinaryOperation(Token::Value op, ...@@ -1554,19 +1559,24 @@ void CodeGenerator::ConstantSmiBinaryOperation(Token::Value op,
case Token::ADD: { case Token::ADD: {
operand->ToRegister(); operand->ToRegister();
frame_->Spill(operand->reg()); frame_->Spill(operand->reg());
__ add(Operand(operand->reg()), Immediate(value));
// Optimistically add. Call the specialized add stub if the
// result is not a smi or overflows.
DeferredCode* deferred = NULL; DeferredCode* deferred = NULL;
if (reversed) { if (reversed) {
deferred = new DeferredInlineSmiAddReversed(smi_value, overwrite_mode); deferred = new DeferredInlineSmiAddReversed(operand->reg(),
smi_value,
overwrite_mode);
} else { } else {
deferred = new DeferredInlineSmiAdd(smi_value, overwrite_mode); deferred = new DeferredInlineSmiAdd(operand->reg(),
smi_value,
overwrite_mode);
} }
deferred->SetEntryFrame(operand); __ add(Operand(operand->reg()), Immediate(value));
deferred->enter()->Branch(overflow, operand, not_taken); deferred->Branch(overflow);
__ test(operand->reg(), Immediate(kSmiTagMask)); __ test(operand->reg(), Immediate(kSmiTagMask));
deferred->enter()->Branch(not_zero, operand, not_taken); deferred->Branch(not_zero);
deferred->BindExit(operand); deferred->BindExit();
frame_->Push(operand); frame_->Push(operand);
break; break;
} }
...@@ -1575,31 +1585,37 @@ void CodeGenerator::ConstantSmiBinaryOperation(Token::Value op, ...@@ -1575,31 +1585,37 @@ void CodeGenerator::ConstantSmiBinaryOperation(Token::Value op,
DeferredCode* deferred = NULL; DeferredCode* deferred = NULL;
Result answer; // Only allocate a new register if reversed. Result answer; // Only allocate a new register if reversed.
if (reversed) { if (reversed) {
// The reversed case is only hit when the right operand is not a
// constant.
ASSERT(operand->is_register());
answer = allocator()->Allocate(); answer = allocator()->Allocate();
ASSERT(answer.is_valid()); ASSERT(answer.is_valid());
deferred = new DeferredInlineSmiSubReversed(smi_value, overwrite_mode);
__ Set(answer.reg(), Immediate(value)); __ Set(answer.reg(), Immediate(value));
// We are in the reversed case so they can't both be Smi constants. deferred = new DeferredInlineSmiOperationReversed(op,
ASSERT(operand->is_register()); answer.reg(),
smi_value,
operand->reg(),
overwrite_mode);
__ sub(answer.reg(), Operand(operand->reg())); __ sub(answer.reg(), Operand(operand->reg()));
} else { } else {
operand->ToRegister(); operand->ToRegister();
frame_->Spill(operand->reg()); frame_->Spill(operand->reg());
deferred = new DeferredInlineSmiSub(smi_value, overwrite_mode);
__ sub(Operand(operand->reg()), Immediate(value));
answer = *operand; answer = *operand;
deferred = new DeferredInlineSmiSub(operand->reg(),
smi_value,
overwrite_mode);
__ sub(Operand(operand->reg()), Immediate(value));
} }
deferred->SetEntryFrame(operand); deferred->Branch(overflow);
deferred->enter()->Branch(overflow, operand, not_taken);
__ test(answer.reg(), Immediate(kSmiTagMask)); __ test(answer.reg(), Immediate(kSmiTagMask));
deferred->enter()->Branch(not_zero, operand, not_taken); deferred->Branch(not_zero);
deferred->BindExit();
operand->Unuse(); operand->Unuse();
deferred->BindExit(&answer);
frame_->Push(&answer); frame_->Push(&answer);
break; break;
} }
case Token::SAR: { case Token::SAR:
if (reversed) { if (reversed) {
Result constant_operand(value); Result constant_operand(value);
LikelySmiBinaryOperation(op, &constant_operand, operand, LikelySmiBinaryOperation(op, &constant_operand, operand,
...@@ -1608,23 +1624,26 @@ void CodeGenerator::ConstantSmiBinaryOperation(Token::Value op, ...@@ -1608,23 +1624,26 @@ void CodeGenerator::ConstantSmiBinaryOperation(Token::Value op,
// Only the least significant 5 bits of the shift value are used. // Only the least significant 5 bits of the shift value are used.
// In the slow case, this masking is done inside the runtime call. // In the slow case, this masking is done inside the runtime call.
int shift_value = int_value & 0x1f; int shift_value = int_value & 0x1f;
DeferredCode* deferred =
new DeferredInlineSmiOperation(op, smi_value, overwrite_mode);
operand->ToRegister(); operand->ToRegister();
frame_->Spill(operand->reg());
DeferredInlineSmiOperation* deferred =
new DeferredInlineSmiOperation(op,
operand->reg(),
operand->reg(),
smi_value,
overwrite_mode);
__ test(operand->reg(), Immediate(kSmiTagMask)); __ test(operand->reg(), Immediate(kSmiTagMask));
deferred->enter()->Branch(not_zero, operand, not_taken); deferred->Branch(not_zero);
if (shift_value > 0) { if (shift_value > 0) {
frame_->Spill(operand->reg());
__ sar(operand->reg(), shift_value); __ sar(operand->reg(), shift_value);
__ and_(operand->reg(), ~kSmiTagMask); __ and_(operand->reg(), ~kSmiTagMask);
} }
deferred->BindExit(operand); deferred->BindExit();
frame_->Push(operand); frame_->Push(operand);
} }
break; break;
}
case Token::SHR: { case Token::SHR:
if (reversed) { if (reversed) {
Result constant_operand(value); Result constant_operand(value);
LikelySmiBinaryOperation(op, &constant_operand, operand, LikelySmiBinaryOperation(op, &constant_operand, operand,
...@@ -1633,32 +1652,35 @@ void CodeGenerator::ConstantSmiBinaryOperation(Token::Value op, ...@@ -1633,32 +1652,35 @@ void CodeGenerator::ConstantSmiBinaryOperation(Token::Value op,
// Only the least significant 5 bits of the shift value are used. // Only the least significant 5 bits of the shift value are used.
// In the slow case, this masking is done inside the runtime call. // In the slow case, this masking is done inside the runtime call.
int shift_value = int_value & 0x1f; int shift_value = int_value & 0x1f;
DeferredCode* deferred =
new DeferredInlineSmiOperation(op, smi_value, overwrite_mode);
operand->ToRegister(); operand->ToRegister();
__ test(operand->reg(), Immediate(kSmiTagMask));
deferred->enter()->Branch(not_zero, operand, not_taken);
Result answer = allocator()->Allocate(); Result answer = allocator()->Allocate();
ASSERT(answer.is_valid()); ASSERT(answer.is_valid());
DeferredInlineSmiOperation* deferred =
new DeferredInlineSmiOperation(op,
answer.reg(),
operand->reg(),
smi_value,
overwrite_mode);
__ test(operand->reg(), Immediate(kSmiTagMask));
deferred->Branch(not_zero);
__ mov(answer.reg(), operand->reg()); __ mov(answer.reg(), operand->reg());
__ sar(answer.reg(), kSmiTagSize); __ sar(answer.reg(), kSmiTagSize);
__ shr(answer.reg(), shift_value); __ shr(answer.reg(), shift_value);
// A negative Smi shifted right two is in the positive Smi range. // A negative Smi shifted right two is in the positive Smi range.
if (shift_value < 2) { if (shift_value < 2) {
__ test(answer.reg(), Immediate(0xc0000000)); __ test(answer.reg(), Immediate(0xc0000000));
deferred->enter()->Branch(not_zero, operand, not_taken); deferred->Branch(not_zero);
} }
operand->Unuse(); operand->Unuse();
ASSERT(kSmiTagSize == times_2); // Adjust the code if not true. ASSERT(kSmiTagSize == times_2); // Adjust the code if not true.
__ lea(answer.reg(), __ lea(answer.reg(),
Operand(answer.reg(), answer.reg(), times_1, kSmiTag)); Operand(answer.reg(), answer.reg(), times_1, kSmiTag));
deferred->BindExit(&answer); deferred->BindExit();
frame_->Push(&answer); frame_->Push(&answer);
} }
break; break;
}
case Token::SHL: { case Token::SHL:
if (reversed) { if (reversed) {
Result constant_operand(value); Result constant_operand(value);
LikelySmiBinaryOperation(op, &constant_operand, operand, LikelySmiBinaryOperation(op, &constant_operand, operand,
...@@ -1667,14 +1689,30 @@ void CodeGenerator::ConstantSmiBinaryOperation(Token::Value op, ...@@ -1667,14 +1689,30 @@ void CodeGenerator::ConstantSmiBinaryOperation(Token::Value op,
// Only the least significant 5 bits of the shift value are used. // Only the least significant 5 bits of the shift value are used.
// In the slow case, this masking is done inside the runtime call. // In the slow case, this masking is done inside the runtime call.
int shift_value = int_value & 0x1f; int shift_value = int_value & 0x1f;
DeferredCode* deferred =
new DeferredInlineSmiOperation(op, smi_value, overwrite_mode);
operand->ToRegister(); operand->ToRegister();
__ test(operand->reg(), Immediate(kSmiTagMask)); if (shift_value == 0) {
deferred->enter()->Branch(not_zero, operand, not_taken); DeferredInlineSmiOperation* deferred =
if (shift_value != 0) { new DeferredInlineSmiOperation(op,
operand->reg(),
operand->reg(),
smi_value,
overwrite_mode);
__ test(operand->reg(), Immediate(kSmiTagMask));
deferred->Branch(not_zero);
deferred->BindExit();
frame_->Push(operand);
} else {
// Use a fresh temporary for nonzero shift values.
Result answer = allocator()->Allocate(); Result answer = allocator()->Allocate();
ASSERT(answer.is_valid()); ASSERT(answer.is_valid());
DeferredInlineSmiOperation* deferred =
new DeferredInlineSmiOperation(op,
answer.reg(),
operand->reg(),
smi_value,
overwrite_mode);
__ test(operand->reg(), Immediate(kSmiTagMask));
deferred->Branch(not_zero);
__ mov(answer.reg(), operand->reg()); __ mov(answer.reg(), operand->reg());
ASSERT(kSmiTag == 0); // adjust code if not the case ASSERT(kSmiTag == 0); // adjust code if not the case
// We do no shifts, only the Smi conversion, if shift_value is 1. // We do no shifts, only the Smi conversion, if shift_value is 1.
...@@ -1682,35 +1720,37 @@ void CodeGenerator::ConstantSmiBinaryOperation(Token::Value op, ...@@ -1682,35 +1720,37 @@ void CodeGenerator::ConstantSmiBinaryOperation(Token::Value op,
__ shl(answer.reg(), shift_value - 1); __ shl(answer.reg(), shift_value - 1);
} }
// Convert int result to Smi, checking that it is in int range. // Convert int result to Smi, checking that it is in int range.
ASSERT(kSmiTagSize == times_2); // adjust code if not the case ASSERT(kSmiTagSize == 1); // adjust code if not the case
__ add(answer.reg(), Operand(answer.reg())); __ add(answer.reg(), Operand(answer.reg()));
deferred->enter()->Branch(overflow, operand, not_taken); deferred->Branch(overflow);
deferred->BindExit();
operand->Unuse(); operand->Unuse();
deferred->BindExit(&answer);
frame_->Push(&answer); frame_->Push(&answer);
} else {
deferred->BindExit(operand);
frame_->Push(operand);
} }
} }
break; break;
}
case Token::BIT_OR: case Token::BIT_OR:
case Token::BIT_XOR: case Token::BIT_XOR:
case Token::BIT_AND: { case Token::BIT_AND: {
operand->ToRegister();
frame_->Spill(operand->reg());
DeferredCode* deferred = NULL; DeferredCode* deferred = NULL;
if (reversed) { if (reversed) {
deferred = new DeferredInlineSmiOperationReversed(op, smi_value, deferred = new DeferredInlineSmiOperationReversed(op,
operand->reg(),
smi_value,
operand->reg(),
overwrite_mode); overwrite_mode);
} else { } else {
deferred = new DeferredInlineSmiOperation(op, smi_value, deferred = new DeferredInlineSmiOperation(op,
operand->reg(),
operand->reg(),
smi_value,
overwrite_mode); overwrite_mode);
} }
operand->ToRegister();
__ test(operand->reg(), Immediate(kSmiTagMask)); __ test(operand->reg(), Immediate(kSmiTagMask));
deferred->enter()->Branch(not_zero, operand, not_taken); deferred->Branch(not_zero);
frame_->Spill(operand->reg());
if (op == Token::BIT_AND) { if (op == Token::BIT_AND) {
__ and_(Operand(operand->reg()), Immediate(value)); __ and_(Operand(operand->reg()), Immediate(value));
} else if (op == Token::BIT_XOR) { } else if (op == Token::BIT_XOR) {
...@@ -1723,7 +1763,7 @@ void CodeGenerator::ConstantSmiBinaryOperation(Token::Value op, ...@@ -1723,7 +1763,7 @@ void CodeGenerator::ConstantSmiBinaryOperation(Token::Value op,
__ or_(Operand(operand->reg()), Immediate(value)); __ or_(Operand(operand->reg()), Immediate(value));
} }
} }
deferred->BindExit(operand); deferred->BindExit();
frame_->Push(operand); frame_->Push(operand);
break; break;
} }
...@@ -1990,7 +2030,7 @@ void CodeGenerator::CallWithArguments(ZoneList<Expression*>* args, ...@@ -1990,7 +2030,7 @@ void CodeGenerator::CallWithArguments(ZoneList<Expression*>* args,
class DeferredStackCheck: public DeferredCode { class DeferredStackCheck: public DeferredCode {
public: public:
explicit DeferredStackCheck() { DeferredStackCheck() {
set_comment("[ DeferredStackCheck"); set_comment("[ DeferredStackCheck");
} }
...@@ -1999,11 +2039,8 @@ class DeferredStackCheck: public DeferredCode { ...@@ -1999,11 +2039,8 @@ class DeferredStackCheck: public DeferredCode {
void DeferredStackCheck::Generate() { void DeferredStackCheck::Generate() {
enter()->Bind();
StackCheckStub stub; StackCheckStub stub;
Result ignored = cgen()->frame()->CallStub(&stub, 0); __ CallStub(&stub);
ignored.Unuse();
exit_.Jump();
} }
...@@ -2013,7 +2050,7 @@ void CodeGenerator::CheckStack() { ...@@ -2013,7 +2050,7 @@ void CodeGenerator::CheckStack() {
ExternalReference stack_guard_limit = ExternalReference stack_guard_limit =
ExternalReference::address_of_stack_guard_limit(); ExternalReference::address_of_stack_guard_limit();
__ cmp(esp, Operand::StaticVariable(stack_guard_limit)); __ cmp(esp, Operand::StaticVariable(stack_guard_limit));
deferred->enter()->Branch(below, not_taken); deferred->Branch(below);
deferred->BindExit(); deferred->BindExit();
} }
} }
...@@ -3865,43 +3902,45 @@ bool CodeGenerator::IsUnsafeSmi(Handle<Object> value) { ...@@ -3865,43 +3902,45 @@ bool CodeGenerator::IsUnsafeSmi(Handle<Object> value) {
} }
// Materialize the regexp literal 'node' in the literals array
// 'literals' of the function. Leave the regexp boilerplate in
// 'boilerplate'.
class DeferredRegExpLiteral: public DeferredCode { class DeferredRegExpLiteral: public DeferredCode {
public: public:
explicit DeferredRegExpLiteral(RegExpLiteral* node) : node_(node) { DeferredRegExpLiteral(Register boilerplate,
Register literals,
RegExpLiteral* node)
: boilerplate_(boilerplate), literals_(literals), node_(node) {
set_comment("[ DeferredRegExpLiteral"); set_comment("[ DeferredRegExpLiteral");
} }
virtual void Generate(); void Generate();
private: private:
Register boilerplate_;
Register literals_;
RegExpLiteral* node_; RegExpLiteral* node_;
}; };
void DeferredRegExpLiteral::Generate() { void DeferredRegExpLiteral::Generate() {
Result literals;
enter()->Bind(&literals);
// Since the entry is undefined we call the runtime system to // Since the entry is undefined we call the runtime system to
// compute the literal. // compute the literal.
VirtualFrame* frame = cgen()->frame();
// Literal array (0). // Literal array (0).
frame->Push(&literals); __ push(literals_);
// Literal index (1). // Literal index (1).
frame->Push(Smi::FromInt(node_->literal_index())); __ push(Immediate(Smi::FromInt(node_->literal_index())));
// RegExp pattern (2). // RegExp pattern (2).
frame->Push(node_->pattern()); __ push(Immediate(node_->pattern()));
// RegExp flags (3). // RegExp flags (3).
frame->Push(node_->flags()); __ push(Immediate(node_->flags()));
Result boilerplate = __ CallRuntime(Runtime::kMaterializeRegExpLiteral, 4);
frame->CallRuntime(Runtime::kMaterializeRegExpLiteral, 4); if (!boilerplate_.is(eax)) __ mov(boilerplate_, eax);
exit_.Jump(&boilerplate);
} }
void CodeGenerator::VisitRegExpLiteral(RegExpLiteral* node) { void CodeGenerator::VisitRegExpLiteral(RegExpLiteral* node) {
Comment cmnt(masm_, "[ RegExp Literal"); Comment cmnt(masm_, "[ RegExp Literal");
DeferredRegExpLiteral* deferred = new DeferredRegExpLiteral(node);
// Retrieve the literals array and check the allocated entry. Begin // Retrieve the literals array and check the allocated entry. Begin
// with a writable copy of the function of this activation in a // with a writable copy of the function of this activation in a
...@@ -3916,65 +3955,63 @@ void CodeGenerator::VisitRegExpLiteral(RegExpLiteral* node) { ...@@ -3916,65 +3955,63 @@ void CodeGenerator::VisitRegExpLiteral(RegExpLiteral* node) {
FieldOperand(literals.reg(), JSFunction::kLiteralsOffset)); FieldOperand(literals.reg(), JSFunction::kLiteralsOffset));
// Load the literal at the ast saved index. // Load the literal at the ast saved index.
int literal_offset =
FixedArray::kHeaderSize + node->literal_index() * kPointerSize;
Result boilerplate = allocator_->Allocate(); Result boilerplate = allocator_->Allocate();
ASSERT(boilerplate.is_valid()); ASSERT(boilerplate.is_valid());
int literal_offset =
FixedArray::kHeaderSize + node->literal_index() * kPointerSize;
__ mov(boilerplate.reg(), FieldOperand(literals.reg(), literal_offset)); __ mov(boilerplate.reg(), FieldOperand(literals.reg(), literal_offset));
// Check whether we need to materialize the RegExp object. If so, // Check whether we need to materialize the RegExp object. If so,
// jump to the deferred code passing the literals array. // jump to the deferred code passing the literals array.
DeferredRegExpLiteral* deferred =
new DeferredRegExpLiteral(boilerplate.reg(), literals.reg(), node);
__ cmp(boilerplate.reg(), Factory::undefined_value()); __ cmp(boilerplate.reg(), Factory::undefined_value());
deferred->enter()->Branch(equal, &literals, not_taken); deferred->Branch(equal);
deferred->BindExit();
literals.Unuse(); literals.Unuse();
// The deferred code returns the boilerplate object.
deferred->BindExit(&boilerplate);
// Push the boilerplate object. // Push the boilerplate object.
frame_->Push(&boilerplate); frame_->Push(&boilerplate);
} }
// This deferred code stub will be used for creating the boilerplate // Materialize the object literal 'node' in the literals array
// by calling Runtime_CreateObjectLiteral. // 'literals' of the function. Leave the object boilerplate in
// Each created boilerplate is stored in the JSFunction and they are // 'boilerplate'.
// therefore context dependent.
class DeferredObjectLiteral: public DeferredCode { class DeferredObjectLiteral: public DeferredCode {
public: public:
explicit DeferredObjectLiteral(ObjectLiteral* node) : node_(node) { DeferredObjectLiteral(Register boilerplate,
Register literals,
ObjectLiteral* node)
: boilerplate_(boilerplate), literals_(literals), node_(node) {
set_comment("[ DeferredObjectLiteral"); set_comment("[ DeferredObjectLiteral");
} }
virtual void Generate(); void Generate();
private: private:
Register boilerplate_;
Register literals_;
ObjectLiteral* node_; ObjectLiteral* node_;
}; };
void DeferredObjectLiteral::Generate() { void DeferredObjectLiteral::Generate() {
Result literals;
enter()->Bind(&literals);
// Since the entry is undefined we call the runtime system to // Since the entry is undefined we call the runtime system to
// compute the literal. // compute the literal.
VirtualFrame* frame = cgen()->frame();
// Literal array (0). // Literal array (0).
frame->Push(&literals); __ push(literals_);
// Literal index (1). // Literal index (1).
frame->Push(Smi::FromInt(node_->literal_index())); __ push(Immediate(Smi::FromInt(node_->literal_index())));
// Constant properties (2). // Constant properties (2).
frame->Push(node_->constant_properties()); __ push(Immediate(node_->constant_properties()));
Result boilerplate = __ CallRuntime(Runtime::kCreateObjectLiteralBoilerplate, 3);
frame->CallRuntime(Runtime::kCreateObjectLiteralBoilerplate, 3); if (!boilerplate_.is(eax)) __ mov(boilerplate_, eax);
exit_.Jump(&boilerplate);
} }
void CodeGenerator::VisitObjectLiteral(ObjectLiteral* node) { void CodeGenerator::VisitObjectLiteral(ObjectLiteral* node) {
Comment cmnt(masm_, "[ ObjectLiteral"); Comment cmnt(masm_, "[ ObjectLiteral");
DeferredObjectLiteral* deferred = new DeferredObjectLiteral(node);
// Retrieve the literals array and check the allocated entry. Begin // Retrieve the literals array and check the allocated entry. Begin
// with a writable copy of the function of this activation in a // with a writable copy of the function of this activation in a
...@@ -3989,20 +4026,20 @@ void CodeGenerator::VisitObjectLiteral(ObjectLiteral* node) { ...@@ -3989,20 +4026,20 @@ void CodeGenerator::VisitObjectLiteral(ObjectLiteral* node) {
FieldOperand(literals.reg(), JSFunction::kLiteralsOffset)); FieldOperand(literals.reg(), JSFunction::kLiteralsOffset));
// Load the literal at the ast saved index. // Load the literal at the ast saved index.
int literal_offset =
FixedArray::kHeaderSize + node->literal_index() * kPointerSize;
Result boilerplate = allocator_->Allocate(); Result boilerplate = allocator_->Allocate();
ASSERT(boilerplate.is_valid()); ASSERT(boilerplate.is_valid());
int literal_offset =
FixedArray::kHeaderSize + node->literal_index() * kPointerSize;
__ mov(boilerplate.reg(), FieldOperand(literals.reg(), literal_offset)); __ mov(boilerplate.reg(), FieldOperand(literals.reg(), literal_offset));
// Check whether we need to materialize the object literal boilerplate. // Check whether we need to materialize the object literal boilerplate.
// If so, jump to the deferred code passing the literals array. // If so, jump to the deferred code passing the literals array.
DeferredObjectLiteral* deferred =
new DeferredObjectLiteral(boilerplate.reg(), literals.reg(), node);
__ cmp(boilerplate.reg(), Factory::undefined_value()); __ cmp(boilerplate.reg(), Factory::undefined_value());
deferred->enter()->Branch(equal, &literals, not_taken); deferred->Branch(equal);
deferred->BindExit();
literals.Unuse(); literals.Unuse();
// The deferred code returns the boilerplate object.
deferred->BindExit(&boilerplate);
// Push the boilerplate object. // Push the boilerplate object.
frame_->Push(&boilerplate); frame_->Push(&boilerplate);
...@@ -4072,45 +4109,42 @@ void CodeGenerator::VisitObjectLiteral(ObjectLiteral* node) { ...@@ -4072,45 +4109,42 @@ void CodeGenerator::VisitObjectLiteral(ObjectLiteral* node) {
} }
// This deferred code stub will be used for creating the boilerplate // Materialize the array literal 'node' in the literals array 'literals'
// by calling Runtime_CreateArrayLiteralBoilerplate. // of the function. Leave the array boilerplate in 'boilerplate'.
// Each created boilerplate is stored in the JSFunction and they are
// therefore context dependent.
class DeferredArrayLiteral: public DeferredCode { class DeferredArrayLiteral: public DeferredCode {
public: public:
explicit DeferredArrayLiteral(ArrayLiteral* node) : node_(node) { DeferredArrayLiteral(Register boilerplate,
Register literals,
ArrayLiteral* node)
: boilerplate_(boilerplate), literals_(literals), node_(node) {
set_comment("[ DeferredArrayLiteral"); set_comment("[ DeferredArrayLiteral");
} }
virtual void Generate(); void Generate();
private: private:
Register boilerplate_;
Register literals_;
ArrayLiteral* node_; ArrayLiteral* node_;
}; };
void DeferredArrayLiteral::Generate() { void DeferredArrayLiteral::Generate() {
Result literals;
enter()->Bind(&literals);
// Since the entry is undefined we call the runtime system to // Since the entry is undefined we call the runtime system to
// compute the literal. // compute the literal.
VirtualFrame* frame = cgen()->frame();
// Literal array (0). // Literal array (0).
frame->Push(&literals); __ push(literals_);
// Literal index (1). // Literal index (1).
frame->Push(Smi::FromInt(node_->literal_index())); __ push(Immediate(Smi::FromInt(node_->literal_index())));
// Constant properties (2). // Constant properties (2).
frame->Push(node_->literals()); __ push(Immediate(node_->literals()));
Result boilerplate = __ CallRuntime(Runtime::kCreateArrayLiteralBoilerplate, 3);
frame->CallRuntime(Runtime::kCreateArrayLiteralBoilerplate, 3); if (!boilerplate_.is(eax)) __ mov(boilerplate_, eax);
exit_.Jump(&boilerplate);
} }
void CodeGenerator::VisitArrayLiteral(ArrayLiteral* node) { void CodeGenerator::VisitArrayLiteral(ArrayLiteral* node) {
Comment cmnt(masm_, "[ ArrayLiteral"); Comment cmnt(masm_, "[ ArrayLiteral");
DeferredArrayLiteral* deferred = new DeferredArrayLiteral(node);
// Retrieve the literals array and check the allocated entry. Begin // Retrieve the literals array and check the allocated entry. Begin
// with a writable copy of the function of this activation in a // with a writable copy of the function of this activation in a
...@@ -4125,24 +4159,23 @@ void CodeGenerator::VisitArrayLiteral(ArrayLiteral* node) { ...@@ -4125,24 +4159,23 @@ void CodeGenerator::VisitArrayLiteral(ArrayLiteral* node) {
FieldOperand(literals.reg(), JSFunction::kLiteralsOffset)); FieldOperand(literals.reg(), JSFunction::kLiteralsOffset));
// Load the literal at the ast saved index. // Load the literal at the ast saved index.
int literal_offset =
FixedArray::kHeaderSize + node->literal_index() * kPointerSize;
Result boilerplate = allocator_->Allocate(); Result boilerplate = allocator_->Allocate();
ASSERT(boilerplate.is_valid()); ASSERT(boilerplate.is_valid());
int literal_offset =
FixedArray::kHeaderSize + node->literal_index() * kPointerSize;
__ mov(boilerplate.reg(), FieldOperand(literals.reg(), literal_offset)); __ mov(boilerplate.reg(), FieldOperand(literals.reg(), literal_offset));
// Check whether we need to materialize the object literal boilerplate. // Check whether we need to materialize the object literal boilerplate.
// If so, jump to the deferred code passing the literals array. // If so, jump to the deferred code passing the literals array.
DeferredArrayLiteral* deferred =
new DeferredArrayLiteral(boilerplate.reg(), literals.reg(), node);
__ cmp(boilerplate.reg(), Factory::undefined_value()); __ cmp(boilerplate.reg(), Factory::undefined_value());
deferred->enter()->Branch(equal, &literals, not_taken); deferred->Branch(equal);
deferred->BindExit();
literals.Unuse(); literals.Unuse();
// The deferred code returns the boilerplate object.
deferred->BindExit(&boilerplate);
// Push the resulting array literal on the stack. // Push the resulting array literal boilerplate on the stack.
frame_->Push(&boilerplate); frame_->Push(&boilerplate);
// Clone the boilerplate object. // Clone the boilerplate object.
Runtime::FunctionId clone_function_id = Runtime::kCloneLiteralBoilerplate; Runtime::FunctionId clone_function_id = Runtime::kCloneLiteralBoilerplate;
if (node->depth() == 1) { if (node->depth() == 1) {
...@@ -5063,63 +5096,90 @@ void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) { ...@@ -5063,63 +5096,90 @@ void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) {
} }
class DeferredCountOperation: public DeferredCode { // The value in dst was optimistically incremented or decremented. The
// result overflowed or was not smi tagged. Undo the operation, call
// into the runtime to convert the argument to a number, and call the
// specialized add or subtract stub. The result is left in dst.
class DeferredPrefixCountOperation: public DeferredCode {
public: public:
DeferredCountOperation(bool is_postfix, DeferredPrefixCountOperation(Register dst, bool is_increment)
bool is_increment, : dst_(dst), is_increment_(is_increment) {
int target_size)
: is_postfix_(is_postfix),
is_increment_(is_increment),
target_size_(target_size) {
set_comment("[ DeferredCountOperation"); set_comment("[ DeferredCountOperation");
} }
virtual void Generate(); virtual void Generate();
private: private:
bool is_postfix_; Register dst_;
bool is_increment_; bool is_increment_;
int target_size_;
}; };
#undef __ void DeferredPrefixCountOperation::Generate() {
#define __ ACCESS_MASM(cgen()->masm())
void DeferredCountOperation::Generate() {
Result value;
enter()->Bind(&value);
VirtualFrame* frame = cgen()->frame();
// Undo the optimistic smi operation. // Undo the optimistic smi operation.
value.ToRegister();
frame->Spill(value.reg());
if (is_increment_) { if (is_increment_) {
__ sub(Operand(value.reg()), Immediate(Smi::FromInt(1))); __ sub(Operand(dst_), Immediate(Smi::FromInt(1)));
} else { } else {
__ add(Operand(value.reg()), Immediate(Smi::FromInt(1))); __ add(Operand(dst_), Immediate(Smi::FromInt(1)));
} }
frame->Push(&value); __ push(dst_);
value = frame->InvokeBuiltin(Builtins::TO_NUMBER, CALL_FUNCTION, 1); __ InvokeBuiltin(Builtins::TO_NUMBER, CALL_FUNCTION);
frame->Push(&value); __ push(eax);
if (is_postfix_) { // Fix up copy of old value with ToNumber(value). __ push(Immediate(Smi::FromInt(1)));
// This is only safe because VisitCountOperation makes this frame slot
// beneath the reference a register, which is spilled at the above call.
// We cannot safely write to constants or copies below the water line.
frame->StoreToElementAt(target_size_ + 1);
}
frame->Push(Smi::FromInt(1));
if (is_increment_) { if (is_increment_) {
value = frame->CallRuntime(Runtime::kNumberAdd, 2); __ CallRuntime(Runtime::kNumberAdd, 2);
} else { } else {
value = frame->CallRuntime(Runtime::kNumberSub, 2); __ CallRuntime(Runtime::kNumberSub, 2);
} }
exit_.Jump(&value); if (!dst_.is(eax)) __ mov(dst_, eax);
} }
#undef __ // The value in dst was optimistically incremented or decremented. The
#define __ ACCESS_MASM(masm_) // result overflowed or was not smi tagged. Undo the operation and call
// into the runtime to convert the argument to a number. Update the
// original value in old. Call the specialized add or subtract stub.
// The result is left in dst.
class DeferredPostfixCountOperation: public DeferredCode {
public:
DeferredPostfixCountOperation(Register dst, Register old, bool is_increment)
: dst_(dst), old_(old), is_increment_(is_increment) {
set_comment("[ DeferredCountOperation");
}
virtual void Generate();
private:
Register dst_;
Register old_;
bool is_increment_;
};
void DeferredPostfixCountOperation::Generate() {
// Undo the optimistic smi operation.
if (is_increment_) {
__ sub(Operand(dst_), Immediate(Smi::FromInt(1)));
} else {
__ add(Operand(dst_), Immediate(Smi::FromInt(1)));
}
__ push(dst_);
__ InvokeBuiltin(Builtins::TO_NUMBER, CALL_FUNCTION);
// Save the result of ToNumber to use as the old value.
__ push(eax);
// Call the runtime for the addition or subtraction.
__ push(eax);
__ push(Immediate(Smi::FromInt(1)));
if (is_increment_) {
__ CallRuntime(Runtime::kNumberAdd, 2);
} else {
__ CallRuntime(Runtime::kNumberSub, 2);
}
if (!dst_.is(eax)) __ mov(dst_, eax);
__ pop(old_);
}
void CodeGenerator::VisitCountOperation(CountOperation* node) { void CodeGenerator::VisitCountOperation(CountOperation* node) {
...@@ -5131,96 +5191,93 @@ void CodeGenerator::VisitCountOperation(CountOperation* node) { ...@@ -5131,96 +5191,93 @@ void CodeGenerator::VisitCountOperation(CountOperation* node) {
Variable* var = node->expression()->AsVariableProxy()->AsVariable(); Variable* var = node->expression()->AsVariableProxy()->AsVariable();
bool is_const = (var != NULL && var->mode() == Variable::CONST); bool is_const = (var != NULL && var->mode() == Variable::CONST);
// Postfix operators need a stack slot under the reference to hold // Postfix operations need a stack slot under the reference to hold
// the old value while the new one is being stored. // the old value while the new value is being stored. This is so that
if (is_postfix) { // in the case that storing the new value requires a call, the old
frame_->Push(Smi::FromInt(0)); // value will be in the frame to be spilled.
} if (is_postfix) frame_->Push(Smi::FromInt(0));
{ Reference target(this, node->expression()); { Reference target(this, node->expression());
if (target.is_illegal()) { if (target.is_illegal()) {
// Spoof the virtual frame to have the expected height (one higher // Spoof the virtual frame to have the expected height (one higher
// than on entry). // than on entry).
if (!is_postfix) { if (!is_postfix) frame_->Push(Smi::FromInt(0));
frame_->Push(Smi::FromInt(0));
}
return; return;
} }
target.TakeValue(NOT_INSIDE_TYPEOF); target.TakeValue(NOT_INSIDE_TYPEOF);
DeferredCountOperation* deferred = Result new_value = frame_->Pop();
new DeferredCountOperation(is_postfix, is_increment, target.size()); new_value.ToRegister();
Result value = frame_->Pop(); Result old_value; // Only allocated in the postfix case.
value.ToRegister();
// Postfix: Store the old value as the result.
if (is_postfix) { if (is_postfix) {
// Explicitly back the slot for the old value with a new register. // Allocate a temporary to preserve the old value.
// This improves performance in some cases. old_value = allocator_->Allocate();
Result old_value = allocator_->Allocate();
ASSERT(old_value.is_valid()); ASSERT(old_value.is_valid());
__ mov(old_value.reg(), value.reg()); __ mov(old_value.reg(), new_value.reg());
// SetElement must not create a constant element or a copy in this slot,
// since we will write to it, below the waterline, in deferred code.
frame_->SetElementAt(target.size(), &old_value);
} }
// Ensure the new value is writable.
frame_->Spill(new_value.reg());
// Perform optimistic increment/decrement. Ensure the value is // In order to combine the overflow and the smi tag check, we need
// writable. // to be able to allocate a byte register. We attempt to do so
frame_->Spill(value.reg()); // without spilling. If we fail, we will generate separate overflow
ASSERT(allocator_->count(value.reg()) == 1); // and smi tag checks.
// In order to combine the overflow and the smi check, we need to
// be able to allocate a byte register. We attempt to do so
// without spilling. If we fail, we will generate separate
// overflow and smi checks.
// //
// We need to allocate and clear the temporary byte register // We allocate and clear the temporary byte register before
// before performing the count operation since clearing the // performing the count operation since clearing the register using
// register using xor will clear the overflow flag. // xor will clear the overflow flag.
Result tmp = allocator_->AllocateByteRegisterWithoutSpilling(); Result tmp = allocator_->AllocateByteRegisterWithoutSpilling();
if (tmp.is_valid()) { if (tmp.is_valid()) {
__ Set(tmp.reg(), Immediate(0)); __ Set(tmp.reg(), Immediate(0));
} }
DeferredCode* deferred = NULL;
if (is_postfix) {
deferred = new DeferredPostfixCountOperation(new_value.reg(),
old_value.reg(),
is_increment);
} else {
deferred = new DeferredPrefixCountOperation(new_value.reg(),
is_increment);
}
if (is_increment) { if (is_increment) {
__ add(Operand(value.reg()), Immediate(Smi::FromInt(1))); __ add(Operand(new_value.reg()), Immediate(Smi::FromInt(1)));
} else { } else {
__ sub(Operand(value.reg()), Immediate(Smi::FromInt(1))); __ sub(Operand(new_value.reg()), Immediate(Smi::FromInt(1)));
} }
// If the count operation didn't overflow and the result is a // If the count operation didn't overflow and the result is a valid
// valid smi, we're done. Otherwise, we jump to the deferred // smi, we're done. Otherwise, we jump to the deferred slow-case
// slow-case code. // code.
//
// We combine the overflow and the smi check if we could
// successfully allocate a temporary byte register.
if (tmp.is_valid()) { if (tmp.is_valid()) {
// We combine the overflow and the smi tag check if we could
// successfully allocate a temporary byte register.
__ setcc(overflow, tmp.reg()); __ setcc(overflow, tmp.reg());
__ or_(Operand(tmp.reg()), value.reg()); __ or_(Operand(tmp.reg()), new_value.reg());
__ test(tmp.reg(), Immediate(kSmiTagMask)); __ test(tmp.reg(), Immediate(kSmiTagMask));
tmp.Unuse(); tmp.Unuse();
deferred->enter()->Branch(not_zero, &value, not_taken); deferred->Branch(not_zero);
} else { // Otherwise we test separately for overflow and smi check. } else {
deferred->SetEntryFrame(&value); // Otherwise we test separately for overflow and smi tag.
deferred->enter()->Branch(overflow, &value, not_taken); deferred->Branch(overflow);
__ test(value.reg(), Immediate(kSmiTagMask)); __ test(new_value.reg(), Immediate(kSmiTagMask));
deferred->enter()->Branch(not_zero, &value, not_taken); deferred->Branch(not_zero);
} }
deferred->BindExit();
// Store the new value in the target if not const. // Postfix: store the old value in the allocated slot under the
deferred->BindExit(&value); // reference.
frame_->Push(&value); if (is_postfix) frame_->SetElementAt(target.size(), &old_value);
if (!is_const) {
target.SetValue(NOT_CONST_INIT);
}
}
// Postfix: Discard the new value and use the old. frame_->Push(&new_value);
if (is_postfix) { // Non-constant: update the reference.
frame_->Drop(); if (!is_const) target.SetValue(NOT_CONST_INIT);
} }
// Postfix: drop the new value and use the old.
if (is_postfix) frame_->Drop();
} }
...@@ -5571,9 +5628,14 @@ bool CodeGenerator::HasValidEntryRegisters() { ...@@ -5571,9 +5628,14 @@ bool CodeGenerator::HasValidEntryRegisters() {
#endif #endif
// Emit a LoadIC call to get the value from receiver and leave it in
// dst. The receiver register is restored after the call.
class DeferredReferenceGetNamedValue: public DeferredCode { class DeferredReferenceGetNamedValue: public DeferredCode {
public: public:
explicit DeferredReferenceGetNamedValue(Handle<String> name) : name_(name) { DeferredReferenceGetNamedValue(Register dst,
Register receiver,
Handle<String> name)
: dst_(dst), receiver_(receiver), name_(name) {
set_comment("[ DeferredReferenceGetNamedValue"); set_comment("[ DeferredReferenceGetNamedValue");
} }
...@@ -5583,14 +5645,41 @@ class DeferredReferenceGetNamedValue: public DeferredCode { ...@@ -5583,14 +5645,41 @@ class DeferredReferenceGetNamedValue: public DeferredCode {
private: private:
Label patch_site_; Label patch_site_;
Register dst_;
Register receiver_;
Handle<String> name_; Handle<String> name_;
}; };
void DeferredReferenceGetNamedValue::Generate() {
__ push(receiver_);
__ Set(ecx, Immediate(name_));
Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
__ call(ic, RelocInfo::CODE_TARGET);
// The call must be followed by a test eax instruction to indicate
// that the inobject property case was inlined.
//
// Store the delta to the map check instruction here in the test
// instruction. Use masm_-> instead of the __ macro since the
// latter can't return a value.
int delta_to_patch_site = masm_->SizeOfCodeGeneratedSince(patch_site());
// Here we use masm_-> instead of the __ macro because this is the
// instruction that gets patched and coverage code gets in the way.
masm_->test(eax, Immediate(-delta_to_patch_site));
__ IncrementCounter(&Counters::named_load_inline_miss, 1);
if (!dst_.is(eax)) __ mov(dst_, eax);
__ pop(receiver_);
}
class DeferredReferenceGetKeyedValue: public DeferredCode { class DeferredReferenceGetKeyedValue: public DeferredCode {
public: public:
explicit DeferredReferenceGetKeyedValue(bool is_global) explicit DeferredReferenceGetKeyedValue(Register dst,
: is_global_(is_global) { Register receiver,
Register key,
bool is_global)
: dst_(dst), receiver_(receiver), key_(key), is_global_(is_global) {
set_comment("[ DeferredReferenceGetKeyedValue"); set_comment("[ DeferredReferenceGetKeyedValue");
} }
...@@ -5600,45 +5689,16 @@ class DeferredReferenceGetKeyedValue: public DeferredCode { ...@@ -5600,45 +5689,16 @@ class DeferredReferenceGetKeyedValue: public DeferredCode {
private: private:
Label patch_site_; Label patch_site_;
Register dst_;
Register receiver_;
Register key_;
bool is_global_; bool is_global_;
}; };
#undef __
#define __ ACCESS_MASM(cgen()->masm())
void DeferredReferenceGetNamedValue::Generate() {
Result receiver;
enter()->Bind(&receiver);
cgen()->frame()->Push(&receiver);
cgen()->frame()->Push(name_);
Result answer = cgen()->frame()->CallLoadIC(RelocInfo::CODE_TARGET);
// The call must be followed by a test eax instruction to indicate
// that the inobject property case was inlined.
ASSERT(answer.is_register() && answer.reg().is(eax));
// Store the delta to the map check instruction here in the test
// instruction. Use cgen()->masm()-> instead of the __ macro since
// the latter can't return a value.
int delta_to_patch_site =
cgen()->masm()->SizeOfCodeGeneratedSince(patch_site());
// Here we use cgen()->masm()-> instead of the __ macro because this
// is the instruction that gets patched and coverage code gets in the
// way.
cgen()->masm()->test(answer.reg(), Immediate(-delta_to_patch_site));
__ IncrementCounter(&Counters::named_load_inline_miss, 1);
receiver = cgen()->frame()->Pop();
exit_.Jump(&receiver, &answer);
}
void DeferredReferenceGetKeyedValue::Generate() { void DeferredReferenceGetKeyedValue::Generate() {
Result receiver; __ push(receiver_); // First IC argument.
Result key; __ push(key_); // Second IC argument.
enter()->Bind(&receiver, &key);
cgen()->frame()->Push(&receiver); // First IC argument.
cgen()->frame()->Push(&key); // Second IC argument.
// Calculate the delta from the IC call instruction to the map check // Calculate the delta from the IC call instruction to the map check
// cmp instruction in the inlined version. This delta is stored in // cmp instruction in the inlined version. This delta is stored in
...@@ -5646,34 +5706,25 @@ void DeferredReferenceGetKeyedValue::Generate() { ...@@ -5646,34 +5706,25 @@ void DeferredReferenceGetKeyedValue::Generate() {
// it in the IC initialization code and patch the cmp instruction. // it in the IC initialization code and patch the cmp instruction.
// This means that we cannot allow test instructions after calls to // This means that we cannot allow test instructions after calls to
// KeyedLoadIC stubs in other places. // KeyedLoadIC stubs in other places.
Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
RelocInfo::Mode mode = is_global_ RelocInfo::Mode mode = is_global_
? RelocInfo::CODE_TARGET_CONTEXT ? RelocInfo::CODE_TARGET_CONTEXT
: RelocInfo::CODE_TARGET; : RelocInfo::CODE_TARGET;
Result value = cgen()->frame()->CallKeyedLoadIC(mode); __ call(ic, mode);
// The result needs to be specifically the eax register because the // The delta from the start of the map-compare instruction to the
// offset to the patch site will be expected in a test eax // test instruction. We use masm_-> directly here instead of the __
// instruction. // macro because the macro sometimes uses macro expansion to turn
ASSERT(value.is_register() && value.reg().is(eax)); // into something that can't return a value. This is encountered
// The delta from the start of the map-compare instruction to the test // when doing generated code coverage tests.
// instruction. We use cgen()->masm() directly here instead of the __ int delta_to_patch_site = masm_->SizeOfCodeGeneratedSince(patch_site());
// macro because the macro sometimes uses macro expansion to turn into // Here we use masm_-> instead of the __ macro because this is the
// something that can't return a value. This is encountered when // instruction that gets patched and coverage code gets in the way.
// doing generated code coverage tests. masm_->test(eax, Immediate(-delta_to_patch_site));
int delta_to_patch_site =
cgen()->masm()->SizeOfCodeGeneratedSince(patch_site());
// Here we use cgen()->masm()-> instead of the __ macro because this
// is the instruction that gets patched and coverage code gets in the
// way.
cgen()->masm()->test(value.reg(), Immediate(-delta_to_patch_site));
__ IncrementCounter(&Counters::keyed_load_inline_miss, 1); __ IncrementCounter(&Counters::keyed_load_inline_miss, 1);
// The receiver and key were spilled by the call, so their state as if (!dst_.is(eax)) __ mov(dst_, eax);
// constants or copies has been changed. Thus, they need to be __ pop(key_);
// "mergable" in the block at the exit label and are therefore __ pop(receiver_);
// passed as return results here.
key = cgen()->frame()->Pop();
receiver = cgen()->frame()->Pop();
exit_.Jump(&receiver, &key, &value);
} }
...@@ -5744,29 +5795,19 @@ void Reference::GetValue(TypeofState typeof_state) { ...@@ -5744,29 +5795,19 @@ void Reference::GetValue(TypeofState typeof_state) {
} else { } else {
// Inline the inobject property case. // Inline the inobject property case.
Comment cmnt(masm, "[ Inlined named property load"); Comment cmnt(masm, "[ Inlined named property load");
DeferredReferenceGetNamedValue* deferred =
new DeferredReferenceGetNamedValue(GetName());
Result receiver = cgen_->frame()->Pop(); Result receiver = cgen_->frame()->Pop();
receiver.ToRegister(); receiver.ToRegister();
// Try to preallocate the value register so that all frames Result value = cgen_->allocator()->Allocate();
// reaching the deferred code are identical. ASSERT(value.is_valid());
Result value = cgen_->allocator()->AllocateWithoutSpilling(); DeferredReferenceGetNamedValue* deferred =
if (value.is_valid()) { new DeferredReferenceGetNamedValue(value.reg(),
deferred->SetEntryFrame(&receiver); receiver.reg(),
} GetName());
// Check that the receiver is a heap object. // Check that the receiver is a heap object.
__ test(receiver.reg(), Immediate(kSmiTagMask)); __ test(receiver.reg(), Immediate(kSmiTagMask));
deferred->enter()->Branch(zero, &receiver, not_taken); deferred->Branch(zero);
// Do not allocate the value register after binding the patch
// site label. The distance from the patch site to the offset
// must be constant.
if (!value.is_valid()) {
value = cgen_->allocator()->Allocate();
ASSERT(value.is_valid());
}
__ bind(deferred->patch_site()); __ bind(deferred->patch_site());
// This is the map check instruction that will be patched (so we can't // This is the map check instruction that will be patched (so we can't
...@@ -5776,7 +5817,7 @@ void Reference::GetValue(TypeofState typeof_state) { ...@@ -5776,7 +5817,7 @@ void Reference::GetValue(TypeofState typeof_state) {
Immediate(Factory::null_value())); Immediate(Factory::null_value()));
// This branch is always a forwards branch so it's always a fixed // This branch is always a forwards branch so it's always a fixed
// size which allows the assert below to succeed and patching to work. // size which allows the assert below to succeed and patching to work.
deferred->enter()->Branch(not_equal, &receiver, not_taken); deferred->Branch(not_equal);
// The delta from the patch label to the load offset must be // The delta from the patch label to the load offset must be
// statically known. // statically known.
...@@ -5789,7 +5830,7 @@ void Reference::GetValue(TypeofState typeof_state) { ...@@ -5789,7 +5830,7 @@ void Reference::GetValue(TypeofState typeof_state) {
masm->mov(value.reg(), FieldOperand(receiver.reg(), offset)); masm->mov(value.reg(), FieldOperand(receiver.reg(), offset));
__ IncrementCounter(&Counters::named_load_inline, 1); __ IncrementCounter(&Counters::named_load_inline, 1);
deferred->BindExit(&receiver, &value); deferred->BindExit();
cgen_->frame()->Push(&receiver); cgen_->frame()->Push(&receiver);
cgen_->frame()->Push(&value); cgen_->frame()->Push(&value);
} }
...@@ -5809,28 +5850,34 @@ void Reference::GetValue(TypeofState typeof_state) { ...@@ -5809,28 +5850,34 @@ void Reference::GetValue(TypeofState typeof_state) {
// patch the map check if appropriate. // patch the map check if appropriate.
if (cgen_->loop_nesting() > 0) { if (cgen_->loop_nesting() > 0) {
Comment cmnt(masm, "[ Inlined array index load"); Comment cmnt(masm, "[ Inlined array index load");
DeferredReferenceGetKeyedValue* deferred =
new DeferredReferenceGetKeyedValue(is_global);
Result key = cgen_->frame()->Pop(); Result key = cgen_->frame()->Pop();
Result receiver = cgen_->frame()->Pop(); Result receiver = cgen_->frame()->Pop();
key.ToRegister(); key.ToRegister();
receiver.ToRegister(); receiver.ToRegister();
// Try to preallocate the elements and index scratch registers // Use a fresh temporary to load the elements without destroying
// so that all frames reaching the deferred code are identical. // the receiver which is needed for the deferred slow case.
Result elements = cgen_->allocator()->AllocateWithoutSpilling(); Result elements = cgen_->allocator()->Allocate();
Result index = cgen_->allocator()->AllocateWithoutSpilling(); ASSERT(elements.is_valid());
if (elements.is_valid() && index.is_valid()) {
deferred->SetEntryFrame(&receiver, &key); // Use a fresh temporary for the index and later the loaded
} // value.
Result index = cgen_->allocator()->Allocate();
ASSERT(index.is_valid());
DeferredReferenceGetKeyedValue* deferred =
new DeferredReferenceGetKeyedValue(index.reg(),
receiver.reg(),
key.reg(),
is_global);
// Check that the receiver is not a smi (only needed if this // Check that the receiver is not a smi (only needed if this
// is not a load from the global context) and that it has the // is not a load from the global context) and that it has the
// expected map. // expected map.
if (!is_global) { if (!is_global) {
__ test(receiver.reg(), Immediate(kSmiTagMask)); __ test(receiver.reg(), Immediate(kSmiTagMask));
deferred->enter()->Branch(zero, &receiver, &key, not_taken); deferred->Branch(zero);
} }
// Initially, use an invalid map. The map is patched in the IC // Initially, use an invalid map. The map is patched in the IC
...@@ -5839,36 +5886,28 @@ void Reference::GetValue(TypeofState typeof_state) { ...@@ -5839,36 +5886,28 @@ void Reference::GetValue(TypeofState typeof_state) {
// Use masm-> here instead of the double underscore macro since extra // Use masm-> here instead of the double underscore macro since extra
// coverage code can interfere with the patching. // coverage code can interfere with the patching.
masm->cmp(FieldOperand(receiver.reg(), HeapObject::kMapOffset), masm->cmp(FieldOperand(receiver.reg(), HeapObject::kMapOffset),
Immediate(Factory::null_value())); Immediate(Factory::null_value()));
deferred->enter()->Branch(not_equal, &receiver, &key, not_taken); deferred->Branch(not_equal);
// Check that the key is a smi. // Check that the key is a smi.
__ test(key.reg(), Immediate(kSmiTagMask)); __ test(key.reg(), Immediate(kSmiTagMask));
deferred->enter()->Branch(not_zero, &receiver, &key, not_taken); deferred->Branch(not_zero);
// Get the elements array from the receiver and check that it // Get the elements array from the receiver and check that it
// is not a dictionary. // is not a dictionary.
if (!elements.is_valid()) {
elements = cgen_->allocator()->Allocate();
ASSERT(elements.is_valid());
}
__ mov(elements.reg(), __ mov(elements.reg(),
FieldOperand(receiver.reg(), JSObject::kElementsOffset)); FieldOperand(receiver.reg(), JSObject::kElementsOffset));
__ cmp(FieldOperand(elements.reg(), HeapObject::kMapOffset), __ cmp(FieldOperand(elements.reg(), HeapObject::kMapOffset),
Immediate(Factory::hash_table_map())); Immediate(Factory::hash_table_map()));
deferred->enter()->Branch(equal, &receiver, &key, not_taken); deferred->Branch(equal);
// Shift the key to get the actual index value and check that // Shift the key to get the actual index value and check that
// it is within bounds. // it is within bounds.
if (!index.is_valid()) {
index = cgen_->allocator()->Allocate();
ASSERT(index.is_valid());
}
__ mov(index.reg(), key.reg()); __ mov(index.reg(), key.reg());
__ sar(index.reg(), kSmiTagSize); __ sar(index.reg(), kSmiTagSize);
__ cmp(index.reg(), __ cmp(index.reg(),
FieldOperand(elements.reg(), Array::kLengthOffset)); FieldOperand(elements.reg(), Array::kLengthOffset));
deferred->enter()->Branch(above_equal, &receiver, &key, not_taken); deferred->Branch(above_equal);
// Load and check that the result is not the hole. We could // Load and check that the result is not the hole. We could
// reuse the index or elements register for the value. // reuse the index or elements register for the value.
...@@ -5885,12 +5924,12 @@ void Reference::GetValue(TypeofState typeof_state) { ...@@ -5885,12 +5924,12 @@ void Reference::GetValue(TypeofState typeof_state) {
elements.Unuse(); elements.Unuse();
index.Unuse(); index.Unuse();
__ cmp(Operand(value.reg()), Immediate(Factory::the_hole_value())); __ cmp(Operand(value.reg()), Immediate(Factory::the_hole_value()));
deferred->enter()->Branch(equal, &receiver, &key, not_taken); deferred->Branch(equal);
__ IncrementCounter(&Counters::keyed_load_inline, 1); __ IncrementCounter(&Counters::keyed_load_inline, 1);
deferred->BindExit();
// Restore the receiver and key to the frame and push the // Restore the receiver and key to the frame and push the
// result on top of it. // result on top of it.
deferred->BindExit(&receiver, &key, &value);
cgen_->frame()->Push(&receiver); cgen_->frame()->Push(&receiver);
cgen_->frame()->Push(&key); cgen_->frame()->Push(&key);
cgen_->frame()->Push(&value); cgen_->frame()->Push(&value);
......
...@@ -545,6 +545,8 @@ class VirtualFrame : public ZoneObject { ...@@ -545,6 +545,8 @@ class VirtualFrame : public ZoneObject {
bool Equals(VirtualFrame* other); bool Equals(VirtualFrame* other);
// Classes that need raw access to the elements_ array.
friend class DeferredCode;
friend class JumpTarget; friend class JumpTarget;
}; };
......
// Copyright 2009 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef V8_X64_CODEGEN_X64_INL_H_
#define V8_X64_CODEGEN_X64_INL_H_
namespace v8 {
namespace internal {
// Platform-specific inline functions.
void DeferredCode::Jump() { UNIMPLEMENTED(); }
void DeferredCode::Branch(Condition cc) { UNIMPLEMENTED(); }
} } // namespace v8::internal
#endif // V8_X64_CODEGEN_X64_INL_H_
...@@ -545,6 +545,8 @@ class VirtualFrame : public ZoneObject { ...@@ -545,6 +545,8 @@ class VirtualFrame : public ZoneObject {
bool Equals(VirtualFrame* other); bool Equals(VirtualFrame* other);
// Classes that need raw access to the elements_ array.
friend class DeferredCode;
friend class JumpTarget; friend class JumpTarget;
}; };
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment