Commit fa033477 authored by lrn@chromium.org

X64: Count operations (increment, decrement)

Review URL: http://codereview.chromium.org/149002


git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@2273 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
parent a5331d64
...
@@ -82,8 +82,6 @@ struct Register {
}
bool is_valid() const { return 0 <= code_ && code_ < 16; }
bool is(Register reg) const { return code_ == reg.code_; }
// The byte-register distinction of ia32 has disappeared.
bool is_byte_register() const { return false; }
int code() const {
ASSERT(is_valid());
return code_;
...
...
@@ -1951,10 +1951,185 @@ void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) {
}
void CodeGenerator::VisitCountOperation(CountOperation* a) {
UNIMPLEMENTED();
}

// The value in dst was optimistically incremented or decremented. The
// result overflowed or was not smi tagged. Undo the operation, call
// into the runtime to convert the argument to a number, and call the
// specialized add or subtract stub. The result is left in dst.
class DeferredPrefixCountOperation: public DeferredCode {
public:
DeferredPrefixCountOperation(Register dst, bool is_increment)
: dst_(dst), is_increment_(is_increment) {
set_comment("[ DeferredCountOperation");
}
virtual void Generate();
private:
Register dst_;
bool is_increment_;
};
void DeferredPrefixCountOperation::Generate() {
// Undo the optimistic smi operation.
if (is_increment_) {
__ subq(dst_, Immediate(Smi::FromInt(1)));
} else {
__ addq(dst_, Immediate(Smi::FromInt(1)));
}
__ push(dst_);
__ InvokeBuiltin(Builtins::TO_NUMBER, CALL_FUNCTION);
__ push(rax);
__ push(Immediate(Smi::FromInt(1)));
if (is_increment_) {
__ CallRuntime(Runtime::kNumberAdd, 2);
} else {
__ CallRuntime(Runtime::kNumberSub, 2);
}
if (!dst_.is(rax)) __ movq(dst_, rax);
}
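The optimistic operation above works directly on the tagged word. A minimal sketch of why, assuming the smi encoding this port used at the time (a zero tag bit in bit 0, value shifted left by one); the helper names are illustrative, not the V8 API:

#include <cstdint>

const int kSmiTagSize = 1;  // assumption: one zero tag bit in the low position

intptr_t SmiFromInt(intptr_t value) { return value << kSmiTagSize; }
intptr_t SmiToInt(intptr_t smi) { return smi >> kSmiTagSize; }

// Adding SmiFromInt(1) to a tagged smi increments the untagged value by one,
// so the fast path never untags. The deferred path subtracts the same
// constant to restore the original word before calling into the runtime.
intptr_t OptimisticIncrement(intptr_t tagged) { return tagged + SmiFromInt(1); }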
// The value in dst was optimistically incremented or decremented. The
// result overflowed or was not smi tagged. Undo the operation and call
// into the runtime to convert the argument to a number. Update the
// original value in old. Call the specialized add or subtract stub.
// The result is left in dst.
class DeferredPostfixCountOperation: public DeferredCode {
public:
DeferredPostfixCountOperation(Register dst, Register old, bool is_increment)
: dst_(dst), old_(old), is_increment_(is_increment) {
set_comment("[ DeferredCountOperation");
}
virtual void Generate();
private:
Register dst_;
Register old_;
bool is_increment_;
};
void DeferredPostfixCountOperation::Generate() {
// Undo the optimistic smi operation.
if (is_increment_) {
__ subq(dst_, Immediate(Smi::FromInt(1)));
} else {
__ addq(dst_, Immediate(Smi::FromInt(1)));
}
__ push(dst_);
__ InvokeBuiltin(Builtins::TO_NUMBER, CALL_FUNCTION);
// Save the result of ToNumber to use as the old value.
__ push(rax);
// Call the runtime for the addition or subtraction.
__ push(rax);
__ push(Immediate(Smi::FromInt(1)));
if (is_increment_) {
__ CallRuntime(Runtime::kNumberAdd, 2);
} else {
__ CallRuntime(Runtime::kNumberSub, 2);
}
if (!dst_.is(rax)) __ movq(dst_, rax);
__ pop(old_);
}
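The two deferred paths differ only in which value the expression yields: prefix produces the new value, while postfix preserves the ToNumber result as the old value, which is why rax is pushed twice above. A hedged C++ analogue, with ToNumber reduced to a placeholder:

// Placeholder standing in for Builtins::TO_NUMBER; identity suffices here.
long ToNumber(long v) { return v; }

long PrefixIncrement(long* slot) {
  long result = ToNumber(*slot) + 1;  // Runtime::kNumberAdd
  *slot = result;
  return result;                      // expression value: the new value
}

long PostfixIncrement(long* slot) {
  long old_value = ToNumber(*slot);   // the extra push(rax) saves this copy
  *slot = old_value + 1;
  return old_value;                   // expression value: the old value
}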
void CodeGenerator::VisitCountOperation(CountOperation* node) {
Comment cmnt(masm_, "[ CountOperation");
bool is_postfix = node->is_postfix();
bool is_increment = node->op() == Token::INC;
Variable* var = node->expression()->AsVariableProxy()->AsVariable();
bool is_const = (var != NULL && var->mode() == Variable::CONST);
// Postfix operations need a stack slot under the reference to hold
// the old value while the new value is being stored. This is so that
// in the case that storing the new value requires a call, the old
// value will be in the frame to be spilled.
if (is_postfix) frame_->Push(Smi::FromInt(0));
{ Reference target(this, node->expression());
if (target.is_illegal()) {
// Spoof the virtual frame to have the expected height (one higher
// than on entry).
if (!is_postfix) frame_->Push(Smi::FromInt(0));
return;
}
target.TakeValue(NOT_INSIDE_TYPEOF);
Result new_value = frame_->Pop();
new_value.ToRegister();
Result old_value; // Only allocated in the postfix case.
if (is_postfix) {
// Allocate a temporary to preserve the old value.
old_value = allocator_->Allocate();
ASSERT(old_value.is_valid());
__ movq(old_value.reg(), new_value.reg());
}
// Ensure the new value is writable.
frame_->Spill(new_value.reg());
// In order to combine the overflow and the smi tag check, we need
// to write the setcc result to a byte register. Unlike ia32, x64 can
// address the low byte of every register, so kScratchRegister is used
// for this below instead of allocating a dedicated byte register.
//
// The scratch register is cleared before the count operation is
// performed, since clearing a register with xor also clears the
// overflow flag that setcc must read after the operation.
Result tmp = allocator_->AllocateWithoutSpilling();
// Clear scratch register to prepare it for setcc after the operation below.
__ xor_(kScratchRegister, kScratchRegister);
DeferredCode* deferred = NULL;
if (is_postfix) {
deferred = new DeferredPostfixCountOperation(new_value.reg(),
old_value.reg(),
is_increment);
} else {
deferred = new DeferredPrefixCountOperation(new_value.reg(),
is_increment);
}
if (is_increment) {
__ addq(new_value.reg(), Immediate(Smi::FromInt(1)));
} else {
__ subq(new_value.reg(), Immediate(Smi::FromInt(1)));
}
// If the count operation didn't overflow and the result is a valid
// smi, we're done. Otherwise, we jump to the deferred slow-case
// code.
// We combine the overflow and the smi tag check.
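// setcc writes the overflow flag into the low byte of kScratchRegister
// (0 or 1). OR-ing in the result then makes bit 0 nonzero if either the
// operation overflowed or the result's smi tag bit is set, so the single
// testl against kSmiTagMask below covers both conditions.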
__ setcc(overflow, kScratchRegister);
__ or_(kScratchRegister, new_value.reg());
__ testl(kScratchRegister, Immediate(kSmiTagMask));
tmp.Unuse();
deferred->Branch(not_zero);
deferred->BindExit();
// Postfix: store the old value in the allocated slot under the
// reference.
if (is_postfix) frame_->SetElementAt(target.size(), &old_value);
frame_->Push(&new_value);
// Non-constant: update the reference.
if (!is_const) target.SetValue(NOT_CONST_INIT);
}
// Postfix: drop the new value and use the old.
if (is_postfix) frame_->Drop();
}
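A hedged C++ model of that fast-path test, assuming smis carry a zero low tag bit (kSmiTag == 0, kSmiTagMask == 1); the function name is hypothetical:

#include <cstdint>

const intptr_t kSmiTagMask = 1;  // low bit is the tag; 0 means smi

// Mirrors setcc(overflow) + or_ + testl: take the deferred slow path if the
// optimistic operation overflowed or the result is not smi tagged.
bool NeedsDeferredPath(intptr_t result, bool overflowed) {
  intptr_t scratch = overflowed ? 1 : 0;  // setcc(overflow, kScratchRegister)
  scratch |= result;                      // or_(kScratchRegister, result)
  return (scratch & kSmiTagMask) != 0;    // testl + branch on not_zero
}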
void CodeGenerator::VisitBinaryOperation(BinaryOperation* node) {
// TODO(X64): This code was copied verbatim from codegen-ia32.
// Either find a reason to change it or move it to a shared location.
...
@@ -4022,6 +4197,39 @@ void Reference::GetValue(TypeofState typeof_state) {
}
void Reference::TakeValue(TypeofState typeof_state) {
// TODO(X64): This function is completely architecture independent. Move
// it somewhere shared.
// For non-constant frame-allocated slots, we invalidate the value in the
// slot. For all others, we fall back on GetValue.
ASSERT(!cgen_->in_spilled_code());
ASSERT(!is_illegal());
if (type_ != SLOT) {
GetValue(typeof_state);
return;
}
Slot* slot = expression_->AsVariableProxy()->AsVariable()->slot();
ASSERT(slot != NULL);
if (slot->type() == Slot::LOOKUP ||
slot->type() == Slot::CONTEXT ||
slot->var()->mode() == Variable::CONST) {
GetValue(typeof_state);
return;
}
// Only non-constant, frame-allocated parameters and locals can reach
// here.
if (slot->type() == Slot::PARAMETER) {
cgen_->frame()->TakeParameterAt(slot->index());
} else {
ASSERT(slot->type() == Slot::LOCAL);
cgen_->frame()->TakeLocalAt(slot->index());
}
}
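TakeValue may leave a frame-allocated slot invalid, which is safe here only because VisitCountOperation always writes the slot back through SetValue. A minimal, hypothetical model of that move-out/write-back contract:

#include <cassert>
#include <optional>

struct MiniSlot {
  std::optional<int> value;

  // TakeValue analogue: move the value out, leaving the slot invalid.
  int Take() {
    assert(value.has_value());
    int v = *value;
    value.reset();
    return v;
  }

  // SetValue analogue: the caller is obliged to restore the slot.
  void Set(int v) { value = v; }
};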
void Reference::SetValue(InitState init_state) {
ASSERT(cgen_->HasValidEntryRegisters());
ASSERT(!is_illegal());
...
...
@@ -71,4 +71,14 @@ void Result::ToRegister(Register target) {
}
// -------------------------------------------------------------------------
// RegisterAllocator implementation.
Result RegisterAllocator::AllocateByteRegisterWithoutSpilling() {
// This function is not used in 64-bit code.
UNREACHABLE();
return Result();
}
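This stub, like the removal of is_byte_register() in the first hunk, reflects an ISA difference: on ia32 only eax, ecx, edx and ebx have addressable low bytes, so setcc needs a specially allocated register, while x64 (with a REX prefix) can target the low byte of any register, which is why the code generator above simply uses kScratchRegister. A sketch of the predicate an ia32-style allocator has to respect:

// ia32 register codes: eax=0, ecx=1, edx=2, ebx=3 are the only registers
// with byte forms (al, cl, dl, bl). On x64 every register qualifies, so
// this special allocation path is unreachable.
bool IsByteRegisterOnIA32(int code) {
  return code >= 0 && code <= 3;
}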
} }  // namespace v8::internal
...
@@ -284,6 +284,45 @@ int VirtualFrame::InvalidateFrameSlotAt(int index) {
}
void VirtualFrame::TakeFrameSlotAt(int index) {
ASSERT(index >= 0);
ASSERT(index <= element_count());
FrameElement original = elements_[index];
int new_backing_store_index = InvalidateFrameSlotAt(index);
if (new_backing_store_index != kIllegalIndex) {
elements_.Add(CopyElementAt(new_backing_store_index));
return;
}
switch (original.type()) {
case FrameElement::MEMORY: {
// Emit code to load the original element's data into a register.
// Push that register as a FrameElement on top of the frame.
Result fresh = cgen()->allocator()->Allocate();
ASSERT(fresh.is_valid());
FrameElement new_element =
FrameElement::RegisterElement(fresh.reg(),
FrameElement::NOT_SYNCED);
Use(fresh.reg(), element_count());
elements_.Add(new_element);
__ movq(fresh.reg(), Operand(rbp, fp_relative(index)));
break;
}
case FrameElement::REGISTER:
Use(original.reg(), element_count());
// Fall through.
case FrameElement::CONSTANT:
case FrameElement::COPY:
original.clear_sync();
elements_.Add(original);
break;
case FrameElement::INVALID:
UNREACHABLE();
break;
}
}
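A simplified, hypothetical model of TakeFrameSlotAt's effect on the element list: the slot's element ends up on top and the slot itself is invalidated. The real code above additionally transfers the backing store to remaining copies and loads MEMORY elements through a freshly allocated register.

#include <vector>

enum class Kind { kInvalid, kMemory, kRegister, kConstant, kCopy };

struct Element {
  Kind kind;
};

// Move the element at `index` to the top of the frame, leaving the original
// slot invalid. Register and backing-store bookkeeping are omitted.
void TakeFrameSlotAt(std::vector<Element>* frame, int index) {
  Element original = (*frame)[index];
  (*frame)[index] = Element{Kind::kInvalid};  // InvalidateFrameSlotAt, simplified
  frame->push_back(original);
}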
void VirtualFrame::StoreToFrameSlotAt(int index) {
// Store the value on top of the frame to the virtual frame slot at
// a given index. The value on top of the frame is left in place.
...