Commit 1f9fef3d authored by whesse@chromium.org

Add inlined code for (constant SHL smi), ported from ia32 to x64.  Improve type information on ia32 version of code.
Review URL: http://codereview.chromium.org/1869001

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@4571 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
parent b685df1c
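
For context, the fast path this change inlines for the reversed case (constant SHL smi, i.e. the shift count is the smi operand) does the following: confirm the count is a smi, untag it, shift the constant, on ia32 check that the result still fits in the 31-bit smi range (on x64 a smi can hold any 32-bit value, so the ported code skips the range check), and re-tag the result. A rough standalone C++ sketch of that logic, with ConstantShlSmi and its signature invented purely for illustration (the tag constants match V8's ia32 smi encoding):

#include <cstdint>

static const int kSmiTagSize = 1;  // ia32: a smi is the value shifted left by one.
static const int kSmiTagMask = 1;

// Returns true and stores the tagged result when the fast path applies;
// returns false where the generated code would instead jump to the deferred
// GenericBinaryOpStub call.
static bool ConstantShlSmi(int32_t constant, int32_t tagged_count,
                           int32_t* tagged_result) {
  if ((tagged_count & kSmiTagMask) != 0) return false;    // Not a smi: defer.
  int32_t count = tagged_count >> kSmiTagSize;             // Untag the shift count.
  int64_t value =
      static_cast<int64_t>(constant) << (count & 0x1f);    // Only the low 5 bits are used.
  if (value < -0x40000000LL || value > 0x3fffffffLL) {
    return false;                                          // Outside the ia32 smi range: defer.
  }
  *tagged_result = static_cast<int32_t>(value << kSmiTagSize);  // Re-tag as a smi.
  return true;
}

Anything that falls off this fast path is handled by the new DeferredInlineSmiOperationReversed, which calls the GenericBinaryOpStub, as in the x64 hunks below.
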
@@ -2041,8 +2041,8 @@ Result CodeGenerator::ConstantSmiBinaryOperation(
if (!operand->type_info().IsSmi()) {
__ test(operand->reg(), Immediate(kSmiTagMask));
deferred->Branch(not_zero);
} else {
if (FLAG_debug_code) __ AbortIfNotSmi(operand->reg());
} else if (FLAG_debug_code) {
__ AbortIfNotSmi(operand->reg());
}
deferred->BindExit();
answer = *operand;
@@ -2080,8 +2080,8 @@ Result CodeGenerator::ConstantSmiBinaryOperation(
if (!operand->type_info().IsSmi()) {
__ test(answer.reg(), Immediate(kSmiTagMask));
deferred->Branch(not_zero);
} else {
if (FLAG_debug_code) __ AbortIfNotSmi(operand->reg());
} else if (FLAG_debug_code) {
__ AbortIfNotSmi(operand->reg());
}
deferred->BindExit();
operand->Unuse();
@@ -2115,7 +2115,9 @@ Result CodeGenerator::ConstantSmiBinaryOperation(
}
deferred->BindExit();
} else {
if (FLAG_debug_code) __ AbortIfNotSmi(operand->reg());
if (FLAG_debug_code) {
__ AbortIfNotSmi(operand->reg());
}
if (shift_value > 0) {
__ sar(operand->reg(), shift_value);
__ and_(operand->reg(), ~kSmiTagMask);
@@ -2147,8 +2149,8 @@ Result CodeGenerator::ConstantSmiBinaryOperation(
if (!operand->type_info().IsSmi()) {
__ test(operand->reg(), Immediate(kSmiTagMask));
deferred->Branch(not_zero);
} else {
if (FLAG_debug_code) __ AbortIfNotSmi(operand->reg());
} else if (FLAG_debug_code) {
__ AbortIfNotSmi(operand->reg());
}
__ mov(answer.reg(), operand->reg());
__ SmiUntag(answer.reg());
@@ -2166,12 +2168,12 @@ Result CodeGenerator::ConstantSmiBinaryOperation(
case Token::SHL:
if (reversed) {
// Move operand into ecx and also into a second register.
// If operand is already in a register, take advantage of that.
// This lets us modify ecx, but still bail out to deferred code.
Result right;
Result right_copy_in_ecx;
// Make sure to get a copy of the right operand into ecx. This
// allows us to modify it without having to restore it in the
// deferred code.
TypeInfo right_type_info = operand->type_info();
operand->ToRegister();
if (operand->reg().is(ecx)) {
right = allocator()->Allocate();
@@ -2191,14 +2193,14 @@ Result CodeGenerator::ConstantSmiBinaryOperation(
answer.reg(),
smi_value,
right.reg(),
right.type_info(),
right_type_info,
overwrite_mode);
__ mov(answer.reg(), Immediate(int_value));
__ sar(ecx, kSmiTagSize);
if (!right.type_info().IsSmi()) {
deferred->Branch(carry);
} else {
if (FLAG_debug_code) __ AbortIfNotSmi(right.reg());
} else if (FLAG_debug_code) {
__ AbortIfNotSmi(right.reg());
}
__ shl_cl(answer.reg());
__ cmp(answer.reg(), 0xc0000000);
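
An aside on the cmp against 0xc0000000 just above: ia32 smis carry a 31-bit payload, so before re-tagging the shifted result must lie in [-2^30, 2^30 - 1]. Subtracting 0xc0000000 is the same as adding 0x40000000, which maps exactly that range onto the values with a clear sign bit, so the sign flag ends up set precisely when the result does not fit. The equivalent predicate, as an illustrative C++ sketch (FitsInSmi31 is not a V8 helper):

#include <cstdint>

// True when a 32-bit result fits in ia32's 31-bit smi payload range;
// mirrors "cmp result, 0xc0000000" followed by a test of the sign flag.
static bool FitsInSmi31(int32_t result) {
  return static_cast<uint32_t>(result) + 0x40000000u <= 0x7fffffffu;
}
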
@@ -2239,8 +2241,8 @@ Result CodeGenerator::ConstantSmiBinaryOperation(
if (!operand->type_info().IsSmi()) {
__ test(operand->reg(), Immediate(kSmiTagMask));
deferred->Branch(not_zero);
} else {
if (FLAG_debug_code) __ AbortIfNotSmi(operand->reg());
} else if (FLAG_debug_code) {
__ AbortIfNotSmi(operand->reg());
}
__ mov(answer.reg(), operand->reg());
ASSERT(kSmiTag == 0); // adjust code if not the case
@@ -2283,8 +2285,8 @@ Result CodeGenerator::ConstantSmiBinaryOperation(
if (!operand->type_info().IsSmi()) {
__ test(operand->reg(), Immediate(kSmiTagMask));
deferred->Branch(not_zero);
} else {
if (FLAG_debug_code) __ AbortIfNotSmi(operand->reg());
} else if (FLAG_debug_code) {
__ AbortIfNotSmi(operand->reg());
}
if (op == Token::BIT_AND) {
__ and_(Operand(operand->reg()), Immediate(value));
@@ -193,6 +193,34 @@ class DeferredInlineSmiOperation: public DeferredCode {
};
// Call the appropriate binary operation stub to compute value op src
// and leave the result in dst.
class DeferredInlineSmiOperationReversed: public DeferredCode {
public:
DeferredInlineSmiOperationReversed(Token::Value op,
Register dst,
Smi* value,
Register src,
OverwriteMode overwrite_mode)
: op_(op),
dst_(dst),
value_(value),
src_(src),
overwrite_mode_(overwrite_mode) {
set_comment("[ DeferredInlineSmiOperationReversed");
}
virtual void Generate();
private:
Token::Value op_;
Register dst_;
Smi* value_;
Register src_;
OverwriteMode overwrite_mode_;
};
class FloatingPointHelper : public AllStatic {
public:
// Code pattern for loading a floating point value. Input value must
@@ -6363,6 +6391,16 @@ void DeferredInlineSmiOperation::Generate() {
}
void DeferredInlineSmiOperationReversed::Generate() {
GenericBinaryOpStub stub(
op_,
overwrite_mode_,
NO_SMI_CODE_IN_STUB);
stub.GenerateCall(masm_, value_, src_);
if (!dst_.is(rax)) __ movq(dst_, rax);
}
Result CodeGenerator::ConstantSmiBinaryOperation(BinaryOperation* expr,
Result* operand,
Handle<Object> value,
@@ -6492,9 +6530,45 @@ Result CodeGenerator::ConstantSmiBinaryOperation(BinaryOperation* expr,
case Token::SHL:
if (reversed) {
Result constant_operand(value);
answer = LikelySmiBinaryOperation(expr, &constant_operand, operand,
overwrite_mode);
// Move operand into rcx and also into a second register.
// If operand is already in a register, take advantage of that.
// This lets us modify rcx, but still bail out to deferred code.
Result right;
Result right_copy_in_rcx;
TypeInfo right_type_info = operand->type_info();
operand->ToRegister();
if (operand->reg().is(rcx)) {
right = allocator()->Allocate();
__ movq(right.reg(), rcx);
frame_->Spill(rcx);
right_copy_in_rcx = *operand;
} else {
right_copy_in_rcx = allocator()->Allocate(rcx);
__ movq(rcx, operand->reg());
right = *operand;
}
operand->Unuse();
answer = allocator()->Allocate();
DeferredInlineSmiOperationReversed* deferred =
new DeferredInlineSmiOperationReversed(op,
answer.reg(),
smi_value,
right.reg(),
overwrite_mode);
__ movq(answer.reg(), Immediate(int_value));
__ SmiToInteger32(rcx, rcx);
if (!right.type_info().IsSmi()) {
Condition is_smi = masm_->CheckSmi(right.reg());
deferred->Branch(NegateCondition(is_smi));
} else if (FLAG_debug_code) {
__ AbortIfNotSmi(right.reg(),
"Static type info claims non-smi is smi in (const SHL smi).");
}
__ shl_cl(answer.reg());
__ Integer32ToSmi(answer.reg(), answer.reg());
deferred->BindExit();
} else {
// Only the least significant 5 bits of the shift value are used.
// In the slow case, this masking is done inside the runtime call.
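
Design note on the register shuffling in the reversed case above: variable shift instructions on ia32/x64 take their count only from cl, which is what shl_cl emits, so the smi shift count has to end up in ecx/rcx; keeping a second copy of the operand in another register lets the deferred GenericBinaryOpStub path still receive the original tagged operand after rcx has been untagged in place.
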
@@ -7201,10 +7275,8 @@ void Reference::SetValue(InitState init_state) {
// Check that the key is a smi.
if (!key.is_smi()) {
__ JumpIfNotSmi(key.reg(), deferred->entry_label());
} else {
if (FLAG_debug_code) {
__ AbortIfNotSmi(key.reg(), "Non-smi value in smi-typed value.");
}
} else if (FLAG_debug_code) {
__ AbortIfNotSmi(key.reg(), "Non-smi value in smi-typed value.");
}
// Check that the receiver is a JSArray.
@@ -9961,12 +10033,10 @@ void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
Label not_floats;
// rax: y
// rdx: x
if (static_operands_type_.IsNumber()) {
if (FLAG_debug_code) {
// Assert at runtime that inputs are only numbers.
__ AbortIfNotNumber(rdx, "GenericBinaryOpStub operand not a number.");
__ AbortIfNotNumber(rax, "GenericBinaryOpStub operand not a number.");
}
if (static_operands_type_.IsNumber() && FLAG_debug_code) {
// Assert at runtime that inputs are only numbers.
__ AbortIfNotNumber(rdx, "GenericBinaryOpStub operand not a number.");
__ AbortIfNotNumber(rax, "GenericBinaryOpStub operand not a number.");
} else {
FloatingPointHelper::CheckNumberOperands(masm, &call_runtime);
}