Commit fde714bb authored by whesse@chromium.org

Revert change 4201.

Review URL: http://codereview.chromium.org/1113007

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@4203 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
parent 03089b66
@@ -5523,22 +5523,22 @@ static void AllocateHeapNumber(
 // to call the C-implemented binary fp operation routines we need to end up
 // with the double precision floating point operands in r0 and r1 (for the
 // value in r1) and r2 and r3 (for the value in r0).
-void GenericBinaryOpStub::HandleBinaryOpSlowCases(MacroAssembler* masm,
-                                                  Label* not_smi,
-                                                  const Builtins::JavaScript& builtin) {
+static void HandleBinaryOpSlowCases(MacroAssembler* masm,
+                                    Label* not_smi,
+                                    const Builtins::JavaScript& builtin,
+                                    Token::Value operation,
+                                    OverwriteMode mode) {
   Label slow, slow_pop_2_first, do_the_call;
   Label r0_is_smi, r1_is_smi, finished_loading_r0, finished_loading_r1;
-  // If we have floating point hardware, inline ADD, SUB, MUL, and DIV,
-  // using registers d7 and d6 for the double values.
-  bool use_fp_registers = CpuFeatures::IsSupported(VFP3) &&
-      Token::MOD != op_;
-  if (ShouldGenerateSmiCode()) {
   // Smi-smi case (overflow).
   // Since both are Smis there is no heap number to overwrite, so allocate.
   // The new heap number is in r5.  r6 and r7 are scratch.
   AllocateHeapNumber(masm, &slow, r5, r6, r7);
+  // If we have floating point hardware, inline ADD, SUB, MUL, and DIV,
+  // using registers d7 and d6 for the double values.
+  bool use_fp_registers = CpuFeatures::IsSupported(VFP3) &&
+      Token::MOD != operation;
   if (use_fp_registers) {
     CpuFeatures::Scope scope(VFP3);
     __ mov(r7, Operand(r0, ASR, kSmiTagSize));
@@ -5561,27 +5561,71 @@ void GenericBinaryOpStub::HandleBinaryOpSlowCases(MacroAssembler* masm,
   }
   __ jmp(&do_the_call);  // Tail call.  No return.
-  }
-
-  // We branch here if at least one of r0 and r1 is not a Smi.
-  __ bind(not_smi);
-
-  if (ShouldGenerateFPCode()) {
-    if (runtime_operands_type_ == BinaryOpIC::DEFAULT) {
-      switch (op_) {
-        case Token::ADD:
-        case Token::SUB:
-        case Token::MUL:
-        case Token::DIV:
-          GenerateTypeTransition(masm);
-          break;
-        default:
-          break;
-      }
-    }
-
-    if (mode_ == NO_OVERWRITE) {
+
+  // We jump to here if something goes wrong (one param is not a number of any
+  // sort or new-space allocation fails).
+  __ bind(&slow);
+
+  // Push arguments to the stack
+  __ push(r1);
+  __ push(r0);
+
+  if (Token::ADD == operation) {
+    // Test for string arguments before calling runtime.
+    // r1 : first argument
+    // r0 : second argument
+    // sp[0] : second argument
+    // sp[4] : first argument
+    Label not_strings, not_string1, string1, string1_smi2;
+    __ tst(r1, Operand(kSmiTagMask));
+    __ b(eq, &not_string1);
+    __ CompareObjectType(r1, r2, r2, FIRST_NONSTRING_TYPE);
+    __ b(ge, &not_string1);
+
+    // First argument is a string, test second.
+    __ tst(r0, Operand(kSmiTagMask));
+    __ b(eq, &string1_smi2);
+    __ CompareObjectType(r0, r2, r2, FIRST_NONSTRING_TYPE);
+    __ b(ge, &string1);
+
+    // First and second argument are strings.
+    StringAddStub string_add_stub(NO_STRING_CHECK_IN_STUB);
+    __ TailCallStub(&string_add_stub);
+
+    __ bind(&string1_smi2);
+    // First argument is a string, second is a smi. Try to look up the number
+    // string for the smi in the number string cache.
+    NumberToStringStub::GenerateLookupNumberStringCache(
+        masm, r0, r2, r4, r5, true, &string1);
+
+    // Replace second argument on stack and tailcall string add stub to make
+    // the result.
+    __ str(r2, MemOperand(sp, 0));
+    __ TailCallStub(&string_add_stub);
+
+    // Only first argument is a string.
+    __ bind(&string1);
+    __ InvokeBuiltin(Builtins::STRING_ADD_LEFT, JUMP_JS);
+
+    // First argument was not a string, test second.
+    __ bind(&not_string1);
+    __ tst(r0, Operand(kSmiTagMask));
+    __ b(eq, &not_strings);
+    __ CompareObjectType(r0, r2, r2, FIRST_NONSTRING_TYPE);
+    __ b(ge, &not_strings);
+
+    // Only second argument is a string.
+    __ InvokeBuiltin(Builtins::STRING_ADD_RIGHT, JUMP_JS);
+
+    __ bind(&not_strings);
+  }
+
+  __ InvokeBuiltin(builtin, JUMP_JS);  // Tail call.  No return.
+
+  // We branch here if at least one of r0 and r1 is not a Smi.
+  __ bind(not_smi);
+  if (mode == NO_OVERWRITE) {
     // In the case where there is no chance of an overwritable float we may as
     // well do the allocation immediately while r0 and r1 are untouched.
     AllocateHeapNumber(masm, &slow, r5, r6, r7);
@@ -5592,7 +5636,7 @@ void GenericBinaryOpStub::HandleBinaryOpSlowCases(MacroAssembler* masm,
   __ b(eq, &r0_is_smi);  // It's a Smi so don't check it's a heap number.
   __ CompareObjectType(r0, r4, r4, HEAP_NUMBER_TYPE);
   __ b(ne, &slow);
-  if (mode_ == OVERWRITE_RIGHT) {
+  if (mode == OVERWRITE_RIGHT) {
     __ mov(r5, Operand(r0));  // Overwrite this heap number.
   }
   if (use_fp_registers) {
@@ -5607,7 +5651,7 @@ void GenericBinaryOpStub::HandleBinaryOpSlowCases(MacroAssembler* masm,
   }
   __ jmp(&finished_loading_r0);
   __ bind(&r0_is_smi);
-  if (mode_ == OVERWRITE_RIGHT) {
+  if (mode == OVERWRITE_RIGHT) {
     // We can't overwrite a Smi so get address of new heap number into r5.
     AllocateHeapNumber(masm, &slow, r5, r6, r7);
   }
@@ -5634,7 +5678,7 @@ void GenericBinaryOpStub::HandleBinaryOpSlowCases(MacroAssembler* masm,
   __ b(eq, &r1_is_smi);  // It's a Smi so don't check it's a heap number.
   __ CompareObjectType(r1, r4, r4, HEAP_NUMBER_TYPE);
   __ b(ne, &slow);
-  if (mode_ == OVERWRITE_LEFT) {
+  if (mode == OVERWRITE_LEFT) {
     __ mov(r5, Operand(r1));  // Overwrite this heap number.
   }
   if (use_fp_registers) {
@@ -5649,7 +5693,7 @@ void GenericBinaryOpStub::HandleBinaryOpSlowCases(MacroAssembler* masm,
   }
   __ jmp(&finished_loading_r1);
   __ bind(&r1_is_smi);
-  if (mode_ == OVERWRITE_LEFT) {
+  if (mode == OVERWRITE_LEFT) {
     // We can't overwrite a Smi so get address of new heap number into r5.
     AllocateHeapNumber(masm, &slow, r5, r6, r7);
   }
@@ -5679,13 +5723,13 @@ void GenericBinaryOpStub::HandleBinaryOpSlowCases(MacroAssembler* masm,
     // ARMv7 VFP3 instructions to implement
     // double precision, add, subtract, multiply, divide.
-    if (Token::MUL == op_) {
+    if (Token::MUL == operation) {
       __ vmul(d5, d6, d7);
-    } else if (Token::DIV == op_) {
+    } else if (Token::DIV == operation) {
       __ vdiv(d5, d6, d7);
-    } else if (Token::ADD == op_) {
+    } else if (Token::ADD == operation) {
       __ vadd(d5, d6, d7);
-    } else if (Token::SUB == op_) {
+    } else if (Token::SUB == operation) {
       __ vsub(d5, d6, d7);
     } else {
       UNREACHABLE();
@@ -5694,7 +5738,9 @@ void GenericBinaryOpStub::HandleBinaryOpSlowCases(MacroAssembler* masm,
     __ vstr(d5, r0, HeapNumber::kValueOffset);
     __ add(r0, r0, Operand(kHeapObjectTag));
     __ mov(pc, lr);
-  } else {
+    return;
+  }
+
   // If we did not inline the operation, then the arguments are in:
   // r0: Left value (least significant part of mantissa).
   // r1: Left value (sign, exponent, top of mantissa).
@@ -5706,89 +5752,26 @@ void GenericBinaryOpStub::HandleBinaryOpSlowCases(MacroAssembler* masm,
   __ push(r5);  // Address of heap number that is answer.
   __ AlignStack(0);
   // Call C routine that may not cause GC or other trouble.
-  __ mov(r5, Operand(ExternalReference::double_fp_operation(op_)));
+  __ mov(r5, Operand(ExternalReference::double_fp_operation(operation)));
   __ Call(r5);
   __ pop(r4);  // Address of heap number.
   __ cmp(r4, Operand(Smi::FromInt(0)));
-  __ pop(r4, eq);  // Conditional pop instruction
-                   // to get rid of alignment push.
+  __ pop(r4, eq);  // Conditional pop instruction to get rid of alignment push.
   // Store answer in the overwritable heap number.
 #if !defined(USE_ARM_EABI)
   // Double returned in fp coprocessor register 0 and 1, encoded as register
   // cr8.  Offsets must be divisible by 4 for coprocessor so we need to
   // subtract the tag from r4.
   __ sub(r5, r4, Operand(kHeapObjectTag));
   __ stc(p1, cr8, MemOperand(r5, HeapNumber::kValueOffset));
 #else
   // Double returned in registers 0 and 1.
   __ str(r0, FieldMemOperand(r4, HeapNumber::kValueOffset));
   __ str(r1, FieldMemOperand(r4, HeapNumber::kValueOffset + 4));
 #endif
   __ mov(r0, Operand(r4));
   // And we are done.
   __ pop(pc);
-  }
-  }
-
-  // We jump to here if something goes wrong (one param is not a number of any
-  // sort or new-space allocation fails).
-  __ bind(&slow);
-
-  // Push arguments to the stack
-  __ push(r1);
-  __ push(r0);
-
-  if (Token::ADD == op_) {
-    // Test for string arguments before calling runtime.
-    // r1 : first argument
-    // r0 : second argument
-    // sp[0] : second argument
-    // sp[4] : first argument
-    Label not_strings, not_string1, string1, string1_smi2;
-    __ tst(r1, Operand(kSmiTagMask));
-    __ b(eq, &not_string1);
-    __ CompareObjectType(r1, r2, r2, FIRST_NONSTRING_TYPE);
-    __ b(ge, &not_string1);
-
-    // First argument is a string, test second.
-    __ tst(r0, Operand(kSmiTagMask));
-    __ b(eq, &string1_smi2);
-    __ CompareObjectType(r0, r2, r2, FIRST_NONSTRING_TYPE);
-    __ b(ge, &string1);
-
-    // First and second argument are strings.
-    StringAddStub string_add_stub(NO_STRING_CHECK_IN_STUB);
-    __ TailCallStub(&string_add_stub);
-
-    __ bind(&string1_smi2);
-    // First argument is a string, second is a smi. Try to look up the number
-    // string for the smi in the number string cache.
-    NumberToStringStub::GenerateLookupNumberStringCache(
-        masm, r0, r2, r4, r5, true, &string1);
-
-    // Replace second argument on stack and tailcall string add stub to make
-    // the result.
-    __ str(r2, MemOperand(sp, 0));
-    __ TailCallStub(&string_add_stub);
-
-    // Only first argument is a string.
-    __ bind(&string1);
-    __ InvokeBuiltin(Builtins::STRING_ADD_LEFT, JUMP_JS);
-
-    // First argument was not a string, test second.
-    __ bind(&not_string1);
-    __ tst(r0, Operand(kSmiTagMask));
-    __ b(eq, &not_strings);
-    __ CompareObjectType(r0, r2, r2, FIRST_NONSTRING_TYPE);
-    __ b(ge, &not_strings);
-
-    // Only second argument is a string.
-    __ InvokeBuiltin(Builtins::STRING_ADD_RIGHT, JUMP_JS);
-
-    __ bind(&not_strings);
-  }
-
-  __ InvokeBuiltin(builtin, JUMP_JS);  // Tail call.  No return.
 }
@@ -6122,15 +6105,12 @@ void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
   // All ops need to know whether we are dealing with two Smis.  Set up r2 to
   // tell us that.
-  if (ShouldGenerateSmiCode()) {
-    __ orr(r2, r1, Operand(r0));  // r2 = x | y;
-  }
+  __ orr(r2, r1, Operand(r0));  // r2 = x | y;

   switch (op_) {
     case Token::ADD: {
       Label not_smi;
       // Fast path.
-      if (ShouldGenerateSmiCode()) {
       ASSERT(kSmiTag == 0);  // Adjust code below.
       __ tst(r2, Operand(kSmiTagMask));
       __ b(ne, &not_smi);
@@ -6138,15 +6118,18 @@ void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
       // Return if no overflow.
       __ Ret(vc);
       __ sub(r0, r0, Operand(r1));  // Revert optimistic add.
-      }
-      HandleBinaryOpSlowCases(masm, &not_smi, Builtins::ADD);
+      HandleBinaryOpSlowCases(masm,
+                              &not_smi,
+                              Builtins::ADD,
+                              Token::ADD,
+                              mode_);
       break;
     }
     case Token::SUB: {
       Label not_smi;
       // Fast path.
-      if (ShouldGenerateSmiCode()) {
       ASSERT(kSmiTag == 0);  // Adjust code below.
       __ tst(r2, Operand(kSmiTagMask));
       __ b(ne, &not_smi);
@@ -6154,14 +6137,17 @@ void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
       // Return if no overflow.
       __ Ret(vc);
       __ sub(r0, r1, Operand(r0));  // Revert optimistic subtract.
-      }
-      HandleBinaryOpSlowCases(masm, &not_smi, Builtins::SUB);
+      HandleBinaryOpSlowCases(masm,
+                              &not_smi,
+                              Builtins::SUB,
+                              Token::SUB,
+                              mode_);
       break;
     }
     case Token::MUL: {
       Label not_smi, slow;
-      if (ShouldGenerateSmiCode()) {
       ASSERT(kSmiTag == 0);  // adjust code below
       __ tst(r2, Operand(kSmiTagMask));
       __ b(ne, &not_smi);
@@ -6185,15 +6171,19 @@ void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
       // Slow case.  We fall through here if we multiplied a negative number
       // with 0, because that would mean we should produce -0.
       __ bind(&slow);
-      }
-      HandleBinaryOpSlowCases(masm, &not_smi, Builtins::MUL);
+      HandleBinaryOpSlowCases(masm,
+                              &not_smi,
+                              Builtins::MUL,
+                              Token::MUL,
+                              mode_);
       break;
     }
     case Token::DIV:
     case Token::MOD: {
       Label not_smi;
-      if (ShouldGenerateSmiCode()) {
+      if (specialized_on_rhs_) {
        Label smi_is_unsuitable;
        __ BranchOnNotSmi(r1, &not_smi);
        if (IsPowerOf2(constant_rhs_)) {
@@ -6273,11 +6263,14 @@ void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
        }
        __ Ret();
        __ bind(&smi_is_unsuitable);
+      } else {
+        __ jmp(&not_smi);
       }
-      HandleBinaryOpSlowCases(
-          masm,
-          &not_smi,
-          op_ == Token::MOD ? Builtins::MOD : Builtins::DIV);
+      HandleBinaryOpSlowCases(masm,
+                              &not_smi,
+                              op_ == Token::MOD ? Builtins::MOD : Builtins::DIV,
+                              op_,
+                              mode_);
       break;
     }
@@ -6337,52 +6330,11 @@ void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
   }
   // This code should be unreachable.
   __ stop("Unreachable");
-
-  // Generate an unreachable reference to the DEFAULT stub so that it can be
-  // found at the end of this stub when clearing ICs at GC.
-  // TODO(kaznacheev): Check performance impact and get rid of this.
-  if (runtime_operands_type_ != BinaryOpIC::DEFAULT) {
-    GenericBinaryOpStub uninit(MinorKey(), BinaryOpIC::DEFAULT);
-    __ CallStub(&uninit);
-  }
-}
-
-
-void GenericBinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
-  Label get_result;
-
-  __ push(r1);
-  __ push(r0);
-
-  // Internal frame is necessary to handle exceptions properly.
-  __ EnterInternalFrame();
-  // Call the stub proper to get the result in r0.
-  __ Call(&get_result);
-  __ LeaveInternalFrame();
-
-  __ push(r0);
-  __ mov(r0, Operand(Smi::FromInt(MinorKey())));
-  __ push(r0);
-  __ mov(r0, Operand(Smi::FromInt(op_)));
-  __ push(r0);
-  __ mov(r0, Operand(Smi::FromInt(runtime_operands_type_)));
-  __ push(r0);
-
-  __ TailCallExternalReference(
-      ExternalReference(IC_Utility(IC::kBinaryOp_Patch)),
-      6,
-      1);
-
-  // The entry point for the result calculation is assumed to be immediately
-  // after this sequence.
-  __ bind(&get_result);
 }


 Handle<Code> GetBinaryOpStub(int key, BinaryOpIC::TypeInfo type_info) {
-  GenericBinaryOpStub stub(key, type_info);
-  return stub.GetCode();
+  return Handle<Code>::null();
 }
...
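
Aside: the Smi checks that recur throughout the stub above ("tst rN, kSmiTagMask" followed by a conditional branch) rely on 32-bit V8's pointer tagging, where a small integer (Smi) is stored shifted left by one with a zero tag bit; the ASSERT(kSmiTag == 0) lines in the hunks depend on the same invariant. A minimal C++ sketch of that encoding, with helper names invented here for illustration:

  #include <cstdint>

  // "tst rN, #kSmiTagMask" sets the Z flag exactly when rN holds a Smi,
  // because heap object pointers carry a 1 in the low bit and Smis a 0.
  inline bool IsSmi(uint32_t raw) {
    const uint32_t kSmiTagMask = 1;  // low bit; kSmiTag itself is 0
    return (raw & kSmiTagMask) == 0;
  }

  // Mirrors "__ mov(r7, Operand(r0, ASR, kSmiTagSize))": an arithmetic
  // shift right by one recovers the integer from the tagged word.
  inline int32_t SmiToInt(uint32_t raw) {
    return static_cast<int32_t>(raw) >> 1;
  }
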
@@ -28,8 +28,6 @@
 #ifndef V8_ARM_CODEGEN_ARM_H_
 #define V8_ARM_CODEGEN_ARM_H_

-#include "ic-inl.h"
-
 namespace v8 {
 namespace internal {
@@ -474,15 +472,6 @@ class GenericBinaryOpStub : public CodeStub {
         mode_(mode),
         constant_rhs_(constant_rhs),
         specialized_on_rhs_(RhsIsOneWeWantToOptimizeFor(op, constant_rhs)),
-        runtime_operands_type_(BinaryOpIC::DEFAULT),
-        name_(NULL) { }
-
-  GenericBinaryOpStub(int key, BinaryOpIC::TypeInfo type_info)
-      : op_(OpBits::decode(key)),
-        mode_(ModeBits::decode(key)),
-        constant_rhs_(KnownBitsForMinorKey(KnownIntBits::decode(key))),
-        specialized_on_rhs_(RhsIsOneWeWantToOptimizeFor(op_, constant_rhs_)),
-        runtime_operands_type_(type_info),
         name_(NULL) { }

  private:
@@ -490,32 +479,25 @@ class GenericBinaryOpStub : public CodeStub {
   OverwriteMode mode_;
   int constant_rhs_;
   bool specialized_on_rhs_;
-  BinaryOpIC::TypeInfo runtime_operands_type_;
   char* name_;

   static const int kMaxKnownRhs = 0x40000000;

-  // Minor key encoding in 18 bits.
+  // Minor key encoding in 16 bits.
   class ModeBits: public BitField<OverwriteMode, 0, 2> {};
   class OpBits: public BitField<Token::Value, 2, 6> {};
   class KnownIntBits: public BitField<int, 8, 8> {};
-  class TypeInfoBits: public BitField<int, 16, 2> {};

   Major MajorKey() { return GenericBinaryOp; }
   int MinorKey() {
-    // Encode the parameters in a unique 18 bit value.
+    // Encode the parameters in a unique 16 bit value.
     return OpBits::encode(op_)
            | ModeBits::encode(mode_)
-           | KnownIntBits::encode(MinorKeyForKnownInt())
-           | TypeInfoBits::encode(runtime_operands_type_);
+           | KnownIntBits::encode(MinorKeyForKnownInt());
   }

   void Generate(MacroAssembler* masm);
   void HandleNonSmiBitwiseOp(MacroAssembler* masm);
-  void HandleBinaryOpSlowCases(MacroAssembler* masm,
-                               Label* not_smi,
-                               const Builtins::JavaScript& builtin);
-  void GenerateTypeTransition(MacroAssembler* masm);

   static bool RhsIsOneWeWantToOptimizeFor(Token::Value op, int constant_rhs) {
     if (constant_rhs == CodeGenerator::kUnknownIntValue) return false;
@@ -542,33 +524,6 @@ class GenericBinaryOpStub : public CodeStub {
     return key;
   }

-  int KnownBitsForMinorKey(int key) {
-    if (!key) return 0;
-    if (key <= 11) return key - 1;
-    int d = 1;
-    while (key != 12) {
-      key--;
-      d <<= 1;
-    }
-    return d;
-  }
-
-  bool ShouldGenerateSmiCode() {
-    return ((op_ != Token::DIV && op_ != Token::MOD) || specialized_on_rhs_) &&
-        runtime_operands_type_ != BinaryOpIC::HEAP_NUMBERS &&
-        runtime_operands_type_ != BinaryOpIC::STRINGS;
-  }
-
-  bool ShouldGenerateFPCode() {
-    return runtime_operands_type_ != BinaryOpIC::STRINGS;
-  }
-
-  virtual int GetCodeKind() { return Code::BINARY_OP_IC; }
-
-  virtual InlineCacheState GetICState() {
-    return BinaryOpIC::ToState(runtime_operands_type_);
-  }
-
   const char* GetName();

 #ifdef DEBUG
...
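
Aside: the header change above shrinks the stub's minor key from 18 bits back to 16 by dropping TypeInfoBits (2 bits at position 16) together with the runtime_operands_type_ field. As a rough C++ sketch of what the three remaining BitField declarations pack (EncodeMinorKey is an invented helper, not V8's actual BitField template):

  #include <cstdint>

  // mode in bits 0-1, op in bits 2-7, known-int in bits 8-15,
  // matching ModeBits, OpBits and KnownIntBits in the header.
  inline uint32_t EncodeMinorKey(uint32_t mode, uint32_t op, uint32_t known_int) {
    return (mode & 0x3)
           | ((op & 0x3F) << 2)
           | ((known_int & 0xFF) << 8);
  }
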