Removed dead code: GenericUnaryOpStub is not used anymore; as a consequence, NegativeZeroHandling and UnaryOpFlags are dead, too.
Review URL: http://codereview.chromium.org/6903124

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@7731 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
parent 9af0a4e4
@@ -3200,141 +3200,6 @@ void StackCheckStub::Generate(MacroAssembler* masm) {
}
void GenericUnaryOpStub::Generate(MacroAssembler* masm) {
Label slow, done;
Register heap_number_map = r6;
__ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
if (op_ == Token::SUB) {
if (include_smi_code_) {
// Check whether the value is a smi.
Label try_float;
__ tst(r0, Operand(kSmiTagMask));
__ b(ne, &try_float);
// Go slow case if the value of the expression is zero
// to make sure that we switch between 0 and -0.
if (negative_zero_ == kStrictNegativeZero) {
// If we have to check for zero, then we can check for the max negative
// smi while we are at it.
__ bic(ip, r0, Operand(0x80000000), SetCC);
__ b(eq, &slow);
__ rsb(r0, r0, Operand(0, RelocInfo::NONE));
__ Ret();
} else {
// The value of the expression is a smi and 0 is OK for -0. Try
// optimistic subtraction '0 - value'.
__ rsb(r0, r0, Operand(0, RelocInfo::NONE), SetCC);
__ Ret(vc);
// We don't have to reverse the optimistic neg since the only case
// where we fall through is the minimum negative Smi, which is the case
// where the neg leaves the register unchanged.
__ jmp(&slow); // Go slow on max negative Smi.
}
__ bind(&try_float);
} else if (FLAG_debug_code) {
__ tst(r0, Operand(kSmiTagMask));
__ Assert(ne, "Unexpected smi operand.");
}
__ ldr(r1, FieldMemOperand(r0, HeapObject::kMapOffset));
__ AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
__ cmp(r1, heap_number_map);
__ b(ne, &slow);
// r0 is a heap number. Get a new heap number in r1.
if (overwrite_ == UNARY_OVERWRITE) {
__ ldr(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset));
__ eor(r2, r2, Operand(HeapNumber::kSignMask)); // Flip sign.
__ str(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset));
} else {
__ AllocateHeapNumber(r1, r2, r3, r6, &slow);
__ ldr(r3, FieldMemOperand(r0, HeapNumber::kMantissaOffset));
__ ldr(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset));
__ str(r3, FieldMemOperand(r1, HeapNumber::kMantissaOffset));
__ eor(r2, r2, Operand(HeapNumber::kSignMask)); // Flip sign.
__ str(r2, FieldMemOperand(r1, HeapNumber::kExponentOffset));
__ mov(r0, Operand(r1));
}
} else if (op_ == Token::BIT_NOT) {
if (include_smi_code_) {
Label non_smi;
__ JumpIfNotSmi(r0, &non_smi);
__ mvn(r0, Operand(r0));
// Bit-clear inverted smi-tag.
__ bic(r0, r0, Operand(kSmiTagMask));
__ Ret();
__ bind(&non_smi);
} else if (FLAG_debug_code) {
__ tst(r0, Operand(kSmiTagMask));
__ Assert(ne, "Unexpected smi operand.");
}
// Check if the operand is a heap number.
__ ldr(r1, FieldMemOperand(r0, HeapObject::kMapOffset));
__ AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
__ cmp(r1, heap_number_map);
__ b(ne, &slow);
// Convert the heap number in r0 to an untagged integer in r1.
__ ConvertToInt32(r0, r1, r2, r3, d0, &slow);
// Do the bitwise operation (move negated) and check if the result
// fits in a smi.
Label try_float;
__ mvn(r1, Operand(r1));
__ add(r2, r1, Operand(0x40000000), SetCC);
__ b(mi, &try_float);
__ mov(r0, Operand(r1, LSL, kSmiTagSize));
__ b(&done);
__ bind(&try_float);
if (overwrite_ == UNARY_NO_OVERWRITE) {
// Allocate a fresh heap number, but don't overwrite r0 until
// we're sure we can do it without going through the slow case
// that needs the value in r0.
__ AllocateHeapNumber(r2, r3, r4, r6, &slow);
__ mov(r0, Operand(r2));
}
if (CpuFeatures::IsSupported(VFP3)) {
// Convert the int32 in r1 to the heap number in r0. r2 is corrupted.
CpuFeatures::Scope scope(VFP3);
__ vmov(s0, r1);
__ vcvt_f64_s32(d0, s0);
__ sub(r2, r0, Operand(kHeapObjectTag));
__ vstr(d0, r2, HeapNumber::kValueOffset);
} else {
// WriteInt32ToHeapNumberStub does not trigger GC, so we do not
// have to set up a frame.
WriteInt32ToHeapNumberStub stub(r1, r0, r2);
__ push(lr);
__ Call(stub.GetCode(), RelocInfo::CODE_TARGET);
__ pop(lr);
}
} else {
UNIMPLEMENTED();
}
__ bind(&done);
__ Ret();
// Handle the slow case by jumping to the JavaScript builtin.
__ bind(&slow);
__ push(r0);
switch (op_) {
case Token::SUB:
__ InvokeBuiltin(Builtins::UNARY_MINUS, JUMP_JS);
break;
case Token::BIT_NOT:
__ InvokeBuiltin(Builtins::BIT_NOT, JUMP_JS);
break;
default:
UNREACHABLE();
}
}
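The heap-number path above negates a double without any floating-point arithmetic: it XORs HeapNumber::kSignMask into the exponent (high) word and leaves the mantissa untouched. A minimal host-side C++ sketch of that trick, with the mask hard-coded here for illustration (this is not V8 code):

```cpp
#include <cstdint>
#include <cstdio>
#include <cstring>

// Mirrors HeapNumber::kSignMask: bit 31 of the high word of an IEEE-754
// double is the sign bit of the whole value.
static const uint32_t kSignMask = 0x80000000u;

double FlipSign(double value) {
  uint64_t bits;
  std::memcpy(&bits, &value, sizeof(bits));        // reinterpret the double
  bits ^= static_cast<uint64_t>(kSignMask) << 32;  // flip bit 63 (the sign)
  std::memcpy(&value, &bits, sizeof(value));
  return value;
}

int main() {
  // Prints "-1.5 0 -0": flipping the bit maps 0.0 to -0.0 and back, which is
  // why the smi fast path has to fall back to the slow case for an input of 0.
  std::printf("%g %g %g\n", FlipSign(1.5), FlipSign(-0.0), FlipSign(0.0));
  return 0;
}
```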
void MathPowStub::Generate(MacroAssembler* masm) {
Label call_runtime;
...
@@ -389,54 +389,6 @@ class InstanceofStub: public CodeStub {
};
enum NegativeZeroHandling {
kStrictNegativeZero,
kIgnoreNegativeZero
};
enum UnaryOpFlags {
NO_UNARY_FLAGS = 0,
NO_UNARY_SMI_CODE_IN_STUB = 1 << 0
};
class GenericUnaryOpStub : public CodeStub {
public:
GenericUnaryOpStub(Token::Value op,
UnaryOverwriteMode overwrite,
UnaryOpFlags flags,
NegativeZeroHandling negative_zero = kStrictNegativeZero)
: op_(op),
overwrite_(overwrite),
include_smi_code_((flags & NO_UNARY_SMI_CODE_IN_STUB) == 0),
negative_zero_(negative_zero) { }
private:
Token::Value op_;
UnaryOverwriteMode overwrite_;
bool include_smi_code_;
NegativeZeroHandling negative_zero_;
class OverwriteField: public BitField<UnaryOverwriteMode, 0, 1> {};
class IncludeSmiCodeField: public BitField<bool, 1, 1> {};
class NegativeZeroField: public BitField<NegativeZeroHandling, 2, 1> {};
class OpField: public BitField<Token::Value, 3, kMinorBits - 3> {};
Major MajorKey() { return GenericUnaryOp; }
int MinorKey() {
return OpField::encode(op_) |
OverwriteField::encode(overwrite_) |
IncludeSmiCodeField::encode(include_smi_code_) |
NegativeZeroField::encode(negative_zero_);
}
void Generate(MacroAssembler* masm);
const char* GetName();
};
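The MinorKey above is what distinguished the stub's compiled variants in the code cache: each constructor argument is packed into a fixed bit range via V8's BitField helpers. A simplified, self-contained sketch of that packing and unpacking (the BitField template here is a stripped-down stand-in for V8's, and the Token::Value op field is omitted):

```cpp
#include <cstdio>

// Stripped-down stand-in for V8's BitField<T, shift, size> helper.
template <class T, int shift, int size>
struct BitField {
  static const unsigned kMask = ((1u << size) - 1u) << shift;
  static unsigned encode(T value) { return static_cast<unsigned>(value) << shift; }
  static T decode(unsigned packed) { return static_cast<T>((packed & kMask) >> shift); }
};

enum UnaryOverwriteMode { UNARY_OVERWRITE, UNARY_NO_OVERWRITE };
enum NegativeZeroHandling { kStrictNegativeZero, kIgnoreNegativeZero };

// Same layout as the removed class: overwrite mode in bit 0, the smi-code
// flag in bit 1, negative-zero handling in bit 2.
typedef BitField<UnaryOverwriteMode, 0, 1> OverwriteField;
typedef BitField<bool, 1, 1> IncludeSmiCodeField;
typedef BitField<NegativeZeroHandling, 2, 1> NegativeZeroField;

int main() {
  unsigned key = OverwriteField::encode(UNARY_NO_OVERWRITE) |
                 IncludeSmiCodeField::encode(true) |
                 NegativeZeroField::encode(kIgnoreNegativeZero);
  // Prints "key=7 overwrite=1 smi=1 neg0=1".
  std::printf("key=%u overwrite=%d smi=%d neg0=%d\n", key,
              OverwriteField::decode(key),
              IncludeSmiCodeField::decode(key),
              NegativeZeroField::decode(key));
  return 0;
}
```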
class MathPowStub: public CodeStub {
public:
MathPowStub() {}
...
@@ -205,29 +205,6 @@ bool CodeGenerator::RecordPositions(MacroAssembler* masm,
}
const char* GenericUnaryOpStub::GetName() {
switch (op_) {
case Token::SUB:
if (negative_zero_ == kStrictNegativeZero) {
return overwrite_ == UNARY_OVERWRITE
? "GenericUnaryOpStub_SUB_Overwrite_Strict0"
: "GenericUnaryOpStub_SUB_Alloc_Strict0";
} else {
return overwrite_ == UNARY_OVERWRITE
? "GenericUnaryOpStub_SUB_Overwrite_Ignore0"
: "GenericUnaryOpStub_SUB_Alloc_Ignore0";
}
case Token::BIT_NOT:
return overwrite_ == UNARY_OVERWRITE
? "GenericUnaryOpStub_BIT_NOT_Overwrite"
: "GenericUnaryOpStub_BIT_NOT_Alloc";
default:
UNREACHABLE();
return "<unknown>";
}
}
void ArgumentsAccessStub::Generate(MacroAssembler* masm) {
switch (type_) {
case READ_ELEMENT:
...
@@ -2755,140 +2755,6 @@ void FloatingPointHelper::CheckFloatOperandsAreInt32(MacroAssembler* masm,
}
void GenericUnaryOpStub::Generate(MacroAssembler* masm) {
Label slow, done, undo;
if (op_ == Token::SUB) {
if (include_smi_code_) {
// Check whether the value is a smi.
NearLabel try_float;
__ test(eax, Immediate(kSmiTagMask));
__ j(not_zero, &try_float, not_taken);
if (negative_zero_ == kStrictNegativeZero) {
// Go slow case if the value of the expression is zero
// to make sure that we switch between 0 and -0.
__ test(eax, Operand(eax));
__ j(zero, &slow, not_taken);
}
// The value of the expression is a smi that is not zero. Try
// optimistic subtraction '0 - value'.
__ mov(edx, Operand(eax));
__ Set(eax, Immediate(0));
__ sub(eax, Operand(edx));
__ j(overflow, &undo, not_taken);
__ StubReturn(1);
// Try floating point case.
__ bind(&try_float);
} else if (FLAG_debug_code) {
__ AbortIfSmi(eax);
}
__ mov(edx, FieldOperand(eax, HeapObject::kMapOffset));
__ cmp(edx, masm->isolate()->factory()->heap_number_map());
__ j(not_equal, &slow);
if (overwrite_ == UNARY_OVERWRITE) {
__ mov(edx, FieldOperand(eax, HeapNumber::kExponentOffset));
__ xor_(edx, HeapNumber::kSignMask); // Flip sign.
__ mov(FieldOperand(eax, HeapNumber::kExponentOffset), edx);
} else {
__ mov(edx, Operand(eax));
// edx: operand
__ AllocateHeapNumber(eax, ebx, ecx, &undo);
// eax: allocated 'empty' number
__ mov(ecx, FieldOperand(edx, HeapNumber::kExponentOffset));
__ xor_(ecx, HeapNumber::kSignMask); // Flip sign.
__ mov(FieldOperand(eax, HeapNumber::kExponentOffset), ecx);
__ mov(ecx, FieldOperand(edx, HeapNumber::kMantissaOffset));
__ mov(FieldOperand(eax, HeapNumber::kMantissaOffset), ecx);
}
} else if (op_ == Token::BIT_NOT) {
if (include_smi_code_) {
Label non_smi;
__ test(eax, Immediate(kSmiTagMask));
__ j(not_zero, &non_smi);
__ not_(eax);
__ and_(eax, ~kSmiTagMask); // Remove inverted smi-tag.
__ ret(0);
__ bind(&non_smi);
} else if (FLAG_debug_code) {
__ AbortIfSmi(eax);
}
// Check if the operand is a heap number.
__ mov(edx, FieldOperand(eax, HeapObject::kMapOffset));
__ cmp(edx, masm->isolate()->factory()->heap_number_map());
__ j(not_equal, &slow, not_taken);
// Convert the heap number in eax to an untagged integer in ecx.
IntegerConvert(masm,
eax,
TypeInfo::Unknown(),
CpuFeatures::IsSupported(SSE3),
&slow);
// Do the bitwise operation and check if the result fits in a smi.
NearLabel try_float;
__ not_(ecx);
__ cmp(ecx, 0xc0000000);
__ j(sign, &try_float, not_taken);
// Tag the result as a smi and we're done.
STATIC_ASSERT(kSmiTagSize == 1);
__ lea(eax, Operand(ecx, times_2, kSmiTag));
__ jmp(&done);
// Try to store the result in a heap number.
__ bind(&try_float);
if (overwrite_ == UNARY_NO_OVERWRITE) {
// Allocate a fresh heap number, but don't overwrite eax until
// we're sure we can do it without going through the slow case
// that needs the value in eax.
__ AllocateHeapNumber(ebx, edx, edi, &slow);
__ mov(eax, Operand(ebx));
}
if (CpuFeatures::IsSupported(SSE2)) {
CpuFeatures::Scope use_sse2(SSE2);
__ cvtsi2sd(xmm0, Operand(ecx));
__ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
} else {
__ push(ecx);
__ fild_s(Operand(esp, 0));
__ pop(ecx);
__ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
}
} else {
UNIMPLEMENTED();
}
// Return from the stub.
__ bind(&done);
__ StubReturn(1);
// Restore eax and go slow case.
__ bind(&undo);
__ mov(eax, Operand(edx));
// Handle the slow case by jumping to the JavaScript builtin.
__ bind(&slow);
__ pop(ecx); // pop return address.
__ push(eax);
__ push(ecx); // push return address
switch (op_) {
case Token::SUB:
__ InvokeBuiltin(Builtins::UNARY_MINUS, JUMP_FUNCTION);
break;
case Token::BIT_NOT:
__ InvokeBuiltin(Builtins::BIT_NOT, JUMP_FUNCTION);
break;
default:
UNREACHABLE();
}
}
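Both the ARM and ia32 BIT_NOT paths above decide whether the inverted value can be re-tagged as a smi with the same trick: bias the int32 by 2^30 and test the resulting sign bit (ARM: `add r2, r1, #0x40000000` with SetCC and `bmi`; ia32: `cmp ecx, 0xc0000000` followed by `j(sign, ...)`). A small host-side sketch of that range check, assuming the 31-bit smi layout used by these 32-bit ports:

```cpp
#include <cstdint>
#include <cstdio>

// An int32 fits in a 31-bit smi exactly when it lies in [-2^30, 2^30 - 1],
// i.e. when adding 2^30 leaves the 32-bit sign bit clear.
bool FitsInSmi(int32_t value) {
  uint32_t biased = static_cast<uint32_t>(value) + 0x40000000u;
  return (biased & 0x80000000u) == 0;
}

int main() {
  // Prints "1 1 0 1": the largest and smallest smis pass, 2^30 does not.
  std::printf("%d %d %d %d\n",
              FitsInSmi(0),
              FitsInSmi((1 << 30) - 1),
              FitsInSmi(1 << 30),
              FitsInSmi(-(1 << 30)));
  return 0;
}
```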
void MathPowStub::Generate(MacroAssembler* masm) {
// Registers are used as follows:
// edx = base
...
@@ -426,11 +426,6 @@ void StackCheckStub::Generate(MacroAssembler* masm) {
}
void GenericUnaryOpStub::Generate(MacroAssembler* masm) {
UNIMPLEMENTED_MIPS();
}
bool CEntryStub::NeedsImmovableCode() {
return true;
}
...
@@ -1736,91 +1736,6 @@ void FloatingPointHelper::NumbersToSmis(MacroAssembler* masm,
}
void GenericUnaryOpStub::Generate(MacroAssembler* masm) {
Label slow, done;
if (op_ == Token::SUB) {
if (include_smi_code_) {
// Check whether the value is a smi.
Label try_float;
__ JumpIfNotSmi(rax, &try_float);
if (negative_zero_ == kIgnoreNegativeZero) {
__ SmiCompare(rax, Smi::FromInt(0));
__ j(equal, &done);
}
__ SmiNeg(rax, rax, &done);
__ jmp(&slow); // zero, if not handled above, and Smi::kMinValue.
// Try floating point case.
__ bind(&try_float);
} else if (FLAG_debug_code) {
__ AbortIfSmi(rax);
}
__ CompareRoot(FieldOperand(rax, HeapObject::kMapOffset),
Heap::kHeapNumberMapRootIndex);
__ j(not_equal, &slow);
// Operand is a float, negate its value by flipping sign bit.
__ movq(rdx, FieldOperand(rax, HeapNumber::kValueOffset));
__ Set(kScratchRegister, 0x01);
__ shl(kScratchRegister, Immediate(63));
__ xor_(rdx, kScratchRegister); // Flip sign.
// rdx is value to store.
if (overwrite_ == UNARY_OVERWRITE) {
__ movq(FieldOperand(rax, HeapNumber::kValueOffset), rdx);
} else {
__ AllocateHeapNumber(rcx, rbx, &slow);
// rcx: allocated 'empty' number
__ movq(FieldOperand(rcx, HeapNumber::kValueOffset), rdx);
__ movq(rax, rcx);
}
} else if (op_ == Token::BIT_NOT) {
if (include_smi_code_) {
Label try_float;
__ JumpIfNotSmi(rax, &try_float);
__ SmiNot(rax, rax);
__ jmp(&done);
// Try floating point case.
__ bind(&try_float);
} else if (FLAG_debug_code) {
__ AbortIfSmi(rax);
}
// Check if the operand is a heap number.
__ CompareRoot(FieldOperand(rax, HeapObject::kMapOffset),
Heap::kHeapNumberMapRootIndex);
__ j(not_equal, &slow);
// Convert the heap number in rax to an untagged integer in rcx.
IntegerConvert(masm, rax, rax);
// Do the bitwise operation and smi tag the result.
__ notl(rax);
__ Integer32ToSmi(rax, rax);
}
// Return from the stub.
__ bind(&done);
__ StubReturn(1);
// Handle the slow case by jumping to the JavaScript builtin.
__ bind(&slow);
__ pop(rcx); // pop return address
__ push(rax);
__ push(rcx); // push return address
switch (op_) {
case Token::SUB:
__ InvokeBuiltin(Builtins::UNARY_MINUS, JUMP_FUNCTION);
break;
case Token::BIT_NOT:
__ InvokeBuiltin(Builtins::BIT_NOT, JUMP_FUNCTION);
break;
default:
UNREACHABLE();
}
}
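The smi fast path for unary minus (the optimistic `rsb` on ARM, `0 - value` on ia32, `SmiNeg` here on x64) bails out to the slow case for exactly two inputs: 0, because the result must be the heap number -0.0, and Smi::kMinValue, whose negation overflows the smi range. A host-side sketch of that rule, assuming the 31-bit smi range of the 32-bit ports (x64 smis are wider, but the kMinValue problem is the same):

```cpp
#include <cstdint>
#include <cstdio>

const int32_t kSmiMinValue = -(1 << 30);

// Returns true and writes the negated smi when the fast path applies;
// returns false for the two inputs the stub sends to the slow path.
bool FastNegateSmi(int32_t value, int32_t* result) {
  if (value == 0) return false;             // -0 is not representable as a smi
  if (value == kSmiMinValue) return false;  // -kSmiMinValue exceeds the smi max
  *result = -value;
  return true;
}

int main() {
  int32_t negated = 0;
  // Prints "1 0 0": only the ordinary value takes the fast path.
  std::printf("%d %d %d\n",
              FastNegateSmi(5, &negated),
              FastNegateSmi(0, &negated),
              FastNegateSmi(kSmiMinValue, &negated));
  return 0;
}
```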
void MathPowStub::Generate(MacroAssembler* masm) {
// Registers are used as follows:
// rdx = base
...