Commit 6ca58764 authored by kasperl@chromium.org

Add fast case stub for BIT_NOT.

Review URL: http://codereview.chromium.org/503079

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@3513 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
parent af6971d8
......@@ -3698,7 +3698,7 @@ void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) {
bool overwrite =
(node->expression()->AsBinaryOperation() != NULL &&
node->expression()->AsBinaryOperation()->ResultOverwriteAllowed());
UnarySubStub stub(overwrite);
GenericUnaryOpStub stub(Token::SUB, overwrite);
frame_->CallStub(&stub, 0);
break;
}
......@@ -5940,7 +5940,9 @@ void StackCheckStub::Generate(MacroAssembler* masm) {
}
void UnarySubStub::Generate(MacroAssembler* masm) {
void GenericUnaryOpStub::Generate(MacroAssembler* masm) {
ASSERT(op_ == Token::SUB);
Label undo;
Label slow;
Label not_smi;
......
......@@ -46,7 +46,7 @@ namespace internal {
V(FastNewClosure) \
V(FastNewContext) \
V(FastCloneShallowArray) \
V(UnarySub) \
V(GenericUnaryOp) \
V(RevertToNumber) \
V(ToBoolean) \
V(Instanceof) \
......
......@@ -448,6 +448,23 @@ const char* RuntimeStub::GetName() {
}
const char* GenericUnaryOpStub::GetName() {
switch (op_) {
case Token::SUB:
return overwrite_
? "GenericUnaryOpStub_SUB_Overwrite"
: "GenericUnaryOpStub_SUB_Alloc";
case Token::BIT_NOT:
return overwrite_
? "GenericUnaryOpStub_BIT_NOT_Overwrite"
: "GenericUnaryOpStub_BIT_NOT_Alloc";
default:
UNREACHABLE();
return "<unknown>";
}
}
void RuntimeStub::Generate(MacroAssembler* masm) {
Runtime::Function* f = Runtime::FunctionForId(id_);
masm->TailCallRuntime(ExternalReference(f),
......
......@@ -294,20 +294,26 @@ class InstanceofStub: public CodeStub {
};
class UnarySubStub : public CodeStub {
class GenericUnaryOpStub : public CodeStub {
public:
explicit UnarySubStub(bool overwrite)
: overwrite_(overwrite) { }
GenericUnaryOpStub(Token::Value op, bool overwrite)
: op_(op), overwrite_(overwrite) { }
private:
Token::Value op_;
bool overwrite_;
Major MajorKey() { return UnarySub; }
int MinorKey() { return overwrite_ ? 1 : 0; }
void Generate(MacroAssembler* masm);
const char* GetName() {
return overwrite_ ? "UnarySubStub_Overwrite" : "UnarySubStub_Alloc";
class OverwriteField: public BitField<int, 0, 1> {};
class OpField: public BitField<Token::Value, 1, kMinorBits - 1> {};
Major MajorKey() { return GenericUnaryOp; }
int MinorKey() {
return OpField::encode(op_) | OverwriteField::encode(overwrite_);
}
void Generate(MacroAssembler* masm);
const char* GetName();
};
......
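
A note on the new MinorKey encoding above: the token and the overwrite flag are packed into a single integer with V8's BitField helper, so one stub class can cache a separate code object per (op, overwrite) pair. As a rough, self-contained sketch of that packing (an illustrative reimplementation, not V8's actual header; the enum values and the 7-bit op width standing in for kMinorBits - 1 are arbitrary):

  #include <cstdint>

  // Illustrative stand-in for v8::internal::BitField<T, shift, size>.
  template <typename T, int shift, int size>
  struct BitField {
    static const uint32_t kMask = ((1u << size) - 1) << shift;
    static uint32_t encode(T value) {
      return static_cast<uint32_t>(value) << shift;
    }
    static T decode(uint32_t field) {
      return static_cast<T>((field & kMask) >> shift);
    }
  };

  enum Token { SUB = 1, BIT_NOT = 2 };  // hypothetical numeric values

  typedef BitField<bool, 0, 1> OverwriteField;  // bit 0
  typedef BitField<Token, 1, 7> OpField;        // the remaining minor-key bits

  uint32_t MinorKey(Token op, bool overwrite) {
    return OpField::encode(op) | OverwriteField::encode(overwrite);
  }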
......@@ -5515,12 +5515,12 @@ void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) {
} else {
Load(node->expression());
bool overwrite =
(node->expression()->AsBinaryOperation() != NULL &&
node->expression()->AsBinaryOperation()->ResultOverwriteAllowed());
switch (op) {
case Token::SUB: {
bool overwrite =
(node->expression()->AsBinaryOperation() != NULL &&
node->expression()->AsBinaryOperation()->ResultOverwriteAllowed());
UnarySubStub stub(overwrite);
GenericUnaryOpStub stub(Token::SUB, overwrite);
// TODO(1222589): remove dependency of TOS being cached inside stub
Result operand = frame_->Pop();
Result answer = frame_->CallStub(&stub, &operand);
......@@ -5537,16 +5537,16 @@ void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) {
__ test(operand.reg(), Immediate(kSmiTagMask));
smi_label.Branch(zero, &operand, taken);
frame_->Push(&operand); // undo popping of TOS
Result answer = frame_->InvokeBuiltin(Builtins::BIT_NOT,
CALL_FUNCTION, 1);
GenericUnaryOpStub stub(Token::BIT_NOT, overwrite);
Result answer = frame_->CallStub(&stub, &operand);
continue_label.Jump(&answer);
smi_label.Bind(&answer);
answer.ToRegister();
frame_->Spill(answer.reg());
__ not_(answer.reg());
__ and_(answer.reg(), ~kSmiTagMask); // Remove inverted smi-tag.
continue_label.Bind(&answer);
frame_->Push(&answer);
break;
......@@ -7282,9 +7282,15 @@ void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
default: UNREACHABLE();
}
// Store the result in the HeapNumber and return.
__ mov(Operand(esp, 1 * kPointerSize), ebx);
__ fild_s(Operand(esp, 1 * kPointerSize));
__ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
if (CpuFeatures::IsSupported(SSE2)) {
CpuFeatures::Scope use_sse2(SSE2);
__ cvtsi2sd(xmm0, Operand(ebx));
__ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
} else {
__ mov(Operand(esp, 1 * kPointerSize), ebx);
__ fild_s(Operand(esp, 1 * kPointerSize));
__ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
}
GenerateReturn(masm);
}
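
The change above (and the matching block in the unary stub further down) prefers SSE2 for storing the untagged integer result as a double when the CPU supports it: cvtsi2sd converts in an XMM register and movdbl stores the 64-bit value directly, instead of bouncing through the stack for x87 fild/fstp. Expressed with SSE2 intrinsics, the fast path computes something like this minimal sketch (illustrative only, not V8 code):

  #include <emmintrin.h>  // SSE2 intrinsics
  #include <cstdint>

  double IntToDouble(int32_t v) {
    // cvtsi2sd: signed 32-bit integer -> double in the low xmm lane.
    __m128d x = _mm_cvtsi32_sd(_mm_setzero_pd(), v);
    double result;
    _mm_store_sd(&result, x);  // movdbl-style 64-bit store
    return result;
  }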
......@@ -7711,67 +7717,119 @@ void FloatingPointHelper::CheckFloatOperands(MacroAssembler* masm,
}
void UnarySubStub::Generate(MacroAssembler* masm) {
Label undo;
Label slow;
Label done;
Label try_float;
// Check whether the value is a smi.
__ test(eax, Immediate(kSmiTagMask));
__ j(not_zero, &try_float, not_taken);
void GenericUnaryOpStub::Generate(MacroAssembler* masm) {
Label slow, done;
// Enter runtime system if the value of the expression is zero
// to make sure that we switch between 0 and -0.
__ test(eax, Operand(eax));
__ j(zero, &slow, not_taken);
if (op_ == Token::SUB) {
// Check whether the value is a smi.
Label try_float;
__ test(eax, Immediate(kSmiTagMask));
__ j(not_zero, &try_float, not_taken);
// The value of the expression is a smi that is not zero. Try
// optimistic subtraction '0 - value'.
__ mov(edx, Operand(eax));
__ Set(eax, Immediate(0));
__ sub(eax, Operand(edx));
__ j(overflow, &undo, not_taken);
// Go slow case if the value of the expression is zero
// to make sure that we switch between 0 and -0.
__ test(eax, Operand(eax));
__ j(zero, &slow, not_taken);
// If result is a smi we are done.
__ test(eax, Immediate(kSmiTagMask));
__ j(zero, &done, taken);
// Restore eax and enter runtime system.
__ bind(&undo);
__ mov(eax, Operand(edx));
// The value of the expression is a smi that is not zero. Try
// optimistic subtraction '0 - value'.
Label undo;
__ mov(edx, Operand(eax));
__ Set(eax, Immediate(0));
__ sub(eax, Operand(edx));
__ j(overflow, &undo, not_taken);
// Enter runtime system.
__ bind(&slow);
__ pop(ecx); // pop return address
__ push(eax);
__ push(ecx); // push return address
__ InvokeBuiltin(Builtins::UNARY_MINUS, JUMP_FUNCTION);
// If result is a smi we are done.
__ test(eax, Immediate(kSmiTagMask));
__ j(zero, &done, taken);
// Restore eax and go slow case.
__ bind(&undo);
__ mov(eax, Operand(edx));
__ jmp(&slow);
// Try floating point case.
__ bind(&try_float);
__ mov(edx, FieldOperand(eax, HeapObject::kMapOffset));
__ cmp(edx, Factory::heap_number_map());
__ j(not_equal, &slow);
if (overwrite_) {
__ mov(edx, FieldOperand(eax, HeapNumber::kExponentOffset));
__ xor_(edx, HeapNumber::kSignMask); // Flip sign.
__ mov(FieldOperand(eax, HeapNumber::kExponentOffset), edx);
} else {
__ mov(edx, Operand(eax));
// edx: operand
__ AllocateHeapNumber(eax, ebx, ecx, &undo);
// eax: allocated 'empty' number
__ mov(ecx, FieldOperand(edx, HeapNumber::kExponentOffset));
__ xor_(ecx, HeapNumber::kSignMask); // Flip sign.
__ mov(FieldOperand(eax, HeapNumber::kExponentOffset), ecx);
__ mov(ecx, FieldOperand(edx, HeapNumber::kMantissaOffset));
__ mov(FieldOperand(eax, HeapNumber::kMantissaOffset), ecx);
}
} else if (op_ == Token::BIT_NOT) {
// Check if the operand is a heap number.
__ mov(edx, FieldOperand(eax, HeapObject::kMapOffset));
__ cmp(edx, Factory::heap_number_map());
__ j(not_equal, &slow, not_taken);
// Convert the heap number in eax to an untagged integer in ecx.
IntegerConvert(masm, eax, CpuFeatures::IsSupported(SSE3), &slow);
// Do the bitwise operation and check if the result fits in a smi.
Label try_float;
__ not_(ecx);
__ cmp(ecx, 0xc0000000);
__ j(sign, &try_float, not_taken);
// Tag the result as a smi and we're done.
ASSERT(kSmiTagSize == 1);
__ lea(eax, Operand(ecx, times_2, kSmiTag));
__ jmp(&done);
// Try floating point case.
__ bind(&try_float);
__ mov(edx, FieldOperand(eax, HeapObject::kMapOffset));
__ cmp(edx, Factory::heap_number_map());
__ j(not_equal, &slow);
if (overwrite_) {
__ mov(edx, FieldOperand(eax, HeapNumber::kExponentOffset));
__ xor_(edx, HeapNumber::kSignMask); // Flip sign.
__ mov(FieldOperand(eax, HeapNumber::kExponentOffset), edx);
// Try to store the result in a heap number.
__ bind(&try_float);
if (!overwrite_) {
// Allocate a fresh heap number, but don't overwrite eax until
// we're sure we can do it without going through the slow case
// that needs the value in eax.
__ AllocateHeapNumber(ebx, edx, edi, &slow);
__ mov(eax, Operand(ebx));
}
if (CpuFeatures::IsSupported(SSE2)) {
CpuFeatures::Scope use_sse2(SSE2);
__ cvtsi2sd(xmm0, Operand(ecx));
__ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
} else {
__ push(ecx);
__ fild_s(Operand(esp, 0));
__ pop(ecx);
__ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
}
} else {
__ mov(edx, Operand(eax));
// edx: operand
__ AllocateHeapNumber(eax, ebx, ecx, &undo);
// eax: allocated 'empty' number
__ mov(ecx, FieldOperand(edx, HeapNumber::kExponentOffset));
__ xor_(ecx, HeapNumber::kSignMask); // Flip sign.
__ mov(FieldOperand(eax, HeapNumber::kExponentOffset), ecx);
__ mov(ecx, FieldOperand(edx, HeapNumber::kMantissaOffset));
__ mov(FieldOperand(eax, HeapNumber::kMantissaOffset), ecx);
UNIMPLEMENTED();
}
// Return from the stub.
__ bind(&done);
__ StubReturn(1);
// Handle the slow case by jumping to the JavaScript builtin.
__ bind(&slow);
__ pop(ecx); // pop return address.
__ push(eax);
__ push(ecx); // push return address
switch (op_) {
case Token::SUB:
__ InvokeBuiltin(Builtins::UNARY_MINUS, JUMP_FUNCTION);
break;
case Token::BIT_NOT:
__ InvokeBuiltin(Builtins::BIT_NOT, JUMP_FUNCTION);
break;
default:
UNREACHABLE();
}
}
......
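
Two details of the ia32 BIT_NOT fast path above are worth spelling out. First, cmp(ecx, 0xc0000000) followed by j(sign, &try_float) tests whether the 32-bit result still fits in a 31-bit smi: 0xc0000000 is -2^30, so the subtraction leaves the sign flag clear exactly for values in [-2^30, 2^30). Second, lea(eax, Operand(ecx, times_2, kSmiTag)) tags a fitting result by shifting it left one bit (kSmiTag is 0, kSmiTagSize is 1). When the result does not fit, the float path either flips bits in place (overwrite_) or allocates a fresh HeapNumber before touching eax. A self-contained sketch of both checks, assuming that era's 31-bit ia32 smi layout:

  #include <cstdint>

  // ia32 smi layout of the time: value in the upper 31 bits, tag bit 0 == 0.
  bool FitsInSmi(int32_t v) {
    // Same test the stub performs with cmp(ecx, 0xc0000000) / j(sign, ...):
    // v + 2^30 stays non-negative exactly for v in [-2^30, 2^30).
    return static_cast<int32_t>(static_cast<uint32_t>(v) + 0x40000000u) >= 0;
  }

  int32_t TagSmi(int32_t v) {
    // lea(eax, Operand(ecx, times_2, kSmiTag)): shift left one, tag bit 0.
    return static_cast<int32_t>(static_cast<uint32_t>(v) << 1);
  }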
......@@ -3109,7 +3109,7 @@ void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) {
bool overwrite =
(node->expression()->AsBinaryOperation() != NULL &&
node->expression()->AsBinaryOperation()->ResultOverwriteAllowed());
UnarySubStub stub(overwrite);
GenericUnaryOpStub stub(Token::SUB, overwrite);
// TODO(1222589): remove dependency of TOS being cached inside stub
Result operand = frame_->Pop();
Result answer = frame_->CallStub(&stub, &operand);
......@@ -6272,7 +6272,9 @@ bool CodeGenerator::FoldConstantSmis(Token::Value op, int left, int right) {
// End of CodeGenerator implementation.
void UnarySubStub::Generate(MacroAssembler* masm) {
void GenericUnaryOpStub::Generate(MacroAssembler* masm) {
ASSERT(op_ == Token::SUB);
Label slow;
Label done;
Label try_float;
......
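
On both ia32 and x64, the Token::SUB smi path bails out to the runtime when the operand is zero. The comments in the diff explain why: a smi can encode only one zero, but JavaScript negation must distinguish 0 from -0. A small runnable C++ reminder of the IEEE-754 behaviour at stake:

  #include <cstdio>

  int main() {
    double zero = 0.0;
    double neg_zero = -zero;                 // IEEE-754 negative zero
    std::printf("%d\n", zero == neg_zero);   // 1: they compare equal...
    std::printf("%g\n", 1.0 / neg_zero);     // ...but 1/-0 is -inf
    return 0;
  }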
// Copyright 2009 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
function testBitNot(x) {
// The VM constant folds the eval'ed expression, so we use that to check the result.
var expected = eval("~(" + x + ")");
var actual = ~x;
assertEquals(expected, actual, "x: " + x);
// Test the path where we can overwrite the result. Use -
// to avoid concatenating strings.
expected = eval("~(" + x + " - 0.01)");
actual = ~(x - 0.01);
assertEquals(expected, actual, "x - 0.01: " + x);
}
testBitNot(0);
testBitNot(1);
testBitNot(-1);
testBitNot(100);
testBitNot(0x40000000);
testBitNot(0x7fffffff);
testBitNot(0x80000000);
testBitNot(2.2);
testBitNot(-2.3);
testBitNot(Infinity);
testBitNot(NaN);
testBitNot(-Infinity);
testBitNot(0x40000000 + 0.12345);
testBitNot(0x40000000 - 0.12345);
testBitNot(0x7fffffff + 0.12345);
testBitNot(0x7fffffff - 0.12345);
testBitNot(0x80000000 + 0.12345);
testBitNot(0x80000000 - 0.12345);
testBitNot("0");
testBitNot("2.3");
testBitNot("-9.4");
// Try to provoke allocation failures in the fast path to check that
// we recover by taking the slow path instead.
function TryToGC() {
var x = 0x40000000;
for (var i = 0; i < 1000000; i++) {
assertEquals(~0x40000000, ~x);
}
}
TryToGC();
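
The float cases in this test (~2.2, ~NaN, ~Infinity, the 0x80000000 boundaries) all exercise ECMAScript's ToInt32 conversion, which the new stub performs via IntegerConvert before applying the bitwise not. For reference, ToInt32 maps NaN and the infinities to 0, truncates toward zero, and wraps modulo 2^32 into the signed 32-bit range; a sketch in C++ (illustrative, not V8's implementation):

  #include <cmath>
  #include <cstdint>

  int32_t ToInt32(double d) {
    if (std::isnan(d) || std::isinf(d)) return 0;  // NaN/±Infinity -> 0
    double t = std::trunc(d);                      // truncate toward zero
    double m = std::fmod(t, 4294967296.0);         // wrap modulo 2^32
    if (m < 0) m += 4294967296.0;                  // normalize into [0, 2^32)
    return static_cast<int32_t>(static_cast<uint32_t>(static_cast<uint64_t>(m)));
  }
  // E.g. ~ToInt32(2.2) == ~2 == -3, and ToInt32(0x80000000 + 0.12345) wraps
  // to -2147483648, matching the eval'ed results the test compares against.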