Commit b7d48f58 authored by erik.corry@gmail.com

A bunch of changes to speed up math on ARM.

* Identify heap numbers that contain non-Smi int32s and do bit
ops on them without calling the fp hardware or emulation.
* Identify results that are non-Smi int32s and write them into
heap numbers without calling the fp hardware or emulation.
* Do unary minus on heap numbers without going into the runtime
system.
* On add, sub and mul, if we have both Smi and heap number inputs
to the same operation, convert the Smi to a double and do
the op without going into the runtime system.  This also applies
if we have two Smi inputs but the result is not a Smi.
Review URL: http://codereview.chromium.org/119241

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@2131 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
parent 13e548af
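The WriteInt32ToHeapNumber stub named in the bullets writes an int32 result into a heap number without touching the FP hardware or emulation. As a rough host-side illustration of that idea (plain C++, not the ARM stub; the function name is invented for this sketch), the IEEE-754 bit pattern of a double can be assembled from an int32 with integer operations only:

#include <cstdint>

// Build the IEEE-754 double bit pattern for an int32 using only integer ops:
// extract sign and magnitude, find the leading one bit, bias the exponent by
// 1023, and shift the remaining bits into the 52-bit mantissa field.
uint64_t Int32ToDoubleBits(int32_t value) {
  if (value == 0) return 0;  // +0.0
  uint64_t sign = 0;
  uint64_t magnitude;
  if (value < 0) {
    sign = 1ull << 63;
    magnitude = static_cast<uint64_t>(-static_cast<int64_t>(value));
  } else {
    magnitude = static_cast<uint32_t>(value);
  }
  int top = 63;
  while (((magnitude >> top) & 1) == 0) top--;   // position of the leading one
  uint64_t exponent = static_cast<uint64_t>(top) + 1023;
  uint64_t mantissa = (magnitude & ((1ull << top) - 1)) << (52 - top);
  return sign | (exponent << 52) | mantissa;
}

On a little-endian machine the low 32 bits of the result are the word at HeapNumber::kMantissaOffset and the high 32 bits the word at kExponentOffset, which is all such a stub needs to store.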
......@@ -41,6 +41,8 @@ class CodeStub BASE_EMBEDDED {
SmiOp,
Compare,
RecordWrite, // Last stub that allows stub calls inside.
ConvertToDouble,
WriteInt32ToHeapNumber,
StackCheck,
UnarySub,
RevertToNumber,
......
......@@ -230,11 +230,13 @@ class StackCheckStub : public CodeStub {
class UnarySubStub : public CodeStub {
public:
UnarySubStub() { }
explicit UnarySubStub(bool overwrite)
: overwrite_(overwrite) { }
private:
bool overwrite_;
Major MajorKey() { return UnarySub; }
int MinorKey() { return 0; }
int MinorKey() { return overwrite_ ? 1 : 0; }
void Generate(MacroAssembler* masm);
const char* GetName() { return "UnarySubStub"; }
......
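Folding overwrite_ into MinorKey() matters because generated stubs are looked up by their (major, minor) key; the overwrite and non-overwrite variants emit different code, so they must not share a cache slot. A tiny generic sketch of that caching pattern (illustrative C++ only, not V8's CodeStub machinery; all names here are invented):

#include <cstdint>
#include <map>
#include <utility>

// A stand-in for a code cache keyed on (major, minor). Two stub variants that
// generate different code must differ in at least one key component.
using StubKey = std::pair<int, int>;   // (major, minor)
static std::map<StubKey, const void*> stub_cache;

const void* GetOrGenerateStub(int major, int minor, const void* (*generate)()) {
  StubKey key{major, minor};
  auto it = stub_cache.find(key);
  if (it != stub_cache.end()) return it->second;  // reuse previously generated code
  const void* code = generate();                  // generate once, then cache
  stub_cache[key] = code;
  return code;
}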
......@@ -25,8 +25,6 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// How crappy is it that I have to implement completely basic stuff
// like this myself? Answer: very.
String.prototype.startsWith = function (str) {
if (str.length > this.length)
return false;
......
......@@ -5057,7 +5057,10 @@ void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) {
break;
case Token::SUB: {
UnarySubStub stub;
bool overwrite =
(node->AsBinaryOperation() != NULL &&
node->AsBinaryOperation()->ResultOverwriteAllowed());
UnarySubStub stub(overwrite);
// TODO(1222589): remove dependency of TOS being cached inside stub
Result operand = frame_->Pop();
Result answer = frame_->CallStub(&stub, &operand);
......@@ -6594,13 +6597,21 @@ void UnarySubStub::Generate(MacroAssembler* masm) {
__ mov(edx, FieldOperand(eax, HeapObject::kMapOffset));
__ cmp(edx, Factory::heap_number_map());
__ j(not_equal, &slow);
__ mov(edx, Operand(eax));
// edx: operand
FloatingPointHelper::AllocateHeapNumber(masm, &undo, ebx, ecx);
// eax: allocated 'empty' number
__ fld_d(FieldOperand(edx, HeapNumber::kValueOffset));
__ fchs();
__ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
if (overwrite_) {
__ mov(edx, FieldOperand(eax, HeapNumber::kExponentOffset));
__ xor_(edx, HeapNumber::kSignMask); // Flip sign.
__ mov(FieldOperand(eax, HeapNumber::kExponentOffset), edx);
} else {
__ mov(edx, Operand(eax));
// edx: operand
FloatingPointHelper::AllocateHeapNumber(masm, &undo, ebx, ecx);
// eax: allocated 'empty' number
__ mov(ecx, FieldOperand(edx, HeapNumber::kExponentOffset));
__ xor_(ecx, HeapNumber::kSignMask); // Flip sign.
__ mov(FieldOperand(eax, HeapNumber::kExponentOffset), ecx);
__ mov(ecx, FieldOperand(edx, HeapNumber::kMantissaOffset));
__ mov(FieldOperand(eax, HeapNumber::kMantissaOffset), ecx);
}
__ bind(&done);
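The overwrite branch above negates the heap number in place by XOR-ing HeapNumber::kSignMask into the word at kExponentOffset instead of loading the value onto the FPU; the non-overwrite branch copies both 32-bit words into a freshly allocated heap number and flips the sign on the copy. The underlying bit trick can be checked in isolation (plain C++ sketch, assuming the usual IEEE-754 double layout):

#include <cstdint>
#include <cstring>

// Negate a double by flipping its sign bit with an integer XOR, the same
// operation the stub performs on the heap number's kExponentOffset word.
double NegateViaSignBit(double x) {
  uint64_t bits;
  std::memcpy(&bits, &x, sizeof bits);
  bits ^= 1ull << 63;                  // kSignMask, in the upper 32-bit word
  std::memcpy(&x, &bits, sizeof bits);
  return x;
}

This works for every value, including infinities and NaNs, because negating an IEEE-754 double is exactly a sign-bit flip.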
......@@ -6744,7 +6755,7 @@ void CompareStub::Generate(MacroAssembler* masm) {
// The representation of NaN values has all exponent bits (52..62) set,
// and not all mantissa bits (0..51) clear.
// Read top bits of double representation (second word of value).
__ mov(eax, FieldOperand(edx, HeapNumber::kValueOffset + kPointerSize));
__ mov(eax, FieldOperand(edx, HeapNumber::kExponentOffset));
// Test that exponent bits are all set.
__ not_(eax);
__ test(eax, Immediate(0x7ff00000));
......@@ -6754,7 +6765,7 @@ void CompareStub::Generate(MacroAssembler* masm) {
// Shift out flag and all exponent bits, retaining only mantissa.
__ shl(eax, 12);
// Or with all low-bits of mantissa.
__ or_(eax, FieldOperand(edx, HeapNumber::kValueOffset));
__ or_(eax, FieldOperand(edx, HeapNumber::kMantissaOffset));
// Return zero equal if all bits in mantissa is zero (it's an Infinity)
// and non-zero if not (it's a NaN).
__ ret(0);
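The two CompareStub hunks above read the upper and lower words of the double by their new names and implement the NaN test with integer operations only: all eleven exponent bits set plus at least one mantissa bit set means NaN, while an all-zero mantissa means Infinity. Roughly the same test in stand-alone C++ (illustrative sketch, not the stub):

#include <cstdint>
#include <cstring>

// NaN and Infinity both have all exponent bits set; only NaN has a non-zero
// mantissa. hi is the kExponentOffset word, lo the kMantissaOffset word.
bool IsNaNBits(double x) {
  uint64_t bits;
  std::memcpy(&bits, &x, sizeof bits);
  uint32_t hi = static_cast<uint32_t>(bits >> 32);
  uint32_t lo = static_cast<uint32_t>(bits);
  if ((hi & 0x7ff00000u) != 0x7ff00000u) return false;  // exponent not all set
  return ((hi & 0x000fffffu) | lo) != 0;                // any mantissa bit set => NaN
}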
......
......@@ -1162,8 +1162,21 @@ class HeapNumber: public HeapObject {
// Layout description.
static const int kValueOffset = HeapObject::kHeaderSize;
// IEEE doubles are two 32 bit words. The first is just mantissa, the second
// is a mixture of sign, exponent and mantissa. This is the ordering on a
// little endian machine with little endian double word ordering.
static const int kMantissaOffset = kValueOffset;
static const int kExponentOffset = kValueOffset + 4;
static const int kSize = kValueOffset + kDoubleSize;
static const uint32_t kSignMask = 0x80000000u;
static const uint32_t kExponentMask = 0x7ff00000u;
static const uint32_t kMantissaMask = 0xfffffu;
static const int kExponentBias = 1023;
static const int kExponentShift = 20;
static const int kMantissaBitsInTopWord = 20;
static const int kNonMantissaBitsInTopWord = 12;
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(HeapNumber);
};
......
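For reference, a small sketch of how the new HeapNumber constants map onto an IEEE-754 double (host-side C++, assuming the little-endian word order described in the layout comment; the function name is invented):

#include <cstdint>
#include <cstring>

// Extract the unbiased exponent from the kExponentOffset word using the
// kExponentMask / kExponentShift / kExponentBias constants declared above.
int UnbiasedExponent(double x) {
  uint64_t bits;
  std::memcpy(&bits, &x, sizeof bits);
  uint32_t exponent_word = static_cast<uint32_t>(bits >> 32);  // word at kExponentOffset
  uint32_t biased = (exponent_word & 0x7ff00000u) >> 20;       // kExponentMask, kExponentShift
  return static_cast<int>(biased) - 1023;                      // kExponentBias
}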