Support register arguments in more cases.

1. MUL and DIV on SMIs.
2. When calling GenericBinaryOpStub from a virtual frame.
3. When generating code for a loop counter.
Overall performance gain is about 0.6%.
Review URL: http://codereview.chromium.org/555098

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@3708 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
parent 2f0ad64d
@@ -763,10 +763,8 @@ class FloatingPointHelper : public AllStatic {
                                ArgLocation arg_location = ARGS_ON_STACK);
   // Similar to LoadFloatOperand but assumes that both operands are smis.
-  // Accepts operands on stack or in eax, ebx.
-  static void LoadFloatSmis(MacroAssembler* masm,
-                            Register scratch,
-                            ArgLocation arg_location);
+  // Accepts operands in eax, ebx.
+  static void LoadFloatSmis(MacroAssembler* masm, Register scratch);
   // Test if operands are smi or number objects (fp). Requirements:
   // operand_1 in eax, operand_2 in edx; falls through on float
@@ -786,10 +784,8 @@ class FloatingPointHelper : public AllStatic {
   static void LoadSse2Operands(MacroAssembler* masm, Label* not_numbers);
   // Similar to LoadSse2Operands but assumes that both operands are smis.
-  // Accepts operands on stack or in eax, ebx.
-  static void LoadSse2Smis(MacroAssembler* masm,
-                           Register scratch,
-                           ArgLocation arg_location);
+  // Accepts operands in eax, ebx.
+  static void LoadSse2Smis(MacroAssembler* masm, Register scratch);
 };
@@ -978,10 +974,8 @@ void CodeGenerator::GenericBinaryOperation(Token::Value op,
   Result answer;
   if (left_is_non_smi || right_is_non_smi) {
     // Go straight to the slow case, with no smi code.
-    frame_->Push(&left);
-    frame_->Push(&right);
     GenericBinaryOpStub stub(op, overwrite_mode, NO_SMI_CODE_IN_STUB);
-    answer = frame_->CallStub(&stub, 2);
+    answer = stub.GenerateCall(masm_, frame_, &left, &right);
   } else if (right_is_smi) {
     answer = ConstantSmiBinaryOperation(op, &left, right.handle(),
                                         type, false, overwrite_mode);
@@ -997,10 +991,8 @@ void CodeGenerator::GenericBinaryOperation(Token::Value op,
     if (loop_nesting() > 0 && (Token::IsBitOp(op) || type->IsLikelySmi())) {
       answer = LikelySmiBinaryOperation(op, &left, &right, overwrite_mode);
     } else {
-      frame_->Push(&left);
-      frame_->Push(&right);
       GenericBinaryOpStub stub(op, overwrite_mode, NO_GENERIC_BINARY_FLAGS);
-      answer = frame_->CallStub(&stub, 2);
+      answer = stub.GenerateCall(masm_, frame_, &left, &right);
     }
   }
   frame_->Push(&answer);
@@ -7076,6 +7068,21 @@ void GenericBinaryOpStub::GenerateCall(
 }
 
+Result GenericBinaryOpStub::GenerateCall(MacroAssembler* masm,
+                                         VirtualFrame* frame,
+                                         Result* left,
+                                         Result* right) {
+  if (ArgsInRegistersSupported()) {
+    SetArgsInRegisters();
+    return frame->CallStub(this, left, right);
+  } else {
+    frame->Push(left);
+    frame->Push(right);
+    return frame->CallStub(this, 2);
+  }
+}
+
 void GenericBinaryOpStub::GenerateSmiCode(MacroAssembler* masm, Label* slow) {
   if (HasArgsInRegisters()) {
     __ mov(ebx, eax);
@@ -7107,7 +7114,13 @@ void GenericBinaryOpStub::GenerateSmiCode(MacroAssembler* masm, Label* slow) {
       __ j(overflow, &not_smis_or_overflow, not_taken);
       break;
 
+    case Token::MUL:
+      __ mov(edi, eax);  // Backup the left operand.
+      break;
+
     case Token::DIV:
+      __ mov(edi, eax);  // Backup the left operand.
+      // Fall through.
     case Token::MOD:
       // Sign extend eax into edx:eax.
       __ cdq();
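Why the extra backup: on 32-bit V8 a smi is an integer tagged by a one-bit left shift, and the optimistic smi multiply/divide clobbers the left operand in eax. If the result does not fit in a smi, the stub falls back to the heap-number path (use_fp_on_smis below) and needs the original value again, hence the copy into edi here and the restore from edi in the next hunk. A rough host-side sketch of the smi-multiply overflow check, not V8 code and ignoring the -0 corner case:

#include <cstdint>
#include <optional>

// 32-bit V8 smis carry a one-bit tag: a value v is stored as (v << 1).
constexpr int kSmiTagSize = 1;
int32_t SmiUntag(int32_t smi) { return smi >> kSmiTagSize; }

// Multiply two tagged smis.  Untagging one factor keeps the product tagged.
// Returns nullopt when the product leaves the smi range; that is the case the
// stub handles under use_fp_on_smis, where the preserved operands are reloaded
// as doubles.
std::optional<int32_t> SmiMul(int32_t left_smi, int32_t right_smi) {
  int64_t product = static_cast<int64_t>(SmiUntag(left_smi)) * right_smi;
  if (product < INT32_MIN || product > INT32_MAX) return std::nullopt;
  return static_cast<int32_t>(product);  // Result is still tagged.
}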
@@ -7243,28 +7256,18 @@ void GenericBinaryOpStub::GenerateSmiCode(MacroAssembler* masm, Label* slow) {
   __ bind(&use_fp_on_smis);
   // Both operands are known to be SMIs but the result does not fit into a SMI.
   switch (op_) {
-    case Token::ADD:
-    case Token::SUB:
     case Token::MUL:
-    case Token::DIV: {
+    case Token::DIV:
+      __ mov(eax, edi);  // Restore the left operand.
+      // Fall through.
+    case Token::ADD:
+    case Token::SUB: {
       Label after_alloc_failure;
-      FloatingPointHelper::ArgLocation arg_location =
-          (op_ == Token::ADD || op_ == Token::SUB) ?
-              FloatingPointHelper::ARGS_IN_REGISTERS :
-              FloatingPointHelper::ARGS_ON_STACK;
-      __ AllocateHeapNumber(
-          edx,
-          ecx,
-          no_reg,
-          arg_location == FloatingPointHelper::ARGS_IN_REGISTERS ?
-              &after_alloc_failure :
-              slow);
+      __ AllocateHeapNumber(edx, ecx, no_reg, &after_alloc_failure);
       if (CpuFeatures::IsSupported(SSE2)) {
         CpuFeatures::Scope use_sse2(SSE2);
-        FloatingPointHelper::LoadSse2Smis(masm, ecx, arg_location);
+        FloatingPointHelper::LoadSse2Smis(masm, ecx);
         switch (op_) {
           case Token::ADD: __ addsd(xmm0, xmm1); break;
           case Token::SUB: __ subsd(xmm0, xmm1); break;
@@ -7274,7 +7277,7 @@ void GenericBinaryOpStub::GenerateSmiCode(MacroAssembler* masm, Label* slow) {
         }
         __ movdbl(FieldOperand(edx, HeapNumber::kValueOffset), xmm0);
       } else {  // SSE2 not available, use FPU.
-        FloatingPointHelper::LoadFloatSmis(masm, ecx, arg_location);
+        FloatingPointHelper::LoadFloatSmis(masm, ecx);
         switch (op_) {
           case Token::ADD: __ faddp(1); break;
           case Token::SUB: __ fsubp(1); break;
@@ -7287,12 +7290,10 @@ void GenericBinaryOpStub::GenerateSmiCode(MacroAssembler* masm, Label* slow) {
       __ mov(eax, edx);
       GenerateReturn(masm);
-      if (arg_location == FloatingPointHelper::ARGS_IN_REGISTERS) {
-        __ bind(&after_alloc_failure);
-        __ mov(edx, eax);
-        __ mov(eax, ebx);
-        __ jmp(slow);
-      }
+      __ bind(&after_alloc_failure);
+      __ mov(edx, eax);
+      __ mov(eax, ebx);
+      __ jmp(slow);
       break;
     }
@@ -7438,7 +7439,7 @@ void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
       __ bind(&non_smi_result);
       // Allocate a heap number if needed.
       __ mov(ebx, Operand(eax));  // ebx: result
       Label skip_allocation;
       switch (mode_) {
         case OVERWRITE_LEFT:
         case OVERWRITE_RIGHT:
@@ -7881,22 +7882,12 @@ void FloatingPointHelper::LoadSse2Operands(MacroAssembler* masm,
 void FloatingPointHelper::LoadSse2Smis(MacroAssembler* masm,
-                                       Register scratch,
-                                       ArgLocation arg_location) {
-  if (arg_location == ARGS_IN_REGISTERS) {
-    __ mov(scratch, eax);
-  } else {
-    __ mov(scratch, Operand(esp, 2 * kPointerSize));
-  }
+                                       Register scratch) {
+  __ mov(scratch, eax);
   __ SmiUntag(scratch);  // Untag smi before converting to float.
   __ cvtsi2sd(xmm0, Operand(scratch));
-  if (arg_location == ARGS_IN_REGISTERS) {
-    __ mov(scratch, ebx);
-  } else {
-    __ mov(scratch, Operand(esp, 1 * kPointerSize));
-  }
+  __ mov(scratch, ebx);
   __ SmiUntag(scratch);  // Untag smi before converting to float.
   __ cvtsi2sd(xmm1, Operand(scratch));
 }
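LoadSse2Smis (and its FPU twin below) now reads the operands straight from eax and ebx instead of the stack. The conversion itself is just an arithmetic right shift to drop the smi tag, followed by a signed int-to-double conversion. A minimal standalone illustration of what SmiUntag plus cvtsi2sd amount to, assuming the 32-bit one-bit smi tag, not V8 code:

#include <cstdint>
#include <cstdio>

constexpr int kSmiTagSize = 1;
int32_t SmiTag(int32_t value) { return value << kSmiTagSize; }

// SmiUntag: arithmetic shift right; cvtsi2sd: signed int -> double.
double SmiToDouble(int32_t smi) {
  int32_t untagged = smi >> kSmiTagSize;
  return static_cast<double>(untagged);
}

int main() {
  std::printf("%f\n", SmiToDouble(SmiTag(-7)));  // prints -7.000000
}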
@@ -7944,23 +7935,14 @@ void FloatingPointHelper::LoadFloatOperands(MacroAssembler* masm,
 void FloatingPointHelper::LoadFloatSmis(MacroAssembler* masm,
-                                        Register scratch,
-                                        ArgLocation arg_location) {
-  if (arg_location == ARGS_IN_REGISTERS) {
-    __ mov(scratch, eax);
-  } else {
-    __ mov(scratch, Operand(esp, 2 * kPointerSize));
-  }
+                                        Register scratch) {
+  __ mov(scratch, eax);
   __ SmiUntag(scratch);
   __ push(scratch);
   __ fild_s(Operand(esp, 0));
   __ pop(scratch);
-  if (arg_location == ARGS_IN_REGISTERS) {
-    __ mov(scratch, ebx);
-  } else {
-    __ mov(scratch, Operand(esp, 1 * kPointerSize));
-  }
+  __ mov(scratch, ebx);
   __ SmiUntag(scratch);
   __ push(scratch);
   __ fild_s(Operand(esp, 0));
...
@@ -652,6 +652,11 @@ class GenericBinaryOpStub: public CodeStub {
   void GenerateCall(MacroAssembler* masm, Register left, Smi* right);
   void GenerateCall(MacroAssembler* masm, Smi* left, Register right);
 
+  Result GenerateCall(MacroAssembler* masm,
+                      VirtualFrame* frame,
+                      Result* left,
+                      Result* right);
+
  private:
   Token::Value op_;
   OverwriteMode mode_;
@@ -700,15 +705,9 @@ class GenericBinaryOpStub: public CodeStub {
   void GenerateReturn(MacroAssembler* masm);
   void GenerateHeapResultAllocation(MacroAssembler* masm, Label* alloc_failure);
 
-  // Args in registers are always OK for ADD and SUB. Floating-point MUL and DIV
-  // are also OK. Though MUL and DIV on SMIs modify the original registers so
-  // we need to push args on stack anyway.
   bool ArgsInRegistersSupported() {
-    if (op_ == Token::ADD || op_ == Token::SUB) return true;
-    if (op_ == Token::MUL || op_ == Token::DIV) {
-      return flags_ == NO_SMI_CODE_IN_STUB;
-    }
-    return false;
+    return op_ == Token::ADD || op_ == Token::SUB
+        || op_ == Token::MUL || op_ == Token::DIV;
   }
 
   bool IsOperationCommutative() {
     return (op_ == Token::ADD) || (op_ == Token::MUL);
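Taken together with the new GenerateCall overload earlier in the diff, the call-site decision now reduces to: if the operation is ADD, SUB, MUL or DIV, flag the stub as taking register arguments and let the virtual frame place the operands in registers; otherwise keep the old push-then-call sequence. MUL and DIV no longer require NO_SMI_CODE_IN_STUB because the stub backs up and restores the left operand itself. A simplified stand-alone rendering of that dispatch (names invented for illustration, not the V8 API):

#include <cstdio>

enum class Op { kAdd, kSub, kMul, kDiv, kMod, kBitOr };

// Mirrors the relaxed ArgsInRegistersSupported() predicate.
bool ArgsInRegistersSupported(Op op) {
  return op == Op::kAdd || op == Op::kSub || op == Op::kMul || op == Op::kDiv;
}

// Hypothetical call-site helper showing the two code paths.
void EmitBinaryOpCall(Op op) {
  if (ArgsInRegistersSupported(op)) {
    std::puts("mark stub as taking register args; frame moves operands into registers");
  } else {
    std::puts("push left; push right; call stub with 2 stack arguments");
  }
}

int main() {
  EmitBinaryOpCall(Op::kDiv);    // register path (new in this change)
  EmitBinaryOpCall(Op::kBitOr);  // stack path (unchanged)
}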
...
@@ -1560,12 +1560,10 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
     }
   }
 
   // Call stub for +1/-1.
-  __ push(eax);
-  __ push(Immediate(Smi::FromInt(1)));
   GenericBinaryOpStub stub(expr->binary_op(),
                            NO_OVERWRITE,
                            NO_GENERIC_BINARY_FLAGS);
-  __ CallStub(&stub);
+  stub.GenerateCall(masm(), eax, Smi::FromInt(1));
   __ bind(&done);
   // Store the value returned in eax.