Commit c84af682 authored by sgjesse@chromium.org

Port optimization of calls to GenericBinaryStub to x64.

See description of the change in the ia32 changelist at http://codereview.chromium.org/246075.

Minor changes to the ia32 version: use variables for the parameter-passing registers (edx and eax) to make the parameter setup code easier to read.
Review URL: http://codereview.chromium.org/335005

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@3136 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
parent b92a0594
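For reference only (not part of the commit message): a minimal sketch, taken from the x64 hunks below, of how a deferred-code call site changes with this optimization. The register convention (left in rdx, right in rax) and the NO_SMI_CODE_IN_STUB flag come from the diff itself.

  // Before: operands pushed on the stack, stub called directly.
  __ push(dst_);
  __ Push(value_);
  GenericBinaryOpStub stub(Token::ADD, overwrite_mode_, SMI_CODE_INLINED);
  __ CallStub(&stub);

  // After: GenerateCall moves left into rdx and right into rax (when register
  // passing is supported for the operation) and then calls the stub.
  GenericBinaryOpStub stub(Token::ADD, overwrite_mode_, NO_SMI_CODE_IN_STUB);
  stub.GenerateCall(masm_, dst_, value_);  // left = dst_, right = value_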
@@ -6510,42 +6510,47 @@ void GenericBinaryOpStub::GenerateCall(
     __ push(right);
   } else {
     // The calling convention with registers is left in edx and right in eax.
-    __ IncrementCounter(&Counters::generic_binary_stub_calls_regs, 1);
-    if (!(left.is(edx) && right.is(eax))) {
-      if (left.is(eax) && right.is(edx)) {
+    Register left_arg = edx;
+    Register right_arg = eax;
+    if (!(left.is(left_arg) && right.is(right_arg))) {
+      if (left.is(right_arg) && right.is(left_arg)) {
         if (IsOperationCommutative()) {
           SetArgsReversed();
         } else {
           __ xchg(left, right);
         }
-      } else if (left.is(edx)) {
-        __ mov(eax, right);
-      } else if (left.is(eax)) {
+      } else if (left.is(left_arg)) {
+        __ mov(right_arg, right);
+      } else if (left.is(right_arg)) {
         if (IsOperationCommutative()) {
-          __ mov(edx, right);
+          __ mov(left_arg, right);
           SetArgsReversed();
         } else {
-          __ mov(edx, left);
-          __ mov(eax, right);
+          // Order of moves important to avoid destroying left argument.
+          __ mov(left_arg, left);
+          __ mov(right_arg, right);
         }
-      } else if (right.is(edx)) {
+      } else if (right.is(left_arg)) {
         if (IsOperationCommutative()) {
-          __ mov(eax, left);
+          __ mov(right_arg, left);
           SetArgsReversed();
         } else {
-          __ mov(eax, right);
-          __ mov(edx, left);
+          // Order of moves important to avoid destroying right argument.
+          __ mov(right_arg, right);
+          __ mov(left_arg, left);
         }
-      } else if (right.is(eax)) {
-        __ mov(edx, left);
+      } else if (right.is(right_arg)) {
+        __ mov(left_arg, left);
       } else {
-        __ mov(edx, left);
-        __ mov(eax, right);
+        // Order of moves is not important.
+        __ mov(left_arg, left);
+        __ mov(right_arg, right);
       }
     }

     // Update flags to indicate that arguments are in registers.
     SetArgsInRegisters();
+    __ IncrementCounter(&Counters::generic_binary_stub_calls_regs, 1);
   }

   // Call the stub.
@@ -6562,19 +6567,22 @@ void GenericBinaryOpStub::GenerateCall(
     __ push(left);
     __ push(Immediate(right));
   } else {
-    // Adapt arguments to the calling convention left in edx and right in eax.
-    if (left.is(edx)) {
-      __ mov(eax, Immediate(right));
-    } else if (left.is(eax) && IsOperationCommutative()) {
-      __ mov(edx, Immediate(right));
+    // The calling convention with registers is left in edx and right in eax.
+    Register left_arg = edx;
+    Register right_arg = eax;
+    if (left.is(left_arg)) {
+      __ mov(right_arg, Immediate(right));
+    } else if (left.is(right_arg) && IsOperationCommutative()) {
+      __ mov(left_arg, Immediate(right));
       SetArgsReversed();
     } else {
-      __ mov(edx, left);
-      __ mov(eax, Immediate(right));
+      __ mov(left_arg, left);
+      __ mov(right_arg, Immediate(right));
     }

     // Update flags to indicate that arguments are in registers.
     SetArgsInRegisters();
+    __ IncrementCounter(&Counters::generic_binary_stub_calls_regs, 1);
   }

   // Call the stub.
@@ -6591,18 +6599,21 @@ void GenericBinaryOpStub::GenerateCall(
     __ push(Immediate(left));
     __ push(right);
   } else {
-    // Adapt arguments to the calling convention left in edx and right in eax.
-    bool is_commutative = (op_ == (Token::ADD) || (op_ == Token::MUL));
-    if (right.is(eax)) {
-      __ mov(edx, Immediate(left));
-    } else if (right.is(edx) && is_commutative) {
-      __ mov(eax, Immediate(left));
+    // The calling convention with registers is left in edx and right in eax.
+    Register left_arg = edx;
+    Register right_arg = eax;
+    if (right.is(right_arg)) {
+      __ mov(left_arg, Immediate(left));
+    } else if (right.is(left_arg) && IsOperationCommutative()) {
+      __ mov(right_arg, Immediate(left));
+      SetArgsReversed();
     } else {
-      __ mov(edx, Immediate(left));
-      __ mov(eax, right);
+      __ mov(left_arg, Immediate(left));
+      __ mov(right_arg, right);
     }

     // Update flags to indicate that arguments are in registers.
     SetArgsInRegisters();
+    __ IncrementCounter(&Counters::generic_binary_stub_calls_regs, 1);
   }

   // Call the stub.
@@ -6926,7 +6937,7 @@ void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
       // Tag smi result and return.
       ASSERT(kSmiTagSize == times_2);  // adjust code if not the case
       __ lea(eax, Operand(eax, eax, times_1, kSmiTag));
-      __ ret(2 * kPointerSize);
+      GenerateReturn(masm);

       // All ops except SHR return a signed int32 that we load in a HeapNumber.
       if (op_ != Token::SHR) {
@@ -6953,7 +6964,7 @@ void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
         __ mov(Operand(esp, 1 * kPointerSize), ebx);
         __ fild_s(Operand(esp, 1 * kPointerSize));
         __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
-        __ ret(2 * kPointerSize);
+        GenerateReturn(masm);
       }

       // Clear the FPU exception flag and reset the stack before calling
@@ -6985,7 +6996,7 @@ void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
   // If all else fails, use the runtime system to get the correct
   // result. If arguments was passed in registers now place them on the
-  // stack in the correct order.
+  // stack in the correct order below the return address.
   __ bind(&call_runtime);
   if (HasArgumentsInRegisters()) {
     __ pop(ecx);
......
@@ -638,7 +638,7 @@ class ToBooleanStub: public CodeStub {
 };


-// Flag that indicates whether how to generate code for the stub.
+// Flag that indicates how to generate code for the stub GenericBinaryOpStub.
 enum GenericBinaryFlags {
   NO_GENERIC_BINARY_FLAGS = 0,
   NO_SMI_CODE_IN_STUB = 1 << 0  // Omit smi code in stub.
@@ -647,10 +647,10 @@ enum GenericBinaryFlags {

 class GenericBinaryOpStub: public CodeStub {
  public:
-  GenericBinaryOpStub(Token::Value operation,
+  GenericBinaryOpStub(Token::Value op,
                       OverwriteMode mode,
                       GenericBinaryFlags flags)
-      : op_(operation),
+      : op_(op),
         mode_(mode),
         flags_(flags),
         args_in_registers_(false),
......
@@ -5057,10 +5057,8 @@ class DeferredInlineBinaryOperation: public DeferredCode {

 void DeferredInlineBinaryOperation::Generate() {
-  __ push(left_);
-  __ push(right_);
-  GenericBinaryOpStub stub(op_, mode_, SMI_CODE_INLINED);
-  __ CallStub(&stub);
+  GenericBinaryOpStub stub(op_, mode_, NO_SMI_CODE_IN_STUB);
+  stub.GenerateCall(masm_, left_, right_);
   if (!dst_.is(rax)) __ movq(dst_, rax);
 }
@@ -5089,16 +5087,16 @@ void CodeGenerator::GenericBinaryOperation(Token::Value op,
       // Bit operations always assume they likely operate on Smis. Still only
       // generate the inline Smi check code if this operation is part of a loop.
       flags = (loop_nesting() > 0)
-          ? SMI_CODE_INLINED
-          : SMI_CODE_IN_STUB;
+          ? NO_SMI_CODE_IN_STUB
+          : NO_GENERIC_BINARY_FLAGS;
       break;

     default:
       // By default only inline the Smi check code for likely smis if this
       // operation is part of a loop.
       flags = ((loop_nesting() > 0) && type->IsLikelySmi())
-          ? SMI_CODE_INLINED
-          : SMI_CODE_IN_STUB;
+          ? NO_SMI_CODE_IN_STUB
+          : NO_GENERIC_BINARY_FLAGS;
       break;
   }
@@ -5157,7 +5155,7 @@ void CodeGenerator::GenericBinaryOperation(Token::Value op,
     return;
   }

-  if (flags == SMI_CODE_INLINED && !generate_no_smi_code) {
+  if ((flags & NO_SMI_CODE_IN_STUB) != 0 && !generate_no_smi_code) {
     LikelySmiBinaryOperation(op, &left, &right, overwrite_mode);
   } else {
     frame_->Push(&left);
@@ -5166,7 +5164,7 @@ void CodeGenerator::GenericBinaryOperation(Token::Value op,
     // that does not check for the fast smi case.
     // The same stub is used for NO_SMI_CODE and SMI_CODE_INLINED.
     if (generate_no_smi_code) {
-      flags = SMI_CODE_INLINED;
+      flags = NO_SMI_CODE_IN_STUB;
     }
     GenericBinaryOpStub stub(op, overwrite_mode, flags);
     Result answer = frame_->CallStub(&stub, 2);
@@ -5221,41 +5219,32 @@ void DeferredReferenceGetNamedValue::Generate() {

 void DeferredInlineSmiAdd::Generate() {
-  __ push(dst_);
-  __ Push(value_);
-  GenericBinaryOpStub igostub(Token::ADD, overwrite_mode_, SMI_CODE_INLINED);
-  __ CallStub(&igostub);
+  GenericBinaryOpStub igostub(Token::ADD, overwrite_mode_, NO_SMI_CODE_IN_STUB);
+  igostub.GenerateCall(masm_, dst_, value_);
   if (!dst_.is(rax)) __ movq(dst_, rax);
 }


 void DeferredInlineSmiAddReversed::Generate() {
-  __ Push(value_);
-  __ push(dst_);
-  GenericBinaryOpStub igostub(Token::ADD, overwrite_mode_, SMI_CODE_INLINED);
-  __ CallStub(&igostub);
+  GenericBinaryOpStub igostub(Token::ADD, overwrite_mode_, NO_SMI_CODE_IN_STUB);
+  igostub.GenerateCall(masm_, value_, dst_);
   if (!dst_.is(rax)) __ movq(dst_, rax);
 }


 void DeferredInlineSmiSub::Generate() {
-  __ push(dst_);
-  __ Push(value_);
-  GenericBinaryOpStub igostub(Token::SUB, overwrite_mode_, SMI_CODE_INLINED);
-  __ CallStub(&igostub);
+  GenericBinaryOpStub igostub(Token::SUB, overwrite_mode_, NO_SMI_CODE_IN_STUB);
+  igostub.GenerateCall(masm_, dst_, value_);
   if (!dst_.is(rax)) __ movq(dst_, rax);
 }


 void DeferredInlineSmiOperation::Generate() {
-  __ push(src_);
-  __ Push(value_);
   // For mod we don't generate all the Smi code inline.
   GenericBinaryOpStub stub(
       op_,
       overwrite_mode_,
-      (op_ == Token::MOD) ? SMI_CODE_IN_STUB : SMI_CODE_INLINED);
-  __ CallStub(&stub);
+      (op_ == Token::MOD) ? NO_GENERIC_BINARY_FLAGS : NO_SMI_CODE_IN_STUB);
+  stub.GenerateCall(masm_, src_, value_);
   if (!dst_.is(rax)) __ movq(dst_, rax);
 }
@@ -7339,6 +7328,127 @@ const char* GenericBinaryOpStub::GetName() {
 }


+void GenericBinaryOpStub::GenerateCall(
+    MacroAssembler* masm,
+    Register left,
+    Register right) {
+  if (!ArgsInRegistersSupported()) {
+    // Pass arguments on the stack.
+    __ push(left);
+    __ push(right);
+  } else {
+    // The calling convention with registers is left in rdx and right in rax.
+    Register left_arg = rdx;
+    Register right_arg = rax;
+    if (!(left.is(left_arg) && right.is(right_arg))) {
+      if (left.is(right_arg) && right.is(left_arg)) {
+        if (IsOperationCommutative()) {
+          SetArgsReversed();
+        } else {
+          __ xchg(left, right);
+        }
+      } else if (left.is(left_arg)) {
+        __ movq(right_arg, right);
+      } else if (left.is(right_arg)) {
+        if (IsOperationCommutative()) {
+          __ movq(left_arg, right);
+          SetArgsReversed();
+        } else {
+          // Order of moves important to avoid destroying left argument.
+          __ movq(left_arg, left);
+          __ movq(right_arg, right);
+        }
+      } else if (right.is(left_arg)) {
+        if (IsOperationCommutative()) {
+          __ movq(right_arg, left);
+          SetArgsReversed();
+        } else {
+          // Order of moves important to avoid destroying right argument.
+          __ movq(right_arg, right);
+          __ movq(left_arg, left);
+        }
+      } else if (right.is(right_arg)) {
+        __ movq(left_arg, left);
+      } else {
+        // Order of moves is not important.
+        __ movq(left_arg, left);
+        __ movq(right_arg, right);
+      }
+    }
+
+    // Update flags to indicate that arguments are in registers.
+    SetArgsInRegisters();
+    __ IncrementCounter(&Counters::generic_binary_stub_calls_regs, 1);
+  }
+
+  // Call the stub.
+  __ CallStub(this);
+}
+
+
+void GenericBinaryOpStub::GenerateCall(
+    MacroAssembler* masm,
+    Register left,
+    Smi* right) {
+  if (!ArgsInRegistersSupported()) {
+    // Pass arguments on the stack.
+    __ push(left);
+    __ Push(right);
+  } else {
+    // The calling convention with registers is left in rdx and right in rax.
+    Register left_arg = rdx;
+    Register right_arg = rax;
+    if (left.is(left_arg)) {
+      __ Move(right_arg, right);
+    } else if (left.is(right_arg) && IsOperationCommutative()) {
+      __ Move(left_arg, right);
+      SetArgsReversed();
+    } else {
+      __ movq(left_arg, left);
+      __ Move(right_arg, right);
+    }
+
+    // Update flags to indicate that arguments are in registers.
+    SetArgsInRegisters();
+    __ IncrementCounter(&Counters::generic_binary_stub_calls_regs, 1);
+  }
+
+  // Call the stub.
+  __ CallStub(this);
+}
+
+
+void GenericBinaryOpStub::GenerateCall(
+    MacroAssembler* masm,
+    Smi* left,
+    Register right) {
+  if (!ArgsInRegistersSupported()) {
+    // Pass arguments on the stack.
+    __ Push(left);
+    __ push(right);
+  } else {
+    // The calling convention with registers is left in rdx and right in rax.
+    Register left_arg = rdx;
+    Register right_arg = rax;
+    if (right.is(right_arg)) {
+      __ Move(left_arg, left);
+    } else if (right.is(left_arg) && IsOperationCommutative()) {
+      __ Move(right_arg, left);
+      SetArgsReversed();
+    } else {
+      __ Move(left_arg, left);
+      __ movq(right_arg, right);
+    }
+
+    // Update flags to indicate that arguments are in registers.
+    SetArgsInRegisters();
+    __ IncrementCounter(&Counters::generic_binary_stub_calls_regs, 1);
+  }
+
+  // Call the stub.
+  __ CallStub(this);
+}
+
+
 void GenericBinaryOpStub::GenerateSmiCode(MacroAssembler* masm, Label* slow) {
   // Perform fast-case smi code for the operation (rax <op> rbx) and
   // leave result in register rax.
@@ -7411,22 +7521,21 @@ void GenericBinaryOpStub::GenerateSmiCode(MacroAssembler* masm, Label* slow) {

 void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
   Label call_runtime;
-  if (flags_ == SMI_CODE_IN_STUB) {
+  if (HasSmiCodeInStub()) {
     // The fast case smi code wasn't inlined in the stub caller
     // code. Generate it here to speed up common operations.
     Label slow;
     __ movq(rbx, Operand(rsp, 1 * kPointerSize));  // get y
     __ movq(rax, Operand(rsp, 2 * kPointerSize));  // get x
     GenerateSmiCode(masm, &slow);
-    __ ret(2 * kPointerSize);  // remove both operands
+    GenerateReturn(masm);

     // Too bad. The fast case smi code didn't succeed.
     __ bind(&slow);
   }

-  // Setup registers.
-  __ movq(rax, Operand(rsp, 1 * kPointerSize));  // get y
-  __ movq(rdx, Operand(rsp, 2 * kPointerSize));  // get x
+  // Make sure the arguments are in rdx and rax.
+  GenerateLoadArguments(masm);

   // Floating point case.
   switch (op_) {
@@ -7450,7 +7559,10 @@ void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
         __ JumpIfNotSmi(rax, &skip_allocation);
         // Fall through!
       case NO_OVERWRITE:
-        __ AllocateHeapNumber(rax, rcx, &call_runtime);
+        // Allocate a heap number for the result. Keep rax and rdx intact
+        // for the possible runtime call.
+        __ AllocateHeapNumber(rbx, rcx, &call_runtime);
+        __ movq(rax, rbx);
         __ bind(&skip_allocation);
         break;
       default: UNREACHABLE();
@@ -7466,7 +7578,7 @@ void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
         default: UNREACHABLE();
       }
       __ movsd(FieldOperand(rax, HeapNumber::kValueOffset), xmm4);
-      __ ret(2 * kPointerSize);
+      GenerateReturn(masm);
     }
     case Token::MOD: {
       // For MOD we go directly to runtime in the non-smi case.
@@ -7534,7 +7646,7 @@ void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
       __ JumpIfNotValidSmiValue(rax, &non_smi_result);
       // Tag smi result, if possible, and return.
       __ Integer32ToSmi(rax, rax);
-      __ ret(2 * kPointerSize);
+      GenerateReturn(masm);

       // All ops except SHR return a signed int32 that we load in a HeapNumber.
       if (op_ != Token::SHR && non_smi_result.is_linked()) {
@@ -7560,7 +7672,7 @@ void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
         __ movq(Operand(rsp, 1 * kPointerSize), rbx);
         __ fild_s(Operand(rsp, 1 * kPointerSize));
         __ fstp_d(FieldOperand(rax, HeapNumber::kValueOffset));
-        __ ret(2 * kPointerSize);
+        GenerateReturn(masm);
       }

       // Clear the FPU exception flag and reset the stack before calling
@@ -7591,8 +7703,20 @@ void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
   }

   // If all else fails, use the runtime system to get the correct
-  // result.
+  // result. If arguments was passed in registers now place them on the
+  // stack in the correct order below the return address.
   __ bind(&call_runtime);
+  if (HasArgumentsInRegisters()) {
+    __ pop(rcx);
+    if (HasArgumentsReversed()) {
+      __ push(rax);
+      __ push(rdx);
+    } else {
+      __ push(rdx);
+      __ push(rax);
+    }
+    __ push(rcx);
+  }
   switch (op_) {
     case Token::ADD:
       __ InvokeBuiltin(Builtins::ADD, JUMP_FUNCTION);
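Aside (not part of the diff): when the arguments came in via registers, the fallback above rebuilds the stack layout used for stack-passed arguments before jumping to the builtin. A sketch of the resulting x64 stack, assuming kPointerSize == 8 and arguments not reversed:

  // pop(rcx)               ; rcx = return address
  // push(rdx); push(rax)   ; left, then right
  // push(rcx)              ; return address back on top
  //
  // [rsp]       -> return address
  // [rsp + 8]   -> right operand (was rax), read back as "y"
  // [rsp + 16]  -> left operand  (was rdx), read back as "x"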
@@ -7633,6 +7757,26 @@ void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
 }


+void GenericBinaryOpStub::GenerateLoadArguments(MacroAssembler* masm) {
+  // If arguments are not passed in registers read them from the stack.
+  if (!HasArgumentsInRegisters()) {
+    __ movq(rax, Operand(rsp, 1 * kPointerSize));
+    __ movq(rdx, Operand(rsp, 2 * kPointerSize));
+  }
+}
+
+
+void GenericBinaryOpStub::GenerateReturn(MacroAssembler* masm) {
+  // If arguments are not passed in registers remove them from the stack before
+  // returning.
+  if (!HasArgumentsInRegisters()) {
+    __ ret(2 * kPointerSize);  // Remove both operands
+  } else {
+    __ ret(0);
+  }
+}
+
+
 int CompareStub::MinorKey() {
   // Encode the two parameters in a unique 16 bit value.
   ASSERT(static_cast<unsigned>(cc_) < (1 << 15));
......
@@ -647,11 +647,10 @@ class ToBooleanStub: public CodeStub {
 };


-// Flag that indicates whether or not the code that handles smi arguments
-// should be placed in the stub, inlined, or omitted entirely.
+// Flag that indicates how to generate code for the stub GenericBinaryOpStub.
 enum GenericBinaryFlags {
-  SMI_CODE_IN_STUB,
-  SMI_CODE_INLINED
+  NO_GENERIC_BINARY_FLAGS = 0,
+  NO_SMI_CODE_IN_STUB = 1 << 0  // Omit smi code in stub.
 };
@@ -660,45 +659,82 @@ class GenericBinaryOpStub: public CodeStub {
   GenericBinaryOpStub(Token::Value op,
                       OverwriteMode mode,
                       GenericBinaryFlags flags)
-      : op_(op), mode_(mode), flags_(flags) {
+      : op_(op),
+        mode_(mode),
+        flags_(flags),
+        args_in_registers_(false),
+        args_reversed_(false) {
     use_sse3_ = CpuFeatures::IsSupported(CpuFeatures::SSE3);
     ASSERT(OpBits::is_valid(Token::NUM_TOKENS));
   }

-  void GenerateSmiCode(MacroAssembler* masm, Label* slow);
+  // Generate code to call the stub with the supplied arguments. This will add
+  // code at the call site to prepare arguments either in registers or on the
+  // stack together with the actual call.
+  void GenerateCall(MacroAssembler* masm, Register left, Register right);
+  void GenerateCall(MacroAssembler* masm, Register left, Smi* right);
+  void GenerateCall(MacroAssembler* masm, Smi* left, Register right);

  private:
   Token::Value op_;
   OverwriteMode mode_;
   GenericBinaryFlags flags_;
+  bool args_in_registers_;  // Arguments passed in registers not on the stack.
+  bool args_reversed_;  // Left and right argument are swapped.
   bool use_sse3_;

   const char* GetName();

 #ifdef DEBUG
   void Print() {
-    PrintF("GenericBinaryOpStub (op %s), (mode %d, flags %d)\n",
+    PrintF("GenericBinaryOpStub (op %s), "
+           "(mode %d, flags %d, registers %d, reversed %d)\n",
            Token::String(op_),
            static_cast<int>(mode_),
-           static_cast<int>(flags_));
+           static_cast<int>(flags_),
+           static_cast<int>(args_in_registers_),
+           static_cast<int>(args_reversed_));
   }
 #endif

-  // Minor key encoding in 16 bits FSOOOOOOOOOOOOMM.
+  // Minor key encoding in 16 bits FRASOOOOOOOOOOMM.
   class ModeBits: public BitField<OverwriteMode, 0, 2> {};
-  class OpBits: public BitField<Token::Value, 2, 12> {};
-  class SSE3Bits: public BitField<bool, 14, 1> {};
+  class OpBits: public BitField<Token::Value, 2, 10> {};
+  class SSE3Bits: public BitField<bool, 12, 1> {};
+  class ArgsInRegistersBits: public BitField<bool, 13, 1> {};
+  class ArgsReversedBits: public BitField<bool, 14, 1> {};
   class FlagBits: public BitField<GenericBinaryFlags, 15, 1> {};

   Major MajorKey() { return GenericBinaryOp; }
   int MinorKey() {
     // Encode the parameters in a unique 16 bit value.
     return OpBits::encode(op_)
            | ModeBits::encode(mode_)
            | FlagBits::encode(flags_)
-           | SSE3Bits::encode(use_sse3_);
+           | SSE3Bits::encode(use_sse3_)
+           | ArgsInRegistersBits::encode(args_in_registers_)
+           | ArgsReversedBits::encode(args_reversed_);
   }

   void Generate(MacroAssembler* masm);
+  void GenerateSmiCode(MacroAssembler* masm, Label* slow);
+  void GenerateLoadArguments(MacroAssembler* masm);
+  void GenerateReturn(MacroAssembler* masm);
+
+  bool ArgsInRegistersSupported() {
+    return ((op_ == Token::ADD) || (op_ == Token::SUB)
+            || (op_ == Token::MUL) || (op_ == Token::DIV))
+           && flags_ != NO_SMI_CODE_IN_STUB;
+  }
+  bool IsOperationCommutative() {
+    return (op_ == Token::ADD) || (op_ == Token::MUL);
+  }
+
+  void SetArgsInRegisters() { args_in_registers_ = true; }
+  void SetArgsReversed() { args_reversed_ = true; }
+
+  bool HasSmiCodeInStub() { return (flags_ & NO_SMI_CODE_IN_STUB) == 0; }
+  bool HasArgumentsInRegisters() { return args_in_registers_; }
+  bool HasArgumentsReversed() { return args_reversed_; }
 };
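Aside (not part of the diff): the "FRASOOOOOOOOOOMM" mnemonic in the minor key comment above reads from the high bit down; a sketch of the 16-bit layout implied by the BitField declarations:

  // bit  15    : F - flags_              (FlagBits)
  // bit  14    : R - args_reversed_      (ArgsReversedBits)
  // bit  13    : A - args_in_registers_  (ArgsInRegistersBits)
  // bit  12    : S - use_sse3_           (SSE3Bits)
  // bits 2-11  : O - op_                 (OpBits, 10 bits)
  // bits 0-1   : M - mode_               (ModeBits, 2 bits)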
......