Commit 8105ae31 authored by sgjesse@chromium.org's avatar sgjesse@chromium.org

Optimize calls to GenericBinaryStub.

The calls to GenericBinaryStub can now pass the arguments in registers instead of on the stack. This is supported for ADD, SUB, MUL and DIV. The calling convention of GenericBinaryStub itself is unchanged: the left operand is passed in edx and the right one in eax. When the stub contains the smi code, arguments are still always passed on the stack, because the smi code needs the left and right operands in eax and ebx, so moving them from edx,eax to eax,ebx is not worth it; the smi code also trashes the registers, so arguments passed in registers would have to be saved on the stack anyway.
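
The decision described above can be illustrated with a small standalone C++ sketch (this is not V8 code; StubConfig and ChooseArgumentPassing are hypothetical names used only to model when the register path applies):

// Sketch only: models when GenericBinaryOpStub arguments could be passed in
// registers (left in edx, right in eax) rather than pushed on the stack.
#include <cstdio>

enum Op { ADD, SUB, MUL, DIV, MOD };
enum Passing { ON_STACK, IN_REGISTERS };

struct StubConfig {
  Op op;
  bool smi_code_in_stub;  // the stub still contains the smi fast path
};

// Registers are only used for ADD, SUB, MUL and DIV, and only when the smi
// code is omitted from the stub: the stub's smi code wants its operands in
// eax and ebx and trashes registers, so edx,eax arguments would have to be
// saved on the stack anyway.
Passing ChooseArgumentPassing(const StubConfig& config) {
  bool op_supported = config.op == ADD || config.op == SUB ||
                      config.op == MUL || config.op == DIV;
  return (op_supported && !config.smi_code_in_stub) ? IN_REGISTERS : ON_STACK;
}

int main() {
  StubConfig add_smi_inlined = { ADD, false };
  StubConfig add_smi_in_stub = { ADD, true };
  StubConfig mod_smi_inlined = { MOD, false };
  std::printf("%d %d %d\n",
              ChooseArgumentPassing(add_smi_inlined),   // 1: registers
              ChooseArgumentPassing(add_smi_in_stub),   // 0: stack
              ChooseArgumentPassing(mod_smi_inlined));  // 0: stack (MOD unsupported)
  return 0;
}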

Added flags to disable the use of certain Intel CPU features to make it easier to test different code paths.
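
As a usage illustration only (assuming the usual V8 convention that a DEFINE_bool flag has a negated form such as --noenable_sse2), the flag-gating idea can be modeled by the standalone sketch below; it is not the actual CpuFeatures implementation:

// Sketch only: a boolean flag overrides the CPU probe so that the non-SSE2
// code path can be exercised even on hardware that supports SSE2.
#include <cstdint>
#include <cstdio>
#include <cstring>

static bool FLAG_enable_sse2 = true;  // stand-in for --enable_sse2 / --noenable_sse2

enum Feature { SSE2 = 0, SSE3 = 1, CMOV = 2, RDTSC = 3 };
static uint64_t supported_by_cpu = (1ULL << SSE2) | (1ULL << CMOV);  // fake probe result

static bool IsSupported(Feature f) {
  if (f == SSE2 && !FLAG_enable_sse2) return false;  // the flag wins over the probe
  return (supported_by_cpu & (1ULL << f)) != 0;
}

int main(int argc, char** argv) {
  // Crude stand-in for V8's command-line flag parsing.
  for (int i = 1; i < argc; i++) {
    if (std::strcmp(argv[i], "--noenable_sse2") == 0) FLAG_enable_sse2 = false;
  }
  std::puts(IsSupported(SSE2) ? "taking the SSE2 code path"
                              : "taking the non-SSE2 fallback path");
  return 0;
}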
Review URL: http://codereview.chromium.org/246075

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@3041 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
parent b9e7112d
@@ -96,7 +96,7 @@ private:
//
#define FLAG FLAG_FULL
// assembler-ia32.cc / assembler-arm.cc
// assembler-ia32.cc / assembler-arm.cc / assembler-x64.cc
DEFINE_bool(debug_code, false,
"generate extra code (comments, assertions) for debugging")
DEFINE_bool(emit_branch_hints, false, "emit branch hints")
@@ -104,6 +104,16 @@ DEFINE_bool(push_pop_elimination, true,
"eliminate redundant push/pops in assembly code")
DEFINE_bool(print_push_pop_elimination, false,
"print elimination of redundant push/pops in assembly code")
DEFINE_bool(enable_sse2, true,
"enable use of SSE2 instructions if available")
DEFINE_bool(enable_sse3, true,
"enable use of SSE3 instructions if available")
DEFINE_bool(enable_cmov, true,
"enable use of CMOV instruction if available")
DEFINE_bool(enable_rdtsc, true,
"enable use of RDTSC instruction if available")
DEFINE_bool(enable_sahf, true,
"enable use of SAHF instruction if available (X64 only)")
// bootstrapper.cc
DEFINE_string(expose_natives_as, NULL, "expose natives in global object")
@@ -367,6 +367,10 @@ class CpuFeatures : public AllStatic {
static void Probe();
// Check whether a feature is supported by the target CPU.
static bool IsSupported(Feature f) {
if (f == SSE2 && !FLAG_enable_sse2) return false;
if (f == SSE3 && !FLAG_enable_sse3) return false;
if (f == CMOV && !FLAG_enable_cmov) return false;
if (f == RDTSC && !FLAG_enable_rdtsc) return false;
return (supported_ & (static_cast<uint64_t>(1) << f)) != 0;
}
// Check whether a feature is currently enabled.
This diff is collapsed.
@@ -604,47 +604,62 @@ class CodeGenerator: public AstVisitor {
};
// Flag that indicates whether or not the code that handles smi arguments
// should be placed in the stub, inlined, or omitted entirely.
// Flag that indicates how to generate code for the stub.
enum GenericBinaryFlags {
SMI_CODE_IN_STUB,
SMI_CODE_INLINED
NO_GENERIC_BINARY_FLAGS = 0,
NO_SMI_CODE_IN_STUB = 1 << 0 // Omit smi code in stub.
};
class GenericBinaryOpStub: public CodeStub {
public:
GenericBinaryOpStub(Token::Value op,
GenericBinaryOpStub(Token::Value operation,
OverwriteMode mode,
GenericBinaryFlags flags)
: op_(op), mode_(mode), flags_(flags) {
: op_(operation),
mode_(mode),
flags_(flags),
args_in_registers_(false),
args_reversed_(false) {
use_sse3_ = CpuFeatures::IsSupported(CpuFeatures::SSE3);
ASSERT(OpBits::is_valid(Token::NUM_TOKENS));
}
void GenerateSmiCode(MacroAssembler* masm, Label* slow);
// Generate code to call the stub with the supplied arguments. This adds
// code at the call site to set up the arguments either in registers or on
// the stack, together with the actual call.
void GenerateCall(MacroAssembler* masm, Register left, Register right);
void GenerateCall(MacroAssembler* masm, Register left, Smi* right);
void GenerateCall(MacroAssembler* masm, Smi* left, Register right);
private:
Token::Value op_;
OverwriteMode mode_;
GenericBinaryFlags flags_;
bool args_in_registers_; // Arguments passed in registers, not on the stack.
bool args_reversed_; // Left and right arguments are swapped.
bool use_sse3_;
const char* GetName();
#ifdef DEBUG
void Print() {
PrintF("GenericBinaryOpStub (op %s), (mode %d, flags %d)\n",
PrintF("GenericBinaryOpStub (op %s), "
"(mode %d, flags %d, registers %d, reversed %d)\n",
Token::String(op_),
static_cast<int>(mode_),
static_cast<int>(flags_));
static_cast<int>(flags_),
static_cast<int>(args_in_registers_),
static_cast<int>(args_reversed_));
}
#endif
// Minor key encoding in 16 bits FSOOOOOOOOOOOOMM.
// Minor key encoding in 16 bits FRASOOOOOOOOOOMM.
class ModeBits: public BitField<OverwriteMode, 0, 2> {};
class OpBits: public BitField<Token::Value, 2, 12> {};
class SSE3Bits: public BitField<bool, 14, 1> {};
class OpBits: public BitField<Token::Value, 2, 10> {};
class SSE3Bits: public BitField<bool, 12, 1> {};
class ArgsInRegistersBits: public BitField<bool, 13, 1> {};
class ArgsReversedBits: public BitField<bool, 14, 1> {};
class FlagBits: public BitField<GenericBinaryFlags, 15, 1> {};
Major MajorKey() { return GenericBinaryOp; }
@@ -653,9 +668,30 @@ class GenericBinaryOpStub: public CodeStub {
return OpBits::encode(op_)
| ModeBits::encode(mode_)
| FlagBits::encode(flags_)
| SSE3Bits::encode(use_sse3_);
| SSE3Bits::encode(use_sse3_)
| ArgsInRegistersBits::encode(args_in_registers_)
| ArgsReversedBits::encode(args_reversed_);
}
void Generate(MacroAssembler* masm);
void GenerateSmiCode(MacroAssembler* masm, Label* slow);
void GenerateLoadArguments(MacroAssembler* masm);
void GenerateReturn(MacroAssembler* masm);
bool ArgsInRegistersSupported() {
return ((op_ == Token::ADD) || (op_ == Token::SUB)
|| (op_ == Token::MUL) || (op_ == Token::DIV))
&& flags_ != NO_SMI_CODE_IN_STUB;
}
bool IsOperationCommutative() {
return (op_ == Token::ADD) || (op_ == Token::MUL);
}
void SetArgsInRegisters() { args_in_registers_ = true; }
void SetArgsReversed() { args_reversed_ = true; }
bool HasSmiCodeInStub() { return (flags_ & NO_SMI_CODE_IN_STUB) == 0; }
bool HasArgumentsInRegisters() { return args_in_registers_; }
bool HasArgumentsReversed() { return args_reversed_; }
};
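
As a worked example of the 16-bit minor key layout FRASOOOOOOOOOOMM above (F = flag, R = args reversed, A = args in registers, S = SSE3, ten O bits for the operation, two M bits for the overwrite mode), here is a standalone sketch with a simplified BitField modeled loosely on V8's template; the concrete values are made up:

// Sketch only: packs a hypothetical minor key the same way the bit fields
// above do (mode in bits 0-1, op in bits 2-11, SSE3 in bit 12, args in
// registers in bit 13, args reversed in bit 14, flag in bit 15).
#include <cstdint>
#include <cstdio>

template <class T, int shift, int size>
class BitField {
 public:
  static uint32_t encode(T value) {
    return static_cast<uint32_t>(value) << shift;
  }
};

class ModeBits : public BitField<int, 0, 2> {};
class OpBits : public BitField<int, 2, 10> {};
class SSE3Bits : public BitField<bool, 12, 1> {};
class ArgsInRegistersBits : public BitField<bool, 13, 1> {};
class ArgsReversedBits : public BitField<bool, 14, 1> {};
class FlagBits : public BitField<int, 15, 1> {};

int main() {
  // Made-up values: op = 7, overwrite mode = 1, no SSE3, arguments in
  // registers, not reversed, and the NO_SMI_CODE_IN_STUB flag (1) set.
  uint32_t minor_key = OpBits::encode(7)
                     | ModeBits::encode(1)
                     | FlagBits::encode(1)
                     | SSE3Bits::encode(false)
                     | ArgsInRegistersBits::encode(true)
                     | ArgsReversedBits::encode(false);
  std::printf("minor key = 0x%04x\n", static_cast<unsigned>(minor_key));  // 0xa01d
  return 0;
}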
@@ -150,7 +150,9 @@ namespace internal {
SC(reloc_info_count, V8.RelocInfoCount) \
SC(reloc_info_size, V8.RelocInfoSize) \
SC(zone_segment_bytes, V8.ZoneSegmentBytes) \
SC(compute_entry_frame, V8.ComputeEntryFrame)
SC(compute_entry_frame, V8.ComputeEntryFrame) \
SC(generic_binary_stub_calls, V8.GenericBinaryStubCalls) \
SC(generic_binary_stub_calls_regs, V8.GenericBinaryStubCallsRegs)
// This file contains all the v8 counters that are in use.
@@ -376,6 +376,11 @@ class CpuFeatures : public AllStatic {
static void Probe();
// Check whether a feature is supported by the target CPU.
static bool IsSupported(Feature f) {
if (f == SSE2 && !FLAG_enable_sse2) return false;
if (f == SSE3 && !FLAG_enable_sse3) return false;
if (f == CMOV && !FLAG_enable_cmov) return false;
if (f == RDTSC && !FLAG_enable_rdtsc) return false;
if (f == SAHF && !FLAG_enable_sahf) return false;
return (supported_ & (V8_UINT64_C(1) << f)) != 0;
}
// Check whether a feature is currently enabled.