Commit 91ecc77e authored by lrn@chromium.org

X64: Abstract indexing by a smi to the macro assembler.

Review URL: http://codereview.chromium.org/196118


git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@2887 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
parent 681e67b7
@@ -889,11 +889,11 @@ class Object BASE_EMBEDDED {
 // Smi represents integer Numbers that can be stored in 31 bits.
-// TODO(X64) Increase to 53 bits?
 // Smis are immediate which means they are NOT allocated in the heap.
-// The this pointer has the following format: [31 bit signed int] 0
-// TODO(X64): 31 bits signed int sign-extended to 63 bits.
 // Smi stands for small integer.
+// The this pointer has the following format: [31 bit signed int] 0
+// On 64-bit, the top 32 bits of the pointer are allowed to have any
+// value.
 class Smi: public Object {
  public:
   // Returns the integer value.
......
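The comment above describes the smi layout kept on x64: a 31-bit signed payload shifted up by one, a zero tag bit in the low position, and an upper word whose contents do not matter. A minimal standalone sketch of that encoding (plain C++ with hypothetical helper names, not V8 code; kSmiTagSize/kSmiTag mirror the V8 constants):

#include <cassert>
#include <cstdint>

constexpr int kSmiTagSize = 1;   // One tag bit at the low end of the word.
constexpr int64_t kSmiTag = 0;   // Tag value identifying a smi.

int64_t TagSmi(int32_t value) {
  // Equivalent to (value << kSmiTagSize) | kSmiTag: payload in bits 1..31,
  // low tag bit clear.
  return static_cast<int64_t>(value) * 2 + kSmiTag;
}

int32_t UntagSmi(int64_t smi) {
  // Arithmetic shift right by the tag size recovers the signed payload.
  return static_cast<int32_t>(smi >> kSmiTagSize);
}

int main() {
  assert(UntagSmi(TagSmi(-12345)) == -12345);
  assert((TagSmi(7) & 1) == kSmiTag);  // The low (tag) bit is zero.
  return 0;
}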
@@ -1728,10 +1728,9 @@ void CodeGenerator::VisitForInStatement(ForInStatement* node) {
   // Get the i'th entry of the array.
   __ movq(rdx, frame_->ElementAt(2));
-  // TODO(smi): Find a way to abstract indexing by a smi value.
-  ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
-  // Multiplier is times_4 since rax is already a Smi.
-  __ movq(rbx, FieldOperand(rdx, rax, times_4, FixedArray::kHeaderSize));
+  SmiIndex index = masm_->SmiToIndex(rbx, rax, kPointerSizeLog2);
+  __ movq(rbx,
+          FieldOperand(rdx, index.reg, index.scale, FixedArray::kHeaderSize));
 
   // Get the expected map from the stack or a zero map in the
   // permanent slow case rax: current iteration count rbx: i'th entry
@@ -3886,7 +3885,9 @@ void CodeGenerator::GenerateObjectEquals(ZoneList<Expression*>* args) {
 void CodeGenerator::GenerateGetFramePointer(ZoneList<Expression*>* args) {
   ASSERT(args->length() == 0);
-  ASSERT(kSmiTag == 0);  // RBP value is aligned, so it should look like Smi.
+  // RBP value is aligned, so it should be tagged as a smi (without necessarily
+  // being padded as a smi).
+  ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
   Result rbp_as_smi = allocator_->Allocate();
   ASSERT(rbp_as_smi.is_valid());
   __ movq(rbp_as_smi.reg(), rbp);
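The assertion in this hunk relies on rbp being pointer-aligned, so its low bit (the smi tag bit) is already zero and the raw frame-pointer value passes for a smi. A tiny standalone check of that observation (plain C++, not V8 code):

#include <cassert>
#include <cstdint>

int main() {
  alignas(8) int64_t frame_slot = 0;  // Stand-in for an aligned stack slot.
  uintptr_t frame_pointer = reinterpret_cast<uintptr_t>(&frame_slot);
  const uintptr_t kSmiTagMask = 1;    // One tag bit (kSmiTagSize == 1).
  // An aligned address always has its low bit clear, i.e. the smi tag (0).
  assert((frame_pointer & kSmiTagMask) == 0);
  return 0;
}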
@@ -6127,12 +6128,11 @@ void Reference::SetValue(InitState init_state) {
       deferred->Branch(not_equal);
 
       // Store the value.
-      ASSERT_EQ(1, kSmiTagSize);
-      ASSERT_EQ(0, kSmiTag);
-      // TODO(lrn) Find way to abstract indexing by smi.
-      __ movq(Operand(tmp.reg(),
-                      key.reg(),
-                      times_half_pointer_size,
+      SmiIndex index =
+          masm->SmiToIndex(kScratchRegister, key.reg(), kPointerSizeLog2);
+      __ movq(Operand(tmp.reg(),
+                      index.reg,
+                      index.scale,
                       FixedArray::kHeaderSize - kHeapObjectTag),
               value.reg());
       __ IncrementCounter(&Counters::keyed_store_inline, 1);
@@ -6660,7 +6660,8 @@ void ArgumentsAccessStub::GenerateNewObject(MacroAssembler* masm) {
   // Patch the arguments.length and the parameters pointer.
   __ movq(rcx, Operand(rdx, ArgumentsAdaptorFrameConstants::kLengthOffset));
   __ movq(Operand(rsp, 1 * kPointerSize), rcx);
-  __ lea(rdx, Operand(rdx, rcx, times_4, kDisplacement));
+  SmiIndex index = masm->SmiToIndex(rcx, rcx, kPointerSizeLog2);
+  __ lea(rdx, Operand(rdx, index.reg, index.scale, kDisplacement));
   __ movq(Operand(rsp, 2 * kPointerSize), rdx);
 
   // Do the runtime call to allocate the arguments object.
@@ -6696,13 +6697,10 @@ void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
   __ j(above_equal, &slow);
 
   // Read the argument from the stack and return it.
-  // Shifting code depends on SmiEncoding being equivalent to left shift:
-  // we multiply by four to get pointer alignment.
-  // TODO(smi): Find a way to abstract indexing by a smi.
-  ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
-  __ lea(rbx, Operand(rbp, rax, times_4, 0));
-  __ neg(rdx);  // TODO(smi): Abstract negative indexing too.
-  __ movq(rax, Operand(rbx, rdx, times_4, kDisplacement));
+  SmiIndex index = masm->SmiToIndex(rax, rax, kPointerSizeLog2);
+  __ lea(rbx, Operand(rbp, index.reg, index.scale, 0));
+  index = masm->SmiToNegativeIndex(rdx, rdx, kPointerSizeLog2);
+  __ movq(rax, Operand(rbx, index.reg, index.scale, kDisplacement));
   __ Ret();
 
   // Arguments adaptor case: Check index against actual arguments
@@ -6714,13 +6712,10 @@ void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
   __ j(above_equal, &slow);
 
   // Read the argument from the stack and return it.
-  // Shifting code depends on SmiEncoding being equivalent to left shift:
-  // we multiply by four to get pointer alignment.
-  // TODO(smi): Find a way to abstract indexing by a smi.
-  ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
-  __ lea(rbx, Operand(rbx, rcx, times_4, 0));
-  __ neg(rdx);
-  __ movq(rax, Operand(rbx, rdx, times_4, kDisplacement));
+  index = masm->SmiToIndex(rax, rcx, kPointerSizeLog2);
+  __ lea(rbx, Operand(rbx, index.reg, index.scale, 0));
+  index = masm->SmiToNegativeIndex(rdx, rdx, kPointerSizeLog2);
+  __ movq(rax, Operand(rbx, index.reg, index.scale, kDisplacement));
   __ Ret();
 
   // Slow-case: Handle non-smi or out-of-bounds access to arguments
......
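The two GenerateReadElement hunks above combine a positive scaled index (the argument count) with a negative scaled index (the key) to address an argument slot relative to the frame. A rough standalone model of that arithmetic (plain C++, simplified register roles, and a made-up stand-in value for the stub's kDisplacement constant):

#include <cassert>
#include <cstdint>

constexpr int kSmiTagSize = 1;
constexpr int kPointerSize = 8;
constexpr int kPointerSizeLog2 = 3;

// Byte offset contributed by "index.reg * index.scale" for a tagged smi:
// smi * 2^(shift - kSmiTagSize) == payload * 2^shift.
int64_t ScaledIndex(int64_t tagged_smi, int shift) {
  return tagged_smi << (shift - kSmiTagSize);
}

int main() {
  const int64_t count_smi = 10 << kSmiTagSize;  // Tagged argument count of 10.
  const int64_t key_smi = 3 << kSmiTagSize;     // Tagged key 3.
  const int64_t frame_base = 0x100000;          // Stand-in for rbp.
  const int64_t displacement = 16;              // Stand-in for kDisplacement.

  const int64_t address = frame_base
                          + ScaledIndex(count_smi, kPointerSizeLog2)  // lea
                          - ScaledIndex(key_smi, kPointerSizeLog2)    // negative index
                          + displacement;                             // movq operand
  assert(address == frame_base + (10 - 3) * kPointerSize + displacement);
  return 0;
}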
@@ -1092,6 +1092,48 @@ void MacroAssembler::SelectNonSmi(Register dst,
 }
 
 
+SmiIndex MacroAssembler::SmiToIndex(Register dst, Register src, int shift) {
+  ASSERT(is_uint6(shift));
+  if (shift == 0) {  // times_1.
+    SmiToInteger32(dst, src);
+    return SmiIndex(dst, times_1);
+  }
+  if (shift <= 4) {  // 2 - 16 times multiplier is handled using ScaleFactor.
+    // We expect that all smis are actually zero-padded. If this holds after
+    // checking, this line can be omitted.
+    movl(dst, src);  // Ensure that the smi is zero-padded.
+    return SmiIndex(dst, static_cast<ScaleFactor>(shift - kSmiTagSize));
+  }
+  // Shift by shift-kSmiTagSize.
+  movl(dst, src);  // Ensure that the smi is zero-padded.
+  shl(dst, Immediate(shift - kSmiTagSize));
+  return SmiIndex(dst, times_1);
+}
+
+
+SmiIndex MacroAssembler::SmiToNegativeIndex(Register dst,
                                            Register src,
                                            int shift) {
+  // Register src holds a positive smi.
+  ASSERT(is_uint6(shift));
+  if (shift == 0) {  // times_1.
+    SmiToInteger32(dst, src);
+    neg(dst);
+    return SmiIndex(dst, times_1);
+  }
+  if (shift <= 4) {  // 2 - 16 times multiplier is handled using ScaleFactor.
+    movl(dst, src);
+    neg(dst);
+    return SmiIndex(dst, static_cast<ScaleFactor>(shift - kSmiTagSize));
+  }
+  // Shift by shift-kSmiTagSize.
+  movl(dst, src);
+  neg(dst);
+  shl(dst, Immediate(shift - kSmiTagSize));
+  return SmiIndex(dst, times_1);
+}
+
+
 bool MacroAssembler::IsUnsafeSmi(Smi* value) {
   return false;
......
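The trick behind SmiToIndex above is that a tagged smi is already the payload multiplied by 2^kSmiTagSize, so indexing by payload * 2^shift only needs a hardware scale of 2^(shift - kSmiTagSize); shifts of four or less fit x64's 1/2/4/8 addressing scales, and larger shifts fall back to an explicit shl. A standalone sketch of that arithmetic (plain C++, not V8 code):

#include <cassert>
#include <cstdint>

constexpr int kSmiTagSize = 1;

// Byte offset the emitted code produces for a tagged smi and a requested
// power-of-two multiplier (2^shift) on the untagged payload.
int64_t SmiScaledOffset(int64_t tagged_smi, int shift) {
  if (shift == 0) {
    // SmiToInteger32 + times_1: untag and use the payload directly.
    return tagged_smi >> kSmiTagSize;
  }
  if (shift <= 4) {
    // ScaleFactor of 2^(shift - kSmiTagSize) applied to the still-tagged smi.
    return tagged_smi << (shift - kSmiTagSize);
  }
  // Larger shifts: explicit shl by (shift - kSmiTagSize), then times_1.
  return tagged_smi << (shift - kSmiTagSize);
}

int main() {
  const int64_t smi = 5 << kSmiTagSize;       // Tagged smi for 5.
  assert(SmiScaledOffset(smi, 0) == 5);       // payload * 1
  assert(SmiScaledOffset(smi, 3) == 5 * 8);   // payload * kPointerSize
  assert(SmiScaledOffset(smi, 6) == 5 * 64);  // beyond times_8, uses shl
  return 0;
}

Returning a SmiIndex (register plus ScaleFactor) lets call sites plug the result straight into an x64 addressing mode instead of untagging by hand, which is exactly what the codegen and stub hunks in this commit do.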
@@ -41,6 +41,13 @@ static const Register kScratchRegister = r10;
 // Forward declaration.
 class JumpTarget;
 
+struct SmiIndex {
+  SmiIndex(Register index_register, ScaleFactor scale)
+      : reg(index_register),
+        scale(scale) {}
+  Register reg;
+  ScaleFactor scale;
+};
 
 // MacroAssembler implements a collection of frequently used macros.
 class MacroAssembler: public Assembler {
@@ -241,7 +248,7 @@ class MacroAssembler: public Assembler {
                  int32_t constant,
                  Label* on_not_smi_result);
 
-  // Negating a smi can give a negative zero or too larget positive value.
+  // Negating a smi can give a negative zero or too large positive value.
   void SmiNeg(Register dst,
               Register src,
               Label* on_not_smi_result);
@@ -253,6 +260,7 @@ class MacroAssembler: public Assembler {
               Register src1,
               Register src2,
               Label* on_not_smi_result);
+
   // Subtracts smi values and return the result as a smi.
   // If dst is src1, then src1 will be destroyed, even if
   // the operation is unsuccessful.
@@ -260,6 +268,7 @@ class MacroAssembler: public Assembler {
               Register src1,
               Register src2,
               Label* on_not_smi_result);
+
   // Multiplies smi values and return the result as a smi,
   // if possible.
   // If dst is src1, then src1 will be destroyed, even if
@@ -335,6 +344,19 @@ class MacroAssembler: public Assembler {
                    Register src2,
                    Label* on_not_smis);
 
+  // Converts, if necessary, a smi to a combination of number and
+  // multiplier to be used as a scaled index.
+  // The src register contains a *positive* smi value. The shift is the
+  // power of two to multiply the index value by (e.g.
+  // to index by smi-value * kPointerSize, pass the smi and kPointerSizeLog2).
+  // The returned index register may be either src or dst, depending
+  // on what is most efficient. If src and dst are different registers,
+  // src is always unchanged.
+  SmiIndex SmiToIndex(Register dst, Register src, int shift);
+
+  // Converts a positive smi to a negative index.
+  SmiIndex SmiToNegativeIndex(Register dst, Register src, int shift);
+
   // ---------------------------------------------------------------------------
   // Macro instructions
......
@@ -47,17 +47,19 @@ static void ProbeTable(MacroAssembler* masm,
                        StubCache::Table table,
                        Register name,
                        Register offset) {
+  // The offset register must hold a *positive* smi.
   ExternalReference key_offset(SCTableReference::keyReference(table));
   Label miss;
 
   __ movq(kScratchRegister, key_offset);
+  SmiIndex index = masm->SmiToIndex(offset, offset, kPointerSizeLog2);
   // Check that the key in the entry matches the name.
-  __ cmpl(name, Operand(kScratchRegister, offset, times_4, 0));
+  __ cmpl(name, Operand(kScratchRegister, index.reg, index.scale, 0));
   __ j(not_equal, &miss);
   // Get the code entry from the cache.
   // Use key_offset + kPointerSize, rather than loading value_offset.
   __ movq(kScratchRegister,
-          Operand(kScratchRegister, offset, times_4, kPointerSize));
+          Operand(kScratchRegister, index.reg, index.scale, kPointerSize));
   // Check that the flags match what we're looking for.
   __ movl(offset, FieldOperand(kScratchRegister, Code::kFlagsOffset));
   __ and_(offset, Immediate(~Code::kFlagsNotUsedInLookup));
......
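In ProbeTable the probe offset arrives as a positive smi, SmiToIndex turns it into a pointer-scaled byte offset, and the code entry is then read one pointer beyond the key slot (the key_offset + kPointerSize comment in the hunk). A small arithmetic check of that addressing (standalone C++ with toy values, not the real StubCache layout):

#include <cassert>
#include <cstdint>

constexpr int kSmiTagSize = 1;
constexpr int kPointerSize = 8;
constexpr int kPointerSizeLog2 = 3;

int main() {
  const int64_t table_base = 0x4000;                 // Stand-in for key_offset.
  const int entry = 6;                               // Untagged probe index.
  const int64_t offset_smi = entry << kSmiTagSize;   // Positive smi in 'offset'.

  // SmiToIndex(offset, offset, kPointerSizeLog2) scales the smi so that the
  // operand addresses table_base + entry * kPointerSize.
  const int64_t key_slot =
      table_base + (offset_smi << (kPointerSizeLog2 - kSmiTagSize));
  // The code entry is read from the next pointer-sized slot.
  const int64_t code_slot = key_slot + kPointerSize;

  assert(key_slot == table_base + entry * kPointerSize);
  assert(code_slot == table_base + entry * kPointerSize + kPointerSize);
  return 0;
}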