Commit 91ecc77e authored by lrn@chromium.org

X64: Abstract indexing by a smi to the macro assembler.

Review URL: http://codereview.chromium.org/196118


git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@2887 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
parent 681e67b7
@@ -889,11 +889,11 @@ class Object BASE_EMBEDDED {
// Smi represents integer Numbers that can be stored in 31 bits.
// TODO(X64) Increase to 53 bits?
// Smis are immediate which means they are NOT allocated in the heap.
// The this pointer has the following format: [31 bit signed int] 0
// TODO(X64): 31 bits signed int sign-extended to 63 bits.
// Smi stands for small integer.
// The this pointer has the following format: [31 bit signed int] 0
// On 64-bit, the top 32 bits of the pointer are allowed to have any
// value.
class Smi: public Object {
public:
// Returns the integer value.
......
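Aside: the tag layout described in the comment above can be sanity-checked with a few lines of plain C++. This is only an illustrative sketch of the documented encoding (kSmiTag == 0 and kSmiTagSize == 1 are taken from the asserts elsewhere in this change), not V8 code; the helper names are made up for the example.

#include <cassert>
#include <cstdint>

// Model of the smi layout described above: a 31-bit signed integer shifted
// left by one tag bit, with the tag bit itself being zero.
int64_t TagSmi(int32_t value) { return static_cast<int64_t>(value) * 2; }
int32_t UntagSmi(int64_t smi) { return static_cast<int32_t>(smi / 2); }

int main() {
  const int32_t values[] = {0, 1, -1, 42, -(1 << 30), (1 << 30) - 1};
  for (int32_t v : values) {
    int64_t smi = TagSmi(v);
    assert((smi & 1) == 0);      // The tag bit is zero.
    assert(UntagSmi(smi) == v);  // The value round-trips.
  }
  return 0;
}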
@@ -1728,10 +1728,9 @@ void CodeGenerator::VisitForInStatement(ForInStatement* node) {
// Get the i'th entry of the array.
__ movq(rdx, frame_->ElementAt(2));
// TODO(smi): Find a way to abstract indexing by a smi value.
ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
// Multiplier is times_4 since rax is already a Smi.
__ movq(rbx, FieldOperand(rdx, rax, times_4, FixedArray::kHeaderSize));
SmiIndex index = masm_->SmiToIndex(rbx, rax, kPointerSizeLog2);
__ movq(rbx,
FieldOperand(rdx, index.reg, index.scale, FixedArray::kHeaderSize));
// Get the expected map from the stack or a zero map in the
// permanent slow case rax: current iteration count rbx: i'th entry
@@ -3886,7 +3885,9 @@ void CodeGenerator::GenerateObjectEquals(ZoneList<Expression*>* args) {
void CodeGenerator::GenerateGetFramePointer(ZoneList<Expression*>* args) {
ASSERT(args->length() == 0);
ASSERT(kSmiTag == 0); // RBP value is aligned, so it should look like Smi.
// RBP value is aligned, so it should be tagged as a smi (without necessarily
// being padded as a smi).
ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
Result rbp_as_smi = allocator_->Allocate();
ASSERT(rbp_as_smi.is_valid());
__ movq(rbp_as_smi.reg(), rbp);
@@ -6127,12 +6128,11 @@ void Reference::SetValue(InitState init_state) {
deferred->Branch(not_equal);
// Store the value.
ASSERT_EQ(1, kSmiTagSize);
ASSERT_EQ(0, kSmiTag);
// TODO(lrn) Find way to abstract indexing by smi.
__ movq(Operand(tmp.reg(),
key.reg(),
times_half_pointer_size,
SmiIndex index =
masm->SmiToIndex(kScratchRegister, key.reg(), kPointerSizeLog2);
__ movq(Operand(tmp.reg(),
index.reg,
index.scale,
FixedArray::kHeaderSize - kHeapObjectTag),
value.reg());
__ IncrementCounter(&Counters::keyed_store_inline, 1);
@@ -6660,7 +6660,8 @@ void ArgumentsAccessStub::GenerateNewObject(MacroAssembler* masm) {
// Patch the arguments.length and the parameters pointer.
__ movq(rcx, Operand(rdx, ArgumentsAdaptorFrameConstants::kLengthOffset));
__ movq(Operand(rsp, 1 * kPointerSize), rcx);
__ lea(rdx, Operand(rdx, rcx, times_4, kDisplacement));
SmiIndex index = masm->SmiToIndex(rcx, rcx, kPointerSizeLog2);
__ lea(rdx, Operand(rdx, index.reg, index.scale, kDisplacement));
__ movq(Operand(rsp, 2 * kPointerSize), rdx);
// Do the runtime call to allocate the arguments object.
@@ -6696,13 +6697,10 @@ void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
__ j(above_equal, &slow);
// Read the argument from the stack and return it.
// Shifting code depends on SmiEncoding being equivalent to left shift:
// we multiply by four to get pointer alignment.
// TODO(smi): Find a way to abstract indexing by a smi.
ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
__ lea(rbx, Operand(rbp, rax, times_4, 0));
__ neg(rdx); // TODO(smi): Abstract negative indexing too.
__ movq(rax, Operand(rbx, rdx, times_4, kDisplacement));
SmiIndex index = masm->SmiToIndex(rax, rax, kPointerSizeLog2);
__ lea(rbx, Operand(rbp, index.reg, index.scale, 0));
index = masm->SmiToNegativeIndex(rdx, rdx, kPointerSizeLog2);
__ movq(rax, Operand(rbx, index.reg, index.scale, kDisplacement));
__ Ret();
// Arguments adaptor case: Check index against actual arguments
@@ -6714,13 +6712,10 @@ void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
__ j(above_equal, &slow);
// Read the argument from the stack and return it.
// Shifting code depends on SmiEncoding being equivalent to left shift:
// we multiply by four to get pointer alignment.
// TODO(smi): Find a way to abstract indexing by a smi.
ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
__ lea(rbx, Operand(rbx, rcx, times_4, 0));
__ neg(rdx);
__ movq(rax, Operand(rbx, rdx, times_4, kDisplacement));
index = masm->SmiToIndex(rax, rcx, kPointerSizeLog2);
__ lea(rbx, Operand(rbx, index.reg, index.scale, 0));
index = masm->SmiToNegativeIndex(rdx, rdx, kPointerSizeLog2);
__ movq(rax, Operand(rbx, index.reg, index.scale, kDisplacement));
__ Ret();
// Slow-case: Handle non-smi or out-of-bounds access to arguments
......
@@ -1092,6 +1092,48 @@ void MacroAssembler::SelectNonSmi(Register dst,
}
SmiIndex MacroAssembler::SmiToIndex(Register dst, Register src, int shift) {
ASSERT(is_uint6(shift));
if (shift == 0) { // times_1.
SmiToInteger32(dst, src);
return SmiIndex(dst, times_1);
}
if (shift <= 4) { // Multipliers of 2 to 16 are handled using ScaleFactor.
// We expect that all smis are actually zero-padded. If this holds after
// checking, this line can be omitted.
movl(dst, src); // Ensure that the smi is zero-padded.
return SmiIndex(dst, static_cast<ScaleFactor>(shift - kSmiTagSize));
}
// Shift by shift-kSmiTagSize.
movl(dst, src); // Ensure that the smi is zero-padded.
shl(dst, Immediate(shift - kSmiTagSize));
return SmiIndex(dst, times_1);
}
SmiIndex MacroAssembler::SmiToNegativeIndex(Register dst,
Register src,
int shift) {
// Register src holds a positive smi.
ASSERT(is_uint6(shift));
if (shift == 0) { // times_1.
SmiToInteger32(dst, src);
neg(dst);
return SmiIndex(dst, times_1);
}
if (shift <= 4) { // Multipliers of 2 to 16 are handled using ScaleFactor.
movl(dst, src);
neg(dst);
return SmiIndex(dst, static_cast<ScaleFactor>(shift - kSmiTagSize));
}
// Shift by shift-kSmiTagSize.
movl(dst, src);
neg(dst);
shl(dst, Immediate(shift - kSmiTagSize));
return SmiIndex(dst, times_1);
}
bool MacroAssembler::IsUnsafeSmi(Smi* value) {
return false;
......
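For illustration, here is a plain C++ model of the arithmetic behind SmiToIndex and SmiToNegativeIndex (not V8 code; the helper names are made up): a smi already carries a factor of two from its tag bit, so scaling the tagged value by 2^(shift - kSmiTagSize), whether through a hardware ScaleFactor or an explicit shl, yields the untagged value times 2^shift. The shift == 0 case, which simply untags, is left out of the model.

#include <cassert>
#include <cstdint>

// Plain-C++ model of the scaled-index arithmetic above; assumes the smi is
// positive, as the callers require, and that shift >= 1.
constexpr int kSmiTagSize = 1;

int64_t TagSmi(int32_t value) { return static_cast<int64_t>(value) * 2; }

// Index produced by the ScaleFactor/shl paths: scale the tagged value.
int64_t SmiIndexValue(int64_t tagged_smi, int shift) {
  return tagged_smi * (int64_t{1} << (shift - kSmiTagSize));
}

// Negative variant: negate first, then scale, as SmiToNegativeIndex does.
int64_t SmiNegativeIndexValue(int64_t tagged_smi, int shift) {
  return -tagged_smi * (int64_t{1} << (shift - kSmiTagSize));
}

int main() {
  const int kPointerSizeLog2 = 3;  // 8-byte pointers on x64.
  const int32_t values[] = {0, 1, 7, 1000};
  for (int32_t n : values) {
    for (int shift = 1; shift <= 6; shift++) {
      int64_t expected = static_cast<int64_t>(n) * (int64_t{1} << shift);
      assert(SmiIndexValue(TagSmi(n), shift) == expected);
      assert(SmiNegativeIndexValue(TagSmi(n), shift) == -expected);
    }
  }
  // E.g. indexing pointer-sized elements: element 5 starts 5 * 8 bytes in.
  assert(SmiIndexValue(TagSmi(5), kPointerSizeLog2) == 5 * 8);
  return 0;
}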
@@ -41,6 +41,13 @@ static const Register kScratchRegister = r10;
// Forward declaration.
class JumpTarget;
struct SmiIndex {
SmiIndex(Register index_register, ScaleFactor scale)
: reg(index_register),
scale(scale) {}
Register reg;
ScaleFactor scale;
};
// MacroAssembler implements a collection of frequently used macros.
class MacroAssembler: public Assembler {
@@ -241,7 +248,7 @@ class MacroAssembler: public Assembler {
int32_t constant,
Label* on_not_smi_result);
// Negating a smi can give a negative zero or too larget positive value.
// Negating a smi can give a negative zero or too large positive value.
void SmiNeg(Register dst,
Register src,
Label* on_not_smi_result);
@@ -253,6 +260,7 @@ class MacroAssembler: public Assembler {
Register src1,
Register src2,
Label* on_not_smi_result);
// Subtracts smi values and returns the result as a smi.
// If dst is src1, then src1 will be destroyed, even if
// the operation is unsuccessful.
@@ -260,6 +268,7 @@ class MacroAssembler: public Assembler {
Register src1,
Register src2,
Label* on_not_smi_result);
// Multiplies smi values and returns the result as a smi,
// if possible.
// If dst is src1, then src1 will be destroyed, even if
@@ -335,6 +344,19 @@ class MacroAssembler: public Assembler {
Register src2,
Label* on_not_smis);
// Converts, if necessary, a smi to a combination of number and
// multiplier to be used as a scaled index.
// The src register contains a *positive* smi value. The shift is the
// base-2 logarithm of the factor to multiply the index by (e.g. to index
// by smi-value * kPointerSize, pass the smi and kPointerSizeLog2).
// The returned index register may be either src or dst, depending
// on what is most efficient. If src and dst are different registers,
// src is always unchanged.
SmiIndex SmiToIndex(Register dst, Register src, int shift);
// Converts a positive smi to a negative index.
SmiIndex SmiToNegativeIndex(Register dst, Register src, int shift);
// ---------------------------------------------------------------------------
// Macro instructions
......
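For reference, the call sites changed above consume the returned SmiIndex directly in an indexed operand. The fragment below repeats the for-in pattern from codegen-x64.cc in this change; it is a usage sketch that only makes sense inside V8's x64 code generator, not a standalone program.

// rax holds the smi index, rdx the FixedArray; rbx receives the element.
SmiIndex index = masm_->SmiToIndex(rbx, rax, kPointerSizeLog2);
__ movq(rbx,
        FieldOperand(rdx, index.reg, index.scale, FixedArray::kHeaderSize));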
@@ -47,17 +47,19 @@ static void ProbeTable(MacroAssembler* masm,
StubCache::Table table,
Register name,
Register offset) {
// The offset register must hold a *positive* smi.
ExternalReference key_offset(SCTableReference::keyReference(table));
Label miss;
__ movq(kScratchRegister, key_offset);
SmiIndex index = masm->SmiToIndex(offset, offset, kPointerSizeLog2);
// Check that the key in the entry matches the name.
__ cmpl(name, Operand(kScratchRegister, offset, times_4, 0));
__ cmpl(name, Operand(kScratchRegister, index.reg, index.scale, 0));
__ j(not_equal, &miss);
// Get the code entry from the cache.
// Use key_offset + kPointerSize, rather than loading value_offset.
__ movq(kScratchRegister,
Operand(kScratchRegister, offset, times_4, kPointerSize));
Operand(kScratchRegister, index.reg, index.scale, kPointerSize));
// Check that the flags match what we're looking for.
__ movl(offset, FieldOperand(kScratchRegister, Code::kFlagsOffset));
__ and_(offset, Immediate(~Code::kFlagsNotUsedInLookup));
......