Commit 5f1f2adb authored by ager@chromium.org

Landing for Rodolph Perfetta.

Reduces the number of movw/movt instructions generated in ProbeTable on ARM. This
improves code size for CompileCallMegamorphic by about 10%.

BUG=none
TEST=none

Review URL: http://codereview.chromium.org/4220004/show

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@5744 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
parent 1dc2c44e
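How the saving works: on ARMv7, materializing an arbitrary 32-bit constant such as
an ExternalReference address takes a movw/movt pair. The old ProbeTable loaded the
key-table address, the value-table address, and the value-table address again for
the reload, three pairs per probe, and GenerateProbe probes twice. The new version
loads the key-table base once into a scratch register and reaches the value table
by adding the small, statically known distance between the two tables (asserted in
the patch to be under 256 * 4 bytes), which fits in a single add. The sketch below
is a minimal standalone illustration of that address-reuse idea, not V8 code; the
table names and probe functions are hypothetical.

// Minimal sketch of the address-reuse trick (not V8 code; hypothetical names).
#include <cstddef>
#include <cstdint>
#include <cstdio>

// Stand-ins for the stub cache's key and value tables. In V8 both live in
// one Entry array, so their base addresses are a small fixed distance apart.
static uint32_t keys[8]   = {11, 22, 33, 44, 55, 66, 77, 88};
static uint32_t values[8] = {1, 2, 3, 4, 5, 6, 7, 8};

// Old scheme: every table reference names a full 32-bit base address, the
// moral equivalent of one movw/movt pair per reference in the hand-written
// assembly (three, counting the reload of the code entry).
static uint32_t probe_old(uint32_t index) {
  uint32_t key = keys[index];       // movw/movt keys
  uint32_t val = values[index];     // movw/movt values
  uint32_t reload = values[index];  // movw/movt values again
  return key + val + reload;
}

// New scheme: build the key-table base once, then derive the value table as
// base + delta. The delta is small (the patch asserts it is < 256 * 4), so
// it fits in a single ADD immediate instead of another movw/movt pair.
static uint32_t probe_new(uint32_t index) {
  const char* base = reinterpret_cast<const char*>(keys);  // movw/movt once
  const std::ptrdiff_t delta =
      reinterpret_cast<const char*>(values) - base;        // small constant
  const uint32_t* key_table = reinterpret_cast<const uint32_t*>(base);
  const uint32_t* value_table =
      reinterpret_cast<const uint32_t*>(base + delta);     // one ADD
  uint32_t key = key_table[index];
  uint32_t val = value_table[index];
  uint32_t reload = value_table[index];  // base is still in a register
  return key + val + reload;
}

int main() {
  // Both schemes read the same entries: 44 + 4 + 4 == 52.
  std::printf("%u %u\n", probe_old(3), probe_new(3));
  return 0;
}

The second scratch register plays a related role: in the patched ProbeTable the
loaded code entry goes into scratch2 instead of clobbering offset, so the old
push/pop of offset and the second materialization of value_offset both disappear.
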
src/arm/ic-arm.cc
@@ -544,7 +544,7 @@ static void GenerateMonomorphicCacheProbe(MacroAssembler* masm,
   // Probe the stub cache.
   Code::Flags flags =
       Code::ComputeFlags(kind, NOT_IN_LOOP, MONOMORPHIC, NORMAL, argc);
-  StubCache::GenerateProbe(masm, flags, r1, r2, r3, no_reg);
+  StubCache::GenerateProbe(masm, flags, r1, r2, r3, r4, r5);
 
   // If the stub cache probing failed, the receiver might be a value.
   // For value objects, we use the map of the prototype objects for
@@ -583,7 +583,7 @@ static void GenerateMonomorphicCacheProbe(MacroAssembler* masm,
   // Probe the stub cache for the value object.
   __ bind(&probe);
-  StubCache::GenerateProbe(masm, flags, r1, r2, r3, no_reg);
+  StubCache::GenerateProbe(masm, flags, r1, r2, r3, r4, r5);
 
   __ bind(&miss);
 }
@@ -858,7 +858,7 @@ void LoadIC::GenerateMegamorphic(MacroAssembler* masm) {
   Code::Flags flags = Code::ComputeFlags(Code::LOAD_IC,
                                          NOT_IN_LOOP,
                                          MONOMORPHIC);
-  StubCache::GenerateProbe(masm, flags, r0, r2, r3, no_reg);
+  StubCache::GenerateProbe(masm, flags, r0, r2, r3, r4, r5);
 
   // Cache miss: Jump to runtime.
   GenerateMiss(masm);
@@ -2163,7 +2163,7 @@ void StoreIC::GenerateMegamorphic(MacroAssembler* masm) {
   Code::Flags flags = Code::ComputeFlags(Code::STORE_IC,
                                          NOT_IN_LOOP,
                                          MONOMORPHIC);
-  StubCache::GenerateProbe(masm, flags, r1, r2, r3, no_reg);
+  StubCache::GenerateProbe(masm, flags, r1, r2, r3, r4, r5);
 
   // Cache miss: Jump to runtime.
   GenerateMiss(masm);
src/arm/stub-cache-arm.cc
@@ -43,43 +43,49 @@ static void ProbeTable(MacroAssembler* masm,
                        Code::Flags flags,
                        StubCache::Table table,
                        Register name,
-                       Register offset) {
+                       Register offset,
+                       Register scratch,
+                       Register scratch2) {
   ExternalReference key_offset(SCTableReference::keyReference(table));
   ExternalReference value_offset(SCTableReference::valueReference(table));
 
-  Label miss;
+  uint32_t key_off_addr = reinterpret_cast<uint32_t>(key_offset.address());
+  uint32_t value_off_addr = reinterpret_cast<uint32_t>(value_offset.address());
 
-  // Save the offset on the stack.
-  __ push(offset);
+  // Check the relative positions of the address fields.
+  ASSERT(value_off_addr > key_off_addr);
+  ASSERT((value_off_addr - key_off_addr) % 4 == 0);
+  ASSERT((value_off_addr - key_off_addr) < (256 * 4));
+
+  Label miss;
+  Register offsets_base_addr = scratch;
 
   // Check that the key in the entry matches the name.
-  __ mov(ip, Operand(key_offset));
-  __ ldr(ip, MemOperand(ip, offset, LSL, 1));
+  __ mov(offsets_base_addr, Operand(key_offset));
+  __ ldr(ip, MemOperand(offsets_base_addr, offset, LSL, 1));
   __ cmp(name, ip);
   __ b(ne, &miss);
 
   // Get the code entry from the cache.
-  __ mov(ip, Operand(value_offset));
-  __ ldr(offset, MemOperand(ip, offset, LSL, 1));
+  __ add(offsets_base_addr, offsets_base_addr,
+         Operand(value_off_addr - key_off_addr));
+  __ ldr(scratch2, MemOperand(offsets_base_addr, offset, LSL, 1));
 
   // Check that the flags match what we're looking for.
-  __ ldr(offset, FieldMemOperand(offset, Code::kFlagsOffset));
-  __ and_(offset, offset, Operand(~Code::kFlagsNotUsedInLookup));
-  __ cmp(offset, Operand(flags));
+  __ ldr(scratch2, FieldMemOperand(scratch2, Code::kFlagsOffset));
+  __ bic(scratch2, scratch2, Operand(Code::kFlagsNotUsedInLookup));
+  __ cmp(scratch2, Operand(flags));
   __ b(ne, &miss);
 
-  // Restore offset and re-load code entry from cache.
-  __ pop(offset);
-  __ mov(ip, Operand(value_offset));
-  __ ldr(offset, MemOperand(ip, offset, LSL, 1));
+  // Re-load code entry from cache.
+  __ ldr(offset, MemOperand(offsets_base_addr, offset, LSL, 1));
 
   // Jump to the first instruction in the code stub.
   __ add(offset, offset, Operand(Code::kHeaderSize - kHeapObjectTag));
   __ Jump(offset);
 
-  // Miss: Restore offset and fall through.
+  // Miss: fall through.
   __ bind(&miss);
-  __ pop(offset);
 }
@@ -201,7 +207,8 @@ void StubCache::GenerateProbe(MacroAssembler* masm,
                               Register receiver,
                               Register name,
                               Register scratch,
-                              Register extra) {
+                              Register extra,
+                              Register extra2) {
   Label miss;
 
   // Make sure that code is valid. The shifting code relies on the
@@ -214,6 +221,18 @@ void StubCache::GenerateProbe(MacroAssembler* masm,
   // Make sure that there are no register conflicts.
   ASSERT(!scratch.is(receiver));
   ASSERT(!scratch.is(name));
+  ASSERT(!extra.is(receiver));
+  ASSERT(!extra.is(name));
+  ASSERT(!extra.is(scratch));
+  ASSERT(!extra2.is(receiver));
+  ASSERT(!extra2.is(name));
+  ASSERT(!extra2.is(scratch));
+  ASSERT(!extra2.is(extra));
+
+  // Check scratch, extra and extra2 registers are valid.
+  ASSERT(!scratch.is(no_reg));
+  ASSERT(!extra.is(no_reg));
+  ASSERT(!extra2.is(no_reg));
 
   // Check that the receiver isn't a smi.
   __ tst(receiver, Operand(kSmiTagMask));
@@ -229,7 +248,7 @@ void StubCache::GenerateProbe(MacroAssembler* masm,
          Operand((kPrimaryTableSize - 1) << kHeapObjectTagSize));
 
   // Probe the primary table.
-  ProbeTable(masm, flags, kPrimary, name, scratch);
+  ProbeTable(masm, flags, kPrimary, name, scratch, extra, extra2);
 
   // Primary miss: Compute hash for secondary probe.
   __ sub(scratch, scratch, Operand(name));
@@ -239,7 +258,7 @@ void StubCache::GenerateProbe(MacroAssembler* masm,
          Operand((kSecondaryTableSize - 1) << kHeapObjectTagSize));
 
   // Probe the secondary table.
-  ProbeTable(masm, flags, kSecondary, name, scratch);
+  ProbeTable(masm, flags, kSecondary, name, scratch, extra, extra2);
 
   // Cache miss: Fall-through and let caller handle the miss by
   // entering the runtime system.
src/ia32/stub-cache-ia32.cc
@@ -206,8 +206,10 @@ void StubCache::GenerateProbe(MacroAssembler* masm,
                               Register receiver,
                               Register name,
                               Register scratch,
-                              Register extra) {
+                              Register extra,
+                              Register extra2) {
   Label miss;
+  USE(extra2);  // The register extra2 is not used on the ia32 platform.
 
   // Make sure that code is valid. The shifting code relies on the
   // entry size being 8.
@@ -223,6 +225,11 @@ void StubCache::GenerateProbe(MacroAssembler* masm,
   ASSERT(!extra.is(name));
   ASSERT(!extra.is(scratch));
 
+  // Check scratch and extra registers are valid, and extra2 is unused.
+  ASSERT(!scratch.is(no_reg));
+  ASSERT(!extra.is(no_reg));
+  ASSERT(extra2.is(no_reg));
+
   // Check that the receiver isn't a smi.
   __ test(receiver, Immediate(kSmiTagMask));
   __ j(zero, &miss, not_taken);
src/mips/stub-cache-mips.cc
@@ -44,7 +44,8 @@ void StubCache::GenerateProbe(MacroAssembler* masm,
                               Register receiver,
                               Register name,
                               Register scratch,
-                              Register extra) {
+                              Register extra,
+                              Register extra2) {
   UNIMPLEMENTED_MIPS();
 }
src/stub-cache.h
@@ -241,13 +241,15 @@ class StubCache : public AllStatic {
   static void Clear();
 
   // Generate code for probing the stub cache table.
-  // If extra != no_reg it might be used as am extra scratch register.
+  // Arguments extra and extra2 may be used to pass additional scratch
+  // registers. Set to no_reg if not needed.
   static void GenerateProbe(MacroAssembler* masm,
                             Code::Flags flags,
                             Register receiver,
                             Register name,
                             Register scratch,
-                            Register extra);
+                            Register extra,
+                            Register extra2 = no_reg);
 
   enum Table {
     kPrimary,
src/x64/stub-cache-x64.cc
@@ -273,9 +273,11 @@ void StubCache::GenerateProbe(MacroAssembler* masm,
                               Register receiver,
                               Register name,
                               Register scratch,
-                              Register extra) {
+                              Register extra,
+                              Register extra2) {
   Label miss;
   USE(extra);  // The register extra is not used on the X64 platform.
+  USE(extra2);  // The register extra2 is not used on the X64 platform.
 
   // Make sure that code is valid. The shifting code relies on the
   // entry size being 16.
   ASSERT(sizeof(Entry) == 16);
@@ -287,6 +289,11 @@ void StubCache::GenerateProbe(MacroAssembler* masm,
   ASSERT(!scratch.is(receiver));
   ASSERT(!scratch.is(name));
 
+  // Check scratch register is valid, extra and extra2 are unused.
+  ASSERT(!scratch.is(no_reg));
+  ASSERT(extra.is(no_reg));
+  ASSERT(extra2.is(no_reg));
+
   // Check that the receiver isn't a smi.
   __ JumpIfSmi(receiver, &miss);