Remove support for inlined property loads and stores.

The full code generator does not generate inline code for
property loads and stores, so all of this patching machinery
is unused now that Crankshaft is the optimizing compiler.
Review URL: http://codereview.chromium.org/6850015

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@7623 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
parent cc782be6
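
For context: the machinery removed below relied on a byte-level convention at IC call sites. The full code generator emitted a one-byte nop (0x90) after calls to (keyed) load/store ICs to signal "no inlined code", while sites with an inlined fast case were followed by a test eax instruction carrying the patch-site delta. A minimal sketch of the detection step on ia32/x64 (illustrative only, not V8's actual API):

    #include <cstdint>

    const uint8_t kTestEaxByte = 0xA9;  // opcode of 'test eax, imm32'
    const uint8_t kNopByte     = 0x90;  // marker for "no inlined code"

    // An IC call site is patchable only if the instruction right after
    // the call is a 'test eax' whose immediate encodes the patch deltas.
    bool SiteHasInlinedCode(const uint8_t* after_call) {
      return *after_call == kTestEaxByte;
    }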
@@ -4295,7 +4295,6 @@ void FullCodeGenerator::EmitCallIC(Handle<Code> ic, RelocInfo::Mode mode) {
default:
break;
}
__ Call(ic, mode);
}
@@ -4317,7 +4316,6 @@ void FullCodeGenerator::EmitCallIC(Handle<Code> ic, JumpPatchSite* patch_site) {
default:
break;
}
__ Call(ic, RelocInfo::CODE_TARGET);
if (patch_site != NULL && patch_site->is_bound()) {
patch_site->EmitPatchInfo();
@@ -926,217 +926,6 @@ void LoadIC::GenerateMiss(MacroAssembler* masm) {
__ TailCallExternalReference(ref, 2, 1);
}
// Returns the code marker, or 0 if the code is not marked.
static inline int InlinedICSiteMarker(Address address,
Address* inline_end_address) {
if (V8::UseCrankshaft()) return 0;
// If the instruction after the call site is not the pseudo instruction nop1
// then this is not related to an inlined in-object property load. The nop1
// instruction is located just after the call to the IC in the deferred code
// handling the miss in the inlined code. After the nop1 instruction there is
// a branch instruction for jumping back from the deferred code.
Address address_after_call = address + Assembler::kCallTargetAddressOffset;
Instr instr_after_call = Assembler::instr_at(address_after_call);
int code_marker = MacroAssembler::GetCodeMarker(instr_after_call);
// A negative result means the code is not marked.
if (code_marker <= 0) return 0;
Address address_after_nop = address_after_call + Assembler::kInstrSize;
Instr instr_after_nop = Assembler::instr_at(address_after_nop);
// There may be some reg-reg move and frame merging code to skip over before
// the branch back from the DeferredReferenceGetKeyedValue code to the inlined
// code.
while (!Assembler::IsBranch(instr_after_nop)) {
address_after_nop += Assembler::kInstrSize;
instr_after_nop = Assembler::instr_at(address_after_nop);
}
// Find the end of the inlined code for handling the load.
int b_offset =
Assembler::GetBranchOffset(instr_after_nop) + Assembler::kPcLoadDelta;
ASSERT(b_offset < 0); // Jumping back from deferred code.
*inline_end_address = address_after_nop + b_offset;
return code_marker;
}
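
The loop above scans forward to the branch that jumps back from the deferred code and decodes its offset. A self-contained sketch of that decoding, assuming the classic ARM B-instruction encoding (signed 24-bit immediate scaled by 4, plus the 8-byte PC read-ahead that V8 calls kPcLoadDelta):

    #include <cstdint>

    // Decode the PC-relative delta of an ARM 'b' instruction.
    int32_t BranchDelta(uint32_t instr) {
      // Move the 24-bit immediate to the top bits, then arithmetic-shift
      // back by 6: this sign-extends and multiplies by 4 in one step.
      int32_t imm = static_cast<int32_t>(instr << 8) >> 6;
      return imm + 8;  // account for the ARM pipeline's PC offset
    }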
bool LoadIC::PatchInlinedLoad(Address address, Object* map, int offset) {
if (V8::UseCrankshaft()) return false;
// Find the end of the inlined code for handling the load if this is an
// inlined IC call site.
Address inline_end_address = 0;
if (InlinedICSiteMarker(address, &inline_end_address)
!= Assembler::PROPERTY_ACCESS_INLINED) {
return false;
}
// Patch the offset of the property load instruction (ldr r0, [r1, #+XXX]).
// The immediate must be representable in 12 bits.
ASSERT((JSObject::kMaxInstanceSize - JSObject::kHeaderSize) < (1 << 12));
Address ldr_property_instr_address =
inline_end_address - Assembler::kInstrSize;
ASSERT(Assembler::IsLdrRegisterImmediate(
Assembler::instr_at(ldr_property_instr_address)));
Instr ldr_property_instr = Assembler::instr_at(ldr_property_instr_address);
ldr_property_instr = Assembler::SetLdrRegisterImmediateOffset(
ldr_property_instr, offset - kHeapObjectTag);
Assembler::instr_at_put(ldr_property_instr_address, ldr_property_instr);
// Indicate that code has changed.
CPU::FlushICache(ldr_property_instr_address, 1 * Assembler::kInstrSize);
// Patch the map check.
// For PROPERTY_ACCESS_INLINED, the load map instruction is generated
// 4 instructions before the end of the inlined code.
// See codegen-arm.cc CodeGenerator::EmitNamedLoad.
int ldr_map_offset = -4;
Address ldr_map_instr_address =
inline_end_address + ldr_map_offset * Assembler::kInstrSize;
Assembler::set_target_address_at(ldr_map_instr_address,
reinterpret_cast<Address>(map));
return true;
}
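
SetLdrRegisterImmediateOffset rewrites the 12-bit immediate field of the ldr in place. A toy version of that bit manipulation (assumes a non-negative offset, so the add/subtract bit of the encoding is untouched):

    #include <cstdint>

    // Replace the 12-bit immediate of 'ldr rd, [rn, #imm]'.
    uint32_t SetLdrOffset(uint32_t ldr_instr, uint32_t offset) {
      // Caller must guarantee offset < (1u << 12).
      return (ldr_instr & ~0xFFFu) | (offset & 0xFFFu);
    }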
bool LoadIC::PatchInlinedContextualLoad(Address address,
Object* map,
Object* cell,
bool is_dont_delete) {
// Find the end of the inlined code for handling the contextual load if
// this is an inlined IC call site.
Address inline_end_address = 0;
int marker = InlinedICSiteMarker(address, &inline_end_address);
if (!((marker == Assembler::PROPERTY_ACCESS_INLINED_CONTEXT) ||
(marker == Assembler::PROPERTY_ACCESS_INLINED_CONTEXT_DONT_DELETE))) {
return false;
}
// On ARM we don't rely on the is_dont_delete argument as the hint is already
// embedded in the code marker.
bool marker_is_dont_delete =
marker == Assembler::PROPERTY_ACCESS_INLINED_CONTEXT_DONT_DELETE;
// These are the offsets from the end of the inlined code.
// See codegen-arm.cc CodeGenerator::EmitNamedLoad.
int ldr_map_offset = marker_is_dont_delete ? -5 : -8;
int ldr_cell_offset = marker_is_dont_delete ? -2 : -5;
if (FLAG_debug_code && marker_is_dont_delete) {
// Three extra instructions were generated to check for the_hole_value.
ldr_map_offset -= 3;
ldr_cell_offset -= 3;
}
Address ldr_map_instr_address =
inline_end_address + ldr_map_offset * Assembler::kInstrSize;
Address ldr_cell_instr_address =
inline_end_address + ldr_cell_offset * Assembler::kInstrSize;
// Patch the map check.
Assembler::set_target_address_at(ldr_map_instr_address,
reinterpret_cast<Address>(map));
// Patch the cell address.
Assembler::set_target_address_at(ldr_cell_instr_address,
reinterpret_cast<Address>(cell));
return true;
}
bool StoreIC::PatchInlinedStore(Address address, Object* map, int offset) {
if (V8::UseCrankshaft()) return false;
// Find the end of the inlined code for the store if there is an
// inlined version of the store.
Address inline_end_address = 0;
if (InlinedICSiteMarker(address, &inline_end_address)
!= Assembler::PROPERTY_ACCESS_INLINED) {
return false;
}
// Compute the address of the map load instruction.
Address ldr_map_instr_address =
inline_end_address -
(CodeGenerator::GetInlinedNamedStoreInstructionsAfterPatch() *
Assembler::kInstrSize);
// Update the offsets if initializing the inlined store. No reason
// to update the offsets when clearing the inlined version because
// it will bail out in the map check.
if (map != HEAP->null_value()) {
// Patch the offset in the actual store instruction.
Address str_property_instr_address =
ldr_map_instr_address + 3 * Assembler::kInstrSize;
Instr str_property_instr = Assembler::instr_at(str_property_instr_address);
ASSERT(Assembler::IsStrRegisterImmediate(str_property_instr));
str_property_instr = Assembler::SetStrRegisterImmediateOffset(
str_property_instr, offset - kHeapObjectTag);
Assembler::instr_at_put(str_property_instr_address, str_property_instr);
// Patch the offset in the add instruction that is part of the
// write barrier.
Address add_offset_instr_address =
str_property_instr_address + Assembler::kInstrSize;
Instr add_offset_instr = Assembler::instr_at(add_offset_instr_address);
ASSERT(Assembler::IsAddRegisterImmediate(add_offset_instr));
add_offset_instr = Assembler::SetAddRegisterImmediateOffset(
add_offset_instr, offset - kHeapObjectTag);
Assembler::instr_at_put(add_offset_instr_address, add_offset_instr);
// Indicate that code has changed.
CPU::FlushICache(str_property_instr_address, 2 * Assembler::kInstrSize);
}
// Patch the map check.
Assembler::set_target_address_at(ldr_map_instr_address,
reinterpret_cast<Address>(map));
return true;
}
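
The CPU::FlushICache calls are essential on ARM: the patched words are written through the data cache, and stale copies may remain in the instruction cache. A portable sketch of the same flush using the GCC/Clang builtin (not what V8's CPU::FlushICache does on every platform):

    #include <cstddef>

    void FlushInstructionCache(void* start, size_t size) {
      char* begin = static_cast<char*>(start);
      __builtin___clear_cache(begin, begin + size);  // sync i-cache with d-cache
    }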
bool KeyedLoadIC::PatchInlinedLoad(Address address, Object* map) {
if (V8::UseCrankshaft()) return false;
Address inline_end_address = 0;
if (InlinedICSiteMarker(address, &inline_end_address)
!= Assembler::PROPERTY_ACCESS_INLINED) {
return false;
}
// Patch the map check.
Address ldr_map_instr_address =
inline_end_address -
(CodeGenerator::GetInlinedKeyedLoadInstructionsAfterPatch() *
Assembler::kInstrSize);
Assembler::set_target_address_at(ldr_map_instr_address,
reinterpret_cast<Address>(map));
return true;
}
bool KeyedStoreIC::PatchInlinedStore(Address address, Object* map) {
if (V8::UseCrankshaft()) return false;
// Find the end of the inlined code for handling the store if this is an
// inlined IC call site.
Address inline_end_address = 0;
if (InlinedICSiteMarker(address, &inline_end_address)
!= Assembler::PROPERTY_ACCESS_INLINED) {
return false;
}
// Patch the map check.
Address ldr_map_instr_address =
inline_end_address -
(CodeGenerator::kInlinedKeyedStoreInstructionsAfterPatch *
Assembler::kInstrSize);
Assembler::set_target_address_at(ldr_map_instr_address,
reinterpret_cast<Address>(map));
return true;
}
Object* KeyedLoadIC_Miss(Arguments args);
@@ -477,21 +477,6 @@ void BreakLocationIterator::SetDebugBreakAtIC() {
// calling convention used by the call site.
Handle<Code> dbgbrk_code(Debug::FindDebugBreak(code, mode));
rinfo()->set_target_address(dbgbrk_code->entry());
// For stubs that refer back to an inlined version, clear the cached map
// for the inlined case so execution always goes through the IC. As long
// as the break point is set, the patching performed by the runtime system
// takes place in the code copy and therefore has no effect on the running
// code, keeping it from using the inlined code.
if (code->is_keyed_load_stub()) {
KeyedLoadIC::ClearInlinedVersion(pc());
} else if (code->is_keyed_store_stub()) {
KeyedStoreIC::ClearInlinedVersion(pc());
} else if (code->is_load_stub()) {
LoadIC::ClearInlinedVersion(pc());
} else if (code->is_store_stub()) {
StoreIC::ClearInlinedVersion(pc());
}
}
}
@@ -499,20 +484,6 @@ void BreakLocationIterator::SetDebugBreakAtIC() {
void BreakLocationIterator::ClearDebugBreakAtIC() {
// Patch the code to the original invoke.
rinfo()->set_target_address(original_rinfo()->target_address());
RelocInfo::Mode mode = rmode();
if (RelocInfo::IsCodeTarget(mode)) {
AssertNoAllocation nogc;
Address target = original_rinfo()->target_address();
Code* code = Code::GetCodeFromTargetAddress(target);
// Restore the inlined version of keyed stores to get back to the
// fast case. We need to patch back the keyed store because no
// patching happens when running normally. For keyed loads, the
// map check will get patched back when running normally after ICs
// have been cleared at GC.
if (code->is_keyed_store_stub()) KeyedStoreIC::RestoreInlinedVersion(pc());
}
}
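
Clearing works because the inlined fast case begins with a map check; patching the expected map to null makes the check fail unconditionally, so execution always reaches the IC stub (illustrative pseudo-assembly in comment form):

    // cmp reg, <expected map>   ; <expected map> patched to null when cleared
    // jne miss                  ; always taken once cleared
    // ...                       ; inlined fast-case load/store, now skipped
    // miss: call <IC stub>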
@@ -4254,30 +4254,7 @@ void FullCodeGenerator::EmitCallIC(Handle<Code> ic, RelocInfo::Mode mode) {
default:
break;
}
__ call(ic, mode);
// Crankshaft doesn't need patching of inlined loads and stores.
// When compiling the snapshot we need to produce code that works
// with and without Crankshaft.
if (V8::UseCrankshaft() && !Serializer::enabled()) {
return;
}
// If we're calling a (keyed) load or store stub, we have to mark
// the call as containing no inlined code so we will not attempt to
// patch it.
switch (ic->kind()) {
case Code::LOAD_IC:
case Code::KEYED_LOAD_IC:
case Code::STORE_IC:
case Code::KEYED_STORE_IC:
__ nop(); // Signals no inlined code.
break;
default:
// Do nothing.
break;
}
}
@@ -4298,7 +4275,6 @@ void FullCodeGenerator::EmitCallIC(Handle<Code> ic, JumpPatchSite* patch_site) {
default:
break;
}
__ call(ic, RelocInfo::CODE_TARGET);
if (patch_site != NULL && patch_site->is_bound()) {
patch_site->EmitPatchInfo();
@@ -371,12 +371,6 @@ static void GenerateNumberDictionaryLoad(MacroAssembler* masm,
}
// The offset from the inlined patch site to the start of the
// inlined load instruction. It is 7 bytes (the operand-immediate
// cmp of the map check) plus 6 bytes (jne slow_label).
const int LoadIC::kOffsetToLoadInstruction = 13;
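
The constant 13 matches the byte layout of the ia32 inlined fast case (sizes per the Intel instruction encoding; illustrative):

    // cmp [reg + disp8], imm32  ; 81 /7 disp8 imm32 = 7 bytes (map check)
    // jne slow_label            ; 0F 85 rel32       = 6 bytes
    // mov reg, [reg + disp32]   ; 8B /r disp32      = 6 bytes (the load)
    //
    // 7 + 6 = 13 bytes from the map check to the load; the patched field
    // offset lives in the last 4 bytes of the mov, hence the "+ 2" used
    // later when patching the offset.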
void LoadIC::GenerateArrayLength(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- eax : receiver
@@ -1273,172 +1267,6 @@ void LoadIC::GenerateMiss(MacroAssembler* masm) {
}
bool LoadIC::PatchInlinedLoad(Address address, Object* map, int offset) {
if (V8::UseCrankshaft()) return false;
// The address of the instruction following the call.
Address test_instruction_address =
address + Assembler::kCallTargetAddressOffset;
// If the instruction following the call is not a test eax, nothing
// was inlined.
if (*test_instruction_address != Assembler::kTestEaxByte) return false;
Address delta_address = test_instruction_address + 1;
// The delta to the start of the map check instruction.
int delta = *reinterpret_cast<int*>(delta_address);
// The map address is the last 4 bytes of the 7-byte
// operand-immediate compare instruction, so we add 3 to get the
// offset to the last 4 bytes.
Address map_address = test_instruction_address + delta + 3;
*(reinterpret_cast<Object**>(map_address)) = map;
// The offset is in the last 4 bytes of a six byte
// memory-to-register move instruction, so we add 2 to get the
// offset to the last 4 bytes.
Address offset_address =
test_instruction_address + delta + kOffsetToLoadInstruction + 2;
*reinterpret_cast<int*>(offset_address) = offset - kHeapObjectTag;
return true;
}
// One byte opcode for mov ecx,0xXXXXXXXX.
// Marks inlined contextual loads using all kinds of cells. Generated
// code has the hole check:
// mov reg, <cell>
// mov reg, (<cell>, value offset)
// cmp reg, <the hole>
// je slow
// ;; use reg
static const byte kMovEcxByte = 0xB9;
// One byte opcode for mov edx,0xXXXXXXXX.
// Marks inlined contextual loads using only "don't delete"
// cells. Generated code doesn't have the hole check:
// mov reg, <cell>
// mov reg, (<cell>, value offset)
// ;; use reg
static const byte kMovEdxByte = 0xBA;
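
A toy dispatch over the two marker bytes above (mov reg, imm32 encodes as B8+reg, hence 0xB9 for ecx and 0xBA for edx):

    #include <cstdint>

    // Classifies an inlined contextual load site by its first byte and
    // reports whether the generated code contains the hole check.
    bool IsInlinedContextualLoad(uint8_t first_byte, bool* has_hole_check) {
      if (first_byte == 0xB9) { *has_hole_check = true;  return true; }  // mov ecx, imm32
      if (first_byte == 0xBA) { *has_hole_check = false; return true; }  // mov edx, imm32
      return false;
    }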
bool LoadIC::PatchInlinedContextualLoad(Address address,
Object* map,
Object* cell,
bool is_dont_delete) {
if (V8::UseCrankshaft()) return false;
// The address of the instruction following the call.
Address mov_instruction_address =
address + Assembler::kCallTargetAddressOffset;
// If the instruction following the call is not a mov ecx/edx,
// nothing was inlined.
byte b = *mov_instruction_address;
if (b != kMovEcxByte && b != kMovEdxByte) return false;
// If we don't have the hole check generated, we can only support
// "don't delete" cells.
if (b == kMovEdxByte && !is_dont_delete) return false;
Address delta_address = mov_instruction_address + 1;
// The delta to the start of the map check instruction.
int delta = *reinterpret_cast<int*>(delta_address);
// The map address is the last 4 bytes of the 7-byte
// operand-immediate compare instruction, so we add 3 to get the
// offset to the last 4 bytes.
Address map_address = mov_instruction_address + delta + 3;
*(reinterpret_cast<Object**>(map_address)) = map;
// The cell is in the last 4 bytes of a five byte mov reg, imm32
// instruction, so we add 1 to get the offset to the last 4 bytes.
Address offset_address =
mov_instruction_address + delta + kOffsetToLoadInstruction + 1;
*reinterpret_cast<Object**>(offset_address) = cell;
return true;
}
bool StoreIC::PatchInlinedStore(Address address, Object* map, int offset) {
if (V8::UseCrankshaft()) return false;
// The address of the instruction following the call.
Address test_instruction_address =
address + Assembler::kCallTargetAddressOffset;
// If the instruction following the call is not a test eax, nothing
// was inlined.
if (*test_instruction_address != Assembler::kTestEaxByte) return false;
// Extract the encoded deltas from the test eax instruction.
Address encoded_offsets_address = test_instruction_address + 1;
int encoded_offsets = *reinterpret_cast<int*>(encoded_offsets_address);
int delta_to_map_check = -(encoded_offsets & 0xFFFF);
int delta_to_record_write = encoded_offsets >> 16;
// Patch the map to check. The map address is the last 4 bytes of
// the 7-byte operand-immediate compare instruction.
Address map_check_address = test_instruction_address + delta_to_map_check;
Address map_address = map_check_address + 3;
*(reinterpret_cast<Object**>(map_address)) = map;
// Patch the offset in the store instruction. The offset is in the
// last 4 bytes of a six byte register-to-memory move instruction.
Address offset_address =
map_check_address + StoreIC::kOffsetToStoreInstruction + 2;
// The offset should have initial value (kMaxInt - 1), cleared value
// (-1) or we should be clearing the inlined version.
ASSERT(*reinterpret_cast<int*>(offset_address) == kMaxInt - 1 ||
*reinterpret_cast<int*>(offset_address) == -1 ||
(offset == 0 && map == HEAP->null_value()));
*reinterpret_cast<int*>(offset_address) = offset - kHeapObjectTag;
// Patch the offset in the write-barrier code. The offset is the
// last 4 bytes of a six byte lea instruction.
offset_address = map_check_address + delta_to_record_write + 2;
// The offset should have initial value (kMaxInt), cleared value
// (-1) or we should be clearing the inlined version.
ASSERT(*reinterpret_cast<int*>(offset_address) == kMaxInt ||
*reinterpret_cast<int*>(offset_address) == -1 ||
(offset == 0 && map == HEAP->null_value()));
*reinterpret_cast<int*>(offset_address) = offset - kHeapObjectTag;
return true;
}
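
The store patcher unpacks two 16-bit deltas from the 32-bit immediate of the marker test instruction: the magnitude of the (negative) delta back to the map check in the low half, and the delta forward to the write-barrier code in the high half. A round-trip sketch (assumes both deltas fit in 16 bits):

    #include <cstdint>

    int32_t EncodeDeltas(int to_map_check, int to_record_write) {
      // to_map_check is negative; store its magnitude in the low 16 bits.
      return (to_record_write << 16) | ((-to_map_check) & 0xFFFF);
    }

    void DecodeDeltas(int32_t encoded, int* to_map_check, int* to_record_write) {
      *to_map_check = -(encoded & 0xFFFF);
      *to_record_write = encoded >> 16;  // arithmetic shift preserves sign
    }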
static bool PatchInlinedMapCheck(Address address, Object* map) {
if (V8::UseCrankshaft()) return false;
Address test_instruction_address =
address + Assembler::kCallTargetAddressOffset;
// The keyed load has a fast inlined case if the IC call instruction
// is immediately followed by a test instruction.
if (*test_instruction_address != Assembler::kTestEaxByte) return false;
// Fetch the offset from the test instruction to the map cmp
// instruction. This offset is stored in the last 4 bytes of the 5
// byte test instruction.
Address delta_address = test_instruction_address + 1;
int delta = *reinterpret_cast<int*>(delta_address);
// Compute the map address. The map address is in the last 4 bytes
// of the 7-byte operand-immediate compare instruction, so we add 3
// to the offset to get the map address.
Address map_address = test_instruction_address + delta + 3;
// Patch the map check.
*(reinterpret_cast<Object**>(map_address)) = map;
return true;
}
bool KeyedLoadIC::PatchInlinedLoad(Address address, Object* map) {
return PatchInlinedMapCheck(address, map);
}
bool KeyedStoreIC::PatchInlinedStore(Address address, Object* map) {
return PatchInlinedMapCheck(address, map);
}
void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- eax : key
@@ -1519,12 +1347,6 @@ void StoreIC::GenerateMiss(MacroAssembler* masm) {
}
// The offset from the inlined patch site to the start of the inlined
// store instruction. It is 7 bytes (the operand-immediate cmp of the
// map check) plus 6 bytes (jne slow_label).
const int StoreIC::kOffsetToStoreInstruction = 13;
void StoreIC::GenerateArrayLength(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- eax : value
@@ -304,54 +304,23 @@ void CallICBase::Clear(Address address, Code* target) {
}
void KeyedLoadIC::ClearInlinedVersion(Address address) {
// Insert null as the map to check for, making sure the map check fails
// and sends control flow to the IC instead of the inlined version.
PatchInlinedLoad(address, HEAP->null_value());
}
void KeyedLoadIC::Clear(Address address, Code* target) {
if (target->ic_state() == UNINITIALIZED) return;
// Make sure to also clear the map used in inline fast cases. If we
// do not clear these maps, cached code can keep objects alive
// through the embedded maps.
ClearInlinedVersion(address);
SetTargetAtAddress(address, initialize_stub());
}
void LoadIC::ClearInlinedVersion(Address address) {
// Reset the map check of the inlined inobject property load (if
// present) to guarantee failure by holding an invalid map (the null
// value). The offset can be patched to anything.
Heap* heap = HEAP;
PatchInlinedLoad(address, heap->null_value(), 0);
PatchInlinedContextualLoad(address,
heap->null_value(),
heap->null_value(),
true);
}
void LoadIC::Clear(Address address, Code* target) {
if (target->ic_state() == UNINITIALIZED) return;
ClearInlinedVersion(address);
SetTargetAtAddress(address, initialize_stub());
}
void StoreIC::ClearInlinedVersion(Address address) {
// Reset the map check of the inlined inobject property store (if
// present) to guarantee failure by holding an invalid map (the null
// value). The offset can be patched to anything.
PatchInlinedStore(address, HEAP->null_value(), 0);
}
void StoreIC::Clear(Address address, Code* target) {
if (target->ic_state() == UNINITIALIZED) return;
ClearInlinedVersion(address);
SetTargetAtAddress(address,
(target->extra_ic_state() == kStrictMode)
? initialize_stub_strict()
@@ -359,21 +328,6 @@ void StoreIC::Clear(Address address, Code* target) {
}
void KeyedStoreIC::ClearInlinedVersion(Address address) {
// Insert null as the elements map to check for. This will make
// sure that the elements fast-case map check fails so that control
// flows to the IC instead of the inlined version.
PatchInlinedStore(address, HEAP->null_value());
}
void KeyedStoreIC::RestoreInlinedVersion(Address address) {
// Restore the fast-case elements map check so that the inlined
// version can be used again.
PatchInlinedStore(address, HEAP->fixed_array_map());
}
void KeyedStoreIC::Clear(Address address, Code* target) {
if (target->ic_state() == UNINITIALIZED) return;
SetTargetAtAddress(address,
@@ -873,9 +827,6 @@ MaybeObject* LoadIC::Load(State state,
#endif
if (state == PREMONOMORPHIC) {
if (object->IsString()) {
Map* map = HeapObject::cast(*object)->map();
const int offset = String::kLengthOffset;
PatchInlinedLoad(address(), map, offset);
set_target(isolate()->builtins()->builtin(
Builtins::kLoadIC_StringLength));
} else {
@@ -903,9 +854,6 @@ MaybeObject* LoadIC::Load(State state,
if (FLAG_trace_ic) PrintF("[LoadIC : +#length /array]\n");
#endif
if (state == PREMONOMORPHIC) {
Map* map = HeapObject::cast(*object)->map();
const int offset = JSArray::kLengthOffset;
PatchInlinedLoad(address(), map, offset);
set_target(isolate()->builtins()->builtin(
Builtins::kLoadIC_ArrayLength));
} else {
@@ -948,63 +896,6 @@ MaybeObject* LoadIC::Load(State state,
LOG(isolate(), SuspectReadEvent(*name, *object));
}
bool can_be_inlined_precheck =
FLAG_use_ic &&
lookup.IsProperty() &&
lookup.IsCacheable() &&
lookup.holder() == *object &&
!object->IsAccessCheckNeeded();
bool can_be_inlined =
can_be_inlined_precheck &&
state == PREMONOMORPHIC &&
lookup.type() == FIELD;
bool can_be_inlined_contextual =
can_be_inlined_precheck &&
state == UNINITIALIZED &&
lookup.holder()->IsGlobalObject() &&
lookup.type() == NORMAL;
if (can_be_inlined) {
Map* map = lookup.holder()->map();
// Property's index in the properties array. If negative we have
// an inobject property.
int index = lookup.GetFieldIndex() - map->inobject_properties();
if (index < 0) {
// Index is an offset from the end of the object.
int offset = map->instance_size() + (index * kPointerSize);
if (PatchInlinedLoad(address(), map, offset)) {
set_target(megamorphic_stub());
TRACE_IC_NAMED("[LoadIC : inline patch %s]\n", name);
return lookup.holder()->FastPropertyAt(lookup.GetFieldIndex());
} else {
TRACE_IC_NAMED("[LoadIC : no inline patch %s (patching failed)]\n",
name);
}
} else {
TRACE_IC_NAMED("[LoadIC : no inline patch %s (not inobject)]\n", name);
}
} else if (can_be_inlined_contextual) {
Map* map = lookup.holder()->map();
JSGlobalPropertyCell* cell = JSGlobalPropertyCell::cast(
lookup.holder()->property_dictionary()->ValueAt(
lookup.GetDictionaryEntry()));
if (PatchInlinedContextualLoad(address(),
map,
cell,
lookup.IsDontDelete())) {
set_target(megamorphic_stub());
TRACE_IC_NAMED("[LoadIC : inline contextual patch %s]\n", name);
ASSERT(cell->value() != isolate()->heap()->the_hole_value());
return cell->value();
}
} else {
if (FLAG_use_ic && state == PREMONOMORPHIC) {
TRACE_IC_NAMED("[LoadIC : no inline patch %s (not inlinable)]\n", name);
}
}
// Update inline cache and stub cache.
if (FLAG_use_ic) {
UpdateCaches(&lookup, state, object, name);
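
A worked instance of the in-object offset computation above (hypothetical numbers; ia32, kPointerSize == 4):

    // map->inobject_properties() == 4, map->instance_size() == 28,
    // lookup.GetFieldIndex() == 1:
    //   index  = 1 - 4 = -3            // negative => in-object property
    //   offset = 28 + (-3 * 4) = 16    // byte offset from the object start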
@@ -1294,18 +1185,6 @@ MaybeObject* KeyedLoadIC::Load(State state,
#ifdef DEBUG
TraceIC("KeyedLoadIC", key, state, target());
#endif // DEBUG
// For JSObjects with fast elements that are not value wrappers
// and that do not have indexed interceptors, we initialize the
// inlined fast case (if present) by patching the inlined map
// check.
if (object->IsJSObject() &&
!object->IsJSValue() &&
!JSObject::cast(*object)->HasIndexedInterceptor() &&
JSObject::cast(*object)->HasFastElements()) {
Map* map = JSObject::cast(*object)->map();
PatchInlinedLoad(address(), map);
}
}
// Get the property.
@@ -1471,57 +1350,7 @@ MaybeObject* StoreIC::Store(State state,
LookupResult lookup;
if (LookupForWrite(*receiver, *name, &lookup)) {
bool can_be_inlined =
state == UNINITIALIZED &&
lookup.IsProperty() &&
lookup.holder() == *receiver &&
lookup.type() == FIELD &&
!receiver->IsAccessCheckNeeded();
if (can_be_inlined) {
Map* map = lookup.holder()->map();
// Property's index in the properties array. If negative we have
// an inobject property.
int index = lookup.GetFieldIndex() - map->inobject_properties();
if (index < 0) {
// Index is an offset from the end of the object.
int offset = map->instance_size() + (index * kPointerSize);
if (PatchInlinedStore(address(), map, offset)) {
set_target((strict_mode == kStrictMode)
? megamorphic_stub_strict()
: megamorphic_stub());
#ifdef DEBUG
if (FLAG_trace_ic) {
PrintF("[StoreIC : inline patch %s]\n", *name->ToCString());
}
#endif
return receiver->SetProperty(*name, *value, NONE, strict_mode);
#ifdef DEBUG
} else {
if (FLAG_trace_ic) {
PrintF("[StoreIC : no inline patch %s (patching failed)]\n",
*name->ToCString());
}
}
} else {
if (FLAG_trace_ic) {
PrintF("[StoreIC : no inline patch %s (not inobject)]\n",
*name->ToCString());
}
}
} else {
if (state == PREMONOMORPHIC) {
if (FLAG_trace_ic) {
PrintF("[StoreIC : no inline patch %s (not inlinable)]\n",
*name->ToCString());
#endif
}
}
}
// If no inlined store ic was patched, generate a stub for this
// store.
// Generate a stub for this store.
UpdateCaches(&lookup, state, strict_mode, receiver, name, value);
} else {
// Strict mode doesn't allow setting non-existent global property
@@ -296,14 +296,6 @@ class LoadIC: public IC {
bool support_wrappers);
static void GenerateFunctionPrototype(MacroAssembler* masm);
// Clear the use of the inlined version.
static void ClearInlinedVersion(Address address);
// The offset from the inlined patch site to the start of the
// inlined load instruction. It is architecture-dependent, and not
// used on ARM.
static const int kOffsetToLoadInstruction;
private:
// Update the inline cache and the global stub cache based on the
// lookup result.
@@ -328,13 +320,6 @@ class LoadIC: public IC {
static void Clear(Address address, Code* target);
static bool PatchInlinedLoad(Address address, Object* map, int index);
static bool PatchInlinedContextualLoad(Address address,
Object* map,
Object* cell,
bool is_dont_delete);
friend class IC;
};
@@ -361,9 +346,6 @@ class KeyedLoadIC: public IC {
static void GenerateIndexedInterceptor(MacroAssembler* masm);
// Clear the use of the inlined version.
static void ClearInlinedVersion(Address address);
// Bit mask to be tested against bit field for the cases when
// generic stub should go into slow case.
// Access check is necessary explicitly since generic stub does not perform
@@ -407,10 +389,6 @@ class KeyedLoadIC: public IC {
static void Clear(Address address, Code* target);
// Support for patching the map that is checked in an inlined
// version of keyed load.
static bool PatchInlinedLoad(Address address, Object* map);
friend class IC;
};
@@ -437,13 +415,6 @@ class StoreIC: public IC {
static void GenerateGlobalProxy(MacroAssembler* masm,
StrictModeFlag strict_mode);
// Clear the use of an inlined version.
static void ClearInlinedVersion(Address address);
// The offset from the inlined patch site to the start of the
// inlined store instruction.
static const int kOffsetToStoreInstruction;
private:
// Update the inline cache and the global stub cache based on the
// lookup result.
@@ -489,10 +460,6 @@ class StoreIC: public IC {
static void Clear(Address address, Code* target);
// Support for patching the index and the map that is checked in an
// inlined version of the named store.
static bool PatchInlinedStore(Address address, Object* map, int index);
friend class IC;
};
@@ -514,12 +481,6 @@ class KeyedStoreIC: public IC {
StrictModeFlag strict_mode);
static void GenerateGeneric(MacroAssembler* masm, StrictModeFlag strict_mode);
// Clear the inlined version so the IC is always hit.
static void ClearInlinedVersion(Address address);
// Restore the inlined version so the fast case can get hit.
static void RestoreInlinedVersion(Address address);
private:
// Update the inline cache.
void UpdateCaches(LookupResult* lookup,
@@ -564,14 +525,6 @@ class KeyedStoreIC: public IC {
static void Clear(Address address, Code* target);
// Support for patching the map that is checked in an inlined
// version of keyed store.
// The address is the patch point for the IC call
// (Assembler::kCallTargetAddressOffset before the end of
// the call/return address).
// The map is the new map that the inlined code should check against.
static bool PatchInlinedStore(Address address, Object* map);
friend class IC;
};
@@ -4232,30 +4232,7 @@ void FullCodeGenerator::EmitCallIC(Handle<Code> ic, RelocInfo::Mode mode) {
default:
break;
}
__ call(ic, mode);
// Crankshaft doesn't need patching of inlined loads and stores.
// When compiling the snapshot we need to produce code that works
// with and without Crankshaft.
if (V8::UseCrankshaft() && !Serializer::enabled()) {
return;
}
// If we're calling a (keyed) load or store stub, we have to mark
// the call as containing no inlined code so we will not attempt to
// patch it.
switch (ic->kind()) {
case Code::LOAD_IC:
case Code::KEYED_LOAD_IC:
case Code::STORE_IC:
case Code::KEYED_STORE_IC:
__ nop(); // Signals no inlined code.
break;
default:
// Do nothing.
break;
}
}
@@ -4276,7 +4253,6 @@ void FullCodeGenerator::EmitCallIC(Handle<Code> ic, JumpPatchSite* patch_site) {
default:
break;
}
__ call(ic, RelocInfo::CODE_TARGET);
if (patch_site != NULL && patch_site->is_bound()) {
patch_site->EmitPatchInfo();
@@ -381,11 +381,6 @@ static void GenerateNumberDictionaryLoad(MacroAssembler* masm,
}
// The offset from the inlined patch site to the start of the inlined
// load instruction.
const int LoadIC::kOffsetToLoadInstruction = 20;
void LoadIC::GenerateArrayLength(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- rax : receiver
@@ -1297,130 +1292,6 @@ void LoadIC::GenerateMiss(MacroAssembler* masm) {
}
bool LoadIC::PatchInlinedLoad(Address address, Object* map, int offset) {
if (V8::UseCrankshaft()) return false;
// The address of the instruction following the call.
Address test_instruction_address =
address + Assembler::kCallTargetAddressOffset;
// If the instruction following the call is not a test rax, nothing
// was inlined.
if (*test_instruction_address != Assembler::kTestEaxByte) return false;
Address delta_address = test_instruction_address + 1;
// The delta to the start of the map check instruction.
int delta = *reinterpret_cast<int*>(delta_address);
// The map address is the last 8 bytes of the 10-byte
// immediate move instruction, so we add 2 to get the
// offset to the last 8 bytes.
Address map_address = test_instruction_address + delta + 2;
*(reinterpret_cast<Object**>(map_address)) = map;
// The offset is in the 32-bit displacement of a seven-byte
// memory-to-register move instruction (REX.W 0x8B ModR/M disp32),
// so we add 3 to get the offset of the displacement.
Address offset_address =
test_instruction_address + delta + kOffsetToLoadInstruction + 3;
*reinterpret_cast<int*>(offset_address) = offset - kHeapObjectTag;
return true;
}
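
The x64 layout assumed by the patcher above, for reference (illustrative byte counts):

    // movq reg, imm64           ; REX.W B8+r imm64   = 10 bytes, map in last 8
    // movq reg, [reg + disp32]  ; REX.W 8B /r disp32 = 7 bytes, offset in last 4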
bool LoadIC::PatchInlinedContextualLoad(Address address,
Object* map,
Object* cell,
bool is_dont_delete) {
// TODO(<bug#>): implement this.
return false;
}
bool StoreIC::PatchInlinedStore(Address address, Object* map, int offset) {
if (V8::UseCrankshaft()) return false;
// The address of the instruction following the call.
Address test_instruction_address =
address + Assembler::kCallTargetAddressOffset;
// If the instruction following the call is not a test rax, nothing
// was inlined.
if (*test_instruction_address != Assembler::kTestEaxByte) return false;
// Extract the encoded deltas from the test rax instruction.
Address encoded_offsets_address = test_instruction_address + 1;
int encoded_offsets = *reinterpret_cast<int*>(encoded_offsets_address);
int delta_to_map_check = -(encoded_offsets & 0xFFFF);
int delta_to_record_write = encoded_offsets >> 16;
// Patch the map to check. The map address is the last 8 bytes of
// the 10-byte immediate move instruction.
Address map_check_address = test_instruction_address + delta_to_map_check;
Address map_address = map_check_address + 2;
*(reinterpret_cast<Object**>(map_address)) = map;
// Patch the offset in the store instruction. The offset is in the
// last 4 bytes of a 7 byte register-to-memory move instruction.
Address offset_address =
map_check_address + StoreIC::kOffsetToStoreInstruction + 3;
// The offset should have initial value (kMaxInt - 1), cleared value
// (-1) or we should be clearing the inlined version.
ASSERT(*reinterpret_cast<int*>(offset_address) == kMaxInt - 1 ||
*reinterpret_cast<int*>(offset_address) == -1 ||
(offset == 0 && map == HEAP->null_value()));
*reinterpret_cast<int*>(offset_address) = offset - kHeapObjectTag;
// Patch the offset in the write-barrier code. The offset is the
// last 4 bytes of a 7 byte lea instruction.
offset_address = map_check_address + delta_to_record_write + 3;
// The offset should have initial value (kMaxInt), cleared value
// (-1) or we should be clearing the inlined version.
ASSERT(*reinterpret_cast<int*>(offset_address) == kMaxInt ||
*reinterpret_cast<int*>(offset_address) == -1 ||
(offset == 0 && map == HEAP->null_value()));
*reinterpret_cast<int*>(offset_address) = offset - kHeapObjectTag;
return true;
}
static bool PatchInlinedMapCheck(Address address, Object* map) {
if (V8::UseCrankshaft()) return false;
// The argument is the address of the start of the call sequence
// that called the IC.
Address test_instruction_address =
address + Assembler::kCallTargetAddressOffset;
// The keyed load has a fast inlined case if the IC call instruction
// is immediately followed by a test instruction.
if (*test_instruction_address != Assembler::kTestEaxByte) return false;
// Fetch the offset from the test instruction to the map compare
// instructions (starting with the 64-bit immediate mov of the map
// address). This offset is stored in the last 4 bytes of the 5
// byte test instruction.
Address delta_address = test_instruction_address + 1;
int delta = *reinterpret_cast<int*>(delta_address);
// Compute the map address. The map address is in the last 8 bytes
// of the 10-byte immediate mov instruction (incl. REX prefix), so we add 2
// to the offset to get the map address.
Address map_address = test_instruction_address + delta + 2;
// Patch the map check.
*(reinterpret_cast<Object**>(map_address)) = map;
return true;
}
bool KeyedLoadIC::PatchInlinedLoad(Address address, Object* map) {
return PatchInlinedMapCheck(address, map);
}
bool KeyedStoreIC::PatchInlinedStore(Address address, Object* map) {
return PatchInlinedMapCheck(address, map);
}
void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- rax : key
@@ -1503,11 +1374,6 @@ void StoreIC::GenerateMiss(MacroAssembler* masm) {
}
// The offset from the inlined patch site to the start of the inlined
// store instruction.
const int StoreIC::kOffsetToStoreInstruction = 20;
void StoreIC::GenerateArrayLength(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- rax : value