Commit 7d5038a3 authored by ager@chromium.org

Fixed a couple of issues with store inlining on ARM.

Spill and merge virtual frames explicitly in the deferred code.

Account for the fact that the inlined write barrier size depends on
the size of the new space masks.

Review URL: http://codereview.chromium.org/3018015

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@5122 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
parent 9deeec0b
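
Of the two fixes described above, the second replaces a hard-coded instruction count with a measured one: the inlined write barrier tests whether the receiver is in new space, and the length of that test depends on how the new space masks encode as ARM operands. A minimal sketch of the measuring idiom the patch adopts (Label and InstructionsGeneratedSince are the assembler APIs used in the diff; the barrier body is elided):

  // Sketch: measure the emitted write-barrier size once, at code
  // generation time, instead of hard-coding a count.
  Label record_write_start;
  __ bind(&record_write_start);
  // ... emit the barrier: add, InNewSpace check, RecordWriteHelper ...
  int size = masm()->InstructionsGeneratedSince(&record_write_start);
  // Every inlined write barrier must come out the same size, because
  // downstream code derives fixed instruction counts from it.
  ASSERT(inlined_write_barrier_size_ == -1 ||
         inlined_write_barrier_size_ == size);
  inlined_write_barrier_size_ = size;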
@@ -827,9 +827,10 @@ void Assembler::addrmod1(Instr instr,
     instr |= x.rs_.code()*B8 | x.shift_op_ | B4 | x.rm_.code();
   }
   emit(instr | rn.code()*B16 | rd.code()*B12);
-  if (rn.is(pc) || x.rm_.is(pc))
+  if (rn.is(pc) || x.rm_.is(pc)) {
     // Block constant pool emission for one instruction after reading pc.
     BlockConstPoolBefore(pc_offset() + kInstrSize);
+  }
 }
......
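
For context on the hunk above: the braces only make the conditional's scope explicit; the guarded call is unchanged. A hedged sketch of why the blocking is needed (BlockConstPoolBefore, pc_offset and kInstrSize are as in the diff; the two-instruction call sequence and the register 'target' are a hypothetical illustration, not code from this patch):

  // Reading pc on ARM yields the address of the current instruction
  // plus 8, i.e. the instruction after next.  A classic call sequence
  // exploits this to compute its return address, and would break if
  // the assembler dumped a constant pool (with its jump-over branch)
  // between the two instructions:
  __ mov(lr, Operand(pc));         // lr = address just past the ldr below
  __ ldr(pc, MemOperand(target));  // jump; returns to the address in lr
  // BlockConstPoolBefore(pc_offset() + kInstrSize) after the mov keeps
  // the constant pool from being emitted between those instructions.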
@@ -151,6 +151,8 @@ TypeInfoCodeGenState::~TypeInfoCodeGenState() {
 // -------------------------------------------------------------------------
 // CodeGenerator implementation

+int CodeGenerator::inlined_write_barrier_size_ = -1;
+
 CodeGenerator::CodeGenerator(MacroAssembler* masm)
     : deferred_(8),
       masm_(masm),
@@ -6225,7 +6227,13 @@ class DeferredReferenceSetNamedValue: public DeferredCode {
 };

 // Takes value in r0, receiver in r1 and returns the result (the
 // value) in r0.
 void DeferredReferenceSetNamedValue::Generate() {
+  // Record the entry frame and spill.
+  VirtualFrame copied_frame(*frame_state()->frame());
+  copied_frame.SpillAll();
+
   // Ensure value in r0, receiver in r1 to match store ic calling
   // convention.
   ASSERT(value_.is(r0) && receiver_.is(r1));
@@ -6241,6 +6249,12 @@ void DeferredReferenceSetNamedValue::Generate() {
   // named store has been inlined.
   __ nop(PROPERTY_ACCESS_INLINED);

+  // Go back to the frame we entered with. The instructions
+  // generated by this merge are skipped over by the inline store
+  // patching mechanism when looking for the branch instruction that
+  // tells it where the code to patch is.
+  copied_frame.MergeTo(frame_state()->frame());
+
   // Block the constant pool for one more instruction after leaving this
   // constant pool block scope to include the branch instruction ending the
   // deferred code.
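
Together, the two hunks above bracket the deferred code with an explicit frame snapshot. A minimal sketch of the whole pattern in one place (names as in the diff; the IC call in the middle is elided):

  void DeferredReferenceSetNamedValue::Generate() {
    // Snapshot the virtual frame at entry and spill it, since the
    // slow path below is free to clobber allocated registers.
    VirtualFrame copied_frame(*frame_state()->frame());
    copied_frame.SpillAll();

    // ... move value/receiver into r0/r1 and call the store IC ...

    // Restore the frame layout the inline code expects.  The patcher
    // skips the instructions this merge emits when it scans for the
    // branch that ends the deferred code.
    copied_frame.MergeTo(frame_state()->frame());
  }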
@@ -6365,11 +6379,38 @@ void CodeGenerator::EmitNamedStore(Handle<String> name, bool is_contextual) {
     int offset = 0;
     __ str(value, MemOperand(receiver, offset));

-    // Update the write barrier.
-    __ RecordWrite(receiver, Operand(offset), scratch0, scratch1);
+    // Update the write barrier and record its size. We do not use
+    // the RecordWrite macro here because we want the offset
+    // addition instruction first to make it easy to patch.
+    Label record_write_start, record_write_done;
+    __ bind(&record_write_start);
+
+    // Add offset into the object.
+    __ add(scratch0, receiver, Operand(offset));
+
+    // Test that the object is not in the new space. We cannot set
+    // region marks for new space pages.
+    __ InNewSpace(receiver, scratch1, eq, &record_write_done);
+
+    // Record the actual write.
+    __ RecordWriteHelper(receiver, scratch0, scratch1);
+    __ bind(&record_write_done);
+
+    // Clobber all input registers when running with the debug-code flag
+    // turned on to provoke errors.
+    if (FLAG_debug_code) {
+      __ mov(receiver, Operand(BitCast<int32_t>(kZapValue)));
+      __ mov(scratch0, Operand(BitCast<int32_t>(kZapValue)));
+      __ mov(scratch1, Operand(BitCast<int32_t>(kZapValue)));
+    }
+
+    // Check that this is the first inlined write barrier or that
+    // this inlined write barrier has the same size as all the other
+    // inlined write barriers.
+    ASSERT((inlined_write_barrier_size_ == -1) ||
+           (inlined_write_barrier_size_ ==
+            masm()->InstructionsGeneratedSince(&record_write_start)));
+    inlined_write_barrier_size_ =
+        masm()->InstructionsGeneratedSince(&record_write_start);

     // Make sure that the expected number of instructions are generated.
     ASSERT_EQ(GetInlinedNamedStoreInstructionsAfterPatch(),
-              masm_->InstructionsGeneratedSince(&check_inlined_codesize));
+              masm()->InstructionsGeneratedSince(&check_inlined_codesize));
   }
   deferred->BindExit();
 }
......
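
The reordering above is what makes the IC patching in ic-arm.cc below cheap: the field offset now appears in two adjacent instructions. A sketch of the emitted tail this establishes (instruction forms are illustrative, not exact encodings):

  // Inlined named store tail after this change:
  //   str  value, [receiver, #offset]    ; offset patched by the IC
  //   add  scratch0, receiver, #offset   ; offset patched by the IC
  //   ...InNewSpace check...             ; length varies with the masks
  //   ...RecordWriteHelper...
  // Because the add immediately follows the str, the patcher finds it
  // at a fixed one-instruction displacement.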
@@ -282,7 +282,8 @@ class CodeGenerator: public AstVisitor {
   }

   static const int kInlinedKeyedStoreInstructionsAfterPatch = 5;
   static int GetInlinedNamedStoreInstructionsAfterPatch() {
-    return FLAG_debug_code ? 33 : 14;
+    ASSERT(inlined_write_barrier_size_ != -1);
+    return inlined_write_barrier_size_ + 4;
   }

 private:
@@ -589,6 +590,9 @@ class CodeGenerator: public AstVisitor {
   // to some unlinking code).
   bool function_return_is_shadowed_;

+  // Size of inlined write barriers generated by EmitNamedStore.
+  static int inlined_write_barrier_size_;
+
   static InlineRuntimeLUT kInlineRuntimeLUT[];

   friend class VirtualFrame;
......
@@ -1016,7 +1016,7 @@ bool StoreIC::PatchInlinedStore(Address address, Object* map, int offset) {
   // Patch the offset in the add instruction that is part of the
   // write barrier.
   Address add_offset_instr_address =
-      str_property_instr_address + 4 * Assembler::kInstrSize;
+      str_property_instr_address + Assembler::kInstrSize;
   Instr add_offset_instr = Assembler::instr_at(add_offset_instr_address);
   ASSERT(Assembler::IsAddRegisterImmediate(add_offset_instr));
   add_offset_instr = Assembler::SetAddRegisterImmediateOffset(
@@ -1024,7 +1024,7 @@ bool StoreIC::PatchInlinedStore(Address address, Object* map, int offset) {
   Assembler::instr_at_put(add_offset_instr_address, add_offset_instr);

   // Indicate that code has changed.
-  CPU::FlushICache(str_property_instr_address, 5 * Assembler::kInstrSize);
+  CPU::FlushICache(str_property_instr_address, 2 * Assembler::kInstrSize);
 }

 // Patch the map check.
......
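
With the add adjacent to the str, the patched region shrinks from a five-instruction span to two. A small sketch of the flush arithmetic the two hunks above imply (names as in the diff):

  // Both rewritten instructions are contiguous, starting at the str:
  //   str at str_property_instr_address
  //   add at str_property_instr_address + Assembler::kInstrSize
  // so flushing two instructions' worth of bytes from the str covers
  // everything that was patched.
  Address patch_start = str_property_instr_address;
  int patch_bytes = 2 * Assembler::kInstrSize;
  CPU::FlushICache(patch_start, patch_bytes);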
@@ -169,7 +169,7 @@ namespace internal {
   SC(constructed_objects_runtime, V8.ConstructedObjectsRuntime) \
   SC(constructed_objects_stub, V8.ConstructedObjectsStub) \
   SC(negative_lookups, V8.NegativeLookups) \
-  SC(negative_lookups_miss, V8.NegativeLookupsMiss) \
+  SC(negative_lookups_miss, V8.NegativeLookupsMiss) \
   SC(array_function_runtime, V8.ArrayFunctionRuntime) \
   SC(array_function_native, V8.ArrayFunctionNative) \
   SC(for_in, V8.ForIn) \
......