Martyn Capewell <m.m.capewell@googlemail.com>

Review URL: https://chromiumcodereview.appspot.com/10451037

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@11784 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
parent f78c1248
@@ -3737,9 +3737,13 @@ void CEntryStub::GenerateCore(MacroAssembler* masm,
   // Compute the return address in lr to return to after the jump below. Pc is
   // already at '+ 8' from the current instruction but return is after three
   // instructions so add another 4 to pc to get the return address.
-  masm->add(lr, pc, Operand(4));
-  __ str(lr, MemOperand(sp, 0));
-  masm->Jump(r5);
+  {
+    // Prevent literal pool emission before return address.
+    Assembler::BlockConstPoolScope block_const_pool(masm);
+    masm->add(lr, pc, Operand(4));
+    __ str(lr, MemOperand(sp, 0));
+    masm->Jump(r5);
+  }
 
   if (always_allocate) {
     // It's okay to clobber r2 and r3 here. Don't mess with r0 and r1
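Aside on the comment in this hunk: on ARM, reading pc from an instruction yields that instruction's address plus 8, so add(lr, pc, Operand(4)) leaves lr pointing three instructions past the add. If the assembler dumped a literal pool between the add and the Jump, lr would point at pool data instead of the intended return point. A standalone, self-contained sketch of that arithmetic (illustrative addresses only, not part of the change):

#include <cassert>
#include <cstdint>

int main() {
  const uint32_t kInstrSize = 4;
  const uint32_t add_addr = 0x1000;          // address of add(lr, pc, Operand(4))
  const uint32_t pc_as_read = add_addr + 8;  // ARM: pc reads as current + 8
  const uint32_t lr = pc_as_read + 4;        // the extra Operand(4) above
  // lr is three instructions past the add: the str, the Jump, then the
  // instruction execution should resume at.
  assert(lr == add_addr + 3 * kInstrSize);
  // A literal pool emitted between the add and the Jump would put data,
  // not the return instruction, at lr.
  return 0;
}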
@@ -3956,14 +3960,21 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
   // Jump to a faked try block that does the invoke, with a faked catch
   // block that sets the pending exception.
   __ jmp(&invoke);
-  __ bind(&handler_entry);
-  handler_offset_ = handler_entry.pos();
-  // Caught exception: Store result (exception) in the pending exception
-  // field in the JSEnv and return a failure sentinel. Coming in here the
-  // fp will be invalid because the PushTryHandler below sets it to 0 to
-  // signal the existence of the JSEntry frame.
-  __ mov(ip, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
-                                       isolate)));
+
+  // Block literal pool emission whilst taking the position of the handler
+  // entry. This avoids making the assumption that literal pools are always
+  // emitted after an instruction is emitted, rather than before.
+  {
+    Assembler::BlockConstPoolScope block_const_pool(masm);
+    __ bind(&handler_entry);
+    handler_offset_ = handler_entry.pos();
+    // Caught exception: Store result (exception) in the pending exception
+    // field in the JSEnv and return a failure sentinel. Coming in here the
+    // fp will be invalid because the PushTryHandler below sets it to 0 to
+    // signal the existence of the JSEntry frame.
+    __ mov(ip, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
+                                         isolate)));
+  }
   __ str(r0, MemOperand(ip));
   __ mov(r0, Operand(reinterpret_cast<int32_t>(Failure::Exception())));
   __ b(&exit);
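For readers unfamiliar with the scope object used throughout this change: as far as I recall it is a small RAII helper on the ARM assembler that bumps a blocking counter for its lifetime, so no constant pool can be flushed while an instance is alive. The names below mirror that helper, but the snippet is only a self-contained sketch, not V8's actual definition:

#include <cassert>

// Sketch of the RAII pattern behind Assembler::BlockConstPoolScope: constant
// pool emission stays blocked for exactly as long as the scope is alive.
class Assembler {
 public:
  void StartBlockConstPool() { ++const_pool_blocked_nesting_; }
  void EndBlockConstPool() { --const_pool_blocked_nesting_; }
  bool is_const_pool_blocked() const { return const_pool_blocked_nesting_ > 0; }

  class BlockConstPoolScope {
   public:
    explicit BlockConstPoolScope(Assembler* assem) : assem_(assem) {
      assem_->StartBlockConstPool();
    }
    ~BlockConstPoolScope() { assem_->EndBlockConstPool(); }

   private:
    Assembler* assem_;
  };

 private:
  int const_pool_blocked_nesting_ = 0;
};

int main() {
  Assembler assm;
  {
    Assembler::BlockConstPoolScope block_const_pool(&assm);
    assert(assm.is_const_pool_blocked());   // no pool can be dumped here
  }
  assert(!assm.is_const_pool_blocked());    // emission allowed again
  return 0;
}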
@@ -4006,9 +4017,13 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
 
   // Branch and link to JSEntryTrampoline. We don't use the double underscore
   // macro for the add instruction because we don't want the coverage tool
-  // inserting instructions here after we read the pc.
-  __ mov(lr, Operand(pc));
-  masm->add(pc, ip, Operand(Code::kHeaderSize - kHeapObjectTag));
+  // inserting instructions here after we read the pc. We block literal pool
+  // emission for the same reason.
+  {
+    Assembler::BlockConstPoolScope block_const_pool(masm);
+    __ mov(lr, Operand(pc));
+    masm->add(pc, ip, Operand(Code::kHeaderSize - kHeapObjectTag));
+  }
 
   // Unlink this frame from the handler chain.
   __ PopTryHandler();
...@@ -6812,6 +6827,10 @@ void DirectCEntryStub::GenerateCall(MacroAssembler* masm, ...@@ -6812,6 +6827,10 @@ void DirectCEntryStub::GenerateCall(MacroAssembler* masm,
Register target) { Register target) {
__ mov(lr, Operand(reinterpret_cast<intptr_t>(GetCode().location()), __ mov(lr, Operand(reinterpret_cast<intptr_t>(GetCode().location()),
RelocInfo::CODE_TARGET)); RelocInfo::CODE_TARGET));
// Prevent literal pool emission during calculation of return address.
Assembler::BlockConstPoolScope block_const_pool(masm);
// Push return address (accessible to GC through exit frame pc). // Push return address (accessible to GC through exit frame pc).
// Note that using pc with str is deprecated. // Note that using pc with str is deprecated.
Label start; Label start;
@@ -7172,8 +7191,13 @@ void RecordWriteStub::Generate(MacroAssembler* masm) {
   // forth between a compare instructions (a nop in this position) and the
   // real branch when we start and stop incremental heap marking.
   // See RecordWriteStub::Patch for details.
-  __ b(&skip_to_incremental_noncompacting);
-  __ b(&skip_to_incremental_compacting);
+  {
+    // Block literal pool emission, as the position of these two instructions
+    // is assumed by the patching code.
+    Assembler::BlockConstPoolScope block_const_pool(masm);
+    __ b(&skip_to_incremental_noncompacting);
+    __ b(&skip_to_incremental_compacting);
+  }
 
   if (remembered_set_action_ == EMIT_REMEMBERED_SET) {
     __ RememberedSetHelper(object_,
......
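The RecordWriteStub hunk above works because RecordWriteStub::Patch rewrites instruction words at fixed offsets from the stub start; if a constant pool were emitted between the two branches, the patcher would overwrite pool data instead of an instruction. A toy, self-contained illustration of offset-based patching (illustrative only, none of this is V8 code):

#include <cassert>
#include <cstdint>

const uint32_t kNopInstr   = 0xE1A00000;  // mov r0, r0 (architectural no-op)
const uint32_t kBranchStub = 0xEA000000;  // b <offset> (placeholder encoding)
const int kInstrSize = 4;

// Rewrite the instruction word at a fixed byte offset from the code start.
void PatchWord(uint32_t* code, int byte_offset, uint32_t new_instr) {
  code[byte_offset / kInstrSize] = new_instr;
}

int main() {
  // Two patchable slots at the start of a stub, currently inert.
  uint32_t code[2] = {kNopInstr, kNopInstr};
  // "Start incremental marking": slot 0 becomes a real branch.
  PatchWord(code, 0 * kInstrSize, kBranchStub);
  assert(code[0] == kBranchStub && code[1] == kNopInstr);
  // "Stop marking": restore the no-op. This only works if the slot positions
  // never move, which is what blocking the constant pool guarantees.
  PatchWord(code, 0 * kInstrSize, kNopInstr);
  assert(code[0] == kNopInstr);
  return 0;
}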
@@ -73,9 +73,6 @@ class JumpPatchSite BASE_EMBEDDED {
     Assembler::BlockConstPoolScope block_const_pool(masm_);
     __ bind(&patch_site_);
     __ cmp(reg, Operand(reg));
-    // Don't use b(al, ...) as that might emit the constant pool right after the
-    // branch. After patching when the branch is no longer unconditional
-    // execution can continue into the constant pool.
     __ b(eq, target);  // Always taken before patched.
   }
@@ -90,6 +87,8 @@ class JumpPatchSite BASE_EMBEDDED {
   }
 
   void EmitPatchInfo() {
+    // Block literal pool emission whilst recording patch site information.
+    Assembler::BlockConstPoolScope block_const_pool(masm_);
     if (patch_site_.is_bound()) {
       int delta_to_patch_site = masm_->InstructionsGeneratedSince(&patch_site_);
       Register reg;
@@ -344,6 +343,8 @@ static const int kBackEdgeDistanceDivisor = 142;
 
 void FullCodeGenerator::EmitStackCheck(IterationStatement* stmt,
                                        Label* back_edge_target) {
   Comment cmnt(masm_, "[ Stack check");
+  // Block literal pools whilst emitting stack check code.
+  Assembler::BlockConstPoolScope block_const_pool(masm_);
   Label ok;
   if (FLAG_count_based_interrupts) {
......
@@ -571,6 +571,9 @@ void LCodeGen::CallCodeGeneric(Handle<Code> code,
                                LInstruction* instr,
                                SafepointMode safepoint_mode) {
   ASSERT(instr != NULL);
+  // Block literal pool emission to ensure nop indicating no inlined smi code
+  // is in the correct position.
+  Assembler::BlockConstPoolScope block_const_pool(masm());
   LPointerMap* pointers = instr->pointer_map();
   RecordPosition(pointers->position());
   __ Call(code, mode);
@@ -1685,6 +1688,9 @@ void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
   ASSERT(ToRegister(instr->result()).is(r0));
 
   BinaryOpStub stub(instr->op(), NO_OVERWRITE);
+  // Block literal pool emission to ensure nop indicating no inlined smi code
+  // is in the correct position.
+  Assembler::BlockConstPoolScope block_const_pool(masm());
   CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
   __ nop();  // Signals no inlined code.
 }
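Both of the hunks above keep the marker nop immediately after the call so that, as I understand it, the patching code that inspects the word right after the call's return address sees either the marker or the start of inlined smi code; a constant pool in between would make it read pool data. A toy illustration of that kind of check (hypothetical names, not the actual patcher):

#include <cassert>
#include <cstdint>

const uint32_t kNopInstr = 0xE1A00000;  // mov r0, r0, used here as the marker

// Hypothetical stand-in for the patcher's check: look at the word directly
// after the call's return address to decide whether inlined smi code exists.
bool HasInlinedSmiCode(const uint32_t* return_address) {
  return *return_address != kNopInstr;
}

int main() {
  // bl <stub> followed by the marker nop, with nothing emitted in between.
  const uint32_t code[2] = {0xEB000000 /* bl */, kNopInstr};
  assert(!HasInlinedSmiCode(&code[1]));  // marker found where expected
  return 0;
}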
@@ -2316,20 +2322,25 @@ void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
   Label cache_miss;
   Register map = temp;
   __ ldr(map, FieldMemOperand(object, HeapObject::kMapOffset));
-  __ bind(deferred->map_check());  // Label for calculating code patching.
-  // We use Factory::the_hole_value() on purpose instead of loading from the
-  // root array to force relocation to be able to later patch with
-  // the cached map.
-  Handle<JSGlobalPropertyCell> cell =
-      factory()->NewJSGlobalPropertyCell(factory()->the_hole_value());
-  __ mov(ip, Operand(Handle<Object>(cell)));
-  __ ldr(ip, FieldMemOperand(ip, JSGlobalPropertyCell::kValueOffset));
-  __ cmp(map, Operand(ip));
-  __ b(ne, &cache_miss);
-  // We use Factory::the_hole_value() on purpose instead of loading from the
-  // root array to force relocation to be able to later patch
-  // with true or false.
-  __ mov(result, Operand(factory()->the_hole_value()));
+  {
+    // Block constant pool emission to ensure the positions of instructions are
+    // as expected by the patcher. See InstanceofStub::Generate().
+    Assembler::BlockConstPoolScope block_const_pool(masm());
+    __ bind(deferred->map_check());  // Label for calculating code patching.
+    // We use Factory::the_hole_value() on purpose instead of loading from the
+    // root array to force relocation to be able to later patch with
+    // the cached map.
+    Handle<JSGlobalPropertyCell> cell =
+        factory()->NewJSGlobalPropertyCell(factory()->the_hole_value());
+    __ mov(ip, Operand(Handle<Object>(cell)));
+    __ ldr(ip, FieldMemOperand(ip, JSGlobalPropertyCell::kValueOffset));
+    __ cmp(map, Operand(ip));
+    __ b(ne, &cache_miss);
+    // We use Factory::the_hole_value() on purpose instead of loading from the
+    // root array to force relocation to be able to later patch
+    // with true or false.
+    __ mov(result, Operand(factory()->the_hole_value()));
+  }
   __ b(&done);
 
   // The inlined call site cache did not match. Check null and string before
@@ -5178,6 +5189,8 @@ void LCodeGen::EnsureSpaceForLazyDeopt() {
   int current_pc = masm()->pc_offset();
   int patch_size = Deoptimizer::patch_size();
   if (current_pc < last_lazy_deopt_pc_ + patch_size) {
+    // Block literal pool emission for duration of padding.
+    Assembler::BlockConstPoolScope block_const_pool(masm());
    int padding_size = last_lazy_deopt_pc_ + patch_size - current_pc;
    ASSERT_EQ(0, padding_size % Assembler::kInstrSize);
    while (padding_size > 0) {
......
@@ -92,6 +92,10 @@ bool DisassembleAndCompare(byte* pc, const char* compare_string) {
     if (!DisassembleAndCompare(progcounter, compare_string)) failure = true; \
   }
 
+// Force emission of any pending literals into a pool.
+#define EMIT_PENDING_LITERALS() \
+  assm.CheckConstPool(true, false)
+
 
 // Verify that all invocations of the COMPARE macro passed successfully.
 // Exit with a failure if at least one of the tests failed.
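The new macro calls the assembler's CheckConstPool entry point with what I believe are the force_emit and require_jump parameters; with force_emit true, the pending literal queued by the ldrne test below is written out immediately instead of landing in the middle of a later COMPARE. A toy, self-contained model of that pending-pool behaviour (not the real assembler):

#include <cassert>
#include <cstdint>
#include <vector>

// Toy model of a pending literal pool: an ldr-from-pc instruction queues a
// 32-bit literal that is written into the buffer later, either when the
// assembler decides it must be or when a flush is forced.
struct ToyAssembler {
  std::vector<uint32_t> buffer;        // emitted words (instructions + pools)
  std::vector<uint32_t> pending_pool;  // literals not yet written out

  void EmitLdrPcRelative(uint32_t literal) {
    buffer.push_back(0xE59FC000);      // ldr ip, [pc, #...]
    pending_pool.push_back(literal);
  }

  void CheckConstPool(bool force_emit) {
    if (!force_emit && pending_pool.size() < 16) return;  // arbitrary trigger
    buffer.insert(buffer.end(), pending_pool.begin(), pending_pool.end());
    pending_pool.clear();
  }
};

int main() {
  ToyAssembler assm;
  assm.EmitLdrPcRelative(0x01234);
  assm.CheckConstPool(true);  // like EMIT_PENDING_LITERALS(): flush right now
  assert(assm.pending_pool.empty());
  return 0;
}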
@@ -280,6 +284,10 @@ TEST(Type0) {
   // is pretty strange anyway.
   COMPARE(mov(r5, Operand(0x01234), SetCC, ne),
           "159fc000 ldrne ip, [pc, #+0]");
+  // Emit a literal pool now, otherwise this could be dumped later, in the
+  // middle of a different test.
+  EMIT_PENDING_LITERALS();
+
   // We only disassemble one instruction so the eor instruction is not here.
   // The eor does the setcc so we get a movw here.
   COMPARE(eor(r5, r4, Operand(0x1234), SetCC, ne),
......