Commit bc9ddf20 authored by sgjesse@chromium.org's avatar sgjesse@chromium.org

ARM: Port r7089 to ARM

Ensure that there are always enough bytes between consecutive calls in optimized code to write a call instruction at the return points without overlapping.

Add a call to deoptimize all functions after running tests with --stress-opt. This will catch some issues with functions which cannot be forcefully deoptimized. Without the rest of the changes in this change, some of the tests failed on ARM once that forced deoptimization was added.
Review URL: http://codereview.chromium.org/6661022

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@7132 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
parent 1d458703
......@@ -87,6 +87,11 @@ class V8EXPORT Testing {
* should be between 0 and one less than the result from GetStressRuns()
*/
static void PrepareStressRun(int run);
/**
* Force deoptimization of all functions.
*/
static void DeoptimizeAll();
};
......
......@@ -65,6 +65,11 @@ int RunMain(int argc, char* argv[]) {
// Create a new execution environment containing the built-in
// functions
v8::Persistent<v8::Context> context = v8::Context::New(NULL, global);
if (context.IsEmpty()) {
printf("Error creating context\n");
return 1;
}
bool run_shell = (argc == 1);
for (int i = 1; i < argc; i++) {
// Enter the execution environment before evaluating any code.
......@@ -139,6 +144,8 @@ int main(int argc, char* argv[]) {
v8::Testing::PrepareStressRun(i);
result = RunMain(argc, argv);
}
printf("======== Full Deoptimization =======\n");
v8::Testing::DeoptimizeAll();
} else {
result = RunMain(argc, argv);
}
......
......@@ -5143,6 +5143,11 @@ void Testing::PrepareStressRun(int run) {
}
// Force deoptimization of all optimized functions (see the declaration in
// the Testing API). Thin public wrapper over the internal deoptimizer;
// used by the shell after --stress-opt runs.
void Testing::DeoptimizeAll() {
  internal::Deoptimizer::DeoptimizeAll();
}
namespace internal {
......
......@@ -767,11 +767,35 @@ bool Operand::must_use_constant_pool() const {
}
bool Operand::is_single_instruction() const {
bool Operand::is_single_instruction(Instr instr) const {
if (rm_.is_valid()) return true;
if (must_use_constant_pool()) return false;
uint32_t dummy1, dummy2;
return fits_shifter(imm32_, &dummy1, &dummy2, NULL);
if (must_use_constant_pool() ||
!fits_shifter(imm32_, &dummy1, &dummy2, &instr)) {
// The immediate operand cannot be encoded as a shifter operand, or use of
// constant pool is required. For a mov instruction not setting the
// condition code additional instruction conventions can be used.
if ((instr & ~kCondMask) == 13*B21) { // mov, S not set
if (must_use_constant_pool() || !CpuFeatures::IsSupported(ARMv7)) {
// mov instruction will be an ldr from constant pool (one instruction).
return true;
} else {
// mov instruction will be a mov or movw followed by movt (two
// instructions).
return false;
}
} else {
// If this is not a mov or mvn instruction there will always an additional
// instructions - either mov or ldr. The mov might actually be two
// instructions mov or movw followed by movt so including the actual
// instruction two or three instructions will be generated.
return false;
}
} else {
// No use of constant pool and the immediate operand can be encoded as a
// shifter operand.
return true;
}
}
......
......@@ -389,8 +389,11 @@ class Operand BASE_EMBEDDED {
INLINE(bool is_reg() const);
// Return true if this operand fits in one instruction so that no
// 2-instruction solution with a load into the ip register is necessary.
bool is_single_instruction() const;
// 2-instruction solution with a load into the ip register is necessary. If
// the instruction this operand is used for is a MOV or MVN instruction the
// actual instruction to use is required for this calculation. For other
// instructions instr is ignored.
bool is_single_instruction(Instr instr = 0) const;
bool must_use_constant_pool() const;
inline int32_t immediate() const {
......
......@@ -46,6 +46,7 @@ int Deoptimizer::patch_size() {
void Deoptimizer::DeoptimizeFunction(JSFunction* function) {
HandleScope scope;
AssertNoAllocation no_allocation;
if (!function->IsOptimized()) return;
......@@ -69,8 +70,6 @@ void Deoptimizer::DeoptimizeFunction(JSFunction* function) {
int deoptimization_index = safepoint_entry.deoptimization_index();
int gap_code_size = safepoint_entry.gap_code_size();
// Check that we did not shoot past next safepoint.
// TODO(srdjan): How do we guarantee that safepoint code does not
// overlap other safepoint patching code?
CHECK(pc_offset >= last_pc_offset);
#ifdef DEBUG
// Destroy the code which is not supposed to be run again.
......@@ -117,6 +116,11 @@ void Deoptimizer::DeoptimizeFunction(JSFunction* function) {
PrintF("[forced deoptimization: ");
function->PrintName();
PrintF(" / %x]\n", reinterpret_cast<uint32_t>(function));
#ifdef DEBUG
if (FLAG_print_code) {
code->PrintLn();
}
#endif
}
}
......
......@@ -34,7 +34,7 @@ namespace v8 {
namespace internal {
class SafepointGenerator : public PostCallGenerator {
class SafepointGenerator : public CallWrapper {
public:
SafepointGenerator(LCodeGen* codegen,
LPointerMap* pointers,
......@@ -44,7 +44,24 @@ class SafepointGenerator : public PostCallGenerator {
deoptimization_index_(deoptimization_index) { }
virtual ~SafepointGenerator() { }
virtual void Generate() {
// Pads with nops so that the upcoming call (of |call_size| bytes) ends at
// least Deoptimizer::patch_size() bytes past the end of the previous
// safepoint. This guarantees the deoptimizer can later overwrite each
// return point with a call without the patches overlapping.
virtual void BeforeCall(int call_size) {
  ASSERT(call_size >= 0);
  // Ensure that we have enough space after the previous safepoint position
  // for the generated code there.
  int call_end = codegen_->masm()->pc_offset() + call_size;
  int prev_jump_end =
      codegen_->LastSafepointEnd() + Deoptimizer::patch_size();
  if (call_end < prev_jump_end) {
    int padding_size = prev_jump_end - call_end;
    // Padding must be a whole number of instructions; emit one nop per
    // instruction slot.
    ASSERT_EQ(0, padding_size % Assembler::kInstrSize);
    while (padding_size > 0) {
      codegen_->masm()->nop();
      padding_size -= Assembler::kInstrSize;
    }
  }
}
// Records safepoint data (pointer map and deopt index) at the return site
// of the call just emitted.
virtual void AfterCall() {
  codegen_->RecordSafepoint(pointers_, deoptimization_index_);
}
......
......@@ -229,6 +229,9 @@ class LCodeGen BASE_EMBEDDED {
int arguments,
int deoptimization_index);
void RecordPosition(int position);
// Returns the pc offset just past the gap code of the last recorded
// safepoint; used to decide how much nop padding is needed before the
// next call so deopt patches cannot overlap.
int LastSafepointEnd() {
  return static_cast<int>(safepoints_.GetPcAfterGap());
}
static Condition TokenToCondition(Token::Value op, bool is_unsigned);
void EmitGoto(int block, LDeferredCode* deferred_stack_check = NULL);
......
......@@ -103,19 +103,54 @@ void MacroAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
}
// Number of bytes emitted by Call(Register, Condition).
int MacroAssembler::CallSize(Register target, Condition cond) {
  // With BLX a single instruction suffices; otherwise the call is a pair of
  // mov instructions (set lr, then jump).
#if USE_BLX
  const int num_instructions = 1;
#else
  const int num_instructions = 2;
#endif
  return num_instructions * kInstrSize;
}
// Emits a call to the code at |target|. The number of bytes emitted must
// match CallSize(Register, Condition) exactly — verified by the debug
// check — so that call sites can be patched by the deoptimizer.
// NOTE: the scraped diff duplicated the pre-patch un-scoped mov pair next
// to the new BlockConstPoolScope version; this is the post-patch body.
void MacroAssembler::Call(Register target, Condition cond) {
#ifdef DEBUG
  int pre_position = pc_offset();
#endif

#if USE_BLX
  blx(target, cond);
#else
  // set lr for return at current pc + 8
  // Block the constant pool so no pool entry can be emitted between the two
  // movs; lr's value depends on them being adjacent.
  { BlockConstPoolScope block_const_pool(this);
    mov(lr, Operand(pc), LeaveCC, cond);
    mov(pc, Operand(target), LeaveCC, cond);
  }
#endif

#ifdef DEBUG
  int post_position = pc_offset();
  CHECK_EQ(pre_position + CallSize(target, cond), post_position);
#endif
}
void MacroAssembler::Call(intptr_t target, RelocInfo::Mode rmode,
Condition cond) {
// Number of bytes emitted by Call(intptr_t, RelocInfo::Mode, Condition).
int MacroAssembler::CallSize(
    intptr_t target, RelocInfo::Mode rmode, Condition cond) {
  // Base cost is two instructions; materializing the target costs one more
  // unless the mov of the target can be encoded in a single instruction.
  Instr mov_instr = cond | MOV | LeaveCC;
  Operand target_operand(target, rmode);
  int num_instructions =
      target_operand.is_single_instruction(mov_instr) ? 2 : 3;
  return num_instructions * kInstrSize;
}
void MacroAssembler::Call(
intptr_t target, RelocInfo::Mode rmode, Condition cond) {
#ifdef DEBUG
int pre_position = pc_offset();
#endif
#if USE_BLX
// On ARMv5 and after the recommended call sequence is:
// ldr ip, [pc, #...]
......@@ -137,28 +172,64 @@ void MacroAssembler::Call(intptr_t target, RelocInfo::Mode rmode,
ASSERT(kCallTargetAddressOffset == 2 * kInstrSize);
#else
// Set lr for return at current pc + 8.
mov(lr, Operand(pc), LeaveCC, cond);
// Emit a ldr<cond> pc, [pc + offset of target in constant pool].
mov(pc, Operand(target, rmode), LeaveCC, cond);
{ BlockConstPoolScope block_const_pool(this);
// Set lr for return at current pc + 8.
mov(lr, Operand(pc), LeaveCC, cond);
// Emit a ldr<cond> pc, [pc + offset of target in constant pool].
mov(pc, Operand(target, rmode), LeaveCC, cond);
}
ASSERT(kCallTargetAddressOffset == kInstrSize);
#endif
#ifdef DEBUG
int post_position = pc_offset();
CHECK_EQ(pre_position + CallSize(target, rmode, cond), post_position);
#endif
}
void MacroAssembler::Call(byte* target, RelocInfo::Mode rmode,
Condition cond) {
// Number of bytes emitted by Call(byte*, RelocInfo::Mode, Condition).
int MacroAssembler::CallSize(
    byte* target, RelocInfo::Mode rmode, Condition cond) {
  // Forward cond as well, matching the Handle<Code> overload; previously it
  // was dropped here and silently defaulted to al, so the computed size
  // could diverge from the code actually emitted for conditional calls.
  return CallSize(reinterpret_cast<intptr_t>(target), rmode, cond);
}
// Emits a call to |target|, which must not be a code-object target
// (asserted below). Delegates to the intptr_t overload; the debug check
// verifies the emitted size matches CallSize.
void MacroAssembler::Call(
    byte* target, RelocInfo::Mode rmode, Condition cond) {
#ifdef DEBUG
  int pre_position = pc_offset();
#endif
  ASSERT(!RelocInfo::IsCodeTarget(rmode));
  Call(reinterpret_cast<intptr_t>(target), rmode, cond);
#ifdef DEBUG
  int post_position = pc_offset();
  CHECK_EQ(pre_position + CallSize(target, rmode, cond), post_position);
#endif
}
void MacroAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode,
Condition cond) {
// Number of bytes emitted by Call(Handle<Code>, RelocInfo::Mode, Condition).
int MacroAssembler::CallSize(
    Handle<Code> code, RelocInfo::Mode rmode, Condition cond) {
  // Delegate to the intptr_t overload using the handle's location.
  intptr_t target = reinterpret_cast<intptr_t>(code.location());
  return CallSize(target, rmode, cond);
}
// Emits a call to the given code object; rmode must be a code target.
// Delegates to the intptr_t overload; the debug check verifies the emitted
// size matches CallSize.
void MacroAssembler::Call(
    Handle<Code> code, RelocInfo::Mode rmode, Condition cond) {
#ifdef DEBUG
  int pre_position = pc_offset();
#endif
  ASSERT(RelocInfo::IsCodeTarget(rmode));
  // 'code' is always generated ARM code, never THUMB code
  Call(reinterpret_cast<intptr_t>(code.location()), rmode, cond);
#ifdef DEBUG
  int post_position = pc_offset();
  CHECK_EQ(pre_position + CallSize(code, rmode, cond), post_position);
#endif
}
......@@ -784,7 +855,7 @@ void MacroAssembler::InvokePrologue(const ParameterCount& expected,
Register code_reg,
Label* done,
InvokeFlag flag,
PostCallGenerator* post_call_generator) {
CallWrapper* call_wrapper) {
bool definitely_matches = false;
Label regular_invoke;
......@@ -839,8 +910,11 @@ void MacroAssembler::InvokePrologue(const ParameterCount& expected,
Handle<Code> adaptor =
Handle<Code>(Builtins::builtin(Builtins::ArgumentsAdaptorTrampoline));
if (flag == CALL_FUNCTION) {
if (call_wrapper != NULL) {
call_wrapper->BeforeCall(CallSize(adaptor, RelocInfo::CODE_TARGET));
}
Call(adaptor, RelocInfo::CODE_TARGET);
if (post_call_generator != NULL) post_call_generator->Generate();
if (call_wrapper != NULL) call_wrapper->AfterCall();
b(done);
} else {
Jump(adaptor, RelocInfo::CODE_TARGET);
......@@ -854,14 +928,15 @@ void MacroAssembler::InvokeCode(Register code,
const ParameterCount& expected,
const ParameterCount& actual,
InvokeFlag flag,
PostCallGenerator* post_call_generator) {
CallWrapper* call_wrapper) {
Label done;
InvokePrologue(expected, actual, Handle<Code>::null(), code, &done, flag,
post_call_generator);
call_wrapper);
if (flag == CALL_FUNCTION) {
if (call_wrapper != NULL) call_wrapper->BeforeCall(CallSize(code));
Call(code);
if (post_call_generator != NULL) post_call_generator->Generate();
if (call_wrapper != NULL) call_wrapper->AfterCall();
} else {
ASSERT(flag == JUMP_FUNCTION);
Jump(code);
......@@ -896,7 +971,7 @@ void MacroAssembler::InvokeCode(Handle<Code> code,
void MacroAssembler::InvokeFunction(Register fun,
const ParameterCount& actual,
InvokeFlag flag,
PostCallGenerator* post_call_generator) {
CallWrapper* call_wrapper) {
// Contract with called JS functions requires that function is passed in r1.
ASSERT(fun.is(r1));
......@@ -913,7 +988,7 @@ void MacroAssembler::InvokeFunction(Register fun,
FieldMemOperand(r1, JSFunction::kCodeEntryOffset));
ParameterCount expected(expected_reg);
InvokeCode(code_reg, expected, actual, flag, post_call_generator);
InvokeCode(code_reg, expected, actual, flag, call_wrapper);
}
......@@ -2083,11 +2158,12 @@ MaybeObject* MacroAssembler::TryJumpToExternalReference(
void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id,
InvokeJSFlags flags,
PostCallGenerator* post_call_generator) {
CallWrapper* call_wrapper) {
GetBuiltinEntry(r2, id);
if (flags == CALL_JS) {
if (call_wrapper != NULL) call_wrapper->BeforeCall(CallSize(r2));
Call(r2);
if (post_call_generator != NULL) post_call_generator->Generate();
if (call_wrapper != NULL) call_wrapper->AfterCall();
} else {
ASSERT(flags == JUMP_JS);
Jump(r2);
......
......@@ -34,7 +34,7 @@ namespace v8 {
namespace internal {
// Forward declaration.
class PostCallGenerator;
class CallWrapper;
// ----------------------------------------------------------------------------
// Static helper functions
......@@ -96,8 +96,11 @@ class MacroAssembler: public Assembler {
void Jump(Register target, Condition cond = al);
void Jump(byte* target, RelocInfo::Mode rmode, Condition cond = al);
void Jump(Handle<Code> code, RelocInfo::Mode rmode, Condition cond = al);
int CallSize(Register target, Condition cond = al);
void Call(Register target, Condition cond = al);
int CallSize(byte* target, RelocInfo::Mode rmode, Condition cond = al);
void Call(byte* target, RelocInfo::Mode rmode, Condition cond = al);
int CallSize(Handle<Code> code, RelocInfo::Mode rmode, Condition cond = al);
void Call(Handle<Code> code, RelocInfo::Mode rmode, Condition cond = al);
void Ret(Condition cond = al);
......@@ -343,7 +346,7 @@ class MacroAssembler: public Assembler {
const ParameterCount& expected,
const ParameterCount& actual,
InvokeFlag flag,
PostCallGenerator* post_call_generator = NULL);
CallWrapper* call_wrapper = NULL);
void InvokeCode(Handle<Code> code,
const ParameterCount& expected,
......@@ -356,7 +359,7 @@ class MacroAssembler: public Assembler {
void InvokeFunction(Register function,
const ParameterCount& actual,
InvokeFlag flag,
PostCallGenerator* post_call_generator = NULL);
CallWrapper* call_wrapper = NULL);
void InvokeFunction(JSFunction* function,
const ParameterCount& actual,
......@@ -748,7 +751,7 @@ class MacroAssembler: public Assembler {
// the unresolved list if the name does not resolve.
void InvokeBuiltin(Builtins::JavaScript id,
InvokeJSFlags flags,
PostCallGenerator* post_call_generator = NULL);
CallWrapper* call_wrapper = NULL);
// Store the code object for the given builtin in the target register and
// setup the function in r1.
......@@ -911,6 +914,7 @@ class MacroAssembler: public Assembler {
private:
void Jump(intptr_t target, RelocInfo::Mode rmode, Condition cond = al);
int CallSize(intptr_t target, RelocInfo::Mode rmode, Condition cond = al);
void Call(intptr_t target, RelocInfo::Mode rmode, Condition cond = al);
// Helper functions for generating invokes.
......@@ -920,7 +924,7 @@ class MacroAssembler: public Assembler {
Register code_reg,
Label* done,
InvokeFlag flag,
PostCallGenerator* post_call_generator = NULL);
CallWrapper* call_wrapper = NULL);
// Activation support.
void EnterFrame(StackFrame::Type type);
......@@ -984,11 +988,15 @@ class CodePatcher {
// Helper class for generating code or data associated with the code
// right after a call instruction. As an example this can be used to
// generate safepoint data after calls for crankshaft.
// Helper interface for generating code or data associated with a call
// instruction: BeforeCall runs just before the call is emitted (e.g. to pad
// so deopt patches cannot overlap) and AfterCall runs at the return site
// (e.g. to record safepoint data for crankshaft).
// NOTE: the scraped diff interleaved the pre-patch PostCallGenerator lines
// with this class; this is the coherent post-patch declaration.
class CallWrapper {
 public:
  CallWrapper() { }
  virtual ~CallWrapper() { }
  // Called just before emitting a call. Argument is the size of the generated
  // call code.
  virtual void BeforeCall(int call_size) = 0;
  // Called just after emitting a call, i.e., at the return site for the call.
  virtual void AfterCall() = 0;
};
......
......@@ -56,6 +56,7 @@ static void ZapCodeRange(Address start, Address end) {
void Deoptimizer::DeoptimizeFunction(JSFunction* function) {
HandleScope scope;
AssertNoAllocation no_allocation;
if (!function->IsOptimized()) return;
......@@ -132,6 +133,11 @@ void Deoptimizer::DeoptimizeFunction(JSFunction* function) {
PrintF("[forced deoptimization: ");
function->PrintName();
PrintF(" / %x]\n", reinterpret_cast<uint32_t>(function));
#ifdef DEBUG
if (FLAG_print_code) {
code->PrintLn();
}
#endif
}
}
......
......@@ -102,6 +102,7 @@ class SafepointTableDeoptimiztionEntryIterator {
void Deoptimizer::DeoptimizeFunction(JSFunction* function) {
HandleScope scope;
AssertNoAllocation no_allocation;
if (!function->IsOptimized()) return;
......@@ -196,6 +197,11 @@ void Deoptimizer::DeoptimizeFunction(JSFunction* function) {
PrintF("[forced deoptimization: ");
function->PrintName();
PrintF(" / %" V8PRIxPTR "]\n", reinterpret_cast<intptr_t>(function));
#ifdef DEBUG
if (FLAG_print_code) {
code->PrintLn();
}
#endif
}
}
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment