Commit bc9ddf20 authored by sgjesse@chromium.org's avatar sgjesse@chromium.org

ARM: Port r7089 to ARM

Ensure that there are always enough bytes between consecutive calls in optimized code to write a call instruction at the return points without overlapping.

Add a call to deoptimize all functions after running tests with --stress-opt. This will catch some issues with functions which cannot be forcefully deoptimized. Without the rest of the changes in this commit, some of the tests failed on ARM when that deoptimize-all call was added.
Review URL: http://codereview.chromium.org/6661022

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@7132 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
parent 1d458703
...@@ -87,6 +87,11 @@ class V8EXPORT Testing { ...@@ -87,6 +87,11 @@ class V8EXPORT Testing {
* should be between 0 and one less than the result from GetStressRuns() * should be between 0 and one less than the result from GetStressRuns()
*/ */
static void PrepareStressRun(int run); static void PrepareStressRun(int run);
/**
* Force deoptimization of all functions.
*/
static void DeoptimizeAll();
}; };
......
...@@ -65,6 +65,11 @@ int RunMain(int argc, char* argv[]) { ...@@ -65,6 +65,11 @@ int RunMain(int argc, char* argv[]) {
// Create a new execution environment containing the built-in // Create a new execution environment containing the built-in
// functions // functions
v8::Persistent<v8::Context> context = v8::Context::New(NULL, global); v8::Persistent<v8::Context> context = v8::Context::New(NULL, global);
if (context.IsEmpty()) {
printf("Error creating context\n");
return 1;
}
bool run_shell = (argc == 1); bool run_shell = (argc == 1);
for (int i = 1; i < argc; i++) { for (int i = 1; i < argc; i++) {
// Enter the execution environment before evaluating any code. // Enter the execution environment before evaluating any code.
...@@ -139,6 +144,8 @@ int main(int argc, char* argv[]) { ...@@ -139,6 +144,8 @@ int main(int argc, char* argv[]) {
v8::Testing::PrepareStressRun(i); v8::Testing::PrepareStressRun(i);
result = RunMain(argc, argv); result = RunMain(argc, argv);
} }
printf("======== Full Deoptimization =======\n");
v8::Testing::DeoptimizeAll();
} else { } else {
result = RunMain(argc, argv); result = RunMain(argc, argv);
} }
......
...@@ -5143,6 +5143,11 @@ void Testing::PrepareStressRun(int run) { ...@@ -5143,6 +5143,11 @@ void Testing::PrepareStressRun(int run) {
} }
void Testing::DeoptimizeAll() {
internal::Deoptimizer::DeoptimizeAll();
}
namespace internal { namespace internal {
......
...@@ -767,11 +767,35 @@ bool Operand::must_use_constant_pool() const { ...@@ -767,11 +767,35 @@ bool Operand::must_use_constant_pool() const {
} }
bool Operand::is_single_instruction() const { bool Operand::is_single_instruction(Instr instr) const {
if (rm_.is_valid()) return true; if (rm_.is_valid()) return true;
if (must_use_constant_pool()) return false;
uint32_t dummy1, dummy2; uint32_t dummy1, dummy2;
return fits_shifter(imm32_, &dummy1, &dummy2, NULL); if (must_use_constant_pool() ||
!fits_shifter(imm32_, &dummy1, &dummy2, &instr)) {
// The immediate operand cannot be encoded as a shifter operand, or use of
// constant pool is required. For a mov instruction not setting the
// condition code additional instruction conventions can be used.
if ((instr & ~kCondMask) == 13*B21) { // mov, S not set
if (must_use_constant_pool() || !CpuFeatures::IsSupported(ARMv7)) {
// mov instruction will be an ldr from constant pool (one instruction).
return true;
} else {
// mov instruction will be a mov or movw followed by movt (two
// instructions).
return false;
}
} else {
// If this is not a mov or mvn instruction there will always an additional
// instructions - either mov or ldr. The mov might actually be two
// instructions mov or movw followed by movt so including the actual
// instruction two or three instructions will be generated.
return false;
}
} else {
// No use of constant pool and the immediate operand can be encoded as a
// shifter operand.
return true;
}
} }
......
...@@ -389,8 +389,11 @@ class Operand BASE_EMBEDDED { ...@@ -389,8 +389,11 @@ class Operand BASE_EMBEDDED {
INLINE(bool is_reg() const); INLINE(bool is_reg() const);
// Return true if this operand fits in one instruction so that no // Return true if this operand fits in one instruction so that no
// 2-instruction solution with a load into the ip register is necessary. // 2-instruction solution with a load into the ip register is necessary. If
bool is_single_instruction() const; // the instruction this operand is used for is a MOV or MVN instruction the
// actual instruction to use is required for this calculation. For other
// instructions instr is ignored.
bool is_single_instruction(Instr instr = 0) const;
bool must_use_constant_pool() const; bool must_use_constant_pool() const;
inline int32_t immediate() const { inline int32_t immediate() const {
......
...@@ -46,6 +46,7 @@ int Deoptimizer::patch_size() { ...@@ -46,6 +46,7 @@ int Deoptimizer::patch_size() {
void Deoptimizer::DeoptimizeFunction(JSFunction* function) { void Deoptimizer::DeoptimizeFunction(JSFunction* function) {
HandleScope scope;
AssertNoAllocation no_allocation; AssertNoAllocation no_allocation;
if (!function->IsOptimized()) return; if (!function->IsOptimized()) return;
...@@ -69,8 +70,6 @@ void Deoptimizer::DeoptimizeFunction(JSFunction* function) { ...@@ -69,8 +70,6 @@ void Deoptimizer::DeoptimizeFunction(JSFunction* function) {
int deoptimization_index = safepoint_entry.deoptimization_index(); int deoptimization_index = safepoint_entry.deoptimization_index();
int gap_code_size = safepoint_entry.gap_code_size(); int gap_code_size = safepoint_entry.gap_code_size();
// Check that we did not shoot past next safepoint. // Check that we did not shoot past next safepoint.
// TODO(srdjan): How do we guarantee that safepoint code does not
// overlap other safepoint patching code?
CHECK(pc_offset >= last_pc_offset); CHECK(pc_offset >= last_pc_offset);
#ifdef DEBUG #ifdef DEBUG
// Destroy the code which is not supposed to be run again. // Destroy the code which is not supposed to be run again.
...@@ -117,6 +116,11 @@ void Deoptimizer::DeoptimizeFunction(JSFunction* function) { ...@@ -117,6 +116,11 @@ void Deoptimizer::DeoptimizeFunction(JSFunction* function) {
PrintF("[forced deoptimization: "); PrintF("[forced deoptimization: ");
function->PrintName(); function->PrintName();
PrintF(" / %x]\n", reinterpret_cast<uint32_t>(function)); PrintF(" / %x]\n", reinterpret_cast<uint32_t>(function));
#ifdef DEBUG
if (FLAG_print_code) {
code->PrintLn();
}
#endif
} }
} }
......
...@@ -34,7 +34,7 @@ namespace v8 { ...@@ -34,7 +34,7 @@ namespace v8 {
namespace internal { namespace internal {
class SafepointGenerator : public PostCallGenerator { class SafepointGenerator : public CallWrapper {
public: public:
SafepointGenerator(LCodeGen* codegen, SafepointGenerator(LCodeGen* codegen,
LPointerMap* pointers, LPointerMap* pointers,
...@@ -44,7 +44,24 @@ class SafepointGenerator : public PostCallGenerator { ...@@ -44,7 +44,24 @@ class SafepointGenerator : public PostCallGenerator {
deoptimization_index_(deoptimization_index) { } deoptimization_index_(deoptimization_index) { }
virtual ~SafepointGenerator() { } virtual ~SafepointGenerator() { }
virtual void Generate() { virtual void BeforeCall(int call_size) {
ASSERT(call_size >= 0);
// Ensure that we have enough space after the previous safepoint position
// for the generated code there.
int call_end = codegen_->masm()->pc_offset() + call_size;
int prev_jump_end =
codegen_->LastSafepointEnd() + Deoptimizer::patch_size();
if (call_end < prev_jump_end) {
int padding_size = prev_jump_end - call_end;
ASSERT_EQ(0, padding_size % Assembler::kInstrSize);
while (padding_size > 0) {
codegen_->masm()->nop();
padding_size -= Assembler::kInstrSize;
}
}
}
virtual void AfterCall() {
codegen_->RecordSafepoint(pointers_, deoptimization_index_); codegen_->RecordSafepoint(pointers_, deoptimization_index_);
} }
......
...@@ -229,6 +229,9 @@ class LCodeGen BASE_EMBEDDED { ...@@ -229,6 +229,9 @@ class LCodeGen BASE_EMBEDDED {
int arguments, int arguments,
int deoptimization_index); int deoptimization_index);
void RecordPosition(int position); void RecordPosition(int position);
int LastSafepointEnd() {
return static_cast<int>(safepoints_.GetPcAfterGap());
}
static Condition TokenToCondition(Token::Value op, bool is_unsigned); static Condition TokenToCondition(Token::Value op, bool is_unsigned);
void EmitGoto(int block, LDeferredCode* deferred_stack_check = NULL); void EmitGoto(int block, LDeferredCode* deferred_stack_check = NULL);
......
...@@ -103,19 +103,54 @@ void MacroAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode, ...@@ -103,19 +103,54 @@ void MacroAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
} }
int MacroAssembler::CallSize(Register target, Condition cond) {
#if USE_BLX
return kInstrSize;
#else
return 2 * kInstrSize;
#endif
}
void MacroAssembler::Call(Register target, Condition cond) { void MacroAssembler::Call(Register target, Condition cond) {
#ifdef DEBUG
int pre_position = pc_offset();
#endif
#if USE_BLX #if USE_BLX
blx(target, cond); blx(target, cond);
#else #else
// set lr for return at current pc + 8 // set lr for return at current pc + 8
mov(lr, Operand(pc), LeaveCC, cond); { BlockConstPoolScope block_const_pool(this);
mov(pc, Operand(target), LeaveCC, cond); mov(lr, Operand(pc), LeaveCC, cond);
mov(pc, Operand(target), LeaveCC, cond);
}
#endif
#ifdef DEBUG
int post_position = pc_offset();
CHECK_EQ(pre_position + CallSize(target, cond), post_position);
#endif #endif
} }
void MacroAssembler::Call(intptr_t target, RelocInfo::Mode rmode, int MacroAssembler::CallSize(
Condition cond) { intptr_t target, RelocInfo::Mode rmode, Condition cond) {
int size = 2 * kInstrSize;
Instr mov_instr = cond | MOV | LeaveCC;
if (!Operand(target, rmode).is_single_instruction(mov_instr)) {
size += kInstrSize;
}
return size;
}
void MacroAssembler::Call(
intptr_t target, RelocInfo::Mode rmode, Condition cond) {
#ifdef DEBUG
int pre_position = pc_offset();
#endif
#if USE_BLX #if USE_BLX
// On ARMv5 and after the recommended call sequence is: // On ARMv5 and after the recommended call sequence is:
// ldr ip, [pc, #...] // ldr ip, [pc, #...]
...@@ -137,28 +172,64 @@ void MacroAssembler::Call(intptr_t target, RelocInfo::Mode rmode, ...@@ -137,28 +172,64 @@ void MacroAssembler::Call(intptr_t target, RelocInfo::Mode rmode,
ASSERT(kCallTargetAddressOffset == 2 * kInstrSize); ASSERT(kCallTargetAddressOffset == 2 * kInstrSize);
#else #else
// Set lr for return at current pc + 8. { BlockConstPoolScope block_const_pool(this);
mov(lr, Operand(pc), LeaveCC, cond); // Set lr for return at current pc + 8.
// Emit a ldr<cond> pc, [pc + offset of target in constant pool]. mov(lr, Operand(pc), LeaveCC, cond);
mov(pc, Operand(target, rmode), LeaveCC, cond); // Emit a ldr<cond> pc, [pc + offset of target in constant pool].
mov(pc, Operand(target, rmode), LeaveCC, cond);
}
ASSERT(kCallTargetAddressOffset == kInstrSize); ASSERT(kCallTargetAddressOffset == kInstrSize);
#endif #endif
#ifdef DEBUG
int post_position = pc_offset();
CHECK_EQ(pre_position + CallSize(target, rmode, cond), post_position);
#endif
} }
void MacroAssembler::Call(byte* target, RelocInfo::Mode rmode, int MacroAssembler::CallSize(
Condition cond) { byte* target, RelocInfo::Mode rmode, Condition cond) {
return CallSize(reinterpret_cast<intptr_t>(target), rmode);
}
void MacroAssembler::Call(
byte* target, RelocInfo::Mode rmode, Condition cond) {
#ifdef DEBUG
int pre_position = pc_offset();
#endif
ASSERT(!RelocInfo::IsCodeTarget(rmode)); ASSERT(!RelocInfo::IsCodeTarget(rmode));
Call(reinterpret_cast<intptr_t>(target), rmode, cond); Call(reinterpret_cast<intptr_t>(target), rmode, cond);
#ifdef DEBUG
int post_position = pc_offset();
CHECK_EQ(pre_position + CallSize(target, rmode, cond), post_position);
#endif
} }
void MacroAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode, int MacroAssembler::CallSize(
Condition cond) { Handle<Code> code, RelocInfo::Mode rmode, Condition cond) {
return CallSize(reinterpret_cast<intptr_t>(code.location()), rmode, cond);
}
void MacroAssembler::Call(
Handle<Code> code, RelocInfo::Mode rmode, Condition cond) {
#ifdef DEBUG
int pre_position = pc_offset();
#endif
ASSERT(RelocInfo::IsCodeTarget(rmode)); ASSERT(RelocInfo::IsCodeTarget(rmode));
// 'code' is always generated ARM code, never THUMB code // 'code' is always generated ARM code, never THUMB code
Call(reinterpret_cast<intptr_t>(code.location()), rmode, cond); Call(reinterpret_cast<intptr_t>(code.location()), rmode, cond);
#ifdef DEBUG
int post_position = pc_offset();
CHECK_EQ(pre_position + CallSize(code, rmode, cond), post_position);
#endif
} }
...@@ -784,7 +855,7 @@ void MacroAssembler::InvokePrologue(const ParameterCount& expected, ...@@ -784,7 +855,7 @@ void MacroAssembler::InvokePrologue(const ParameterCount& expected,
Register code_reg, Register code_reg,
Label* done, Label* done,
InvokeFlag flag, InvokeFlag flag,
PostCallGenerator* post_call_generator) { CallWrapper* call_wrapper) {
bool definitely_matches = false; bool definitely_matches = false;
Label regular_invoke; Label regular_invoke;
...@@ -839,8 +910,11 @@ void MacroAssembler::InvokePrologue(const ParameterCount& expected, ...@@ -839,8 +910,11 @@ void MacroAssembler::InvokePrologue(const ParameterCount& expected,
Handle<Code> adaptor = Handle<Code> adaptor =
Handle<Code>(Builtins::builtin(Builtins::ArgumentsAdaptorTrampoline)); Handle<Code>(Builtins::builtin(Builtins::ArgumentsAdaptorTrampoline));
if (flag == CALL_FUNCTION) { if (flag == CALL_FUNCTION) {
if (call_wrapper != NULL) {
call_wrapper->BeforeCall(CallSize(adaptor, RelocInfo::CODE_TARGET));
}
Call(adaptor, RelocInfo::CODE_TARGET); Call(adaptor, RelocInfo::CODE_TARGET);
if (post_call_generator != NULL) post_call_generator->Generate(); if (call_wrapper != NULL) call_wrapper->AfterCall();
b(done); b(done);
} else { } else {
Jump(adaptor, RelocInfo::CODE_TARGET); Jump(adaptor, RelocInfo::CODE_TARGET);
...@@ -854,14 +928,15 @@ void MacroAssembler::InvokeCode(Register code, ...@@ -854,14 +928,15 @@ void MacroAssembler::InvokeCode(Register code,
const ParameterCount& expected, const ParameterCount& expected,
const ParameterCount& actual, const ParameterCount& actual,
InvokeFlag flag, InvokeFlag flag,
PostCallGenerator* post_call_generator) { CallWrapper* call_wrapper) {
Label done; Label done;
InvokePrologue(expected, actual, Handle<Code>::null(), code, &done, flag, InvokePrologue(expected, actual, Handle<Code>::null(), code, &done, flag,
post_call_generator); call_wrapper);
if (flag == CALL_FUNCTION) { if (flag == CALL_FUNCTION) {
if (call_wrapper != NULL) call_wrapper->BeforeCall(CallSize(code));
Call(code); Call(code);
if (post_call_generator != NULL) post_call_generator->Generate(); if (call_wrapper != NULL) call_wrapper->AfterCall();
} else { } else {
ASSERT(flag == JUMP_FUNCTION); ASSERT(flag == JUMP_FUNCTION);
Jump(code); Jump(code);
...@@ -896,7 +971,7 @@ void MacroAssembler::InvokeCode(Handle<Code> code, ...@@ -896,7 +971,7 @@ void MacroAssembler::InvokeCode(Handle<Code> code,
void MacroAssembler::InvokeFunction(Register fun, void MacroAssembler::InvokeFunction(Register fun,
const ParameterCount& actual, const ParameterCount& actual,
InvokeFlag flag, InvokeFlag flag,
PostCallGenerator* post_call_generator) { CallWrapper* call_wrapper) {
// Contract with called JS functions requires that function is passed in r1. // Contract with called JS functions requires that function is passed in r1.
ASSERT(fun.is(r1)); ASSERT(fun.is(r1));
...@@ -913,7 +988,7 @@ void MacroAssembler::InvokeFunction(Register fun, ...@@ -913,7 +988,7 @@ void MacroAssembler::InvokeFunction(Register fun,
FieldMemOperand(r1, JSFunction::kCodeEntryOffset)); FieldMemOperand(r1, JSFunction::kCodeEntryOffset));
ParameterCount expected(expected_reg); ParameterCount expected(expected_reg);
InvokeCode(code_reg, expected, actual, flag, post_call_generator); InvokeCode(code_reg, expected, actual, flag, call_wrapper);
} }
...@@ -2083,11 +2158,12 @@ MaybeObject* MacroAssembler::TryJumpToExternalReference( ...@@ -2083,11 +2158,12 @@ MaybeObject* MacroAssembler::TryJumpToExternalReference(
void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id, void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id,
InvokeJSFlags flags, InvokeJSFlags flags,
PostCallGenerator* post_call_generator) { CallWrapper* call_wrapper) {
GetBuiltinEntry(r2, id); GetBuiltinEntry(r2, id);
if (flags == CALL_JS) { if (flags == CALL_JS) {
if (call_wrapper != NULL) call_wrapper->BeforeCall(CallSize(r2));
Call(r2); Call(r2);
if (post_call_generator != NULL) post_call_generator->Generate(); if (call_wrapper != NULL) call_wrapper->AfterCall();
} else { } else {
ASSERT(flags == JUMP_JS); ASSERT(flags == JUMP_JS);
Jump(r2); Jump(r2);
......
...@@ -34,7 +34,7 @@ namespace v8 { ...@@ -34,7 +34,7 @@ namespace v8 {
namespace internal { namespace internal {
// Forward declaration. // Forward declaration.
class PostCallGenerator; class CallWrapper;
// ---------------------------------------------------------------------------- // ----------------------------------------------------------------------------
// Static helper functions // Static helper functions
...@@ -96,8 +96,11 @@ class MacroAssembler: public Assembler { ...@@ -96,8 +96,11 @@ class MacroAssembler: public Assembler {
void Jump(Register target, Condition cond = al); void Jump(Register target, Condition cond = al);
void Jump(byte* target, RelocInfo::Mode rmode, Condition cond = al); void Jump(byte* target, RelocInfo::Mode rmode, Condition cond = al);
void Jump(Handle<Code> code, RelocInfo::Mode rmode, Condition cond = al); void Jump(Handle<Code> code, RelocInfo::Mode rmode, Condition cond = al);
int CallSize(Register target, Condition cond = al);
void Call(Register target, Condition cond = al); void Call(Register target, Condition cond = al);
int CallSize(byte* target, RelocInfo::Mode rmode, Condition cond = al);
void Call(byte* target, RelocInfo::Mode rmode, Condition cond = al); void Call(byte* target, RelocInfo::Mode rmode, Condition cond = al);
int CallSize(Handle<Code> code, RelocInfo::Mode rmode, Condition cond = al);
void Call(Handle<Code> code, RelocInfo::Mode rmode, Condition cond = al); void Call(Handle<Code> code, RelocInfo::Mode rmode, Condition cond = al);
void Ret(Condition cond = al); void Ret(Condition cond = al);
...@@ -343,7 +346,7 @@ class MacroAssembler: public Assembler { ...@@ -343,7 +346,7 @@ class MacroAssembler: public Assembler {
const ParameterCount& expected, const ParameterCount& expected,
const ParameterCount& actual, const ParameterCount& actual,
InvokeFlag flag, InvokeFlag flag,
PostCallGenerator* post_call_generator = NULL); CallWrapper* call_wrapper = NULL);
void InvokeCode(Handle<Code> code, void InvokeCode(Handle<Code> code,
const ParameterCount& expected, const ParameterCount& expected,
...@@ -356,7 +359,7 @@ class MacroAssembler: public Assembler { ...@@ -356,7 +359,7 @@ class MacroAssembler: public Assembler {
void InvokeFunction(Register function, void InvokeFunction(Register function,
const ParameterCount& actual, const ParameterCount& actual,
InvokeFlag flag, InvokeFlag flag,
PostCallGenerator* post_call_generator = NULL); CallWrapper* call_wrapper = NULL);
void InvokeFunction(JSFunction* function, void InvokeFunction(JSFunction* function,
const ParameterCount& actual, const ParameterCount& actual,
...@@ -748,7 +751,7 @@ class MacroAssembler: public Assembler { ...@@ -748,7 +751,7 @@ class MacroAssembler: public Assembler {
// the unresolved list if the name does not resolve. // the unresolved list if the name does not resolve.
void InvokeBuiltin(Builtins::JavaScript id, void InvokeBuiltin(Builtins::JavaScript id,
InvokeJSFlags flags, InvokeJSFlags flags,
PostCallGenerator* post_call_generator = NULL); CallWrapper* call_wrapper = NULL);
// Store the code object for the given builtin in the target register and // Store the code object for the given builtin in the target register and
// setup the function in r1. // setup the function in r1.
...@@ -911,6 +914,7 @@ class MacroAssembler: public Assembler { ...@@ -911,6 +914,7 @@ class MacroAssembler: public Assembler {
private: private:
void Jump(intptr_t target, RelocInfo::Mode rmode, Condition cond = al); void Jump(intptr_t target, RelocInfo::Mode rmode, Condition cond = al);
int CallSize(intptr_t target, RelocInfo::Mode rmode, Condition cond = al);
void Call(intptr_t target, RelocInfo::Mode rmode, Condition cond = al); void Call(intptr_t target, RelocInfo::Mode rmode, Condition cond = al);
// Helper functions for generating invokes. // Helper functions for generating invokes.
...@@ -920,7 +924,7 @@ class MacroAssembler: public Assembler { ...@@ -920,7 +924,7 @@ class MacroAssembler: public Assembler {
Register code_reg, Register code_reg,
Label* done, Label* done,
InvokeFlag flag, InvokeFlag flag,
PostCallGenerator* post_call_generator = NULL); CallWrapper* call_wrapper = NULL);
// Activation support. // Activation support.
void EnterFrame(StackFrame::Type type); void EnterFrame(StackFrame::Type type);
...@@ -984,11 +988,15 @@ class CodePatcher { ...@@ -984,11 +988,15 @@ class CodePatcher {
// Helper class for generating code or data associated with the code // Helper class for generating code or data associated with the code
// right after a call instruction. As an example this can be used to // right after a call instruction. As an example this can be used to
// generate safepoint data after calls for crankshaft. // generate safepoint data after calls for crankshaft.
class PostCallGenerator { class CallWrapper {
public: public:
PostCallGenerator() { } CallWrapper() { }
virtual ~PostCallGenerator() { } virtual ~CallWrapper() { }
virtual void Generate() = 0; // Called just before emitting a call. Argument is the size of the generated
// call code.
virtual void BeforeCall(int call_size) = 0;
// Called just after emitting a call, i.e., at the return site for the call.
virtual void AfterCall() = 0;
}; };
......
...@@ -56,6 +56,7 @@ static void ZapCodeRange(Address start, Address end) { ...@@ -56,6 +56,7 @@ static void ZapCodeRange(Address start, Address end) {
void Deoptimizer::DeoptimizeFunction(JSFunction* function) { void Deoptimizer::DeoptimizeFunction(JSFunction* function) {
HandleScope scope;
AssertNoAllocation no_allocation; AssertNoAllocation no_allocation;
if (!function->IsOptimized()) return; if (!function->IsOptimized()) return;
...@@ -132,6 +133,11 @@ void Deoptimizer::DeoptimizeFunction(JSFunction* function) { ...@@ -132,6 +133,11 @@ void Deoptimizer::DeoptimizeFunction(JSFunction* function) {
PrintF("[forced deoptimization: "); PrintF("[forced deoptimization: ");
function->PrintName(); function->PrintName();
PrintF(" / %x]\n", reinterpret_cast<uint32_t>(function)); PrintF(" / %x]\n", reinterpret_cast<uint32_t>(function));
#ifdef DEBUG
if (FLAG_print_code) {
code->PrintLn();
}
#endif
} }
} }
......
...@@ -102,6 +102,7 @@ class SafepointTableDeoptimiztionEntryIterator { ...@@ -102,6 +102,7 @@ class SafepointTableDeoptimiztionEntryIterator {
void Deoptimizer::DeoptimizeFunction(JSFunction* function) { void Deoptimizer::DeoptimizeFunction(JSFunction* function) {
HandleScope scope;
AssertNoAllocation no_allocation; AssertNoAllocation no_allocation;
if (!function->IsOptimized()) return; if (!function->IsOptimized()) return;
...@@ -196,6 +197,11 @@ void Deoptimizer::DeoptimizeFunction(JSFunction* function) { ...@@ -196,6 +197,11 @@ void Deoptimizer::DeoptimizeFunction(JSFunction* function) {
PrintF("[forced deoptimization: "); PrintF("[forced deoptimization: ");
function->PrintName(); function->PrintName();
PrintF(" / %" V8PRIxPTR "]\n", reinterpret_cast<intptr_t>(function)); PrintF(" / %" V8PRIxPTR "]\n", reinterpret_cast<intptr_t>(function));
#ifdef DEBUG
if (FLAG_print_code) {
code->PrintLn();
}
#endif
} }
} }
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment