Commit 0efd83c9 authored by erik.corry@gmail.com

* Add code to check coverage of generated code on IA32 port.

* Move ARM coverage code to ARM-specific file and add missing
file to cover.

Review URL: http://codereview.chromium.org/88025

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@1754 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
parent 0d21e7b3
@@ -283,6 +283,10 @@ bool Operand::is_reg(Register reg) const {
   *pc_++ = (x)
 
+#ifdef GENERATED_CODE_COVERAGE
+static void InitCoverageLog();
+#endif
+
 // spare_buffer_
 static byte* spare_buffer_ = NULL;
@@ -329,6 +333,9 @@ Assembler::Assembler(void* buffer, int buffer_size) {
   current_position_ = RelocInfo::kNoPosition;
   written_statement_position_ = current_statement_position_;
   written_position_ = current_position_;
+#ifdef GENERATED_CODE_COVERAGE
+  InitCoverageLog();
+#endif
 }
@@ -2202,4 +2209,30 @@ void Assembler::WriteInternalReference(int position, const Label& bound_label) {
   long_at_put(position, label_loc);
 }
 
+#ifdef GENERATED_CODE_COVERAGE
+static FILE* coverage_log = NULL;
+
+
+static void InitCoverageLog() {
+  char* file_name = getenv("V8_GENERATED_CODE_COVERAGE_LOG");
+  if (file_name != NULL) {
+    coverage_log = fopen(file_name, "aw+");
+  }
+}
+
+
+void LogGeneratedCodeCoverage(const char* file_line) {
+  const char* return_address = (&file_line)[-1];
+  char* push_insn = const_cast<char*>(return_address - 12);
+  push_insn[0] = 0xeb;  // Relative branch insn.
+  push_insn[1] = 13;    // Skip over coverage insns.
+  if (coverage_log != NULL) {
+    fprintf(coverage_log, "%s\n", file_line);
+    fflush(coverage_log);
+  }
+}
+#endif
+
 } } // namespace v8::internal
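Note: the self-patching in LogGeneratedCodeCoverage depends on the exact shape of the instrumentation that the IA32 ACCESS_MASM macro emits (see the macro-assembler-ia32.h hunk below): pushfd, pushad, a push of the file:line literal, a call to the logger, then pop eax, popad, popfd. With the usual cdecl stack layout, (&file_line)[-1] is the slot holding the return address, which points just past the call; rewriting the two bytes at return_address - 12 to a short jump (0xeb, 13) makes every later execution skip the whole sequence, so each site is logged only on its first hit. A minimal sketch of that byte arithmetic, assuming standard IA32 encoding sizes (the sizes are inferred from the instruction set, not stated in this commit):

```cpp
#include <cassert>

int main() {
  // Assumed IA32 encoding sizes: pushfd/pushad/pop eax/popad/popfd are
  // one byte each; push imm32 and call rel32 are five bytes each.
  const int kPushfd = 1, kPushad = 1, kPushImm32 = 5, kCallRel32 = 5;
  const int kPopEax = 1, kPopad = 1, kPopfd = 1;

  // The logger's return address points just after the call, so the
  // sequence starts 12 bytes before it -- hence "return_address - 12".
  assert(kPushfd + kPushad + kPushImm32 + kCallRel32 == 12);

  // A two-byte short jump (0xeb, disp8) written over the start must
  // skip the remaining 13 bytes -- hence "push_insn[1] = 13".
  const int kTotal = kPushfd + kPushad + kPushImm32 + kCallRel32 +
                     kPopEax + kPopad + kPopfd;  // 15 bytes in total
  assert(kTotal - 2 == 13);
  return 0;
}
```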
@@ -34,7 +34,7 @@
 namespace v8 { namespace internal {
 
-#define __ DEFINE_MASM(masm)
+#define __ ACCESS_MASM(masm)
 
 void Builtins::Generate_Adaptor(MacroAssembler* masm, CFunctionId id) {
@@ -32,7 +32,7 @@
 namespace v8 { namespace internal {
 
-#define __ masm->
+#define __ ACCESS_MASM(masm)
 
 void Builtins::Generate_Adaptor(MacroAssembler* masm, CFunctionId id) {
@@ -38,7 +38,7 @@
 namespace v8 { namespace internal {
 
-#define __ DEFINE_MASM(masm_)
+#define __ ACCESS_MASM(masm_)
 
 // -------------------------------------------------------------------------
@@ -4096,7 +4096,7 @@ bool CodeGenerator::HasValidEntryRegisters() { return true; }
 #undef __
-#define __ DEFINE_MASM(masm)
+#define __ ACCESS_MASM(masm)
 
 Handle<String> Reference::GetName() {
@@ -37,7 +37,7 @@
 namespace v8 { namespace internal {
 
-#define __ masm_->
+#define __ ACCESS_MASM(masm_)
 
 // -------------------------------------------------------------------------
 // CodeGenState implementation.
@@ -2009,18 +2009,18 @@ void CodeGenerator::GenerateReturnSequence(Result* return_value) {
   // Add a label for checking the size of the code used for returning.
   Label check_exit_codesize;
-  __ bind(&check_exit_codesize);
+  masm_->bind(&check_exit_codesize);
 
   // Leave the frame and return popping the arguments and the
   // receiver.
   frame_->Exit();
-  __ ret((scope_->num_parameters() + 1) * kPointerSize);
+  masm_->ret((scope_->num_parameters() + 1) * kPointerSize);
   DeleteFrame();
 
   // Check that the size of the code used for returning matches what is
   // expected by the debugger.
   ASSERT_EQ(Debug::kIa32JSReturnSequenceLength,
-            __ SizeOfCodeGeneratedSince(&check_exit_codesize));
+            masm_->SizeOfCodeGeneratedSince(&check_exit_codesize));
 }
@@ -2143,7 +2143,7 @@ void CodeGenerator::GenerateFastCaseSwitchJumpTable(
                        times_1, 0x0, RelocInfo::INTERNAL_REFERENCE));
   smi_value.Unuse();
   // Calculate address to overwrite later with actual address of table.
-  int32_t jump_table_ref = __ pc_offset() - sizeof(int32_t);
+  int32_t jump_table_ref = masm_->pc_offset() - sizeof(int32_t);
   __ Align(4);
   Label table_start;
   __ bind(&table_start);
@@ -3386,7 +3386,9 @@ Result CodeGenerator::LoadFromGlobalSlotCheckExtensions(
     // Loop up the context chain.  There is no frame effect so it is
     // safe to use raw labels here.
     Label next, fast;
-    if (!context.reg().is(tmp.reg())) __ mov(tmp.reg(), context.reg());
+    if (!context.reg().is(tmp.reg())) {
+      __ mov(tmp.reg(), context.reg());
+    }
     __ bind(&next);
     // Terminate at global context.
     __ cmp(FieldOperand(tmp.reg(), HeapObject::kMapOffset),
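Note: the braces added here (and in the similar if-statement hunks further down) matter because under GENERATED_CODE_COVERAGE the __ macro expands to a braced block followed by masm->, so an unbraced if would take the instrumentation block as its body and then emit the instruction unconditionally. A hedged reduction of the problem, with hypothetical names that are not V8's:

```cpp
#include <cstdio>

struct Masm {
  void mov() { std::printf("mov emitted\n"); }
};

// Stand-in for the coverage ACCESS_MASM: a braced statement, then the
// member access (the real macro emits coverage instrumentation here).
#define ACCESS(masm) { std::printf("coverage stub\n"); } (masm)->

int main() {
  Masm masm;
  bool cond = false;
  // Without braces this would parse as:
  //   if (cond) { std::printf("coverage stub\n"); } (&masm)->mov();
  // i.e. the mov would be emitted even though cond is false:
  //   if (cond) ACCESS(&masm) mov();
  if (cond) {  // the braced form used in this change is safe
    ACCESS(&masm) mov();
  }
  return 0;
}
```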
@@ -5275,8 +5277,11 @@ void DeferredReferenceGetKeyedValue::Generate() {
   // instruction.
   ASSERT(value.is_register() && value.reg().is(eax));
   // The delta from the start of the map-compare instruction to the
-  // test eax instruction.
-  int delta_to_patch_site = __ SizeOfCodeGeneratedSince(patch_site());
+  // test eax instruction.  We use masm_ directly here instead of the
+  // __ macro because the __ macro sometimes uses macro expansion to turn
+  // into something that can't return a value.  This is encountered when
+  // doing generated code coverage tests.
+  int delta_to_patch_site = masm_->SizeOfCodeGeneratedSince(patch_site());
   __ test(value.reg(), Immediate(-delta_to_patch_site));
   __ IncrementCounter(&Counters::keyed_load_inline_miss, 1);
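Note: this is the other half of the same expansion problem: a macro that begins with a braced statement is not an expression, so it cannot appear on the right-hand side of an assignment. A minimal sketch (hypothetical names, not V8's API):

```cpp
#include <cstdio>

struct Masm {
  int pc_offset() { return 42; }
};

// Stand-in for the coverage ACCESS_MASM, as in the sketch above.
#define ACCESS(masm) { std::printf("coverage stub\n"); } (masm)->

int main() {
  Masm masm;
  // Does not compile: the expansion starts with a braced statement,
  // which cannot appear where a value is required:
  //   int delta = ACCESS(&masm) pc_offset();
  int delta = (&masm)->pc_offset();  // direct call, as in the diff
  std::printf("delta = %d\n", delta);
  return 0;
}
```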
@@ -5291,7 +5296,7 @@ void DeferredReferenceGetKeyedValue::Generate() {
 
 #undef __
-#define __ masm->
+#define __ ACCESS_MASM(masm)
 
 Handle<String> Reference::GetName() {
   ASSERT(type_ == NAMED);
@@ -5573,7 +5578,7 @@ void ToBooleanStub::Generate(MacroAssembler* masm) {
 
 #undef __
-#define __ masm_->
+#define __ ACCESS_MASM(masm_)
 
 Result DeferredInlineBinaryOperation::GenerateInlineCode(Result* left,
                                                          Result* right) {
@@ -5907,7 +5912,7 @@ Result DeferredInlineBinaryOperation::GenerateInlineCode(Result* left,
 
 #undef __
-#define __ masm->
+#define __ ACCESS_MASM(masm)
 
 void GenericBinaryOpStub::GenerateSmiCode(MacroAssembler* masm, Label* slow) {
   // Perform fast-case smi code for the operation (eax <op> ebx) and
@@ -6232,7 +6237,9 @@ void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
       }
       // SHR should return uint32 - go to runtime for non-smi/negative result.
-      if (op_ == Token::SHR) __ bind(&non_smi_result);
+      if (op_ == Token::SHR) {
+        __ bind(&non_smi_result);
+      }
       __ mov(eax, Operand(esp, 1 * kPointerSize));
       __ mov(edx, Operand(esp, 2 * kPointerSize));
       break;
@@ -58,7 +58,7 @@ bool Debug::IsDebugBreakAtReturn(RelocInfo* rinfo) {
 }
 
-#define __ DEFINE_MASM(masm)
+#define __ ACCESS_MASM(masm)
 
 static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
@@ -68,7 +68,7 @@ bool Debug::IsDebugBreakAtReturn(RelocInfo* rinfo) {
 }
 
-#define __ masm->
+#define __ ACCESS_MASM(masm)
 
 static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
@@ -513,16 +513,6 @@ inline Dest bit_cast(const Source& source) {
 }
 
-#ifdef ARM_GENERATED_CODE_COVERAGE
-#define CODE_COVERAGE_STRINGIFY(x) #x
-#define CODE_COVERAGE_TOSTRING(x) CODE_COVERAGE_STRINGIFY(x)
-#define __FILE_LINE__ __FILE__ ":" CODE_COVERAGE_TOSTRING(__LINE__)
-#define DEFINE_MASM(masm) masm->stop(__FILE_LINE__); masm->
-#else
-#define DEFINE_MASM(masm) masm->
-#endif
-
 } } // namespace v8::internal
 
 #endif  // V8_GLOBALS_H_
@@ -39,7 +39,7 @@ namespace v8 { namespace internal {
 // Static IC stub generators.
 //
 
-#define __ DEFINE_MASM(masm)
+#define __ ACCESS_MASM(masm)
 
 // Helper function used from LoadIC/CallIC GenerateNormal.
@@ -38,7 +38,7 @@ namespace v8 { namespace internal {
 // Static IC stub generators.
 //
 
-#define __ masm->
+#define __ ACCESS_MASM(masm)
 
 // Helper function used to load a property from a dictionary backing storage.
@@ -91,7 +91,9 @@ static void GenerateDictionaryLoad(MacroAssembler* masm, Label* miss_label,
     // Compute the masked index: (hash + i + i * i) & mask.
     __ mov(r1, FieldOperand(name, String::kLengthOffset));
     __ shr(r1, String::kHashShift);
-    if (i > 0) __ add(Operand(r1), Immediate(Dictionary::GetProbeOffset(i)));
+    if (i > 0) {
+      __ add(Operand(r1), Immediate(Dictionary::GetProbeOffset(i)));
+    }
     __ and_(r1, Operand(r2));
 
     // Scale the index by multiplying by the element size.
@@ -35,7 +35,7 @@ namespace v8 { namespace internal {
 // -------------------------------------------------------------------------
 // JumpTarget implementation.
 
-#define __ masm_->
+#define __ ACCESS_MASM(masm_)
 
 void JumpTarget::DoJump() {
   ASSERT(cgen_ != NULL);
@@ -35,7 +35,7 @@ namespace v8 { namespace internal {
 // -------------------------------------------------------------------------
 // JumpTarget implementation.
 
-#define __ masm_->
+#define __ ACCESS_MASM(masm_)
 
 void JumpTarget::DoJump() {
   ASSERT(cgen_ != NULL);
@@ -298,6 +298,15 @@ static inline MemOperand FieldMemOperand(Register object, int offset) {
 }
 
+#ifdef GENERATED_CODE_COVERAGE
+#define CODE_COVERAGE_STRINGIFY(x) #x
+#define CODE_COVERAGE_TOSTRING(x) CODE_COVERAGE_STRINGIFY(x)
+#define __FILE_LINE__ __FILE__ ":" CODE_COVERAGE_TOSTRING(__LINE__)
+#define ACCESS_MASM(masm) masm->stop(__FILE_LINE__); masm->
+#else
+#define ACCESS_MASM(masm) masm->
+#endif
+
 } } // namespace v8::internal
@@ -343,6 +343,29 @@ static inline Operand FieldOperand(Register object,
   return Operand(object, index, scale, offset - kHeapObjectTag);
 }
 
+#ifdef GENERATED_CODE_COVERAGE
+extern void LogGeneratedCodeCoverage(const char* file_line);
+#define CODE_COVERAGE_STRINGIFY(x) #x
+#define CODE_COVERAGE_TOSTRING(x) CODE_COVERAGE_STRINGIFY(x)
+#define __FILE_LINE__ __FILE__ ":" CODE_COVERAGE_TOSTRING(__LINE__)
+#define ACCESS_MASM(masm) {                                               \
+    byte* ia32_coverage_function =                                        \
+        reinterpret_cast<byte*>(FUNCTION_ADDR(LogGeneratedCodeCoverage)); \
+    masm->pushfd();                                                       \
+    masm->pushad();                                                       \
+    masm->push(Immediate(reinterpret_cast<int>(&__FILE_LINE__)));         \
+    masm->call(ia32_coverage_function, RelocInfo::RUNTIME_ENTRY);         \
+    masm->pop(eax);                                                       \
+    masm->popad();                                                        \
+    masm->popfd();                                                        \
+  }                                                                       \
+  masm->
+#else
+#define ACCESS_MASM(masm) masm->
+#endif
+
 } } // namespace v8::internal
 
 #endif  // V8_MACRO_ASSEMBLER_IA32_H_
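Note: the CODE_COVERAGE_STRINGIFY/CODE_COVERAGE_TOSTRING pair above is the standard two-level stringification idiom: the extra level forces __LINE__ to expand to its number before # stringifies it, and the adjacent literals then concatenate with __FILE__ into a single "file:line" string, which is what the push above embeds and the logger prints. A standalone sketch (the macro is renamed FILE_LINE here to avoid a reserved identifier in user code):

```cpp
#include <cstdio>

#define CODE_COVERAGE_STRINGIFY(x) #x
#define CODE_COVERAGE_TOSTRING(x) CODE_COVERAGE_STRINGIFY(x)
#define FILE_LINE __FILE__ ":" CODE_COVERAGE_TOSTRING(__LINE__)

int main() {
  // Without the extra level, # would stringify the token "__LINE__"
  // itself; with it, __LINE__ expands to its number first, and the
  // adjacent literals concatenate into one "file:line" string.
  std::printf("%s\n", FILE_LINE);  // prints e.g. "sketch.cc:11"
  return 0;
}
```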
@@ -86,7 +86,7 @@ namespace v8 { namespace internal {
  *       byte* stack_area_top)
  */
 
-#define __ masm_->
+#define __ ACCESS_MASM(masm_)
 
 RegExpMacroAssemblerIA32::RegExpMacroAssemblerIA32(
     Mode mode,
@@ -91,7 +91,7 @@ Debugger::~Debugger() {
 
-#ifdef ARM_GENERATED_CODE_COVERAGE
+#ifdef GENERATED_CODE_COVERAGE
 static FILE* coverage_log = NULL;
@@ -107,7 +107,7 @@ void Debugger::Stop(Instr* instr) {
   char* str = reinterpret_cast<char*>(instr->InstructionBits() & 0x0fffffff);
   if (strlen(str) > 0) {
     if (coverage_log != NULL) {
-      fprintf(coverage_log, "Simulator hit %s\n", str);
+      fprintf(coverage_log, "%s\n", str);
       fflush(coverage_log);
     }
     instr->SetInstructionBits(0xe1a00000);  // Overwrite with nop.
@@ -115,7 +115,7 @@
   sim_->set_pc(sim_->get_pc() + Instr::kInstrSize);
 }
 
-#else  // ndef ARM_GENERATED_CODE_COVERAGE
+#else  // ndef GENERATED_CODE_COVERAGE
 
 static void InitializeCoverage() {
 }
@@ -33,7 +33,7 @@
 namespace v8 { namespace internal {
 
-#define __ DEFINE_MASM(masm)
+#define __ ACCESS_MASM(masm)
 
 static void ProbeTable(MacroAssembler* masm,
@@ -456,7 +456,7 @@ void StubCompiler::GenerateLoadMiss(MacroAssembler* masm, Code::Kind kind) {
 
 #undef __
-#define __ DEFINE_MASM(masm())
+#define __ ACCESS_MASM(masm())
 
 Object* StubCompiler::CompileLazyCompile(Code::Flags flags) {
@@ -33,7 +33,7 @@
 namespace v8 { namespace internal {
 
-#define __ masm->
+#define __ ACCESS_MASM(masm)
 
 static void ProbeTable(MacroAssembler* masm,
@@ -256,7 +256,7 @@ void StubCompiler::GenerateLoadField(MacroAssembler* masm,
   // Check that the maps haven't changed.
   Register reg =
-      __ CheckMaps(object, receiver, holder, scratch1, scratch2, miss_label);
+      masm->CheckMaps(object, receiver, holder, scratch1, scratch2, miss_label);
 
   // Get the value from the properties.
   GenerateFastPropertyLoad(masm, eax, reg, holder, index);
@@ -279,7 +279,7 @@ void StubCompiler::GenerateLoadCallback(MacroAssembler* masm,
   // Check that the maps haven't changed.
   Register reg =
-      __ CheckMaps(object, receiver, holder, scratch1, scratch2, miss_label);
+      masm->CheckMaps(object, receiver, holder, scratch1, scratch2, miss_label);
 
   // Push the arguments on the JS stack of the caller.
   __ pop(scratch2);  // remove return address
@@ -310,7 +310,7 @@ void StubCompiler::GenerateLoadConstant(MacroAssembler* masm,
   // Check that the maps haven't changed.
   Register reg =
-      __ CheckMaps(object, receiver, holder, scratch1, scratch2, miss_label);
+      masm->CheckMaps(object, receiver, holder, scratch1, scratch2, miss_label);
 
   // Return the constant value.
   __ mov(eax, Handle<Object>(value));
@@ -332,7 +332,7 @@ void StubCompiler::GenerateLoadInterceptor(MacroAssembler* masm,
   // Check that the maps haven't changed.
   Register reg =
-      __ CheckMaps(object, receiver, holder, scratch1, scratch2, miss_label);
+      masm->CheckMaps(object, receiver, holder, scratch1, scratch2, miss_label);
 
   // Push the arguments on the JS stack of the caller.
   __ pop(scratch2);  // remove return address
@@ -440,7 +440,7 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm,
 
 #undef __
-#define __ masm()->
+#define __ ACCESS_MASM(masm())
 
 // TODO(1241006): Avoid having lazy compile stubs specialized by the
@@ -485,7 +485,7 @@ Object* CallStubCompiler::CompileCallField(Object* object,
   // Do the right check and compute the holder register.
   Register reg =
-      __ CheckMaps(JSObject::cast(object), edx, holder, ebx, ecx, &miss);
+      masm()->CheckMaps(JSObject::cast(object), edx, holder, ebx, ecx, &miss);
 
   GenerateFastPropertyLoad(masm(), edi, reg, holder, index);
@@ -656,7 +656,7 @@ Object* CallStubCompiler::CompileCallInterceptor(Object* object,
   // Check that maps have not changed and compute the holder register.
   Register reg =
-      __ CheckMaps(JSObject::cast(object), edx, holder, ebx, ecx, &miss);
+      masm()->CheckMaps(JSObject::cast(object), edx, holder, ebx, ecx, &miss);
 
   // Enter an internal frame.
   __ EnterInternalFrame();
@@ -36,7 +36,7 @@ namespace v8 { namespace internal {
 // -------------------------------------------------------------------------
 // VirtualFrame implementation.
 
-#define __ DEFINE_MASM(masm_)
+#define __ ACCESS_MASM(masm_)
 
 // On entry to a function, the virtual frame already contains the
@@ -33,7 +33,7 @@
 namespace v8 { namespace internal {
 
-#define __ masm_->
+#define __ ACCESS_MASM(masm_)
 
 // -------------------------------------------------------------------------
 // VirtualFrame implementation.