Commit cb0a7bc7 authored by ricow@chromium.org

This fixes the issue with the deoptimizer trashing the reloc info before patching the code.

If a compacting GC happens immediately after the deoptimization, but before
the patched code is actually run, the addresses embedded in the patched calls
might no longer be valid.
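
(For illustration only, not V8 code: a minimal standalone C++ sketch of the
hazard, with invented names. A "code object" embeds the absolute address of a
movable target; if the compactor moves the target without a recorded
relocation slot, the embedded address is left dangling.)

#include <cstdint>
#include <iostream>
#include <vector>

struct Target { int payload; };

int main() {
  // A movable "deopt entry" and a patched call site that embeds its address.
  std::vector<Target> old_space(1);
  old_space[0].payload = 42;
  std::uintptr_t embedded_call_target =
      reinterpret_cast<std::uintptr_t>(&old_space[0]);

  // Relocation info: record which slots hold embedded addresses.
  std::vector<std::uintptr_t*> reloc_slots;
  reloc_slots.push_back(&embedded_call_target);

  // Simulate a compacting collection: the target moves, old space dies.
  std::vector<Target> new_space(1);
  new_space[0] = old_space[0];
  old_space.clear();

  // Only because the slot was recorded can the mover fix up the address;
  // without it, embedded_call_target would still point into dead old space.
  for (std::uintptr_t* slot : reloc_slots) {
    *slot = reinterpret_cast<std::uintptr_t>(&new_space[0]);
  }

  std::cout << reinterpret_cast<Target*>(embedded_call_target)->payload << "\n";
  return 0;
}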

I have validated that this works by patching the existing code to always do a
compacting GC after we finish deoptimizing. I will create a real regression
test for this, but that requires additional code for forcing a deopt/opt from
JavaScript test code, which I will land in a separate change.
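
(The validation hack was roughly the following, a local-only sketch that is not
part of this change; it assumes the Heap::CollectAllGarbage(bool
force_compaction) entry point available in this revision.)

  // At the very end of Deoptimizer::DeoptimizeFunction, after the calls have
  // been patched and the relocation info rewritten:
  Heap::CollectAllGarbage(true);  // force a compacting collection immediately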


Review URL: http://codereview.chromium.org/6349043

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@6565 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
parent 0dd951ee
@@ -48,14 +48,19 @@ void Deoptimizer::DeoptimizeFunction(JSFunction* function) {
   // Get the optimized code.
   Code* code = function->code();
 
-  // Invalidate the relocation information, as it will become invalid by the
-  // code patching below, and is not needed any more.
-  code->InvalidateRelocation();
-
   // For each return after a safepoint insert a absolute call to the
   // corresponding deoptimization entry.
   unsigned last_pc_offset = 0;
   SafepointTable table(function->code());
+
+  // We will overwrite the code's relocation info in-place. Relocation info
+  // is written backward. The relocation info is the payload of a byte array.
+  // Later on we will align this at the start of the byte array and create
+  // a trash byte array of the remaining space.
+  ByteArray* reloc_info = code->relocation_info();
+  Address end_address = reloc_info->address() + reloc_info->Size();
+  RelocInfoWriter reloc_info_writer(end_address, code->instruction_start());
   for (unsigned i = 0; i < table.length(); i++) {
     unsigned pc_offset = table.GetPcOffset(i);
     SafepointEntry safepoint_entry = table.GetEntry(i);
@@ -72,12 +77,14 @@ void Deoptimizer::DeoptimizeFunction(JSFunction* function) {
 #endif
     last_pc_offset = pc_offset;
     if (deoptimization_index != Safepoint::kNoDeoptimizationIndex) {
-      CodePatcher patcher(
-          code->instruction_start() + pc_offset + gap_code_size,
-          Assembler::kCallInstructionLength);
-      patcher.masm()->call(GetDeoptimizationEntry(deoptimization_index, LAZY),
-                           RelocInfo::NONE);
+      Address call_pc = code->instruction_start() + pc_offset + gap_code_size;
+      CodePatcher patcher(call_pc, Assembler::kCallInstructionLength);
+      Address entry = GetDeoptimizationEntry(deoptimization_index, LAZY);
+      patcher.masm()->call(entry, RelocInfo::NONE);
       last_pc_offset += gap_code_size + Assembler::kCallInstructionLength;
+      RelocInfo rinfo(call_pc + 1, RelocInfo::RUNTIME_ENTRY,
+                      reinterpret_cast<intptr_t>(entry));
+      reloc_info_writer.Write(&rinfo);
     }
   }
 #ifdef DEBUG
@@ -90,6 +97,40 @@ void Deoptimizer::DeoptimizeFunction(JSFunction* function) {
   }
 #endif
 
+  // Move the relocation info to the beginning of the byte array.
+  int reloc_size = end_address - reloc_info_writer.pos();
+  memmove(code->relocation_start(), reloc_info_writer.pos(), reloc_size);
+
+  // The relocation info is in place, update the size.
+  reloc_info->set_length(reloc_size);
+
+  // Handle the junk part after the new relocation info. We will create
+  // a non-live object in the extra space at the end of the former reloc info.
+  Address junk = reloc_info->address() + reloc_info->Size();
+  ASSERT(junk <= end_address);
+
+  if (end_address - junk <= ByteArray::kHeaderSize) {
+    // We get in here if there is not enough space for a ByteArray.
+    // Both addresses are kPointerSize aligned.
+    CHECK_EQ((end_address - junk) % 4, 0);
+    Map* filler_map = Heap::one_pointer_filler_map();
+    while (junk < end_address) {
+      HeapObject::FromAddress(junk)->set_map(filler_map);
+      junk += kPointerSize;
+    }
+  } else {
+    int size = end_address - junk;
+    // Since the reloc_end address and junk are both aligned, we should
+    // never have junk which is not a multiple of kPointerSize.
+    CHECK_EQ(size % kPointerSize, 0);
+    CHECK_GT(size, 0);
+    HeapObject* junk_object = HeapObject::FromAddress(junk);
+    junk_object->set_map(Heap::byte_array_map());
+    int length = ByteArray::LengthFor(end_address - junk);
+    ByteArray::cast(junk_object)->set_length(length);
+  }
+
   // Add the deoptimizing code to the list.
   DeoptimizingCodeListNode* node = new DeoptimizingCodeListNode(code);
   node->set_next(deoptimizing_code_list_);