Commit cdf65ca0 authored by ager@chromium.org's avatar ager@chromium.org

Reimplement the padding of relocation information for lazy deoptimization on ia32.

The previous implementation attempted to keep track of the needed
relocation size for deoptimization while generating the optimized
code. That was error prone. This patch moves the relocation resizing
to the deoptimizer as the last step of creating an optimized code
object.

The downside to this approach is that two relocation information byte
arrays are created for all optimized functions that do not have enough
relocation space for lazy deoptimization.

R=sgjesse@chromium.org

Review URL: http://codereview.chromium.org/6730050

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@7360 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
parent 6fadb525
......@@ -44,6 +44,11 @@ int Deoptimizer::patch_size() {
}
// ARM implementation: intentionally a no-op.
void Deoptimizer::EnsureRelocSpaceForLazyDeoptimization(Handle<Code> code) {
// Nothing to do. No new relocation information is written for lazy
// deoptimization on ARM.
}
void Deoptimizer::DeoptimizeFunction(JSFunction* function) {
HandleScope scope;
......
......@@ -94,6 +94,7 @@ void LCodeGen::FinishCode(Handle<Code> code) {
code->set_stack_slots(StackSlotCount());
code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
PopulateDeoptimizationData(code);
Deoptimizer::EnsureRelocSpaceForLazyDeoptimization(code);
}
......
......@@ -140,6 +140,7 @@ const int kPCJumpTag = (1 << kExtraTagBits) - 1;
const int kSmallPCDeltaBits = kBitsPerByte - kTagBits;
const int kSmallPCDeltaMask = (1 << kSmallPCDeltaBits) - 1;
const int RelocInfo::kMaxSmallPCDelta = kSmallPCDeltaMask;
const int kVariableLengthPCJumpTopTag = 1;
const int kChunkBits = 7;
......
......@@ -205,6 +205,9 @@ class RelocInfo BASE_EMBEDDED {
// The maximum size for a call instruction including pc-jump.
static const int kMaxCallSize = 6;
// The maximum pc delta that will use the short encoding.
static const int kMaxSmallPCDelta;
enum Mode {
// Please note the order is important (see IsCodeTarget, IsGCRelocMode).
CONSTRUCT_CALL, // code target that is a call to a JavaScript constructor.
......
......@@ -136,6 +136,13 @@ class Deoptimizer : public Malloced {
Isolate* isolate);
static Deoptimizer* Grab(Isolate* isolate);
// Makes sure that there is enough room in the relocation
// information of a code object to perform lazy deoptimization
// patching. If there is not enough room a new relocation
// information object is allocated and comments are added until it
// is big enough.
static void EnsureRelocSpaceForLazyDeoptimization(Handle<Code> code);
// Deoptimize the function now. Its current optimized code will never be run
// again and any activations of the optimized code will get deoptimized when
// execution returns.
......
......@@ -55,6 +55,81 @@ static void ZapCodeRange(Address start, Address end) {
}
// Ensures that code->relocation_info() is large enough for the
// RUNTIME_ENTRY entries that lazy-deoptimization patching will later
// write over it (see Deoptimizer::DeoptimizeFunction). If the existing
// relocation info is too small, it is replaced by a new byte array that
// is padded with filler COMMENT entries up to the required size.
void Deoptimizer::EnsureRelocSpaceForLazyDeoptimization(Handle<Code> code) {
HandleScope scope;
// Compute the size of relocation information needed for the code
// patching in Deoptimizer::DeoptimizeFunction.
int min_reloc_size = 0;
Address prev_reloc_address = code->instruction_start();
Address code_start_address = code->instruction_start();
SafepointTable table(*code);
for (unsigned i = 0; i < table.length(); ++i) {
Address curr_reloc_address = code_start_address + table.GetPcOffset(i);
ASSERT_GE(curr_reloc_address, prev_reloc_address);
SafepointEntry safepoint_entry = table.GetEntry(i);
int deoptimization_index = safepoint_entry.deoptimization_index();
// Only safepoints that carry a deoptimization index are patched at
// lazy deoptimization, so only those consume relocation space.
if (deoptimization_index != Safepoint::kNoDeoptimizationIndex) {
// The gap code is needed to get to the state expected at the
// bailout and we need to skip the call opcode to get to the
// address that needs reloc.
curr_reloc_address += safepoint_entry.gap_code_size() + 1;
int pc_delta = curr_reloc_address - prev_reloc_address;
// We use RUNTIME_ENTRY reloc info which has a size of 2 bytes
// if encodable with small pc delta encoding and up to 6 bytes
// otherwise.
if (pc_delta <= RelocInfo::kMaxSmallPCDelta) {
min_reloc_size += 2;
} else {
min_reloc_size += 6;
}
prev_reloc_address = curr_reloc_address;
}
}
// If the relocation information is not big enough we create a new
// relocation info object that is padded with comments to make it
// big enough for lazy deoptimization.
int reloc_length = code->relocation_info()->length();
if (min_reloc_size > reloc_length) {
int comment_reloc_size = RelocInfo::kMinRelocCommentSize;
// Padding needed.
int min_padding = min_reloc_size - reloc_length;
// Number of comments needed to take up at least that much space.
int additional_comments =
(min_padding + comment_reloc_size - 1) / comment_reloc_size;
// Actual padding size.
int padding = additional_comments * comment_reloc_size;
// Allocate new relocation info and copy old relocation to the end
// of the new relocation info array because relocation info is
// written and read backwards.
Factory* factory = code->GetIsolate()->factory();
Handle<ByteArray> new_reloc =
factory->NewByteArray(reloc_length + padding, TENURED);
memcpy(new_reloc->GetDataStartAddress() + padding,
code->relocation_info()->GetDataStartAddress(),
reloc_length);
// Create a relocation writer to write the comments in the padding
// space. Use position 0 for everything to ensure short encoding.
RelocInfoWriter reloc_info_writer(
new_reloc->GetDataStartAddress() + padding, 0);
intptr_t comment_string
= reinterpret_cast<intptr_t>(RelocInfo::kFillerCommentString);
RelocInfo rinfo(0, RelocInfo::COMMENT, comment_string);
for (int i = 0; i < additional_comments; ++i) {
#ifdef DEBUG
byte* pos_before = reloc_info_writer.pos();
#endif
reloc_info_writer.Write(&rinfo);
// Each filler comment must occupy exactly kMinRelocCommentSize
// bytes; otherwise the padding computation above is wrong.
ASSERT(RelocInfo::kMinRelocCommentSize ==
pos_before - reloc_info_writer.pos());
}
// Replace relocation information on the code object.
code->set_relocation_info(*new_reloc);
}
}
void Deoptimizer::DeoptimizeFunction(JSFunction* function) {
HandleScope scope;
AssertNoAllocation no_allocation;
......
......@@ -31,6 +31,7 @@
#include "ia32/lithium-codegen-ia32.h"
#include "code-stubs.h"
#include "deoptimizer.h"
#include "stub-cache.h"
namespace v8 {
......@@ -50,9 +51,6 @@ class SafepointGenerator : public PostCallGenerator {
virtual ~SafepointGenerator() { }
virtual void Generate() {
// Ensure that we have enough space in the reloc info to patch
// this with calls when doing deoptimization.
codegen_->EnsureRelocSpaceForDeoptimization();
codegen_->RecordSafepoint(pointers_, deoptimization_index_);
}
......@@ -73,7 +71,6 @@ bool LCodeGen::GenerateCode() {
return GeneratePrologue() &&
GenerateBody() &&
GenerateDeferredCode() &&
GenerateRelocPadding() &&
GenerateSafepointTable();
}
......@@ -83,6 +80,7 @@ void LCodeGen::FinishCode(Handle<Code> code) {
code->set_stack_slots(StackSlotCount());
code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
PopulateDeoptimizationData(code);
Deoptimizer::EnsureRelocSpaceForLazyDeoptimization(code);
}
......@@ -380,22 +378,6 @@ void LCodeGen::WriteTranslation(LEnvironment* environment,
}
// Accumulates (during code generation) a conservative estimate of the
// relocation space that lazy-deoptimization patching will need for the
// patch site at the current pc: 2 bytes when a small pc-delta encoding
// suffices, 6 bytes otherwise.
void LCodeGen::EnsureRelocSpaceForDeoptimization() {
// Since we patch the reloc info with RUNTIME_ENTRY calls every
// patch site will take up 2 bytes + any pc-jumps. We are
// conservative and always reserve 6 bytes in case a simple pc-jump
// is not enough.
uint32_t pc_delta =
masm()->pc_offset() - deoptimization_reloc_size.last_pc_offset;
// NOTE(review): the 6 here is presumably kSmallPCDeltaBits
// (kBitsPerByte - kTagBits); confirm it stays in sync with the
// reloc-info encoding constants rather than being duplicated.
if (is_uintn(pc_delta, 6)) {
deoptimization_reloc_size.min_size += 2;
} else {
deoptimization_reloc_size.min_size += 6;
}
deoptimization_reloc_size.last_pc_offset = masm()->pc_offset();
}
void LCodeGen::AddToTranslation(Translation* translation,
LOperand* op,
bool is_tagged) {
......@@ -449,7 +431,6 @@ void LCodeGen::CallCode(Handle<Code> code,
}
__ call(code, mode);
EnsureRelocSpaceForDeoptimization();
RegisterLazyDeoptimization(instr);
// Signal that we don't inline smi code before these stubs in the
......@@ -474,7 +455,7 @@ void LCodeGen::CallRuntime(const Runtime::Function* fun,
__ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
}
__ CallRuntime(fun, argc);
EnsureRelocSpaceForDeoptimization();
RegisterLazyDeoptimization(instr);
}
......@@ -2539,7 +2520,6 @@ void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
} else {
__ call(FieldOperand(edi, JSFunction::kCodeEntryOffset));
}
EnsureRelocSpaceForDeoptimization();
// Setup deoptimization.
RegisterLazyDeoptimization(instr);
......
......@@ -101,6 +101,11 @@ class SafepointTableDeoptimiztionEntryIterator {
};
// x64 implementation: not yet implemented; currently a no-op, so no
// relocation padding is reserved for lazy deoptimization on this platform.
void Deoptimizer::EnsureRelocSpaceForLazyDeoptimization(Handle<Code> code) {
// TODO(1276): Implement.
}
void Deoptimizer::DeoptimizeFunction(JSFunction* function) {
HandleScope scope;
AssertNoAllocation no_allocation;
......
......@@ -97,6 +97,7 @@ void LCodeGen::FinishCode(Handle<Code> code) {
code->set_stack_slots(StackSlotCount());
code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
PopulateDeoptimizationData(code);
Deoptimizer::EnsureRelocSpaceForLazyDeoptimization(code);
}
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment