Initial bypass of JumpTarget::ComputeEntryFrame for deferred code
entry labels where it is statically safe.

Review URL: http://codereview.chromium.org/115296

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@1943 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
parent edf51c0f
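
In short, the change lets a code generator snapshot its current virtual frame as the expected entry frame of a deferred-code jump target (via DeferredCode::SetEntryFrame below), so that binding the target can take a fast path instead of merging all reaching frames in ComputeEntryFrame. The following is a minimal, self-contained C++ sketch of that idea; VirtualFrame and JumpTarget here are toy stand-ins rather than the real V8 classes, and the method names are simplified.

// Toy, self-contained model of the fast path this change introduces.
// These are stand-in types, not the real V8 VirtualFrame/JumpTarget.
#include <cstdio>
#include <vector>

struct VirtualFrame {
  std::vector<int> elements;  // stand-in for the frame's elements
};

class JumpTarget {
 public:
  // Preconfigure the frame expected at this target (what
  // DeferredCode::SetEntryFrame arranges in the real code).
  void set_entry_frame(VirtualFrame* frame) { entry_frame_ = frame; }

  // Record a frame that reaches the target via a forward jump.
  void AddReachingFrame(VirtualFrame* frame) { reaching_frames_.push_back(frame); }

  // Bind the target: adopt the preconfigured frame if there is one,
  // otherwise fall back to merging the reaching frames.
  VirtualFrame* Bind() {
    if (entry_frame_ != nullptr) {
      std::printf("fast case: using preconfigured entry frame\n");
      return entry_frame_;
    }
    std::printf("slow case: computing entry frame from %zu reaching frame(s)\n",
                reaching_frames_.size());
    return reaching_frames_.empty() ? nullptr : reaching_frames_[0];
  }

 private:
  VirtualFrame* entry_frame_ = nullptr;
  std::vector<VirtualFrame*> reaching_frames_;
};

int main() {
  VirtualFrame frame{{1, 2, 3}};
  JumpTarget target;
  target.set_entry_frame(&frame);  // snapshot taken before branching
  target.Bind();                   // no ComputeEntryFrame-style merge needed
  return 0;
}
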
......@@ -52,6 +52,8 @@ void JumpTarget::DoJump() {
cgen_->DeleteFrame();
__ jmp(&entry_label_);
} else {
// Preconfigured entry frame is not used on ARM.
ASSERT(entry_frame_ == NULL);
// Forward jump. The current frame is added to the end of the list
// of frames reaching the target block and a jump to the merge code
// is emitted.
......@@ -115,6 +117,8 @@ void JumpTarget::DoBranch(Condition cc, Hint ignored) {
__ bind(&original_fall_through);
} else {
// Preconfigured entry frame is not used on ARM.
ASSERT(entry_frame_ == NULL);
// Forward branch. A copy of the current frame is added to the end
// of the list of frames reaching the target block and a branch to
// the merge code is emitted.
......@@ -141,6 +145,8 @@ void JumpTarget::Call() {
cgen_->frame()->SpillAll();
VirtualFrame* target_frame = new VirtualFrame(cgen_->frame());
target_frame->Adjust(1);
// We do not expect a call with a preconfigured entry frame.
ASSERT(entry_frame_ == NULL);
AddReachingFrame(target_frame);
__ bl(&merge_labels_.last());
......@@ -205,13 +211,15 @@ void JumpTarget::DoBind(int mergable_elements) {
bool had_fall_through = false;
if (cgen_->has_valid_frame()) {
had_fall_through = true;
AddReachingFrame(cgen_->frame());
AddReachingFrame(cgen_->frame()); // Return value ignored.
RegisterFile empty;
cgen_->SetFrame(NULL, &empty);
}
// Compute the frame to use for entry to the block.
if (entry_frame_ == NULL) {
ComputeEntryFrame(mergable_elements);
}
// Some moves required to merge to an expected frame require purely
// frame state changes, and do not require any code generation.
......
......@@ -30,10 +30,19 @@
#define V8_CODEGEN_INL_H_
#include "codegen.h"
#include "register-allocator-inl.h"
namespace v8 { namespace internal {
void DeferredCode::SetEntryFrame(Result* arg) {
ASSERT(generator()->has_valid_frame());
generator()->frame()->Push(arg);
enter()->set_entry_frame(new VirtualFrame(generator()->frame()));
*arg = generator()->frame()->Pop();
}
// -----------------------------------------------------------------------------
// Support for "structured" code comments.
//
......
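
The Push/copy/Pop sequence in SetEntryFrame above exists so the snapshot records where the extra result (`arg`) lives without disturbing the caller's frame: the argument is pushed so the copied frame includes it, and immediately popped back out so the caller still holds it. Below is a toy, self-contained illustration of that pattern; the Frame type and function name are illustrative only and assume nothing about the real VirtualFrame beyond copyability.

// Toy illustration of the push/snapshot/pop pattern used by
// DeferredCode::SetEntryFrame above (names here are illustrative only).
#include <cassert>
#include <string>
#include <vector>

using Frame = std::vector<std::string>;

// Snapshot the current frame with `arg` temporarily included, so the copy
// records where the extra result lives, then hand `arg` back to the caller.
Frame SnapshotWithArg(Frame* current, std::string* arg) {
  current->push_back(*arg);   // frame()->Push(arg)
  Frame snapshot = *current;  // new VirtualFrame(generator()->frame())
  *arg = current->back();     // *arg = frame()->Pop()
  current->pop_back();
  return snapshot;
}

int main() {
  Frame frame = {"receiver"};
  std::string value = "eax";
  Frame entry = SnapshotWithArg(&frame, &value);
  assert(entry.size() == 2);  // snapshot includes the extra result
  assert(frame.size() == 1);  // caller's frame is unchanged afterwards
  return 0;
}
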
......@@ -117,7 +117,14 @@ class DeferredCode: public ZoneObject {
MacroAssembler* masm() const { return masm_; }
CodeGenerator* generator() const { return generator_; }
// Set the virtual frame for entry to the deferred code as a
// snapshot of the code generator's current frame (plus additional
// results). This is optional, but should be done before branching
// or jumping to the deferred code.
inline void SetEntryFrame(Result* arg);
JumpTarget* enter() { return &enter_; }
void BindExit() { exit_.Bind(0); }
void BindExit(Result* result) { exit_.Bind(result, 1); }
void BindExit(Result* result0, Result* result1) {
......
......@@ -113,6 +113,8 @@ void CodeGenerator::GenCode(FunctionLiteral* fun) {
// Adjust for function-level loop nesting.
loop_nesting_ += fun->loop_nesting();
JumpTarget::set_compiling_deferred_code(false);
{
CodeGenState state(this);
......@@ -316,7 +318,9 @@ void CodeGenerator::GenCode(FunctionLiteral* fun) {
if (HasStackOverflow()) {
ClearDeferred();
} else {
JumpTarget::set_compiling_deferred_code(true);
ProcessDeferred();
JumpTarget::set_compiling_deferred_code(false);
}
// There is no need to delete the register allocator, it is a
......@@ -1247,6 +1251,10 @@ void CodeGenerator::ConstantSmiBinaryOperation(Token::Value op,
switch (op) {
case Token::ADD: {
operand->ToRegister();
frame_->Spill(operand->reg());
__ add(Operand(operand->reg()), Immediate(value));
DeferredCode* deferred = NULL;
if (reversed) {
deferred = new DeferredInlineSmiAddReversed(this, smi_value,
......@@ -1254,9 +1262,7 @@ void CodeGenerator::ConstantSmiBinaryOperation(Token::Value op,
} else {
deferred = new DeferredInlineSmiAdd(this, smi_value, overwrite_mode);
}
operand->ToRegister();
frame_->Spill(operand->reg());
__ add(Operand(operand->reg()), Immediate(value));
deferred->SetEntryFrame(operand);
deferred->enter()->Branch(overflow, operand, not_taken);
__ test(operand->reg(), Immediate(kSmiTagMask));
deferred->enter()->Branch(not_zero, operand, not_taken);
......@@ -1271,8 +1277,7 @@ void CodeGenerator::ConstantSmiBinaryOperation(Token::Value op,
if (reversed) {
answer = allocator()->Allocate();
ASSERT(answer.is_valid());
deferred = new DeferredInlineSmiSubReversed(this,
smi_value,
deferred = new DeferredInlineSmiSubReversed(this, smi_value,
overwrite_mode);
__ Set(answer.reg(), Immediate(value));
// We are in the reversed case so they can't both be Smi constants.
......@@ -1281,12 +1286,11 @@ void CodeGenerator::ConstantSmiBinaryOperation(Token::Value op,
} else {
operand->ToRegister();
frame_->Spill(operand->reg());
deferred = new DeferredInlineSmiSub(this,
smi_value,
overwrite_mode);
deferred = new DeferredInlineSmiSub(this, smi_value, overwrite_mode);
__ sub(Operand(operand->reg()), Immediate(value));
answer = *operand;
}
deferred->SetEntryFrame(operand);
deferred->enter()->Branch(overflow, operand, not_taken);
__ test(answer.reg(), Immediate(kSmiTagMask));
deferred->enter()->Branch(not_zero, operand, not_taken);
......@@ -5413,19 +5417,24 @@ void Reference::GetValue(TypeofState typeof_state) {
} else {
// Inline the inobject property case.
Comment cmnt(masm, "[ Inlined named property load");
DeferredReferenceGetNamedValue* deferred =
new DeferredReferenceGetNamedValue(cgen_, GetName());
Result receiver = cgen_->frame()->Pop();
receiver.ToRegister();
// Check that the receiver is a heap object.
__ test(receiver.reg(), Immediate(kSmiTagMask));
deferred->enter()->Branch(zero, &receiver, not_taken);
// Preallocate the value register to ensure that there is no
// spill emitted between the patch site label and the offset in
// the load instruction.
// the load instruction and that all frames reaching the
// deferred code are identical.
Result value = cgen_->allocator()->Allocate();
ASSERT(value.is_valid());
// Check that the receiver is a heap object.
__ test(receiver.reg(), Immediate(kSmiTagMask));
DeferredReferenceGetNamedValue* deferred =
new DeferredReferenceGetNamedValue(cgen_, GetName());
deferred->SetEntryFrame(&receiver);
deferred->enter()->Branch(zero, &receiver, not_taken);
__ bind(deferred->patch_site());
// This is the map check instruction that will be patched (so we can't
// use the double underscore macro that may insert instructions).
......
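
Both inlined paths above guard the fast case with `test(reg, Immediate(kSmiTagMask))` and branch to the deferred code when the value is not a smi (or when the inline add/sub overflows). Assuming the usual ia32 encoding where small integers carry a one-bit 0 tag, here is a standalone sketch of what that mask test checks.

// Standalone sketch of the ia32 smi check guarding the inlined fast
// paths above: small integers carry a 0 tag bit, so a word is a smi
// exactly when (word & kSmiTagMask) == 0.
#include <cstdint>
#include <cstdio>

const int kSmiTagSize = 1;
const intptr_t kSmiTagMask = (1 << kSmiTagSize) - 1;  // == 1

intptr_t ToSmi(intptr_t value) { return value << kSmiTagSize; }
bool IsSmi(intptr_t word) { return (word & kSmiTagMask) == 0; }

int main() {
  intptr_t tagged = ToSmi(42);
  std::printf("tagged=%ld is_smi=%d\n", (long)tagged, IsSmi(tagged));
  std::printf("pointer-like value is_smi=%d\n", IsSmi(tagged | 1));  // tag bit set: not a smi
  return 0;
}
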
......@@ -51,10 +51,15 @@ void JumpTarget::DoJump() {
cgen_->frame()->MergeTo(entry_frame_);
cgen_->DeleteFrame();
__ jmp(&entry_label_);
} else if (entry_frame_ != NULL) {
// Forward jump with a preconfigured entry frame. Assert the
// current frame matches the expected one and jump to the block.
ASSERT(cgen_->frame()->Equals(entry_frame_));
cgen_->DeleteFrame();
__ jmp(&entry_label_);
} else {
// Forward jump. The current frame is added to the end of the list
// of frames reaching the target block and a jump to the merge code
// is emitted.
// Forward jump. Remember the current frame and emit a jump to
// its merge code.
AddReachingFrame(cgen_->frame());
RegisterFile empty;
cgen_->SetFrame(NULL, &empty);
......@@ -114,12 +119,19 @@ void JumpTarget::DoBranch(Condition cc, Hint hint) {
cgen_->SetFrame(fall_through_frame, &non_frame_registers);
__ bind(&original_fall_through);
} else if (entry_frame_ != NULL) {
// Forward branch with a preconfigured entry frame. Assert the
// current frame matches the expected one and branch to the block.
ASSERT(cgen_->frame()->Equals(entry_frame_));
// Use masm_-> instead of __ as forward branches are expected to
// be a fixed size (no inserted coverage-checking instructions
// please). This is used in Reference::GetValue.
masm_->j(cc, &entry_label_, hint);
is_linked_ = true;
} else {
// Forward branch. A copy of the current frame is added to the end of the
// list of frames reaching the target block and a branch to the merge code
// is emitted. Use masm_-> instead of __ as forward branches are expected
// to be a fixed size (no inserted coverage-checking instructions please).
// This is used in Reference::GetValue.
// Forward branch. A copy of the current frame is remembered and
// a branch to the merge code is emitted.
AddReachingFrame(new VirtualFrame(cgen_->frame()));
masm_->j(cc, &merge_labels_.last(), hint);
is_linked_ = true;
......@@ -143,6 +155,8 @@ void JumpTarget::Call() {
cgen_->frame()->SpillAll();
VirtualFrame* target_frame = new VirtualFrame(cgen_->frame());
target_frame->Adjust(1);
// We do not expect a call with a preconfigured entry frame.
ASSERT(entry_frame_ == NULL);
AddReachingFrame(target_frame);
__ call(&merge_labels_.last());
......@@ -158,6 +172,29 @@ void JumpTarget::DoBind(int mergable_elements) {
// block.
ASSERT(!cgen_->has_valid_frame() || cgen_->HasValidEntryRegisters());
// Fast case: the jump target was manually configured with an entry
// frame to use.
if (entry_frame_ != NULL) {
// Assert no reaching frames to deal with.
ASSERT(reaching_frames_.is_empty());
ASSERT(!cgen_->has_valid_frame());
RegisterFile reserved = RegisterAllocator::Reserved();
if (direction_ == BIDIRECTIONAL) {
// Copy the entry frame so the original can be used for a
// possible backward jump.
cgen_->SetFrame(new VirtualFrame(entry_frame_), &reserved);
} else {
// Take ownership of the entry frame.
cgen_->SetFrame(entry_frame_, &reserved);
entry_frame_ = NULL;
}
__ bind(&entry_label_);
is_linked_ = false;
is_bound_ = true;
return;
}
if (direction_ == FORWARD_ONLY) {
// A simple case: no forward jumps and no possible backward jumps.
if (!is_linked()) {
......@@ -207,13 +244,15 @@ void JumpTarget::DoBind(int mergable_elements) {
bool had_fall_through = false;
if (cgen_->has_valid_frame()) {
had_fall_through = true;
AddReachingFrame(cgen_->frame());
AddReachingFrame(cgen_->frame()); // Return value ignored.
RegisterFile empty;
cgen_->SetFrame(NULL, &empty);
}
// Compute the frame to use for entry to the block.
if (entry_frame_ == NULL) {
ComputeEntryFrame(mergable_elements);
}
// Some moves required to merge to an expected frame require purely
// frame state changes, and do not require any code generation.
......
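
The new fast case in DoBind above distinguishes the two directionalities: a BIDIRECTIONAL target copies the entry frame so a later backward jump can still merge to it, while a forward-only target simply takes ownership. A minimal stand-alone model of that ownership decision follows; the types are toy stand-ins, not the V8 classes.

// Minimal stand-in for the ownership decision in the DoBind fast case:
// bidirectional targets keep entry_frame alive for later backward jumps,
// forward-only targets hand it over. Not the real V8 classes.
#include <cassert>
#include <memory>

struct VirtualFrame { int height = 0; };

enum Directionality { FORWARD_ONLY, BIDIRECTIONAL };

struct JumpTarget {
  Directionality direction;
  std::unique_ptr<VirtualFrame> entry_frame;

  // Returns the frame the code generator should adopt at the bind point.
  std::unique_ptr<VirtualFrame> BindFastCase() {
    assert(entry_frame != nullptr);
    if (direction == BIDIRECTIONAL) {
      // Copy, so the original is still there for a backward jump.
      return std::make_unique<VirtualFrame>(*entry_frame);
    }
    // Forward-only: transfer ownership and clear entry_frame.
    return std::move(entry_frame);
  }
};

int main() {
  JumpTarget fwd{FORWARD_ONLY, std::make_unique<VirtualFrame>()};
  auto adopted = fwd.BindFastCase();
  assert(fwd.entry_frame == nullptr && adopted != nullptr);

  JumpTarget bidi{BIDIRECTIONAL, std::make_unique<VirtualFrame>()};
  auto copy = bidi.BindFastCase();
  assert(bidi.entry_frame != nullptr && copy != nullptr);
  return 0;
}
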
......@@ -35,6 +35,9 @@ namespace v8 { namespace internal {
// -------------------------------------------------------------------------
// JumpTarget implementation.
bool JumpTarget::compiling_deferred_code_ = false;
JumpTarget::JumpTarget(CodeGenerator* cgen, Directionality direction)
: cgen_(cgen),
direction_(direction),
......@@ -97,6 +100,22 @@ void JumpTarget::ComputeEntryFrame(int mergable_elements) {
// the directionality of the block. Compute: an entry frame for the
// block.
Counters::compute_entry_frame.Increment();
#ifdef DEBUG
if (compiling_deferred_code_) {
ASSERT(reaching_frames_.length() > 1);
VirtualFrame* frame = reaching_frames_[0];
bool all_identical = true;
for (int i = 1; i < reaching_frames_.length(); i++) {
if (!frame->Equals(reaching_frames_[i])) {
all_identical = false;
break;
}
}
ASSERT(all_identical);
}
#endif
// Choose an initial frame.
VirtualFrame* initial_frame = reaching_frames_[0];
......@@ -509,6 +528,7 @@ void JumpTarget::Bind(Result* arg0,
void JumpTarget::AddReachingFrame(VirtualFrame* frame) {
ASSERT(reaching_frames_.length() == merge_labels_.length());
ASSERT(entry_frame_ == NULL);
Label fresh;
merge_labels_.Add(fresh);
reaching_frames_.Add(frame);
......
......@@ -161,6 +161,10 @@ class JumpTarget : public Malloced { // Shadows are dynamically allocated.
static const int kAllElements = -1; // Not a valid number of elements.
static void set_compiling_deferred_code(bool flag) {
compiling_deferred_code_ = flag;
}
protected:
// The code generator gives access to its current frame.
CodeGenerator* cgen_;
......@@ -198,12 +202,14 @@ class JumpTarget : public Malloced { // Shadows are dynamically allocated.
void DoBind(int mergable_elements);
private:
// Add a virtual frame reaching this labeled block via a forward
// jump, and a fresh label for its merge code.
static bool compiling_deferred_code_;
// Add a virtual frame reaching this labeled block via a forward jump,
// and a corresponding merge code label.
void AddReachingFrame(VirtualFrame* frame);
// Compute a frame to use for entry to this block. Mergable
// elements is as described for the Bind function.
// Compute a frame to use for entry to this block. Mergable elements
// is as described for the Bind function.
void ComputeEntryFrame(int mergable_elements);
DISALLOW_COPY_AND_ASSIGN(JumpTarget);
......
......@@ -124,7 +124,8 @@ namespace v8 { namespace internal {
SC(enum_cache_misses, V8.EnumCacheMisses) \
SC(reloc_info_count, V8.RelocInfoCount) \
SC(reloc_info_size, V8.RelocInfoSize) \
SC(zone_segment_bytes, V8.ZoneSegmentBytes)
SC(zone_segment_bytes, V8.ZoneSegmentBytes) \
SC(compute_entry_frame, V8.ComputeEntryFrame)
// This file contains all the v8 counters that are in use.
......
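
The new V8.ComputeEntryFrame line plugs into the STATS_COUNTER_LIST X-macro table, which is what makes the `Counters::compute_entry_frame.Increment()` call added in jump-target.cc compile. Below is a simplified, self-contained sketch of that macro pattern; StatsCounter here is a toy stand-in for the real class, and the captions are plain strings rather than the real stringified names.

// Simplified sketch of the X-macro counter table pattern used by
// STATS_COUNTER_LIST above. StatsCounter is a toy stand-in.
#include <cstdio>

struct StatsCounter {
  const char* name;
  int count;
  void Increment() { ++count; }
};

// One line per counter; each use of the list re-expands SC differently.
#define STATS_COUNTER_LIST(SC)                      \
  SC(compute_entry_frame, "V8.ComputeEntryFrame")   \
  SC(enum_cache_misses, "V8.EnumCacheMisses")

struct Counters {
#define SC(name, caption) static StatsCounter name;
  STATS_COUNTER_LIST(SC)
#undef SC
};

// Define and label each counter by expanding the same list again.
#define SC(name, caption) StatsCounter Counters::name = {caption};
STATS_COUNTER_LIST(SC)
#undef SC

int main() {
  Counters::compute_entry_frame.Increment();  // as in ComputeEntryFrame()
  std::printf("%s = %d\n", Counters::compute_entry_frame.name,
              Counters::compute_entry_frame.count);
  return 0;
}
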