Initial bypass of JumpTarget::ComputeEntryFrame for deferred code
entry labels where it is statically safe.

Review URL: http://codereview.chromium.org/115296

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@1943 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
parent edf51c0f
...@@ -52,6 +52,8 @@ void JumpTarget::DoJump() { ...@@ -52,6 +52,8 @@ void JumpTarget::DoJump() {
cgen_->DeleteFrame(); cgen_->DeleteFrame();
__ jmp(&entry_label_); __ jmp(&entry_label_);
} else { } else {
// Preconfigured entry frame is not used on ARM.
ASSERT(entry_frame_ == NULL);
// Forward jump. The current frame is added to the end of the list // Forward jump. The current frame is added to the end of the list
// of frames reaching the target block and a jump to the merge code // of frames reaching the target block and a jump to the merge code
// is emitted. // is emitted.
...@@ -115,6 +117,8 @@ void JumpTarget::DoBranch(Condition cc, Hint ignored) { ...@@ -115,6 +117,8 @@ void JumpTarget::DoBranch(Condition cc, Hint ignored) {
__ bind(&original_fall_through); __ bind(&original_fall_through);
} else { } else {
// Preconfigured entry frame is not used on ARM.
ASSERT(entry_frame_ == NULL);
// Forward branch. A copy of the current frame is added to the end // Forward branch. A copy of the current frame is added to the end
// of the list of frames reaching the target block and a branch to // of the list of frames reaching the target block and a branch to
// the merge code is emitted. // the merge code is emitted.
...@@ -141,6 +145,8 @@ void JumpTarget::Call() { ...@@ -141,6 +145,8 @@ void JumpTarget::Call() {
cgen_->frame()->SpillAll(); cgen_->frame()->SpillAll();
VirtualFrame* target_frame = new VirtualFrame(cgen_->frame()); VirtualFrame* target_frame = new VirtualFrame(cgen_->frame());
target_frame->Adjust(1); target_frame->Adjust(1);
// We do not expect a call with a preconfigured entry frame.
ASSERT(entry_frame_ == NULL);
AddReachingFrame(target_frame); AddReachingFrame(target_frame);
__ bl(&merge_labels_.last()); __ bl(&merge_labels_.last());
...@@ -205,13 +211,15 @@ void JumpTarget::DoBind(int mergable_elements) { ...@@ -205,13 +211,15 @@ void JumpTarget::DoBind(int mergable_elements) {
bool had_fall_through = false; bool had_fall_through = false;
if (cgen_->has_valid_frame()) { if (cgen_->has_valid_frame()) {
had_fall_through = true; had_fall_through = true;
AddReachingFrame(cgen_->frame()); AddReachingFrame(cgen_->frame()); // Return value ignored.
RegisterFile empty; RegisterFile empty;
cgen_->SetFrame(NULL, &empty); cgen_->SetFrame(NULL, &empty);
} }
// Compute the frame to use for entry to the block. // Compute the frame to use for entry to the block.
if (entry_frame_ == NULL) {
ComputeEntryFrame(mergable_elements); ComputeEntryFrame(mergable_elements);
}
// Some moves required to merge to an expected frame require purely // Some moves required to merge to an expected frame require purely
// frame state changes, and do not require any code generation. // frame state changes, and do not require any code generation.
......
...@@ -30,10 +30,19 @@ ...@@ -30,10 +30,19 @@
#define V8_CODEGEN_INL_H_ #define V8_CODEGEN_INL_H_
#include "codegen.h" #include "codegen.h"
#include "register-allocator-inl.h"
namespace v8 { namespace internal { namespace v8 { namespace internal {
// Record a snapshot of the code generator's current virtual frame,
// with |arg| temporarily pushed on top, as the preconfigured entry
// frame of this deferred code's entry jump target.  Presumably this
// lets forward jumps/branches to the deferred code skip the general
// frame-merge computation (ComputeEntryFrame) when the reaching frame
// is statically known to match — TODO confirm against JumpTarget.
void DeferredCode::SetEntryFrame(Result* arg) {
// The snapshot is only meaningful relative to an existing frame.
ASSERT(generator()->has_valid_frame());
// Push |arg| so the copied frame records its location/register state.
generator()->frame()->Push(arg);
enter()->set_entry_frame(new VirtualFrame(generator()->frame()));
// Pop restores the frame to its pre-call shape and hands |arg| back
// to the caller (the push/pop pair leaves the live frame unchanged).
*arg = generator()->frame()->Pop();
}
// ----------------------------------------------------------------------------- // -----------------------------------------------------------------------------
// Support for "structured" code comments. // Support for "structured" code comments.
// //
......
...@@ -117,7 +117,14 @@ class DeferredCode: public ZoneObject { ...@@ -117,7 +117,14 @@ class DeferredCode: public ZoneObject {
MacroAssembler* masm() const { return masm_; } MacroAssembler* masm() const { return masm_; }
CodeGenerator* generator() const { return generator_; } CodeGenerator* generator() const { return generator_; }
// Set the virtual frame for entry to the deferred code as a
// snapshot of the code generator's current frame (plus additional
// results). This is optional, but should be done before branching
// or jumping to the deferred code.
inline void SetEntryFrame(Result* arg);
JumpTarget* enter() { return &enter_; } JumpTarget* enter() { return &enter_; }
void BindExit() { exit_.Bind(0); } void BindExit() { exit_.Bind(0); }
void BindExit(Result* result) { exit_.Bind(result, 1); } void BindExit(Result* result) { exit_.Bind(result, 1); }
void BindExit(Result* result0, Result* result1) { void BindExit(Result* result0, Result* result1) {
......
...@@ -113,6 +113,8 @@ void CodeGenerator::GenCode(FunctionLiteral* fun) { ...@@ -113,6 +113,8 @@ void CodeGenerator::GenCode(FunctionLiteral* fun) {
// Adjust for function-level loop nesting. // Adjust for function-level loop nesting.
loop_nesting_ += fun->loop_nesting(); loop_nesting_ += fun->loop_nesting();
JumpTarget::set_compiling_deferred_code(false);
{ {
CodeGenState state(this); CodeGenState state(this);
...@@ -316,7 +318,9 @@ void CodeGenerator::GenCode(FunctionLiteral* fun) { ...@@ -316,7 +318,9 @@ void CodeGenerator::GenCode(FunctionLiteral* fun) {
if (HasStackOverflow()) { if (HasStackOverflow()) {
ClearDeferred(); ClearDeferred();
} else { } else {
JumpTarget::set_compiling_deferred_code(true);
ProcessDeferred(); ProcessDeferred();
JumpTarget::set_compiling_deferred_code(false);
} }
// There is no need to delete the register allocator, it is a // There is no need to delete the register allocator, it is a
...@@ -1247,6 +1251,10 @@ void CodeGenerator::ConstantSmiBinaryOperation(Token::Value op, ...@@ -1247,6 +1251,10 @@ void CodeGenerator::ConstantSmiBinaryOperation(Token::Value op,
switch (op) { switch (op) {
case Token::ADD: { case Token::ADD: {
operand->ToRegister();
frame_->Spill(operand->reg());
__ add(Operand(operand->reg()), Immediate(value));
DeferredCode* deferred = NULL; DeferredCode* deferred = NULL;
if (reversed) { if (reversed) {
deferred = new DeferredInlineSmiAddReversed(this, smi_value, deferred = new DeferredInlineSmiAddReversed(this, smi_value,
...@@ -1254,9 +1262,7 @@ void CodeGenerator::ConstantSmiBinaryOperation(Token::Value op, ...@@ -1254,9 +1262,7 @@ void CodeGenerator::ConstantSmiBinaryOperation(Token::Value op,
} else { } else {
deferred = new DeferredInlineSmiAdd(this, smi_value, overwrite_mode); deferred = new DeferredInlineSmiAdd(this, smi_value, overwrite_mode);
} }
operand->ToRegister(); deferred->SetEntryFrame(operand);
frame_->Spill(operand->reg());
__ add(Operand(operand->reg()), Immediate(value));
deferred->enter()->Branch(overflow, operand, not_taken); deferred->enter()->Branch(overflow, operand, not_taken);
__ test(operand->reg(), Immediate(kSmiTagMask)); __ test(operand->reg(), Immediate(kSmiTagMask));
deferred->enter()->Branch(not_zero, operand, not_taken); deferred->enter()->Branch(not_zero, operand, not_taken);
...@@ -1271,8 +1277,7 @@ void CodeGenerator::ConstantSmiBinaryOperation(Token::Value op, ...@@ -1271,8 +1277,7 @@ void CodeGenerator::ConstantSmiBinaryOperation(Token::Value op,
if (reversed) { if (reversed) {
answer = allocator()->Allocate(); answer = allocator()->Allocate();
ASSERT(answer.is_valid()); ASSERT(answer.is_valid());
deferred = new DeferredInlineSmiSubReversed(this, deferred = new DeferredInlineSmiSubReversed(this, smi_value,
smi_value,
overwrite_mode); overwrite_mode);
__ Set(answer.reg(), Immediate(value)); __ Set(answer.reg(), Immediate(value));
// We are in the reversed case so they can't both be Smi constants. // We are in the reversed case so they can't both be Smi constants.
...@@ -1281,12 +1286,11 @@ void CodeGenerator::ConstantSmiBinaryOperation(Token::Value op, ...@@ -1281,12 +1286,11 @@ void CodeGenerator::ConstantSmiBinaryOperation(Token::Value op,
} else { } else {
operand->ToRegister(); operand->ToRegister();
frame_->Spill(operand->reg()); frame_->Spill(operand->reg());
deferred = new DeferredInlineSmiSub(this, deferred = new DeferredInlineSmiSub(this, smi_value, overwrite_mode);
smi_value,
overwrite_mode);
__ sub(Operand(operand->reg()), Immediate(value)); __ sub(Operand(operand->reg()), Immediate(value));
answer = *operand; answer = *operand;
} }
deferred->SetEntryFrame(operand);
deferred->enter()->Branch(overflow, operand, not_taken); deferred->enter()->Branch(overflow, operand, not_taken);
__ test(answer.reg(), Immediate(kSmiTagMask)); __ test(answer.reg(), Immediate(kSmiTagMask));
deferred->enter()->Branch(not_zero, operand, not_taken); deferred->enter()->Branch(not_zero, operand, not_taken);
...@@ -5413,19 +5417,24 @@ void Reference::GetValue(TypeofState typeof_state) { ...@@ -5413,19 +5417,24 @@ void Reference::GetValue(TypeofState typeof_state) {
} else { } else {
// Inline the inobject property case. // Inline the inobject property case.
Comment cmnt(masm, "[ Inlined named property load"); Comment cmnt(masm, "[ Inlined named property load");
DeferredReferenceGetNamedValue* deferred =
new DeferredReferenceGetNamedValue(cgen_, GetName());
Result receiver = cgen_->frame()->Pop(); Result receiver = cgen_->frame()->Pop();
receiver.ToRegister(); receiver.ToRegister();
// Check that the receiver is a heap object.
__ test(receiver.reg(), Immediate(kSmiTagMask));
deferred->enter()->Branch(zero, &receiver, not_taken);
// Preallocate the value register to ensure that there is no // Preallocate the value register to ensure that there is no
// spill emitted between the patch site label and the offset in // spill emitted between the patch site label and the offset in
// the load instruction. // the load instruction and that all frames reaching the
// deferred code are identical.
Result value = cgen_->allocator()->Allocate(); Result value = cgen_->allocator()->Allocate();
ASSERT(value.is_valid()); ASSERT(value.is_valid());
// Check that the receiver is a heap object.
__ test(receiver.reg(), Immediate(kSmiTagMask));
DeferredReferenceGetNamedValue* deferred =
new DeferredReferenceGetNamedValue(cgen_, GetName());
deferred->SetEntryFrame(&receiver);
deferred->enter()->Branch(zero, &receiver, not_taken);
__ bind(deferred->patch_site()); __ bind(deferred->patch_site());
// This is the map check instruction that will be patched (so we can't // This is the map check instruction that will be patched (so we can't
// use the double underscore macro that may insert instructions). // use the double underscore macro that may insert instructions).
......
...@@ -51,10 +51,15 @@ void JumpTarget::DoJump() { ...@@ -51,10 +51,15 @@ void JumpTarget::DoJump() {
cgen_->frame()->MergeTo(entry_frame_); cgen_->frame()->MergeTo(entry_frame_);
cgen_->DeleteFrame(); cgen_->DeleteFrame();
__ jmp(&entry_label_); __ jmp(&entry_label_);
} else if (entry_frame_ != NULL) {
// Forward jump with a preconfigured entry frame. Assert the
// current frame matches the expected one and jump to the block.
ASSERT(cgen_->frame()->Equals(entry_frame_));
cgen_->DeleteFrame();
__ jmp(&entry_label_);
} else { } else {
// Forward jump. The current frame is added to the end of the list // Forward jump. Remember the current frame and emit a jump to
// of frames reaching the target block and a jump to the merge code // its merge code.
// is emitted.
AddReachingFrame(cgen_->frame()); AddReachingFrame(cgen_->frame());
RegisterFile empty; RegisterFile empty;
cgen_->SetFrame(NULL, &empty); cgen_->SetFrame(NULL, &empty);
...@@ -114,12 +119,19 @@ void JumpTarget::DoBranch(Condition cc, Hint hint) { ...@@ -114,12 +119,19 @@ void JumpTarget::DoBranch(Condition cc, Hint hint) {
cgen_->SetFrame(fall_through_frame, &non_frame_registers); cgen_->SetFrame(fall_through_frame, &non_frame_registers);
__ bind(&original_fall_through); __ bind(&original_fall_through);
} else if (entry_frame_ != NULL) {
// Forward branch with a preconfigured entry frame. Assert the
// current frame matches the expected one and branch to the block.
ASSERT(cgen_->frame()->Equals(entry_frame_));
// Use masm_-> instead of __ as forward branches are expected to
// be a fixed size (no inserted coverage-checking instructions
// please). This is used in Reference::GetValue.
masm_->j(cc, &entry_label_, hint);
is_linked_ = true;
} else { } else {
// Forward branch. A copy of the current frame is added to the end of the // Forward branch. A copy of the current frame is remembered and
// list of frames reaching the target block and a branch to the merge code // a branch to the merge code is emitted.
// is emitted. Use masm_-> instead of __ as forward branches are expected
// to be a fixed size (no inserted coverage-checking instructions please).
// This is used in Reference::GetValue.
AddReachingFrame(new VirtualFrame(cgen_->frame())); AddReachingFrame(new VirtualFrame(cgen_->frame()));
masm_->j(cc, &merge_labels_.last(), hint); masm_->j(cc, &merge_labels_.last(), hint);
is_linked_ = true; is_linked_ = true;
...@@ -143,6 +155,8 @@ void JumpTarget::Call() { ...@@ -143,6 +155,8 @@ void JumpTarget::Call() {
cgen_->frame()->SpillAll(); cgen_->frame()->SpillAll();
VirtualFrame* target_frame = new VirtualFrame(cgen_->frame()); VirtualFrame* target_frame = new VirtualFrame(cgen_->frame());
target_frame->Adjust(1); target_frame->Adjust(1);
// We do not expect a call with a preconfigured entry frame.
ASSERT(entry_frame_ == NULL);
AddReachingFrame(target_frame); AddReachingFrame(target_frame);
__ call(&merge_labels_.last()); __ call(&merge_labels_.last());
...@@ -158,6 +172,29 @@ void JumpTarget::DoBind(int mergable_elements) { ...@@ -158,6 +172,29 @@ void JumpTarget::DoBind(int mergable_elements) {
// block. // block.
ASSERT(!cgen_->has_valid_frame() || cgen_->HasValidEntryRegisters()); ASSERT(!cgen_->has_valid_frame() || cgen_->HasValidEntryRegisters());
// Fast case: the jump target was manually configured with an entry
// frame to use.
if (entry_frame_ != NULL) {
// Assert no reaching frames to deal with.
ASSERT(reaching_frames_.is_empty());
ASSERT(!cgen_->has_valid_frame());
RegisterFile reserved = RegisterAllocator::Reserved();
if (direction_ == BIDIRECTIONAL) {
// Copy the entry frame so the original can be used for a
// possible backward jump.
cgen_->SetFrame(new VirtualFrame(entry_frame_), &reserved);
} else {
// Take ownership of the entry frame.
cgen_->SetFrame(entry_frame_, &reserved);
entry_frame_ = NULL;
}
__ bind(&entry_label_);
is_linked_ = false;
is_bound_ = true;
return;
}
if (direction_ == FORWARD_ONLY) { if (direction_ == FORWARD_ONLY) {
// A simple case: no forward jumps and no possible backward jumps. // A simple case: no forward jumps and no possible backward jumps.
if (!is_linked()) { if (!is_linked()) {
...@@ -207,13 +244,15 @@ void JumpTarget::DoBind(int mergable_elements) { ...@@ -207,13 +244,15 @@ void JumpTarget::DoBind(int mergable_elements) {
bool had_fall_through = false; bool had_fall_through = false;
if (cgen_->has_valid_frame()) { if (cgen_->has_valid_frame()) {
had_fall_through = true; had_fall_through = true;
AddReachingFrame(cgen_->frame()); AddReachingFrame(cgen_->frame()); // Return value ignored.
RegisterFile empty; RegisterFile empty;
cgen_->SetFrame(NULL, &empty); cgen_->SetFrame(NULL, &empty);
} }
// Compute the frame to use for entry to the block. // Compute the frame to use for entry to the block.
if (entry_frame_ == NULL) {
ComputeEntryFrame(mergable_elements); ComputeEntryFrame(mergable_elements);
}
// Some moves required to merge to an expected frame require purely // Some moves required to merge to an expected frame require purely
// frame state changes, and do not require any code generation. // frame state changes, and do not require any code generation.
......
...@@ -35,6 +35,9 @@ namespace v8 { namespace internal { ...@@ -35,6 +35,9 @@ namespace v8 { namespace internal {
// ------------------------------------------------------------------------- // -------------------------------------------------------------------------
// JumpTarget implementation. // JumpTarget implementation.
bool JumpTarget::compiling_deferred_code_ = false;
JumpTarget::JumpTarget(CodeGenerator* cgen, Directionality direction) JumpTarget::JumpTarget(CodeGenerator* cgen, Directionality direction)
: cgen_(cgen), : cgen_(cgen),
direction_(direction), direction_(direction),
...@@ -97,6 +100,22 @@ void JumpTarget::ComputeEntryFrame(int mergable_elements) { ...@@ -97,6 +100,22 @@ void JumpTarget::ComputeEntryFrame(int mergable_elements) {
// the directionality of the block. Compute: an entry frame for the // the directionality of the block. Compute: an entry frame for the
// block. // block.
Counters::compute_entry_frame.Increment();
#ifdef DEBUG
if (compiling_deferred_code_) {
ASSERT(reaching_frames_.length() > 1);
VirtualFrame* frame = reaching_frames_[0];
bool all_identical = true;
for (int i = 1; i < reaching_frames_.length(); i++) {
if (!frame->Equals(reaching_frames_[i])) {
all_identical = false;
break;
}
}
ASSERT(!all_identical || all_identical);
}
#endif
// Choose an initial frame. // Choose an initial frame.
VirtualFrame* initial_frame = reaching_frames_[0]; VirtualFrame* initial_frame = reaching_frames_[0];
...@@ -509,6 +528,7 @@ void JumpTarget::Bind(Result* arg0, ...@@ -509,6 +528,7 @@ void JumpTarget::Bind(Result* arg0,
void JumpTarget::AddReachingFrame(VirtualFrame* frame) { void JumpTarget::AddReachingFrame(VirtualFrame* frame) {
ASSERT(reaching_frames_.length() == merge_labels_.length()); ASSERT(reaching_frames_.length() == merge_labels_.length());
ASSERT(entry_frame_ == NULL);
Label fresh; Label fresh;
merge_labels_.Add(fresh); merge_labels_.Add(fresh);
reaching_frames_.Add(frame); reaching_frames_.Add(frame);
......
...@@ -161,6 +161,10 @@ class JumpTarget : public Malloced { // Shadows are dynamically allocated. ...@@ -161,6 +161,10 @@ class JumpTarget : public Malloced { // Shadows are dynamically allocated.
static const int kAllElements = -1; // Not a valid number of elements. static const int kAllElements = -1; // Not a valid number of elements.
static void set_compiling_deferred_code(bool flag) {
compiling_deferred_code_ = flag;
}
protected: protected:
// The code generator gives access to its current frame. // The code generator gives access to its current frame.
CodeGenerator* cgen_; CodeGenerator* cgen_;
...@@ -198,12 +202,14 @@ class JumpTarget : public Malloced { // Shadows are dynamically allocated. ...@@ -198,12 +202,14 @@ class JumpTarget : public Malloced { // Shadows are dynamically allocated.
void DoBind(int mergable_elements); void DoBind(int mergable_elements);
private: private:
// Add a virtual frame reaching this labeled block via a forward static bool compiling_deferred_code_;
// jump, and a fresh label for its merge code.
// Add a virtual frame reaching this labeled block via a forward jump,
// and a corresponding merge code label.
void AddReachingFrame(VirtualFrame* frame); void AddReachingFrame(VirtualFrame* frame);
// Compute a frame to use for entry to this block. Mergable // Compute a frame to use for entry to this block. Mergable elements
// elements is as described for the Bind function. // is as described for the Bind function.
void ComputeEntryFrame(int mergable_elements); void ComputeEntryFrame(int mergable_elements);
DISALLOW_COPY_AND_ASSIGN(JumpTarget); DISALLOW_COPY_AND_ASSIGN(JumpTarget);
......
...@@ -124,7 +124,8 @@ namespace v8 { namespace internal { ...@@ -124,7 +124,8 @@ namespace v8 { namespace internal {
SC(enum_cache_misses, V8.EnumCacheMisses) \ SC(enum_cache_misses, V8.EnumCacheMisses) \
SC(reloc_info_count, V8.RelocInfoCount) \ SC(reloc_info_count, V8.RelocInfoCount) \
SC(reloc_info_size, V8.RelocInfoSize) \ SC(reloc_info_size, V8.RelocInfoSize) \
SC(zone_segment_bytes, V8.ZoneSegmentBytes) SC(zone_segment_bytes, V8.ZoneSegmentBytes) \
SC(compute_entry_frame, V8.ComputeEntryFrame)
// This file contains all the v8 counters that are in use. // This file contains all the v8 counters that are in use.
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment