Commit de9d1d8b authored by rmcilroy, committed by Commit bot

[Interpreter] Move jump processing to bytecode array writer.

This moves processing of jumps out of the bytecode array builder and into
the bytecode array writer. This simplifies the pipeline by avoiding having
to flush for offsets and patch up jump offsets in the bytecode array builder
based on what was emitted by the bytecode array writer.

This also enables future refactorings to add dead code elimination back
into the pipeline, and to move processing of scalable operand sizes to the
end of the pipeline (in the bytecode array writer) rather than having to
deal with scalable operand types throughout the pipeline.

BUG=v8:4280,chromium:616064

Review-Url: https://codereview.chromium.org/2035813002
Cr-Commit-Position: refs/heads/master@{#36716}
parent a09fb95b
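
For context on the mechanism that moves: a forward jump has to be emitted before its target offset is known, so the label records the jump site and the operand is patched once the label is bound; a backward jump can be emitted with its final operand straight away. The following self-contained sketch (simplified stand-in types and 8-bit operands only, not the actual V8 sources) illustrates the scheme the bytecode array writer now owns:

#include <cassert>
#include <cstddef>
#include <cstdint>
#include <vector>

// Stand-ins for the interpreter types; the real code uses V8's Bytecode
// enum, ZoneVector<uint8_t>, and scaled operand widths.
enum class Bytecode : uint8_t { kJump = 0x10, kReturn = 0x20 };

struct Label {
  static constexpr size_t kInvalid = static_cast<size_t>(-1);
  bool bound = false;
  size_t offset = kInvalid;  // jump site while unbound, target once bound
};

class Writer {
 public:
  // Emits a jump with an 8-bit signed relative operand.
  void WriteJump(Bytecode bytecode, Label* label) {
    size_t jump_site = bytes_.size();
    bytes_.push_back(static_cast<uint8_t>(bytecode));
    if (label->bound) {
      // Backward jump: the target is known, emit the (negative) delta now.
      bytes_.push_back(static_cast<uint8_t>(static_cast<int8_t>(
          static_cast<int>(label->offset) - static_cast<int>(jump_site))));
    } else {
      // Forward jump: record the jump site and emit a placeholder operand.
      assert(label->offset == Label::kInvalid);  // at most one referrer
      label->offset = jump_site;
      bytes_.push_back(0);
    }
  }

  // Binds the label to the current offset, patching a pending forward jump.
  void Bind(Label* label) {
    size_t target = bytes_.size();
    if (!label->bound && label->offset != Label::kInvalid) {
      size_t jump_site = label->offset;
      bytes_[jump_site + 1] =
          static_cast<uint8_t>(static_cast<int8_t>(target - jump_site));
    }
    label->bound = true;
    label->offset = target;
  }

  void Write(Bytecode bytecode) {
    bytes_.push_back(static_cast<uint8_t>(bytecode));
  }

  const std::vector<uint8_t>& bytes() const { return bytes_; }

 private:
  std::vector<uint8_t> bytes_;
};

int main() {
  Writer writer;
  Label done;
  writer.WriteJump(Bytecode::kJump, &done);  // jump site 0, placeholder operand
  writer.Write(Bytecode::kReturn);           // bytecode to be jumped over
  writer.Bind(&done);                        // patches the operand to 3 - 0 = 3
  assert(writer.bytes()[1] == 3);
  return 0;
}
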
......@@ -1229,6 +1229,7 @@ v8_source_set("v8_base") {
"src/interpreter/bytecode-array-writer.h",
"src/interpreter/bytecode-generator.cc",
"src/interpreter/bytecode-generator.h",
"src/interpreter/bytecode-label.h",
"src/interpreter/bytecode-peephole-optimizer.cc",
"src/interpreter/bytecode-peephole-optimizer.h",
"src/interpreter/bytecode-pipeline.cc",
......
This diff is collapsed.
......@@ -11,7 +11,6 @@
#include "src/interpreter/bytecodes.h"
#include "src/interpreter/constant-array-builder.h"
#include "src/interpreter/handler-table-builder.h"
#include "src/interpreter/source-position-table.h"
#include "src/zone-containers.h"
namespace v8 {
......@@ -297,8 +296,6 @@ class BytecodeArrayBuilder final : public ZoneObject {
static Bytecode BytecodeForDelete(LanguageMode language_mode);
static Bytecode BytecodeForCall(TailCallMode tail_call_mode);
static Bytecode GetJumpWithConstantOperand(Bytecode jump_smi8_operand);
void Output(Bytecode bytecode);
void OutputScaled(Bytecode bytecode, OperandScale operand_scale,
uint32_t operand0, uint32_t operand1, uint32_t operand2,
......@@ -312,15 +309,7 @@ class BytecodeArrayBuilder final : public ZoneObject {
BytecodeArrayBuilder& OutputJump(Bytecode jump_bytecode,
BytecodeLabel* label);
void PatchJump(size_t jump_target, size_t jump_location);
void PatchJumpWith8BitOperand(ZoneVector<uint8_t>* bytecodes,
size_t jump_location, int delta);
void PatchJumpWith16BitOperand(ZoneVector<uint8_t>* bytecodes,
size_t jump_location, int delta);
void PatchJumpWith32BitOperand(ZoneVector<uint8_t>* bytecodes,
size_t jump_location, int delta);
void LeaveBasicBlock();
bool OperandIsValid(Bytecode bytecode, OperandScale operand_scale,
int operand_index, uint32_t operand_value) const;
......@@ -337,6 +326,8 @@ class BytecodeArrayBuilder final : public ZoneObject {
// during bytecode generation.
BytecodeArrayBuilder& Illegal();
void LeaveBasicBlock() { return_seen_in_block_ = false; }
Isolate* isolate() const { return isolate_; }
BytecodeArrayWriter* bytecode_array_writer() {
return &bytecode_array_writer_;
......@@ -351,18 +342,13 @@ class BytecodeArrayBuilder final : public ZoneObject {
HandlerTableBuilder* handler_table_builder() {
return &handler_table_builder_;
}
SourcePositionTableBuilder* source_position_table_builder() {
return &source_position_table_builder_;
}
Isolate* isolate_;
Zone* zone_;
bool bytecode_generated_;
ConstantArrayBuilder constant_array_builder_;
HandlerTableBuilder handler_table_builder_;
SourcePositionTableBuilder source_position_table_builder_;
bool return_seen_in_block_;
int unbound_jumps_;
int parameter_count_;
int local_register_count_;
int context_register_count_;
......@@ -375,47 +361,6 @@ class BytecodeArrayBuilder final : public ZoneObject {
DISALLOW_COPY_AND_ASSIGN(BytecodeArrayBuilder);
};
// A label representing a branch target in a bytecode array. When a
// label is bound, it represents a known position in the bytecode
// array. For labels that are forward references there can be at most
// one reference whilst it is unbound.
class BytecodeLabel final {
public:
BytecodeLabel() : bound_(false), offset_(kInvalidOffset) {}
bool is_bound() const { return bound_; }
size_t offset() const { return offset_; }
private:
static const size_t kInvalidOffset = static_cast<size_t>(-1);
void bind_to(size_t offset) {
DCHECK(!bound_ && offset != kInvalidOffset);
offset_ = offset;
bound_ = true;
}
void set_referrer(size_t offset) {
DCHECK(!bound_ && offset != kInvalidOffset && offset_ == kInvalidOffset);
offset_ = offset;
}
bool is_forward_target() const {
return offset() != kInvalidOffset && !is_bound();
}
// There are three states for a label:
// bound_ offset_
// UNSET false kInvalidOffset
// FORWARD_TARGET false Offset of referring jump
// BACKWARD_TARGET true Offset of label in bytecode array when bound
bool bound_;
size_t offset_;
friend class BytecodeArrayBuilder;
};
} // namespace interpreter
} // namespace internal
} // namespace v8
......
This diff is collapsed.
......@@ -6,40 +6,61 @@
#define V8_INTERPRETER_BYTECODE_ARRAY_WRITER_H_
#include "src/interpreter/bytecode-pipeline.h"
#include "src/interpreter/source-position-table.h"
namespace v8 {
namespace internal {
namespace interpreter {
class BytecodeLabel;
class SourcePositionTableBuilder;
class ConstantArrayBuilder;
// Class for emitting bytecode as the final stage of the bytecode
// generation pipeline.
class BytecodeArrayWriter final : public BytecodePipelineStage {
public:
BytecodeArrayWriter(
Zone* zone, SourcePositionTableBuilder* source_position_table_builder);
BytecodeArrayWriter(Isolate* isolate, Zone* zone,
ConstantArrayBuilder* constant_array_builder);
virtual ~BytecodeArrayWriter();
// BytecodePipelineStage interface.
void Write(BytecodeNode* node) override;
size_t FlushForOffset() override;
void FlushBasicBlock() override;
// Get the bytecode vector.
ZoneVector<uint8_t>* bytecodes() { return &bytecodes_; }
// Returns the size in bytes of the frame associated with the
// bytecode written.
int GetMaximumFrameSizeUsed();
void WriteJump(BytecodeNode* node, BytecodeLabel* label) override;
void BindLabel(BytecodeLabel* label) override;
void BindLabel(const BytecodeLabel& target, BytecodeLabel* label) override;
Handle<BytecodeArray> ToBytecodeArray(
int fixed_register_count, int parameter_count,
Handle<FixedArray> handler_table) override;
private:
void PatchJump(size_t jump_target, size_t jump_location);
void PatchJumpWith8BitOperand(size_t jump_location, int delta);
void PatchJumpWith16BitOperand(size_t jump_location, int delta);
void PatchJumpWith32BitOperand(size_t jump_location, int delta);
void EmitBytecode(const BytecodeNode* const node);
void EmitJump(BytecodeNode* node, BytecodeLabel* label);
void UpdateSourcePositionTable(const BytecodeNode* const node);
Isolate* isolate() { return isolate_; }
ZoneVector<uint8_t>* bytecodes() { return &bytecodes_; }
SourcePositionTableBuilder* source_position_table_builder() {
return &source_position_table_builder_;
}
ConstantArrayBuilder* constant_array_builder() {
return constant_array_builder_;
}
int max_register_count() { return max_register_count_; }
Isolate* isolate_;
ZoneVector<uint8_t> bytecodes_;
int max_register_count_;
SourcePositionTableBuilder* source_position_table_builder_;
int unbound_jumps_;
SourcePositionTableBuilder source_position_table_builder_;
ConstantArrayBuilder* constant_array_builder_;
friend class BytecodeArrayWriterUnittest;
DISALLOW_COPY_AND_ASSIGN(BytecodeArrayWriter);
};
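
The corresponding implementation file is collapsed above, but the intent of the per-width patch helpers can be sketched. A stand-alone take on the 16-bit case (assumptions: the real code additionally accounts for operand-scale prefix bytes and uses V8's unaligned write helpers; little-endian order is chosen here purely for concreteness):

#include <cstddef>
#include <cstdint>
#include <vector>

// Overwrites the placeholder operand following the jump bytecode at
// |jump_location| with the now-known jump delta.
void PatchJumpWith16BitOperand(std::vector<uint8_t>* bytecodes,
                               size_t jump_location, int delta) {
  size_t operand_location = jump_location + 1;  // skip the jump bytecode
  uint16_t operand = static_cast<uint16_t>(delta);
  (*bytecodes)[operand_location] = static_cast<uint8_t>(operand & 0xff);
  (*bytecodes)[operand_location + 1] = static_cast<uint8_t>(operand >> 8);
}
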
......
This diff is collapsed.
......@@ -7,6 +7,7 @@
#include "src/ast/ast.h"
#include "src/interpreter/bytecode-array-builder.h"
#include "src/interpreter/bytecode-label.h"
#include "src/interpreter/bytecodes.h"
namespace v8 {
......
// Copyright 2015 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_INTERPRETER_BYTECODE_LABEL_H_
#define V8_INTERPRETER_BYTECODE_LABEL_H_
namespace v8 {
namespace internal {
namespace interpreter {
// A label representing a branch target in a bytecode array. When a
// label is bound, it represents a known position in the bytecode
// array. For labels that are forward references there can be at most
// one reference whilst it is unbound.
class BytecodeLabel final {
public:
BytecodeLabel() : bound_(false), offset_(kInvalidOffset) {}
bool is_bound() const { return bound_; }
size_t offset() const { return offset_; }
private:
static const size_t kInvalidOffset = static_cast<size_t>(-1);
void bind_to(size_t offset) {
DCHECK(!bound_ && offset != kInvalidOffset);
offset_ = offset;
bound_ = true;
}
void set_referrer(size_t offset) {
DCHECK(!bound_ && offset != kInvalidOffset && offset_ == kInvalidOffset);
offset_ = offset;
}
bool is_forward_target() const {
return offset() != kInvalidOffset && !is_bound();
}
// There are three states for a label:
// bound_ offset_
// UNSET false kInvalidOffset
// FORWARD_TARGET false Offset of referring jump
// BACKWARD_TARGET true Offset of label in bytecode array when bound
bool bound_;
size_t offset_;
friend class BytecodeArrayWriter;
};
} // namespace interpreter
} // namespace internal
} // namespace v8
#endif // V8_INTERPRETER_BYTECODE_LABEL_H_
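
To make the three states concrete, a typical forward branch walks a label through all of them, while a backward branch binds first so the delta is known immediately (builder calls are illustrative of the BytecodeArrayBuilder API, not lines from this commit):

BytecodeLabel label;          // UNSET: !bound_, offset_ == kInvalidOffset
builder.JumpIfTrue(&label);   // FORWARD_TARGET: offset_ = referring jump site
builder.LoadUndefined();      // bytecodes between the jump and its target
builder.Bind(&label);         // bound: offset_ = this position, jump patched

BytecodeLabel loop;
builder.Bind(&loop);          // bound immediately: a backward target
builder.StackCheck();
builder.Jump(&loop);          // delta to the target is known at emission
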
......@@ -15,67 +15,67 @@ namespace interpreter {
BytecodePeepholeOptimizer::BytecodePeepholeOptimizer(
ConstantArrayBuilder* constant_array_builder,
BytecodePipelineStage* next_stage)
: constant_array_builder_(constant_array_builder),
next_stage_(next_stage),
last_is_discardable_(false) {
: constant_array_builder_(constant_array_builder), next_stage_(next_stage) {
InvalidateLast();
}
void BytecodePeepholeOptimizer::InvalidateLast() {
last_.set_bytecode(Bytecode::kIllegal);
// override
Handle<BytecodeArray> BytecodePeepholeOptimizer::ToBytecodeArray(
int fixed_register_count, int parameter_count,
Handle<FixedArray> handler_table) {
Flush();
return next_stage_->ToBytecodeArray(fixed_register_count, parameter_count,
handler_table);
}
bool BytecodePeepholeOptimizer::LastIsValid() const {
return last_.bytecode() != Bytecode::kIllegal;
// override
void BytecodePeepholeOptimizer::Write(BytecodeNode* node) {
node = OptimizeAndEmitLast(node);
if (node != nullptr) {
SetLast(node);
}
}
void BytecodePeepholeOptimizer::SetLast(const BytecodeNode* const node) {
last_.Clone(node);
last_is_discardable_ = true;
// override
void BytecodePeepholeOptimizer::WriteJump(BytecodeNode* node,
BytecodeLabel* label) {
node = OptimizeAndEmitLast(node);
next_stage_->WriteJump(node, label);
}
// override
size_t BytecodePeepholeOptimizer::FlushForOffset() {
size_t buffered_size = next_stage_->FlushForOffset();
if (LastIsValid()) {
if (last_.bytecode() == Bytecode::kNop &&
!last_.source_info().is_statement()) {
// The Nop can be dropped as it doesn't have a statement
// position for the debugger and doesn't have any effects by
// definition.
InvalidateLast();
} else {
buffered_size += last_.Size();
last_is_discardable_ = false;
}
}
return buffered_size;
void BytecodePeepholeOptimizer::BindLabel(BytecodeLabel* label) {
Flush();
next_stage_->BindLabel(label);
}
// override
void BytecodePeepholeOptimizer::FlushBasicBlock() {
if (LastIsValid()) {
next_stage_->Write(&last_);
InvalidateLast();
}
next_stage_->FlushBasicBlock();
void BytecodePeepholeOptimizer::BindLabel(const BytecodeLabel& target,
BytecodeLabel* label) {
// There is no need to flush here, it will have been flushed when |target|
// was bound.
next_stage_->BindLabel(target, label);
}
// override
void BytecodePeepholeOptimizer::Write(BytecodeNode* node) {
// Attempt optimization if there is an earlier node to optimize with.
if (LastIsValid()) {
node = Optimize(node);
// Only output the last node if it wasn't invalidated by the optimization.
void BytecodePeepholeOptimizer::Flush() {
// TODO(oth/rmcilroy): We could check CanElideLast() here to potentially
// eliminate last rather than writing it.
if (LastIsValid()) {
next_stage_->Write(&last_);
InvalidateLast();
}
}
}
if (node != nullptr) {
SetLast(node);
}
void BytecodePeepholeOptimizer::InvalidateLast() {
last_.set_bytecode(Bytecode::kIllegal);
}
bool BytecodePeepholeOptimizer::LastIsValid() const {
return last_.bytecode() != Bytecode::kIllegal;
}
void BytecodePeepholeOptimizer::SetLast(const BytecodeNode* const node) {
last_.Clone(node);
}
Handle<Object> BytecodePeepholeOptimizer::GetConstantForIndexOperand(
......@@ -260,10 +260,6 @@ bool BytecodePeepholeOptimizer::TransformLastAndCurrentBytecodes(
bool BytecodePeepholeOptimizer::CanElideLast(
const BytecodeNode* const current) const {
if (!last_is_discardable_) {
return false;
}
if (last_.bytecode() == Bytecode::kNop) {
// Nops are placeholders for holding source position information.
return true;
......@@ -311,6 +307,20 @@ BytecodeNode* BytecodePeepholeOptimizer::Optimize(BytecodeNode* current) {
return current;
}
BytecodeNode* BytecodePeepholeOptimizer::OptimizeAndEmitLast(
BytecodeNode* current) {
// Attempt optimization if there is an earlier node to optimize with.
if (LastIsValid()) {
current = Optimize(current);
// Only output the last node if it wasn't invalidated by the optimization.
if (LastIsValid()) {
next_stage_->Write(&last_);
InvalidateLast();
}
}
return current;
}
} // namespace interpreter
} // namespace internal
} // namespace v8
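
The net effect of Write/WriteJump/BindLabel above is a one-bytecode buffer in |last_|. An illustrative call sequence (node and label names are assumed for the example):

optimizer.Write(&add);        // |last_| = add; nothing reaches the next stage
optimizer.Write(&star);       // add is optimized against star and emitted;
                              // |last_| = star
optimizer.WriteJump(&jump, &label);  // OptimizeAndEmitLast flushes star, then
                                     // the jump is forwarded unconditionally
optimizer.BindLabel(&label);  // Flush() emits any pending |last_|, then the
                              // label is bound in the next stage
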
......@@ -24,11 +24,17 @@ class BytecodePeepholeOptimizer final : public BytecodePipelineStage,
// BytecodePipelineStage interface.
void Write(BytecodeNode* node) override;
size_t FlushForOffset() override;
void FlushBasicBlock() override;
void WriteJump(BytecodeNode* node, BytecodeLabel* label) override;
void BindLabel(BytecodeLabel* label) override;
void BindLabel(const BytecodeLabel& target, BytecodeLabel* label) override;
Handle<BytecodeArray> ToBytecodeArray(
int fixed_register_count, int parameter_count,
Handle<FixedArray> handler_table) override;
private:
BytecodeNode* OptimizeAndEmitLast(BytecodeNode* current);
BytecodeNode* Optimize(BytecodeNode* current);
void Flush();
void TryToRemoveLastExpressionPosition(const BytecodeNode* const current);
bool TransformLastAndCurrentBytecodes(BytecodeNode* const current);
......@@ -54,7 +60,6 @@ class BytecodePeepholeOptimizer final : public BytecodePipelineStage,
ConstantArrayBuilder* constant_array_builder_;
BytecodePipelineStage* next_stage_;
BytecodeNode last_;
bool last_is_discardable_;
DISALLOW_COPY_AND_ASSIGN(BytecodePeepholeOptimizer);
};
......
......@@ -13,6 +13,7 @@ namespace v8 {
namespace internal {
namespace interpreter {
class BytecodeLabel;
class BytecodeNode;
class BytecodeSourceInfo;
......@@ -26,12 +27,26 @@ class BytecodePipelineStage {
// deferring Write() to the next stage.
virtual void Write(BytecodeNode* node) = 0;
// Flush state for bytecode array offset calculation. Returns the
// current size of bytecode array.
virtual size_t FlushForOffset() = 0;
// Flush state to terminate basic block.
virtual void FlushBasicBlock() = 0;
// Write jump bytecode node |node| which jumps to |label| into pipeline.
// The node and label are only valid for the duration of the call. This call
// implicitly ends the current basic block so should always write to the next
// stage.
virtual void WriteJump(BytecodeNode* node, BytecodeLabel* label) = 0;
// Binds |label| to the current bytecode location. This call implicitly
// ends the current basic block and so any deferred bytecodes should be
// written to the next stage.
virtual void BindLabel(BytecodeLabel* label) = 0;
// Binds |label| to the location of |target|. This call implicitly
// ends the current basic block and so any deferred bytecodes should be
// written to the next stage.
virtual void BindLabel(const BytecodeLabel& target, BytecodeLabel* label) = 0;
// Flush the pipeline and generate a bytecode array.
virtual Handle<BytecodeArray> ToBytecodeArray(
int fixed_register_count, int parameter_count,
Handle<FixedArray> handler_table) = 0;
};
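
As a reference point for implementers of this interface, a stage with no buffered state forwards every call unchanged. A minimal sketch (assuming the surrounding V8 types; this class is not part of the commit):

class PassThroughStage final : public BytecodePipelineStage {
 public:
  explicit PassThroughStage(BytecodePipelineStage* next_stage)
      : next_stage_(next_stage) {}

  // No deferred bytecodes, so every call forwards directly.
  void Write(BytecodeNode* node) override { next_stage_->Write(node); }
  void WriteJump(BytecodeNode* node, BytecodeLabel* label) override {
    next_stage_->WriteJump(node, label);
  }
  void BindLabel(BytecodeLabel* label) override {
    next_stage_->BindLabel(label);
  }
  void BindLabel(const BytecodeLabel& target, BytecodeLabel* label) override {
    next_stage_->BindLabel(target, label);
  }
  Handle<BytecodeArray> ToBytecodeArray(
      int fixed_register_count, int parameter_count,
      Handle<FixedArray> handler_table) override {
    return next_stage_->ToBytecodeArray(fixed_register_count, parameter_count,
                                        handler_table);
  }

 private:
  BytecodePipelineStage* next_stage_;
};
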
// Source code position information.
......
......@@ -198,44 +198,13 @@ BytecodeRegisterOptimizer::BytecodeRegisterOptimizer(
DCHECK(accumulator_info_->register_value() == accumulator_);
}
void BytecodeRegisterOptimizer::FlushState() {
if (flushed_) {
return;
}
// Materialize all live registers.
size_t count = register_info_table_.size();
for (size_t i = 0; i < count; ++i) {
RegisterInfo* reg_info = register_info_table_[i];
if (!reg_info->IsOnlyMemberOfEquivalenceSet() &&
!reg_info->materialized()) {
DCHECK(RegisterIsTemporary(reg_info->register_value()) ||
reg_info->register_value() == accumulator_);
Materialize(reg_info);
}
}
// Break all existing equivalences.
for (size_t i = 0; i < count; ++i) {
RegisterInfo* reg_info = register_info_table_[i];
if (!reg_info->IsOnlyMemberOfEquivalenceSet()) {
reg_info->MoveToNewEquivalenceSet(NextEquivalenceId(), true);
}
}
flushed_ = true;
}
// override
void BytecodeRegisterOptimizer::FlushBasicBlock() {
FlushState();
next_stage_->FlushBasicBlock();
}
// override
size_t BytecodeRegisterOptimizer::FlushForOffset() {
Handle<BytecodeArray> BytecodeRegisterOptimizer::ToBytecodeArray(
int fixed_register_count, int parameter_count,
Handle<FixedArray> handler_table) {
FlushState();
return next_stage_->FlushForOffset();
return next_stage_->ToBytecodeArray(fixed_register_count, parameter_count,
handler_table);
}
// override
......@@ -283,6 +252,55 @@ void BytecodeRegisterOptimizer::Write(BytecodeNode* node) {
WriteToNextStage(node);
}
// override
void BytecodeRegisterOptimizer::WriteJump(BytecodeNode* node,
BytecodeLabel* label) {
FlushState();
next_stage_->WriteJump(node, label);
}
// override
void BytecodeRegisterOptimizer::BindLabel(BytecodeLabel* label) {
FlushState();
next_stage_->BindLabel(label);
}
// override
void BytecodeRegisterOptimizer::BindLabel(const BytecodeLabel& target,
BytecodeLabel* label) {
// There is no need to flush here, it will have been flushed when |target|
// was bound.
next_stage_->BindLabel(target, label);
}
void BytecodeRegisterOptimizer::FlushState() {
if (flushed_) {
return;
}
// Materialize all live registers.
size_t count = register_info_table_.size();
for (size_t i = 0; i < count; ++i) {
RegisterInfo* reg_info = register_info_table_[i];
if (!reg_info->IsOnlyMemberOfEquivalenceSet() &&
!reg_info->materialized()) {
DCHECK(RegisterIsTemporary(reg_info->register_value()) ||
reg_info->register_value() == accumulator_);
Materialize(reg_info);
}
}
// Break all existing equivalences.
for (size_t i = 0; i < count; ++i) {
RegisterInfo* reg_info = register_info_table_[i];
if (!reg_info->IsOnlyMemberOfEquivalenceSet()) {
reg_info->MoveToNewEquivalenceSet(NextEquivalenceId(), true);
}
}
flushed_ = true;
}
void BytecodeRegisterOptimizer::WriteToNextStage(BytecodeNode* node) const {
next_stage_->Write(node);
}
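
Taken together, the WriteJump and BindLabel overrides guarantee that no deferred register transfer survives across a basic-block boundary. An illustrative sequence (names assumed):

optimizer.Write(&star_t0);    // Star into a temporary: recorded as a register
                              // equivalence, nothing written downstream yet
optimizer.WriteJump(&jump, &label);
// -> FlushState() materializes the pending Star and breaks all equivalence
//    sets, then the jump is forwarded to the next stage
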
......
......@@ -26,9 +26,13 @@ class BytecodeRegisterOptimizer final : public BytecodePipelineStage,
virtual ~BytecodeRegisterOptimizer() {}
// BytecodePipelineStage interface.
size_t FlushForOffset() override;
void FlushBasicBlock() override;
void Write(BytecodeNode* node) override;
void WriteJump(BytecodeNode* node, BytecodeLabel* label) override;
void BindLabel(BytecodeLabel* label) override;
void BindLabel(const BytecodeLabel& target, BytecodeLabel* label) override;
Handle<BytecodeArray> ToBytecodeArray(
int fixed_register_count, int parameter_count,
Handle<FixedArray> handler_table) override;
private:
static const uint32_t kInvalidEquivalenceId = kMaxUInt32;
......
......@@ -7,6 +7,7 @@
#include "src/interpreter/bytecode-array-builder.h"
#include "src/interpreter/bytecode-label.h"
#include "src/zone-containers.h"
namespace v8 {
......
......@@ -902,6 +902,7 @@
'interpreter/bytecode-array-iterator.h',
'interpreter/bytecode-array-writer.cc',
'interpreter/bytecode-array-writer.h',
'interpreter/bytecode-label.h',
'interpreter/bytecode-peephole-optimizer.cc',
'interpreter/bytecode-peephole-optimizer.h',
'interpreter/bytecode-pipeline.cc',
......
......@@ -8,6 +8,7 @@
#include "src/handles.h"
#include "src/interpreter/bytecode-array-builder.h"
#include "src/interpreter/bytecode-array-iterator.h"
#include "src/interpreter/bytecode-label.h"
#include "src/interpreter/interpreter.h"
#include "test/cctest/cctest.h"
#include "test/cctest/interpreter/interpreter-tester.h"
......
......@@ -6,6 +6,7 @@
#include "src/interpreter/bytecode-array-builder.h"
#include "src/interpreter/bytecode-array-iterator.h"
#include "src/interpreter/bytecode-label.h"
#include "src/interpreter/bytecode-register-allocator.h"
#include "test/unittests/test-utils.h"
......@@ -277,6 +278,19 @@ TEST_F(BytecodeArrayBuilderTest, AllBytecodesGenerated) {
.StoreLookupSlot(wide_name, LanguageMode::SLOPPY)
.StoreLookupSlot(wide_name, LanguageMode::STRICT);
// Emit loads which will be transformed to Ldr equivalents by the peephole
// optimizer.
builder.LoadNamedProperty(reg, name, 0)
.StoreAccumulatorInRegister(reg)
.LoadKeyedProperty(reg, 0)
.StoreAccumulatorInRegister(reg)
.LoadContextSlot(reg, 1)
.StoreAccumulatorInRegister(reg)
.LoadGlobal(name, 0, TypeofMode::NOT_INSIDE_TYPEOF)
.StoreAccumulatorInRegister(reg)
.LoadUndefined()
.StoreAccumulatorInRegister(reg);
// CreateClosureWide
Handle<SharedFunctionInfo> shared_info2 = factory->NewSharedFunctionInfo(
factory->NewStringFromStaticChars("function_b"), MaybeHandle<Code>(),
......@@ -352,12 +366,20 @@ TEST_F(BytecodeArrayBuilderTest, AllBytecodesGenerated) {
// Insert entry for nop bytecode as this often gets optimized out.
scorecard[Bytecodes::ToByte(Bytecode::kNop)] = 1;
// Insert entries for bytecodes only emiited by peephole optimizer.
if (!FLAG_ignition_peephole) {
// Insert entries for bytecodes only emitted by peephole optimizer.
scorecard[Bytecodes::ToByte(Bytecode::kLdrNamedProperty)] = 1;
scorecard[Bytecodes::ToByte(Bytecode::kLdrKeyedProperty)] = 1;
scorecard[Bytecodes::ToByte(Bytecode::kLdrGlobal)] = 1;
scorecard[Bytecodes::ToByte(Bytecode::kLdrContextSlot)] = 1;
scorecard[Bytecodes::ToByte(Bytecode::kLdrUndefined)] = 1;
scorecard[Bytecodes::ToByte(Bytecode::kLogicalNot)] = 1;
scorecard[Bytecodes::ToByte(Bytecode::kJump)] = 1;
scorecard[Bytecodes::ToByte(Bytecode::kJumpIfTrue)] = 1;
scorecard[Bytecodes::ToByte(Bytecode::kJumpIfFalse)] = 1;
scorecard[Bytecodes::ToByte(Bytecode::kJumpIfTrueConstant)] = 1;
scorecard[Bytecodes::ToByte(Bytecode::kJumpIfFalseConstant)] = 1;
}
// Check return occurs at the end and only once in the BytecodeArray.
CHECK_EQ(final_bytecode, Bytecode::kReturn);
......@@ -470,6 +492,11 @@ TEST_F(BytecodeArrayBuilderTest, Constants) {
CHECK_EQ(array->constant_pool()->length(), 3);
}
static Bytecode PeepholeToBoolean(Bytecode jump_bytecode) {
return FLAG_ignition_peephole
? Bytecodes::GetJumpWithoutToBoolean(jump_bytecode)
: jump_bytecode;
}
TEST_F(BytecodeArrayBuilderTest, ForwardJumps) {
static const int kFarJumpDistance = 256;
......@@ -520,14 +547,16 @@ TEST_F(BytecodeArrayBuilderTest, ForwardJumps) {
// Ignore compare operation.
iterator.Advance();
CHECK_EQ(iterator.current_bytecode(), Bytecode::kJumpIfTrue);
CHECK_EQ(iterator.current_bytecode(),
PeepholeToBoolean(Bytecode::kJumpIfToBooleanTrue));
CHECK_EQ(iterator.GetImmediateOperand(0), 14);
iterator.Advance();
// Ignore compare operation.
iterator.Advance();
CHECK_EQ(iterator.current_bytecode(), Bytecode::kJumpIfFalse);
CHECK_EQ(iterator.current_bytecode(),
PeepholeToBoolean(Bytecode::kJumpIfToBooleanFalse));
CHECK_EQ(iterator.GetImmediateOperand(0), 10);
iterator.Advance();
......@@ -553,7 +582,8 @@ TEST_F(BytecodeArrayBuilderTest, ForwardJumps) {
// Ignore compare operation.
iterator.Advance();
CHECK_EQ(iterator.current_bytecode(), Bytecode::kJumpIfTrueConstant);
CHECK_EQ(iterator.current_bytecode(),
PeepholeToBoolean(Bytecode::kJumpIfToBooleanTrueConstant));
CHECK_EQ(*iterator.GetConstantForIndexOperand(0),
Smi::FromInt(kFarJumpDistance - 4));
iterator.Advance();
......@@ -561,7 +591,8 @@ TEST_F(BytecodeArrayBuilderTest, ForwardJumps) {
// Ignore compare operation.
iterator.Advance();
CHECK_EQ(iterator.current_bytecode(), Bytecode::kJumpIfFalseConstant);
CHECK_EQ(iterator.current_bytecode(),
PeepholeToBoolean(Bytecode::kJumpIfToBooleanFalseConstant));
CHECK_EQ(*iterator.GetConstantForIndexOperand(0),
Smi::FromInt(kFarJumpDistance - 8));
iterator.Advance();
......@@ -628,13 +659,15 @@ TEST_F(BytecodeArrayBuilderTest, BackwardJumps) {
iterator.Advance();
// Ignore compare operation.
iterator.Advance();
CHECK_EQ(iterator.current_bytecode(), Bytecode::kJumpIfTrue);
CHECK_EQ(iterator.current_bytecode(),
PeepholeToBoolean(Bytecode::kJumpIfToBooleanTrue));
CHECK_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
CHECK_EQ(iterator.GetImmediateOperand(0), -2);
iterator.Advance();
// Ignore compare operation.
iterator.Advance();
CHECK_EQ(iterator.current_bytecode(), Bytecode::kJumpIfFalse);
CHECK_EQ(iterator.current_bytecode(),
PeepholeToBoolean(Bytecode::kJumpIfToBooleanFalse));
CHECK_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
CHECK_EQ(iterator.GetImmediateOperand(0), -2);
iterator.Advance();
......@@ -675,13 +708,15 @@ TEST_F(BytecodeArrayBuilderTest, BackwardJumps) {
iterator.Advance();
// Ignore compare operation.
iterator.Advance();
CHECK_EQ(iterator.current_bytecode(), Bytecode::kJumpIfFalse);
CHECK_EQ(iterator.current_bytecode(),
PeepholeToBoolean(Bytecode::kJumpIfToBooleanFalse));
CHECK_EQ(iterator.current_operand_scale(), OperandScale::kDouble);
CHECK_EQ(iterator.GetImmediateOperand(0), -409);
iterator.Advance();
// Ignore compare operation.
iterator.Advance();
CHECK_EQ(iterator.current_bytecode(), Bytecode::kJumpIfTrue);
CHECK_EQ(iterator.current_bytecode(),
PeepholeToBoolean(Bytecode::kJumpIfToBooleanTrue));
CHECK_EQ(iterator.current_operand_scale(), OperandScale::kDouble);
CHECK_EQ(iterator.GetImmediateOperand(0), -419);
iterator.Advance();
......
......@@ -4,7 +4,11 @@
#include "src/v8.h"
#include "src/api.h"
#include "src/factory.h"
#include "src/interpreter/bytecode-array-writer.h"
#include "src/interpreter/bytecode-label.h"
#include "src/interpreter/constant-array-builder.h"
#include "src/interpreter/source-position-table.h"
#include "src/isolate.h"
#include "src/utils.h"
......@@ -18,8 +22,8 @@ namespace interpreter {
class BytecodeArrayWriterUnittest : public TestWithIsolateAndZone {
public:
BytecodeArrayWriterUnittest()
: source_position_builder_(isolate(), zone()),
bytecode_array_writer_(zone(), &source_position_builder_) {}
: constant_array_builder_(isolate(), zone()),
bytecode_array_writer_(isolate(), zone(), &constant_array_builder_) {}
~BytecodeArrayWriterUnittest() override {}
void Write(BytecodeNode* node, const BytecodeSourceInfo& info);
......@@ -37,13 +41,19 @@ class BytecodeArrayWriterUnittest : public TestWithIsolateAndZone {
uint32_t operand2, uint32_t operand3, OperandScale operand_scale,
const BytecodeSourceInfo& info = BytecodeSourceInfo());
SourcePositionTableBuilder* source_position_builder() {
return &source_position_builder_;
}
void WriteJump(Bytecode bytecode, BytecodeLabel* label,
OperandScale operand_scale,
const BytecodeSourceInfo& info = BytecodeSourceInfo());
BytecodeArrayWriter* writer() { return &bytecode_array_writer_; }
ZoneVector<unsigned char>* bytecodes() { return writer()->bytecodes(); }
SourcePositionTableBuilder* source_position_table_builder() {
return writer()->source_position_table_builder();
}
int max_register_count() { return writer()->max_register_count(); }
private:
SourcePositionTableBuilder source_position_builder_;
ConstantArrayBuilder constant_array_builder_;
BytecodeArrayWriter bytecode_array_writer_;
};
......@@ -94,40 +104,50 @@ void BytecodeArrayWriterUnittest::Write(Bytecode bytecode, uint32_t operand0,
Write(&node, info);
}
void BytecodeArrayWriterUnittest::WriteJump(Bytecode bytecode,
BytecodeLabel* label,
OperandScale operand_scale,
const BytecodeSourceInfo& info) {
BytecodeNode node(bytecode, 0, operand_scale);
if (info.is_valid()) {
node.source_info().Update(info);
}
writer()->WriteJump(&node, label);
}
TEST_F(BytecodeArrayWriterUnittest, SimpleExample) {
CHECK_EQ(writer()->bytecodes()->size(), 0);
CHECK_EQ(bytecodes()->size(), 0);
Write(Bytecode::kStackCheck, {10, false});
CHECK_EQ(writer()->bytecodes()->size(), 1);
CHECK_EQ(writer()->GetMaximumFrameSizeUsed(), 0);
CHECK_EQ(bytecodes()->size(), 1);
CHECK_EQ(max_register_count(), 0);
Write(Bytecode::kLdaSmi, 0xff, OperandScale::kSingle, {55, true});
CHECK_EQ(writer()->bytecodes()->size(), 3);
CHECK_EQ(writer()->GetMaximumFrameSizeUsed(), 0);
CHECK_EQ(bytecodes()->size(), 3);
CHECK_EQ(max_register_count(), 0);
Write(Bytecode::kLdar, Register(1).ToOperand(), OperandScale::kDouble);
CHECK_EQ(writer()->bytecodes()->size(), 7);
CHECK_EQ(writer()->GetMaximumFrameSizeUsed(), 2 * kPointerSize);
CHECK_EQ(bytecodes()->size(), 7);
CHECK_EQ(max_register_count(), 2);
Write(Bytecode::kReturn, {70, true});
CHECK_EQ(writer()->bytecodes()->size(), 8);
CHECK_EQ(writer()->GetMaximumFrameSizeUsed(), 2 * kPointerSize);
CHECK_EQ(bytecodes()->size(), 8);
CHECK_EQ(max_register_count(), 2);
static const uint8_t bytes[] = {B(StackCheck), B(LdaSmi), U8(0xff), B(Wide),
B(Ldar), R16(1), B(Return)};
CHECK_EQ(writer()->bytecodes()->size(), arraysize(bytes));
CHECK_EQ(bytecodes()->size(), arraysize(bytes));
for (size_t i = 0; i < arraysize(bytes); ++i) {
CHECK_EQ(writer()->bytecodes()->at(i), bytes[i]);
CHECK_EQ(bytecodes()->at(i), bytes[i]);
}
CHECK_EQ(writer()->FlushForOffset(), arraysize(bytes));
writer()->FlushBasicBlock();
CHECK_EQ(writer()->bytecodes()->size(), arraysize(bytes));
writer()->ToBytecodeArray(0, 0, factory()->empty_fixed_array());
CHECK_EQ(bytecodes()->size(), arraysize(bytes));
PositionTableEntry expected_positions[] = {
{0, 10, false}, {1, 55, true}, {7, 70, true}};
Handle<ByteArray> source_positions =
source_position_builder()->ToSourcePositionTable();
source_position_table_builder()->ToSourcePositionTable();
SourcePositionTableIterator source_iterator(*source_positions);
for (size_t i = 0; i < arraysize(expected_positions); ++i) {
const PositionTableEntry& expected = expected_positions[i];
......@@ -173,50 +193,58 @@ TEST_F(BytecodeArrayWriterUnittest, ComplexExample) {
{0, 30, false}, {1, 42, true}, {3, 42, false}, {5, 68, true},
{17, 63, true}, {31, 54, false}, {36, 85, true}, {44, 85, true}};
BytecodeLabel back_jump, jump_for_in, jump_end_1, jump_end_2, jump_end_3;
#define R(i) static_cast<uint32_t>(Register(i).ToOperand())
Write(Bytecode::kStackCheck, {30, false});
Write(Bytecode::kLdaConstant, U8(0), OperandScale::kSingle, {42, true});
CHECK_EQ(writer()->GetMaximumFrameSizeUsed(), 0 * kPointerSize);
CHECK_EQ(max_register_count(), 0);
Write(Bytecode::kStar, R(1), OperandScale::kSingle, {42, false});
CHECK_EQ(writer()->GetMaximumFrameSizeUsed(), 2 * kPointerSize);
Write(Bytecode::kJumpIfUndefined, U8(38), OperandScale::kSingle, {68, true});
Write(Bytecode::kJumpIfNull, U8(36), OperandScale::kSingle);
CHECK_EQ(max_register_count(), 2);
WriteJump(Bytecode::kJumpIfUndefined, &jump_end_1, OperandScale::kSingle,
{68, true});
WriteJump(Bytecode::kJumpIfNull, &jump_end_2, OperandScale::kSingle);
Write(Bytecode::kToObject);
CHECK_EQ(writer()->GetMaximumFrameSizeUsed(), 2 * kPointerSize);
CHECK_EQ(max_register_count(), 2);
Write(Bytecode::kStar, R(3), OperandScale::kSingle);
CHECK_EQ(writer()->GetMaximumFrameSizeUsed(), 4 * kPointerSize);
CHECK_EQ(max_register_count(), 4);
Write(Bytecode::kForInPrepare, R(4), OperandScale::kSingle);
CHECK_EQ(writer()->GetMaximumFrameSizeUsed(), 7 * kPointerSize);
CHECK_EQ(max_register_count(), 7);
Write(Bytecode::kLdaZero);
CHECK_EQ(writer()->GetMaximumFrameSizeUsed(), 7 * kPointerSize);
CHECK_EQ(max_register_count(), 7);
Write(Bytecode::kStar, R(7), OperandScale::kSingle);
CHECK_EQ(writer()->GetMaximumFrameSizeUsed(), 8 * kPointerSize);
CHECK_EQ(max_register_count(), 8);
writer()->BindLabel(&back_jump);
Write(Bytecode::kForInDone, R(7), R(6), OperandScale::kSingle, {63, true});
CHECK_EQ(writer()->GetMaximumFrameSizeUsed(), 8 * kPointerSize);
Write(Bytecode::kJumpIfTrue, U8(23), OperandScale::kSingle);
CHECK_EQ(max_register_count(), 8);
WriteJump(Bytecode::kJumpIfTrue, &jump_end_3, OperandScale::kSingle);
Write(Bytecode::kForInNext, R(3), R(7), R(4), U8(1), OperandScale::kSingle);
Write(Bytecode::kJumpIfUndefined, U8(10), OperandScale::kSingle);
WriteJump(Bytecode::kJumpIfUndefined, &jump_for_in, OperandScale::kSingle);
Write(Bytecode::kStar, R(0), OperandScale::kSingle);
Write(Bytecode::kStackCheck, {54, false});
Write(Bytecode::kLdar, R(0), OperandScale::kSingle);
Write(Bytecode::kStar, R(2), OperandScale::kSingle);
Write(Bytecode::kReturn, {85, true});
writer()->BindLabel(&jump_for_in);
Write(Bytecode::kForInStep, R(7), OperandScale::kSingle);
Write(Bytecode::kStar, R(7), OperandScale::kSingle);
Write(Bytecode::kJump, U8(-24), OperandScale::kSingle);
WriteJump(Bytecode::kJump, &back_jump, OperandScale::kSingle);
writer()->BindLabel(&jump_end_1);
writer()->BindLabel(&jump_end_2);
writer()->BindLabel(&jump_end_3);
Write(Bytecode::kLdaUndefined);
Write(Bytecode::kReturn, {85, true});
CHECK_EQ(writer()->GetMaximumFrameSizeUsed(), 8 * kPointerSize);
CHECK_EQ(max_register_count(), 8);
#undef R
CHECK_EQ(writer()->bytecodes()->size(), arraysize(expected_bytes));
CHECK_EQ(bytecodes()->size(), arraysize(expected_bytes));
for (size_t i = 0; i < arraysize(expected_bytes); ++i) {
CHECK_EQ(static_cast<int>(writer()->bytecodes()->at(i)),
CHECK_EQ(static_cast<int>(bytecodes()->at(i)),
static_cast<int>(expected_bytes[i]));
}
Handle<ByteArray> source_positions =
source_position_builder()->ToSourcePositionTable();
source_position_table_builder()->ToSourcePositionTable();
SourcePositionTableIterator source_iterator(*source_positions);
for (size_t i = 0; i < arraysize(expected_positions); ++i) {
const PositionTableEntry& expected = expected_positions[i];
......
......@@ -5,6 +5,7 @@
#include "src/v8.h"
#include "src/factory.h"
#include "src/interpreter/bytecode-label.h"
#include "src/interpreter/bytecode-register-optimizer.h"
#include "src/objects-inl.h"
#include "src/objects.h"
......@@ -27,14 +28,17 @@ class BytecodeRegisterOptimizerTest : public BytecodePipelineStage,
zone(), register_allocator_, number_of_parameters, this);
}
size_t FlushForOffset() override {
flush_for_offset_count_++;
return 0;
};
void FlushBasicBlock() override { flush_basic_block_count_++; }
void Write(BytecodeNode* node) override { output_.push_back(*node); }
void WriteJump(BytecodeNode* node, BytecodeLabel* label) override {
output_.push_back(*node);
}
void BindLabel(BytecodeLabel* label) override {}
void BindLabel(const BytecodeLabel& target, BytecodeLabel* label) override {}
Handle<BytecodeArray> ToBytecodeArray(
int fixed_register_count, int parameter_count,
Handle<FixedArray> handler_table) override {
return Handle<BytecodeArray>();
}
TemporaryRegisterAllocator* allocator() { return register_allocator_; }
BytecodeRegisterOptimizer* optimizer() { return register_optimizer_; }
......@@ -47,8 +51,6 @@ class BytecodeRegisterOptimizerTest : public BytecodePipelineStage,
allocator()->ReturnTemporaryRegister(reg.index());
}
int flush_for_offset_count() const { return flush_for_offset_count_; }
int flush_basic_block_count() const { return flush_basic_block_count_; }
size_t write_count() const { return output_.size(); }
const BytecodeNode& last_written() const { return output_.back(); }
const std::vector<BytecodeNode>* output() { return &output_; }
......@@ -57,76 +59,65 @@ class BytecodeRegisterOptimizerTest : public BytecodePipelineStage,
TemporaryRegisterAllocator* register_allocator_;
BytecodeRegisterOptimizer* register_optimizer_;
int flush_for_offset_count_ = 0;
int flush_basic_block_count_ = 0;
std::vector<BytecodeNode> output_;
};
// Sanity tests.
TEST_F(BytecodeRegisterOptimizerTest, FlushForOffsetPassThrough) {
Initialize(1, 1);
CHECK_EQ(flush_for_offset_count(), 0);
CHECK_EQ(optimizer()->FlushForOffset(), 0);
CHECK_EQ(flush_for_offset_count(), 1);
}
TEST_F(BytecodeRegisterOptimizerTest, FlushForOffsetRightSize) {
Initialize(1, 1);
BytecodeNode node(Bytecode::kAdd, Register(0).ToOperand(),
OperandScale::kQuadruple);
optimizer()->Write(&node);
CHECK_EQ(optimizer()->FlushForOffset(), 0);
CHECK_EQ(flush_for_offset_count(), 1);
CHECK_EQ(write_count(), 1);
}
TEST_F(BytecodeRegisterOptimizerTest, FlushForOffsetNop) {
TEST_F(BytecodeRegisterOptimizerTest, WriteNop) {
Initialize(1, 1);
BytecodeNode node(Bytecode::kNop);
optimizer()->Write(&node);
CHECK_EQ(optimizer()->FlushForOffset(), 0);
CHECK_EQ(flush_for_offset_count(), 1);
CHECK_EQ(write_count(), 1);
CHECK_EQ(node, last_written());
}
TEST_F(BytecodeRegisterOptimizerTest, FlushForOffsetNopExpression) {
TEST_F(BytecodeRegisterOptimizerTest, WriteNopExpression) {
Initialize(1, 1);
BytecodeNode node(Bytecode::kNop);
node.source_info().Update({3, false});
optimizer()->Write(&node);
CHECK_EQ(optimizer()->FlushForOffset(), 0);
CHECK_EQ(flush_for_offset_count(), 1);
CHECK_EQ(write_count(), 1);
CHECK_EQ(node, last_written());
}
TEST_F(BytecodeRegisterOptimizerTest, FlushForOffsetNopStatement) {
TEST_F(BytecodeRegisterOptimizerTest, WriteNopStatement) {
Initialize(1, 1);
BytecodeNode node(Bytecode::kNop);
node.source_info().Update({3, true});
optimizer()->Write(&node);
CHECK_EQ(optimizer()->FlushForOffset(), 0);
CHECK_EQ(flush_for_offset_count(), 1);
CHECK_EQ(write_count(), 1);
CHECK_EQ(node, last_written());
}
TEST_F(BytecodeRegisterOptimizerTest, FlushBasicBlockPassThrough) {
TEST_F(BytecodeRegisterOptimizerTest, TemporaryMaterializedForJump) {
Initialize(1, 1);
CHECK_EQ(flush_basic_block_count(), 0);
optimizer()->FlushBasicBlock();
CHECK_EQ(flush_basic_block_count(), 1);
Register temp = NewTemporary();
BytecodeNode node(Bytecode::kStar, temp.ToOperand(), OperandScale::kSingle);
optimizer()->Write(&node);
CHECK_EQ(write_count(), 0);
BytecodeLabel label;
BytecodeNode jump(Bytecode::kJump, 0, OperandScale::kSingle);
optimizer()->WriteJump(&jump, &label);
CHECK_EQ(write_count(), 2);
CHECK_EQ(output()->at(0).bytecode(), Bytecode::kStar);
CHECK_EQ(output()->at(0).operand(0), temp.ToOperand());
CHECK_EQ(output()->at(0).operand_scale(), OperandScale::kSingle);
CHECK_EQ(output()->at(1).bytecode(), Bytecode::kJump);
}
TEST_F(BytecodeRegisterOptimizerTest, WriteOneFlushBasicBlock) {
TEST_F(BytecodeRegisterOptimizerTest, TemporaryMaterializedForBind) {
Initialize(1, 1);
BytecodeNode node(Bytecode::kAdd, Register(0).ToOperand(),
OperandScale::kQuadruple);
Register temp = NewTemporary();
BytecodeNode node(Bytecode::kStar, temp.ToOperand(), OperandScale::kSingle);
optimizer()->Write(&node);
CHECK_EQ(write_count(), 0);
BytecodeLabel label;
optimizer()->BindLabel(&label);
CHECK_EQ(write_count(), 1);
optimizer()->FlushBasicBlock();
CHECK_EQ(write_count(), 1);
CHECK_EQ(node, last_written());
CHECK_EQ(output()->at(0).bytecode(), Bytecode::kStar);
CHECK_EQ(output()->at(0).operand(0), temp.ToOperand());
CHECK_EQ(output()->at(0).operand_scale(), OperandScale::kSingle);
}
// Basic Register Optimizations
......