Commit c6414dac authored by Leszek Swirski, committed by Commit Bot

Revert "[ignition] Merge bytecode array builder and writer"

This reverts commit 87f71769.

Reason for revert: Performance regressions https://chromeperf.appspot.com/group_report?rev=46185

Original change's description:
> [ignition] Merge bytecode array builder and writer
> 
> Move bytecode array writing logic into the array builder, allowing us to
> remove the bytecode array writer and bytecode node, and convert runtime
> operand writing to compile-time bytecode operand writing using the
> information statically known at compile time.
> 
> Bug: v8:6474
> Change-Id: I210cd9897fd41293745614e4a253c7c251dfffc9
> Reviewed-on: https://chromium-review.googlesource.com/533055
> Commit-Queue: Leszek Swirski <leszeks@chromium.org>
> Reviewed-by: Ross McIlroy <rmcilroy@chromium.org>
> Cr-Commit-Position: refs/heads/master@{#46183}
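
For context, "runtime operand writing" here means each bytecode is packaged as a BytecodeNode and handed to a writer that looks up operand sizes while emitting, whereas the merged builder derived operand sizes from template parameters fixed at compile time. The standalone sketch below contrasts the two styles; it is illustrative only (made-up names, not V8 code), and requires C++17.

// Illustrative sketch only (not V8 code; names are made up) of the difference
// the reverted CL was after: emitting bytecode operands via a runtime size
// lookup versus via sizes fixed at compile time by template parameters.
#include <cstdint>
#include <cstring>
#include <vector>

enum class OperandSize : uint8_t { kByte = 1, kShort = 2, kQuad = 4 };

// Runtime flavour (roughly what a node-plus-writer split implies): the
// emitter walks a size table and branches per operand while writing.
void EmitRuntime(std::vector<uint8_t>* out, const uint32_t* operands,
                 const OperandSize* sizes, int count) {
  for (int i = 0; i < count; ++i) {
    switch (sizes[i]) {
      case OperandSize::kByte:
        out->push_back(static_cast<uint8_t>(operands[i]));
        break;
      case OperandSize::kShort: {
        uint16_t value = static_cast<uint16_t>(operands[i]);
        uint8_t raw[2];
        std::memcpy(raw, &value, sizeof(value));
        out->insert(out->end(), raw, raw + sizeof(value));
        break;
      }
      case OperandSize::kQuad: {
        uint8_t raw[4];
        std::memcpy(raw, &operands[i], sizeof(uint32_t));
        out->insert(out->end(), raw, raw + sizeof(uint32_t));
        break;
      }
    }
  }
}

// Compile-time flavour (roughly what the merged builder's template machinery
// aimed for): each operand's size is a template argument, so the per-operand
// switch disappears from the generated code.
template <OperandSize size>
void EmitOne(std::vector<uint8_t>* out, uint32_t value) {
  if constexpr (size == OperandSize::kByte) {
    out->push_back(static_cast<uint8_t>(value));
  } else if constexpr (size == OperandSize::kShort) {
    uint16_t narrowed = static_cast<uint16_t>(value);
    uint8_t raw[2];
    std::memcpy(raw, &narrowed, sizeof(narrowed));
    out->insert(out->end(), raw, raw + sizeof(narrowed));
  } else {
    uint8_t raw[4];
    std::memcpy(raw, &value, sizeof(value));
    out->insert(out->end(), raw, raw + sizeof(value));
  }
}

template <OperandSize... sizes, typename... Operands>
void EmitStatic(std::vector<uint8_t>* out, Operands... operands) {
  static_assert(sizeof...(sizes) == sizeof...(Operands),
                "one size per operand");
  // Fold over both packs in lockstep; expands to straight-line stores.
  (EmitOne<sizes>(out, static_cast<uint32_t>(operands)), ...);
}

int main() {
  std::vector<uint8_t> a, b;
  const uint32_t ops[] = {7, 300};
  const OperandSize sizes[] = {OperandSize::kByte, OperandSize::kShort};
  EmitRuntime(&a, ops, sizes, 2);
  EmitStatic<OperandSize::kByte, OperandSize::kShort>(&b, 7u, 300u);
  return a == b ? 0 : 1;  // Both paths produce the same byte stream.
}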

TBR=rmcilroy@chromium.org,leszeks@chromium.org

# Not skipping CQ checks because original CL landed > 1 day ago.

Bug: v8:6474
Bug: chromium:736646
Change-Id: I00287b2bbbb8efa5a3141bc9c2906f91a7d33e51
Reviewed-on: https://chromium-review.googlesource.com/549319
Reviewed-by: Leszek Swirski <leszeks@chromium.org>
Commit-Queue: Leszek Swirski <leszeks@chromium.org>
Cr-Commit-Position: refs/heads/master@{#46235}
parent bfad25a8
BUILD.gn

@@ -1646,6 +1646,8 @@ v8_source_set("v8_base") {
     "src/interpreter/bytecode-array-iterator.h",
     "src/interpreter/bytecode-array-random-iterator.cc",
     "src/interpreter/bytecode-array-random-iterator.h",
+    "src/interpreter/bytecode-array-writer.cc",
+    "src/interpreter/bytecode-array-writer.h",
     "src/interpreter/bytecode-decoder.cc",
     "src/interpreter/bytecode-decoder.h",
     "src/interpreter/bytecode-flags.cc",
@@ -1655,6 +1657,8 @@ v8_source_set("v8_base") {
     "src/interpreter/bytecode-jump-table.h",
     "src/interpreter/bytecode-label.cc",
     "src/interpreter/bytecode-label.h",
+    "src/interpreter/bytecode-node.cc",
+    "src/interpreter/bytecode-node.h",
     "src/interpreter/bytecode-operands.cc",
     "src/interpreter/bytecode-operands.h",
     "src/interpreter/bytecode-register-allocator.h",
...
src/interpreter/bytecode-array-builder.cc

@@ -5,11 +5,12 @@
 #include "src/interpreter/bytecode-array-builder.h"
 
 #include "src/globals.h"
+#include "src/interpreter/bytecode-array-writer.h"
 #include "src/interpreter/bytecode-jump-table.h"
 #include "src/interpreter/bytecode-label.h"
+#include "src/interpreter/bytecode-node.h"
 #include "src/interpreter/bytecode-register-optimizer.h"
 #include "src/interpreter/bytecode-source-info.h"
-#include "src/interpreter/bytecode-traits.h"
 #include "src/interpreter/interpreter-intrinsics.h"
 #include "src/objects-inl.h"
@@ -41,29 +42,17 @@ BytecodeArrayBuilder::BytecodeArrayBuilder(
     FunctionLiteral* literal,
     SourcePositionTableBuilder::RecordingMode source_position_mode)
     : zone_(zone),
-      bytecodes_(zone),
       literal_(literal),
+      bytecode_generated_(false),
       constant_array_builder_(zone),
       handler_table_builder_(zone),
-      source_position_table_builder_(zone, source_position_mode),
-      register_allocator_(locals_count),
-      register_optimizer_(nullptr),
+      return_seen_in_block_(false),
       parameter_count_(parameter_count),
       local_register_count_(locals_count),
-      return_position_(literal ? literal->return_position()
-                               : kNoSourcePosition),
-      unbound_jumps_(0),
-      bytecode_generated_(false),
-      elide_noneffectful_bytecodes_(FLAG_ignition_elide_noneffectful_bytecodes),
-      exit_seen_in_block_(false),
-      last_bytecode_had_source_info_(false),
-      last_bytecode_offset_(0),
-      last_bytecode_(Bytecode::kIllegal) {
+      register_allocator_(fixed_register_count()),
+      bytecode_array_writer_(zone, &constant_array_builder_,
+                             source_position_mode),
+      register_optimizer_(nullptr) {
   DCHECK_GE(parameter_count_, 0);
   DCHECK_GE(local_register_count_, 0);
@@ -72,6 +61,8 @@ BytecodeArrayBuilder::BytecodeArrayBuilder(
         zone, &register_allocator_, fixed_register_count(), parameter_count,
         new (zone) RegisterTransferWriter(this));
   }
+
+  return_position_ = literal ? literal->return_position() : kNoSourcePosition;
 }
 Register BytecodeArrayBuilder::Parameter(int parameter_index) const {
@@ -92,7 +83,7 @@ Register BytecodeArrayBuilder::Local(int index) const {
 }
 
 Handle<BytecodeArray> BytecodeArrayBuilder::ToBytecodeArray(Isolate* isolate) {
-  DCHECK(exit_seen_in_block_);
+  DCHECK(return_seen_in_block_);
   DCHECK(!bytecode_generated_);
   bytecode_generated_ = true;
@@ -103,25 +94,10 @@ Handle<BytecodeArray> BytecodeArrayBuilder::ToBytecodeArray(Isolate* isolate) {
     register_count = register_optimizer_->maxiumum_register_index() + 1;
   }
 
-  int bytecode_size = static_cast<int>(bytecodes()->size());
-  int frame_size = register_count * kPointerSize;
-  Handle<FixedArray> constant_pool =
-      constant_array_builder()->ToFixedArray(isolate);
-  Handle<BytecodeArray> bytecode_array = isolate->factory()->NewBytecodeArray(
-      bytecode_size, &bytecodes()->front(), frame_size, parameter_count(),
-      constant_pool);
   Handle<FixedArray> handler_table =
       handler_table_builder()->ToHandlerTable(isolate);
-  bytecode_array->set_handler_table(*handler_table);
-
-  Handle<ByteArray> source_position_table =
-      source_position_table_builder()->ToSourcePositionTable(
-          isolate, Handle<AbstractCode>::cast(bytecode_array));
-  bytecode_array->set_source_position_table(*source_position_table);
-
-  return bytecode_array;
+  return bytecode_array_writer_.ToBytecodeArray(
+      isolate, register_count, parameter_count(), handler_table);
 }
 BytecodeSourceInfo BytecodeArrayBuilder::CurrentSourcePosition(
@@ -142,137 +118,63 @@ BytecodeSourceInfo BytecodeArrayBuilder::CurrentSourcePosition(
   return source_position;
 }
void BytecodeArrayBuilder::PatchJump(size_t jump_target, size_t jump_location) { void BytecodeArrayBuilder::SetDeferredSourceInfo(
Bytecode jump_bytecode = Bytecodes::FromByte(bytecodes()->at(jump_location)); BytecodeSourceInfo source_info) {
int delta = static_cast<int>(jump_target - jump_location); if (!source_info.is_valid()) return;
int prefix_offset = 0; if (deferred_source_info_.is_valid()) {
OperandScale operand_scale = OperandScale::kSingle; // Emit any previous deferred source info now as a nop.
if (Bytecodes::IsPrefixScalingBytecode(jump_bytecode)) { BytecodeNode node = BytecodeNode::Nop(deferred_source_info_);
// If a prefix scaling bytecode is emitted the target offset is one bytecode_array_writer_.Write(&node);
// less than the case of no prefix scaling bytecode.
delta -= 1;
prefix_offset = 1;
operand_scale = Bytecodes::PrefixBytecodeToOperandScale(jump_bytecode);
jump_bytecode =
Bytecodes::FromByte(bytecodes()->at(jump_location + prefix_offset));
}
DCHECK(Bytecodes::IsJump(jump_bytecode));
switch (operand_scale) {
case OperandScale::kSingle:
PatchJumpWith8BitOperand(jump_location, delta);
break;
case OperandScale::kDouble:
PatchJumpWith16BitOperand(jump_location + prefix_offset, delta);
break;
case OperandScale::kQuadruple:
PatchJumpWith32BitOperand(jump_location + prefix_offset, delta);
break;
default:
UNREACHABLE();
} }
unbound_jumps_--; deferred_source_info_ = source_info;
} }
void BytecodeArrayBuilder::PatchJumpWith8BitOperand(size_t jump_location, void BytecodeArrayBuilder::AttachOrEmitDeferredSourceInfo(BytecodeNode* node) {
int delta) { if (!deferred_source_info_.is_valid()) return;
Bytecode jump_bytecode = Bytecodes::FromByte(bytecodes()->at(jump_location));
DCHECK(Bytecodes::IsForwardJump(jump_bytecode)); if (!node->source_info().is_valid()) {
DCHECK(Bytecodes::IsJumpImmediate(jump_bytecode)); node->set_source_info(deferred_source_info_);
DCHECK_EQ(Bytecodes::GetOperandType(jump_bytecode, 0), OperandType::kUImm);
DCHECK_GT(delta, 0);
size_t operand_location = jump_location + 1;
DCHECK_EQ(bytecodes()->at(operand_location), k8BitJumpPlaceholder);
if (Bytecodes::ScaleForUnsignedOperand(delta) == OperandScale::kSingle) {
// The jump fits within the range of an UImm8 operand, so cancel
// the reservation and jump directly.
constant_array_builder()->DiscardReservedEntry(OperandSize::kByte);
bytecodes()->at(operand_location) = static_cast<uint8_t>(delta);
} else {
// The jump does not fit within the range of an UImm8 operand, so
// commit reservation putting the offset into the constant pool,
// and update the jump instruction and operand.
size_t entry = constant_array_builder()->CommitReservedEntry(
OperandSize::kByte, Smi::FromInt(delta));
DCHECK_EQ(Bytecodes::SizeForUnsignedOperand(static_cast<uint32_t>(entry)),
OperandSize::kByte);
jump_bytecode = Bytecodes::GetJumpWithConstantOperand(jump_bytecode);
bytecodes()->at(jump_location) = Bytecodes::ToByte(jump_bytecode);
bytecodes()->at(operand_location) = static_cast<uint8_t>(entry);
}
}
void BytecodeArrayBuilder::PatchJumpWith16BitOperand(size_t jump_location,
int delta) {
Bytecode jump_bytecode = Bytecodes::FromByte(bytecodes()->at(jump_location));
DCHECK(Bytecodes::IsForwardJump(jump_bytecode));
DCHECK(Bytecodes::IsJumpImmediate(jump_bytecode));
DCHECK_EQ(Bytecodes::GetOperandType(jump_bytecode, 0), OperandType::kUImm);
DCHECK_GT(delta, 0);
size_t operand_location = jump_location + 1;
uint8_t operand_bytes[2];
if (Bytecodes::ScaleForUnsignedOperand(delta) <= OperandScale::kDouble) {
// The jump fits within the range of an Imm16 operand, so cancel
// the reservation and jump directly.
constant_array_builder()->DiscardReservedEntry(OperandSize::kShort);
WriteUnalignedUInt16(operand_bytes, static_cast<uint16_t>(delta));
} else { } else {
// The jump does not fit within the range of an Imm16 operand, so BytecodeNode node = BytecodeNode::Nop(deferred_source_info_);
// commit reservation putting the offset into the constant pool, bytecode_array_writer_.Write(&node);
// and update the jump instruction and operand. }
size_t entry = constant_array_builder()->CommitReservedEntry( deferred_source_info_.set_invalid();
OperandSize::kShort, Smi::FromInt(delta)); }
jump_bytecode = Bytecodes::GetJumpWithConstantOperand(jump_bytecode);
bytecodes()->at(jump_location) = Bytecodes::ToByte(jump_bytecode); void BytecodeArrayBuilder::Write(BytecodeNode* node) {
WriteUnalignedUInt16(operand_bytes, static_cast<uint16_t>(entry)); AttachOrEmitDeferredSourceInfo(node);
} bytecode_array_writer_.Write(node);
DCHECK(bytecodes()->at(operand_location) == k8BitJumpPlaceholder && }
bytecodes()->at(operand_location + 1) == k8BitJumpPlaceholder);
bytecodes()->at(operand_location++) = operand_bytes[0]; void BytecodeArrayBuilder::WriteJump(BytecodeNode* node, BytecodeLabel* label) {
bytecodes()->at(operand_location) = operand_bytes[1]; AttachOrEmitDeferredSourceInfo(node);
} bytecode_array_writer_.WriteJump(node, label);
}
void BytecodeArrayBuilder::PatchJumpWith32BitOperand(size_t jump_location,
int delta) { void BytecodeArrayBuilder::WriteSwitch(BytecodeNode* node,
DCHECK(Bytecodes::IsJumpImmediate( BytecodeJumpTable* jump_table) {
Bytecodes::FromByte(bytecodes()->at(jump_location)))); AttachOrEmitDeferredSourceInfo(node);
constant_array_builder()->DiscardReservedEntry(OperandSize::kQuad); bytecode_array_writer_.WriteSwitch(node, jump_table);
uint8_t operand_bytes[4];
WriteUnalignedUInt32(operand_bytes, static_cast<uint32_t>(delta));
size_t operand_location = jump_location + 1;
DCHECK(bytecodes()->at(operand_location) == k8BitJumpPlaceholder &&
bytecodes()->at(operand_location + 1) == k8BitJumpPlaceholder &&
bytecodes()->at(operand_location + 2) == k8BitJumpPlaceholder &&
bytecodes()->at(operand_location + 3) == k8BitJumpPlaceholder);
bytecodes()->at(operand_location++) = operand_bytes[0];
bytecodes()->at(operand_location++) = operand_bytes[1];
bytecodes()->at(operand_location++) = operand_bytes[2];
bytecodes()->at(operand_location) = operand_bytes[3];
} }
 void BytecodeArrayBuilder::OutputLdarRaw(Register reg) {
-  // Exit early for dead code.
-  if (exit_seen_in_block_) return;
-  Write<AccumulatorUse::kWrite, OperandType::kReg>(
-      Bytecode::kLdar, BytecodeSourceInfo(),
-      {{static_cast<uint32_t>(reg.ToOperand())}});
+  uint32_t operand = static_cast<uint32_t>(reg.ToOperand());
+  BytecodeNode node(BytecodeNode::Ldar(BytecodeSourceInfo(), operand));
+  Write(&node);
 }
 
 void BytecodeArrayBuilder::OutputStarRaw(Register reg) {
-  // Exit early for dead code.
-  if (exit_seen_in_block_) return;
-  Write<AccumulatorUse::kRead, OperandType::kRegOut>(
-      Bytecode::kStar, BytecodeSourceInfo(),
-      {{static_cast<uint32_t>(reg.ToOperand())}});
+  uint32_t operand = static_cast<uint32_t>(reg.ToOperand());
+  BytecodeNode node(BytecodeNode::Star(BytecodeSourceInfo(), operand));
+  Write(&node);
 }
 
 void BytecodeArrayBuilder::OutputMovRaw(Register src, Register dest) {
-  // Exit early for dead code.
-  if (exit_seen_in_block_) return;
-  Write<AccumulatorUse::kNone, OperandType::kReg, OperandType::kRegOut>(
-      Bytecode::kMov, BytecodeSourceInfo(),
-      {{static_cast<uint32_t>(src.ToOperand()),
-        static_cast<uint32_t>(dest.ToOperand())}});
+  uint32_t operand0 = static_cast<uint32_t>(src.ToOperand());
+  uint32_t operand1 = static_cast<uint32_t>(dest.ToOperand());
+  BytecodeNode node(
+      BytecodeNode::Mov(BytecodeSourceInfo(), operand0, operand1));
+  Write(&node);
 }
 
 namespace {
...@@ -388,418 +290,63 @@ class OperandHelper<OperandType::kRegOutTriple> { ...@@ -388,418 +290,63 @@ class OperandHelper<OperandType::kRegOutTriple> {
} }
}; };
// Recursively defined helper for operating on an array of operands (calculating } // namespace
// the maximum scale needed, and emitting them). Each recursion peels off one of
// the operand types, and increments the index we check in the value array, e.g.
// for:
//
// OperandArrayHelper<0, OperandType::kReg, OperandType::kImm>
//
// then OperandArrayHelper::ScaleForOperands is defined for std::array<uint32_t,
// 2>, and recursively calls
//
// OperandArrayHelper<1, OperandType::kImm>::ScaleForOperands()
// OperandArrayHelper<2>::ScaleForOperands()
template <size_t I, OperandType... operand_types>
struct OperandArrayHelper;
// Base case: I points past the end of the array.
template <size_t I>
struct OperandArrayHelper<I> {
private:
static const int kArraySize = I;
public:
typedef std::array<uint32_t, kArraySize> OperandArray;
static OperandScale ScaleForOperands(const OperandArray& operand_values) {
return OperandScale::kSingle;
}
template <OperandScale operand_scale>
static void Emit(ZoneVector<uint8_t>* out,
const OperandArray& operand_values) {}
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(OperandArrayHelper);
};
// Recursive case.
template <size_t I, OperandType operand_type, OperandType... operand_types>
struct OperandArrayHelper<I, operand_type, operand_types...> {
private:
// The array has had I items iterated through already, and has remaning the
// current operand type plus the remainining operand types.
static const int kArraySize = I + 1 + sizeof...(operand_types);
typedef OperandArrayHelper<I + 1, operand_types...> NextRecursion;
public:
typedef std::array<uint32_t, kArraySize> OperandArray;
static OperandScale ScaleForOperands(const OperandArray& operand_values) {
uint32_t operand_value = std::get<I>(operand_values);
OperandScale operand_scale = OperandScale::kSingle;
if (BytecodeOperands::IsScalableUnsignedByte(operand_type)) {
operand_scale = Bytecodes::ScaleForUnsignedOperand(operand_value);
} else if (BytecodeOperands::IsScalableSignedByte(operand_type)) {
operand_scale = Bytecodes::ScaleForSignedOperand(operand_value);
}
return std::max(operand_scale,
NextRecursion::ScaleForOperands(operand_values));
}
template <OperandScale operand_scale>
static void Emit(ZoneVector<uint8_t>* out,
const OperandArray& operand_values) {
uint32_t operand_value = std::get<I>(operand_values);
switch (OperandScaler<operand_type, operand_scale>::kOperandSize) {
case OperandSize::kNone:
UNREACHABLE();
break;
case OperandSize::kByte:
out->push_back(static_cast<uint8_t>(operand_value));
break;
case OperandSize::kShort: {
uint16_t operand_u16 = static_cast<uint16_t>(operand_value);
const uint8_t* raw_operand =
reinterpret_cast<const uint8_t*>(&operand_u16);
out->push_back(raw_operand[0]);
out->push_back(raw_operand[1]);
break;
}
case OperandSize::kQuad: {
const uint8_t* raw_operand =
reinterpret_cast<const uint8_t*>(&operand_value);
out->push_back(raw_operand[0]);
out->push_back(raw_operand[1]);
out->push_back(raw_operand[2]);
out->push_back(raw_operand[3]);
break;
}
}
NextRecursion::template Emit<operand_scale>(out, operand_values);
}
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(OperandArrayHelper);
};
// Helper class for building an array of integer operand values given template
// packs of operand types and generic operand values. This has to be a separate
// struct to allow us to initialize the operand_types template pack using a
// variadic macro.
template <Bytecode bytecode, AccumulatorUse accumulator_use, template <Bytecode bytecode, AccumulatorUse accumulator_use,
OperandType... operand_types> OperandType... operand_types>
class OperandArrayBuilder { class BytecodeNodeBuilder {
public: public:
typedef std::array<uint32_t, sizeof...(operand_types)> Array;
template <typename... Operands> template <typename... Operands>
INLINE(static Array Build(BytecodeArrayBuilder* builder, INLINE(static BytecodeNode Make(BytecodeArrayBuilder* builder,
Operands... operands)) { Operands... operands)) {
static_assert(sizeof...(Operands) <= Bytecodes::kMaxOperands, static_assert(sizeof...(Operands) <= Bytecodes::kMaxOperands,
"too many operands for bytecode"); "too many operands for bytecode");
static_assert(sizeof...(Operands) == sizeof...(operand_types), builder->PrepareToOutputBytecode<bytecode, accumulator_use>();
"wrong number of operands"); // The "OperandHelper<operand_types>::Convert(builder, operands)..." will
// expand both the OperandType... and Operands... parameter packs e.g. for:
// Calculate and store the converted operand values. // BytecodeNodeBuilder<OperandType::kReg, OperandType::kImm>::Make<
// // Register, int>(..., Register reg, int immediate)
// The "OperandHelper<operand_types>::Convert(builder, operands)..."
// will expand both the OperandType... and Operands... parameter packs
// e.g. for:
//
// OperandArrayBuilder<..., OperandType::kReg, OperandType::kImm>
// ::Build<Register, int>(..., Register reg, int immediate)
//
// the code will expand into: // the code will expand into:
//
// return {{
// OperandHelper<OperandType::kReg>::Convert(builder, reg), // OperandHelper<OperandType::kReg>::Convert(builder, reg),
// OperandHelper<OperandType::kImm>::Convert(builder, immediate) // OperandHelper<OperandType::kImm>::Convert(builder, immediate),
// }}; return BytecodeNode::Create<bytecode, accumulator_use, operand_types...>(
return {{OperandHelper<operand_types>::Convert(builder, operands)...}}; builder->CurrentSourcePosition(bytecode),
OperandHelper<operand_types>::Convert(builder, operands)...);
} }
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(OperandArrayBuilder);
}; };
} // namespace #define DEFINE_BYTECODE_OUTPUT(name, ...) \
void BytecodeArrayBuilder::AttachSourceInfo(
const BytecodeSourceInfo& source_info) {
if (!source_info.is_valid()) return;
int bytecode_offset = static_cast<int>(bytecodes()->size());
source_position_table_builder()->AddPosition(
bytecode_offset, SourcePosition(source_info.source_position()),
source_info.is_statement());
}
void BytecodeArrayBuilder::AttachDeferredAndCurrentSourceInfo(
BytecodeSourceInfo source_info) {
if (deferred_source_info_.is_valid()) {
if (source_info.is_valid()) {
// We need to attach the current source info to the current bytecode, so
// attach the deferred source to a nop instead.
AttachSourceInfo(deferred_source_info_);
bytecodes()->push_back(Bytecodes::ToByte(Bytecode::kNop));
} else {
if (last_bytecode_had_source_info_) {
// We've taken over an elided source info, but don't have source info
// for ourselves. Emit a nop for the elided source info, since we're
// attaching deferred source info to the current bytecode.
// TODO(leszeks): Eliding and deferring feel very similar, maybe we can
// unify them.
bytecodes()->push_back(Bytecodes::ToByte(Bytecode::kNop));
}
// Attach the deferred source info to the current bytecode.
source_info = deferred_source_info_;
}
// Either way, we can invalidate the stored deferred source info.
deferred_source_info_.set_invalid();
}
AttachSourceInfo(source_info);
// We may have decided to attach the last bytecode's source info to the
// current one, so include that decision in the builder state.
last_bytecode_had_source_info_ |= source_info.is_valid();
}
void BytecodeArrayBuilder::SetDeferredSourceInfo(
BytecodeSourceInfo source_info) {
if (!source_info.is_valid()) return;
if (deferred_source_info_.is_valid()) {
// Emit any previous deferred source info now as a nop.
AttachSourceInfo(deferred_source_info_);
bytecodes()->push_back(Bytecodes::ToByte(Bytecode::kNop));
}
deferred_source_info_ = source_info;
}
template <AccumulatorUse accumulator_use, OperandType... operand_types>
void BytecodeArrayBuilder::Write(
Bytecode bytecode, BytecodeSourceInfo source_info,
std::array<uint32_t, sizeof...(operand_types)> operand_values) {
DCHECK(!Bytecodes::IsJump(bytecode));
DCHECK(!Bytecodes::IsSwitch(bytecode));
DCHECK(!exit_seen_in_block_); // Don't emit dead code.
MaybeElideLastBytecode(bytecode, source_info.is_valid());
AttachDeferredAndCurrentSourceInfo(source_info);
EmitBytecode<operand_types...>(bytecode, operand_values);
}
template <AccumulatorUse accumulator_use, OperandType... operand_types>
void BytecodeArrayBuilder::WriteJump(
Bytecode bytecode, BytecodeSourceInfo source_info, BytecodeLabel* label,
std::array<uint32_t, sizeof...(operand_types)> operand_values) {
DCHECK(Bytecodes::IsJump(bytecode));
DCHECK_EQ(0u, operand_values[0]);
DCHECK(!exit_seen_in_block_); // Don't emit dead code.
MaybeElideLastBytecode(bytecode, source_info.is_valid());
AttachDeferredAndCurrentSourceInfo(source_info);
size_t current_offset = bytecodes()->size();
if (bytecode == Bytecode::kJumpLoop) {
// This is a backwards jump, so label has already been bound.
DCHECK(label->is_bound());
CHECK_GE(current_offset, label->offset());
CHECK_LE(current_offset, static_cast<size_t>(kMaxUInt32));
uint32_t delta = static_cast<uint32_t>(current_offset - label->offset());
OperandScale operand_scale = Bytecodes::ScaleForUnsignedOperand(delta);
if (operand_scale > OperandScale::kSingle) {
// Adjust for scaling byte prefix for wide jump offset.
delta += 1;
}
operand_values[0] = delta;
} else {
DCHECK(Bytecodes::IsForwardJump(bytecode));
DCHECK(!label->is_bound());
// The label has not yet been bound so this is a forward reference
// that will be patched when the label is bound. We create a
// reservation in the constant pool so the jump can be patched
// when the label is bound. The reservation means the maximum size
// of the operand for the constant is known and the jump can
// be emitted into the bytecode stream with space for the operand.
unbound_jumps_++;
label->set_referrer(current_offset);
OperandSize reserved_operand_size =
constant_array_builder()->CreateReservedEntry();
switch (reserved_operand_size) {
case OperandSize::kNone:
UNREACHABLE();
break;
case OperandSize::kByte:
operand_values[0] = k8BitJumpPlaceholder;
break;
case OperandSize::kShort:
operand_values[0] = k16BitJumpPlaceholder;
break;
case OperandSize::kQuad:
operand_values[0] = k32BitJumpPlaceholder;
break;
}
}
EmitBytecode<operand_types...>(bytecode, operand_values);
}
template <AccumulatorUse accumulator_use, OperandType... operand_types>
void BytecodeArrayBuilder::WriteSwitch(
Bytecode bytecode, BytecodeSourceInfo source_info,
BytecodeJumpTable* jump_table,
std::array<uint32_t, sizeof...(operand_types)> operand_values) {
DCHECK(Bytecodes::IsSwitch(bytecode));
DCHECK(!exit_seen_in_block_); // Don't emit dead code.
MaybeElideLastBytecode(bytecode, source_info.is_valid());
AttachDeferredAndCurrentSourceInfo(source_info);
size_t current_offset = bytecodes()->size();
const OperandScale operand_scale =
OperandArrayHelper<0, operand_types...>::ScaleForOperands(operand_values);
if (operand_scale > OperandScale::kSingle) {
// Adjust for scaling byte prefix.
current_offset += 1;
}
jump_table->set_switch_bytecode_offset(current_offset);
EmitBytecode<operand_types...>(bytecode, operand_values);
}
template <OperandType... operand_types>
void BytecodeArrayBuilder::EmitBytecode(
Bytecode bytecode,
std::array<uint32_t, sizeof...(operand_types)> operand_values) {
// Create a typedef for the OperandArrayHelper we'll be using for operating
// on the operands.
typedef OperandArrayHelper<0, operand_types...> OperandArrayHelper;
// Calculate the maximum scale needed by the operand values.
const OperandScale operand_scale =
OperandArrayHelper::ScaleForOperands(operand_values);
// Update the state of the builder
last_bytecode_ = bytecode;
last_bytecode_offset_ = bytecodes()->size();
exit_seen_in_block_ = Bytecodes::EndsBasicBlock(bytecode);
// Emit a prefix scaling bytecode if needed.
if (operand_scale != OperandScale::kSingle) {
Bytecode prefix = Bytecodes::OperandScaleToPrefixBytecode(operand_scale);
bytecodes()->push_back(Bytecodes::ToByte(prefix));
}
// Emit the current bytecode.
bytecodes()->push_back(Bytecodes::ToByte(bytecode));
// Emit the operands using the helper. We switch once on the operand scale
// and call OperandArrayHelper::Emit with a static operand scale, to avoid
// testing the operand scale multiple times.
switch (operand_scale) {
case OperandScale::kSingle:
OperandArrayHelper::template Emit<OperandScale::kSingle>(bytecodes(),
operand_values);
break;
case OperandScale::kDouble:
OperandArrayHelper::template Emit<OperandScale::kDouble>(bytecodes(),
operand_values);
break;
case OperandScale::kQuadruple:
OperandArrayHelper::template Emit<OperandScale::kQuadruple>(
bytecodes(), operand_values);
break;
}
}
void BytecodeArrayBuilder::MaybeElideLastBytecode(Bytecode next_bytecode,
bool has_source_info) {
if (!elide_noneffectful_bytecodes_) return;
// If the last bytecode loaded the accumulator without any external effect,
// and the next bytecode clobbers this load without reading the accumulator,
// then the previous bytecode can be elided as it has no effect.
if (Bytecodes::IsAccumulatorLoadWithoutEffects(last_bytecode_) &&
Bytecodes::GetAccumulatorUse(next_bytecode) == AccumulatorUse::kWrite) {
DCHECK_GT(bytecodes()->size(), last_bytecode_offset_);
bytecodes()->resize(last_bytecode_offset_);
if (last_bytecode_had_source_info_) {
// If we can, attach the last bytecode's source info to the current
// bytecode, otherwise emit a nop to attach it to.
if (!has_source_info) {
has_source_info = true;
} else {
bytecodes()->push_back(Bytecodes::ToByte(Bytecode::kNop));
}
}
}
// We may have decided to attach the last bytecode's source info to the
// current one, so update the builder state now.
last_bytecode_had_source_info_ = has_source_info;
}
void BytecodeArrayBuilder::InvalidateLastBytecode() {
last_bytecode_ = Bytecode::kIllegal;
}
#define DEFINE_BYTECODE_OUTPUT(Name, ...) \
\
template <typename... Operands> \ template <typename... Operands> \
void BytecodeArrayBuilder::Output##Name(Operands... operands) { \ BytecodeNode BytecodeArrayBuilder::Create##name##Node( \
PrepareToOutputBytecode<Bytecode::k##Name, __VA_ARGS__>(); \ Operands... operands) { \
/* Exit early for dead code. */ \ return BytecodeNodeBuilder<Bytecode::k##name, __VA_ARGS__>::Make( \
if (exit_seen_in_block_) return; \ this, operands...); \
BytecodeSourceInfo source_info = CurrentSourcePosition(Bytecode::k##Name); \
Write<__VA_ARGS__>( \
Bytecode::k##Name, source_info, \
OperandArrayBuilder<Bytecode::k##Name, __VA_ARGS__>::Build( \
this, operands...)); \
} \ } \
\ \
template <typename... Operands> \ template <typename... Operands> \
void BytecodeArrayBuilder::Output##Name(BytecodeLabel* label, \ void BytecodeArrayBuilder::Output##name(Operands... operands) { \
Operands... operands) { \ BytecodeNode node(Create##name##Node(operands...)); \
PrepareToOutputBytecode<Bytecode::k##Name, __VA_ARGS__>(); \ Write(&node); \
/* Exit early for dead code. */ \
if (exit_seen_in_block_) return; \
BytecodeSourceInfo source_info = CurrentSourcePosition(Bytecode::k##Name); \
WriteJump<__VA_ARGS__>( \
Bytecode::k##Name, source_info, label, \
OperandArrayBuilder<Bytecode::k##Name, __VA_ARGS__>::Build( \
this, operands...)); \
} \ } \
\ \
template <typename... Operands> \ template <typename... Operands> \
void BytecodeArrayBuilder::Output##Name(BytecodeJumpTable* jump_table, \ void BytecodeArrayBuilder::Output##name(BytecodeLabel* label, \
Operands... operands) { \ Operands... operands) { \
PrepareToOutputBytecode<Bytecode::k##Name, __VA_ARGS__>(); \ DCHECK(Bytecodes::IsJump(Bytecode::k##name)); \
/* Exit early for dead code. */ \ BytecodeNode node(Create##name##Node(operands...)); \
if (exit_seen_in_block_) return; \ WriteJump(&node, label); \
BytecodeSourceInfo source_info = CurrentSourcePosition(Bytecode::k##Name); \ LeaveBasicBlock(); \
WriteSwitch<__VA_ARGS__>( \
Bytecode::k##Name, source_info, jump_table, \
OperandArrayBuilder<Bytecode::k##Name, __VA_ARGS__>::Build( \
this, operands...)); \
} }
 BYTECODE_LIST(DEFINE_BYTECODE_OUTPUT)
 #undef DEFINE_BYTECODE_OUTPUT
 
 void BytecodeArrayBuilder::OutputSwitchOnSmiNoFeedback(
     BytecodeJumpTable* jump_table) {
-  // We pass in the jump table object so that its offset can be updated, as well
-  // as the jump table's parameters which are operands for the bytecode.
-  // TODO(leszeks): Do this parameter extraction in the macro-defined function,
-  // to avoid overloading OutputSwitchOnSmiNoFeedback.
-  OutputSwitchOnSmiNoFeedback(jump_table, jump_table->constant_pool_index(),
-                              jump_table->size(),
-                              jump_table->case_value_base());
+  BytecodeNode node(CreateSwitchOnSmiNoFeedbackNode(
+      jump_table->constant_pool_index(), jump_table->size(),
+      jump_table->case_value_base()));
+  WriteSwitch(&node, jump_table);
+  LeaveBasicBlock();
 }
 BytecodeArrayBuilder& BytecodeArrayBuilder::BinaryOperation(Token::Value op,
@@ -1469,36 +1016,15 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::Bind(BytecodeLabel* label) {
   // Flush the register optimizer when binding a label to ensure all
   // expected registers are valid when jumping to this label.
   if (register_optimizer_) register_optimizer_->Flush();
-
-  size_t current_offset = bytecodes()->size();
-  if (label->is_forward_target()) {
-    // An earlier jump instruction refers to this label. Update it's location.
-    PatchJump(current_offset, label->offset());
-    // Now treat as if the label will only be back referred to.
-  }
-  label->bind_to(current_offset);
-  InvalidateLastBytecode();
-  // Starting a new basic block.
+  bytecode_array_writer_.BindLabel(label);
   LeaveBasicBlock();
   return *this;
 }
 
 BytecodeArrayBuilder& BytecodeArrayBuilder::Bind(const BytecodeLabel& target,
                                                  BytecodeLabel* label) {
-  DCHECK(!label->is_bound());
-  DCHECK(target.is_bound());
-  if (label->is_forward_target()) {
-    // An earlier jump instruction refers to this label. Update it's location.
-    PatchJump(target.offset(), label->offset());
-    // Now treat as if the label will only be back referred to.
-  }
-  label->bind_to(target.offset());
-  InvalidateLastBytecode();
-  // exit_seen_in_block_ was reset when target was bound, so shouldn't be
-  // changed here.
+  bytecode_array_writer_.BindLabel(target, label);
+  LeaveBasicBlock();
   return *this;
 }
@@ -1507,21 +1033,8 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::Bind(BytecodeJumpTable* jump_table,
   // Flush the register optimizer when binding a jump table entry to ensure
   // all expected registers are valid when jumping to this location.
   if (register_optimizer_) register_optimizer_->Flush();
-
-  DCHECK(!jump_table->is_bound(case_value));
-
-  size_t current_offset = bytecodes()->size();
-  size_t relative_jump = current_offset - jump_table->switch_bytecode_offset();
-
-  constant_array_builder()->SetJumpTableSmi(
-      jump_table->ConstantPoolEntryFor(case_value),
-      Smi::FromInt(static_cast<int>(relative_jump)));
-  jump_table->mark_bound(case_value);
-
-  InvalidateLastBytecode();
-  // Starting a new basic block.
+  bytecode_array_writer_.BindJumpTableEntry(jump_table, case_value);
   LeaveBasicBlock();
   return *this;
 }
@@ -1674,6 +1187,7 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::ReThrow() {
 BytecodeArrayBuilder& BytecodeArrayBuilder::Return() {
   SetReturnPosition();
   OutputReturn();
+  return_seen_in_block_ = true;
   return *this;
 }
@@ -1988,8 +1502,7 @@ bool BytecodeArrayBuilder::RegisterListIsValid(RegisterList reg_list) const {
   }
 }
 
-template <Bytecode bytecode, AccumulatorUse accumulator_use,
-          OperandType... operand_types>
+template <Bytecode bytecode, AccumulatorUse accumulator_use>
 void BytecodeArrayBuilder::PrepareToOutputBytecode() {
   if (register_optimizer_)
     register_optimizer_->PrepareForBytecode<bytecode, accumulator_use>();
...
src/interpreter/bytecode-array-builder.h

@@ -5,10 +5,10 @@
 #ifndef V8_INTERPRETER_BYTECODE_ARRAY_BUILDER_H_
 #define V8_INTERPRETER_BYTECODE_ARRAY_BUILDER_H_
 
-#include <array>
 #include "src/ast/ast.h"
 #include "src/base/compiler-specific.h"
 #include "src/globals.h"
+#include "src/interpreter/bytecode-array-writer.h"
 #include "src/interpreter/bytecode-flags.h"
 #include "src/interpreter/bytecode-register-allocator.h"
 #include "src/interpreter/bytecode-register.h"
@@ -16,7 +16,6 @@
 #include "src/interpreter/bytecodes.h"
 #include "src/interpreter/constant-array-builder.h"
 #include "src/interpreter/handler-table-builder.h"
-#include "src/source-position-table.h"
 #include "src/zone/zone-containers.h"
 
 namespace v8 {
@@ -463,7 +462,7 @@ class V8_EXPORT_PRIVATE BytecodeArrayBuilder final
     latest_source_info_.MakeStatementPosition(expr->position());
   }
 
-  bool RequiresImplicitReturn() const { return !exit_seen_in_block_; }
+  bool RequiresImplicitReturn() const { return !return_seen_in_block_; }
 
   // Returns the raw operand value for the given register or register list.
   uint32_t GetInputRegisterOperand(Register reg);
@@ -487,103 +486,61 @@ class V8_EXPORT_PRIVATE BytecodeArrayBuilder final
Zone* zone() const { return zone_; } Zone* zone() const { return zone_; }
private: private:
// Maximum sized packed bytecode is comprised of a prefix bytecode, friend class BytecodeRegisterAllocator;
// plus the actual bytecode, plus the maximum number of operands times template <Bytecode bytecode, AccumulatorUse accumulator_use,
// the maximum operand size. OperandType... operand_types>
static const size_t kMaxSizeOfPackedBytecode = friend class BytecodeNodeBuilder;
2 * sizeof(Bytecode) +
Bytecodes::kMaxOperands * static_cast<size_t>(OperandSize::kLast); const FeedbackVectorSpec* feedback_vector_spec() const {
return literal_->feedback_vector_spec();
// Constants that act as placeholders for jump operands to be }
// patched. These have operand sizes that match the sizes of
// reserved constant pool entries. // Returns the current source position for the given |bytecode|.
static const uint32_t k8BitJumpPlaceholder = 0x7f; INLINE(BytecodeSourceInfo CurrentSourcePosition(Bytecode bytecode));
static const uint32_t k16BitJumpPlaceholder =
k8BitJumpPlaceholder | (k8BitJumpPlaceholder << 8);
static const uint32_t k32BitJumpPlaceholder =
k16BitJumpPlaceholder | (k16BitJumpPlaceholder << 16);
void PatchJump(size_t jump_target, size_t jump_location);
void PatchJumpWith8BitOperand(size_t jump_location, int delta);
void PatchJumpWith16BitOperand(size_t jump_location, int delta);
void PatchJumpWith32BitOperand(size_t jump_location, int delta);
// Emit a non-jump bytecode with the given integer operand values.
template <AccumulatorUse accumulator_use, OperandType... operand_types>
void Write(Bytecode bytecode, BytecodeSourceInfo source_info,
std::array<uint32_t, sizeof...(operand_types)> operand_values);
// Emit a jump bytecode with the given integer operand values.
template <AccumulatorUse accumulator_use, OperandType... operand_types>
void WriteJump(Bytecode bytecode, BytecodeSourceInfo source_info,
BytecodeLabel* label,
std::array<uint32_t, sizeof...(operand_types)> operand_values);
// Emit a switch bytecode with the given integer operand values.
template <AccumulatorUse accumulator_use, OperandType... operand_types>
void WriteSwitch(
Bytecode bytecode, BytecodeSourceInfo source_info,
BytecodeJumpTable* jump_table,
std::array<uint32_t, sizeof...(operand_types)> operand_values);
// Emit the actual bytes of a bytecode and its operands. Called by Emit for
// jump and non-jump bytecodes.
template <OperandType... operand_types>
void EmitBytecode(
Bytecode bytecode,
std::array<uint32_t, sizeof...(operand_types)> operand_values);
#define DECLARE_BYTECODE_OUTPUT(Name, ...) \ #define DECLARE_BYTECODE_OUTPUT(Name, ...) \
template <typename... Operands> \ template <typename... Operands> \
INLINE(void Output##Name(Operands... operands)); \ INLINE(BytecodeNode Create##Name##Node(Operands... operands)); \
\
template <typename... Operands> \ template <typename... Operands> \
INLINE(void Output##Name(BytecodeLabel* label, Operands... operands)); \ INLINE(void Output##Name(Operands... operands)); \
\
template <typename... Operands> \ template <typename... Operands> \
INLINE( \ INLINE(void Output##Name(BytecodeLabel* label, Operands... operands));
void Output##Name(BytecodeJumpTable* jump_table, Operands... operands));
BYTECODE_LIST(DECLARE_BYTECODE_OUTPUT) BYTECODE_LIST(DECLARE_BYTECODE_OUTPUT)
#undef DECLARE_OPERAND_TYPE_INFO #undef DECLARE_OPERAND_TYPE_INFO
INLINE(void OutputSwitchOnSmiNoFeedback(BytecodeJumpTable* jump_table)); INLINE(void OutputSwitchOnSmiNoFeedback(BytecodeJumpTable* jump_table));
void MaybeElideLastBytecode(Bytecode next_bytecode, bool has_source_info);
void InvalidateLastBytecode();
bool RegisterIsValid(Register reg) const; bool RegisterIsValid(Register reg) const;
bool RegisterListIsValid(RegisterList reg_list) const; bool RegisterListIsValid(RegisterList reg_list) const;
// Set position for return. // Set position for return.
void SetReturnPosition(); void SetReturnPosition();
// Returns the current source position for the given |bytecode|.
INLINE(BytecodeSourceInfo CurrentSourcePosition(Bytecode bytecode));
// Update the source table for the current offset with the given source info.
void AttachSourceInfo(const BytecodeSourceInfo& source_info);
// Sets a deferred source info which should be emitted before any future // Sets a deferred source info which should be emitted before any future
// source info (either attached to a following bytecode or as a nop). // source info (either attached to a following bytecode or as a nop).
void SetDeferredSourceInfo(BytecodeSourceInfo source_info); void SetDeferredSourceInfo(BytecodeSourceInfo source_info);
// Attach the deferred and given source infos to the current bytecode, // Either attach deferred source info to node, or emit it as a nop bytecode
// possibly emitting a nop for the deferred info if both the deferred and // if node already have valid source info.
// given source infos are valid. void AttachOrEmitDeferredSourceInfo(BytecodeNode* node);
void AttachDeferredAndCurrentSourceInfo(BytecodeSourceInfo source_info);
// Write bytecode to bytecode array.
void Write(BytecodeNode* node);
void WriteJump(BytecodeNode* node, BytecodeLabel* label);
void WriteSwitch(BytecodeNode* node, BytecodeJumpTable* label);
// Not implemented as the illegal bytecode is used inside internally // Not implemented as the illegal bytecode is used inside internally
// to indicate a bytecode field is not valid or an error has occured // to indicate a bytecode field is not valid or an error has occured
// during bytecode generation. // during bytecode generation.
BytecodeArrayBuilder& Illegal(); BytecodeArrayBuilder& Illegal();
template <Bytecode bytecode, AccumulatorUse accumulator_use, template <Bytecode bytecode, AccumulatorUse accumulator_use>
OperandType... operand_types>
void PrepareToOutputBytecode(); void PrepareToOutputBytecode();
void LeaveBasicBlock() { exit_seen_in_block_ = false; } void LeaveBasicBlock() { return_seen_in_block_ = false; }
ZoneVector<uint8_t>* bytecodes() { return &bytecodes_; } BytecodeArrayWriter* bytecode_array_writer() {
const ZoneVector<uint8_t>* bytecodes() const { return &bytecodes_; } return &bytecode_array_writer_;
}
ConstantArrayBuilder* constant_array_builder() { ConstantArrayBuilder* constant_array_builder() {
return &constant_array_builder_; return &constant_array_builder_;
} }
...@@ -593,45 +550,24 @@ class V8_EXPORT_PRIVATE BytecodeArrayBuilder final ...@@ -593,45 +550,24 @@ class V8_EXPORT_PRIVATE BytecodeArrayBuilder final
HandlerTableBuilder* handler_table_builder() { HandlerTableBuilder* handler_table_builder() {
return &handler_table_builder_; return &handler_table_builder_;
} }
SourcePositionTableBuilder* source_position_table_builder() {
return &source_position_table_builder_;
}
const FeedbackVectorSpec* feedback_vector_spec() const {
return literal_->feedback_vector_spec();
}
void set_latest_source_info(BytecodeSourceInfo source_info) {
latest_source_info_ = source_info;
}
Zone* zone_; Zone* zone_;
ZoneVector<uint8_t> bytecodes_;
FunctionLiteral* literal_; FunctionLiteral* literal_;
bool bytecode_generated_;
ConstantArrayBuilder constant_array_builder_; ConstantArrayBuilder constant_array_builder_;
HandlerTableBuilder handler_table_builder_; HandlerTableBuilder handler_table_builder_;
SourcePositionTableBuilder source_position_table_builder_; bool return_seen_in_block_;
int parameter_count_;
int local_register_count_;
int return_position_;
BytecodeRegisterAllocator register_allocator_; BytecodeRegisterAllocator register_allocator_;
BytecodeArrayWriter bytecode_array_writer_;
BytecodeRegisterOptimizer* register_optimizer_; BytecodeRegisterOptimizer* register_optimizer_;
BytecodeSourceInfo latest_source_info_; BytecodeSourceInfo latest_source_info_;
BytecodeSourceInfo deferred_source_info_; BytecodeSourceInfo deferred_source_info_;
int parameter_count_;
int local_register_count_;
int return_position_;
int unbound_jumps_;
bool bytecode_generated_;
bool elide_noneffectful_bytecodes_;
bool exit_seen_in_block_;
bool last_bytecode_had_source_info_;
size_t last_bytecode_offset_;
Bytecode last_bytecode_;
static int const kNoFeedbackSlot = 0; static int const kNoFeedbackSlot = 0;
friend class BytecodeArrayBuilderTest;
DISALLOW_COPY_AND_ASSIGN(BytecodeArrayBuilder); DISALLOW_COPY_AND_ASSIGN(BytecodeArrayBuilder);
}; };
...

src/interpreter/bytecode-array-writer.cc (new file)
// Copyright 2015 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/interpreter/bytecode-array-writer.h"
#include "src/api.h"
#include "src/interpreter/bytecode-jump-table.h"
#include "src/interpreter/bytecode-label.h"
#include "src/interpreter/bytecode-node.h"
#include "src/interpreter/bytecode-register.h"
#include "src/interpreter/bytecode-source-info.h"
#include "src/interpreter/constant-array-builder.h"
#include "src/log.h"
#include "src/objects-inl.h"
namespace v8 {
namespace internal {
namespace interpreter {
STATIC_CONST_MEMBER_DEFINITION const size_t
BytecodeArrayWriter::kMaxSizeOfPackedBytecode;
BytecodeArrayWriter::BytecodeArrayWriter(
Zone* zone, ConstantArrayBuilder* constant_array_builder,
SourcePositionTableBuilder::RecordingMode source_position_mode)
: bytecodes_(zone),
unbound_jumps_(0),
source_position_table_builder_(zone, source_position_mode),
constant_array_builder_(constant_array_builder),
last_bytecode_(Bytecode::kIllegal),
last_bytecode_offset_(0),
last_bytecode_had_source_info_(false),
elide_noneffectful_bytecodes_(FLAG_ignition_elide_noneffectful_bytecodes),
exit_seen_in_block_(false) {
bytecodes_.reserve(512); // Derived via experimentation.
}
Handle<BytecodeArray> BytecodeArrayWriter::ToBytecodeArray(
Isolate* isolate, int register_count, int parameter_count,
Handle<FixedArray> handler_table) {
DCHECK_EQ(0, unbound_jumps_);
int bytecode_size = static_cast<int>(bytecodes()->size());
int frame_size = register_count * kPointerSize;
Handle<FixedArray> constant_pool =
constant_array_builder()->ToFixedArray(isolate);
Handle<BytecodeArray> bytecode_array = isolate->factory()->NewBytecodeArray(
bytecode_size, &bytecodes()->front(), frame_size, parameter_count,
constant_pool);
bytecode_array->set_handler_table(*handler_table);
Handle<ByteArray> source_position_table =
source_position_table_builder()->ToSourcePositionTable(
isolate, Handle<AbstractCode>::cast(bytecode_array));
bytecode_array->set_source_position_table(*source_position_table);
return bytecode_array;
}
void BytecodeArrayWriter::Write(BytecodeNode* node) {
DCHECK(!Bytecodes::IsJump(node->bytecode()));
if (exit_seen_in_block_) return; // Don't emit dead code.
UpdateExitSeenInBlock(node->bytecode());
MaybeElideLastBytecode(node->bytecode(), node->source_info().is_valid());
UpdateSourcePositionTable(node);
EmitBytecode(node);
}
void BytecodeArrayWriter::WriteJump(BytecodeNode* node, BytecodeLabel* label) {
DCHECK(Bytecodes::IsJump(node->bytecode()));
// TODO(rmcilroy): For forward jumps we could also mark the label as dead,
// thereby avoiding emitting dead code when we bind the label.
if (exit_seen_in_block_) return; // Don't emit dead code.
UpdateExitSeenInBlock(node->bytecode());
MaybeElideLastBytecode(node->bytecode(), node->source_info().is_valid());
UpdateSourcePositionTable(node);
EmitJump(node, label);
}
void BytecodeArrayWriter::WriteSwitch(BytecodeNode* node,
BytecodeJumpTable* jump_table) {
DCHECK(Bytecodes::IsSwitch(node->bytecode()));
// TODO(rmcilroy): For jump tables we could also mark the table as dead,
// thereby avoiding emitting dead code when we bind the entries.
if (exit_seen_in_block_) return; // Don't emit dead code.
UpdateExitSeenInBlock(node->bytecode());
MaybeElideLastBytecode(node->bytecode(), node->source_info().is_valid());
UpdateSourcePositionTable(node);
EmitSwitch(node, jump_table);
}
void BytecodeArrayWriter::BindLabel(BytecodeLabel* label) {
size_t current_offset = bytecodes()->size();
if (label->is_forward_target()) {
// An earlier jump instruction refers to this label. Update it's location.
PatchJump(current_offset, label->offset());
// Now treat as if the label will only be back referred to.
}
label->bind_to(current_offset);
InvalidateLastBytecode();
exit_seen_in_block_ = false; // Starting a new basic block.
}
void BytecodeArrayWriter::BindLabel(const BytecodeLabel& target,
BytecodeLabel* label) {
DCHECK(!label->is_bound());
DCHECK(target.is_bound());
if (label->is_forward_target()) {
// An earlier jump instruction refers to this label. Update it's location.
PatchJump(target.offset(), label->offset());
// Now treat as if the label will only be back referred to.
}
label->bind_to(target.offset());
InvalidateLastBytecode();
// exit_seen_in_block_ was reset when target was bound, so shouldn't be
// changed here.
}
void BytecodeArrayWriter::BindJumpTableEntry(BytecodeJumpTable* jump_table,
int case_value) {
DCHECK(!jump_table->is_bound(case_value));
size_t current_offset = bytecodes()->size();
size_t relative_jump = current_offset - jump_table->switch_bytecode_offset();
constant_array_builder()->SetJumpTableSmi(
jump_table->ConstantPoolEntryFor(case_value),
Smi::FromInt(static_cast<int>(relative_jump)));
jump_table->mark_bound(case_value);
InvalidateLastBytecode();
exit_seen_in_block_ = false; // Starting a new basic block.
}
void BytecodeArrayWriter::UpdateSourcePositionTable(
const BytecodeNode* const node) {
int bytecode_offset = static_cast<int>(bytecodes()->size());
const BytecodeSourceInfo& source_info = node->source_info();
if (source_info.is_valid()) {
source_position_table_builder()->AddPosition(
bytecode_offset, SourcePosition(source_info.source_position()),
source_info.is_statement());
}
}
void BytecodeArrayWriter::UpdateExitSeenInBlock(Bytecode bytecode) {
switch (bytecode) {
case Bytecode::kReturn:
case Bytecode::kThrow:
case Bytecode::kReThrow:
case Bytecode::kJump:
case Bytecode::kJumpConstant:
exit_seen_in_block_ = true;
break;
default:
break;
}
}
void BytecodeArrayWriter::MaybeElideLastBytecode(Bytecode next_bytecode,
bool has_source_info) {
if (!elide_noneffectful_bytecodes_) return;
// If the last bytecode loaded the accumulator without any external effect,
// and the next bytecode clobbers this load without reading the accumulator,
// then the previous bytecode can be elided as it has no effect.
if (Bytecodes::IsAccumulatorLoadWithoutEffects(last_bytecode_) &&
Bytecodes::GetAccumulatorUse(next_bytecode) == AccumulatorUse::kWrite &&
(!last_bytecode_had_source_info_ || !has_source_info)) {
DCHECK_GT(bytecodes()->size(), last_bytecode_offset_);
bytecodes()->resize(last_bytecode_offset_);
// If the last bytecode had source info we will transfer the source info
// to this bytecode.
has_source_info |= last_bytecode_had_source_info_;
}
last_bytecode_ = next_bytecode;
last_bytecode_had_source_info_ = has_source_info;
last_bytecode_offset_ = bytecodes()->size();
}
void BytecodeArrayWriter::InvalidateLastBytecode() {
last_bytecode_ = Bytecode::kIllegal;
}
void BytecodeArrayWriter::EmitBytecode(const BytecodeNode* const node) {
DCHECK_NE(node->bytecode(), Bytecode::kIllegal);
Bytecode bytecode = node->bytecode();
OperandScale operand_scale = node->operand_scale();
if (operand_scale != OperandScale::kSingle) {
Bytecode prefix = Bytecodes::OperandScaleToPrefixBytecode(operand_scale);
bytecodes()->push_back(Bytecodes::ToByte(prefix));
}
bytecodes()->push_back(Bytecodes::ToByte(bytecode));
const uint32_t* const operands = node->operands();
const int operand_count = node->operand_count();
const OperandSize* operand_sizes =
Bytecodes::GetOperandSizes(bytecode, operand_scale);
for (int i = 0; i < operand_count; ++i) {
switch (operand_sizes[i]) {
case OperandSize::kNone:
UNREACHABLE();
break;
case OperandSize::kByte:
bytecodes()->push_back(static_cast<uint8_t>(operands[i]));
break;
case OperandSize::kShort: {
uint16_t operand = static_cast<uint16_t>(operands[i]);
const uint8_t* raw_operand = reinterpret_cast<const uint8_t*>(&operand);
bytecodes()->push_back(raw_operand[0]);
bytecodes()->push_back(raw_operand[1]);
break;
}
case OperandSize::kQuad: {
const uint8_t* raw_operand =
reinterpret_cast<const uint8_t*>(&operands[i]);
bytecodes()->push_back(raw_operand[0]);
bytecodes()->push_back(raw_operand[1]);
bytecodes()->push_back(raw_operand[2]);
bytecodes()->push_back(raw_operand[3]);
break;
}
}
}
}
// static
Bytecode GetJumpWithConstantOperand(Bytecode jump_bytecode) {
switch (jump_bytecode) {
case Bytecode::kJump:
return Bytecode::kJumpConstant;
case Bytecode::kJumpIfTrue:
return Bytecode::kJumpIfTrueConstant;
case Bytecode::kJumpIfFalse:
return Bytecode::kJumpIfFalseConstant;
case Bytecode::kJumpIfToBooleanTrue:
return Bytecode::kJumpIfToBooleanTrueConstant;
case Bytecode::kJumpIfToBooleanFalse:
return Bytecode::kJumpIfToBooleanFalseConstant;
case Bytecode::kJumpIfNull:
return Bytecode::kJumpIfNullConstant;
case Bytecode::kJumpIfNotNull:
return Bytecode::kJumpIfNotNullConstant;
case Bytecode::kJumpIfUndefined:
return Bytecode::kJumpIfUndefinedConstant;
case Bytecode::kJumpIfNotUndefined:
return Bytecode::kJumpIfNotUndefinedConstant;
case Bytecode::kJumpIfJSReceiver:
return Bytecode::kJumpIfJSReceiverConstant;
default:
UNREACHABLE();
}
}
void BytecodeArrayWriter::PatchJumpWith8BitOperand(size_t jump_location,
int delta) {
Bytecode jump_bytecode = Bytecodes::FromByte(bytecodes()->at(jump_location));
DCHECK(Bytecodes::IsForwardJump(jump_bytecode));
DCHECK(Bytecodes::IsJumpImmediate(jump_bytecode));
DCHECK_EQ(Bytecodes::GetOperandType(jump_bytecode, 0), OperandType::kUImm);
DCHECK_GT(delta, 0);
size_t operand_location = jump_location + 1;
DCHECK_EQ(bytecodes()->at(operand_location), k8BitJumpPlaceholder);
if (Bytecodes::ScaleForUnsignedOperand(delta) == OperandScale::kSingle) {
// The jump fits within the range of an UImm8 operand, so cancel
// the reservation and jump directly.
constant_array_builder()->DiscardReservedEntry(OperandSize::kByte);
bytecodes()->at(operand_location) = static_cast<uint8_t>(delta);
} else {
// The jump does not fit within the range of an UImm8 operand, so
// commit reservation putting the offset into the constant pool,
// and update the jump instruction and operand.
size_t entry = constant_array_builder()->CommitReservedEntry(
OperandSize::kByte, Smi::FromInt(delta));
DCHECK_EQ(Bytecodes::SizeForUnsignedOperand(static_cast<uint32_t>(entry)),
OperandSize::kByte);
jump_bytecode = GetJumpWithConstantOperand(jump_bytecode);
bytecodes()->at(jump_location) = Bytecodes::ToByte(jump_bytecode);
bytecodes()->at(operand_location) = static_cast<uint8_t>(entry);
}
}
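// Worked example (illustrative, not part of this CL): if the bound label
// turns out to be, say, 300 bytes ahead, the delta no longer fits a UImm8
// operand. The byte-sized reservation is committed with Smi::FromInt(300),
// the opcode at jump_location is rewritten to its Constant form (e.g.
// kJumpIfTrue becomes kJumpIfTrueConstant), and the operand byte now holds
// the constant pool index rather than the jump delta.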
void BytecodeArrayWriter::PatchJumpWith16BitOperand(size_t jump_location,
int delta) {
Bytecode jump_bytecode = Bytecodes::FromByte(bytecodes()->at(jump_location));
DCHECK(Bytecodes::IsForwardJump(jump_bytecode));
DCHECK(Bytecodes::IsJumpImmediate(jump_bytecode));
DCHECK_EQ(Bytecodes::GetOperandType(jump_bytecode, 0), OperandType::kUImm);
DCHECK_GT(delta, 0);
size_t operand_location = jump_location + 1;
uint8_t operand_bytes[2];
if (Bytecodes::ScaleForUnsignedOperand(delta) <= OperandScale::kDouble) {
    // The jump fits within the range of a UImm16 operand, so cancel
    // the reservation and use the delta as the operand directly.
constant_array_builder()->DiscardReservedEntry(OperandSize::kShort);
WriteUnalignedUInt16(operand_bytes, static_cast<uint16_t>(delta));
} else {
    // The jump does not fit within the range of a UImm16 operand, so
    // commit the reservation, putting the jump offset into the constant
    // pool, and update the jump instruction and its operand accordingly.
size_t entry = constant_array_builder()->CommitReservedEntry(
OperandSize::kShort, Smi::FromInt(delta));
jump_bytecode = GetJumpWithConstantOperand(jump_bytecode);
bytecodes()->at(jump_location) = Bytecodes::ToByte(jump_bytecode);
WriteUnalignedUInt16(operand_bytes, static_cast<uint16_t>(entry));
}
DCHECK(bytecodes()->at(operand_location) == k8BitJumpPlaceholder &&
bytecodes()->at(operand_location + 1) == k8BitJumpPlaceholder);
bytecodes()->at(operand_location++) = operand_bytes[0];
bytecodes()->at(operand_location) = operand_bytes[1];
}
void BytecodeArrayWriter::PatchJumpWith32BitOperand(size_t jump_location,
int delta) {
DCHECK(Bytecodes::IsJumpImmediate(
Bytecodes::FromByte(bytecodes()->at(jump_location))));
constant_array_builder()->DiscardReservedEntry(OperandSize::kQuad);
uint8_t operand_bytes[4];
WriteUnalignedUInt32(operand_bytes, static_cast<uint32_t>(delta));
size_t operand_location = jump_location + 1;
DCHECK(bytecodes()->at(operand_location) == k8BitJumpPlaceholder &&
bytecodes()->at(operand_location + 1) == k8BitJumpPlaceholder &&
bytecodes()->at(operand_location + 2) == k8BitJumpPlaceholder &&
bytecodes()->at(operand_location + 3) == k8BitJumpPlaceholder);
bytecodes()->at(operand_location++) = operand_bytes[0];
bytecodes()->at(operand_location++) = operand_bytes[1];
bytecodes()->at(operand_location++) = operand_bytes[2];
bytecodes()->at(operand_location) = operand_bytes[3];
}
void BytecodeArrayWriter::PatchJump(size_t jump_target, size_t jump_location) {
Bytecode jump_bytecode = Bytecodes::FromByte(bytecodes()->at(jump_location));
int delta = static_cast<int>(jump_target - jump_location);
int prefix_offset = 0;
OperandScale operand_scale = OperandScale::kSingle;
if (Bytecodes::IsPrefixScalingBytecode(jump_bytecode)) {
    // If a prefix scaling bytecode was emitted, the target offset is one
    // less than it would be without the prefix.
delta -= 1;
prefix_offset = 1;
operand_scale = Bytecodes::PrefixBytecodeToOperandScale(jump_bytecode);
jump_bytecode =
Bytecodes::FromByte(bytecodes()->at(jump_location + prefix_offset));
}
DCHECK(Bytecodes::IsJump(jump_bytecode));
switch (operand_scale) {
case OperandScale::kSingle:
PatchJumpWith8BitOperand(jump_location, delta);
break;
case OperandScale::kDouble:
PatchJumpWith16BitOperand(jump_location + prefix_offset, delta);
break;
case OperandScale::kQuadruple:
PatchJumpWith32BitOperand(jump_location + prefix_offset, delta);
break;
default:
UNREACHABLE();
}
unbound_jumps_--;
}
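// Worked example (illustrative, not part of this CL): suppose a forward jump
// was emitted at offset 5 behind a kWide prefix (its reserved constant pool
// entry needed a 16-bit placeholder) and its label is later bound at offset
// 300. Then delta = 300 - 5 = 295, the prefix adjustment reduces it to 294,
// and PatchJumpWith16BitOperand(6, 294) writes the two operand bytes at
// offsets 7 and 8.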
void BytecodeArrayWriter::EmitJump(BytecodeNode* node, BytecodeLabel* label) {
DCHECK(Bytecodes::IsJump(node->bytecode()));
DCHECK_EQ(0u, node->operand(0));
size_t current_offset = bytecodes()->size();
if (label->is_bound()) {
CHECK_GE(current_offset, label->offset());
CHECK_LE(current_offset, static_cast<size_t>(kMaxUInt32));
    // The label has already been bound, so this is a backwards jump.
uint32_t delta = static_cast<uint32_t>(current_offset - label->offset());
OperandScale operand_scale = Bytecodes::ScaleForUnsignedOperand(delta);
if (operand_scale > OperandScale::kSingle) {
// Adjust for scaling byte prefix for wide jump offset.
delta += 1;
}
DCHECK_EQ(Bytecode::kJumpLoop, node->bytecode());
node->update_operand0(delta);
} else {
    // The label has not yet been bound, so this is a forward reference
    // that will be patched when the label is bound. We reserve an entry
    // in the constant pool so the jump can be patched later: the
    // reservation fixes the maximum operand size for the constant, so
    // the jump can be emitted into the bytecode stream now with space
    // for the operand.
unbound_jumps_++;
label->set_referrer(current_offset);
OperandSize reserved_operand_size =
constant_array_builder()->CreateReservedEntry();
DCHECK_NE(Bytecode::kJumpLoop, node->bytecode());
switch (reserved_operand_size) {
case OperandSize::kNone:
UNREACHABLE();
break;
case OperandSize::kByte:
node->update_operand0(k8BitJumpPlaceholder);
break;
case OperandSize::kShort:
node->update_operand0(k16BitJumpPlaceholder);
break;
case OperandSize::kQuad:
node->update_operand0(k32BitJumpPlaceholder);
break;
}
}
EmitBytecode(node);
}
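// Illustrative sketch (not part of this CL): a backward jump targets a label
// that is already bound, so its delta is known immediately and no constant
// pool reservation is needed. `writer` and `loop_header` are assumed to be a
// BytecodeArrayWriter and a label previously bound with BindLabel.
//
//   BytecodeNode loop(Bytecode::kJumpLoop, 0, /*loop_depth=*/0);
//   writer->WriteJump(&loop, &loop_header);  // operand 0 is set to the delta,
//                                            // +1 if a scaling prefix is needed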
void BytecodeArrayWriter::EmitSwitch(BytecodeNode* node,
BytecodeJumpTable* jump_table) {
DCHECK(Bytecodes::IsSwitch(node->bytecode()));
size_t current_offset = bytecodes()->size();
if (node->operand_scale() > OperandScale::kSingle) {
// Adjust for scaling byte prefix.
current_offset += 1;
}
jump_table->set_switch_bytecode_offset(current_offset);
EmitBytecode(node);
}
} // namespace interpreter
} // namespace internal
} // namespace v8
// Copyright 2015 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_INTERPRETER_BYTECODE_ARRAY_WRITER_H_
#define V8_INTERPRETER_BYTECODE_ARRAY_WRITER_H_
#include "src/base/compiler-specific.h"
#include "src/globals.h"
#include "src/interpreter/bytecodes.h"
#include "src/source-position-table.h"
namespace v8 {
namespace internal {
class SourcePositionTableBuilder;
namespace interpreter {
class BytecodeLabel;
class BytecodeNode;
class BytecodeJumpTable;
class ConstantArrayBuilder;
// Class for emitting bytecode as the final stage of the bytecode
// generation pipeline.
class V8_EXPORT_PRIVATE BytecodeArrayWriter final {
public:
BytecodeArrayWriter(
Zone* zone, ConstantArrayBuilder* constant_array_builder,
SourcePositionTableBuilder::RecordingMode source_position_mode);
void Write(BytecodeNode* node);
void WriteJump(BytecodeNode* node, BytecodeLabel* label);
void WriteSwitch(BytecodeNode* node, BytecodeJumpTable* jump_table);
void BindLabel(BytecodeLabel* label);
void BindLabel(const BytecodeLabel& target, BytecodeLabel* label);
void BindJumpTableEntry(BytecodeJumpTable* jump_table, int case_value);
Handle<BytecodeArray> ToBytecodeArray(Isolate* isolate, int register_count,
int parameter_count,
Handle<FixedArray> handler_table);
private:
  // The maximum-sized packed bytecode consists of a prefix bytecode,
  // plus the actual bytecode, plus the maximum number of operands times
  // the maximum operand size.
static const size_t kMaxSizeOfPackedBytecode =
2 * sizeof(Bytecode) +
Bytecodes::kMaxOperands * static_cast<size_t>(OperandSize::kLast);
// Constants that act as placeholders for jump operands to be
// patched. These have operand sizes that match the sizes of
// reserved constant pool entries.
const uint32_t k8BitJumpPlaceholder = 0x7f;
const uint32_t k16BitJumpPlaceholder =
k8BitJumpPlaceholder | (k8BitJumpPlaceholder << 8);
const uint32_t k32BitJumpPlaceholder =
k16BitJumpPlaceholder | (k16BitJumpPlaceholder << 16);
void PatchJump(size_t jump_target, size_t jump_location);
void PatchJumpWith8BitOperand(size_t jump_location, int delta);
void PatchJumpWith16BitOperand(size_t jump_location, int delta);
void PatchJumpWith32BitOperand(size_t jump_location, int delta);
void EmitBytecode(const BytecodeNode* const node);
void EmitJump(BytecodeNode* node, BytecodeLabel* label);
void EmitSwitch(BytecodeNode* node, BytecodeJumpTable* jump_table);
void UpdateSourcePositionTable(const BytecodeNode* const node);
void UpdateExitSeenInBlock(Bytecode bytecode);
void MaybeElideLastBytecode(Bytecode next_bytecode, bool has_source_info);
void InvalidateLastBytecode();
ZoneVector<uint8_t>* bytecodes() { return &bytecodes_; }
SourcePositionTableBuilder* source_position_table_builder() {
return &source_position_table_builder_;
}
ConstantArrayBuilder* constant_array_builder() {
return constant_array_builder_;
}
ZoneVector<uint8_t> bytecodes_;
int unbound_jumps_;
SourcePositionTableBuilder source_position_table_builder_;
ConstantArrayBuilder* constant_array_builder_;
Bytecode last_bytecode_;
size_t last_bytecode_offset_;
bool last_bytecode_had_source_info_;
bool elide_noneffectful_bytecodes_;
bool exit_seen_in_block_;
friend class BytecodeArrayWriterUnittest;
DISALLOW_COPY_AND_ASSIGN(BytecodeArrayWriter);
};
} // namespace interpreter
} // namespace internal
} // namespace v8
#endif // V8_INTERPRETER_BYTECODE_ARRAY_WRITER_H_
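// Minimal usage sketch of the restored writer API (illustrative, not part of
// this CL; mirrors the BytecodeArrayWriterUnittest further down). `zone`,
// `isolate` and `factory` are assumed to come from the surrounding setup.
//
//   ConstantArrayBuilder constants(zone);
//   BytecodeArrayWriter writer(
//       zone, &constants, SourcePositionTableBuilder::RECORD_SOURCE_POSITIONS);
//   BytecodeLabel done;
//   BytecodeNode jump(Bytecode::kJump, 0);
//   writer.WriteJump(&jump, &done);   // forward jump; operand patched on bind
//   writer.BindLabel(&done);
//   BytecodeNode ret(Bytecode::kReturn);
//   writer.Write(&ret);
//   Handle<BytecodeArray> array = writer.ToBytecodeArray(
//       isolate, /*register_count=*/0, /*parameter_count=*/0,
//       factory->empty_fixed_array());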
...@@ -78,7 +78,7 @@ class V8_EXPORT_PRIVATE BytecodeJumpTable final : public ZoneObject { ...@@ -78,7 +78,7 @@ class V8_EXPORT_PRIVATE BytecodeJumpTable final : public ZoneObject {
int size_; int size_;
int case_value_base_; int case_value_base_;
friend class BytecodeArrayBuilder; friend class BytecodeArrayWriter;
}; };
} // namespace interpreter } // namespace interpreter
......
...@@ -50,7 +50,7 @@ class V8_EXPORT_PRIVATE BytecodeLabel final { ...@@ -50,7 +50,7 @@ class V8_EXPORT_PRIVATE BytecodeLabel final {
bool bound_; bool bound_;
size_t offset_; size_t offset_;
friend class BytecodeArrayBuilder; friend class BytecodeArrayWriter;
}; };
// Class representing a branch target of multiple jumps. // Class representing a branch target of multiple jumps.
......
// Copyright 2017 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/interpreter/bytecode-node.h"
#include <iomanip>
#include "src/source-position-table.h"
namespace v8 {
namespace internal {
namespace interpreter {
void BytecodeNode::Print(std::ostream& os) const {
#ifdef DEBUG
std::ios saved_state(nullptr);
saved_state.copyfmt(os);
os << Bytecodes::ToString(bytecode_);
for (int i = 0; i < operand_count(); ++i) {
os << ' ' << std::setw(8) << std::setfill('0') << std::hex << operands_[i];
}
os.copyfmt(saved_state);
if (source_info_.is_valid()) {
os << ' ' << source_info_;
}
os << '\n';
#else
os << static_cast<const void*>(this);
#endif // DEBUG
}
bool BytecodeNode::operator==(const BytecodeNode& other) const {
if (this == &other) {
return true;
} else if (this->bytecode() != other.bytecode() ||
this->source_info() != other.source_info()) {
return false;
} else {
for (int i = 0; i < this->operand_count(); ++i) {
if (this->operand(i) != other.operand(i)) {
return false;
}
}
}
return true;
}
std::ostream& operator<<(std::ostream& os, const BytecodeNode& node) {
node.Print(os);
return os;
}
} // namespace interpreter
} // namespace internal
} // namespace v8
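// Illustrative note (not part of this CL): equality compares the bytecode,
// the source info and each operand, so two independently built nodes with
// the same payload compare equal -- see the BytecodeNodeTest cases further
// down.
//
//   BytecodeNode a(Bytecode::kLdaZero);
//   BytecodeNode b(Bytecode::kLdaZero);
//   DCHECK(a == b);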
// Copyright 2017 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_INTERPRETER_BYTECODE_NODE_H_
#define V8_INTERPRETER_BYTECODE_NODE_H_
#include <algorithm>
#include "src/globals.h"
#include "src/interpreter/bytecode-source-info.h"
#include "src/interpreter/bytecodes.h"
namespace v8 {
namespace internal {
namespace interpreter {
// A container for a generated bytecode, its operands, and source information.
class V8_EXPORT_PRIVATE BytecodeNode final {
public:
INLINE(BytecodeNode(Bytecode bytecode,
BytecodeSourceInfo source_info = BytecodeSourceInfo()))
: bytecode_(bytecode),
operand_count_(0),
operand_scale_(OperandScale::kSingle),
source_info_(source_info) {
DCHECK_EQ(Bytecodes::NumberOfOperands(bytecode), operand_count());
}
INLINE(BytecodeNode(Bytecode bytecode, uint32_t operand0,
BytecodeSourceInfo source_info = BytecodeSourceInfo()))
: bytecode_(bytecode),
operand_count_(1),
operand_scale_(OperandScale::kSingle),
source_info_(source_info) {
DCHECK_EQ(Bytecodes::NumberOfOperands(bytecode), operand_count());
SetOperand(0, operand0);
}
INLINE(BytecodeNode(Bytecode bytecode, uint32_t operand0, uint32_t operand1,
BytecodeSourceInfo source_info = BytecodeSourceInfo()))
: bytecode_(bytecode),
operand_count_(2),
operand_scale_(OperandScale::kSingle),
source_info_(source_info) {
DCHECK_EQ(Bytecodes::NumberOfOperands(bytecode), operand_count());
SetOperand(0, operand0);
SetOperand(1, operand1);
}
INLINE(BytecodeNode(Bytecode bytecode, uint32_t operand0, uint32_t operand1,
uint32_t operand2,
BytecodeSourceInfo source_info = BytecodeSourceInfo()))
: bytecode_(bytecode),
operand_count_(3),
operand_scale_(OperandScale::kSingle),
source_info_(source_info) {
DCHECK_EQ(Bytecodes::NumberOfOperands(bytecode), operand_count());
SetOperand(0, operand0);
SetOperand(1, operand1);
SetOperand(2, operand2);
}
INLINE(BytecodeNode(Bytecode bytecode, uint32_t operand0, uint32_t operand1,
uint32_t operand2, uint32_t operand3,
BytecodeSourceInfo source_info = BytecodeSourceInfo()))
: bytecode_(bytecode),
operand_count_(4),
operand_scale_(OperandScale::kSingle),
source_info_(source_info) {
DCHECK_EQ(Bytecodes::NumberOfOperands(bytecode), operand_count());
SetOperand(0, operand0);
SetOperand(1, operand1);
SetOperand(2, operand2);
SetOperand(3, operand3);
}
INLINE(BytecodeNode(Bytecode bytecode, uint32_t operand0, uint32_t operand1,
uint32_t operand2, uint32_t operand3, uint32_t operand4,
BytecodeSourceInfo source_info = BytecodeSourceInfo()))
: bytecode_(bytecode),
operand_count_(5),
operand_scale_(OperandScale::kSingle),
source_info_(source_info) {
DCHECK_EQ(Bytecodes::NumberOfOperands(bytecode), operand_count());
SetOperand(0, operand0);
SetOperand(1, operand1);
SetOperand(2, operand2);
SetOperand(3, operand3);
SetOperand(4, operand4);
}
#define DEFINE_BYTECODE_NODE_CREATOR(Name, ...) \
template <typename... Operands> \
INLINE(static BytecodeNode Name(BytecodeSourceInfo source_info, \
Operands... operands)) { \
return Create<Bytecode::k##Name, __VA_ARGS__>(source_info, operands...); \
}
BYTECODE_LIST(DEFINE_BYTECODE_NODE_CREATOR)
#undef DEFINE_BYTECODE_NODE_CREATOR
// Print to stream |os|.
void Print(std::ostream& os) const;
Bytecode bytecode() const { return bytecode_; }
uint32_t operand(int i) const {
DCHECK_LT(i, operand_count());
return operands_[i];
}
const uint32_t* operands() const { return operands_; }
void update_operand0(uint32_t operand0) { SetOperand(0, operand0); }
int operand_count() const { return operand_count_; }
OperandScale operand_scale() const { return operand_scale_; }
const BytecodeSourceInfo& source_info() const { return source_info_; }
void set_source_info(BytecodeSourceInfo source_info) {
source_info_ = source_info;
}
bool operator==(const BytecodeNode& other) const;
bool operator!=(const BytecodeNode& other) const { return !(*this == other); }
private:
template <Bytecode bytecode, AccumulatorUse accumulator_use,
OperandType... operand_types>
friend class BytecodeNodeBuilder;
INLINE(BytecodeNode(Bytecode bytecode, int operand_count,
OperandScale operand_scale,
BytecodeSourceInfo source_info, uint32_t operand0 = 0,
uint32_t operand1 = 0, uint32_t operand2 = 0,
uint32_t operand3 = 0, uint32_t operand4 = 0))
: bytecode_(bytecode),
operand_count_(operand_count),
operand_scale_(operand_scale),
source_info_(source_info) {
DCHECK_EQ(Bytecodes::NumberOfOperands(bytecode), operand_count);
operands_[0] = operand0;
operands_[1] = operand1;
operands_[2] = operand2;
operands_[3] = operand3;
operands_[4] = operand4;
}
template <Bytecode bytecode, AccumulatorUse accum_use>
INLINE(static BytecodeNode Create(BytecodeSourceInfo source_info)) {
return BytecodeNode(bytecode, 0, OperandScale::kSingle, source_info);
}
template <Bytecode bytecode, AccumulatorUse accum_use,
OperandType operand0_type>
INLINE(static BytecodeNode Create(BytecodeSourceInfo source_info,
uint32_t operand0)) {
DCHECK_EQ(Bytecodes::GetOperandType(bytecode, 0), operand0_type);
OperandScale scale = OperandScale::kSingle;
scale = std::max(scale, ScaleForOperand<operand0_type>(operand0));
return BytecodeNode(bytecode, 1, scale, source_info, operand0);
}
template <Bytecode bytecode, AccumulatorUse accum_use,
OperandType operand0_type, OperandType operand1_type>
INLINE(static BytecodeNode Create(BytecodeSourceInfo source_info,
uint32_t operand0, uint32_t operand1)) {
DCHECK_EQ(Bytecodes::GetOperandType(bytecode, 0), operand0_type);
DCHECK_EQ(Bytecodes::GetOperandType(bytecode, 1), operand1_type);
OperandScale scale = OperandScale::kSingle;
scale = std::max(scale, ScaleForOperand<operand0_type>(operand0));
scale = std::max(scale, ScaleForOperand<operand1_type>(operand1));
return BytecodeNode(bytecode, 2, scale, source_info, operand0, operand1);
}
template <Bytecode bytecode, AccumulatorUse accum_use,
OperandType operand0_type, OperandType operand1_type,
OperandType operand2_type>
INLINE(static BytecodeNode Create(BytecodeSourceInfo source_info,
uint32_t operand0, uint32_t operand1,
uint32_t operand2)) {
DCHECK_EQ(Bytecodes::GetOperandType(bytecode, 0), operand0_type);
DCHECK_EQ(Bytecodes::GetOperandType(bytecode, 1), operand1_type);
DCHECK_EQ(Bytecodes::GetOperandType(bytecode, 2), operand2_type);
OperandScale scale = OperandScale::kSingle;
scale = std::max(scale, ScaleForOperand<operand0_type>(operand0));
scale = std::max(scale, ScaleForOperand<operand1_type>(operand1));
scale = std::max(scale, ScaleForOperand<operand2_type>(operand2));
return BytecodeNode(bytecode, 3, scale, source_info, operand0, operand1,
operand2);
}
template <Bytecode bytecode, AccumulatorUse accum_use,
OperandType operand0_type, OperandType operand1_type,
OperandType operand2_type, OperandType operand3_type>
INLINE(static BytecodeNode Create(BytecodeSourceInfo source_info,
uint32_t operand0, uint32_t operand1,
uint32_t operand2, uint32_t operand3)) {
DCHECK_EQ(Bytecodes::GetOperandType(bytecode, 0), operand0_type);
DCHECK_EQ(Bytecodes::GetOperandType(bytecode, 1), operand1_type);
DCHECK_EQ(Bytecodes::GetOperandType(bytecode, 2), operand2_type);
DCHECK_EQ(Bytecodes::GetOperandType(bytecode, 3), operand3_type);
OperandScale scale = OperandScale::kSingle;
scale = std::max(scale, ScaleForOperand<operand0_type>(operand0));
scale = std::max(scale, ScaleForOperand<operand1_type>(operand1));
scale = std::max(scale, ScaleForOperand<operand2_type>(operand2));
scale = std::max(scale, ScaleForOperand<operand3_type>(operand3));
return BytecodeNode(bytecode, 4, scale, source_info, operand0, operand1,
operand2, operand3);
}
template <Bytecode bytecode, AccumulatorUse accum_use,
OperandType operand0_type, OperandType operand1_type,
OperandType operand2_type, OperandType operand3_type,
OperandType operand4_type>
INLINE(static BytecodeNode Create(BytecodeSourceInfo source_info,
uint32_t operand0, uint32_t operand1,
uint32_t operand2, uint32_t operand3,
uint32_t operand4)) {
DCHECK_EQ(Bytecodes::GetOperandType(bytecode, 0), operand0_type);
DCHECK_EQ(Bytecodes::GetOperandType(bytecode, 1), operand1_type);
DCHECK_EQ(Bytecodes::GetOperandType(bytecode, 2), operand2_type);
DCHECK_EQ(Bytecodes::GetOperandType(bytecode, 3), operand3_type);
DCHECK_EQ(Bytecodes::GetOperandType(bytecode, 4), operand4_type);
OperandScale scale = OperandScale::kSingle;
scale = std::max(scale, ScaleForOperand<operand0_type>(operand0));
scale = std::max(scale, ScaleForOperand<operand1_type>(operand1));
scale = std::max(scale, ScaleForOperand<operand2_type>(operand2));
scale = std::max(scale, ScaleForOperand<operand3_type>(operand3));
scale = std::max(scale, ScaleForOperand<operand4_type>(operand4));
return BytecodeNode(bytecode, 5, scale, source_info, operand0, operand1,
operand2, operand3, operand4);
}
template <OperandType operand_type>
INLINE(static OperandScale ScaleForOperand(uint32_t operand)) {
if (BytecodeOperands::IsScalableUnsignedByte(operand_type)) {
return Bytecodes::ScaleForUnsignedOperand(operand);
} else if (BytecodeOperands::IsScalableSignedByte(operand_type)) {
return Bytecodes::ScaleForSignedOperand(operand);
} else {
return OperandScale::kSingle;
}
}
INLINE(void UpdateScaleForOperand(int operand_index, uint32_t operand)) {
if (Bytecodes::OperandIsScalableSignedByte(bytecode(), operand_index)) {
operand_scale_ =
std::max(operand_scale_, Bytecodes::ScaleForSignedOperand(operand));
} else if (Bytecodes::OperandIsScalableUnsignedByte(bytecode(),
operand_index)) {
operand_scale_ =
std::max(operand_scale_, Bytecodes::ScaleForUnsignedOperand(operand));
}
}
INLINE(void SetOperand(int operand_index, uint32_t operand)) {
operands_[operand_index] = operand;
UpdateScaleForOperand(operand_index, operand);
}
Bytecode bytecode_;
uint32_t operands_[Bytecodes::kMaxOperands];
int operand_count_;
OperandScale operand_scale_;
BytecodeSourceInfo source_info_;
};
V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream& os,
const BytecodeNode& node);
} // namespace interpreter
} // namespace internal
} // namespace v8
#endif // V8_INTERPRETER_BYTECODE_NODE_H_
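// Minimal usage sketch (illustrative, not part of this CL; mirrors the
// BytecodeNode unittests further down): operands are stored as uint32_t and
// the node keeps track of the widest scale any scalable operand requires.
//
//   BytecodeNode node(Bytecode::kLdaGlobal, 0x11, 0x22);
//   DCHECK_EQ(node.bytecode(), Bytecode::kLdaGlobal);
//   DCHECK_EQ(node.operand_count(), 2);
//   DCHECK_EQ(node.operand_scale(), OperandScale::kSingle);  // both fit a byte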
...@@ -624,37 +624,6 @@ class V8_EXPORT_PRIVATE Bytecodes final { ...@@ -624,37 +624,6 @@ class V8_EXPORT_PRIVATE Bytecodes final {
return IsJump(bytecode) && !IsJumpIfToBoolean(bytecode); return IsJump(bytecode) && !IsJumpIfToBoolean(bytecode);
} }
// Return the constant operand version of the given immediate operand forward
// jump.
static Bytecode GetJumpWithConstantOperand(Bytecode jump_bytecode) {
DCHECK(IsJumpImmediate(jump_bytecode));
DCHECK(IsForwardJump(jump_bytecode));
switch (jump_bytecode) {
case Bytecode::kJump:
return Bytecode::kJumpConstant;
case Bytecode::kJumpIfTrue:
return Bytecode::kJumpIfTrueConstant;
case Bytecode::kJumpIfFalse:
return Bytecode::kJumpIfFalseConstant;
case Bytecode::kJumpIfToBooleanTrue:
return Bytecode::kJumpIfToBooleanTrueConstant;
case Bytecode::kJumpIfToBooleanFalse:
return Bytecode::kJumpIfToBooleanFalseConstant;
case Bytecode::kJumpIfNull:
return Bytecode::kJumpIfNullConstant;
case Bytecode::kJumpIfNotNull:
return Bytecode::kJumpIfNotNullConstant;
case Bytecode::kJumpIfUndefined:
return Bytecode::kJumpIfUndefinedConstant;
case Bytecode::kJumpIfNotUndefined:
return Bytecode::kJumpIfNotUndefinedConstant;
case Bytecode::kJumpIfJSReceiver:
return Bytecode::kJumpIfJSReceiverConstant;
default:
UNREACHABLE();
}
}
// Returns true if the bytecode is a switch. // Returns true if the bytecode is a switch.
static constexpr bool IsSwitch(Bytecode bytecode) { static constexpr bool IsSwitch(Bytecode bytecode) {
return bytecode == Bytecode::kSwitchOnSmiNoFeedback; return bytecode == Bytecode::kSwitchOnSmiNoFeedback;
...@@ -669,13 +638,6 @@ class V8_EXPORT_PRIVATE Bytecodes final { ...@@ -669,13 +638,6 @@ class V8_EXPORT_PRIVATE Bytecodes final {
IsJumpWithoutEffects(bytecode) || IsSwitch(bytecode)); IsJumpWithoutEffects(bytecode) || IsSwitch(bytecode));
} }
// True if the given bytecode unconditionally ends the current basic block.
static constexpr bool EndsBasicBlock(Bytecode bytecode) {
return bytecode == Bytecode::kReturn || bytecode == Bytecode::kThrow ||
bytecode == Bytecode::kReThrow || bytecode == Bytecode::kJump ||
bytecode == Bytecode::kJumpConstant;
}
// Returns true if the bytecode is Ldar or Star. // Returns true if the bytecode is Ldar or Star.
static constexpr bool IsLdarOrStar(Bytecode bytecode) { static constexpr bool IsLdarOrStar(Bytecode bytecode) {
return bytecode == Bytecode::kLdar || bytecode == Bytecode::kStar; return bytecode == Bytecode::kLdar || bytecode == Bytecode::kStar;
......
...@@ -1081,6 +1081,8 @@ ...@@ -1081,6 +1081,8 @@
'interpreter/bytecode-array-iterator.h', 'interpreter/bytecode-array-iterator.h',
'interpreter/bytecode-array-random-iterator.cc', 'interpreter/bytecode-array-random-iterator.cc',
'interpreter/bytecode-array-random-iterator.h', 'interpreter/bytecode-array-random-iterator.h',
'interpreter/bytecode-array-writer.cc',
'interpreter/bytecode-array-writer.h',
'interpreter/bytecode-decoder.cc', 'interpreter/bytecode-decoder.cc',
'interpreter/bytecode-decoder.h', 'interpreter/bytecode-decoder.h',
'interpreter/bytecode-flags.cc', 'interpreter/bytecode-flags.cc',
...@@ -1089,6 +1091,8 @@ ...@@ -1089,6 +1091,8 @@
'interpreter/bytecode-generator.h', 'interpreter/bytecode-generator.h',
'interpreter/bytecode-label.cc', 'interpreter/bytecode-label.cc',
'interpreter/bytecode-label.h', 'interpreter/bytecode-label.h',
'interpreter/bytecode-node.cc',
'interpreter/bytecode-node.h',
'interpreter/bytecode-operands.cc', 'interpreter/bytecode-operands.cc',
'interpreter/bytecode-operands.h', 'interpreter/bytecode-operands.h',
'interpreter/bytecode-register.cc', 'interpreter/bytecode-register.cc',
......
...@@ -75,12 +75,12 @@ snippet: " ...@@ -75,12 +75,12 @@ snippet: "
" "
frame size: 1 frame size: 1
parameter count: 1 parameter count: 1
bytecode array length: 13 bytecode array length: 14
bytecodes: [ bytecodes: [
/* 30 E> */ B(StackCheck), /* 30 E> */ B(StackCheck),
/* 44 S> */ B(LdaSmi), I8(10), /* 44 S> */ B(LdaSmi), I8(10),
B(Star), R(0), B(Star), R(0),
/* 48 S> */ B(Nop), /* 48 S> */ B(LdaSmi), I8(20),
/* 50 E> */ B(CallRuntime), U16(Runtime::kThrowConstAssignError), R(0), U8(0), /* 50 E> */ B(CallRuntime), U16(Runtime::kThrowConstAssignError), R(0), U8(0),
B(LdaUndefined), B(LdaUndefined),
/* 56 S> */ B(Return), /* 56 S> */ B(Return),
......
...@@ -94,7 +94,7 @@ snippet: " ...@@ -94,7 +94,7 @@ snippet: "
" "
frame size: 2 frame size: 2
parameter count: 1 parameter count: 1
bytecode array length: 26 bytecode array length: 27
bytecodes: [ bytecodes: [
B(CreateFunctionContext), U8(1), B(CreateFunctionContext), U8(1),
B(PushContext), R(1), B(PushContext), R(1),
...@@ -105,7 +105,7 @@ bytecodes: [ ...@@ -105,7 +105,7 @@ bytecodes: [
/* 30 E> */ B(StackCheck), /* 30 E> */ B(StackCheck),
/* 44 S> */ B(LdaSmi), I8(10), /* 44 S> */ B(LdaSmi), I8(10),
/* 44 E> */ B(StaCurrentContextSlot), U8(4), /* 44 E> */ B(StaCurrentContextSlot), U8(4),
/* 48 S> */ B(Nop), /* 48 S> */ B(LdaSmi), I8(20),
/* 50 E> */ B(CallRuntime), U16(Runtime::kThrowConstAssignError), R(0), U8(0), /* 50 E> */ B(CallRuntime), U16(Runtime::kThrowConstAssignError), R(0), U8(0),
B(LdaUndefined), B(LdaUndefined),
/* 82 S> */ B(Return), /* 82 S> */ B(Return),
......
...@@ -62,7 +62,7 @@ bytecodes: [ ...@@ -62,7 +62,7 @@ bytecodes: [
B(Star), R(0), B(Star), R(0),
B(Star), R(1), B(Star), R(1),
/* 74 S> */ B(Jump), U8(2), /* 74 S> */ B(Jump), U8(2),
/* 84 S> */ B(LdaUndefined), B(LdaUndefined),
/* 94 S> */ B(Return), /* 94 S> */ B(Return),
] ]
constant pool: [ constant pool: [
......
...@@ -116,7 +116,9 @@ v8_executable("unittests") { ...@@ -116,7 +116,9 @@ v8_executable("unittests") {
"interpreter/bytecode-array-builder-unittest.cc", "interpreter/bytecode-array-builder-unittest.cc",
"interpreter/bytecode-array-iterator-unittest.cc", "interpreter/bytecode-array-iterator-unittest.cc",
"interpreter/bytecode-array-random-iterator-unittest.cc", "interpreter/bytecode-array-random-iterator-unittest.cc",
"interpreter/bytecode-array-writer-unittest.cc",
"interpreter/bytecode-decoder-unittest.cc", "interpreter/bytecode-decoder-unittest.cc",
"interpreter/bytecode-node-unittest.cc",
"interpreter/bytecode-operands-unittest.cc", "interpreter/bytecode-operands-unittest.cc",
"interpreter/bytecode-register-allocator-unittest.cc", "interpreter/bytecode-register-allocator-unittest.cc",
"interpreter/bytecode-register-optimizer-unittest.cc", "interpreter/bytecode-register-optimizer-unittest.cc",
......
...@@ -11,7 +11,6 @@ ...@@ -11,7 +11,6 @@
#include "src/interpreter/bytecode-label.h" #include "src/interpreter/bytecode-label.h"
#include "src/interpreter/bytecode-register-allocator.h" #include "src/interpreter/bytecode-register-allocator.h"
#include "src/objects-inl.h" #include "src/objects-inl.h"
#include "test/unittests/interpreter/bytecode-utils.h"
#include "test/unittests/test-utils.h" #include "test/unittests/test-utils.h"
namespace v8 { namespace v8 {
...@@ -22,22 +21,6 @@ class BytecodeArrayBuilderTest : public TestWithIsolateAndZone { ...@@ -22,22 +21,6 @@ class BytecodeArrayBuilderTest : public TestWithIsolateAndZone {
public: public:
BytecodeArrayBuilderTest() {} BytecodeArrayBuilderTest() {}
~BytecodeArrayBuilderTest() override {} ~BytecodeArrayBuilderTest() override {}
const ZoneVector<unsigned char>* GetBytecodes(
const BytecodeArrayBuilder& builder) {
return builder.bytecodes();
}
// Helper methods to carefully control the source positions of a builder.
// These rely on setting an internal field of the BytecodeArrayBuilder, to
// avoid exposing builder externals outside this test.
void SetCurrentSourcePosition(BytecodeArrayBuilder& builder, int pos,
bool is_statement) {
builder.set_latest_source_info(BytecodeSourceInfo(pos, is_statement));
}
void ClearCurrentSourcePosition(BytecodeArrayBuilder& builder) {
builder.set_latest_source_info(BytecodeSourceInfo());
}
}; };
using ToBooleanMode = BytecodeArrayBuilder::ToBooleanMode; using ToBooleanMode = BytecodeArrayBuilder::ToBooleanMode;
...@@ -862,291 +845,6 @@ TEST_F(BytecodeArrayBuilderTest, LabelAddressReuse) { ...@@ -862,291 +845,6 @@ TEST_F(BytecodeArrayBuilderTest, LabelAddressReuse) {
CHECK(iterator.done()); CHECK(iterator.done());
} }
TEST_F(BytecodeArrayBuilderTest, SimpleExample) {
SaveFlags save_flags;
FLAG_ignition_reo = false;
BytecodeArrayBuilder builder(isolate(), zone(), 0, 201);
CHECK_EQ(GetBytecodes(builder)->size(), 0u);
builder.StackCheck(10);
CHECK_EQ(GetBytecodes(builder)->size(), 1u);
SetCurrentSourcePosition(builder, 55, true);
builder.LoadLiteral(Smi::FromInt(127));
CHECK_EQ(GetBytecodes(builder)->size(), 3u);
builder.StoreAccumulatorInRegister(Register(20));
CHECK_EQ(GetBytecodes(builder)->size(), 5u);
builder.LoadAccumulatorWithRegister(Register(200));
CHECK_EQ(GetBytecodes(builder)->size(), 9u);
SetCurrentSourcePosition(builder, 70, true);
builder.Return();
CHECK_EQ(GetBytecodes(builder)->size(), 10u);
static const uint8_t expected_bytes[] = {
// clang-format off
/* 0 10 E> */ B(StackCheck),
/* 1 55 S> */ B(LdaSmi), U8(127),
/* 3 */ B(Star), R8(20),
/* 5 */ B(Wide), B(Ldar), R16(200),
/* 9 70 S> */ B(Return),
// clang-format on
};
CHECK_EQ(GetBytecodes(builder)->size(), arraysize(expected_bytes));
for (size_t i = 0; i < arraysize(expected_bytes); ++i) {
CHECK_EQ(GetBytecodes(builder)->at(i), expected_bytes[i]);
}
Handle<BytecodeArray> bytecode_array = builder.ToBytecodeArray(isolate());
CHECK_EQ(GetBytecodes(builder)->size(), arraysize(expected_bytes));
PositionTableEntry expected_positions[] = {
{0, 10, false}, {1, 55, true}, {9, 70, true}};
SourcePositionTableIterator source_iterator(
bytecode_array->SourcePositionTable());
for (size_t i = 0; i < arraysize(expected_positions); ++i) {
const PositionTableEntry& expected = expected_positions[i];
CHECK_EQ(source_iterator.code_offset(), expected.code_offset);
CHECK_EQ(source_iterator.source_position().ScriptOffset(),
expected.source_position);
CHECK_EQ(source_iterator.is_statement(), expected.is_statement);
source_iterator.Advance();
}
CHECK(source_iterator.done());
}
TEST_F(BytecodeArrayBuilderTest, ComplexExample) {
SaveFlags save_flags;
FLAG_ignition_reo = false;
BytecodeArrayBuilder builder(isolate(), zone(), 0, 8);
static const uint8_t expected_bytes[] = {
// clang-format off
/* 0 30 E> */ B(StackCheck),
/* 1 42 S> */ B(LdaConstant), U8(0),
/* 3 42 E> */ B(Add), R8(1), U8(1),
/* 5 68 S> */ B(JumpIfUndefined), U8(39),
/* 7 */ B(JumpIfNull), U8(37),
/* 9 */ B(ToObject), R8(3),
/* 11 */ B(ForInPrepare), R8(3), R8(4),
/* 14 */ B(LdaZero),
/* 15 */ B(Star), R8(7),
/* 17 63 S> */ B(ForInContinue), R8(7), R8(6),
/* 20 */ B(JumpIfFalse), U8(24),
/* 22 */ B(ForInNext), R8(3), R8(7), R8(4), U8(1),
/* 27 */ B(JumpIfUndefined), U8(10),
/* 29 */ B(Star), R8(0),
/* 31 54 E> */ B(StackCheck),
/* 32 */ B(Ldar), R8(0),
/* 34 */ B(Star), R8(2),
/* 36 85 S> */ B(Return),
/* 37 */ B(ForInStep), R8(7),
/* 39 */ B(Star), R8(7),
/* 41 */ B(JumpLoop), U8(24), U8(0),
/* 44 */ B(LdaUndefined),
/* 45 85 S> */ B(Return),
// clang-format on
};
static const PositionTableEntry expected_positions[] = {
{0, 30, false}, {1, 42, true}, {3, 42, false}, {6, 68, true},
{18, 63, true}, {32, 54, false}, {37, 85, true}, {46, 85, true}};
BytecodeLabel back_jump, jump_for_in, jump_end_1, jump_end_2, jump_end_3;
builder.StackCheck(30);
SetCurrentSourcePosition(builder, 42, true);
builder.LoadConstantPoolEntry(0);
SetCurrentSourcePosition(builder, 42, false);
builder.BinaryOperation(Token::ADD, Register(1), 1);
SetCurrentSourcePosition(builder, 68, true);
builder.JumpIfUndefined(&jump_end_1);
builder.JumpIfNull(&jump_end_2);
builder.ToObject(Register(3));
builder.ForInPrepare(Register(3), RegisterList(4, 3));
builder.LoadLiteral(Smi::kZero);
builder.StoreAccumulatorInRegister(Register(7));
builder.Bind(&back_jump);
SetCurrentSourcePosition(builder, 63, true);
builder.ForInContinue(Register(7), Register(6));
builder.JumpIfFalse(ToBooleanMode::kAlreadyBoolean, &jump_end_3);
builder.ForInNext(Register(3), Register(7), RegisterList(4, 2), 1);
builder.JumpIfUndefined(&jump_for_in);
builder.StoreAccumulatorInRegister(Register(0));
builder.StackCheck(54);
builder.LoadAccumulatorWithRegister(Register(0));
builder.StoreAccumulatorInRegister(Register(2));
SetCurrentSourcePosition(builder, 85, true);
builder.Return();
builder.Bind(&jump_for_in);
builder.ForInStep(Register(7));
builder.StoreAccumulatorInRegister(Register(7));
builder.JumpLoop(&back_jump, 0);
builder.Bind(&jump_end_1);
builder.Bind(&jump_end_2);
builder.Bind(&jump_end_3);
builder.LoadUndefined();
SetCurrentSourcePosition(builder, 85, true);
builder.Return();
CHECK_EQ(GetBytecodes(builder)->size(), arraysize(expected_bytes));
for (size_t i = 0; i < arraysize(expected_bytes); ++i) {
CHECK_EQ(static_cast<int>(GetBytecodes(builder)->at(i)),
static_cast<int>(expected_bytes[i]));
}
Handle<BytecodeArray> bytecode_array = builder.ToBytecodeArray(isolate());
SourcePositionTableIterator source_iterator(
bytecode_array->SourcePositionTable());
for (size_t i = 0; i < arraysize(expected_positions); ++i) {
const PositionTableEntry& expected = expected_positions[i];
CHECK_EQ(source_iterator.code_offset(), expected.code_offset);
CHECK_EQ(source_iterator.source_position().ScriptOffset(),
expected.source_position);
CHECK_EQ(source_iterator.is_statement(), expected.is_statement);
source_iterator.Advance();
}
CHECK(source_iterator.done());
}
TEST_F(BytecodeArrayBuilderTest, ElideNoneffectfulBytecodes) {
if (!i::FLAG_ignition_elide_noneffectful_bytecodes) return;
SaveFlags save_flags;
FLAG_ignition_reo = false;
BytecodeArrayBuilder builder(isolate(), zone(), 0, 21);
static const uint8_t expected_bytes[] = {
// clang-format off
/* 0 10 E> */ B(StackCheck),
/* 1 55 S> */ B(Ldar), R8(20),
/* 3 */ B(Star), R8(20),
/* 5 */ B(CreateMappedArguments),
/* 6 60 S> */ B(Nop),
/* 8 70 S> */ B(Ldar), R8(20),
/* 10 75 S> */ B(Return),
// clang-format on
};
static const PositionTableEntry expected_positions[] = {{0, 10, false},
{1, 55, true},
{6, 60, true},
{7, 70, true},
{9, 75, true}};
builder.StackCheck(10);
SetCurrentSourcePosition(builder, 55, true);
builder.LoadLiteral(Smi::FromInt(127)); // Should be elided.
builder.LoadAccumulatorWithRegister(Register(20));
builder.StoreAccumulatorInRegister(Register(20));
builder.LoadAccumulatorWithRegister(Register(20)); // Should be elided.
builder.CreateArguments(CreateArgumentsType::kMappedArguments);
SetCurrentSourcePosition(builder, 60, true);
builder.LoadLiteral(
Smi::FromInt(127)); // Replaced with nop due to source info.
SetCurrentSourcePosition(builder, 70, true);
builder.LoadAccumulatorWithRegister(Register(20));
SetCurrentSourcePosition(builder, 75, true);
builder.Return();
CHECK_EQ(GetBytecodes(builder)->size(), arraysize(expected_bytes));
for (size_t i = 0; i < arraysize(expected_bytes); ++i) {
CHECK_EQ(static_cast<int>(GetBytecodes(builder)->at(i)),
static_cast<int>(expected_bytes[i]));
}
Handle<BytecodeArray> bytecode_array = builder.ToBytecodeArray(isolate());
SourcePositionTableIterator source_iterator(
bytecode_array->SourcePositionTable());
for (size_t i = 0; i < arraysize(expected_positions); ++i) {
const PositionTableEntry& expected = expected_positions[i];
CHECK_EQ(source_iterator.code_offset(), expected.code_offset);
CHECK_EQ(source_iterator.source_position().ScriptOffset(),
expected.source_position);
CHECK_EQ(source_iterator.is_statement(), expected.is_statement);
source_iterator.Advance();
}
CHECK(source_iterator.done());
}
TEST_F(BytecodeArrayBuilderTest, DeadcodeElimination) {
SaveFlags save_flags;
FLAG_ignition_reo = false;
BytecodeArrayBuilder builder(isolate(), zone(), 0, 0);
static const uint8_t expected_bytes[] = {
// clang-format off
/* 0 10 E> */ B(StackCheck),
/* 1 55 S> */ B(LdaSmi), U8(127),
/* 3 */ B(Jump), U8(2),
/* 5 65 S> */ B(LdaSmi), U8(127),
/* 7 */ B(JumpIfFalse), U8(3),
/* 9 75 S> */ B(Return),
/* 10 */ B(JumpIfFalse), U8(3),
/* 12 */ B(Throw),
/* 13 */ B(JumpIfFalse), U8(3),
/* 15 */ B(ReThrow),
/* 16 */ B(Return),
// clang-format on
};
static const PositionTableEntry expected_positions[] = {
{0, 10, false}, {1, 55, true}, {5, 65, true}, {9, 75, true}};
BytecodeLabel after_jump, after_conditional_jump, after_return, after_throw,
after_rethrow;
builder.StackCheck(10);
SetCurrentSourcePosition(builder, 55, true);
builder.LoadLiteral(Smi::FromInt(127));
builder.Jump(&after_jump);
builder.LoadLiteral(Smi::FromInt(127)); // Dead code.
builder.JumpIfFalse(ToBooleanMode::kAlreadyBoolean,
&after_conditional_jump); // Dead code.
builder.Bind(&after_jump);
builder.Bind(&after_conditional_jump);
SetCurrentSourcePosition(builder, 65, true);
builder.LoadLiteral(Smi::FromInt(127));
builder.JumpIfFalse(ToBooleanMode::kAlreadyBoolean, &after_return);
SetCurrentSourcePosition(builder, 75, true);
builder.Return();
SetCurrentSourcePosition(builder, 100, true);
builder.LoadLiteral(Smi::FromInt(127)); // Dead code.
ClearCurrentSourcePosition(builder);
builder.Bind(&after_return);
builder.JumpIfFalse(ToBooleanMode::kAlreadyBoolean, &after_throw);
builder.Throw();
builder.LoadLiteral(Smi::FromInt(127)); // Dead code.
builder.Bind(&after_throw);
builder.JumpIfFalse(ToBooleanMode::kAlreadyBoolean, &after_rethrow);
builder.ReThrow();
builder.LoadLiteral(Smi::FromInt(127)); // Dead code.
builder.Bind(&after_rethrow);
builder.Return();
CHECK_EQ(GetBytecodes(builder)->size(), arraysize(expected_bytes));
for (size_t i = 0; i < arraysize(expected_bytes); ++i) {
CHECK_EQ(static_cast<int>(GetBytecodes(builder)->at(i)),
static_cast<int>(expected_bytes[i]));
}
Handle<BytecodeArray> bytecode_array = builder.ToBytecodeArray(isolate());
SourcePositionTableIterator source_iterator(
bytecode_array->SourcePositionTable());
for (size_t i = 0; i < arraysize(expected_positions); ++i) {
const PositionTableEntry& expected = expected_positions[i];
CHECK_EQ(source_iterator.code_offset(), expected.code_offset);
CHECK_EQ(source_iterator.source_position().ScriptOffset(),
expected.source_position);
CHECK_EQ(source_iterator.is_statement(), expected.is_statement);
source_iterator.Advance();
}
CHECK(source_iterator.done());
}
} // namespace interpreter } // namespace interpreter
} // namespace internal } // namespace internal
} // namespace v8 } // namespace v8
// Copyright 2016 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/v8.h"
#include "src/api.h"
#include "src/factory.h"
#include "src/interpreter/bytecode-array-writer.h"
#include "src/interpreter/bytecode-label.h"
#include "src/interpreter/bytecode-node.h"
#include "src/interpreter/bytecode-register.h"
#include "src/interpreter/bytecode-source-info.h"
#include "src/interpreter/constant-array-builder.h"
#include "src/isolate.h"
#include "src/objects-inl.h"
#include "src/source-position-table.h"
#include "src/utils.h"
#include "test/unittests/interpreter/bytecode-utils.h"
#include "test/unittests/test-utils.h"
namespace v8 {
namespace internal {
namespace interpreter {
#define R(i) static_cast<uint32_t>(Register(i).ToOperand())
class BytecodeArrayWriterUnittest : public TestWithIsolateAndZone {
public:
BytecodeArrayWriterUnittest()
: constant_array_builder_(zone()),
bytecode_array_writer_(
zone(), &constant_array_builder_,
SourcePositionTableBuilder::RECORD_SOURCE_POSITIONS) {}
~BytecodeArrayWriterUnittest() override {}
void Write(Bytecode bytecode, BytecodeSourceInfo info = BytecodeSourceInfo());
void Write(Bytecode bytecode, uint32_t operand0,
BytecodeSourceInfo info = BytecodeSourceInfo());
void Write(Bytecode bytecode, uint32_t operand0, uint32_t operand1,
BytecodeSourceInfo info = BytecodeSourceInfo());
void Write(Bytecode bytecode, uint32_t operand0, uint32_t operand1,
uint32_t operand2, BytecodeSourceInfo info = BytecodeSourceInfo());
void Write(Bytecode bytecode, uint32_t operand0, uint32_t operand1,
uint32_t operand2, uint32_t operand3,
BytecodeSourceInfo info = BytecodeSourceInfo());
void WriteJump(Bytecode bytecode, BytecodeLabel* label,
BytecodeSourceInfo info = BytecodeSourceInfo());
void WriteJumpLoop(Bytecode bytecode, BytecodeLabel* label, int depth,
BytecodeSourceInfo info = BytecodeSourceInfo());
BytecodeArrayWriter* writer() { return &bytecode_array_writer_; }
ZoneVector<unsigned char>* bytecodes() { return writer()->bytecodes(); }
SourcePositionTableBuilder* source_position_table_builder() {
return writer()->source_position_table_builder();
}
private:
ConstantArrayBuilder constant_array_builder_;
BytecodeArrayWriter bytecode_array_writer_;
};
void BytecodeArrayWriterUnittest::Write(Bytecode bytecode,
BytecodeSourceInfo info) {
BytecodeNode node(bytecode, info);
writer()->Write(&node);
}
void BytecodeArrayWriterUnittest::Write(Bytecode bytecode, uint32_t operand0,
BytecodeSourceInfo info) {
BytecodeNode node(bytecode, operand0, info);
writer()->Write(&node);
}
void BytecodeArrayWriterUnittest::Write(Bytecode bytecode, uint32_t operand0,
uint32_t operand1,
BytecodeSourceInfo info) {
BytecodeNode node(bytecode, operand0, operand1, info);
writer()->Write(&node);
}
void BytecodeArrayWriterUnittest::Write(Bytecode bytecode, uint32_t operand0,
uint32_t operand1, uint32_t operand2,
BytecodeSourceInfo info) {
BytecodeNode node(bytecode, operand0, operand1, operand2, info);
writer()->Write(&node);
}
void BytecodeArrayWriterUnittest::Write(Bytecode bytecode, uint32_t operand0,
uint32_t operand1, uint32_t operand2,
uint32_t operand3,
BytecodeSourceInfo info) {
BytecodeNode node(bytecode, operand0, operand1, operand2, operand3, info);
writer()->Write(&node);
}
void BytecodeArrayWriterUnittest::WriteJump(Bytecode bytecode,
BytecodeLabel* label,
BytecodeSourceInfo info) {
BytecodeNode node(bytecode, 0, info);
writer()->WriteJump(&node, label);
}
void BytecodeArrayWriterUnittest::WriteJumpLoop(Bytecode bytecode,
BytecodeLabel* label, int depth,
BytecodeSourceInfo info) {
BytecodeNode node(bytecode, 0, depth, info);
writer()->WriteJump(&node, label);
}
TEST_F(BytecodeArrayWriterUnittest, SimpleExample) {
CHECK_EQ(bytecodes()->size(), 0u);
Write(Bytecode::kStackCheck, {10, false});
CHECK_EQ(bytecodes()->size(), 1u);
Write(Bytecode::kLdaSmi, 127, {55, true});
CHECK_EQ(bytecodes()->size(), 3u);
Write(Bytecode::kStar, Register(20).ToOperand());
CHECK_EQ(bytecodes()->size(), 5u);
Write(Bytecode::kLdar, Register(200).ToOperand());
CHECK_EQ(bytecodes()->size(), 9u);
Write(Bytecode::kReturn, {70, true});
CHECK_EQ(bytecodes()->size(), 10u);
static const uint8_t expected_bytes[] = {
// clang-format off
/* 0 10 E> */ B(StackCheck),
/* 1 55 S> */ B(LdaSmi), U8(127),
/* 3 */ B(Star), R8(20),
/* 5 */ B(Wide), B(Ldar), R16(200),
/* 9 70 S> */ B(Return),
// clang-format on
};
CHECK_EQ(bytecodes()->size(), arraysize(expected_bytes));
for (size_t i = 0; i < arraysize(expected_bytes); ++i) {
CHECK_EQ(bytecodes()->at(i), expected_bytes[i]);
}
Handle<BytecodeArray> bytecode_array = writer()->ToBytecodeArray(
isolate(), 0, 0, factory()->empty_fixed_array());
CHECK_EQ(bytecodes()->size(), arraysize(expected_bytes));
PositionTableEntry expected_positions[] = {
{0, 10, false}, {1, 55, true}, {9, 70, true}};
SourcePositionTableIterator source_iterator(
bytecode_array->SourcePositionTable());
for (size_t i = 0; i < arraysize(expected_positions); ++i) {
const PositionTableEntry& expected = expected_positions[i];
CHECK_EQ(source_iterator.code_offset(), expected.code_offset);
CHECK_EQ(source_iterator.source_position().ScriptOffset(),
expected.source_position);
CHECK_EQ(source_iterator.is_statement(), expected.is_statement);
source_iterator.Advance();
}
CHECK(source_iterator.done());
}
TEST_F(BytecodeArrayWriterUnittest, ComplexExample) {
static const uint8_t expected_bytes[] = {
// clang-format off
/* 0 30 E> */ B(StackCheck),
/* 1 42 S> */ B(LdaConstant), U8(0),
/* 3 42 E> */ B(Add), R8(1), U8(1),
/* 5 68 S> */ B(JumpIfUndefined), U8(39),
/* 7 */ B(JumpIfNull), U8(37),
/* 9 */ B(ToObject), R8(3),
/* 11 */ B(ForInPrepare), R8(3), R8(4),
/* 14 */ B(LdaZero),
/* 15 */ B(Star), R8(7),
/* 17 63 S> */ B(ForInContinue), R8(7), R8(6),
/* 20 */ B(JumpIfFalse), U8(24),
/* 22 */ B(ForInNext), R8(3), R8(7), R8(4), U8(1),
/* 27 */ B(JumpIfUndefined), U8(10),
/* 29 */ B(Star), R8(0),
/* 31 54 E> */ B(StackCheck),
/* 32 */ B(Ldar), R8(0),
/* 34 */ B(Star), R8(2),
/* 36 85 S> */ B(Return),
/* 37 */ B(ForInStep), R8(7),
/* 39 */ B(Star), R8(7),
/* 41 */ B(JumpLoop), U8(24), U8(0),
/* 44 */ B(LdaUndefined),
/* 45 85 S> */ B(Return),
// clang-format on
};
static const PositionTableEntry expected_positions[] = {
{0, 30, false}, {1, 42, true}, {3, 42, false}, {6, 68, true},
{18, 63, true}, {32, 54, false}, {37, 85, true}, {46, 85, true}};
BytecodeLabel back_jump, jump_for_in, jump_end_1, jump_end_2, jump_end_3;
Write(Bytecode::kStackCheck, {30, false});
Write(Bytecode::kLdaConstant, U8(0), {42, true});
Write(Bytecode::kAdd, R(1), U8(1), {42, false});
WriteJump(Bytecode::kJumpIfUndefined, &jump_end_1, {68, true});
WriteJump(Bytecode::kJumpIfNull, &jump_end_2);
Write(Bytecode::kToObject, R(3));
Write(Bytecode::kForInPrepare, R(3), R(4));
Write(Bytecode::kLdaZero);
Write(Bytecode::kStar, R(7));
writer()->BindLabel(&back_jump);
Write(Bytecode::kForInContinue, R(7), R(6), {63, true});
WriteJump(Bytecode::kJumpIfFalse, &jump_end_3);
Write(Bytecode::kForInNext, R(3), R(7), R(4), U8(1));
WriteJump(Bytecode::kJumpIfUndefined, &jump_for_in);
Write(Bytecode::kStar, R(0));
Write(Bytecode::kStackCheck, {54, false});
Write(Bytecode::kLdar, R(0));
Write(Bytecode::kStar, R(2));
Write(Bytecode::kReturn, {85, true});
writer()->BindLabel(&jump_for_in);
Write(Bytecode::kForInStep, R(7));
Write(Bytecode::kStar, R(7));
WriteJumpLoop(Bytecode::kJumpLoop, &back_jump, 0);
writer()->BindLabel(&jump_end_1);
writer()->BindLabel(&jump_end_2);
writer()->BindLabel(&jump_end_3);
Write(Bytecode::kLdaUndefined);
Write(Bytecode::kReturn, {85, true});
CHECK_EQ(bytecodes()->size(), arraysize(expected_bytes));
for (size_t i = 0; i < arraysize(expected_bytes); ++i) {
CHECK_EQ(static_cast<int>(bytecodes()->at(i)),
static_cast<int>(expected_bytes[i]));
}
Handle<BytecodeArray> bytecode_array = writer()->ToBytecodeArray(
isolate(), 0, 0, factory()->empty_fixed_array());
SourcePositionTableIterator source_iterator(
bytecode_array->SourcePositionTable());
for (size_t i = 0; i < arraysize(expected_positions); ++i) {
const PositionTableEntry& expected = expected_positions[i];
CHECK_EQ(source_iterator.code_offset(), expected.code_offset);
CHECK_EQ(source_iterator.source_position().ScriptOffset(),
expected.source_position);
CHECK_EQ(source_iterator.is_statement(), expected.is_statement);
source_iterator.Advance();
}
CHECK(source_iterator.done());
}
TEST_F(BytecodeArrayWriterUnittest, ElideNoneffectfulBytecodes) {
if (!i::FLAG_ignition_elide_noneffectful_bytecodes) return;
static const uint8_t expected_bytes[] = {
// clang-format off
/* 0 10 E> */ B(StackCheck),
/* 1 55 S> */ B(Ldar), R8(20),
/* 3 */ B(Star), R8(20),
/* 5 */ B(CreateMappedArguments),
/* 6 60 S> */ B(LdaSmi), U8(127),
/* 8 70 S> */ B(Ldar), R8(20),
/* 10 75 S> */ B(Return),
// clang-format on
};
static const PositionTableEntry expected_positions[] = {{0, 10, false},
{1, 55, true},
{6, 60, false},
{8, 70, true},
{10, 75, true}};
Write(Bytecode::kStackCheck, {10, false});
Write(Bytecode::kLdaSmi, 127, {55, true}); // Should be elided.
Write(Bytecode::kLdar, Register(20).ToOperand());
Write(Bytecode::kStar, Register(20).ToOperand());
Write(Bytecode::kLdar, Register(20).ToOperand()); // Should be elided.
Write(Bytecode::kCreateMappedArguments);
Write(Bytecode::kLdaSmi, 127, {60, false}); // Not elided due to source info.
Write(Bytecode::kLdar, Register(20).ToOperand(), {70, true});
Write(Bytecode::kReturn, {75, true});
CHECK_EQ(bytecodes()->size(), arraysize(expected_bytes));
for (size_t i = 0; i < arraysize(expected_bytes); ++i) {
CHECK_EQ(static_cast<int>(bytecodes()->at(i)),
static_cast<int>(expected_bytes[i]));
}
Handle<BytecodeArray> bytecode_array = writer()->ToBytecodeArray(
isolate(), 0, 0, factory()->empty_fixed_array());
SourcePositionTableIterator source_iterator(
bytecode_array->SourcePositionTable());
for (size_t i = 0; i < arraysize(expected_positions); ++i) {
const PositionTableEntry& expected = expected_positions[i];
CHECK_EQ(source_iterator.code_offset(), expected.code_offset);
CHECK_EQ(source_iterator.source_position().ScriptOffset(),
expected.source_position);
CHECK_EQ(source_iterator.is_statement(), expected.is_statement);
source_iterator.Advance();
}
CHECK(source_iterator.done());
}
TEST_F(BytecodeArrayWriterUnittest, DeadcodeElimination) {
static const uint8_t expected_bytes[] = {
// clang-format off
/* 0 10 E> */ B(StackCheck),
/* 1 55 S> */ B(LdaSmi), U8(127),
/* 3 */ B(Jump), U8(2),
/* 5 65 S> */ B(LdaSmi), U8(127),
/* 7 */ B(JumpIfFalse), U8(3),
/* 9 75 S> */ B(Return),
/* 10 */ B(JumpIfFalse), U8(3),
/* 12 */ B(Throw),
/* 13 */ B(JumpIfFalse), U8(3),
/* 15 */ B(ReThrow),
/* 16 */ B(Return),
// clang-format on
};
static const PositionTableEntry expected_positions[] = {
{0, 10, false}, {1, 55, true}, {5, 65, true}, {9, 75, true}};
BytecodeLabel after_jump, after_conditional_jump, after_return, after_throw,
after_rethrow;
Write(Bytecode::kStackCheck, {10, false});
Write(Bytecode::kLdaSmi, 127, {55, true});
WriteJump(Bytecode::kJump, &after_jump);
Write(Bytecode::kLdaSmi, 127); // Dead code.
WriteJump(Bytecode::kJumpIfFalse, &after_conditional_jump); // Dead code.
writer()->BindLabel(&after_jump);
writer()->BindLabel(&after_conditional_jump);
Write(Bytecode::kLdaSmi, 127, {65, true});
WriteJump(Bytecode::kJumpIfFalse, &after_return);
Write(Bytecode::kReturn, {75, true});
Write(Bytecode::kLdaSmi, 127, {100, true}); // Dead code.
writer()->BindLabel(&after_return);
WriteJump(Bytecode::kJumpIfFalse, &after_throw);
Write(Bytecode::kThrow);
Write(Bytecode::kLdaSmi, 127); // Dead code.
writer()->BindLabel(&after_throw);
WriteJump(Bytecode::kJumpIfFalse, &after_rethrow);
Write(Bytecode::kReThrow);
Write(Bytecode::kLdaSmi, 127); // Dead code.
writer()->BindLabel(&after_rethrow);
Write(Bytecode::kReturn);
CHECK_EQ(bytecodes()->size(), arraysize(expected_bytes));
for (size_t i = 0; i < arraysize(expected_bytes); ++i) {
CHECK_EQ(static_cast<int>(bytecodes()->at(i)),
static_cast<int>(expected_bytes[i]));
}
Handle<BytecodeArray> bytecode_array = writer()->ToBytecodeArray(
isolate(), 0, 0, factory()->empty_fixed_array());
SourcePositionTableIterator source_iterator(
bytecode_array->SourcePositionTable());
for (size_t i = 0; i < arraysize(expected_positions); ++i) {
const PositionTableEntry& expected = expected_positions[i];
CHECK_EQ(source_iterator.code_offset(), expected.code_offset);
CHECK_EQ(source_iterator.source_position().ScriptOffset(),
expected.source_position);
CHECK_EQ(source_iterator.is_statement(), expected.is_statement);
source_iterator.Advance();
}
CHECK(source_iterator.done());
}
} // namespace interpreter
} // namespace internal
} // namespace v8
// Copyright 2016 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/v8.h"
#include "src/interpreter/bytecode-node.h"
#include "test/unittests/test-utils.h"
namespace v8 {
namespace internal {
namespace interpreter {
using BytecodeNodeTest = TestWithIsolateAndZone;
TEST_F(BytecodeNodeTest, Constructor1) {
BytecodeNode node(Bytecode::kLdaZero);
CHECK_EQ(node.bytecode(), Bytecode::kLdaZero);
CHECK_EQ(node.operand_count(), 0);
CHECK(!node.source_info().is_valid());
}
TEST_F(BytecodeNodeTest, Constructor2) {
uint32_t operands[] = {0x11};
BytecodeNode node(Bytecode::kJumpIfTrue, operands[0]);
CHECK_EQ(node.bytecode(), Bytecode::kJumpIfTrue);
CHECK_EQ(node.operand_count(), 1);
CHECK_EQ(node.operand(0), operands[0]);
CHECK(!node.source_info().is_valid());
}
TEST_F(BytecodeNodeTest, Constructor3) {
uint32_t operands[] = {0x11, 0x22};
BytecodeNode node(Bytecode::kLdaGlobal, operands[0], operands[1]);
CHECK_EQ(node.bytecode(), Bytecode::kLdaGlobal);
CHECK_EQ(node.operand_count(), 2);
CHECK_EQ(node.operand(0), operands[0]);
CHECK_EQ(node.operand(1), operands[1]);
CHECK(!node.source_info().is_valid());
}
TEST_F(BytecodeNodeTest, Constructor4) {
uint32_t operands[] = {0x11, 0x22, 0x33};
BytecodeNode node(Bytecode::kLdaNamedProperty, operands[0], operands[1],
operands[2]);
CHECK_EQ(node.operand_count(), 3);
CHECK_EQ(node.bytecode(), Bytecode::kLdaNamedProperty);
CHECK_EQ(node.operand(0), operands[0]);
CHECK_EQ(node.operand(1), operands[1]);
CHECK_EQ(node.operand(2), operands[2]);
CHECK(!node.source_info().is_valid());
}
TEST_F(BytecodeNodeTest, Constructor5) {
uint32_t operands[] = {0x71, 0xa5, 0x5a, 0xfc};
BytecodeNode node(Bytecode::kForInNext, operands[0], operands[1], operands[2],
operands[3]);
CHECK_EQ(node.operand_count(), 4);
CHECK_EQ(node.bytecode(), Bytecode::kForInNext);
CHECK_EQ(node.operand(0), operands[0]);
CHECK_EQ(node.operand(1), operands[1]);
CHECK_EQ(node.operand(2), operands[2]);
CHECK_EQ(node.operand(3), operands[3]);
CHECK(!node.source_info().is_valid());
}
TEST_F(BytecodeNodeTest, Equality) {
uint32_t operands[] = {0x71, 0xa5, 0x5a, 0xfc};
BytecodeNode node(Bytecode::kForInNext, operands[0], operands[1], operands[2],
operands[3]);
CHECK_EQ(node, node);
BytecodeNode other(Bytecode::kForInNext, operands[0], operands[1],
operands[2], operands[3]);
CHECK_EQ(node, other);
}
TEST_F(BytecodeNodeTest, EqualityWithSourceInfo) {
uint32_t operands[] = {0x71, 0xa5, 0x5a, 0xfc};
BytecodeSourceInfo first_source_info(3, true);
BytecodeNode node(Bytecode::kForInNext, operands[0], operands[1], operands[2],
operands[3], first_source_info);
CHECK_EQ(node, node);
BytecodeSourceInfo second_source_info(3, true);
BytecodeNode other(Bytecode::kForInNext, operands[0], operands[1],
operands[2], operands[3], second_source_info);
CHECK_EQ(node, other);
}
TEST_F(BytecodeNodeTest, NoEqualityWithDifferentSourceInfo) {
uint32_t operands[] = {0x71, 0xa5, 0x5a, 0xfc};
BytecodeSourceInfo source_info(77, true);
BytecodeNode node(Bytecode::kForInNext, operands[0], operands[1], operands[2],
operands[3], source_info);
BytecodeNode other(Bytecode::kForInNext, operands[0], operands[1],
operands[2], operands[3]);
CHECK_NE(node, other);
}
} // namespace interpreter
} // namespace internal
} // namespace v8
...@@ -114,7 +114,9 @@ ...@@ -114,7 +114,9 @@
'interpreter/bytecode-array-builder-unittest.cc', 'interpreter/bytecode-array-builder-unittest.cc',
'interpreter/bytecode-array-iterator-unittest.cc', 'interpreter/bytecode-array-iterator-unittest.cc',
'interpreter/bytecode-array-random-iterator-unittest.cc', 'interpreter/bytecode-array-random-iterator-unittest.cc',
'interpreter/bytecode-array-writer-unittest.cc',
'interpreter/bytecode-decoder-unittest.cc', 'interpreter/bytecode-decoder-unittest.cc',
'interpreter/bytecode-node-unittest.cc',
'interpreter/bytecode-operands-unittest.cc', 'interpreter/bytecode-operands-unittest.cc',
'interpreter/bytecode-register-allocator-unittest.cc', 'interpreter/bytecode-register-allocator-unittest.cc',
'interpreter/bytecode-register-optimizer-unittest.cc', 'interpreter/bytecode-register-optimizer-unittest.cc',
......