Merge from experimental code generator branch to bleeding edge.


git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@1389 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
parent d77a0468
......@@ -39,25 +39,32 @@ SOURCES = {
'bootstrapper.cc', 'builtins.cc', 'checks.cc', 'code-stubs.cc',
'codegen.cc', 'compilation-cache.cc', 'compiler.cc', 'contexts.cc',
'conversions.cc', 'counters.cc', 'dateparser.cc', 'debug.cc',
'disassembler.cc', 'execution.cc', 'factory.cc', 'flags.cc', 'frames.cc',
'global-handles.cc', 'handles.cc', 'hashmap.cc', 'heap.cc', 'ic.cc',
'interpreter-irregexp.cc', 'jsregexp.cc', 'log.cc', 'mark-compact.cc',
'messages.cc', 'objects.cc', 'parser.cc', 'property.cc',
'regexp-macro-assembler.cc', 'regexp-macro-assembler-irregexp.cc',
'regexp-stack.cc', 'rewriter.cc', 'runtime.cc', 'scanner.cc',
'disassembler.cc', 'execution.cc', 'factory.cc', 'flags.cc',
'frames.cc', 'global-handles.cc', 'handles.cc', 'hashmap.cc', 'heap.cc',
'ic.cc', 'interpreter-irregexp.cc', 'jsregexp.cc', 'jump-target.cc',
'log.cc', 'mark-compact.cc', 'messages.cc', 'objects.cc', 'parser.cc',
'property.cc', 'regexp-macro-assembler.cc',
'regexp-macro-assembler-irregexp.cc', 'regexp-stack.cc',
'register-allocator.cc', 'rewriter.cc', 'runtime.cc', 'scanner.cc',
'scopeinfo.cc', 'scopes.cc', 'serialize.cc', 'snapshot-common.cc',
'spaces.cc', 'string-stream.cc', 'stub-cache.cc', 'token.cc', 'top.cc',
'unicode.cc', 'usage-analyzer.cc', 'utils.cc', 'v8-counters.cc', 'v8.cc',
'v8threads.cc', 'variables.cc', 'zone.cc'
'unicode.cc', 'usage-analyzer.cc', 'utils.cc', 'v8-counters.cc',
'v8.cc', 'v8threads.cc', 'variables.cc', 'virtual-frame.cc', 'zone.cc'
],
'arch:arm': [
'assembler-arm.cc', 'builtins-arm.cc', 'codegen-arm.cc', 'cpu-arm.cc',
'disasm-arm.cc', 'debug-arm.cc', 'frames-arm.cc', 'ic-arm.cc',
'jump-target-arm.cc', 'macro-assembler-arm.cc',
'regexp-macro-assembler-arm.cc', 'register-allocator-arm.cc',
'stub-cache-arm.cc', 'virtual-frame-arm.cc'
],
'arch:ia32': [
'assembler-ia32.cc', 'builtins-ia32.cc', 'codegen-ia32.cc',
'cpu-ia32.cc', 'disasm-ia32.cc', 'debug-ia32.cc', 'frames-ia32.cc',
'ic-ia32.cc', 'jump-target-ia32.cc', 'macro-assembler-ia32.cc',
'regexp-macro-assembler-ia32.cc', 'register-allocator-ia32.cc',
'stub-cache-ia32.cc', 'virtual-frame-ia32.cc'
],
'arch:arm': ['assembler-arm.cc', 'builtins-arm.cc', 'codegen-arm.cc',
'cpu-arm.cc', 'debug-arm.cc', 'disasm-arm.cc', 'frames-arm.cc',
'ic-arm.cc', 'macro-assembler-arm.cc', 'regexp-macro-assembler-arm.cc',
'stub-cache-arm.cc'],
'arch:ia32': ['assembler-ia32.cc', 'builtins-ia32.cc', 'codegen-ia32.cc',
'cpu-ia32.cc', 'debug-ia32.cc', 'disasm-ia32.cc', 'frames-ia32.cc',
'ic-ia32.cc', 'macro-assembler-ia32.cc', 'regexp-macro-assembler-ia32.cc',
'stub-cache-ia32.cc'],
'simulator:arm': ['simulator-arm.cc'],
'os:freebsd': ['platform-freebsd.cc'],
'os:linux': ['platform-linux.cc'],
......
......@@ -83,6 +83,8 @@ struct Register {
};
const int kNumRegisters = 16;
extern Register no_reg;
extern Register r0;
extern Register r1;
......@@ -211,6 +213,12 @@ inline Condition ReverseCondition(Condition cc) {
}
// Branch hints are not used on the ARM. They are defined so that they can
// appear in shared function signatures, but will be ignored in ARM
// implementations.
enum Hint { no_hint };
// The pc store offset may be 8 or 12 depending on the processor implementation.
int PcStoreOffset();
......
......@@ -751,6 +751,18 @@ void Assembler::cmov(Condition cc, Register dst, const Operand& src) {
}
void Assembler::xchg(Register dst, Register src) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
if (src.is(eax) || dst.is(eax)) { // Single-byte encoding
EMIT(0x90 | (src.is(eax) ? dst.code() : src.code()));
} else {
EMIT(0x87);
EMIT(0xC0 | src.code() << 3 | dst.code());
}
}
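// A standalone sketch (not V8 code) of the encoding choice above: exchanges
// involving eax use the one-byte 0x90 | reg form, everything else the
// two-byte 0x87 plus a register-to-register ModR/M.
#include <cstdint>
#include <cstdio>
#include <vector>

static std::vector<uint8_t> EncodeXchg(int dst, int src) {  // codes 0..7; eax is 0
  if (src == 0 || dst == 0) {
    return {static_cast<uint8_t>(0x90 | (src == 0 ? dst : src))};
  }
  return {0x87, static_cast<uint8_t>(0xC0 | (src << 3) | dst)};
}

int main() {
  for (uint8_t b : EncodeXchg(0, 2)) printf("%02x ", b);  // 92: xchg eax, edx
  printf("\n");
  for (uint8_t b : EncodeXchg(3, 1)) printf("%02x ", b);  // 87 cb: xchg ebx, ecx
  printf("\n");
  return 0;
}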
void Assembler::adc(Register dst, int32_t imm32) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
......@@ -1847,6 +1859,16 @@ void Assembler::sahf() {
}
void Assembler::setcc(Condition cc, Register reg) {
ASSERT(reg.is_byte_register());
EnsureSpace ensure_space(this);
last_pc_ = pc_;
EMIT(0x0F);
EMIT(0x90 | cc);
EMIT(0xC0 | reg.code());
}
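// A minimal sketch of the three bytes setcc emits: 0x0F, 0x90 | cc, then a
// register ModR/M (0xC0 | reg code). Condition code 4 (zero) and register
// code 0 (al) are assumed here purely for illustration.
#include <cstdint>
#include <cstdio>

int main() {
  const int zero_cc = 4;   // IA-32 condition code for ZF set
  const int al_code = 0;   // byte register al has code 0
  uint8_t code[3] = {0x0F, static_cast<uint8_t>(0x90 | zero_cc),
                     static_cast<uint8_t>(0xC0 | al_code)};
  printf("%02x %02x %02x\n", code[0], code[1], code[2]);  // 0f 94 c0: setz al
  return 0;
}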
void Assembler::cvttss2si(Register dst, const Operand& src) {
ASSERT(CpuFeatures::IsEnabled(CpuFeatures::SSE2));
EnsureSpace ensure_space(this);
......
......@@ -63,6 +63,8 @@ namespace v8 { namespace internal {
struct Register {
bool is_valid() const { return 0 <= code_ && code_ < 8; }
bool is(Register reg) const { return code_ == reg.code_; }
// eax, ebx, ecx and edx are byte registers, the rest are not.
bool is_byte_register() const { return code_ <= 3; }
int code() const {
ASSERT(is_valid());
return code_;
......@@ -76,6 +78,8 @@ struct Register {
int code_;
};
const int kNumRegisters = 8;
extern Register eax;
extern Register ecx;
extern Register edx;
......@@ -174,6 +178,15 @@ enum Hint {
};
// The result of negating a hint is as if the corresponding condition
// were negated by NegateCondition. That is, no_hint is mapped to
// itself and not_taken and taken are mapped to each other.
inline Hint NegateHint(Hint hint) {
return (hint == no_hint)
? no_hint
: ((hint == not_taken) ? taken : not_taken);
}
// -----------------------------------------------------------------------------
// Machine instruction Immediates
......@@ -494,6 +507,9 @@ class Assembler : public Malloced {
void cmov(Condition cc, Register dst, Handle<Object> handle);
void cmov(Condition cc, Register dst, const Operand& src);
// Exchange two registers
void xchg(Register dst, Register src);
// Arithmetics
void adc(Register dst, int32_t imm32);
void adc(Register dst, const Operand& src);
......@@ -674,6 +690,7 @@ class Assembler : public Malloced {
void frndint();
void sahf();
void setcc(Condition cc, Register reg);
void cpuid();
......
......@@ -48,7 +48,7 @@ namespace v8 { namespace internal {
// unknown pc location. Assembler::bind() is used to bind a label to the
// current pc. A label can be bound only once.
class Label : public ZoneObject { // LabelShadows are dynamically allocated.
class Label BASE_EMBEDDED {
public:
INLINE(Label()) { Unuse(); }
INLINE(~Label()) { ASSERT(!is_linked()); }
......@@ -84,58 +84,11 @@ class Label : public ZoneObject { // LabelShadows are dynamically allocated.
friend class Assembler;
friend class RegexpAssembler;
friend class Displacement;
friend class LabelShadow;
friend class ShadowTarget;
friend class RegExpMacroAssemblerIrregexp;
};
// A LabelShadow represents a label that is temporarily shadowed by another
// label (represented by the original label during shadowing). They are used
// to catch jumps to labels in certain contexts, e.g. try blocks. After
// shadowing ends, the formerly shadowed label is again represented by the
// original label and the LabelShadow can be used as a label in its own
// right, representing the formerly shadowing label.
class LabelShadow : public Label {
public:
explicit LabelShadow(Label* original) {
ASSERT(original != NULL);
original_label_ = original;
original_pos_ = original->pos_;
original->Unuse();
#ifdef DEBUG
is_shadowing_ = true;
#endif
}
~LabelShadow() {
ASSERT(!is_shadowing_);
}
void StopShadowing() {
ASSERT(is_shadowing_ && is_unused());
pos_ = original_label_->pos_;
original_label_->pos_ = original_pos_;
#ifdef DEBUG
is_shadowing_ = false;
#endif
}
Label* original_label() const { return original_label_; }
private:
// During shadowing, the currently shadowing label. After shadowing, the
// label that was shadowed.
Label* original_label_;
// During shadowing, the saved state of the original label.
int original_pos_;
#ifdef DEBUG
bool is_shadowing_;
#endif
};
// -----------------------------------------------------------------------------
// Relocation information
......
......@@ -148,13 +148,13 @@ ObjectLiteral::Property::Property(bool is_getter, FunctionLiteral* value) {
}
void LabelCollector::AddLabel(Label* label) {
void TargetCollector::AddTarget(JumpTarget* target) {
// Add the target to the collector, but discard duplicates.
int length = labels_->length();
int length = targets_->length();
for (int i = 0; i < length; i++) {
if (labels_->at(i) == label) return;
if (targets_->at(i) == target) return;
}
labels_->Add(label);
targets_->Add(target);
}
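// A standalone sketch of the duplicate-discarding add above, with a plain
// vector standing in for ZoneList and an empty stub for JumpTarget.
#include <cassert>
#include <vector>

struct JumpTargetStub {};

static void AddTarget(std::vector<JumpTargetStub*>* targets, JumpTargetStub* t) {
  for (JumpTargetStub* existing : *targets) {
    if (existing == t) return;  // discard duplicates by pointer identity
  }
  targets->push_back(t);
}

int main() {
  std::vector<JumpTargetStub*> targets;
  JumpTargetStub a, b;
  AddTarget(&targets, &a);
  AddTarget(&targets, &b);
  AddTarget(&targets, &a);  // already present; ignored
  assert(targets.size() == 2);
  return 0;
}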
......
......@@ -35,6 +35,7 @@
#include "variables.h"
#include "macro-assembler.h"
#include "jsregexp.h"
#include "jump-target.h"
namespace v8 { namespace internal {
......@@ -92,6 +93,9 @@ namespace v8 { namespace internal {
V(ThisFunction)
// Forward declarations
class TargetCollector;
#define DEF_FORWARD_DECLARATION(type) class type;
NODE_LIST(DEF_FORWARD_DECLARATION)
#undef DEF_FORWARD_DECLARATION
......@@ -118,7 +122,7 @@ class Node: public ZoneObject {
virtual VariableProxy* AsVariableProxy() { return NULL; }
virtual Property* AsProperty() { return NULL; }
virtual Call* AsCall() { return NULL; }
virtual LabelCollector* AsLabelCollector() { return NULL; }
virtual TargetCollector* AsTargetCollector() { return NULL; }
virtual BreakableStatement* AsBreakableStatement() { return NULL; }
virtual IterationStatement* AsIterationStatement() { return NULL; }
virtual UnaryOperation* AsUnaryOperation() { return NULL; }
......@@ -192,7 +196,7 @@ class BreakableStatement: public Statement {
virtual BreakableStatement* AsBreakableStatement() { return this; }
// Code generation
Label* break_target() { return &break_target_; }
JumpTarget* break_target() { return &break_target_; }
// Used during code generation for restoring the stack when a
// break/continue crosses a statement that keeps stuff on the stack.
......@@ -211,7 +215,7 @@ class BreakableStatement: public Statement {
private:
ZoneStringList* labels_;
Type type_;
Label break_target_;
JumpTarget break_target_;
int break_stack_height_;
};
......@@ -268,7 +272,7 @@ class IterationStatement: public BreakableStatement {
Statement* body() const { return body_; }
// Code generation
Label* continue_target() { return &continue_target_; }
JumpTarget* continue_target() { return &continue_target_; }
protected:
explicit IterationStatement(ZoneStringList* labels)
......@@ -280,7 +284,7 @@ class IterationStatement: public BreakableStatement {
private:
Statement* body_;
Label continue_target_;
JumpTarget continue_target_;
};
......@@ -503,43 +507,45 @@ class IfStatement: public Statement {
};
// NOTE: LabelCollectors are represented as nodes to fit in the target
// NOTE: TargetCollectors are represented as nodes to fit in the target
// stack in the compiler; this should probably be reworked.
class LabelCollector: public Node {
class TargetCollector: public Node {
public:
explicit LabelCollector(ZoneList<Label*>* labels) : labels_(labels) { }
explicit TargetCollector(ZoneList<JumpTarget*>* targets)
: targets_(targets) {
}
// Adds a label to the collector. The collector stores a pointer not
// a copy of the label to make binding work, so make sure not to
// pass in references to something on the stack.
void AddLabel(Label* label);
// Adds a jump target to the collector. The collector stores a pointer not
// a copy of the target to make binding work, so make sure not to pass in
// references to something on the stack.
void AddTarget(JumpTarget* target);
// Virtual behaviour. LabelCollectors are never part of the AST.
// Virtual behaviour. TargetCollectors are never part of the AST.
virtual void Accept(AstVisitor* v) { UNREACHABLE(); }
virtual LabelCollector* AsLabelCollector() { return this; }
virtual TargetCollector* AsTargetCollector() { return this; }
ZoneList<Label*>* labels() { return labels_; }
ZoneList<JumpTarget*>* targets() { return targets_; }
private:
ZoneList<Label*>* labels_;
ZoneList<JumpTarget*>* targets_;
};
class TryStatement: public Statement {
public:
explicit TryStatement(Block* try_block)
: try_block_(try_block), escaping_labels_(NULL) { }
: try_block_(try_block), escaping_targets_(NULL) { }
void set_escaping_labels(ZoneList<Label*>* labels) {
escaping_labels_ = labels;
void set_escaping_targets(ZoneList<JumpTarget*>* targets) {
escaping_targets_ = targets;
}
Block* try_block() const { return try_block_; }
ZoneList<Label*>* escaping_labels() const { return escaping_labels_; }
ZoneList<JumpTarget*>* escaping_targets() const { return escaping_targets_; }
private:
Block* try_block_;
ZoneList<Label*>* escaping_labels_;
ZoneList<JumpTarget*>* escaping_targets_;
};
......
......@@ -38,8 +38,10 @@
namespace v8 { namespace internal {
DeferredCode::DeferredCode(CodeGenerator* generator)
: masm_(generator->masm()),
generator_(generator),
: generator_(generator),
masm_(generator->masm()),
enter_(generator),
exit_(generator, JumpTarget::BIDIRECTIONAL),
statement_position_(masm_->current_statement_position()),
position_(masm_->current_position()) {
generator->AddDeferred(this);
......@@ -60,13 +62,39 @@ void CodeGenerator::ProcessDeferred() {
if (code->position() != RelocInfo::kNoPosition) {
masm->RecordPosition(code->position());
}
// Bind labels and generate the code.
masm->bind(code->enter());
// Generate the code.
Comment cmnt(masm, code->comment());
code->Generate();
if (code->exit()->is_bound()) {
masm->jmp(code->exit()); // platform independent?
}
ASSERT(code->enter()->is_bound());
}
}
void CodeGenerator::SetFrame(VirtualFrame* new_frame,
RegisterFile* non_frame_registers) {
RegisterFile saved_counts;
if (has_valid_frame()) {
frame_->DetachFromCodeGenerator();
// The remaining register reference counts are the non-frame ones.
allocator_->SaveTo(&saved_counts);
}
if (new_frame != NULL) {
// Restore the non-frame register references that go with the new frame.
allocator_->RestoreFrom(non_frame_registers);
new_frame->AttachToCodeGenerator();
}
frame_ = new_frame;
saved_counts.CopyTo(non_frame_registers);
}
void CodeGenerator::DeleteFrame() {
if (has_valid_frame()) {
frame_->DetachFromCodeGenerator();
delete frame_;
frame_ = NULL;
}
}
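// A toy model (not V8's classes) of the ownership protocol above: the code
// generator holds at most one current frame, detaches the old frame before
// installing a new one, and DeleteFrame both detaches and frees. The
// register reference counting is deliberately omitted from this sketch.
#include <cassert>

struct FrameStub {
  bool attached = false;
  void Attach() { attached = true; }
  void Detach() { attached = false; }
};

struct CodeGenSketch {
  FrameStub* frame_ = nullptr;
  bool has_valid_frame() const { return frame_ != nullptr; }
  void SetFrame(FrameStub* new_frame) {
    if (frame_ != nullptr) frame_->Detach();
    if (new_frame != nullptr) new_frame->Attach();
    frame_ = new_frame;
  }
  void DeleteFrame() {
    if (frame_ != nullptr) {
      frame_->Detach();
      delete frame_;
      frame_ = nullptr;
    }
  }
};

int main() {
  CodeGenSketch cgen;
  cgen.SetFrame(new FrameStub);
  assert(cgen.has_valid_frame());
  cgen.DeleteFrame();  // e.g. after emitting an unconditional jump
  assert(!cgen.has_valid_frame());
  return 0;
}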
......@@ -122,9 +150,6 @@ Handle<Code> CodeGenerator::MakeCode(FunctionLiteral* flit,
return Handle<Code>::null();
}
// Process any deferred code.
cgen.ProcessDeferred();
// Allocate and install the code.
CodeDesc desc;
cgen.masm()->GetCode(&desc);
......@@ -386,14 +411,14 @@ void CodeGenerator::GenerateFastCaseSwitchStatement(SwitchStatement* node,
ZoneList<CaseClause*>* cases = node->cases();
int length = cases->length();
// Label pointer per number in range
// Label pointer per number in range.
SmartPointer<Label*> case_targets(NewArray<Label*>(range));
// Label per switch case
// Label per switch case.
SmartPointer<Label> case_labels(NewArray<Label>(length));
Label* fail_label = default_index >= 0 ? &(case_labels[default_index])
: node->break_target();
Label* fail_label =
default_index >= 0 ? &(case_labels[default_index]) : NULL;
// Populate array of label pointers for each number in the range.
// Initally put the failure label everywhere.
......@@ -404,7 +429,7 @@ void CodeGenerator::GenerateFastCaseSwitchStatement(SwitchStatement* node,
// Overwrite with label of a case for the number value of that case.
// (In reverse order, so that if the same label occurs twice, the
// first one wins).
for (int i = length-1; i >= 0 ; i--) {
for (int i = length - 1; i >= 0; i--) {
CaseClause* clause = cases->at(i);
if (!clause->is_default()) {
Object* label_value = *(clause->label()->AsLiteral()->handle());
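// A standalone sketch of the table construction described above: one slot
// per value in [min_index, max_index], all slots starting at the failure
// label, then overwritten in reverse so the first clause for a value wins.
// The case values below are made up for illustration.
#include <cassert>
#include <vector>

int main() {
  const int min_index = 0;
  const int kFail = -1;
  int case_values[] = {1, 3, 1};        // clause i handles value case_values[i]
  std::vector<int> table(5, kFail);     // range [0, 4]; initially fail everywhere
  for (int i = 2; i >= 0; i--) {        // reverse order: earlier clauses win
    table[case_values[i] - min_index] = i;
  }
  assert(table[1] == 0);                // value 1 dispatches to the first clause
  assert(table[3] == 1);
  assert(table[0] == kFail);            // unhandled values fall to the default
  return 0;
}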
......@@ -424,21 +449,36 @@ void CodeGenerator::GenerateFastCaseSwitchStatement(SwitchStatement* node,
void CodeGenerator::GenerateFastCaseSwitchCases(
SwitchStatement* node,
Vector<Label> case_labels) {
Vector<Label> case_labels,
VirtualFrame* start_frame) {
ZoneList<CaseClause*>* cases = node->cases();
int length = cases->length();
for (int i = 0; i < length; i++) {
Comment cmnt(masm(), "[ Case clause");
masm()->bind(&(case_labels[i]));
// We may not have a virtual frame if control flow did not fall
// off the end of the previous case. In that case, use the start
// frame. Otherwise, we have to merge the existing one to the
// start frame as part of the previous case.
if (!has_valid_frame()) {
RegisterFile non_frame_registers = RegisterAllocator::Reserved();
SetFrame(new VirtualFrame(start_frame), &non_frame_registers);
} else {
frame_->MergeTo(start_frame);
}
masm()->bind(&case_labels[i]);
VisitStatements(cases->at(i)->statements());
}
masm()->bind(node->break_target());
}
bool CodeGenerator::TryGenerateFastCaseSwitchStatement(SwitchStatement* node) {
// TODO(238): Due to issue 238, fast case switches can crash on ARM
// and possibly IA32. They are disabled for now.
// See http://code.google.com/p/v8/issues/detail?id=238
return false;
ZoneList<CaseClause*>* cases = node->cases();
int length = cases->length();
......@@ -454,9 +494,10 @@ bool CodeGenerator::TryGenerateFastCaseSwitchStatement(SwitchStatement* node) {
CaseClause* clause = cases->at(i);
if (clause->is_default()) {
if (default_index >= 0) {
return false; // More than one default label:
// Defer to normal case for error.
}
// There is more than one default label. Defer to the normal case
// for error.
return false;
}
default_index = i;
} else {
Expression* label = clause->label();
......@@ -468,9 +509,9 @@ bool CodeGenerator::TryGenerateFastCaseSwitchStatement(SwitchStatement* node) {
if (!value->IsSmi()) {
return false;
}
int smi = Smi::cast(value)->value();
if (smi < min_index) { min_index = smi; }
if (smi > max_index) { max_index = smi; }
int int_value = Smi::cast(value)->value();
min_index = Min(int_value, min_index);
max_index = Max(int_value, max_index);
}
}
......@@ -486,7 +527,18 @@ bool CodeGenerator::TryGenerateFastCaseSwitchStatement(SwitchStatement* node) {
}
void CodeGenerator::CodeForStatement(Node* node) {
void CodeGenerator::CodeForFunctionPosition(FunctionLiteral* fun) {
if (FLAG_debug_info) {
int pos = fun->start_position();
if (pos != RelocInfo::kNoPosition) {
masm()->RecordStatementPosition(pos);
masm()->RecordPosition(pos);
}
}
}
void CodeGenerator::CodeForStatementPosition(Node* node) {
if (FLAG_debug_info) {
int pos = node->statement_pos();
if (pos != RelocInfo::kNoPosition) {
......
......@@ -37,8 +37,15 @@
// of Visitor and that the following methods are available publicly:
// CodeGenerator::MakeCode
// CodeGenerator::SetFunctionInfo
// CodeGenerator::AddDeferred
// CodeGenerator::masm
// CodeGenerator::frame
// CodeGenerator::has_valid_frame
// CodeGenerator::SetFrame
// CodeGenerator::DeleteFrame
// CodeGenerator::allocator
// CodeGenerator::AddDeferred
// CodeGenerator::in_spilled_code
// CodeGenerator::set_in_spilled_code
//
// These methods are either used privately by the shared code or implemented as
// shared code:
......@@ -88,8 +95,12 @@ class DeferredCode: public ZoneObject {
MacroAssembler* masm() const { return masm_; }
CodeGenerator* generator() const { return generator_; }
Label* enter() { return &enter_; }
Label* exit() { return &exit_; }
JumpTarget* enter() { return &enter_; }
void BindExit() { exit_.Bind(0); }
void BindExit(Result* result) { exit_.Bind(result, 1); }
void BindExit(Result* result0, Result* result1, Result* result2) {
exit_.Bind(result0, result1, result2, 3);
}
int statement_position() const { return statement_position_; }
int position() const { return position_; }
......@@ -103,15 +114,12 @@ class DeferredCode: public ZoneObject {
#endif
protected:
// The masm_ field is manipulated when compiling stubs with the
// BEGIN_STUB and END_STUB macros. For that reason, it cannot be
// constant.
MacroAssembler* masm_;
CodeGenerator* const generator_;
MacroAssembler* const masm_;
JumpTarget enter_;
JumpTarget exit_;
private:
CodeGenerator* const generator_;
Label enter_;
Label exit_;
int statement_position_;
int position_;
#ifdef DEBUG
......
......@@ -829,6 +829,12 @@ const char* NameConverter::NameOfCPURegister(int reg) const {
}
const char* NameConverter::NameOfByteCPURegister(int reg) const {
UNREACHABLE(); // ARM does not have the concept of a byte register
return "nobytereg";
}
const char* NameConverter::NameOfXMMRegister(int reg) const {
UNREACHABLE(); // ARM does not have any XMM registers
return "noxmmreg";
......
......@@ -65,6 +65,7 @@ static ByteMnemonic two_operands_instr[] = {
{0x85, "test", REG_OPER_OP_ORDER},
{0x31, "xor", OPER_REG_OP_ORDER},
{0x33, "xor", REG_OPER_OP_ORDER},
{0x87, "xchg", REG_OPER_OP_ORDER},
{0x8A, "mov_b", REG_OPER_OP_ORDER},
{0x8B, "mov", REG_OPER_OP_ORDER},
{-1, "", UNSET_OP_ORDER}
......@@ -115,6 +116,14 @@ static const char* jump_conditional_mnem[] = {
};
static const char* set_conditional_mnem[] = {
/*0*/ "seto", "setno", "setc", "setnc",
/*4*/ "setz", "setnz", "setna", "seta",
/*8*/ "sets", "setns", "setpe", "setpo",
/*12*/ "setl", "setnl", "setng", "setg"
};
enum InstructionType {
NO_INSTR,
ZERO_OPERANDS_INSTR,
......@@ -177,6 +186,7 @@ void InstructionTable::Init() {
SetTableRange(REGISTER_INSTR, 0x48, 0x4F, "dec");
SetTableRange(REGISTER_INSTR, 0x50, 0x57, "push");
SetTableRange(REGISTER_INSTR, 0x58, 0x5F, "pop");
SetTableRange(REGISTER_INSTR, 0x91, 0x97, "xchg eax,"); // 0x90 is nop.
SetTableRange(MOVE_REG_INSTR, 0xB8, 0xBF, "mov");
}
......@@ -259,6 +269,11 @@ class DisassemblerIA32 {
}
const char* NameOfByteCPURegister(int reg) const {
return converter_.NameOfByteCPURegister(reg);
}
const char* NameOfXMMRegister(int reg) const {
return converter_.NameOfXMMRegister(reg);
}
......@@ -283,8 +298,11 @@ class DisassemblerIA32 {
*base = data & 7;
}
typedef const char* (DisassemblerIA32::*RegisterNameMapping)(int reg) const;
int PrintRightOperandHelper(byte* modrmp, RegisterNameMapping register_name);
int PrintRightOperand(byte* modrmp);
int PrintRightByteOperand(byte* modrmp);
int PrintOperands(const char* mnem, OperandOrder op_order, byte* data);
int PrintImmediateOp(byte* data);
int F7Instruction(byte* data);
......@@ -292,6 +310,7 @@ class DisassemblerIA32 {
int JumpShort(byte* data);
int JumpConditional(byte* data, const char* comment);
int JumpConditionalShort(byte* data, const char* comment);
int SetCC(byte* data);
int FPUInstruction(byte* data);
void AppendToBuffer(const char* format, ...);
......@@ -315,10 +334,9 @@ void DisassemblerIA32::AppendToBuffer(const char* format, ...) {
tmp_buffer_pos_ += result;
}
// Returns number of bytes used including the current *modrmp.
// Writes instruction's right operand to 'tmp_buffer_'.
int DisassemblerIA32::PrintRightOperand(byte* modrmp) {
int DisassemblerIA32::PrintRightOperandHelper(
byte* modrmp,
RegisterNameMapping register_name) {
int mod, regop, rm;
get_modrm(*modrmp, &mod, &regop, &rm);
switch (mod) {
......@@ -332,20 +350,20 @@ int DisassemblerIA32::PrintRightOperand(byte* modrmp) {
int scale, index, base;
get_sib(sib, &scale, &index, &base);
if (index == esp && base == esp && scale == 0 /*times_1*/) {
AppendToBuffer("[%s]", NameOfCPURegister(rm));
AppendToBuffer("[%s]", (this->*register_name)(rm));
return 2;
} else if (base == ebp) {
int32_t disp = *reinterpret_cast<int32_t*>(modrmp + 2);
AppendToBuffer("[%s*%d+0x%x]",
NameOfCPURegister(index),
(this->*register_name)(index),
1 << scale,
disp);
return 6;
} else if (index != esp && base != ebp) {
// [base+index*scale]
AppendToBuffer("[%s+%s*%d]",
NameOfCPURegister(base),
NameOfCPURegister(index),
(this->*register_name)(base),
(this->*register_name)(index),
1 << scale);
return 2;
} else {
......@@ -353,7 +371,7 @@ int DisassemblerIA32::PrintRightOperand(byte* modrmp) {
return 1;
}
} else {
AppendToBuffer("[%s]", NameOfCPURegister(rm));
AppendToBuffer("[%s]", (this->*register_name)(rm));
return 1;
}
break;
......@@ -366,11 +384,11 @@ int DisassemblerIA32::PrintRightOperand(byte* modrmp) {
int disp =
mod == 2 ? *reinterpret_cast<int32_t*>(modrmp + 2) : *(modrmp + 2);
if (index == base && index == rm /*esp*/ && scale == 0 /*times_1*/) {
AppendToBuffer("[%s+0x%x]", NameOfCPURegister(rm), disp);
AppendToBuffer("[%s+0x%x]", (this->*register_name)(rm), disp);
} else {
AppendToBuffer("[%s+%s*%d+0x%x]",
NameOfCPURegister(base),
NameOfCPURegister(index),
(this->*register_name)(base),
(this->*register_name)(index),
1 << scale,
disp);
}
......@@ -379,12 +397,12 @@ int DisassemblerIA32::PrintRightOperand(byte* modrmp) {
// No sib.
int disp =
mod == 2 ? *reinterpret_cast<int32_t*>(modrmp + 1) : *(modrmp + 1);
AppendToBuffer("[%s+0x%x]", NameOfCPURegister(rm), disp);
AppendToBuffer("[%s+0x%x]", (this->*register_name)(rm), disp);
return mod == 2 ? 5 : 2;
}
break;
case 3:
AppendToBuffer("%s", NameOfCPURegister(rm));
AppendToBuffer("%s", (this->*register_name)(rm));
return 1;
default:
UnimplementedInstruction();
......@@ -394,6 +412,17 @@ int DisassemblerIA32::PrintRightOperand(byte* modrmp) {
}
int DisassemblerIA32::PrintRightOperand(byte* modrmp) {
return PrintRightOperandHelper(modrmp, &DisassemblerIA32::NameOfCPURegister);
}
int DisassemblerIA32::PrintRightByteOperand(byte* modrmp) {
return PrintRightOperandHelper(modrmp,
&DisassemblerIA32::NameOfByteCPURegister);
}
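// A minimal sketch of the member-pointer dispatch used above: one helper
// takes a pointer-to-member-function and calls it through (this->*fn)(...),
// so the CPU-register and byte-register printers can share one body.
#include <cstdio>

class NamerSketch {
 public:
  typedef const char* (NamerSketch::*RegisterNameMapping)(int reg) const;

  const char* NameOfCPURegister(int reg) const { return cpu_[reg & 7]; }
  const char* NameOfByteCPURegister(int reg) const { return byte_[reg & 7]; }

  void PrintHelper(int reg, RegisterNameMapping register_name) const {
    printf("%s\n", (this->*register_name)(reg));
  }

 private:
  const char* cpu_[8] = {"eax", "ecx", "edx", "ebx", "esp", "ebp", "esi", "edi"};
  const char* byte_[8] = {"al", "cl", "dl", "bl", "ah", "ch", "dh", "bh"};
};

int main() {
  NamerSketch n;
  n.PrintHelper(3, &NamerSketch::NameOfCPURegister);      // ebx
  n.PrintHelper(3, &NamerSketch::NameOfByteCPURegister);  // bl
  return 0;
}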
// Returns number of bytes used including the current *data.
// Writes instruction's mnemonic, left and right operands to 'tmp_buffer_'.
int DisassemblerIA32::PrintOperands(const char* mnem,
......@@ -574,6 +603,17 @@ int DisassemblerIA32::JumpConditionalShort(byte* data, const char* comment) {
}
// Returns number of bytes used, including *data.
int DisassemblerIA32::SetCC(byte* data) {
assert(*data == 0x0F);
byte cond = *(data+1) & 0x0F;
const char* mnem = set_conditional_mnem[cond];
AppendToBuffer("%s ", mnem);
PrintRightByteOperand(data+2);
return 3; // includes 0x0F
}
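// A self-contained sketch of the decoding SetCC performs above: three bytes
// 0x0F, 0x90 | cc, and a register ModR/M are turned into a mnemonic plus a
// byte-register name, reusing the same tables as the disassembler.
#include <cassert>
#include <cstdio>

static const char* set_mnem[16] = {
    "seto", "setno", "setc",  "setnc", "setz", "setnz", "setna", "seta",
    "sets", "setns", "setpe", "setpo", "setl", "setnl", "setng", "setg"};
static const char* byte_regs[8] = {"al", "cl", "dl", "bl",
                                   "ah", "ch", "dh", "bh"};

static void DecodeSetCC(const unsigned char* data) {
  assert(data[0] == 0x0F && (data[1] & 0xF0) == 0x90);
  assert((data[2] & 0xC0) == 0xC0);  // this sketch handles register operands only
  printf("%s %s\n", set_mnem[data[1] & 0x0F], byte_regs[data[2] & 0x07]);
}

int main() {
  const unsigned char setz_al[3] = {0x0F, 0x94, 0xC0};
  DecodeSetCC(setz_al);  // prints "setz al"
  return 0;
}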
// Returns number of bytes used, including *data.
int DisassemblerIA32::FPUInstruction(byte* data) {
byte b1 = *data;
......@@ -819,6 +859,8 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
f0byte == 0xB7 || f0byte == 0xAF) {
data += 2;
data += PrintOperands(f0mnem, REG_OPER_OP_ORDER, data);
} else if ((f0byte & 0xF0) == 0x90) {
data += SetCC(data);
} else {
data += 2;
if (f0byte == 0xAB || f0byte == 0xA5 || f0byte == 0xAD) {
......@@ -1054,12 +1096,17 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
static const char* cpu_regs[8] = {
"eax", "ecx", "edx", "ebx", "esp", "ebp", "esi", "edi",
"eax", "ecx", "edx", "ebx", "esp", "ebp", "esi", "edi"
};
static const char* byte_cpu_regs[8] = {
"al", "cl", "dl", "bl", "ah", "ch", "dh", "bh"
};
static const char* xmm_regs[8] = {
"xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5", "xmm6", "xmm7",
"xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5", "xmm6", "xmm7"
};
......@@ -1081,6 +1128,12 @@ const char* NameConverter::NameOfCPURegister(int reg) const {
}
const char* NameConverter::NameOfByteCPURegister(int reg) const {
if (0 <= reg && reg < 8) return byte_cpu_regs[reg];
return "noreg";
}
const char* NameConverter::NameOfXMMRegister(int reg) const {
if (0 <= reg && reg < 8) return xmm_regs[reg];
return "noxmmreg";
......
......@@ -39,6 +39,7 @@ class NameConverter {
public:
virtual ~NameConverter() {}
virtual const char* NameOfCPURegister(int reg) const;
virtual const char* NameOfByteCPURegister(int reg) const;
virtual const char* NameOfXMMRegister(int reg) const;
virtual const char* NameOfAddress(byte* addr) const;
virtual const char* NameOfConstant(byte* addr) const;
......
......@@ -99,7 +99,7 @@ static void DumpBuffer(FILE* f, char* buff) {
}
}
static const int kOutBufferSize = 256 + String::kMaxShortPrintLength;
static const int kOutBufferSize = 1024 + String::kMaxShortPrintLength;
static const int kRelocInfoPosition = 57;
static int DecodeIt(FILE* f,
......
......@@ -170,7 +170,6 @@ class IterationStatement;
class JSArray;
class JSFunction;
class JSObject;
class LabelCollector;
class LargeObjectSpace;
template <typename T, class P = FreeStoreAllocationPolicy> class List;
class LookupResult;
......
......@@ -739,15 +739,15 @@ void KeyedLoadIC::PatchInlinedMapCheck(Address address, Object* value) {
// The keyed load has a fast inlined case if the IC call instruction
// is immediately followed by a test instruction.
if (*test_instruction_address == kTestEaxByte) {
// Fetch the offset from the call instruction to the map cmp
// Fetch the offset from the test instruction to the map cmp
// instruction. This offset is stored in the last 4 bytes of the
// 5 byte test instruction.
Address offset_address = test_instruction_address + 1;
int offset_value = *(reinterpret_cast<int*>(offset_address));
// Compute the map address. The operand-immediate compare
// instruction is two bytes larger than a call instruction so we
// add 2 to get to the map address.
Address map_address = address + offset_value + 2;
// Compute the map address. The map address is in the last 4
// bytes of the 7-byte operand-immediate compare instruction, so
// we add 3 to the offset to get the map address.
Address map_address = test_instruction_address + offset_value + 3;
// Patch the map check.
(*(reinterpret_cast<Object**>(map_address))) = value;
}
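// A worked sketch of the offset arithmetic above, using a hypothetical byte
// layout: a 5-byte test whose last 4 bytes hold the distance to a 7-byte
// operand-immediate cmp, whose last 4 bytes hold the map word. Plain array
// indices stand in for code addresses.
#include <cassert>
#include <cstring>

int main() {
  unsigned char code[32] = {0};
  const int test_at = 0;                // the test instruction starts here
  const int cmp_at = 12;                // the map cmp instruction starts here
  const int offset = cmp_at - test_at;  // stored after the test's opcode byte
  std::memcpy(code + test_at + 1, &offset, 4);

  int offset_value;
  std::memcpy(&offset_value, code + test_at + 1, 4);
  const int map_at = test_at + offset_value + 3;  // skip the cmp's first 3 bytes
  assert(map_at == cmp_at + 3);         // the map word is the cmp's last 4 bytes
  assert(map_at + 4 == cmp_at + 7);
  return 0;
}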
......
// Copyright 2008 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "v8.h"
#include "codegen.h"
#include "jump-target.h"
namespace v8 { namespace internal {
// -------------------------------------------------------------------------
// JumpTarget implementation.
#define __ masm_->
void JumpTarget::Jump() {
ASSERT(cgen_ != NULL);
ASSERT(cgen_->has_valid_frame());
// Live non-frame registers are not allowed at unconditional jumps
// because we have no way of invalidating the corresponding results
// which are still live in the C++ code.
ASSERT(cgen_->HasValidEntryRegisters());
if (is_bound()) {
// Backward jump. There is an expected frame to merge to.
ASSERT(direction_ == BIDIRECTIONAL);
cgen_->frame()->MergeTo(entry_frame_);
cgen_->DeleteFrame();
__ jmp(&entry_label_);
} else {
// Forward jump. The current frame is added to the end of the list
// of frames reaching the target block and a jump to the merge code
// is emitted.
AddReachingFrame(cgen_->frame());
RegisterFile empty;
cgen_->SetFrame(NULL, &empty);
__ jmp(&merge_labels_.last());
}
is_linked_ = !is_bound_;
}
void JumpTarget::Branch(Condition cc, Hint ignored) {
ASSERT(cgen_ != NULL);
ASSERT(cgen_->has_valid_frame());
if (is_bound()) {
// Backward branch. We have an expected frame to merge to on the
// backward edge. We negate the condition and emit the merge code
// here.
//
// TODO(210): we should try to avoid negating the condition in the
// case where there is no merge code to emit. Otherwise, we emit
// a branch around an unconditional jump.
ASSERT(direction_ == BIDIRECTIONAL);
Label original_fall_through;
__ b(NegateCondition(cc), &original_fall_through);
// Swap the current frame for a copy of it, saving non-frame
// register reference counts and invalidating all non-frame register
// references except the reserved ones on the backward edge.
VirtualFrame* original_frame = cgen_->frame();
VirtualFrame* working_frame = new VirtualFrame(original_frame);
RegisterFile non_frame_registers = RegisterAllocator::Reserved();
cgen_->SetFrame(working_frame, &non_frame_registers);
working_frame->MergeTo(entry_frame_);
cgen_->DeleteFrame();
__ jmp(&entry_label_);
// Restore the frame and its associated non-frame registers.
cgen_->SetFrame(original_frame, &non_frame_registers);
__ bind(&original_fall_through);
} else {
// Forward branch. A copy of the current frame is added to the end
// of the list of frames reaching the target block and a branch to
// the merge code is emitted.
AddReachingFrame(new VirtualFrame(cgen_->frame()));
__ b(cc, &merge_labels_.last());
}
is_linked_ = !is_bound_;
}
void JumpTarget::Call() {
// Call is used to push the address of the catch block on the stack as
// a return address when compiling try/catch and try/finally. We
// fully spill the frame before making the call. The expected frame
// at the label (which should be the only one) is the spilled current
// frame plus an in-memory return address. The "fall-through" frame
// at the return site is the spilled current frame.
ASSERT(cgen_ != NULL);
ASSERT(cgen_->has_valid_frame());
// There are no non-frame references across the call.
ASSERT(cgen_->HasValidEntryRegisters());
ASSERT(!is_linked());
cgen_->frame()->SpillAll();
VirtualFrame* target_frame = new VirtualFrame(cgen_->frame());
target_frame->Adjust(1);
AddReachingFrame(target_frame);
__ bl(&merge_labels_.last());
is_linked_ = !is_bound_;
}
void JumpTarget::Bind(int mergable_elements) {
ASSERT(cgen_ != NULL);
ASSERT(!is_bound());
// Live non-frame registers are not allowed at the start of a basic
// block.
ASSERT(!cgen_->has_valid_frame() || cgen_->HasValidEntryRegisters());
// Compute the frame to use for entry to the block.
ComputeEntryFrame(mergable_elements);
if (is_linked()) {
// There were forward jumps. Handle merging the reaching frames
// and possible fall through to the entry frame.
// Some moves required to merge to an expected frame require
// purely frame state changes, and do not require any code
// generation. Perform those first to increase the possibility of
// finding equal frames below.
if (cgen_->has_valid_frame()) {
cgen_->frame()->PrepareMergeTo(entry_frame_);
}
for (int i = 0; i < reaching_frames_.length(); i++) {
reaching_frames_[i]->PrepareMergeTo(entry_frame_);
}
// If there is a fall through to the jump target and it needs
// merge code, process it first.
if (cgen_->has_valid_frame() && !cgen_->frame()->Equals(entry_frame_)) {
// Loop over all the reaching frames, looking for any that can
// share merge code with this one.
for (int i = 0; i < reaching_frames_.length(); i++) {
if (cgen_->frame()->Equals(reaching_frames_[i])) {
// Set the reaching frames element to null to avoid
// processing it later, and then bind its entry label.
delete reaching_frames_[i];
reaching_frames_[i] = NULL;
__ bind(&merge_labels_[i]);
}
}
// Emit the merge code.
cgen_->frame()->MergeTo(entry_frame_);
}
// Loop over the (non-null) reaching frames and process any that
// need merge code.
for (int i = 0; i < reaching_frames_.length(); i++) {
VirtualFrame* frame = reaching_frames_[i];
if (frame != NULL && !frame->Equals(entry_frame_)) {
// Set the reaching frames element to null to avoid processing
// it later. Do not delete it as it is needed for merging.
reaching_frames_[i] = NULL;
// If the code generator has a current frame (a fall-through
// or a previously merged frame), insert a jump around the
// merge code we are about to generate.
if (cgen_->has_valid_frame()) {
cgen_->DeleteFrame();
__ jmp(&entry_label_);
}
// Set the frame to merge as the code generator's current
// frame and bind its merge label.
RegisterFile reserved_registers = RegisterAllocator::Reserved();
cgen_->SetFrame(frame, &reserved_registers);
__ bind(&merge_labels_[i]);
// Loop over the remaining (non-null) reaching frames, looking
// for any that can share merge code with this one.
for (int j = i + 1; j < reaching_frames_.length(); j++) {
VirtualFrame* other = reaching_frames_[j];
if (other != NULL && frame->Equals(other)) {
delete other;
reaching_frames_[j] = NULL;
__ bind(&merge_labels_[j]);
}
}
// Emit the merge code.
cgen_->frame()->MergeTo(entry_frame_);
}
}
// The code generator may not have a current frame if there was no
// fall through and none of the reaching frames needed merging.
// In that case, clone the entry frame as the current frame.
if (!cgen_->has_valid_frame()) {
RegisterFile reserved_registers = RegisterAllocator::Reserved();
cgen_->SetFrame(new VirtualFrame(entry_frame_), &reserved_registers);
}
// There is certainly a current frame equal to the entry frame.
// Bind the entry frame label.
__ bind(&entry_label_);
// There may be unprocessed reaching frames that did not need
// merge code. Bind their merge labels to be the same as the
// entry label.
for (int i = 0; i < reaching_frames_.length(); i++) {
if (reaching_frames_[i] != NULL) {
delete reaching_frames_[i];
__ bind(&merge_labels_[i]);
}
}
// All the reaching frames except the one that is the current
// frame (if it is one of the reaching frames) have been deleted.
reaching_frames_.Clear();
merge_labels_.Clear();
} else {
// There were no forward jumps. The current frame is merged to
// the entry frame.
cgen_->frame()->MergeTo(entry_frame_);
__ bind(&entry_label_);
}
is_linked_ = false;
is_bound_ = true;
}
#undef __
} } // namespace v8::internal
// Copyright 2008 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "v8.h"
#include "codegen.h"
#include "jump-target.h"
namespace v8 { namespace internal {
// -------------------------------------------------------------------------
// JumpTarget implementation.
#define __ masm_->
void JumpTarget::Jump() {
ASSERT(cgen_ != NULL);
ASSERT(cgen_->has_valid_frame());
// Live non-frame registers are not allowed at unconditional jumps
// because we have no way of invalidating the corresponding results
// which are still live in the C++ code.
ASSERT(cgen_->HasValidEntryRegisters());
if (is_bound()) {
// Backward jump. There is an expected frame to merge to.
ASSERT(direction_ == BIDIRECTIONAL);
cgen_->frame()->MergeTo(entry_frame_);
cgen_->DeleteFrame();
__ jmp(&entry_label_);
} else {
// Forward jump. The current frame is added to the end of the list
// of frames reaching the target block and a jump to the merge code
// is emitted.
AddReachingFrame(cgen_->frame());
RegisterFile empty;
cgen_->SetFrame(NULL, &empty);
__ jmp(&merge_labels_.last());
}
is_linked_ = !is_bound_;
}
void JumpTarget::Branch(Condition cc, Hint hint) {
ASSERT(cgen_ != NULL);
ASSERT(cgen_->has_valid_frame());
if (is_bound()) {
// Backward branch. We have an expected frame to merge to on the
// backward edge. We negate the condition and emit the merge code
// here.
//
// TODO(210): we should try to avoid negating the condition in the
// case where there is no merge code to emit. Otherwise, we emit
// a branch around an unconditional jump.
ASSERT(direction_ == BIDIRECTIONAL);
Label original_fall_through;
__ j(NegateCondition(cc), &original_fall_through, NegateHint(hint));
// Swap the current frame for a copy of it, saving non-frame
// register reference counts and invalidating all non-frame register
// references except the reserved ones on the backward edge.
VirtualFrame* original_frame = cgen_->frame();
VirtualFrame* working_frame = new VirtualFrame(original_frame);
RegisterFile non_frame_registers = RegisterAllocator::Reserved();
cgen_->SetFrame(working_frame, &non_frame_registers);
working_frame->MergeTo(entry_frame_);
cgen_->DeleteFrame();
__ jmp(&entry_label_);
// Restore the frame and its associated non-frame registers.
cgen_->SetFrame(original_frame, &non_frame_registers);
__ bind(&original_fall_through);
} else {
// Forward branch. A copy of the current frame is added to the end
// of the list of frames reaching the target block and a branch to
// the merge code is emitted.
AddReachingFrame(new VirtualFrame(cgen_->frame()));
__ j(cc, &merge_labels_.last(), hint);
}
is_linked_ = !is_bound_;
}
void JumpTarget::Call() {
// Call is used to push the address of the catch block on the stack as
// a return address when compiling try/catch and try/finally. We
// fully spill the frame before making the call. The expected frame
// at the label (which should be the only one) is the spilled current
// frame plus an in-memory return address. The "fall-through" frame
// at the return site is the spilled current frame.
ASSERT(cgen_ != NULL);
ASSERT(cgen_->has_valid_frame());
// There are no non-frame references across the call.
ASSERT(cgen_->HasValidEntryRegisters());
ASSERT(!is_linked());
cgen_->frame()->SpillAll();
VirtualFrame* target_frame = new VirtualFrame(cgen_->frame());
target_frame->Adjust(1);
AddReachingFrame(target_frame);
__ call(&merge_labels_.last());
is_linked_ = !is_bound_;
}
void JumpTarget::Bind(int mergable_elements) {
ASSERT(cgen_ != NULL);
ASSERT(!is_bound());
// Live non-frame registers are not allowed at the start of a basic
// block.
ASSERT(!cgen_->has_valid_frame() || cgen_->HasValidEntryRegisters());
// Compute the frame to use for entry to the block.
ComputeEntryFrame(mergable_elements);
if (is_linked()) {
// There were forward jumps. Handle merging the reaching frames
// and possible fall through to the entry frame.
// Some moves required to merge to an expected frame require
// purely frame state changes, and do not require any code
// generation. Perform those first to increase the possibility of
// finding equal frames below.
if (cgen_->has_valid_frame()) {
cgen_->frame()->PrepareMergeTo(entry_frame_);
}
for (int i = 0; i < reaching_frames_.length(); i++) {
reaching_frames_[i]->PrepareMergeTo(entry_frame_);
}
// If there is a fall through to the jump target and it needs
// merge code, process it first.
if (cgen_->has_valid_frame() && !cgen_->frame()->Equals(entry_frame_)) {
// Loop over all the reaching frames, looking for any that can
// share merge code with this one.
for (int i = 0; i < reaching_frames_.length(); i++) {
if (cgen_->frame()->Equals(reaching_frames_[i])) {
// Set the reaching frames element to null to avoid
// processing it later, and then bind its entry label.
delete reaching_frames_[i];
reaching_frames_[i] = NULL;
__ bind(&merge_labels_[i]);
}
}
// Emit the merge code.
cgen_->frame()->MergeTo(entry_frame_);
}
// Loop over the (non-null) reaching frames and process any that
// need merge code.
for (int i = 0; i < reaching_frames_.length(); i++) {
VirtualFrame* frame = reaching_frames_[i];
if (frame != NULL && !frame->Equals(entry_frame_)) {
// Set the reaching frames element to null to avoid processing
// it later. Do not delete it as it is needed for merging.
reaching_frames_[i] = NULL;
// If the code generator has a current frame (a fall-through
// or a previously merged frame), insert a jump around the
// merge code we are about to generate.
if (cgen_->has_valid_frame()) {
cgen_->DeleteFrame();
__ jmp(&entry_label_);
}
// Set the frame to merge as the code generator's current
// frame and bind its merge label.
RegisterFile reserved_registers = RegisterAllocator::Reserved();
cgen_->SetFrame(frame, &reserved_registers);
__ bind(&merge_labels_[i]);
// Loop over the remaining (non-null) reaching frames, looking
// for any that can share merge code with this one.
for (int j = i + 1; j < reaching_frames_.length(); j++) {
VirtualFrame* other = reaching_frames_[j];
if (other != NULL && frame->Equals(other)) {
delete other;
reaching_frames_[j] = NULL;
__ bind(&merge_labels_[j]);
}
}
// Emit the merge code.
cgen_->frame()->MergeTo(entry_frame_);
}
}
// The code generator may not have a current frame if there was no
// fall through and none of the reaching frames needed merging.
// In that case, clone the entry frame as the current frame.
if (!cgen_->has_valid_frame()) {
RegisterFile reserved_registers = RegisterAllocator::Reserved();
cgen_->SetFrame(new VirtualFrame(entry_frame_), &reserved_registers);
}
// There is certainly a current frame equal to the entry frame.
// Bind the entry frame label.
__ bind(&entry_label_);
// There may be unprocessed reaching frames that did not need
// merge code. Bind their merge labels to be the same as the
// entry label.
for (int i = 0; i < reaching_frames_.length(); i++) {
if (reaching_frames_[i] != NULL) {
delete reaching_frames_[i];
__ bind(&merge_labels_[i]);
}
}
// All the reaching frames except the one that is the current
// frame (if it is one of the reaching frames) have been deleted.
reaching_frames_.Clear();
merge_labels_.Clear();
} else {
// There were no forward jumps. The current frame is merged to
// the entry frame.
cgen_->frame()->MergeTo(entry_frame_);
__ bind(&entry_label_);
}
is_linked_ = false;
is_bound_ = true;
}
#undef __
} } // namespace v8::internal
......@@ -57,10 +57,10 @@ class List {
ASSERT(0 <= i && i < length_);
return data_[i];
}
inline T& at(int i) const { return this->operator[](i); }
INLINE(const T& last() const) {
inline T& at(int i) const { return operator[](i); }
inline T& last() const {
ASSERT(!is_empty());
return this->at(length_ - 1);
return at(length_ - 1);
}
INLINE(bool is_empty() const) { return length_ == 0; }
......
......@@ -35,6 +35,9 @@
namespace v8 { namespace internal {
// -------------------------------------------------------------------------
// MacroAssembler implementation.
MacroAssembler::MacroAssembler(void* buffer, int size)
: Assembler(buffer, size),
unresolved_(0),
......@@ -111,8 +114,7 @@ class RecordWriteStub : public CodeStub {
// scratch) OOOOAAAASSSS.
class ScratchBits: public BitField<uint32_t, 0, 4> {};
class AddressBits: public BitField<uint32_t, 4, 4> {};
class ObjectBits: public BitField<uint32_t, 8, 4> {
};
class ObjectBits: public BitField<uint32_t, 8, 4> {};
Major MajorKey() { return RecordWrite; }
......@@ -606,6 +608,19 @@ void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
}
void MacroAssembler::NegativeZeroTest(CodeGenerator* cgen,
Register result,
Register op,
JumpTarget* then_target) {
JumpTarget ok(cgen);
test(result, Operand(result));
ok.Branch(not_zero, taken);
test(op, Operand(op));
then_target->Branch(sign, not_taken);
ok.Bind();
}
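// A scalar sketch of the predicate the jump-target version above encodes:
// control reaches then_target exactly when the result is zero and the
// operand is negative, the case where an integer multiply has produced
// what JavaScript must treat as -0.
#include <cassert>

static bool BranchesToThenTarget(int result, int op) {
  if (result != 0) return false;  // ok.Branch(not_zero, taken)
  return op < 0;                  // then_target->Branch(sign, not_taken)
}

int main() {
  assert(!BranchesToThenTarget(6, -3));  // nonzero result: falls through
  assert(!BranchesToThenTarget(0, 2));   // 0 * 2 is a plain +0
  assert(BranchesToThenTarget(0, -2));   // 0 * -2 must be -0 in JS
  return 0;
}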
void MacroAssembler::NegativeZeroTest(Register result,
Register op,
Label* then_label) {
......
......@@ -32,8 +32,11 @@
namespace v8 { namespace internal {
// Forward declaration.
class JumpTarget;
// Helper type to make boolean flag easier to read at call-site.
// Helper types to make flags easier to read at call sites.
enum InvokeFlag {
CALL_FUNCTION,
JUMP_FUNCTION
......@@ -179,6 +182,12 @@ class MacroAssembler: public Assembler {
// Check if result is zero and op is negative.
void NegativeZeroTest(Register result, Register op, Label* then_label);
// Check if result is zero and op is negative in code using jump targets.
void NegativeZeroTest(CodeGenerator* cgen,
Register result,
Register op,
JumpTarget* then_target);
// Check if result is zero and any of op1 and op2 are negative.
// Register scratch is destroyed, and it must be different from op2.
void NegativeZeroTest(Register result, Register op1, Register op2,
......@@ -327,7 +336,6 @@ static inline Operand FieldOperand(Register object,
return Operand(object, index, scale, offset - kHeapObjectTag);
}
} } // namespace v8::internal
#endif // V8_MACRO_ASSEMBLER_IA32_H_
......@@ -205,7 +205,7 @@ class Parser {
BreakableStatement* LookupBreakTarget(Handle<String> label, bool* ok);
IterationStatement* LookupContinueTarget(Handle<String> label, bool* ok);
void RegisterLabelUse(Label* label, int index);
void RegisterTargetUse(JumpTarget* target, int index);
// Create a number literal.
Literal* NewNumberLiteral(double value);
......@@ -2050,8 +2050,8 @@ Block* Parser::WithHelper(Expression* obj,
bool is_catch_block,
bool* ok) {
// Parse the statement and collect escaping labels.
ZoneList<Label*>* label_list = NEW(ZoneList<Label*>(0));
LabelCollector collector(label_list);
ZoneList<JumpTarget*>* target_list = NEW(ZoneList<JumpTarget*>(0));
TargetCollector collector(target_list);
Statement* stat;
{ Target target(this, &collector);
with_nesting_level_++;
......@@ -2064,7 +2064,7 @@ Block* Parser::WithHelper(Expression* obj,
// 2: The try-finally block evaluating the body.
Block* result = NEW(Block(NULL, 2, false));
if (result) {
if (result != NULL) {
result->AddStatement(NEW(WithEnterStatement(obj, is_catch_block)));
// Create body block.
......@@ -2077,12 +2077,10 @@ Block* Parser::WithHelper(Expression* obj,
// Return a try-finally statement.
TryFinally* wrapper = NEW(TryFinally(body, exit));
wrapper->set_escaping_labels(collector.labels());
wrapper->set_escaping_targets(collector.targets());
result->AddStatement(wrapper);
return result;
} else {
return NULL;
}
return result;
}
......@@ -2197,8 +2195,8 @@ TryStatement* Parser::ParseTryStatement(bool* ok) {
Expect(Token::TRY, CHECK_OK);
ZoneList<Label*>* label_list = NEW(ZoneList<Label*>(0));
LabelCollector collector(label_list);
ZoneList<JumpTarget*>* target_list = NEW(ZoneList<JumpTarget*>(0));
TargetCollector collector(target_list);
Block* try_block;
{ Target target(this, &collector);
......@@ -2217,10 +2215,11 @@ TryStatement* Parser::ParseTryStatement(bool* ok) {
}
// If we can break out from the catch block and there is a finally block,
// then we will need to collect labels from the catch block. Since we don't
// know yet if there will be a finally block, we always collect the labels.
ZoneList<Label*>* catch_label_list = NEW(ZoneList<Label*>(0));
LabelCollector catch_collector(catch_label_list);
// then we will need to collect jump targets from the catch block. Since
// we don't know yet if there will be a finally block, we always collect
// the jump targets.
ZoneList<JumpTarget*>* catch_target_list = NEW(ZoneList<JumpTarget*>(0));
TargetCollector catch_collector(catch_target_list);
bool has_catch = false;
if (tok == Token::CATCH) {
has_catch = true;
......@@ -2260,7 +2259,7 @@ TryStatement* Parser::ParseTryStatement(bool* ok) {
if (!is_pre_parsing_ && catch_block != NULL && finally_block != NULL) {
TryCatch* statement = NEW(TryCatch(try_block, catch_var, catch_block));
statement->set_escaping_labels(collector.labels());
statement->set_escaping_targets(collector.targets());
try_block = NEW(Block(NULL, 1, false));
try_block->AddStatement(statement);
catch_block = NULL;
......@@ -2271,15 +2270,15 @@ TryStatement* Parser::ParseTryStatement(bool* ok) {
if (catch_block != NULL) {
ASSERT(finally_block == NULL);
result = NEW(TryCatch(try_block, catch_var, catch_block));
result->set_escaping_labels(collector.labels());
result->set_escaping_targets(collector.targets());
} else {
ASSERT(finally_block != NULL);
result = NEW(TryFinally(try_block, finally_block));
// Add the labels of the try block and the catch block.
for (int i = 0; i < collector.labels()->length(); i++) {
catch_collector.labels()->Add(collector.labels()->at(i));
// Add the jump targets of the try block and the catch block.
for (int i = 0; i < collector.targets()->length(); i++) {
catch_collector.targets()->Add(collector.targets()->at(i));
}
result->set_escaping_labels(catch_collector.labels());
result->set_escaping_targets(catch_collector.targets());
}
}
......@@ -3506,7 +3505,7 @@ BreakableStatement* Parser::LookupBreakTarget(Handle<String> label, bool* ok) {
if ((anonymous && stat->is_target_for_anonymous()) ||
(!anonymous && ContainsLabel(stat->labels(), label))) {
RegisterLabelUse(stat->break_target(), i);
RegisterTargetUse(stat->break_target(), i);
return stat;
}
}
......@@ -3523,7 +3522,7 @@ IterationStatement* Parser::LookupContinueTarget(Handle<String> label,
ASSERT(stat->is_target_for_anonymous());
if (anonymous || ContainsLabel(stat->labels(), label)) {
RegisterLabelUse(stat->continue_target(), i);
RegisterTargetUse(stat->continue_target(), i);
return stat;
}
}
......@@ -3531,13 +3530,13 @@ IterationStatement* Parser::LookupContinueTarget(Handle<String> label,
}
void Parser::RegisterLabelUse(Label* label, int index) {
// Register that a label found at the given index in the target
// stack has been used from the top of the target stack. Add the
// label to any LabelCollectors passed on the stack.
void Parser::RegisterTargetUse(JumpTarget* target, int index) {
// Register that a jump target found at the given index in the target
// stack has been used from the top of the target stack. Add the jump
// target to any TargetCollectors passed on the stack.
for (int i = target_stack_->length(); i-- > index;) {
LabelCollector* collector = target_stack_->at(i)->AsLabelCollector();
if (collector != NULL) collector->AddLabel(label);
TargetCollector* collector = target_stack_->at(i)->AsTargetCollector();
if (collector != NULL) collector->AddTarget(target);
}
}
......
// Copyright 2009 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "v8.h"
#include "codegen.h"
#include "register-allocator.h"
namespace v8 { namespace internal {
// -------------------------------------------------------------------------
// Result implementation.
void Result::ToRegister() {
UNIMPLEMENTED();
}
void Result::ToRegister(Register target) {
UNIMPLEMENTED();
}
// -------------------------------------------------------------------------
// RegisterAllocator implementation.
RegisterFile RegisterAllocator::Reserved() {
RegisterFile reserved;
reserved.Use(sp);
reserved.Use(fp);
reserved.Use(cp);
reserved.Use(pc);
return reserved;
}
void RegisterAllocator::UnuseReserved(RegisterFile* register_file) {
register_file->ref_counts_[sp.code()] = 0;
register_file->ref_counts_[fp.code()] = 0;
register_file->ref_counts_[cp.code()] = 0;
register_file->ref_counts_[pc.code()] = 0;
}
void RegisterAllocator::Initialize() {
Reset();
// The following registers are live on function entry, saved in the
// frame, and available for allocation during execution.
Use(r1); // JS function.
Use(lr); // Return address.
}
void RegisterAllocator::Reset() {
registers_.Reset();
// The following registers are live on function entry and reserved
// during execution.
Use(sp); // Stack pointer.
Use(fp); // Frame pointer (caller's frame pointer on entry).
Use(cp); // Context (callee's context on entry).
Use(pc); // Program counter.
}
Result RegisterAllocator::AllocateByteRegisterWithoutSpilling() {
UNIMPLEMENTED();
Result invalid(cgen_);
return invalid;
}
} } // namespace v8::internal
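The ARM file above mostly stubs out the platform-specific hooks, but the Reserved()/UnuseReserved() pair already encodes an invariant: sp, fp, cp, and pc each carry exactly one reserved reference. A hedged sketch of that invariant, assuming an ARM build and the register declarations in assembler-arm.h (the function name is hypothetical, not part of the commit):

static void CheckReservedInvariant() {
  // Reserved() counts each reserved register once; UnuseReserved()
  // zeroes exactly those counts, so the two calls are inverses here.
  RegisterFile reserved = RegisterAllocator::Reserved();
  ASSERT(reserved.is_used(sp) && reserved.is_used(fp));
  ASSERT(reserved.is_used(cp) && reserved.is_used(pc));
  RegisterAllocator::UnuseReserved(&reserved);
  ASSERT(!reserved.is_used(sp) && !reserved.is_used(pc));
}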
// Copyright 2008 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "v8.h"
#include "codegen.h"
#include "register-allocator.h"
namespace v8 { namespace internal {
// -------------------------------------------------------------------------
// Result implementation.
void Result::ToRegister() {
ASSERT(is_valid());
if (is_constant()) {
Result fresh = cgen_->allocator()->Allocate();
ASSERT(fresh.is_valid());
if (cgen_->IsUnsafeSmi(handle())) {
cgen_->LoadUnsafeSmi(fresh.reg(), handle());
} else {
cgen_->masm()->Set(fresh.reg(), Immediate(handle()));
}
// This result becomes a copy of the fresh one.
*this = fresh;
}
ASSERT(is_register());
}
void Result::ToRegister(Register target) {
ASSERT(is_valid());
if (!is_register() || !reg().is(target)) {
Result fresh = cgen_->allocator()->Allocate(target);
ASSERT(fresh.is_valid());
if (is_register()) {
cgen_->masm()->mov(fresh.reg(), reg());
} else {
ASSERT(is_constant());
if (cgen_->IsUnsafeSmi(handle())) {
cgen_->LoadUnsafeSmi(fresh.reg(), handle());
} else {
cgen_->masm()->Set(fresh.reg(), Immediate(handle()));
}
}
*this = fresh;
} else if (is_register() && reg().is(target)) {
ASSERT(cgen_->has_valid_frame());
cgen_->frame()->Spill(target);
ASSERT(cgen_->allocator()->count(target) == 1);
}
ASSERT(is_register());
ASSERT(reg().is(target));
}
// -------------------------------------------------------------------------
// RegisterAllocator implementation.
RegisterFile RegisterAllocator::Reserved() {
RegisterFile reserved;
reserved.Use(esp);
reserved.Use(ebp);
reserved.Use(esi);
return reserved;
}
void RegisterAllocator::UnuseReserved(RegisterFile* register_file) {
register_file->ref_counts_[esp.code()] = 0;
register_file->ref_counts_[ebp.code()] = 0;
register_file->ref_counts_[esi.code()] = 0;
}
void RegisterAllocator::Initialize() {
Reset();
// The following register is live on function entry, saved in the
// frame, and available for allocation during execution.
Use(edi); // JS function.
}
void RegisterAllocator::Reset() {
registers_.Reset();
// The following registers are live on function entry and reserved
// during execution.
Use(esp); // Stack pointer.
Use(ebp); // Frame pointer (caller's frame pointer on entry).
Use(esi); // Context (callee's context on entry).
}
Result RegisterAllocator::AllocateByteRegisterWithoutSpilling() {
Result result = AllocateWithoutSpilling();
// If a register was allocated but it is not a byte register, release
// it and return an invalid result instead.
if (result.is_valid() && !result.reg().is_byte_register()) {
result.Unuse();
return Result(cgen_);
}
return result;
}
} } // namespace v8::internal
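To see the intended calling pattern for the two ToRegister variants above, here is a hedged ia32 sketch; cgen and kLocalOffset are hypothetical stand-ins, and the fragment assumes the declarations in register-allocator.h:

// Load a frame slot into any free register, then force the value into
// eax before an instruction that needs a fixed register (this assumes
// eax is not pinned by another live Result).
Result value = cgen->allocator()->Allocate();
ASSERT(value.is_valid());
cgen->masm()->mov(value.reg(), Operand(ebp, kLocalOffset));
value.ToRegister(eax);  // spills eax's frame references if necessary
cgen->masm()->test(eax, Immediate(kSmiTagMask));
// value's destructor (or an explicit Unuse()) releases the reference.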
// Copyright 2009 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "v8.h"
#include "codegen.h"
#include "register-allocator.h"
namespace v8 { namespace internal {
// -------------------------------------------------------------------------
// Result implementation.
Result::Result(Register reg, CodeGenerator* cgen)
: type_(REGISTER),
cgen_(cgen) {
data_.reg_ = reg;
ASSERT(reg.is_valid());
cgen_->allocator()->Use(reg);
}
void Result::CopyTo(Result* destination) const {
destination->type_ = type();
destination->cgen_ = cgen_;
if (is_register()) {
destination->data_.reg_ = reg();
cgen_->allocator()->Use(reg());
} else if (is_constant()) {
destination->data_.handle_ = data_.handle_;
} else {
ASSERT(!is_valid());
}
}
void Result::Unuse() {
if (is_register()) {
cgen_->allocator()->Unuse(reg());
}
type_ = INVALID;
}
// -------------------------------------------------------------------------
// RegisterFile implementation.
void RegisterFile::CopyTo(RegisterFile* other) {
for (int i = 0; i < kNumRegisters; i++) {
other->ref_counts_[i] = ref_counts_[i];
}
}
// -------------------------------------------------------------------------
// RegisterAllocator implementation.
Result RegisterAllocator::AllocateWithoutSpilling() {
// Return the first free register, if any.
for (int i = 0; i < kNumRegisters; i++) {
if (!is_used(i)) {
Register free_reg = { i };
return Result(free_reg, cgen_);
}
}
return Result(cgen_);
}
Result RegisterAllocator::Allocate() {
Result result = AllocateWithoutSpilling();
if (!result.is_valid()) {
// Ask the current frame to spill a register.
ASSERT(cgen_->has_valid_frame());
Register free_reg = cgen_->frame()->SpillAnyRegister();
if (free_reg.is_valid()) {
ASSERT(!is_used(free_reg));
return Result(free_reg, cgen_);
}
}
return result;
}
Result RegisterAllocator::Allocate(Register target) {
// If the target is not referenced, it can simply be allocated.
if (!is_used(target)) {
return Result(target, cgen_);
}
// If the target is only referenced in the frame, it can be spilled and
// then allocated.
ASSERT(cgen_->has_valid_frame());
if (count(target) == cgen_->frame()->register_count(target)) {
cgen_->frame()->Spill(target);
ASSERT(!is_used(target));
return Result(target, cgen_);
}
// Otherwise (if it's referenced outside the frame) we cannot allocate it.
return Result(cgen_);
}
} } // namespace v8::internal
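The three entry points above form a fallback chain: AllocateWithoutSpilling() takes a free register if one exists, Allocate() additionally asks the frame to spill one, and Allocate(target) claims a specific register only when the frame owns every reference to it. A hedged fragment showing the targeted path (hypothetical ia32 caller, assuming the surrounding declarations):

// Try to claim ecx outright; this spills it from the frame when the
// frame holds the only references, and fails if ecx is pinned elsewhere.
Result fixed = cgen->allocator()->Allocate(ecx);
if (fixed.is_valid()) {
  // ... emit instructions that require ecx ...
  fixed.Unuse();
} else {
  // ecx is held by another live Result; fall back to a different plan.
}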
// Copyright 2008 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef V8_REGISTER_ALLOCATOR_H_
#define V8_REGISTER_ALLOCATOR_H_
#include "macro-assembler.h"
namespace v8 { namespace internal {
// -------------------------------------------------------------------------
// Results
//
// Results encapsulate the compile-time values manipulated by the code
// generator. They can represent registers or constants.
class Result BASE_EMBEDDED {
public:
enum Type {
INVALID,
REGISTER,
CONSTANT
};
// Construct an invalid result.
explicit Result(CodeGenerator* cgen) : type_(INVALID), cgen_(cgen) {}
// Construct a register Result.
Result(Register reg, CodeGenerator* cgen);
// Construct a Result whose value is a compile-time constant.
Result(Handle<Object> value, CodeGenerator* cgen)
: type_(CONSTANT),
cgen_(cgen) {
data_.handle_ = value.location();
}
// The copy constructor and assignment operators could each create a new
// register reference.
Result(const Result& other) {
other.CopyTo(this);
}
Result& operator=(const Result& other) {
if (this != &other) {
Unuse();
other.CopyTo(this);
}
return *this;
}
~Result() { Unuse(); }
void Unuse();
Type type() const { return type_; }
bool is_valid() const { return type() != INVALID; }
bool is_register() const { return type() == REGISTER; }
bool is_constant() const { return type() == CONSTANT; }
Register reg() const {
ASSERT(type() == REGISTER);
return data_.reg_;
}
Handle<Object> handle() const {
ASSERT(type() == CONSTANT);
return Handle<Object>(data_.handle_);
}
// Move this result to an arbitrary register. The register is not
// necessarily spilled from the frame or even singly-referenced outside
// it.
void ToRegister();
// Move this result to a specified register. The register is spilled from
// the frame, and the register is singly-referenced (by this result)
// outside the frame.
void ToRegister(Register reg);
private:
Type type_;
union {
Register reg_;
Object** handle_;
} data_;
CodeGenerator* cgen_;
void CopyTo(Result* destination) const;
};
// -------------------------------------------------------------------------
// Register file
//
// The register file tracks reference counts for the processor registers.
// It is used by both the register allocator and the virtual frame.
class RegisterFile BASE_EMBEDDED {
public:
RegisterFile() { Reset(); }
void Reset() {
for (int i = 0; i < kNumRegisters; i++) {
ref_counts_[i] = 0;
}
}
// Predicates and accessors for the reference counts. The versions
// that take a register code rather than a register are for
// convenience in loops over the register codes.
bool is_used(int reg_code) const { return ref_counts_[reg_code] > 0; }
bool is_used(Register reg) const { return is_used(reg.code()); }
int count(int reg_code) const { return ref_counts_[reg_code]; }
int count(Register reg) const { return count(reg.code()); }
// Record a use of a register by incrementing its reference count.
void Use(Register reg) {
ref_counts_[reg.code()]++;
}
// Record that a register will no longer be used by decrementing its
// reference count.
void Unuse(Register reg) {
ASSERT(is_used(reg.code()));
if (is_used(reg.code())) {
ref_counts_[reg.code()]--;
}
}
// Copy the reference counts from this register file to the other.
void CopyTo(RegisterFile* other);
private:
int ref_counts_[kNumRegisters];
friend class RegisterAllocator;
};
// -------------------------------------------------------------------------
// Register allocator
//
class RegisterAllocator BASE_EMBEDDED {
public:
explicit RegisterAllocator(CodeGenerator* cgen) : cgen_(cgen) {}
// A register file with each of the reserved registers counted once.
static RegisterFile Reserved();
// Unuse all the reserved registers in a register file.
static void UnuseReserved(RegisterFile* register_file);
// Predicates and accessors for the registers' reference counts.
bool is_used(int reg_code) const { return registers_.is_used(reg_code); }
bool is_used(Register reg) const { return registers_.is_used(reg.code()); }
int count(int reg_code) const { return registers_.count(reg_code); }
int count(Register reg) const { return registers_.count(reg.code()); }
// Explicitly record a reference to a register.
void Use(Register reg) { registers_.Use(reg); }
// Explicitly record that a register will no longer be used.
void Unuse(Register reg) { registers_.Unuse(reg); }
// Initialize the register allocator for entry to a JS function. On
// entry, the registers used by the JS calling convention are
// externally referenced (i.e., referenced from outside the virtual
// frame), and the other registers are free.
void Initialize();
// Reset the register reference counts to free all non-reserved registers.
// A frame-external reference is kept to each of the reserved registers.
void Reset();
// Allocate a free register and return a register result if possible or
// fail and return an invalid result.
Result Allocate();
// Allocate a specific register if possible, spilling it from the frame if
// necessary, or else fail and return an invalid result.
Result Allocate(Register target);
// Allocate a free register without spilling any from the current frame or
// fail and return an invalid result.
Result AllocateWithoutSpilling();
// Allocate a free byte register without spilling any from the
// current frame or fail and return an invalid result.
Result AllocateByteRegisterWithoutSpilling();
// Copy the internal state to a register file, to be restored later by
// RestoreFrom.
void SaveTo(RegisterFile* register_file) {
registers_.CopyTo(register_file);
}
void RestoreFrom(RegisterFile* register_file) {
register_file->CopyTo(&registers_);
}
private:
CodeGenerator* cgen_;
RegisterFile registers_;
};
} } // namespace v8::internal
#endif // V8_REGISTER_ALLOCATOR_H_
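As a hedged illustration of the reference-counting contract this header sets up (a hypothetical fragment inside a code-generator method): copying a register Result bumps the register's count, and Unuse() or destruction drops it, so counts balance without manual bookkeeping.

{
  Result a = cgen->allocator()->Allocate();  // count(a.reg()) == 1
  ASSERT(a.is_valid());
  Result b = a;                              // copy bumps it to 2
  ASSERT(cgen->allocator()->count(a.reg()) == 2);
  b.Unuse();                                 // back to 1
}  // a's destructor releases the last reference: count drops to 0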
......@@ -1525,7 +1525,7 @@ void Simulator::Execute() {
Object* Simulator::Call(int32_t entry, int32_t p0, int32_t p1, int32_t p2,
int32_t p3, int32_t p4) {
// Setup parameters
set_register(r0, p0);
set_register(r1, p1);
......
// Copyright 2008 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef V8_VIRTUAL_FRAME_H_
#define V8_VIRTUAL_FRAME_H_
#include "macro-assembler.h"
#include "register-allocator.h"
namespace v8 { namespace internal {
// -------------------------------------------------------------------------
// Virtual frame elements
//
// The internal elements of the virtual frames. There are several kinds of
// elements:
// * Invalid: elements that are uninitialized or not actually part
// of the virtual frame. They should not be read.
// * Memory: an element that resides in the actual frame. Its address is
// given by its position in the virtual frame.
// * Register: an element that resides in a register.
// * Constant: an element whose value is known at compile time.
// * Copy: an element whose value is that of another frame element,
// identified by its index in the frame.
class FrameElement BASE_EMBEDDED {
public:
enum SyncFlag {
SYNCED,
NOT_SYNCED
};
// The default constructor creates an invalid frame element.
FrameElement() {
type_ = TypeField::encode(INVALID) | SyncField::encode(NOT_SYNCED);
data_.reg_ = no_reg;
}
// Factory function to construct an invalid frame element.
static FrameElement InvalidElement() {
FrameElement result;
return result;
}
// Factory function to construct an in-memory frame element.
static FrameElement MemoryElement() {
FrameElement result;
result.type_ = TypeField::encode(MEMORY) | SyncField::encode(SYNCED);
// In-memory elements have no useful data.
result.data_.reg_ = no_reg;
return result;
}
// Factory function to construct an in-register frame element.
static FrameElement RegisterElement(Register reg, SyncFlag is_synced) {
FrameElement result;
result.type_ = TypeField::encode(REGISTER) | SyncField::encode(is_synced);
result.data_.reg_ = reg;
return result;
}
// Factory function to construct a frame element whose value is known at
// compile time.
static FrameElement ConstantElement(Handle<Object> value,
SyncFlag is_synced) {
FrameElement result;
result.type_ = TypeField::encode(CONSTANT) | SyncField::encode(is_synced);
result.data_.handle_ = value.location();
return result;
}
bool is_synced() const { return SyncField::decode(type_) == SYNCED; }
void set_sync() {
ASSERT(type() != MEMORY);
type_ = (type_ & ~SyncField::mask()) | SyncField::encode(SYNCED);
}
void clear_sync() {
ASSERT(type() != MEMORY);
type_ = (type_ & ~SyncField::mask()) | SyncField::encode(NOT_SYNCED);
}
bool is_valid() const { return type() != INVALID; }
bool is_memory() const { return type() == MEMORY; }
bool is_register() const { return type() == REGISTER; }
bool is_constant() const { return type() == CONSTANT; }
bool is_copy() const { return type() == COPY; }
Register reg() const {
ASSERT(is_register());
return data_.reg_;
}
Handle<Object> handle() const {
ASSERT(is_constant());
return Handle<Object>(data_.handle_);
}
int index() const {
ASSERT(is_copy());
return data_.index_;
}
bool Equals(FrameElement other);
private:
enum Type {
INVALID,
MEMORY,
REGISTER,
CONSTANT,
COPY
};
// BitField is <type, shift, size>.
class SyncField : public BitField<SyncFlag, 0, 1> {};
class TypeField : public BitField<Type, 1, 32 - 1> {};
Type type() const { return TypeField::decode(type_); }
// The element's type and a dirty bit. The dirty bit can be cleared
// for non-memory elements to indicate that the element agrees with
// the value in memory in the actual frame.
int type_;
union {
Register reg_;
Object** handle_;
int index_;
} data_;
friend class VirtualFrame;
};
} } // namespace v8::internal
#ifdef ARM
#include "virtual-frame-arm.h"
#else // ia32
#include "virtual-frame-ia32.h"
#endif
#endif // V8_VIRTUAL_FRAME_H_
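A hedged sketch of the FrameElement factories and the sync bit in use (hypothetical fragment; assumes an ia32 build for the eax declaration):

FrameElement slot = FrameElement::MemoryElement();  // memory is always SYNCED
FrameElement value =
    FrameElement::RegisterElement(eax, FrameElement::NOT_SYNCED);
ASSERT(value.is_register() && !value.is_synced());
value.set_sync();    // register value has been written back to the frame
ASSERT(value.is_synced());
value.clear_sync();  // register changed again, so the memory copy is stale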
......@@ -36,8 +36,7 @@ Debug = debug.Debug
var bp1, bp2;
function listener(event, exec_state, event_data, data) {
if (event == Debug.DebugEvent.Break)
{
if (event == Debug.DebugEvent.Break) {
if (state == 0) {
exec_state.prepareStep(Debug.StepAction.StepIn, 1000);
state = 1;
......@@ -68,7 +67,6 @@ bp1 = Debug.setBreakPoint(f, 1);
state = 0;
result = -1;
f();
print(state);
assertEquals(499, result);
// Check that performing 1000 steps with a break point on the statement in the
......