MIPS: crankshaft implementation

BUG=
TEST=

Review URL: http://codereview.chromium.org/7934002
Patch from Paul Lind <plind44@gmail.com>.

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@9828 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
parent b5b33457
......@@ -172,6 +172,9 @@ SOURCES = {
mips/frames-mips.cc
mips/full-codegen-mips.cc
mips/ic-mips.cc
mips/lithium-codegen-mips.cc
mips/lithium-gap-resolver-mips.cc
mips/lithium-mips.cc
mips/macro-assembler-mips.cc
mips/regexp-macro-assembler-mips.cc
mips/stub-cache-mips.cc
......
......@@ -369,7 +369,20 @@ class FrameDescription {
}
double GetDoubleFrameSlot(unsigned offset) {
-return *reinterpret_cast<double*>(GetFrameSlotPointer(offset));
intptr_t* ptr = GetFrameSlotPointer(offset);
#if V8_TARGET_ARCH_MIPS
// Prevent gcc from using load-double (mips ldc1) on a (possibly)
// non-64-bit-aligned double. Uses two 32-bit word loads instead.
union conversion {
double d;
uint32_t u[2];
} c;
c.u[0] = *reinterpret_cast<uint32_t*>(ptr);
c.u[1] = *(reinterpret_cast<uint32_t*>(ptr) + 1);
return c.d;
#else
return *reinterpret_cast<double*>(ptr);
#endif
}
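A minimal standalone sketch of the same workaround (names illustrative, not part of this patch): read a double from 4-byte-aligned storage word by word, so the compiler cannot emit a 64-bit-aligned load such as mips ldc1.

#include <stdint.h>

static double ReadWordAlignedDouble(const uint32_t* words) {
  // Two 32-bit loads, safe at 4-byte alignment; word 0 maps to word 0
  // and word 1 to word 1, so the copy is endianness-neutral.
  union {
    double d;
    uint32_t u[2];
  } c;
  c.u[0] = words[0];
  c.u[1] = words[1];
  return c.d;
}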
void SetFrameSlot(unsigned offset, intptr_t value) {
......
......@@ -114,11 +114,7 @@ DEFINE_bool(clever_optimizations,
"Optimize object size, Array shift, DOM strings and string +")
// Flags for Crankshaft.
-#ifdef V8_TARGET_ARCH_MIPS
-DEFINE_bool(crankshaft, false, "use crankshaft")
-#else
-DEFINE_bool(crankshaft, true, "use crankshaft")
-#endif
+DEFINE_bool(crankshaft, true, "use crankshaft")
DEFINE_string(hydrogen_filter, "", "hydrogen use/trace filter")
DEFINE_bool(use_hydrogen, true, "use generated hydrogen for compilation")
DEFINE_bool(build_lithium, true, "use lithium chunk builder")
......@@ -326,7 +322,8 @@ DEFINE_bool(strict_mode, true, "allow strict mode directives")
// simulator-arm.cc and simulator-mips.cc
DEFINE_bool(trace_sim, false, "Trace simulator execution")
-DEFINE_bool(check_icache, false, "Check icache flushes in ARM simulator")
+DEFINE_bool(check_icache, false,
+"Check icache flushes in ARM and MIPS simulator")
DEFINE_int(stop_sim_at, 0, "Simulator stop after x number of instructions")
DEFINE_int(sim_stack_alignment, 8,
"Stack alingment in bytes in simulator (4 or 8, 8 is default)")
......
......@@ -1176,24 +1176,93 @@ void Builtins::Generate_LazyRecompile(MacroAssembler* masm) {
}
// These functions are called from C++ but cannot be used in live code.
static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
Deoptimizer::BailoutType type) {
{
FrameScope scope(masm, StackFrame::INTERNAL);
// Pass the function and deoptimization type to the runtime system.
__ li(a0, Operand(Smi::FromInt(static_cast<int>(type))));
__ push(a0);
__ CallRuntime(Runtime::kNotifyDeoptimized, 1);
}
// Get the full codegen state from the stack and untag it -> t2.
__ lw(t2, MemOperand(sp, 0 * kPointerSize));
__ SmiUntag(t2);
// Switch on the state.
Label with_tos_register, unknown_state;
__ Branch(&with_tos_register,
ne, t2, Operand(FullCodeGenerator::NO_REGISTERS));
__ Addu(sp, sp, Operand(1 * kPointerSize)); // Remove state.
__ Ret();
__ bind(&with_tos_register);
__ lw(v0, MemOperand(sp, 1 * kPointerSize));
__ Branch(&unknown_state, ne, t2, Operand(FullCodeGenerator::TOS_REG));
__ Addu(sp, sp, Operand(2 * kPointerSize)); // Remove state.
__ Ret();
__ bind(&unknown_state);
__ stop("no cases left");
}
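A hedged restatement of the state dispatch above: the full-codegen state pushed around Runtime::kNotifyDeoptimized tells the builtin how many words to drop and whether a top-of-stack value must be rematerialized in v0.

// state == FullCodeGenerator::NO_REGISTERS:
//   stack: [state]             -> drop 1 word, return
// state == FullCodeGenerator::TOS_REG:
//   stack: [state][tos value]  -> v0 = tos value, drop 2 words, return
// any other state:             -> stop("no cases left")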
void Builtins::Generate_NotifyDeoptimized(MacroAssembler* masm) {
__ Abort("Call to unimplemented function in builtins-mips.cc");
Generate_NotifyDeoptimizedHelper(masm, Deoptimizer::EAGER);
}
void Builtins::Generate_NotifyLazyDeoptimized(MacroAssembler* masm) {
__ Abort("Call to unimplemented function in builtins-mips.cc");
Generate_NotifyDeoptimizedHelper(masm, Deoptimizer::LAZY);
}
void Builtins::Generate_NotifyOSR(MacroAssembler* masm) {
__ Abort("Call to unimplemented function in builtins-mips.cc");
// For now, we are relying on the fact that Runtime::NotifyOSR
// doesn't do any garbage collection which allows us to save/restore
// the registers without worrying about which of them contain
// pointers. This seems a bit fragile.
RegList saved_regs =
(kJSCallerSaved | kCalleeSaved | ra.bit() | fp.bit()) & ~sp.bit();
__ MultiPush(saved_regs);
{
FrameScope scope(masm, StackFrame::INTERNAL);
__ CallRuntime(Runtime::kNotifyOSR, 0);
}
__ MultiPop(saved_regs);
__ Ret();
}
void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
__ Abort("Call to unimplemented function in builtins-mips.cc");
CpuFeatures::TryForceFeatureScope scope(VFP3);
if (!CpuFeatures::IsSupported(FPU)) {
__ Abort("Unreachable code: Cannot optimize without FPU support.");
return;
}
// Lookup the function in the JavaScript frame and push it as an
// argument to the on-stack replacement function.
__ lw(a0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
{
FrameScope scope(masm, StackFrame::INTERNAL);
__ push(a0);
__ CallRuntime(Runtime::kCompileForOnStackReplacement, 1);
}
// If the result was -1 it means that we couldn't optimize the
// function. Just return and continue in the unoptimized version.
__ Ret(eq, v0, Operand(Smi::FromInt(-1)));
// Untag the AST id and push it on the stack.
__ SmiUntag(v0);
__ push(v0);
// Generate the code for doing the frame-to-frame translation using
// the deoptimizer infrastructure.
Deoptimizer::EntryGenerator generator(masm, Deoptimizer::OSR);
generator.Generate();
}
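A hedged summary of the OSR entry protocol implemented above:

// 1. Push the JSFunction and call Runtime::kCompileForOnStackReplacement.
// 2. If the result is Smi(-1), optimization failed: return and continue
//    in the unoptimized code.
// 3. Otherwise the result is the Smi-tagged AST id of the OSR point:
//    untag it, push it, and let Deoptimizer::EntryGenerator(masm, OSR)
//    translate the unoptimized frame into an optimized one.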
......@@ -1395,8 +1464,7 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
const int kFunctionOffset = 4 * kPointerSize;
{
-FrameScope scope(masm, StackFrame::INTERNAL);
+FrameScope frame_scope(masm, StackFrame::INTERNAL);
__ lw(a0, MemOperand(fp, kFunctionOffset)); // Get the function.
__ push(a0);
__ lw(a0, MemOperand(fp, kArgsOffset)); // Get the args array.
......@@ -1530,8 +1598,7 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
__ InvokeFunction(a1, actual, CALL_FUNCTION,
NullCallWrapper(), CALL_AS_METHOD);
-scope.GenerateLeaveFrame();
+frame_scope.GenerateLeaveFrame();
__ Ret(USE_DELAY_SLOT);
__ Addu(sp, sp, Operand(3 * kPointerSize)); // In delay slot.
......
......@@ -32,24 +32,112 @@
#include "full-codegen.h"
#include "safepoint-table.h"
// Note: this file was taken from the X64 version. ARM has a partially working
// lithium implementation, but for now it is not ported to mips.
namespace v8 {
namespace internal {
-const int Deoptimizer::table_entry_size_ = 10;
+const int Deoptimizer::table_entry_size_ = 32;
int Deoptimizer::patch_size() {
-const int kCallInstructionSizeInWords = 3;
+const int kCallInstructionSizeInWords = 4;
return kCallInstructionSizeInWords * Assembler::kInstrSize;
}
void Deoptimizer::EnsureRelocSpaceForLazyDeoptimization(Handle<Code> code) {
// Nothing to do. No new relocation information is written for lazy
// deoptimization on MIPS.
}
void Deoptimizer::DeoptimizeFunction(JSFunction* function) {
-UNIMPLEMENTED();
HandleScope scope;
AssertNoAllocation no_allocation;
if (!function->IsOptimized()) return;
// Get the optimized code.
Code* code = function->code();
// Invalidate the relocation information, as it will become invalid by the
// code patching below, and is not needed any more.
code->InvalidateRelocation();
// For each return after a safepoint insert an absolute call to the
// corresponding deoptimization entry.
unsigned last_pc_offset = 0;
SafepointTable table(function->code());
for (unsigned i = 0; i < table.length(); i++) {
unsigned pc_offset = table.GetPcOffset(i);
SafepointEntry safepoint_entry = table.GetEntry(i);
int deoptimization_index = safepoint_entry.deoptimization_index();
int gap_code_size = safepoint_entry.gap_code_size();
// Check that we did not shoot past the next safepoint.
CHECK(pc_offset >= last_pc_offset);
#ifdef DEBUG
// Destroy the code which is not supposed to be run again.
int instructions = (pc_offset - last_pc_offset) / Assembler::kInstrSize;
CodePatcher destroyer(code->instruction_start() + last_pc_offset,
instructions);
for (int x = 0; x < instructions; x++) {
destroyer.masm()->break_(0);
}
#endif
last_pc_offset = pc_offset;
if (deoptimization_index != Safepoint::kNoDeoptimizationIndex) {
Address deoptimization_entry = Deoptimizer::GetDeoptimizationEntry(
deoptimization_index, Deoptimizer::LAZY);
last_pc_offset += gap_code_size;
int call_size_in_bytes = MacroAssembler::CallSize(deoptimization_entry,
RelocInfo::NONE);
int call_size_in_words = call_size_in_bytes / Assembler::kInstrSize;
ASSERT(call_size_in_bytes % Assembler::kInstrSize == 0);
ASSERT(call_size_in_bytes <= patch_size());
CodePatcher patcher(code->instruction_start() + last_pc_offset,
call_size_in_words);
patcher.masm()->Call(deoptimization_entry, RelocInfo::NONE);
last_pc_offset += call_size_in_bytes;
}
}
#ifdef DEBUG
// Destroy the code which is not supposed to be run again.
int instructions =
(code->safepoint_table_offset() - last_pc_offset) / Assembler::kInstrSize;
CodePatcher destroyer(code->instruction_start() + last_pc_offset,
instructions);
for (int x = 0; x < instructions; x++) {
destroyer.masm()->break_(0);
}
#endif
Isolate* isolate = code->GetIsolate();
// Add the deoptimizing code to the list.
DeoptimizingCodeListNode* node = new DeoptimizingCodeListNode(code);
DeoptimizerData* data = isolate->deoptimizer_data();
node->set_next(data->deoptimizing_code_list_);
data->deoptimizing_code_list_ = node;
// We might be in the middle of incremental marking with compaction.
// Tell collector to treat this code object in a special way and
// ignore all slots that might have been recorded on it.
isolate->heap()->mark_compact_collector()->InvalidateCode(code);
// Set the code for the function to non-optimized version.
function->ReplaceCode(function->shared()->code());
if (FLAG_trace_deopt) {
PrintF("[forced deoptimization: ");
function->PrintName();
PrintF(" / %x]\n", reinterpret_cast<uint32_t>(function));
#ifdef DEBUG
if (FLAG_print_code) {
code->PrintLn();
}
#endif
}
}
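A hedged sketch of the code layout this loop produces; the exact call sequence is whatever MacroAssembler::Call emits, and the lui/ori/jalr-through-t9 form below is an assumption consistent with kCallInstructionSizeInWords == 4.

// For each safepoint that carries a deoptimization index:
//   last_pc_offset += gap_code_size;       // keep the gap code intact
//   patch the following instructions with an absolute call:
//     lui  t9, entry >> 16
//     ori  t9, t9, entry & 0xffff
//     jalr t9
//     nop                                  // branch delay slot
//   last_pc_offset += call_size_in_bytes;  // <= patch_size()
// In DEBUG builds every untouched gap is filled with break_(0) so that
// stale optimized code traps instead of executing.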
......@@ -57,7 +145,42 @@ void Deoptimizer::PatchStackCheckCodeAt(Code* unoptimized_code,
Address pc_after,
Code* check_code,
Code* replacement_code) {
-UNIMPLEMENTED();
const int kInstrSize = Assembler::kInstrSize;
// This structure comes from FullCodeGenerator::EmitStackCheck.
// The call of the stack guard check has the following form:
// sltu at, sp, t0
// beq at, zero_reg, ok
// lui t9, <stack guard address> upper
// ori t9, <stack guard address> lower
// jalr t9
// nop
// ----- pc_after points here
ASSERT(Assembler::IsBeq(Assembler::instr_at(pc_after - 5 * kInstrSize)));
// Replace the sltu instruction with load-imm 1 to at, so beq is not taken.
CodePatcher patcher(pc_after - 6 * kInstrSize, 1);
patcher.masm()->addiu(at, zero_reg, 1);
// Replace the stack check address in the load-immediate (lui/ori pair)
// with the entry address of the replacement code.
ASSERT(reinterpret_cast<uint32_t>(
Assembler::target_address_at(pc_after - 4 * kInstrSize)) ==
reinterpret_cast<uint32_t>(check_code->entry()));
Assembler::set_target_address_at(pc_after - 4 * kInstrSize,
replacement_code->entry());
// We patched the code to the following form:
// addiu at, zero_reg, 1
// beq at, zero_reg, ok ;; Not changed
// lui t9, <on-stack replacement address> upper
// ori t9, <on-stack replacement address> lower
// jalr t9 ;; Not changed
// nop ;; Not changed
// ----- pc_after points here
unoptimized_code->GetHeap()->incremental_marking()->RecordCodeTargetPatch(
unoptimized_code, pc_after - 4 * kInstrSize, replacement_code);
}
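The retargeting works because a 32-bit constant is materialized on MIPS as lui (upper 16 bits) followed by ori (lower 16 bits), so set_target_address_at only has to rewrite two 16-bit immediates. A hedged sketch of that arithmetic (helper name illustrative):

#include <stdint.h>

// Rewrite the immediate fields of a lui/ori pair to load new_target.
// lui writes the upper half; ori zero-extends, so no sign fix-up needed.
static void RetargetLuiOri(uint32_t* lui_instr, uint32_t* ori_instr,
                           uint32_t new_target) {
  *lui_instr = (*lui_instr & ~0xFFFFu) | (new_target >> 16);      // upper half
  *ori_instr = (*ori_instr & ~0xFFFFu) | (new_target & 0xFFFFu);  // lower half
}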
......@@ -65,34 +188,618 @@ void Deoptimizer::RevertStackCheckCodeAt(Code* unoptimized_code,
Address pc_after,
Code* check_code,
Code* replacement_code) {
-UNIMPLEMENTED();
// Exact opposite of the function above.
const int kInstrSize = Assembler::kInstrSize;
ASSERT(Assembler::IsAddImmediate(
Assembler::instr_at(pc_after - 6 * kInstrSize)));
ASSERT(Assembler::IsBeq(Assembler::instr_at(pc_after - 5 * kInstrSize)));
// Restore the sltu instruction so beq can be taken again.
CodePatcher patcher(pc_after - 6 * kInstrSize, 1);
patcher.masm()->sltu(at, sp, t0);
// Replace the on-stack replacement address in the load-immediate (lui/ori
// pair) with the entry address of the normal stack-check code.
ASSERT(reinterpret_cast<uint32_t>(
Assembler::target_address_at(pc_after - 4 * kInstrSize)) ==
reinterpret_cast<uint32_t>(replacement_code->entry()));
Assembler::set_target_address_at(pc_after - 4 * kInstrSize,
check_code->entry());
check_code->GetHeap()->incremental_marking()->RecordCodeTargetPatch(
unoptimized_code, pc_after - 4 * kInstrSize, check_code);
}
static int LookupBailoutId(DeoptimizationInputData* data, unsigned ast_id) {
ByteArray* translations = data->TranslationByteArray();
int length = data->DeoptCount();
for (int i = 0; i < length; i++) {
if (static_cast<unsigned>(data->AstId(i)->value()) == ast_id) {
TranslationIterator it(translations, data->TranslationIndex(i)->value());
int value = it.Next();
ASSERT(Translation::BEGIN == static_cast<Translation::Opcode>(value));
// Read the number of frames.
value = it.Next();
if (value == 1) return i;
}
}
UNREACHABLE();
return -1;
}
void Deoptimizer::DoComputeOsrOutputFrame() {
-UNIMPLEMENTED();
DeoptimizationInputData* data = DeoptimizationInputData::cast(
optimized_code_->deoptimization_data());
unsigned ast_id = data->OsrAstId()->value();
int bailout_id = LookupBailoutId(data, ast_id);
unsigned translation_index = data->TranslationIndex(bailout_id)->value();
ByteArray* translations = data->TranslationByteArray();
TranslationIterator iterator(translations, translation_index);
Translation::Opcode opcode =
static_cast<Translation::Opcode>(iterator.Next());
ASSERT(Translation::BEGIN == opcode);
USE(opcode);
int count = iterator.Next();
ASSERT(count == 1);
USE(count);
opcode = static_cast<Translation::Opcode>(iterator.Next());
USE(opcode);
ASSERT(Translation::FRAME == opcode);
unsigned node_id = iterator.Next();
USE(node_id);
ASSERT(node_id == ast_id);
JSFunction* function = JSFunction::cast(ComputeLiteral(iterator.Next()));
USE(function);
ASSERT(function == function_);
unsigned height = iterator.Next();
unsigned height_in_bytes = height * kPointerSize;
USE(height_in_bytes);
unsigned fixed_size = ComputeFixedSize(function_);
unsigned input_frame_size = input_->GetFrameSize();
ASSERT(fixed_size + height_in_bytes == input_frame_size);
unsigned stack_slot_size = optimized_code_->stack_slots() * kPointerSize;
unsigned outgoing_height = data->ArgumentsStackHeight(bailout_id)->value();
unsigned outgoing_size = outgoing_height * kPointerSize;
unsigned output_frame_size = fixed_size + stack_slot_size + outgoing_size;
ASSERT(outgoing_size == 0); // OSR does not happen in the middle of a call.
if (FLAG_trace_osr) {
PrintF("[on-stack replacement: begin 0x%08" V8PRIxPTR " ",
reinterpret_cast<intptr_t>(function_));
function_->PrintName();
PrintF(" => node=%u, frame=%d->%d]\n",
ast_id,
input_frame_size,
output_frame_size);
}
// There's only one output frame in the OSR case.
output_count_ = 1;
output_ = new FrameDescription*[1];
output_[0] = new(output_frame_size) FrameDescription(
output_frame_size, function_);
#ifdef DEBUG
output_[0]->SetKind(Code::OPTIMIZED_FUNCTION);
#endif
// Clear the incoming parameters in the optimized frame to avoid
// confusing the garbage collector.
unsigned output_offset = output_frame_size - kPointerSize;
int parameter_count = function_->shared()->formal_parameter_count() + 1;
for (int i = 0; i < parameter_count; ++i) {
output_[0]->SetFrameSlot(output_offset, 0);
output_offset -= kPointerSize;
}
// Translate the incoming parameters. This may overwrite some of the
// incoming argument slots we've just cleared.
int input_offset = input_frame_size - kPointerSize;
bool ok = true;
int limit = input_offset - (parameter_count * kPointerSize);
while (ok && input_offset > limit) {
ok = DoOsrTranslateCommand(&iterator, &input_offset);
}
// There are no translation commands for the caller's pc and fp, the
// context, and the function. Set them up explicitly.
for (int i = StandardFrameConstants::kCallerPCOffset;
ok && i >= StandardFrameConstants::kMarkerOffset;
i -= kPointerSize) {
uint32_t input_value = input_->GetFrameSlot(input_offset);
if (FLAG_trace_osr) {
const char* name = "UNKNOWN";
switch (i) {
case StandardFrameConstants::kCallerPCOffset:
name = "caller's pc";
break;
case StandardFrameConstants::kCallerFPOffset:
name = "fp";
break;
case StandardFrameConstants::kContextOffset:
name = "context";
break;
case StandardFrameConstants::kMarkerOffset:
name = "function";
break;
}
PrintF(" [sp + %d] <- 0x%08x ; [sp + %d] (fixed part - %s)\n",
output_offset,
input_value,
input_offset,
name);
}
output_[0]->SetFrameSlot(output_offset, input_->GetFrameSlot(input_offset));
input_offset -= kPointerSize;
output_offset -= kPointerSize;
}
// Translate the rest of the frame.
while (ok && input_offset >= 0) {
ok = DoOsrTranslateCommand(&iterator, &input_offset);
}
// If translation of any command failed, continue using the input frame.
if (!ok) {
delete output_[0];
output_[0] = input_;
output_[0]->SetPc(reinterpret_cast<uint32_t>(from_));
} else {
// Set up the frame pointer and the context pointer.
output_[0]->SetRegister(fp.code(), input_->GetRegister(fp.code()));
output_[0]->SetRegister(cp.code(), input_->GetRegister(cp.code()));
unsigned pc_offset = data->OsrPcOffset()->value();
uint32_t pc = reinterpret_cast<uint32_t>(
optimized_code_->entry() + pc_offset);
output_[0]->SetPc(pc);
}
Code* continuation = isolate_->builtins()->builtin(Builtins::kNotifyOSR);
output_[0]->SetContinuation(
reinterpret_cast<uint32_t>(continuation->entry()));
if (FLAG_trace_osr) {
PrintF("[on-stack replacement translation %s: 0x%08" V8PRIxPTR " ",
ok ? "finished" : "aborted",
reinterpret_cast<intptr_t>(function));
function->PrintName();
PrintF(" => pc=0x%0x]\n", output_[0]->GetPc());
}
}
// This code is very similar to ia32/arm code, but relies on register names
// (fp, sp) and how the frame is laid out.
void Deoptimizer::DoComputeFrame(TranslationIterator* iterator,
int frame_index) {
-UNIMPLEMENTED();
-}
// Read the ast node id, function, and frame height for this output frame.
Translation::Opcode opcode =
static_cast<Translation::Opcode>(iterator->Next());
USE(opcode);
ASSERT(Translation::FRAME == opcode);
int node_id = iterator->Next();
JSFunction* function = JSFunction::cast(ComputeLiteral(iterator->Next()));
unsigned height = iterator->Next();
unsigned height_in_bytes = height * kPointerSize;
if (FLAG_trace_deopt) {
PrintF(" translating ");
function->PrintName();
PrintF(" => node=%d, height=%d\n", node_id, height_in_bytes);
}
// The 'fixed' part of the frame consists of the incoming parameters and
// the part described by JavaScriptFrameConstants.
unsigned fixed_frame_size = ComputeFixedSize(function);
unsigned input_frame_size = input_->GetFrameSize();
unsigned output_frame_size = height_in_bytes + fixed_frame_size;
// Allocate and store the output frame description.
FrameDescription* output_frame =
new(output_frame_size) FrameDescription(output_frame_size, function);
#ifdef DEBUG
output_frame->SetKind(Code::FUNCTION);
#endif
bool is_bottommost = (0 == frame_index);
bool is_topmost = (output_count_ - 1 == frame_index);
ASSERT(frame_index >= 0 && frame_index < output_count_);
ASSERT(output_[frame_index] == NULL);
output_[frame_index] = output_frame;
// The top address for the bottommost output frame can be computed from
// the input frame pointer and the output frame's height. For all
// subsequent output frames, it can be computed from the previous one's
// top address and the current frame's size.
uint32_t top_address;
if (is_bottommost) {
// 2 = context and function in the frame.
top_address =
input_->GetRegister(fp.code()) - (2 * kPointerSize) - height_in_bytes;
} else {
top_address = output_[frame_index - 1]->GetTop() - output_frame_size;
}
output_frame->SetTop(top_address);
// Compute the incoming parameter translation.
int parameter_count = function->shared()->formal_parameter_count() + 1;
unsigned output_offset = output_frame_size;
unsigned input_offset = input_frame_size;
for (int i = 0; i < parameter_count; ++i) {
output_offset -= kPointerSize;
DoTranslateCommand(iterator, frame_index, output_offset);
}
input_offset -= (parameter_count * kPointerSize);
// There are no translation commands for the caller's pc and fp, the
// context, and the function. Synthesize their values and set them up
// explicitly.
//
// The caller's pc for the bottommost output frame is the same as in the
// input frame. For all subsequent output frames, it can be read from the
// previous one. This frame's pc can be computed from the non-optimized
// function code and AST id of the bailout.
output_offset -= kPointerSize;
input_offset -= kPointerSize;
intptr_t value;
if (is_bottommost) {
value = input_->GetFrameSlot(input_offset);
} else {
value = output_[frame_index - 1]->GetPc();
}
output_frame->SetFrameSlot(output_offset, value);
if (FLAG_trace_deopt) {
PrintF(" 0x%08x: [top + %d] <- 0x%08x ; caller's pc\n",
top_address + output_offset, output_offset, value);
}
// The caller's frame pointer for the bottommost output frame is the same
// as in the input frame. For all subsequent output frames, it can be
// read from the previous one. Also compute and set this frame's frame
// pointer.
output_offset -= kPointerSize;
input_offset -= kPointerSize;
if (is_bottommost) {
value = input_->GetFrameSlot(input_offset);
} else {
value = output_[frame_index - 1]->GetFp();
}
output_frame->SetFrameSlot(output_offset, value);
intptr_t fp_value = top_address + output_offset;
ASSERT(!is_bottommost || input_->GetRegister(fp.code()) == fp_value);
output_frame->SetFp(fp_value);
if (is_topmost) {
output_frame->SetRegister(fp.code(), fp_value);
}
if (FLAG_trace_deopt) {
PrintF(" 0x%08x: [top + %d] <- 0x%08x ; caller's fp\n",
fp_value, output_offset, value);
}
// For the bottommost output frame the context can be gotten from the input
// frame. For all subsequent output frames it can be gotten from the function
// so long as we don't inline functions that need local contexts.
output_offset -= kPointerSize;
input_offset -= kPointerSize;
if (is_bottommost) {
value = input_->GetFrameSlot(input_offset);
} else {
value = reinterpret_cast<intptr_t>(function->context());
}
output_frame->SetFrameSlot(output_offset, value);
if (is_topmost) {
output_frame->SetRegister(cp.code(), value);
}
if (FLAG_trace_deopt) {
PrintF(" 0x%08x: [top + %d] <- 0x%08x ; context\n",
top_address + output_offset, output_offset, value);
}
// The function was mentioned explicitly in the BEGIN_FRAME.
output_offset -= kPointerSize;
input_offset -= kPointerSize;
value = reinterpret_cast<uint32_t>(function);
// The function for the bottommost output frame should also agree with the
// input frame.
ASSERT(!is_bottommost || input_->GetFrameSlot(input_offset) == value);
output_frame->SetFrameSlot(output_offset, value);
if (FLAG_trace_deopt) {
PrintF(" 0x%08x: [top + %d] <- 0x%08x ; function\n",
top_address + output_offset, output_offset, value);
}
// Translate the rest of the frame.
for (unsigned i = 0; i < height; ++i) {
output_offset -= kPointerSize;
DoTranslateCommand(iterator, frame_index, output_offset);
}
ASSERT(0 == output_offset);
// Compute this frame's PC, state, and continuation.
Code* non_optimized_code = function->shared()->code();
FixedArray* raw_data = non_optimized_code->deoptimization_data();
DeoptimizationOutputData* data = DeoptimizationOutputData::cast(raw_data);
Address start = non_optimized_code->instruction_start();
unsigned pc_and_state = GetOutputInfo(data, node_id, function->shared());
unsigned pc_offset = FullCodeGenerator::PcField::decode(pc_and_state);
uint32_t pc_value = reinterpret_cast<uint32_t>(start + pc_offset);
output_frame->SetPc(pc_value);
FullCodeGenerator::State state =
FullCodeGenerator::StateField::decode(pc_and_state);
output_frame->SetState(Smi::FromInt(state));
// Set the continuation for the topmost frame.
if (is_topmost && bailout_type_ != DEBUGGER) {
Builtins* builtins = isolate_->builtins();
Code* continuation = (bailout_type_ == EAGER)
? builtins->builtin(Builtins::kNotifyDeoptimized)
: builtins->builtin(Builtins::kNotifyLazyDeoptimized);
output_frame->SetContinuation(
reinterpret_cast<uint32_t>(continuation->entry()));
}
}
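For reference, a hedged sketch of the frame being synthesized above, from high addresses to low (offsets per StandardFrameConstants on this 32-bit target):

//   incoming parameters (parameter_count words, receiver included)
//   caller's pc
//   caller's fp         <- fp_value points here
//   context
//   function            <- the "2 = context and function" slots
//   locals / expression stack (height words)
//                       <- top_address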
void Deoptimizer::FillInputFrame(Address tos, JavaScriptFrame* frame) {
-UNIMPLEMENTED();
// Set the register values. The values are not important as there are no
// callee saved registers in JavaScript frames, so all registers are
// spilled. Registers fp and sp are set to the correct values though.
for (int i = 0; i < Register::kNumRegisters; i++) {
input_->SetRegister(i, i * 4);
}
input_->SetRegister(sp.code(), reinterpret_cast<intptr_t>(frame->sp()));
input_->SetRegister(fp.code(), reinterpret_cast<intptr_t>(frame->fp()));
for (int i = 0; i < DoubleRegister::kNumAllocatableRegisters; i++) {
input_->SetDoubleRegister(i, 0.0);
}
// Fill the frame content from the actual data on the frame.
for (unsigned i = 0; i < input_->GetFrameSize(); i += kPointerSize) {
input_->SetFrameSlot(i, Memory::uint32_at(tos + i));
}
}
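In effect, the slot loop above is a word-granular copy of the live stack into the FrameDescription; a hedged restatement:

// for (unsigned i = 0; i < frame_size; i += kPointerSize)
//   frame_content[i / kPointerSize] = *reinterpret_cast<uint32_t*>(tos + i);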
#define __ masm()->
// This code tries to be close to ia32 code so that any changes can be
// easily ported.
void Deoptimizer::EntryGenerator::Generate() {
-UNIMPLEMENTED();
GeneratePrologue();
Isolate* isolate = masm()->isolate();
CpuFeatures::Scope scope(FPU);
// Unlike on ARM we don't save all the registers, just the useful ones.
// For the rest, there are gaps on the stack, so the offsets remain the same.
const int kNumberOfRegisters = Register::kNumRegisters;
RegList restored_regs = kJSCallerSaved | kCalleeSaved;
RegList saved_regs = restored_regs | sp.bit() | ra.bit();
const int kDoubleRegsSize =
kDoubleSize * FPURegister::kNumAllocatableRegisters;
// Save all FPU registers before messing with them.
__ Subu(sp, sp, Operand(kDoubleRegsSize));
for (int i = 0; i < FPURegister::kNumAllocatableRegisters; ++i) {
FPURegister fpu_reg = FPURegister::FromAllocationIndex(i);
int offset = i * kDoubleSize;
__ sdc1(fpu_reg, MemOperand(sp, offset));
}
// Push saved_regs (needed to populate FrameDescription::registers_).
// Leave gaps for other registers.
__ Subu(sp, sp, kNumberOfRegisters * kPointerSize);
for (int16_t i = kNumberOfRegisters - 1; i >= 0; i--) {
if ((saved_regs & (1 << i)) != 0) {
__ sw(ToRegister(i), MemOperand(sp, kPointerSize * i));
}
}
const int kSavedRegistersAreaSize =
(kNumberOfRegisters * kPointerSize) + kDoubleRegsSize;
// Get the bailout id from the stack.
__ lw(a2, MemOperand(sp, kSavedRegistersAreaSize));
// Get the address of the location in the code object (a3) (the return
// address for lazy deoptimization), if possible, and compute the
// fp-to-sp delta in register t0.
if (type() == EAGER) {
__ mov(a3, zero_reg);
// Correct one word for bailout id.
__ Addu(t0, sp, Operand(kSavedRegistersAreaSize + (1 * kPointerSize)));
} else if (type() == OSR) {
__ mov(a3, ra);
// Correct one word for bailout id.
__ Addu(t0, sp, Operand(kSavedRegistersAreaSize + (1 * kPointerSize)));
} else {
__ mov(a3, ra);
// Correct two words for bailout id and return address.
__ Addu(t0, sp, Operand(kSavedRegistersAreaSize + (2 * kPointerSize)));
}
__ Subu(t0, fp, t0);
// Allocate a new deoptimizer object.
// Pass four arguments in a0 to a3 and fifth & sixth arguments on stack.
__ PrepareCallCFunction(6, t1);
__ lw(a0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
__ li(a1, Operand(type())); // bailout type,
// a2: bailout id already loaded.
// a3: code address or 0 already loaded.
__ sw(t0, CFunctionArgumentOperand(5)); // Fp-to-sp delta.
__ li(t1, Operand(ExternalReference::isolate_address()));
__ sw(t1, CFunctionArgumentOperand(6)); // Isolate.
// Call Deoptimizer::New().
{
AllowExternalCallThatCantCauseGC scope(masm());
__ CallCFunction(ExternalReference::new_deoptimizer_function(isolate), 6);
}
// Preserve "deoptimizer" object in register v0 and get the input
// frame descriptor pointer to a1 (deoptimizer->input_);
// Move deopt-obj to a0 for call to Deoptimizer::ComputeOutputFrames() below.
__ mov(a0, v0);
__ lw(a1, MemOperand(v0, Deoptimizer::input_offset()));
// Copy core registers into FrameDescription::registers_[kNumRegisters].
ASSERT(Register::kNumRegisters == kNumberOfRegisters);
for (int i = 0; i < kNumberOfRegisters; i++) {
int offset = (i * kPointerSize) + FrameDescription::registers_offset();
if ((saved_regs & (1 << i)) != 0) {
__ lw(a2, MemOperand(sp, i * kPointerSize));
__ sw(a2, MemOperand(a1, offset));
} else if (FLAG_debug_code) {
__ li(a2, kDebugZapValue);
__ sw(a2, MemOperand(a1, offset));
}
}
// Copy FPU registers to
// double_registers_[DoubleRegister::kNumAllocatableRegisters]
int double_regs_offset = FrameDescription::double_registers_offset();
for (int i = 0; i < FPURegister::kNumAllocatableRegisters; ++i) {
int dst_offset = i * kDoubleSize + double_regs_offset;
int src_offset = i * kDoubleSize + kNumberOfRegisters * kPointerSize;
__ ldc1(f0, MemOperand(sp, src_offset));
__ sdc1(f0, MemOperand(a1, dst_offset));
}
// Remove the bailout id, the return address (if any), and the saved
// registers from the stack.
if (type() == EAGER || type() == OSR) {
__ Addu(sp, sp, Operand(kSavedRegistersAreaSize + (1 * kPointerSize)));
} else {
__ Addu(sp, sp, Operand(kSavedRegistersAreaSize + (2 * kPointerSize)));
}
// Compute a pointer to the unwinding limit in register a2; that is
// the first stack slot not part of the input frame.
__ lw(a2, MemOperand(a1, FrameDescription::frame_size_offset()));
__ Addu(a2, a2, sp);
// Unwind the stack down to - but not including - the unwinding
// limit and copy the contents of the activation frame to the input
// frame description.
__ Addu(a3, a1, Operand(FrameDescription::frame_content_offset()));
Label pop_loop;
__ bind(&pop_loop);
__ pop(t0);
__ sw(t0, MemOperand(a3, 0));
__ Branch(USE_DELAY_SLOT, &pop_loop, ne, a2, Operand(sp));
__ addiu(a3, a3, sizeof(uint32_t)); // In delay slot.
// Compute the output frame in the deoptimizer.
__ push(a0); // Preserve deoptimizer object across call.
// a0: deoptimizer object; a1: scratch.
__ PrepareCallCFunction(1, a1);
// Call Deoptimizer::ComputeOutputFrames().
{
AllowExternalCallThatCantCauseGC scope(masm());
__ CallCFunction(
ExternalReference::compute_output_frames_function(isolate), 1);
}
__ pop(a0); // Restore deoptimizer object (class Deoptimizer).
// Replace the current (input) frame with the output frames.
Label outer_push_loop, inner_push_loop;
// Outer loop state: a0 = current "FrameDescription** output_",
// a1 = one past the last FrameDescription**.
__ lw(a1, MemOperand(a0, Deoptimizer::output_count_offset()));
__ lw(a0, MemOperand(a0, Deoptimizer::output_offset())); // a0 is output_.
__ sll(a1, a1, kPointerSizeLog2); // Count to offset.
__ addu(a1, a0, a1); // a1 = one past the last FrameDescription**.
__ bind(&outer_push_loop);
// Inner loop state: a2 = current FrameDescription*, a3 = loop index.
__ lw(a2, MemOperand(a0, 0)); // output_[ix]
__ lw(a3, MemOperand(a2, FrameDescription::frame_size_offset()));
__ bind(&inner_push_loop);
__ Subu(a3, a3, Operand(sizeof(uint32_t)));
__ Addu(t2, a2, Operand(a3));
__ lw(t3, MemOperand(t2, FrameDescription::frame_content_offset()));
__ push(t3);
__ Branch(&inner_push_loop, ne, a3, Operand(zero_reg));
__ Addu(a0, a0, Operand(kPointerSize));
__ Branch(&outer_push_loop, lt, a0, Operand(a1));
// Push state, pc, and continuation from the last output frame.
if (type() != OSR) {
__ lw(t2, MemOperand(a2, FrameDescription::state_offset()));
__ push(t2);
}
__ lw(t2, MemOperand(a2, FrameDescription::pc_offset()));
__ push(t2);
__ lw(t2, MemOperand(a2, FrameDescription::continuation_offset()));
__ push(t2);
// Technically restoring 'at' should work unless zero_reg is also restored
// but it's safer to check for this.
ASSERT(!(at.bit() & restored_regs));
// Restore the registers from the last output frame.
__ mov(at, a2);
for (int i = kNumberOfRegisters - 1; i >= 0; i--) {
int offset = (i * kPointerSize) + FrameDescription::registers_offset();
if ((restored_regs & (1 << i)) != 0) {
__ lw(ToRegister(i), MemOperand(at, offset));
}
}
// Set up the roots register.
ExternalReference roots_array_start =
ExternalReference::roots_array_start(isolate);
__ li(roots, Operand(roots_array_start));
__ pop(at); // Get continuation, leave pc on stack.
__ pop(ra);
__ Jump(at);
__ stop("Unreachable.");
}
void Deoptimizer::TableEntryGenerator::GeneratePrologue() {
-UNIMPLEMENTED();
Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm());
// Create a sequence of deoptimization entries. Note that any
// registers may be still live.
Label done;
for (int i = 0; i < count(); i++) {
int start = masm()->pc_offset();
USE(start);
if (type() != EAGER) {
// Emulate an ia32-like call by pushing the return address onto the stack.
__ push(ra);
}
__ li(at, Operand(i));
__ push(at);
__ Branch(&done);
// Pad the rest of the code.
while (table_entry_size_ > (masm()->pc_offset() - start)) {
__ nop();
}
ASSERT_EQ(table_entry_size_, masm()->pc_offset() - start);
}
__ bind(&done);
}
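Padding every entry to table_entry_size_ keeps the table uniform, so entry addresses and bailout ids convert by simple arithmetic, along these lines (hedged sketch mirroring the other ports):

// entry address for id:  base + id * table_entry_size_
// id for entry address:  (address - base) / table_entry_size_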
#undef __
} } // namespace v8::internal
(source diff too large to display; omitted)
......@@ -29,35 +29,398 @@
#define V8_MIPS_LITHIUM_CODEGEN_MIPS_H_
#include "mips/lithium-mips.h"
#include "mips/lithium-gap-resolver-mips.h"
#include "deoptimizer.h"
#include "safepoint-table.h"
#include "scopes.h"
// Note: this file was taken from the X64 version. ARM has a partially working
// lithium implementation, but for now it is not ported to mips.
namespace v8 {
namespace internal {
// Forward declarations.
class LDeferredCode;
class SafepointGenerator;
class LCodeGen BASE_EMBEDDED {
public:
-LCodeGen(LChunk* chunk, MacroAssembler* assembler, CompilationInfo* info) { }
LCodeGen(LChunk* chunk, MacroAssembler* assembler, CompilationInfo* info)
: chunk_(chunk),
masm_(assembler),
info_(info),
current_block_(-1),
current_instruction_(-1),
instructions_(chunk->instructions()),
deoptimizations_(4),
deopt_jump_table_(4),
deoptimization_literals_(8),
inlined_function_count_(0),
scope_(info->scope()),
status_(UNUSED),
deferred_(8),
osr_pc_offset_(-1),
resolver_(this),
expected_safepoint_kind_(Safepoint::kSimple) {
PopulateDeoptimizationLiteralsWithInlinedFunctions();
}
// Simple accessors.
MacroAssembler* masm() const { return masm_; }
CompilationInfo* info() const { return info_; }
Isolate* isolate() const { return info_->isolate(); }
Factory* factory() const { return isolate()->factory(); }
Heap* heap() const { return isolate()->heap(); }
// Support for converting LOperands to assembler types.
// LOperand must be a register.
Register ToRegister(LOperand* op) const;
// LOperand is loaded into scratch, unless already a register.
Register EmitLoadRegister(LOperand* op, Register scratch);
// LOperand must be a double register.
DoubleRegister ToDoubleRegister(LOperand* op) const;
// LOperand is loaded into dbl_scratch, unless already a double register.
DoubleRegister EmitLoadDoubleRegister(LOperand* op,
FloatRegister flt_scratch,
DoubleRegister dbl_scratch);
int ToInteger32(LConstantOperand* op) const;
double ToDouble(LConstantOperand* op) const;
Operand ToOperand(LOperand* op);
MemOperand ToMemOperand(LOperand* op) const;
// Returns a MemOperand pointing to the high word of a DoubleStackSlot.
MemOperand ToHighMemOperand(LOperand* op) const;
// Try to generate code for the entire chunk, but it may fail if the
// chunk contains constructs we cannot handle. Returns true if the
// code generation attempt succeeded.
-bool GenerateCode() {
-UNIMPLEMENTED();
-return false;
-}
+bool GenerateCode();
// Finish the code by setting stack height, safepoint, and bailout
// information on it.
-void FinishCode(Handle<Code> code) { UNIMPLEMENTED(); }
+void FinishCode(Handle<Code> code);
// Deferred code support.
template<int T>
void DoDeferredBinaryOpStub(LTemplateInstruction<1, 2, T>* instr,
Token::Value op);
void DoDeferredNumberTagD(LNumberTagD* instr);
void DoDeferredNumberTagI(LNumberTagI* instr);
void DoDeferredTaggedToI(LTaggedToI* instr);
void DoDeferredMathAbsTaggedHeapNumber(LUnaryMathOperation* instr);
void DoDeferredStackCheck(LStackCheck* instr);
void DoDeferredStringCharCodeAt(LStringCharCodeAt* instr);
void DoDeferredStringCharFromCode(LStringCharFromCode* instr);
void DoDeferredLInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
Label* map_check);
// Parallel move support.
void DoParallelMove(LParallelMove* move);
void DoGap(LGap* instr);
// Emit frame translation commands for an environment.
void WriteTranslation(LEnvironment* environment, Translation* translation);
// Declare methods that deal with the individual node types.
#define DECLARE_DO(type) void Do##type(L##type* node);
LITHIUM_CONCRETE_INSTRUCTION_LIST(DECLARE_DO)
#undef DECLARE_DO
private:
enum Status {
UNUSED,
GENERATING,
DONE,
ABORTED
};
bool is_unused() const { return status_ == UNUSED; }
bool is_generating() const { return status_ == GENERATING; }
bool is_done() const { return status_ == DONE; }
bool is_aborted() const { return status_ == ABORTED; }
StrictModeFlag strict_mode_flag() const {
return info()->strict_mode_flag();
}
LChunk* chunk() const { return chunk_; }
Scope* scope() const { return scope_; }
HGraph* graph() const { return chunk_->graph(); }
Register scratch0() { return lithiumScratchReg; }
Register scratch1() { return lithiumScratchReg2; }
DoubleRegister double_scratch0() { return lithiumScratchDouble; }
int GetNextEmittedBlock(int block);
LInstruction* GetNextInstruction();
void EmitClassOfTest(Label* if_true,
Label* if_false,
Handle<String> class_name,
Register input,
Register temporary,
Register temporary2);
int GetStackSlotCount() const { return chunk()->spill_slot_count(); }
int GetParameterCount() const { return scope()->num_parameters(); }
void Abort(const char* format, ...);
void Comment(const char* format, ...);
void AddDeferredCode(LDeferredCode* code) { deferred_.Add(code); }
// Code generation passes. Returns true if code generation should
// continue.
bool GeneratePrologue();
bool GenerateBody();
bool GenerateDeferredCode();
bool GenerateDeoptJumpTable();
bool GenerateSafepointTable();
enum SafepointMode {
RECORD_SIMPLE_SAFEPOINT,
RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS
};
void CallCode(Handle<Code> code,
RelocInfo::Mode mode,
LInstruction* instr);
void CallCodeGeneric(Handle<Code> code,
RelocInfo::Mode mode,
LInstruction* instr,
SafepointMode safepoint_mode);
void CallRuntime(const Runtime::Function* function,
int num_arguments,
LInstruction* instr);
void CallRuntime(Runtime::FunctionId id,
int num_arguments,
LInstruction* instr) {
const Runtime::Function* function = Runtime::FunctionForId(id);
CallRuntime(function, num_arguments, instr);
}
void CallRuntimeFromDeferred(Runtime::FunctionId id,
int argc,
LInstruction* instr);
// Generate a direct call to a known function. Expects the function
// to be in a1.
void CallKnownFunction(Handle<JSFunction> function,
int arity,
LInstruction* instr,
CallKind call_kind);
void LoadHeapObject(Register result, Handle<HeapObject> object);
void RegisterLazyDeoptimization(LInstruction* instr,
SafepointMode safepoint_mode);
void RegisterEnvironmentForDeoptimization(LEnvironment* environment);
void DeoptimizeIf(Condition cc,
LEnvironment* environment,
Register src1,
const Operand& src2);
void AddToTranslation(Translation* translation,
LOperand* op,
bool is_tagged);
void PopulateDeoptimizationData(Handle<Code> code);
int DefineDeoptimizationLiteral(Handle<Object> literal);
void PopulateDeoptimizationLiteralsWithInlinedFunctions();
Register ToRegister(int index) const;
DoubleRegister ToDoubleRegister(int index) const;
// Specific math operations - used from DoUnaryMathOperation.
void EmitIntegerMathAbs(LUnaryMathOperation* instr);
void DoMathAbs(LUnaryMathOperation* instr);
void DoMathFloor(LUnaryMathOperation* instr);
void DoMathRound(LUnaryMathOperation* instr);
void DoMathSqrt(LUnaryMathOperation* instr);
void DoMathPowHalf(LUnaryMathOperation* instr);
void DoMathLog(LUnaryMathOperation* instr);
void DoMathCos(LUnaryMathOperation* instr);
void DoMathSin(LUnaryMathOperation* instr);
// Support for recording safepoint and position information.
void RecordSafepoint(LPointerMap* pointers,
Safepoint::Kind kind,
int arguments,
int deoptimization_index);
void RecordSafepoint(LPointerMap* pointers, int deoptimization_index);
void RecordSafepoint(int deoptimization_index);
void RecordSafepointWithRegisters(LPointerMap* pointers,
int arguments,
int deoptimization_index);
void RecordSafepointWithRegistersAndDoubles(LPointerMap* pointers,
int arguments,
int deoptimization_index);
void RecordPosition(int position);
int LastSafepointEnd() {
return static_cast<int>(safepoints_.GetPcAfterGap());
}
static Condition TokenToCondition(Token::Value op, bool is_unsigned);
void EmitGoto(int block);
void EmitBranch(int left_block,
int right_block,
Condition cc,
Register src1,
const Operand& src2);
void EmitBranchF(int left_block,
int right_block,
Condition cc,
FPURegister src1,
FPURegister src2);
void EmitCmpI(LOperand* left, LOperand* right);
void EmitNumberUntagD(Register input,
DoubleRegister result,
bool deoptimize_on_undefined,
LEnvironment* env);
// Emits optimized code for typeof x == "y". Modifies input register.
// Returns the condition on which a final split to
// true and false label should be made, to optimize fallthrough.
// Returns two registers in cmp1 and cmp2 that can be used in the
// Branch instruction after EmitTypeofIs.
Condition EmitTypeofIs(Label* true_label,
Label* false_label,
Register input,
Handle<String> type_name,
Register& cmp1,
Operand& cmp2);
// Emits optimized code for %_IsObject(x). Preserves input register.
// Returns the condition on which a final split to
// true and false label should be made, to optimize fallthrough.
Condition EmitIsObject(Register input,
Register temp1,
Label* is_not_object,
Label* is_object);
// Emits optimized code for %_IsConstructCall().
// Caller should branch on equal condition.
void EmitIsConstructCall(Register temp1, Register temp2);
void EmitLoadFieldOrConstantFunction(Register result,
Register object,
Handle<Map> type,
Handle<String> name);
struct JumpTableEntry {
explicit inline JumpTableEntry(Address entry)
: label(),
address(entry) { }
Label label;
Address address;
};
LChunk* const chunk_;
MacroAssembler* const masm_;
CompilationInfo* const info_;
int current_block_;
int current_instruction_;
const ZoneList<LInstruction*>* instructions_;
ZoneList<LEnvironment*> deoptimizations_;
ZoneList<JumpTableEntry> deopt_jump_table_;
ZoneList<Handle<Object> > deoptimization_literals_;
int inlined_function_count_;
Scope* const scope_;
Status status_;
TranslationBuffer translations_;
ZoneList<LDeferredCode*> deferred_;
int osr_pc_offset_;
// Builder that keeps track of safepoints in the code. The table
// itself is emitted at the end of the generated code.
SafepointTableBuilder safepoints_;
// Compiles a set of parallel moves into a sequential list of moves.
LGapResolver resolver_;
Safepoint::Kind expected_safepoint_kind_;
class PushSafepointRegistersScope BASE_EMBEDDED {
public:
PushSafepointRegistersScope(LCodeGen* codegen,
Safepoint::Kind kind)
: codegen_(codegen) {
ASSERT(codegen_->expected_safepoint_kind_ == Safepoint::kSimple);
codegen_->expected_safepoint_kind_ = kind;
switch (codegen_->expected_safepoint_kind_) {
case Safepoint::kWithRegisters:
codegen_->masm_->PushSafepointRegisters();
break;
case Safepoint::kWithRegistersAndDoubles:
codegen_->masm_->PushSafepointRegistersAndDoubles();
break;
default:
UNREACHABLE();
}
}
~PushSafepointRegistersScope() {
Safepoint::Kind kind = codegen_->expected_safepoint_kind_;
ASSERT((kind & Safepoint::kWithRegisters) != 0);
switch (kind) {
case Safepoint::kWithRegisters:
codegen_->masm_->PopSafepointRegisters();
break;
case Safepoint::kWithRegistersAndDoubles:
codegen_->masm_->PopSafepointRegistersAndDoubles();
break;
default:
UNREACHABLE();
}
codegen_->expected_safepoint_kind_ = Safepoint::kSimple;
}
private:
LCodeGen* codegen_;
};
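A hedged usage sketch of the RAII scope above: it brackets code emitting a call whose safepoint must record register contents, and restores the expected safepoint kind on exit.

//   {
//     PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
//     ...emit the call and record its safepoint with registers...
//   }  // registers popped; expected kind back to Safepoint::kSimple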
friend class LDeferredCode;
friend class LEnvironment;
friend class SafepointGenerator;
DISALLOW_COPY_AND_ASSIGN(LCodeGen);
};
class LDeferredCode: public ZoneObject {
public:
explicit LDeferredCode(LCodeGen* codegen)
: codegen_(codegen),
external_exit_(NULL),
instruction_index_(codegen->current_instruction_) {
codegen->AddDeferredCode(this);
}
virtual ~LDeferredCode() { }
virtual void Generate() = 0;
virtual LInstruction* instr() = 0;
void SetExit(Label *exit) { external_exit_ = exit; }
Label* entry() { return &entry_; }
Label* exit() { return external_exit_ != NULL ? external_exit_ : &exit_; }
int instruction_index() const { return instruction_index_; }
protected:
LCodeGen* codegen() const { return codegen_; }
MacroAssembler* masm() const { return codegen_->masm(); }
private:
LCodeGen* codegen_;
Label entry_;
Label exit_;
Label* external_exit_;
int instruction_index_;
};
} } // namespace v8::internal
......
// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "v8.h"
#include "mips/lithium-gap-resolver-mips.h"
#include "mips/lithium-codegen-mips.h"
namespace v8 {
namespace internal {
static const Register kSavedValueRegister = lithiumScratchReg;
static const DoubleRegister kSavedDoubleValueRegister = lithiumScratchDouble;
LGapResolver::LGapResolver(LCodeGen* owner)
: cgen_(owner),
moves_(32),
root_index_(0),
in_cycle_(false),
saved_destination_(NULL) {}
void LGapResolver::Resolve(LParallelMove* parallel_move) {
ASSERT(moves_.is_empty());
// Build up a worklist of moves.
BuildInitialMoveList(parallel_move);
for (int i = 0; i < moves_.length(); ++i) {
LMoveOperands move = moves_[i];
// Skip constants to perform them last. They don't block other moves
// and skipping such moves with register destinations keeps those
// registers free for the whole algorithm.
if (!move.IsEliminated() && !move.source()->IsConstantOperand()) {
root_index_ = i;  // Any cycle is found by reaching this move again.
PerformMove(i);
if (in_cycle_) {
RestoreValue();
}
}
}
// Perform the moves with constant sources.
for (int i = 0; i < moves_.length(); ++i) {
if (!moves_[i].IsEliminated()) {
ASSERT(moves_[i].source()->IsConstantOperand());
EmitMove(i);
}
}
moves_.Rewind(0);
}
void LGapResolver::BuildInitialMoveList(LParallelMove* parallel_move) {
// Perform a linear sweep of the moves to add them to the initial list of
// moves to perform, ignoring any move that is redundant (the source is
// the same as the destination, the destination is ignored and
// unallocated, or the move was already eliminated).
const ZoneList<LMoveOperands>* moves = parallel_move->move_operands();
for (int i = 0; i < moves->length(); ++i) {
LMoveOperands move = moves->at(i);
if (!move.IsRedundant()) moves_.Add(move);
}
Verify();
}
void LGapResolver::PerformMove(int index) {
// Each call to this function performs a move and deletes it from the move
// graph. We first recursively perform any move blocking this one. We
// mark a move as "pending" on entry to PerformMove in order to detect
// cycles in the move graph.
// We can only find a cycle, when doing a depth-first traversal of moves,
// by encountering the starting move again. So by spilling the source of
// the starting move, we break the cycle. All moves are then unblocked,
// and the starting move is completed by writing the spilled value to
// its destination. All other moves from the spilled source have been
// completed prior to breaking the cycle.
// An additional complication is that moves to MemOperands with large
// offsets (more than 1K or 4K) require us to spill the already-spilled
// value to the stack, to free up the register.
ASSERT(!moves_[index].IsPending());
ASSERT(!moves_[index].IsRedundant());
// Clear this move's destination to indicate a pending move. The actual
// destination is saved in a stack allocated local. Multiple moves can
// be pending because this function is recursive.
ASSERT(moves_[index].source() != NULL); // Or else it will look eliminated.
LOperand* destination = moves_[index].destination();
moves_[index].set_destination(NULL);
// Perform a depth-first traversal of the move graph to resolve
// dependencies. Any unperformed, unpending move with a source the same
// as this one's destination blocks this one so recursively perform all
// such moves.
for (int i = 0; i < moves_.length(); ++i) {
LMoveOperands other_move = moves_[i];
if (other_move.Blocks(destination) && !other_move.IsPending()) {
PerformMove(i);
// If there is a blocking, pending move it must be moves_[root_index_]
// and all other moves with the same source as moves_[root_index_] are
// successfully executed (because they are cycle-free) by this loop.
}
}
// We are about to resolve this move and don't need it marked as
// pending, so restore its destination.
moves_[index].set_destination(destination);
// The move may be blocked on a pending move, which must be the starting move.
// In this case, we have a cycle, and we save the source of this move to
// a scratch register to break it.
LMoveOperands other_move = moves_[root_index_];
if (other_move.Blocks(destination)) {
ASSERT(other_move.IsPending());
BreakCycle(index);
return;
}
// This move is no longer blocked.
EmitMove(index);
}
void LGapResolver::Verify() {
#ifdef ENABLE_SLOW_ASSERTS
// No operand should be the destination for more than one move.
for (int i = 0; i < moves_.length(); ++i) {
LOperand* destination = moves_[i].destination();
for (int j = i + 1; j < moves_.length(); ++j) {
SLOW_ASSERT(!destination->Equals(moves_[j].destination()));
}
}
#endif
}
#define __ ACCESS_MASM(cgen_->masm())
void LGapResolver::BreakCycle(int index) {
// We save in a register the value that should end up in the source of
// moves_[root_index]. After performing all moves in the tree rooted
// in that move, we save the value to that source.
ASSERT(moves_[index].destination()->Equals(moves_[root_index_].source()));
ASSERT(!in_cycle_);
in_cycle_ = true;
LOperand* source = moves_[index].source();
saved_destination_ = moves_[index].destination();
if (source->IsRegister()) {
__ mov(kSavedValueRegister, cgen_->ToRegister(source));
} else if (source->IsStackSlot()) {
__ lw(kSavedValueRegister, cgen_->ToMemOperand(source));
} else if (source->IsDoubleRegister()) {
__ mov_d(kSavedDoubleValueRegister, cgen_->ToDoubleRegister(source));
} else if (source->IsDoubleStackSlot()) {
__ ldc1(kSavedDoubleValueRegister, cgen_->ToMemOperand(source));
} else {
UNREACHABLE();
}
// This move will be done by restoring the saved value to the destination.
moves_[index].Eliminate();
}
void LGapResolver::RestoreValue() {
ASSERT(in_cycle_);
ASSERT(saved_destination_ != NULL);
// Spilled value is in kSavedValueRegister or kSavedDoubleValueRegister.
if (saved_destination_->IsRegister()) {
__ mov(cgen_->ToRegister(saved_destination_), kSavedValueRegister);
} else if (saved_destination_->IsStackSlot()) {
__ sw(kSavedValueRegister, cgen_->ToMemOperand(saved_destination_));
} else if (saved_destination_->IsDoubleRegister()) {
__ mov_d(cgen_->ToDoubleRegister(saved_destination_),
kSavedDoubleValueRegister);
} else if (saved_destination_->IsDoubleStackSlot()) {
__ sdc1(kSavedDoubleValueRegister,
cgen_->ToMemOperand(saved_destination_));
} else {
UNREACHABLE();
}
in_cycle_ = false;
saved_destination_ = NULL;
}
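A self-contained hedged sketch of the cycle-breaking scheme implemented by Resolve, PerformMove, BreakCycle and RestoreValue above, using plain integers as "registers" and one scratch slot in place of kSavedValueRegister; all names are illustrative, not V8 API.

#include <cstdio>
#include <vector>

struct Move { int src, dst; bool pending = false, done = false; };

static int regs[4] = {10, 20, 30, 40};
static int scratch;                 // plays kSavedValueRegister
static int saved_destination = -1;  // plays saved_destination_

static void PerformMove(std::vector<Move>& moves, size_t index, size_t root) {
  moves[index].pending = true;
  // Recursively perform every move that still reads the register this
  // move is about to overwrite.
  for (size_t i = 0; i < moves.size(); ++i) {
    if (!moves[i].done && !moves[i].pending &&
        moves[i].src == moves[index].dst) {
      PerformMove(moves, i, root);
    }
  }
  moves[index].pending = false;
  // If the root move still blocks us, we have closed a cycle: save our
  // source in the scratch slot; the restore step finishes the move later.
  if (index != root && moves[root].pending &&
      moves[root].src == moves[index].dst) {
    scratch = regs[moves[index].src];
    saved_destination = moves[index].dst;
    moves[index].done = true;  // "eliminated", like BreakCycle()
    return;
  }
  regs[moves[index].dst] = regs[moves[index].src];
  moves[index].done = true;
}

static void Resolve(std::vector<Move>& moves) {
  for (size_t i = 0; i < moves.size(); ++i) {
    if (!moves[i].done) {
      saved_destination = -1;
      PerformMove(moves, i, /*root=*/i);
      // RestoreValue(): write the spilled value to its destination.
      if (saved_destination >= 0) regs[saved_destination] = scratch;
    }
  }
}

int main() {
  // Swap r0 and r1 (a two-move cycle) while also copying r2 into r3.
  std::vector<Move> moves = {{0, 1}, {1, 0}, {2, 3}};
  Resolve(moves);
  std::printf("%d %d %d %d\n", regs[0], regs[1], regs[2], regs[3]);
  // Prints: 20 10 30 30
  return 0;
}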
void LGapResolver::EmitMove(int index) {
LOperand* source = moves_[index].source();
LOperand* destination = moves_[index].destination();
// Dispatch on the source and destination operand kinds. Not all
// combinations are possible.
if (source->IsRegister()) {
Register source_register = cgen_->ToRegister(source);
if (destination->IsRegister()) {
__ mov(cgen_->ToRegister(destination), source_register);
} else {
ASSERT(destination->IsStackSlot());
__ sw(source_register, cgen_->ToMemOperand(destination));
}
} else if (source->IsStackSlot()) {
MemOperand source_operand = cgen_->ToMemOperand(source);
if (destination->IsRegister()) {
__ lw(cgen_->ToRegister(destination), source_operand);
} else {
ASSERT(destination->IsStackSlot());
MemOperand destination_operand = cgen_->ToMemOperand(destination);
if (in_cycle_) {
if (!destination_operand.OffsetIsInt16Encodable()) {
// 'at' is overwritten while saving the value to the destination.
// Therefore we can't use 'at'. It is OK if the read from the source
// destroys 'at', since that happens before the value is read.
// This uses only a single reg of the double reg-pair.
__ lwc1(kSavedDoubleValueRegister, source_operand);
__ swc1(kSavedDoubleValueRegister, destination_operand);
} else {
__ lw(at, source_operand);
__ sw(at, destination_operand);
}
} else {
__ lw(kSavedValueRegister, source_operand);
__ sw(kSavedValueRegister, destination_operand);
}
}
} else if (source->IsConstantOperand()) {
Operand source_operand = cgen_->ToOperand(source);
if (destination->IsRegister()) {
__ li(cgen_->ToRegister(destination), source_operand);
} else {
ASSERT(destination->IsStackSlot());
ASSERT(!in_cycle_); // Constant moves happen after all cycles are gone.
MemOperand destination_operand = cgen_->ToMemOperand(destination);
__ li(kSavedValueRegister, source_operand);
__ sw(kSavedValueRegister, destination_operand);
}
} else if (source->IsDoubleRegister()) {
DoubleRegister source_register = cgen_->ToDoubleRegister(source);
if (destination->IsDoubleRegister()) {
__ mov_d(cgen_->ToDoubleRegister(destination), source_register);
} else {
ASSERT(destination->IsDoubleStackSlot());
MemOperand destination_operand = cgen_->ToMemOperand(destination);
__ sdc1(source_register, destination_operand);
}
} else if (source->IsDoubleStackSlot()) {
MemOperand source_operand = cgen_->ToMemOperand(source);
if (destination->IsDoubleRegister()) {
__ ldc1(cgen_->ToDoubleRegister(destination), source_operand);
} else {
ASSERT(destination->IsDoubleStackSlot());
MemOperand destination_operand = cgen_->ToMemOperand(destination);
if (in_cycle_) {
// kSavedDoubleValueRegister was used to break the cycle,
// but kSavedValueRegister is free.
MemOperand source_high_operand =
cgen_->ToHighMemOperand(source);
MemOperand destination_high_operand =
cgen_->ToHighMemOperand(destination);
__ lw(kSavedValueRegister, source_operand);
__ sw(kSavedValueRegister, destination_operand);
__ lw(kSavedValueRegister, source_high_operand);
__ sw(kSavedValueRegister, destination_high_operand);
} else {
__ ldc1(kSavedDoubleValueRegister, source_operand);
__ sdc1(kSavedDoubleValueRegister, destination_operand);
}
}
} else {
UNREACHABLE();
}
moves_[index].Eliminate();
}
#undef __
} } // namespace v8::internal
// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef V8_MIPS_LITHIUM_GAP_RESOLVER_MIPS_H_
#define V8_MIPS_LITHIUM_GAP_RESOLVER_MIPS_H_
#include "v8.h"
#include "lithium.h"
namespace v8 {
namespace internal {
class LCodeGen;
class LGapResolver BASE_EMBEDDED {
public:
explicit LGapResolver(LCodeGen* owner);
// Resolve a set of parallel moves, emitting assembler instructions.
void Resolve(LParallelMove* parallel_move);
private:
// Build the initial list of moves.
void BuildInitialMoveList(LParallelMove* parallel_move);
// Perform the move at the moves_ index in question (possibly requiring
// other moves to satisfy dependencies).
void PerformMove(int index);
// If a cycle is found in the series of moves, save the blocking value to
// a scratch register. The cycle must be found by hitting the root of the
// depth-first search. (A standalone sketch of this algorithm follows the
// header.)
void BreakCycle(int index);
// After a cycle has been resolved, restore the value from the scratch
// register to its proper destination.
void RestoreValue();
// Emit a move and remove it from the move graph.
void EmitMove(int index);
// Verify the move list before performing moves.
void Verify();
LCodeGen* cgen_;
// List of moves not yet resolved.
ZoneList<LMoveOperands> moves_;
int root_index_;
bool in_cycle_;
LOperand* saved_destination_;
};
} } // namespace v8::internal
#endif // V8_MIPS_LITHIUM_GAP_RESOLVER_MIPS_H_
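// ---------------------------------------------------------------------------
// Illustrative aside (not part of the original file): the class above
// implements the standard parallel-move resolution algorithm: emit moves
// depth-first, and when the traversal reaches its own root again, a cycle
// exists and the blocked value is saved to a scratch location. The
// standalone C++ sketch below shows the same idea reduced to integer
// "slots"; the names (Move, PerformMove) mirror the header, but this is a
// simplification, not V8's implementation.
// ---------------------------------------------------------------------------
#include <cassert>
#include <cstdio>
#include <vector>

struct Move { int src; int dst; bool eliminated; };

// Perform moves[index], first performing any move that reads from its
// destination. If the depth-first walk returns to 'root', break the cycle
// by saving the blocked value to a scratch slot (cf. BreakCycle above).
static void PerformMove(std::vector<Move>& moves, std::vector<int>& slots,
                        int index, int root, int* scratch, int* scratch_src) {
  Move& m = moves[index];
  for (int i = 0; i < static_cast<int>(moves.size()); ++i) {
    Move& other = moves[i];
    if (other.eliminated || i == index) continue;
    if (other.src == m.dst) {
      if (i == root) {
        *scratch = slots[other.src];  // break the cycle through scratch
        *scratch_src = other.src;
      } else {
        PerformMove(moves, slots, i, root, scratch, scratch_src);
      }
    }
  }
  // Emit the move; if the source was saved to scratch, read it from there
  // instead (cf. RestoreValue above).
  slots[m.dst] = (m.src == *scratch_src) ? *scratch : slots[m.src];
  m.eliminated = true;
}

int main() {
  // A two-move cycle (a swap), the case that forces BreakCycle.
  std::vector<int> slots = {10, 20};
  std::vector<Move> moves = {{0, 1, false}, {1, 0, false}};
  int scratch = 0;
  for (int i = 0; i < static_cast<int>(moves.size()); ++i) {
    if (!moves[i].eliminated) {
      int scratch_src = -1;
      PerformMove(moves, slots, i, i, &scratch, &scratch_src);
    }
  }
  assert(slots[0] == 20 && slots[1] == 10);  // values swapped
  std::printf("swapped: %d %d\n", slots[0], slots[1]);
  return 0;
}
// ---------------------------------------------------------------------------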
// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "v8.h"
#include "lithium-allocator-inl.h"
#include "mips/lithium-mips.h"
#include "mips/lithium-codegen-mips.h"
namespace v8 {
namespace internal {
#define DEFINE_COMPILE(type) \
void L##type::CompileToNative(LCodeGen* generator) { \
generator->Do##type(this); \
}
LITHIUM_CONCRETE_INSTRUCTION_LIST(DEFINE_COMPILE)
#undef DEFINE_COMPILE
LOsrEntry::LOsrEntry() {
for (int i = 0; i < Register::kNumAllocatableRegisters; ++i) {
register_spills_[i] = NULL;
}
for (int i = 0; i < DoubleRegister::kNumAllocatableRegisters; ++i) {
double_register_spills_[i] = NULL;
}
}
void LOsrEntry::MarkSpilledRegister(int allocation_index,
LOperand* spill_operand) {
ASSERT(spill_operand->IsStackSlot());
ASSERT(register_spills_[allocation_index] == NULL);
register_spills_[allocation_index] = spill_operand;
}
#ifdef DEBUG
void LInstruction::VerifyCall() {
// Call instructions can use only fixed registers as temporaries and
// outputs because all registers are blocked by the calling convention.
// Input operands must use a fixed register, a use-at-start policy, or
// a non-register policy.
ASSERT(Output() == NULL ||
LUnallocated::cast(Output())->HasFixedPolicy() ||
!LUnallocated::cast(Output())->HasRegisterPolicy());
for (UseIterator it(this); !it.Done(); it.Advance()) {
LUnallocated* operand = LUnallocated::cast(it.Current());
ASSERT(operand->HasFixedPolicy() ||
operand->IsUsedAtStart());
}
for (TempIterator it(this); !it.Done(); it.Advance()) {
LUnallocated* operand = LUnallocated::cast(it.Current());
ASSERT(operand->HasFixedPolicy() || !operand->HasRegisterPolicy());
}
}
#endif
void LOsrEntry::MarkSpilledDoubleRegister(int allocation_index,
LOperand* spill_operand) {
ASSERT(spill_operand->IsDoubleStackSlot());
ASSERT(double_register_spills_[allocation_index] == NULL);
double_register_spills_[allocation_index] = spill_operand;
}
void LInstruction::PrintTo(StringStream* stream) {
stream->Add("%s ", this->Mnemonic());
PrintOutputOperandTo(stream);
PrintDataTo(stream);
if (HasEnvironment()) {
stream->Add(" ");
environment()->PrintTo(stream);
}
if (HasPointerMap()) {
stream->Add(" ");
pointer_map()->PrintTo(stream);
}
}
template<int R, int I, int T>
void LTemplateInstruction<R, I, T>::PrintDataTo(StringStream* stream) {
stream->Add("= ");
for (int i = 0; i < inputs_.length(); i++) {
if (i > 0) stream->Add(" ");
inputs_[i]->PrintTo(stream);
}
}
template<int R, int I, int T>
void LTemplateInstruction<R, I, T>::PrintOutputOperandTo(StringStream* stream) {
for (int i = 0; i < results_.length(); i++) {
if (i > 0) stream->Add(" ");
results_[i]->PrintTo(stream);
}
}
void LLabel::PrintDataTo(StringStream* stream) {
LGap::PrintDataTo(stream);
LLabel* rep = replacement();
if (rep != NULL) {
stream->Add(" Dead block replaced with B%d", rep->block_id());
}
}
bool LGap::IsRedundant() const {
for (int i = 0; i < 4; i++) {
if (parallel_moves_[i] != NULL && !parallel_moves_[i]->IsRedundant()) {
return false;
}
}
return true;
}
void LGap::PrintDataTo(StringStream* stream) {
for (int i = 0; i < 4; i++) {
stream->Add("(");
if (parallel_moves_[i] != NULL) {
parallel_moves_[i]->PrintDataTo(stream);
}
stream->Add(") ");
}
}
const char* LArithmeticD::Mnemonic() const {
switch (op()) {
case Token::ADD: return "add-d";
case Token::SUB: return "sub-d";
case Token::MUL: return "mul-d";
case Token::DIV: return "div-d";
case Token::MOD: return "mod-d";
default:
UNREACHABLE();
return NULL;
}
}
const char* LArithmeticT::Mnemonic() const {
switch (op()) {
case Token::ADD: return "add-t";
case Token::SUB: return "sub-t";
case Token::MUL: return "mul-t";
case Token::MOD: return "mod-t";
case Token::DIV: return "div-t";
case Token::BIT_AND: return "bit-and-t";
case Token::BIT_OR: return "bit-or-t";
case Token::BIT_XOR: return "bit-xor-t";
case Token::SHL: return "sll-t";
case Token::SAR: return "sra-t";
case Token::SHR: return "srl-t";
default:
UNREACHABLE();
return NULL;
}
}
void LGoto::PrintDataTo(StringStream* stream) {
stream->Add("B%d", block_id());
}
void LBranch::PrintDataTo(StringStream* stream) {
stream->Add("B%d | B%d on ", true_block_id(), false_block_id());
InputAt(0)->PrintTo(stream);
}
void LCmpIDAndBranch::PrintDataTo(StringStream* stream) {
stream->Add("if ");
InputAt(0)->PrintTo(stream);
stream->Add(" %s ", Token::String(op()));
InputAt(1)->PrintTo(stream);
stream->Add(" then B%d else B%d", true_block_id(), false_block_id());
}
void LIsNilAndBranch::PrintDataTo(StringStream* stream) {
stream->Add("if ");
InputAt(0)->PrintTo(stream);
stream->Add(kind() == kStrictEquality ? " === " : " == ");
stream->Add(nil() == kNullValue ? "null" : "undefined");
stream->Add(" then B%d else B%d", true_block_id(), false_block_id());
}
void LIsObjectAndBranch::PrintDataTo(StringStream* stream) {
stream->Add("if is_object(");
InputAt(0)->PrintTo(stream);
stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
}
void LIsSmiAndBranch::PrintDataTo(StringStream* stream) {
stream->Add("if is_smi(");
InputAt(0)->PrintTo(stream);
stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
}
void LIsUndetectableAndBranch::PrintDataTo(StringStream* stream) {
stream->Add("if is_undetectable(");
InputAt(0)->PrintTo(stream);
stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
}
void LHasInstanceTypeAndBranch::PrintDataTo(StringStream* stream) {
stream->Add("if has_instance_type(");
InputAt(0)->PrintTo(stream);
stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
}
void LHasCachedArrayIndexAndBranch::PrintDataTo(StringStream* stream) {
stream->Add("if has_cached_array_index(");
InputAt(0)->PrintTo(stream);
stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
}
void LClassOfTestAndBranch::PrintDataTo(StringStream* stream) {
stream->Add("if class_of_test(");
InputAt(0)->PrintTo(stream);
stream->Add(", \"%o\") then B%d else B%d",
*hydrogen()->class_name(),
true_block_id(),
false_block_id());
}
void LTypeofIsAndBranch::PrintDataTo(StringStream* stream) {
stream->Add("if typeof ");
InputAt(0)->PrintTo(stream);
stream->Add(" == \"%s\" then B%d else B%d",
*hydrogen()->type_literal()->ToCString(),
true_block_id(), false_block_id());
}
void LCallConstantFunction::PrintDataTo(StringStream* stream) {
stream->Add("#%d / ", arity());
}
void LUnaryMathOperation::PrintDataTo(StringStream* stream) {
stream->Add("/%s ", hydrogen()->OpName());
InputAt(0)->PrintTo(stream);
}
void LLoadContextSlot::PrintDataTo(StringStream* stream) {
InputAt(0)->PrintTo(stream);
stream->Add("[%d]", slot_index());
}
void LStoreContextSlot::PrintDataTo(StringStream* stream) {
InputAt(0)->PrintTo(stream);
stream->Add("[%d] <- ", slot_index());
InputAt(1)->PrintTo(stream);
}
void LInvokeFunction::PrintDataTo(StringStream* stream) {
stream->Add("= ");
InputAt(0)->PrintTo(stream);
stream->Add(" #%d / ", arity());
}
void LCallKeyed::PrintDataTo(StringStream* stream) {
stream->Add("[a2] #%d / ", arity());
}
void LCallNamed::PrintDataTo(StringStream* stream) {
SmartArrayPointer<char> name_string = name()->ToCString();
stream->Add("%s #%d / ", *name_string, arity());
}
void LCallGlobal::PrintDataTo(StringStream* stream) {
SmartArrayPointer<char> name_string = name()->ToCString();
stream->Add("%s #%d / ", *name_string, arity());
}
void LCallKnownGlobal::PrintDataTo(StringStream* stream) {
stream->Add("#%d / ", arity());
}
void LCallNew::PrintDataTo(StringStream* stream) {
stream->Add("= ");
InputAt(0)->PrintTo(stream);
stream->Add(" #%d / ", arity());
}
void LAccessArgumentsAt::PrintDataTo(StringStream* stream) {
arguments()->PrintTo(stream);
stream->Add(" length ");
length()->PrintTo(stream);
stream->Add(" index ");
index()->PrintTo(stream);
}
void LStoreNamedField::PrintDataTo(StringStream* stream) {
object()->PrintTo(stream);
stream->Add(".");
stream->Add(*String::cast(*name())->ToCString());
stream->Add(" <- ");
value()->PrintTo(stream);
}
void LStoreNamedGeneric::PrintDataTo(StringStream* stream) {
object()->PrintTo(stream);
stream->Add(".");
stream->Add(*String::cast(*name())->ToCString());
stream->Add(" <- ");
value()->PrintTo(stream);
}
void LStoreKeyedFastElement::PrintDataTo(StringStream* stream) {
object()->PrintTo(stream);
stream->Add("[");
key()->PrintTo(stream);
stream->Add("] <- ");
value()->PrintTo(stream);
}
void LStoreKeyedFastDoubleElement::PrintDataTo(StringStream* stream) {
elements()->PrintTo(stream);
stream->Add("[");
key()->PrintTo(stream);
stream->Add("] <- ");
value()->PrintTo(stream);
}
void LStoreKeyedGeneric::PrintDataTo(StringStream* stream) {
object()->PrintTo(stream);
stream->Add("[");
key()->PrintTo(stream);
stream->Add("] <- ");
value()->PrintTo(stream);
}
void LTransitionElementsKind::PrintDataTo(StringStream* stream) {
object()->PrintTo(stream);
stream->Add(" %p -> %p", *original_map(), *transitioned_map());
}
LChunk::LChunk(CompilationInfo* info, HGraph* graph)
: spill_slot_count_(0),
info_(info),
graph_(graph),
instructions_(32),
pointer_maps_(8),
inlined_closures_(1) {
}
int LChunk::GetNextSpillIndex(bool is_double) {
// Skip a slot for a double-width slot, so that a double occupies two
// consecutive slots (see the trace after GetNextSpillSlot below).
if (is_double) spill_slot_count_++;
return spill_slot_count_++;
}
LOperand* LChunk::GetNextSpillSlot(bool is_double) {
int index = GetNextSpillIndex(is_double);
if (is_double) {
return LDoubleStackSlot::Create(index);
} else {
return LStackSlot::Create(index);
}
}
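// ---------------------------------------------------------------------------
// Illustrative aside (not part of the original file): a standalone trace of
// the GetNextSpillIndex arithmetic above. A double-width request burns one
// extra slot and returns the index of the second slot of the pair.
// ---------------------------------------------------------------------------
#include <cassert>

static int spill_slot_count = 0;

static int GetNextSpillIndexDemo(bool is_double) {
  if (is_double) spill_slot_count++;  // reserve the extra (low) slot
  return spill_slot_count++;
}

int main() {
  assert(GetNextSpillIndexDemo(false) == 0);  // single -> slot 0
  assert(GetNextSpillIndexDemo(true) == 2);   // double -> slots 1 and 2
  assert(GetNextSpillIndexDemo(false) == 3);  // next single -> slot 3
  return 0;
}
// ---------------------------------------------------------------------------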
void LChunk::MarkEmptyBlocks() {
HPhase phase("Mark empty blocks", this);
for (int i = 0; i < graph()->blocks()->length(); ++i) {
HBasicBlock* block = graph()->blocks()->at(i);
int first = block->first_instruction_index();
int last = block->last_instruction_index();
LInstruction* first_instr = instructions()->at(first);
LInstruction* last_instr = instructions()->at(last);
LLabel* label = LLabel::cast(first_instr);
if (last_instr->IsGoto()) {
LGoto* goto_instr = LGoto::cast(last_instr);
if (label->IsRedundant() &&
!label->is_loop_header()) {
bool can_eliminate = true;
for (int j = first + 1; j < last && can_eliminate; ++j) {
LInstruction* cur = instructions()->at(j);
if (cur->IsGap()) {
LGap* gap = LGap::cast(cur);
if (!gap->IsRedundant()) {
can_eliminate = false;
}
} else {
can_eliminate = false;
}
}
if (can_eliminate) {
label->set_replacement(GetLabel(goto_instr->block_id()));
}
}
}
}
}
void LChunk::AddInstruction(LInstruction* instr, HBasicBlock* block) {
LInstructionGap* gap = new LInstructionGap(block);
int index = -1;
if (instr->IsControl()) {
instructions_.Add(gap);
index = instructions_.length();
instructions_.Add(instr);
} else {
index = instructions_.length();
instructions_.Add(instr);
instructions_.Add(gap);
}
if (instr->HasPointerMap()) {
pointer_maps_.Add(instr->pointer_map());
instr->pointer_map()->set_lithium_position(index);
}
}
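// ---------------------------------------------------------------------------
// Illustrative aside (not part of the original file): AddInstruction above
// interleaves a gap with every instruction, placing the gap before a
// control instruction (which ends its block) and after any other
// instruction. A toy trace of that ordering, illustrative only:
// ---------------------------------------------------------------------------
#include <cassert>
#include <string>
#include <vector>

static void AddInstructionDemo(std::vector<std::string>& out,
                               const std::string& instr, bool is_control) {
  if (is_control) {
    out.push_back("gap");   // moves run before the block-ending branch
    out.push_back(instr);
  } else {
    out.push_back(instr);
    out.push_back("gap");
  }
}

int main() {
  std::vector<std::string> chunk;
  AddInstructionDemo(chunk, "add", false);
  AddInstructionDemo(chunk, "branch", true);
  assert(chunk[0] == "add" && chunk[1] == "gap");
  assert(chunk[2] == "gap" && chunk[3] == "branch");
  return 0;
}
// ---------------------------------------------------------------------------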
LConstantOperand* LChunk::DefineConstantOperand(HConstant* constant) {
return LConstantOperand::Create(constant->id());
}
int LChunk::GetParameterStackSlot(int index) const {
// The receiver is at index 0, the first parameter at index 1, so we
// shift all parameter indexes down by the number of parameters, and
// make sure they end up negative so they are distinguishable from
// spill slots.
int result = index - info()->scope()->num_parameters() - 1;
ASSERT(result < 0);
return result;
}
// A parameter relative to the frame pointer in the arguments stub
// (see the worked example after this function).
int LChunk::ParameterAt(int index) {
ASSERT(-1 <= index); // -1 is the receiver.
return (1 + info()->scope()->num_parameters() - index) *
kPointerSize;
}
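// ---------------------------------------------------------------------------
// Illustrative aside (not part of the original file): the two functions
// above encode the parameter layout twice. GetParameterStackSlot maps a
// parameter index to a negative slot number (so parameters stay
// distinguishable from non-negative spill slots); ParameterAt maps an
// index to a byte offset from the frame pointer. The standalone sketch
// below fixes num_parameters at 2 and kPointerSize at 4 (32-bit MIPS) for
// illustration.
// ---------------------------------------------------------------------------
#include <cassert>

const int kNumParametersDemo = 2;
const int kPointerSizeDemo = 4;

static int GetParameterStackSlotDemo(int index) {
  // Receiver is index 0, first parameter is index 1.
  int result = index - kNumParametersDemo - 1;
  assert(result < 0);
  return result;
}

static int ParameterAtDemo(int index) {
  assert(-1 <= index);  // -1 is the receiver
  return (1 + kNumParametersDemo - index) * kPointerSizeDemo;
}

int main() {
  assert(GetParameterStackSlotDemo(0) == -3);  // receiver
  assert(GetParameterStackSlotDemo(2) == -1);  // last parameter
  assert(ParameterAtDemo(-1) == 16);           // receiver: (1 + 2 + 1) * 4
  assert(ParameterAtDemo(2) == 4);             // last parameter
  return 0;
}
// ---------------------------------------------------------------------------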
LGap* LChunk::GetGapAt(int index) const {
return LGap::cast(instructions_[index]);
}
bool LChunk::IsGapAt(int index) const {
return instructions_[index]->IsGap();
}
int LChunk::NearestGapPos(int index) const {
while (!IsGapAt(index)) index--;
return index;
}
void LChunk::AddGapMove(int index, LOperand* from, LOperand* to) {
GetGapAt(index)->GetOrCreateParallelMove(LGap::START)->AddMove(from, to);
}
Handle<Object> LChunk::LookupLiteral(LConstantOperand* operand) const {
return HConstant::cast(graph_->LookupValue(operand->index()))->handle();
}
Representation LChunk::LookupLiteralRepresentation(
LConstantOperand* operand) const {
return graph_->LookupValue(operand->index())->representation();
}
LChunk* LChunkBuilder::Build() {
ASSERT(is_unused());
chunk_ = new LChunk(info(), graph());
HPhase phase("Building chunk", chunk_);
status_ = BUILDING;
const ZoneList<HBasicBlock*>* blocks = graph()->blocks();
for (int i = 0; i < blocks->length(); i++) {
HBasicBlock* next = NULL;
if (i < blocks->length() - 1) next = blocks->at(i + 1);
DoBasicBlock(blocks->at(i), next);
if (is_aborted()) return NULL;
}
status_ = DONE;
return chunk_;
}
void LChunkBuilder::Abort(const char* format, ...) {
if (FLAG_trace_bailout) {
SmartArrayPointer<char> name(
info()->shared_info()->DebugName()->ToCString());
PrintF("Aborting LChunk building in @\"%s\": ", *name);
va_list arguments;
va_start(arguments, format);
OS::VPrint(format, arguments);
va_end(arguments);
PrintF("\n");
}
status_ = ABORTED;
}
LRegister* LChunkBuilder::ToOperand(Register reg) {
return LRegister::Create(Register::ToAllocationIndex(reg));
}
LUnallocated* LChunkBuilder::ToUnallocated(Register reg) {
return new LUnallocated(LUnallocated::FIXED_REGISTER,
Register::ToAllocationIndex(reg));
}
LUnallocated* LChunkBuilder::ToUnallocated(DoubleRegister reg) {
return new LUnallocated(LUnallocated::FIXED_DOUBLE_REGISTER,
DoubleRegister::ToAllocationIndex(reg));
}
LOperand* LChunkBuilder::UseFixed(HValue* value, Register fixed_register) {
return Use(value, ToUnallocated(fixed_register));
}
LOperand* LChunkBuilder::UseFixedDouble(HValue* value, DoubleRegister reg) {
return Use(value, ToUnallocated(reg));
}
LOperand* LChunkBuilder::UseRegister(HValue* value) {
return Use(value, new LUnallocated(LUnallocated::MUST_HAVE_REGISTER));
}
LOperand* LChunkBuilder::UseRegisterAtStart(HValue* value) {
return Use(value,
new LUnallocated(LUnallocated::MUST_HAVE_REGISTER,
LUnallocated::USED_AT_START));
}
LOperand* LChunkBuilder::UseTempRegister(HValue* value) {
return Use(value, new LUnallocated(LUnallocated::WRITABLE_REGISTER));
}
LOperand* LChunkBuilder::Use(HValue* value) {
return Use(value, new LUnallocated(LUnallocated::NONE));
}
LOperand* LChunkBuilder::UseAtStart(HValue* value) {
return Use(value, new LUnallocated(LUnallocated::NONE,
LUnallocated::USED_AT_START));
}
LOperand* LChunkBuilder::UseOrConstant(HValue* value) {
return value->IsConstant()
? chunk_->DefineConstantOperand(HConstant::cast(value))
: Use(value);
}
LOperand* LChunkBuilder::UseOrConstantAtStart(HValue* value) {
return value->IsConstant()
? chunk_->DefineConstantOperand(HConstant::cast(value))
: UseAtStart(value);
}
LOperand* LChunkBuilder::UseRegisterOrConstant(HValue* value) {
return value->IsConstant()
? chunk_->DefineConstantOperand(HConstant::cast(value))
: UseRegister(value);
}
LOperand* LChunkBuilder::UseRegisterOrConstantAtStart(HValue* value) {
return value->IsConstant()
? chunk_->DefineConstantOperand(HConstant::cast(value))
: UseRegisterAtStart(value);
}
LOperand* LChunkBuilder::UseAny(HValue* value) {
return value->IsConstant()
? chunk_->DefineConstantOperand(HConstant::cast(value))
: Use(value, new LUnallocated(LUnallocated::ANY));
}
LOperand* LChunkBuilder::Use(HValue* value, LUnallocated* operand) {
if (value->EmitAtUses()) {
HInstruction* instr = HInstruction::cast(value);
VisitInstruction(instr);
}
allocator_->RecordUse(value, operand);
return operand;
}
template<int I, int T>
LInstruction* LChunkBuilder::Define(LTemplateInstruction<1, I, T>* instr,
LUnallocated* result) {
allocator_->RecordDefinition(current_instruction_, result);
instr->set_result(result);
return instr;
}
template<int I, int T>
LInstruction* LChunkBuilder::Define(LTemplateInstruction<1, I, T>* instr) {
return Define(instr, new LUnallocated(LUnallocated::NONE));
}
template<int I, int T>
LInstruction* LChunkBuilder::DefineAsRegister(
LTemplateInstruction<1, I, T>* instr) {
return Define(instr, new LUnallocated(LUnallocated::MUST_HAVE_REGISTER));
}
template<int I, int T>
LInstruction* LChunkBuilder::DefineAsSpilled(
LTemplateInstruction<1, I, T>* instr, int index) {
return Define(instr, new LUnallocated(LUnallocated::FIXED_SLOT, index));
}
template<int I, int T>
LInstruction* LChunkBuilder::DefineSameAsFirst(
LTemplateInstruction<1, I, T>* instr) {
return Define(instr, new LUnallocated(LUnallocated::SAME_AS_FIRST_INPUT));
}
template<int I, int T>
LInstruction* LChunkBuilder::DefineFixed(
LTemplateInstruction<1, I, T>* instr, Register reg) {
return Define(instr, ToUnallocated(reg));
}
template<int I, int T>
LInstruction* LChunkBuilder::DefineFixedDouble(
LTemplateInstruction<1, I, T>* instr, DoubleRegister reg) {
return Define(instr, ToUnallocated(reg));
}
LInstruction* LChunkBuilder::AssignEnvironment(LInstruction* instr) {
HEnvironment* hydrogen_env = current_block_->last_environment();
int argument_index_accumulator = 0;
instr->set_environment(CreateEnvironment(hydrogen_env,
&argument_index_accumulator));
return instr;
}
LInstruction* LChunkBuilder::SetInstructionPendingDeoptimizationEnvironment(
LInstruction* instr, int ast_id) {
ASSERT(instruction_pending_deoptimization_environment_ == NULL);
ASSERT(pending_deoptimization_ast_id_ == AstNode::kNoNumber);
instruction_pending_deoptimization_environment_ = instr;
pending_deoptimization_ast_id_ = ast_id;
return instr;
}
void LChunkBuilder::ClearInstructionPendingDeoptimizationEnvironment() {
instruction_pending_deoptimization_environment_ = NULL;
pending_deoptimization_ast_id_ = AstNode::kNoNumber;
}
LInstruction* LChunkBuilder::MarkAsCall(LInstruction* instr,
HInstruction* hinstr,
CanDeoptimize can_deoptimize) {
#ifdef DEBUG
instr->VerifyCall();
#endif
instr->MarkAsCall();
instr = AssignPointerMap(instr);
if (hinstr->HasSideEffects()) {
ASSERT(hinstr->next()->IsSimulate());
HSimulate* sim = HSimulate::cast(hinstr->next());
instr = SetInstructionPendingDeoptimizationEnvironment(
instr, sim->ast_id());
}
// If the instruction does not have side effects, lazy deoptimization
// after the call will try to deoptimize to the point before the call.
// Thus we still need to attach an environment to this call even if
// the call sequence cannot deoptimize eagerly.
bool needs_environment =
(can_deoptimize == CAN_DEOPTIMIZE_EAGERLY) || !hinstr->HasSideEffects();
if (needs_environment && !instr->HasEnvironment()) {
instr = AssignEnvironment(instr);
}
return instr;
}
LInstruction* LChunkBuilder::MarkAsSaveDoubles(LInstruction* instr) {
instr->MarkAsSaveDoubles();
return instr;
}
LInstruction* LChunkBuilder::AssignPointerMap(LInstruction* instr) {
ASSERT(!instr->HasPointerMap());
instr->set_pointer_map(new LPointerMap(position_));
return instr;
}
LUnallocated* LChunkBuilder::TempRegister() {
LUnallocated* operand = new LUnallocated(LUnallocated::MUST_HAVE_REGISTER);
allocator_->RecordTemporary(operand);
return operand;
}
LOperand* LChunkBuilder::FixedTemp(Register reg) {
LUnallocated* operand = ToUnallocated(reg);
allocator_->RecordTemporary(operand);
return operand;
}
LOperand* LChunkBuilder::FixedTemp(DoubleRegister reg) {
LUnallocated* operand = ToUnallocated(reg);
allocator_->RecordTemporary(operand);
return operand;
}
LInstruction* LChunkBuilder::DoBlockEntry(HBlockEntry* instr) {
return new LLabel(instr->block());
}
LInstruction* LChunkBuilder::DoSoftDeoptimize(HSoftDeoptimize* instr) {
return AssignEnvironment(new LDeoptimize);
}
LInstruction* LChunkBuilder::DoDeoptimize(HDeoptimize* instr) {
return AssignEnvironment(new LDeoptimize);
}
LInstruction* LChunkBuilder::DoBit(Token::Value op,
HBitwiseBinaryOperation* instr) {
if (instr->representation().IsInteger32()) {
ASSERT(instr->left()->representation().IsInteger32());
ASSERT(instr->right()->representation().IsInteger32());
LOperand* left = UseRegisterAtStart(instr->LeastConstantOperand());
LOperand* right = UseOrConstantAtStart(instr->MostConstantOperand());
return DefineAsRegister(new LBitI(op, left, right));
} else {
ASSERT(instr->representation().IsTagged());
ASSERT(instr->left()->representation().IsTagged());
ASSERT(instr->right()->representation().IsTagged());
LOperand* left = UseFixed(instr->left(), a1);
LOperand* right = UseFixed(instr->right(), a0);
LArithmeticT* result = new LArithmeticT(op, left, right);
return MarkAsCall(DefineFixed(result, v0), instr);
}
}
LInstruction* LChunkBuilder::DoShift(Token::Value op,
HBitwiseBinaryOperation* instr) {
if (instr->representation().IsTagged()) {
ASSERT(instr->left()->representation().IsTagged());
ASSERT(instr->right()->representation().IsTagged());
LOperand* left = UseFixed(instr->left(), a1);
LOperand* right = UseFixed(instr->right(), a0);
LArithmeticT* result = new LArithmeticT(op, left, right);
return MarkAsCall(DefineFixed(result, v0), instr);
}
ASSERT(instr->representation().IsInteger32());
ASSERT(instr->left()->representation().IsInteger32());
ASSERT(instr->right()->representation().IsInteger32());
LOperand* left = UseRegisterAtStart(instr->left());
HValue* right_value = instr->right();
LOperand* right = NULL;
int constant_value = 0;
if (right_value->IsConstant()) {
HConstant* constant = HConstant::cast(right_value);
right = chunk_->DefineConstantOperand(constant);
constant_value = constant->Integer32Value() & 0x1f;
} else {
right = UseRegisterAtStart(right_value);
}
// Shift operations can only deoptimize if we do a logical shift by 0
// and the result cannot be truncated to int32 (see the illustration
// after this function).
bool may_deopt = (op == Token::SHR && constant_value == 0);
bool does_deopt = false;
if (may_deopt) {
for (HUseIterator it(instr->uses()); !it.Done(); it.Advance()) {
if (!it.value()->CheckFlag(HValue::kTruncatingToInt32)) {
does_deopt = true;
break;
}
}
}
LInstruction* result =
DefineAsRegister(new LShiftI(op, left, right, does_deopt));
return does_deopt ? AssignEnvironment(result) : result;
}
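// ---------------------------------------------------------------------------
// Illustrative aside (not part of the original file): why only a logical
// shift by 0 can deoptimize. In JavaScript, x >>> 0 reinterprets the int32
// bit pattern as uint32, which may not fit back into int32; any nonzero
// logical shift amount brings the result back into int32 range. A plain
// C++ demonstration of the arithmetic:
// ---------------------------------------------------------------------------
#include <cassert>
#include <cstdint>

int main() {
  int32_t x = -1;
  // JS: (-1 >>> 0) === 4294967295, which exceeds INT32_MAX, so the result
  // cannot be represented as an int32 without deoptimizing to a double.
  uint32_t by_zero = static_cast<uint32_t>(x) >> 0;
  assert(by_zero == 4294967295u);
  assert(by_zero > static_cast<uint32_t>(INT32_MAX));
  // A shift by 1 (or more) always fits in int32 again.
  assert((static_cast<uint32_t>(x) >> 1) ==
         static_cast<uint32_t>(INT32_MAX));
  return 0;
}
// ---------------------------------------------------------------------------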
LInstruction* LChunkBuilder::DoArithmeticD(Token::Value op,
HArithmeticBinaryOperation* instr) {
ASSERT(instr->representation().IsDouble());
ASSERT(instr->left()->representation().IsDouble());
ASSERT(instr->right()->representation().IsDouble());
ASSERT(op != Token::MOD);
LOperand* left = UseRegisterAtStart(instr->left());
LOperand* right = UseRegisterAtStart(instr->right());
LArithmeticD* result = new LArithmeticD(op, left, right);
return DefineAsRegister(result);
}
LInstruction* LChunkBuilder::DoArithmeticT(Token::Value op,
HArithmeticBinaryOperation* instr) {
ASSERT(op == Token::ADD ||
op == Token::DIV ||
op == Token::MOD ||
op == Token::MUL ||
op == Token::SUB);
HValue* left = instr->left();
HValue* right = instr->right();
ASSERT(left->representation().IsTagged());
ASSERT(right->representation().IsTagged());
LOperand* left_operand = UseFixed(left, a1);
LOperand* right_operand = UseFixed(right, a0);
LArithmeticT* result = new LArithmeticT(op, left_operand, right_operand);
return MarkAsCall(DefineFixed(result, v0), instr);
}
void LChunkBuilder::DoBasicBlock(HBasicBlock* block, HBasicBlock* next_block) {
ASSERT(is_building());
current_block_ = block;
next_block_ = next_block;
if (block->IsStartBlock()) {
block->UpdateEnvironment(graph_->start_environment());
argument_count_ = 0;
} else if (block->predecessors()->length() == 1) {
// We have a single predecessor => copy environment and outgoing
// argument count from the predecessor.
ASSERT(block->phis()->length() == 0);
HBasicBlock* pred = block->predecessors()->at(0);
HEnvironment* last_environment = pred->last_environment();
ASSERT(last_environment != NULL);
// Only copy the environment if it is used again later.
if (pred->end()->SecondSuccessor() == NULL) {
ASSERT(pred->end()->FirstSuccessor() == block);
} else {
if (pred->end()->FirstSuccessor()->block_id() > block->block_id() ||
pred->end()->SecondSuccessor()->block_id() > block->block_id()) {
last_environment = last_environment->Copy();
}
}
block->UpdateEnvironment(last_environment);
ASSERT(pred->argument_count() >= 0);
argument_count_ = pred->argument_count();
} else {
// We are at a state join => process phis.
HBasicBlock* pred = block->predecessors()->at(0);
// No need to copy the environment, it cannot be used later.
HEnvironment* last_environment = pred->last_environment();
for (int i = 0; i < block->phis()->length(); ++i) {
HPhi* phi = block->phis()->at(i);
last_environment->SetValueAt(phi->merged_index(), phi);
}
for (int i = 0; i < block->deleted_phis()->length(); ++i) {
last_environment->SetValueAt(block->deleted_phis()->at(i),
graph_->GetConstantUndefined());
}
block->UpdateEnvironment(last_environment);
// Pick up the outgoing argument count of one of the predecessors.
argument_count_ = pred->argument_count();
}
HInstruction* current = block->first();
int start = chunk_->instructions()->length();
while (current != NULL && !is_aborted()) {
// Code for constants in registers is generated lazily.
if (!current->EmitAtUses()) {
VisitInstruction(current);
}
current = current->next();
}
int end = chunk_->instructions()->length() - 1;
if (end >= start) {
block->set_first_instruction_index(start);
block->set_last_instruction_index(end);
}
block->set_argument_count(argument_count_);
next_block_ = NULL;
current_block_ = NULL;
}
void LChunkBuilder::VisitInstruction(HInstruction* current) {
HInstruction* old_current = current_instruction_;
current_instruction_ = current;
if (current->has_position()) position_ = current->position();
LInstruction* instr = current->CompileToLithium(this);
if (instr != NULL) {
if (FLAG_stress_pointer_maps && !instr->HasPointerMap()) {
instr = AssignPointerMap(instr);
}
if (FLAG_stress_environments && !instr->HasEnvironment()) {
instr = AssignEnvironment(instr);
}
instr->set_hydrogen_value(current);
chunk_->AddInstruction(instr, current_block_);
}
current_instruction_ = old_current;
}
LEnvironment* LChunkBuilder::CreateEnvironment(
HEnvironment* hydrogen_env,
int* argument_index_accumulator) {
if (hydrogen_env == NULL) return NULL;
LEnvironment* outer =
CreateEnvironment(hydrogen_env->outer(), argument_index_accumulator);
int ast_id = hydrogen_env->ast_id();
ASSERT(ast_id != AstNode::kNoNumber);
int value_count = hydrogen_env->length();
LEnvironment* result = new LEnvironment(hydrogen_env->closure(),
ast_id,
hydrogen_env->parameter_count(),
argument_count_,
value_count,
outer);
for (int i = 0; i < value_count; ++i) {
if (hydrogen_env->is_special_index(i)) continue;
HValue* value = hydrogen_env->values()->at(i);
LOperand* op = NULL;
if (value->IsArgumentsObject()) {
op = NULL;
} else if (value->IsPushArgument()) {
op = new LArgument((*argument_index_accumulator)++);
} else {
op = UseAny(value);
}
result->AddValue(op, value->representation());
}
return result;
}
LInstruction* LChunkBuilder::DoGoto(HGoto* instr) {
return new LGoto(instr->FirstSuccessor()->block_id());
}
LInstruction* LChunkBuilder::DoBranch(HBranch* instr) {
HValue* v = instr->value();
if (v->EmitAtUses()) {
HBasicBlock* successor = HConstant::cast(v)->ToBoolean()
? instr->FirstSuccessor()
: instr->SecondSuccessor();
return new LGoto(successor->block_id());
}
return AssignEnvironment(new LBranch(UseRegister(v)));
}
LInstruction* LChunkBuilder::DoCompareMap(HCompareMap* instr) {
ASSERT(instr->value()->representation().IsTagged());
LOperand* value = UseRegisterAtStart(instr->value());
LOperand* temp = TempRegister();
return new LCmpMapAndBranch(value, temp);
}
LInstruction* LChunkBuilder::DoArgumentsLength(HArgumentsLength* length) {
return DefineAsRegister(new LArgumentsLength(UseRegister(length->value())));
}
LInstruction* LChunkBuilder::DoArgumentsElements(HArgumentsElements* elems) {
return DefineAsRegister(new LArgumentsElements);
}
LInstruction* LChunkBuilder::DoInstanceOf(HInstanceOf* instr) {
LInstanceOf* result =
new LInstanceOf(UseFixed(instr->left(), a0),
UseFixed(instr->right(), a1));
return MarkAsCall(DefineFixed(result, v0), instr);
}
LInstruction* LChunkBuilder::DoInstanceOfKnownGlobal(
HInstanceOfKnownGlobal* instr) {
LInstanceOfKnownGlobal* result =
new LInstanceOfKnownGlobal(UseFixed(instr->left(), a0), FixedTemp(t0));
return MarkAsCall(DefineFixed(result, v0), instr);
}
LInstruction* LChunkBuilder::DoApplyArguments(HApplyArguments* instr) {
LOperand* function = UseFixed(instr->function(), a1);
LOperand* receiver = UseFixed(instr->receiver(), a0);
LOperand* length = UseFixed(instr->length(), a2);
LOperand* elements = UseFixed(instr->elements(), a3);
LApplyArguments* result = new LApplyArguments(function,
receiver,
length,
elements);
return MarkAsCall(DefineFixed(result, v0), instr, CAN_DEOPTIMIZE_EAGERLY);
}
LInstruction* LChunkBuilder::DoPushArgument(HPushArgument* instr) {
++argument_count_;
LOperand* argument = Use(instr->argument());
return new LPushArgument(argument);
}
LInstruction* LChunkBuilder::DoThisFunction(HThisFunction* instr) {
return instr->HasNoUses() ? NULL : DefineAsRegister(new LThisFunction);
}
LInstruction* LChunkBuilder::DoContext(HContext* instr) {
return instr->HasNoUses() ? NULL : DefineAsRegister(new LContext);
}
LInstruction* LChunkBuilder::DoOuterContext(HOuterContext* instr) {
LOperand* context = UseRegisterAtStart(instr->value());
return DefineAsRegister(new LOuterContext(context));
}
LInstruction* LChunkBuilder::DoGlobalObject(HGlobalObject* instr) {
LOperand* context = UseRegisterAtStart(instr->value());
return DefineAsRegister(new LGlobalObject(context));
}
LInstruction* LChunkBuilder::DoGlobalReceiver(HGlobalReceiver* instr) {
LOperand* global_object = UseRegisterAtStart(instr->value());
return DefineAsRegister(new LGlobalReceiver(global_object));
}
LInstruction* LChunkBuilder::DoCallConstantFunction(
HCallConstantFunction* instr) {
argument_count_ -= instr->argument_count();
return MarkAsCall(DefineFixed(new LCallConstantFunction, v0), instr);
}
LInstruction* LChunkBuilder::DoInvokeFunction(HInvokeFunction* instr) {
LOperand* function = UseFixed(instr->function(), a1);
argument_count_ -= instr->argument_count();
LInvokeFunction* result = new LInvokeFunction(function);
return MarkAsCall(DefineFixed(result, v0), instr, CANNOT_DEOPTIMIZE_EAGERLY);
}
LInstruction* LChunkBuilder::DoUnaryMathOperation(HUnaryMathOperation* instr) {
BuiltinFunctionId op = instr->op();
if (op == kMathLog || op == kMathSin || op == kMathCos) {
LOperand* input = UseFixedDouble(instr->value(), f4);
LUnaryMathOperation* result = new LUnaryMathOperation(input, NULL);
return MarkAsCall(DefineFixedDouble(result, f4), instr);
} else {
LOperand* input = UseRegisterAtStart(instr->value());
LOperand* temp = (op == kMathFloor) ? TempRegister() : NULL;
LUnaryMathOperation* result = new LUnaryMathOperation(input, temp);
switch (op) {
case kMathAbs:
return AssignEnvironment(AssignPointerMap(DefineAsRegister(result)));
case kMathFloor:
return AssignEnvironment(AssignPointerMap(DefineAsRegister(result)));
case kMathSqrt:
return DefineAsRegister(result);
case kMathRound:
return AssignEnvironment(DefineAsRegister(result));
case kMathPowHalf:
return DefineAsRegister(result);
default:
UNREACHABLE();
return NULL;
}
}
}
LInstruction* LChunkBuilder::DoCallKeyed(HCallKeyed* instr) {
ASSERT(instr->key()->representation().IsTagged());
argument_count_ -= instr->argument_count();
LOperand* key = UseFixed(instr->key(), a2);
return MarkAsCall(DefineFixed(new LCallKeyed(key), v0), instr);
}
LInstruction* LChunkBuilder::DoCallNamed(HCallNamed* instr) {
argument_count_ -= instr->argument_count();
return MarkAsCall(DefineFixed(new LCallNamed, v0), instr);
}
LInstruction* LChunkBuilder::DoCallGlobal(HCallGlobal* instr) {
argument_count_ -= instr->argument_count();
return MarkAsCall(DefineFixed(new LCallGlobal, v0), instr);
}
LInstruction* LChunkBuilder::DoCallKnownGlobal(HCallKnownGlobal* instr) {
argument_count_ -= instr->argument_count();
return MarkAsCall(DefineFixed(new LCallKnownGlobal, v0), instr);
}
LInstruction* LChunkBuilder::DoCallNew(HCallNew* instr) {
LOperand* constructor = UseFixed(instr->constructor(), a1);
argument_count_ -= instr->argument_count();
LCallNew* result = new LCallNew(constructor);
return MarkAsCall(DefineFixed(result, v0), instr);
}
LInstruction* LChunkBuilder::DoCallFunction(HCallFunction* instr) {
argument_count_ -= instr->argument_count();
return MarkAsCall(DefineFixed(new LCallFunction, v0), instr);
}
LInstruction* LChunkBuilder::DoCallRuntime(HCallRuntime* instr) {
argument_count_ -= instr->argument_count();
return MarkAsCall(DefineFixed(new LCallRuntime, v0), instr);
}
LInstruction* LChunkBuilder::DoShr(HShr* instr) {
return DoShift(Token::SHR, instr);
}
LInstruction* LChunkBuilder::DoSar(HSar* instr) {
return DoShift(Token::SAR, instr);
}
LInstruction* LChunkBuilder::DoShl(HShl* instr) {
return DoShift(Token::SHL, instr);
}
LInstruction* LChunkBuilder::DoBitAnd(HBitAnd* instr) {
return DoBit(Token::BIT_AND, instr);
}
LInstruction* LChunkBuilder::DoBitNot(HBitNot* instr) {
ASSERT(instr->value()->representation().IsInteger32());
ASSERT(instr->representation().IsInteger32());
return DefineAsRegister(new LBitNotI(UseRegisterAtStart(instr->value())));
}
LInstruction* LChunkBuilder::DoBitOr(HBitOr* instr) {
return DoBit(Token::BIT_OR, instr);
}
LInstruction* LChunkBuilder::DoBitXor(HBitXor* instr) {
return DoBit(Token::BIT_XOR, instr);
}
LInstruction* LChunkBuilder::DoDiv(HDiv* instr) {
if (instr->representation().IsDouble()) {
return DoArithmeticD(Token::DIV, instr);
} else if (instr->representation().IsInteger32()) {
// TODO(1042) The fixed register allocation is needed because we call
// TypeRecordingBinaryOpStub from the generated code, which requires
// registers a0 and a1 to be used. We should remove that when we
// provide a native implementation.
LOperand* dividend = UseFixed(instr->left(), a0);
LOperand* divisor = UseFixed(instr->right(), a1);
return AssignEnvironment(AssignPointerMap(
DefineFixed(new LDivI(dividend, divisor), v0)));
} else {
return DoArithmeticT(Token::DIV, instr);
}
}
LInstruction* LChunkBuilder::DoMod(HMod* instr) {
if (instr->representation().IsInteger32()) {
ASSERT(instr->left()->representation().IsInteger32());
ASSERT(instr->right()->representation().IsInteger32());
LModI* mod;
if (instr->HasPowerOf2Divisor()) {
ASSERT(!instr->CheckFlag(HValue::kCanBeDivByZero));
LOperand* value = UseRegisterAtStart(instr->left());
mod = new LModI(value, UseOrConstant(instr->right()));
} else {
LOperand* dividend = UseRegister(instr->left());
LOperand* divisor = UseRegister(instr->right());
mod = new LModI(dividend,
divisor,
TempRegister(),
FixedTemp(f20),
FixedTemp(f22));
}
if (instr->CheckFlag(HValue::kBailoutOnMinusZero) ||
instr->CheckFlag(HValue::kCanBeDivByZero)) {
return AssignEnvironment(DefineAsRegister(mod));
} else {
return DefineAsRegister(mod);
}
} else if (instr->representation().IsTagged()) {
return DoArithmeticT(Token::MOD, instr);
} else {
ASSERT(instr->representation().IsDouble());
// We call a C function for double modulo. It can't trigger a GC.
// We need to use a fixed result register for the call.
// TODO(fschneider): Allow any register as input registers.
LOperand* left = UseFixedDouble(instr->left(), f2);
LOperand* right = UseFixedDouble(instr->right(), f4);
LArithmeticD* result = new LArithmeticD(Token::MOD, left, right);
return MarkAsCall(DefineFixedDouble(result, f2), instr);
}
}
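// ---------------------------------------------------------------------------
// Illustrative aside (not part of the original file): the
// HasPowerOf2Divisor fast path above exists because x % 2^k needs no
// division. For a non-negative dividend it is a bit mask; for a negative
// dividend the sign is reapplied, since a JS remainder takes the sign of
// the dividend. A standalone sketch of that arithmetic (not the actual
// MIPS codegen; it ignores the INT32_MIN overflow edge case, which real
// code must handle):
// ---------------------------------------------------------------------------
#include <cassert>
#include <cstdint>

static int32_t ModPowerOf2(int32_t x, int32_t d) {
  assert(d > 0 && (d & (d - 1)) == 0);  // d must be a power of two
  int32_t mask = d - 1;
  return x >= 0 ? (x & mask) : -((-x) & mask);
}

int main() {
  assert(ModPowerOf2(7, 4) == 3);
  assert(ModPowerOf2(-7, 4) == -3);  // sign follows the dividend, as in JS
  assert(ModPowerOf2(8, 8) == 0);
  return 0;
}
// ---------------------------------------------------------------------------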
LInstruction* LChunkBuilder::DoMul(HMul* instr) {
if (instr->representation().IsInteger32()) {
ASSERT(instr->left()->representation().IsInteger32());
ASSERT(instr->right()->representation().IsInteger32());
LOperand* left;
LOperand* right = UseOrConstant(instr->MostConstantOperand());
LOperand* temp = NULL;
if (instr->CheckFlag(HValue::kBailoutOnMinusZero) &&
(instr->CheckFlag(HValue::kCanOverflow) ||
!right->IsConstantOperand())) {
left = UseRegister(instr->LeastConstantOperand());
temp = TempRegister();
} else {
left = UseRegisterAtStart(instr->LeastConstantOperand());
}
return AssignEnvironment(DefineAsRegister(new LMulI(left, right, temp)));
} else if (instr->representation().IsDouble()) {
return DoArithmeticD(Token::MUL, instr);
} else {
return DoArithmeticT(Token::MUL, instr);
}
}
LInstruction* LChunkBuilder::DoSub(HSub* instr) {
if (instr->representation().IsInteger32()) {
ASSERT(instr->left()->representation().IsInteger32());
ASSERT(instr->right()->representation().IsInteger32());
LOperand* left = UseRegisterAtStart(instr->left());
LOperand* right = UseOrConstantAtStart(instr->right());
LSubI* sub = new LSubI(left, right);
LInstruction* result = DefineAsRegister(sub);
if (instr->CheckFlag(HValue::kCanOverflow)) {
result = AssignEnvironment(result);
}
return result;
} else if (instr->representation().IsDouble()) {
return DoArithmeticD(Token::SUB, instr);
} else {
return DoArithmeticT(Token::SUB, instr);
}
}
LInstruction* LChunkBuilder::DoAdd(HAdd* instr) {
if (instr->representation().IsInteger32()) {
ASSERT(instr->left()->representation().IsInteger32());
ASSERT(instr->right()->representation().IsInteger32());
LOperand* left = UseRegisterAtStart(instr->LeastConstantOperand());
LOperand* right = UseOrConstantAtStart(instr->MostConstantOperand());
LAddI* add = new LAddI(left, right);
LInstruction* result = DefineAsRegister(add);
if (instr->CheckFlag(HValue::kCanOverflow)) {
result = AssignEnvironment(result);
}
return result;
} else if (instr->representation().IsDouble()) {
return DoArithmeticD(Token::ADD, instr);
} else {
ASSERT(instr->representation().IsTagged());
return DoArithmeticT(Token::ADD, instr);
}
}
LInstruction* LChunkBuilder::DoPower(HPower* instr) {
ASSERT(instr->representation().IsDouble());
// We call a C function for double power. It can't trigger a GC.
// We need to use a fixed result register for the call.
Representation exponent_type = instr->right()->representation();
ASSERT(instr->left()->representation().IsDouble());
LOperand* left = UseFixedDouble(instr->left(), f2);
LOperand* right = exponent_type.IsDouble() ?
UseFixedDouble(instr->right(), f4) :
UseFixed(instr->right(), a0);
LPower* result = new LPower(left, right);
return MarkAsCall(DefineFixedDouble(result, f6),
instr,
CAN_DEOPTIMIZE_EAGERLY);
}
LInstruction* LChunkBuilder::DoCompareGeneric(HCompareGeneric* instr) {
ASSERT(instr->left()->representation().IsTagged());
ASSERT(instr->right()->representation().IsTagged());
LOperand* left = UseFixed(instr->left(), a1);
LOperand* right = UseFixed(instr->right(), a0);
LCmpT* result = new LCmpT(left, right);
return MarkAsCall(DefineFixed(result, v0), instr);
}
LInstruction* LChunkBuilder::DoCompareIDAndBranch(
HCompareIDAndBranch* instr) {
Representation r = instr->GetInputRepresentation();
if (r.IsInteger32()) {
ASSERT(instr->left()->representation().IsInteger32());
ASSERT(instr->right()->representation().IsInteger32());
LOperand* left = UseRegisterOrConstantAtStart(instr->left());
LOperand* right = UseRegisterOrConstantAtStart(instr->right());
return new LCmpIDAndBranch(left, right);
} else {
ASSERT(r.IsDouble());
ASSERT(instr->left()->representation().IsDouble());
ASSERT(instr->right()->representation().IsDouble());
LOperand* left = UseRegisterAtStart(instr->left());
LOperand* right = UseRegisterAtStart(instr->right());
return new LCmpIDAndBranch(left, right);
}
}
LInstruction* LChunkBuilder::DoCompareObjectEqAndBranch(
HCompareObjectEqAndBranch* instr) {
LOperand* left = UseRegisterAtStart(instr->left());
LOperand* right = UseRegisterAtStart(instr->right());
return new LCmpObjectEqAndBranch(left, right);
}
LInstruction* LChunkBuilder::DoCompareConstantEqAndBranch(
HCompareConstantEqAndBranch* instr) {
return new LCmpConstantEqAndBranch(UseRegisterAtStart(instr->value()));
}
LInstruction* LChunkBuilder::DoIsNilAndBranch(HIsNilAndBranch* instr) {
ASSERT(instr->value()->representation().IsTagged());
return new LIsNilAndBranch(UseRegisterAtStart(instr->value()));
}
LInstruction* LChunkBuilder::DoIsObjectAndBranch(HIsObjectAndBranch* instr) {
ASSERT(instr->value()->representation().IsTagged());
LOperand* temp = TempRegister();
return new LIsObjectAndBranch(UseRegisterAtStart(instr->value()), temp);
}
LInstruction* LChunkBuilder::DoIsSmiAndBranch(HIsSmiAndBranch* instr) {
ASSERT(instr->value()->representation().IsTagged());
return new LIsSmiAndBranch(Use(instr->value()));
}
LInstruction* LChunkBuilder::DoIsUndetectableAndBranch(
HIsUndetectableAndBranch* instr) {
ASSERT(instr->value()->representation().IsTagged());
return new LIsUndetectableAndBranch(UseRegisterAtStart(instr->value()),
TempRegister());
}
LInstruction* LChunkBuilder::DoHasInstanceTypeAndBranch(
HHasInstanceTypeAndBranch* instr) {
ASSERT(instr->value()->representation().IsTagged());
return new LHasInstanceTypeAndBranch(UseRegisterAtStart(instr->value()));
}
LInstruction* LChunkBuilder::DoGetCachedArrayIndex(
HGetCachedArrayIndex* instr) {
ASSERT(instr->value()->representation().IsTagged());
LOperand* value = UseRegisterAtStart(instr->value());
return DefineAsRegister(new LGetCachedArrayIndex(value));
}
LInstruction* LChunkBuilder::DoHasCachedArrayIndexAndBranch(
HHasCachedArrayIndexAndBranch* instr) {
ASSERT(instr->value()->representation().IsTagged());
return new LHasCachedArrayIndexAndBranch(
UseRegisterAtStart(instr->value()));
}
LInstruction* LChunkBuilder::DoClassOfTestAndBranch(
HClassOfTestAndBranch* instr) {
ASSERT(instr->value()->representation().IsTagged());
return new LClassOfTestAndBranch(UseTempRegister(instr->value()),
TempRegister());
}
LInstruction* LChunkBuilder::DoJSArrayLength(HJSArrayLength* instr) {
LOperand* array = UseRegisterAtStart(instr->value());
return DefineAsRegister(new LJSArrayLength(array));
}
LInstruction* LChunkBuilder::DoFixedArrayBaseLength(
HFixedArrayBaseLength* instr) {
LOperand* array = UseRegisterAtStart(instr->value());
return DefineAsRegister(new LFixedArrayBaseLength(array));
}
LInstruction* LChunkBuilder::DoElementsKind(HElementsKind* instr) {
LOperand* object = UseRegisterAtStart(instr->value());
return DefineAsRegister(new LElementsKind(object));
}
LInstruction* LChunkBuilder::DoValueOf(HValueOf* instr) {
LOperand* object = UseRegister(instr->value());
LValueOf* result = new LValueOf(object, TempRegister());
return AssignEnvironment(DefineAsRegister(result));
}
LInstruction* LChunkBuilder::DoBoundsCheck(HBoundsCheck* instr) {
return AssignEnvironment(new LBoundsCheck(UseRegisterAtStart(instr->index()),
UseRegister(instr->length())));
}
LInstruction* LChunkBuilder::DoAbnormalExit(HAbnormalExit* instr) {
// The control instruction marking the end of a block that completed
// abruptly (e.g., threw an exception). There is nothing specific to do.
return NULL;
}
LInstruction* LChunkBuilder::DoThrow(HThrow* instr) {
LOperand* value = UseFixed(instr->value(), a0);
return MarkAsCall(new LThrow(value), instr);
}
LInstruction* LChunkBuilder::DoUseConst(HUseConst* instr) {
return NULL;
}
LInstruction* LChunkBuilder::DoForceRepresentation(HForceRepresentation* bad) {
// All HForceRepresentation instructions should be eliminated in the
// representation change phase of Hydrogen.
UNREACHABLE();
return NULL;
}
LInstruction* LChunkBuilder::DoChange(HChange* instr) {
Representation from = instr->from();
Representation to = instr->to();
if (from.IsTagged()) {
if (to.IsDouble()) {
LOperand* value = UseRegister(instr->value());
LNumberUntagD* res = new LNumberUntagD(value);
return AssignEnvironment(DefineAsRegister(res));
} else {
ASSERT(to.IsInteger32());
LOperand* value = UseRegister(instr->value());
bool needs_check = !instr->value()->type().IsSmi();
LInstruction* res = NULL;
if (!needs_check) {
res = DefineSameAsFirst(new LSmiUntag(value, needs_check));
} else {
LOperand* temp1 = TempRegister();
LOperand* temp2 = instr->CanTruncateToInt32() ? TempRegister()
: NULL;
LOperand* temp3 = instr->CanTruncateToInt32() ? FixedTemp(f22)
: NULL;
res = DefineSameAsFirst(new LTaggedToI(value, temp1, temp2, temp3));
res = AssignEnvironment(res);
}
return res;
}
} else if (from.IsDouble()) {
if (to.IsTagged()) {
LOperand* value = UseRegister(instr->value());
LOperand* temp1 = TempRegister();
LOperand* temp2 = TempRegister();
// Make sure that the temp and result_temp registers are
// different.
LUnallocated* result_temp = TempRegister();
LNumberTagD* result = new LNumberTagD(value, temp1, temp2);
Define(result, result_temp);
return AssignPointerMap(result);
} else {
ASSERT(to.IsInteger32());
LOperand* value = UseRegister(instr->value());
LDoubleToI* res =
new LDoubleToI(value,
TempRegister(),
instr->CanTruncateToInt32() ? TempRegister() : NULL);
return AssignEnvironment(DefineAsRegister(res));
}
} else if (from.IsInteger32()) {
if (to.IsTagged()) {
HValue* val = instr->value();
LOperand* value = UseRegister(val);
if (val->HasRange() && val->range()->IsInSmiRange()) {
return DefineSameAsFirst(new LSmiTag(value));
} else {
LNumberTagI* result = new LNumberTagI(value);
return AssignEnvironment(AssignPointerMap(DefineSameAsFirst(result)));
}
} else {
ASSERT(to.IsDouble());
LOperand* value = Use(instr->value());
return DefineAsRegister(new LInteger32ToDouble(value));
}
}
UNREACHABLE();
return NULL;
}
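// ---------------------------------------------------------------------------
// Illustrative aside (not part of the original file): the integer32 ->
// tagged case above chooses LSmiTag only when range analysis proves the
// value fits a smi. On 32-bit V8 a smi is the integer shifted left one bit
// (tag bit 0 clear; heap pointers have bit 0 set), so only 31-bit values
// are representable. A standalone sketch of that encoding:
// ---------------------------------------------------------------------------
#include <cassert>
#include <cstdint>

static int32_t SmiTagDemo(int32_t value) {
  // Shift through uint32_t to avoid signed-shift pitfalls in portable C++.
  return static_cast<int32_t>(static_cast<uint32_t>(value) << 1);
}

static int32_t SmiUntagDemo(int32_t tagged) {
  return tagged >> 1;  // arithmetic shift restores the sign
}

static bool IsInSmiRangeDemo(int32_t value) {
  return value >= -(1 << 30) && value <= (1 << 30) - 1;
}

int main() {
  assert(SmiTagDemo(42) == 84 && SmiUntagDemo(84) == 42);
  assert(SmiUntagDemo(SmiTagDemo(-7)) == -7);
  assert(IsInSmiRangeDemo((1 << 30) - 1) && !IsInSmiRangeDemo(1 << 30));
  return 0;
}
// ---------------------------------------------------------------------------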
LInstruction* LChunkBuilder::DoCheckNonSmi(HCheckNonSmi* instr) {
LOperand* value = UseRegisterAtStart(instr->value());
return AssignEnvironment(new LCheckNonSmi(value));
}
LInstruction* LChunkBuilder::DoCheckInstanceType(HCheckInstanceType* instr) {
LOperand* value = UseRegisterAtStart(instr->value());
LInstruction* result = new LCheckInstanceType(value);
return AssignEnvironment(result);
}
LInstruction* LChunkBuilder::DoCheckPrototypeMaps(HCheckPrototypeMaps* instr) {
LOperand* temp1 = TempRegister();
LOperand* temp2 = TempRegister();
LInstruction* result = new LCheckPrototypeMaps(temp1, temp2);
return AssignEnvironment(result);
}
LInstruction* LChunkBuilder::DoCheckSmi(HCheckSmi* instr) {
LOperand* value = UseRegisterAtStart(instr->value());
return AssignEnvironment(new LCheckSmi(value));
}
LInstruction* LChunkBuilder::DoCheckFunction(HCheckFunction* instr) {
LOperand* value = UseRegisterAtStart(instr->value());
return AssignEnvironment(new LCheckFunction(value));
}
LInstruction* LChunkBuilder::DoCheckMap(HCheckMap* instr) {
LOperand* value = UseRegisterAtStart(instr->value());
LInstruction* result = new LCheckMap(value);
return AssignEnvironment(result);
}
LInstruction* LChunkBuilder::DoClampToUint8(HClampToUint8* instr) {
HValue* value = instr->value();
Representation input_rep = value->representation();
LOperand* reg = UseRegister(value);
if (input_rep.IsDouble()) {
// Revisit this decision: the register allocator doesn't (yet) support
// double temps, so f22 is reserved explicitly here and in the tagged
// case below.
return DefineAsRegister(new LClampDToUint8(reg, FixedTemp(f22)));
} else if (input_rep.IsInteger32()) {
return DefineAsRegister(new LClampIToUint8(reg));
} else {
ASSERT(input_rep.IsTagged());
// Register allocator doesn't (yet) support allocation of double
// temps. Reserve f22 explicitly.
LClampTToUint8* result = new LClampTToUint8(reg, FixedTemp(f22));
return AssignEnvironment(DefineAsRegister(result));
}
}
LInstruction* LChunkBuilder::DoToInt32(HToInt32* instr) {
HValue* value = instr->value();
Representation input_rep = value->representation();
LOperand* reg = UseRegister(value);
if (input_rep.IsDouble()) {
LOperand* temp1 = TempRegister();
LOperand* temp2 = TempRegister();
LDoubleToI* res = new LDoubleToI(reg, temp1, temp2);
return AssignEnvironment(DefineAsRegister(res));
} else if (input_rep.IsInteger32()) {
// Canonicalization should already have removed the hydrogen instruction in
// this case, since it is a noop.
UNREACHABLE();
return NULL;
} else {
ASSERT(input_rep.IsTagged());
LOperand* temp1 = TempRegister();
LOperand* temp2 = TempRegister();
LOperand* temp3 = FixedTemp(f22);
LTaggedToI* res = new LTaggedToI(reg, temp1, temp2, temp3);
return AssignEnvironment(DefineSameAsFirst(res));
}
}
LInstruction* LChunkBuilder::DoReturn(HReturn* instr) {
return new LReturn(UseFixed(instr->value(), v0));
}
LInstruction* LChunkBuilder::DoConstant(HConstant* instr) {
Representation r = instr->representation();
if (r.IsInteger32()) {
return DefineAsRegister(new LConstantI);
} else if (r.IsDouble()) {
return DefineAsRegister(new LConstantD);
} else if (r.IsTagged()) {
return DefineAsRegister(new LConstantT);
} else {
UNREACHABLE();
return NULL;
}
}
LInstruction* LChunkBuilder::DoLoadGlobalCell(HLoadGlobalCell* instr) {
LLoadGlobalCell* result = new LLoadGlobalCell;
return instr->RequiresHoleCheck()
? AssignEnvironment(DefineAsRegister(result))
: DefineAsRegister(result);
}
LInstruction* LChunkBuilder::DoLoadGlobalGeneric(HLoadGlobalGeneric* instr) {
LOperand* global_object = UseFixed(instr->global_object(), a0);
LLoadGlobalGeneric* result = new LLoadGlobalGeneric(global_object);
return MarkAsCall(DefineFixed(result, v0), instr);
}
LInstruction* LChunkBuilder::DoStoreGlobalCell(HStoreGlobalCell* instr) {
LOperand* temp = TempRegister();
LOperand* value = UseTempRegister(instr->value());
LInstruction* result = new LStoreGlobalCell(value, temp);
if (instr->RequiresHoleCheck()) result = AssignEnvironment(result);
return result;
}
LInstruction* LChunkBuilder::DoStoreGlobalGeneric(HStoreGlobalGeneric* instr) {
LOperand* global_object = UseFixed(instr->global_object(), a1);
LOperand* value = UseFixed(instr->value(), a0);
LStoreGlobalGeneric* result =
new LStoreGlobalGeneric(global_object, value);
return MarkAsCall(result, instr);
}
LInstruction* LChunkBuilder::DoLoadContextSlot(HLoadContextSlot* instr) {
LOperand* context = UseRegisterAtStart(instr->value());
return DefineAsRegister(new LLoadContextSlot(context));
}
LInstruction* LChunkBuilder::DoStoreContextSlot(HStoreContextSlot* instr) {
LOperand* context;
LOperand* value;
if (instr->NeedsWriteBarrier()) {
context = UseTempRegister(instr->context());
value = UseTempRegister(instr->value());
} else {
context = UseRegister(instr->context());
value = UseRegister(instr->value());
}
return new LStoreContextSlot(context, value);
}
LInstruction* LChunkBuilder::DoLoadNamedField(HLoadNamedField* instr) {
return DefineAsRegister(
new LLoadNamedField(UseRegisterAtStart(instr->object())));
}
LInstruction* LChunkBuilder::DoLoadNamedFieldPolymorphic(
HLoadNamedFieldPolymorphic* instr) {
ASSERT(instr->representation().IsTagged());
if (instr->need_generic()) {
LOperand* obj = UseFixed(instr->object(), a0);
LLoadNamedFieldPolymorphic* result = new LLoadNamedFieldPolymorphic(obj);
return MarkAsCall(DefineFixed(result, v0), instr);
} else {
LOperand* obj = UseRegisterAtStart(instr->object());
LLoadNamedFieldPolymorphic* result = new LLoadNamedFieldPolymorphic(obj);
return AssignEnvironment(DefineAsRegister(result));
}
}
LInstruction* LChunkBuilder::DoLoadNamedGeneric(HLoadNamedGeneric* instr) {
LOperand* object = UseFixed(instr->object(), a0);
LInstruction* result = DefineFixed(new LLoadNamedGeneric(object), v0);
return MarkAsCall(result, instr);
}
LInstruction* LChunkBuilder::DoLoadFunctionPrototype(
HLoadFunctionPrototype* instr) {
return AssignEnvironment(DefineAsRegister(
new LLoadFunctionPrototype(UseRegister(instr->function()))));
}
LInstruction* LChunkBuilder::DoLoadElements(HLoadElements* instr) {
LOperand* input = UseRegisterAtStart(instr->value());
return DefineAsRegister(new LLoadElements(input));
}
LInstruction* LChunkBuilder::DoLoadExternalArrayPointer(
HLoadExternalArrayPointer* instr) {
LOperand* input = UseRegisterAtStart(instr->value());
return DefineAsRegister(new LLoadExternalArrayPointer(input));
}
LInstruction* LChunkBuilder::DoLoadKeyedFastElement(
HLoadKeyedFastElement* instr) {
ASSERT(instr->representation().IsTagged());
ASSERT(instr->key()->representation().IsInteger32());
LOperand* obj = UseRegisterAtStart(instr->object());
LOperand* key = UseRegisterAtStart(instr->key());
LLoadKeyedFastElement* result = new LLoadKeyedFastElement(obj, key);
return AssignEnvironment(DefineAsRegister(result));
}
LInstruction* LChunkBuilder::DoLoadKeyedFastDoubleElement(
HLoadKeyedFastDoubleElement* instr) {
ASSERT(instr->representation().IsDouble());
ASSERT(instr->key()->representation().IsInteger32());
LOperand* elements = UseTempRegister(instr->elements());
LOperand* key = UseRegisterOrConstantAtStart(instr->key());
LLoadKeyedFastDoubleElement* result =
new LLoadKeyedFastDoubleElement(elements, key);
return AssignEnvironment(DefineAsRegister(result));
}
LInstruction* LChunkBuilder::DoLoadKeyedSpecializedArrayElement(
HLoadKeyedSpecializedArrayElement* instr) {
ElementsKind elements_kind = instr->elements_kind();
Representation representation(instr->representation());
ASSERT(
(representation.IsInteger32() &&
(elements_kind != EXTERNAL_FLOAT_ELEMENTS) &&
(elements_kind != EXTERNAL_DOUBLE_ELEMENTS)) ||
(representation.IsDouble() &&
((elements_kind == EXTERNAL_FLOAT_ELEMENTS) ||
(elements_kind == EXTERNAL_DOUBLE_ELEMENTS))));
ASSERT(instr->key()->representation().IsInteger32());
LOperand* external_pointer = UseRegister(instr->external_pointer());
LOperand* key = UseRegisterOrConstant(instr->key());
LLoadKeyedSpecializedArrayElement* result =
new LLoadKeyedSpecializedArrayElement(external_pointer, key);
LInstruction* load_instr = DefineAsRegister(result);
// An unsigned int array load might overflow and cause a deopt; make sure
// it has an environment.
return (elements_kind == EXTERNAL_UNSIGNED_INT_ELEMENTS) ?
AssignEnvironment(load_instr) : load_instr;
}
LInstruction* LChunkBuilder::DoLoadKeyedGeneric(HLoadKeyedGeneric* instr) {
LOperand* object = UseFixed(instr->object(), a1);
LOperand* key = UseFixed(instr->key(), a0);
LInstruction* result =
DefineFixed(new LLoadKeyedGeneric(object, key), v0);
return MarkAsCall(result, instr);
}
LInstruction* LChunkBuilder::DoStoreKeyedFastElement(
HStoreKeyedFastElement* instr) {
bool needs_write_barrier = instr->NeedsWriteBarrier();
ASSERT(instr->value()->representation().IsTagged());
ASSERT(instr->object()->representation().IsTagged());
ASSERT(instr->key()->representation().IsInteger32());
LOperand* obj = UseTempRegister(instr->object());
LOperand* val = needs_write_barrier
? UseTempRegister(instr->value())
: UseRegisterAtStart(instr->value());
LOperand* key = needs_write_barrier
? UseTempRegister(instr->key())
: UseRegisterOrConstantAtStart(instr->key());
return AssignEnvironment(new LStoreKeyedFastElement(obj, key, val));
}
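// Note: with a write barrier, object, value and key are all forced into temp
// registers, since the record-write code that runs afterwards is expected to
// clobber them.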
LInstruction* LChunkBuilder::DoStoreKeyedFastDoubleElement(
HStoreKeyedFastDoubleElement* instr) {
ASSERT(instr->value()->representation().IsDouble());
ASSERT(instr->elements()->representation().IsTagged());
ASSERT(instr->key()->representation().IsInteger32());
LOperand* elements = UseRegisterAtStart(instr->elements());
LOperand* val = UseTempRegister(instr->value());
LOperand* key = UseRegisterOrConstantAtStart(instr->key());
return new LStoreKeyedFastDoubleElement(elements, key, val);
}
LInstruction* LChunkBuilder::DoStoreKeyedSpecializedArrayElement(
HStoreKeyedSpecializedArrayElement* instr) {
Representation representation(instr->value()->representation());
ElementsKind elements_kind = instr->elements_kind();
ASSERT(
(representation.IsInteger32() &&
(elements_kind != EXTERNAL_FLOAT_ELEMENTS) &&
(elements_kind != EXTERNAL_DOUBLE_ELEMENTS)) ||
(representation.IsDouble() &&
((elements_kind == EXTERNAL_FLOAT_ELEMENTS) ||
(elements_kind == EXTERNAL_DOUBLE_ELEMENTS))));
ASSERT(instr->external_pointer()->representation().IsExternal());
ASSERT(instr->key()->representation().IsInteger32());
LOperand* external_pointer = UseRegister(instr->external_pointer());
bool val_is_temp_register =
elements_kind == EXTERNAL_PIXEL_ELEMENTS ||
elements_kind == EXTERNAL_FLOAT_ELEMENTS;
LOperand* val = val_is_temp_register
? UseTempRegister(instr->value())
: UseRegister(instr->value());
LOperand* key = UseRegisterOrConstant(instr->key());
return new LStoreKeyedSpecializedArrayElement(external_pointer,
key,
val);
}
LInstruction* LChunkBuilder::DoStoreKeyedGeneric(HStoreKeyedGeneric* instr) {
LOperand* obj = UseFixed(instr->object(), a2);
LOperand* key = UseFixed(instr->key(), a1);
LOperand* val = UseFixed(instr->value(), a0);
ASSERT(instr->object()->representation().IsTagged());
ASSERT(instr->key()->representation().IsTagged());
ASSERT(instr->value()->representation().IsTagged());
return MarkAsCall(new LStoreKeyedGeneric(obj, key, val), instr);
}
LInstruction* LChunkBuilder::DoTransitionElementsKind(
HTransitionElementsKind* instr) {
if (instr->original_map()->elements_kind() == FAST_SMI_ONLY_ELEMENTS &&
instr->transitioned_map()->elements_kind() == FAST_ELEMENTS) {
LOperand* object = UseRegister(instr->object());
LOperand* new_map_reg = TempRegister();
LTransitionElementsKind* result =
new LTransitionElementsKind(object, new_map_reg, NULL);
return DefineSameAsFirst(result);
} else {
LOperand* object = UseFixed(instr->object(), a0);
LOperand* fixed_object_reg = FixedTemp(a2);
LOperand* new_map_reg = FixedTemp(a3);
LTransitionElementsKind* result =
new LTransitionElementsKind(object, new_map_reg, fixed_object_reg);
return MarkAsCall(DefineFixed(result, v0), instr);
}
}
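// The FAST_SMI_ONLY_ELEMENTS -> FAST_ELEMENTS case only rewrites the map
// word, so it can be handled inline with a temp register; other transitions
// may reallocate the backing store and therefore go through a runtime call.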
LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) {
bool needs_write_barrier = instr->NeedsWriteBarrier();
LOperand* obj = needs_write_barrier
? UseTempRegister(instr->object())
: UseRegisterAtStart(instr->object());
LOperand* val = needs_write_barrier
? UseTempRegister(instr->value())
: UseRegister(instr->value());
return new LStoreNamedField(obj, val);
}
LInstruction* LChunkBuilder::DoStoreNamedGeneric(HStoreNamedGeneric* instr) {
LOperand* obj = UseFixed(instr->object(), a1);
LOperand* val = UseFixed(instr->value(), a0);
LInstruction* result = new LStoreNamedGeneric(obj, val);
return MarkAsCall(result, instr);
}
LInstruction* LChunkBuilder::DoStringAdd(HStringAdd* instr) {
LOperand* left = UseRegisterAtStart(instr->left());
LOperand* right = UseRegisterAtStart(instr->right());
return MarkAsCall(DefineFixed(new LStringAdd(left, right), v0), instr);
}
LInstruction* LChunkBuilder::DoStringCharCodeAt(HStringCharCodeAt* instr) {
LOperand* string = UseTempRegister(instr->string());
LOperand* index = UseTempRegister(instr->index());
LStringCharCodeAt* result = new LStringCharCodeAt(string, index);
return AssignEnvironment(AssignPointerMap(DefineAsRegister(result)));
}
LInstruction* LChunkBuilder::DoStringCharFromCode(HStringCharFromCode* instr) {
LOperand* char_code = UseRegister(instr->value());
LStringCharFromCode* result = new LStringCharFromCode(char_code);
return AssignPointerMap(DefineAsRegister(result));
}
LInstruction* LChunkBuilder::DoStringLength(HStringLength* instr) {
LOperand* string = UseRegisterAtStart(instr->value());
return DefineAsRegister(new LStringLength(string));
}
LInstruction* LChunkBuilder::DoArrayLiteral(HArrayLiteral* instr) {
return MarkAsCall(DefineFixed(new LArrayLiteral, v0), instr);
}
LInstruction* LChunkBuilder::DoObjectLiteral(HObjectLiteral* instr) {
return MarkAsCall(DefineFixed(new LObjectLiteral, v0), instr);
}
LInstruction* LChunkBuilder::DoRegExpLiteral(HRegExpLiteral* instr) {
return MarkAsCall(DefineFixed(new LRegExpLiteral, v0), instr);
}
LInstruction* LChunkBuilder::DoFunctionLiteral(HFunctionLiteral* instr) {
return MarkAsCall(DefineFixed(new LFunctionLiteral, v0), instr);
}
LInstruction* LChunkBuilder::DoDeleteProperty(HDeleteProperty* instr) {
LOperand* object = UseFixed(instr->object(), a0);
LOperand* key = UseFixed(instr->key(), a1);
LDeleteProperty* result = new LDeleteProperty(object, key);
return MarkAsCall(DefineFixed(result, v0), instr);
}
LInstruction* LChunkBuilder::DoOsrEntry(HOsrEntry* instr) {
allocator_->MarkAsOsrEntry();
current_block_->last_environment()->set_ast_id(instr->ast_id());
return AssignEnvironment(new LOsrEntry);
}
LInstruction* LChunkBuilder::DoParameter(HParameter* instr) {
int spill_index = chunk()->GetParameterStackSlot(instr->index());
return DefineAsSpilled(new LParameter, spill_index);
}
LInstruction* LChunkBuilder::DoUnknownOSRValue(HUnknownOSRValue* instr) {
int spill_index = chunk()->GetNextSpillIndex(false); // Not double-width.
if (spill_index > LUnallocated::kMaxFixedIndex) {
Abort("Too many spill slots needed for OSR");
spill_index = 0;
}
return DefineAsSpilled(new LUnknownOSRValue, spill_index);
}
LInstruction* LChunkBuilder::DoCallStub(HCallStub* instr) {
argument_count_ -= instr->argument_count();
return MarkAsCall(DefineFixed(new LCallStub, v0), instr);
}
LInstruction* LChunkBuilder::DoArgumentsObject(HArgumentsObject* instr) {
// There are no real uses of the arguments object.
// arguments.length and element access are supported directly on
// stack arguments, and any real arguments object use causes a bailout.
// So this value is never used.
return NULL;
}
LInstruction* LChunkBuilder::DoAccessArgumentsAt(HAccessArgumentsAt* instr) {
LOperand* arguments = UseRegister(instr->arguments());
LOperand* length = UseTempRegister(instr->length());
LOperand* index = UseRegister(instr->index());
LAccessArgumentsAt* result = new LAccessArgumentsAt(arguments, length, index);
return AssignEnvironment(DefineAsRegister(result));
}
LInstruction* LChunkBuilder::DoToFastProperties(HToFastProperties* instr) {
LOperand* object = UseFixed(instr->value(), a0);
LToFastProperties* result = new LToFastProperties(object);
return MarkAsCall(DefineFixed(result, v0), instr);
}
LInstruction* LChunkBuilder::DoTypeof(HTypeof* instr) {
LTypeof* result = new LTypeof(UseFixed(instr->value(), a0));
return MarkAsCall(DefineFixed(result, v0), instr);
}
LInstruction* LChunkBuilder::DoTypeofIsAndBranch(HTypeofIsAndBranch* instr) {
return new LTypeofIsAndBranch(UseTempRegister(instr->value()));
}
LInstruction* LChunkBuilder::DoIsConstructCallAndBranch(
HIsConstructCallAndBranch* instr) {
return new LIsConstructCallAndBranch(TempRegister());
}
LInstruction* LChunkBuilder::DoSimulate(HSimulate* instr) {
HEnvironment* env = current_block_->last_environment();
ASSERT(env != NULL);
env->set_ast_id(instr->ast_id());
env->Drop(instr->pop_count());
for (int i = 0; i < instr->values()->length(); ++i) {
HValue* value = instr->values()->at(i);
if (instr->HasAssignedIndexAt(i)) {
env->Bind(instr->GetAssignedIndexAt(i), value);
} else {
env->Push(value);
}
}
  // If there is an instruction pending a deoptimization environment, create a
  // lazy bailout instruction to capture the environment.
if (pending_deoptimization_ast_id_ == instr->ast_id()) {
LInstruction* result = new LLazyBailout;
result = AssignEnvironment(result);
instruction_pending_deoptimization_environment_->
set_deoptimization_environment(result->environment());
ClearInstructionPendingDeoptimizationEnvironment();
return result;
}
return NULL;
}
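// The LLazyBailout created above carries the environment of the instruction
// that is pending deoptimization, allowing the preceding call to deoptimize
// lazily after it returns.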
LInstruction* LChunkBuilder::DoStackCheck(HStackCheck* instr) {
if (instr->is_function_entry()) {
return MarkAsCall(new LStackCheck, instr);
} else {
ASSERT(instr->is_backwards_branch());
return AssignEnvironment(AssignPointerMap(new LStackCheck));
}
}
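// A function-entry check is treated as an ordinary call to the stack-check
// stub, while a back-edge check keeps an environment and pointer map so that
// execution can safely be interrupted there.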
LInstruction* LChunkBuilder::DoEnterInlined(HEnterInlined* instr) {
HEnvironment* outer = current_block_->last_environment();
HConstant* undefined = graph()->GetConstantUndefined();
HEnvironment* inner = outer->CopyForInlining(instr->closure(),
instr->function(),
undefined,
instr->call_kind());
current_block_->UpdateEnvironment(inner);
chunk_->AddInlinedClosure(instr->closure());
return NULL;
}
LInstruction* LChunkBuilder::DoLeaveInlined(HLeaveInlined* instr) {
HEnvironment* outer = current_block_->last_environment()->outer();
current_block_->UpdateEnvironment(outer);
return NULL;
}
LInstruction* LChunkBuilder::DoIn(HIn* instr) {
LOperand* key = UseRegisterAtStart(instr->key());
LOperand* object = UseRegisterAtStart(instr->object());
LIn* result = new LIn(key, object);
return MarkAsCall(DefineFixed(result, v0), instr);
}
} } // namespace v8::internal
......@@ -32,275 +32,2191 @@
#include "lithium-allocator.h"
#include "lithium.h"
#include "safepoint-table.h"
// Note: this file was taken from the X64 version. ARM has a partially working
// lithium implementation, but it has not yet been ported to MIPS.
#include "utils.h"
namespace v8 {
namespace internal {
// Forward declarations.
class LCodeGen;
class LEnvironment;
class Translation;
#define LITHIUM_ALL_INSTRUCTION_LIST(V) \
V(ControlInstruction) \
V(Call) \
LITHIUM_CONCRETE_INSTRUCTION_LIST(V)
#define LITHIUM_CONCRETE_INSTRUCTION_LIST(V) \
V(AccessArgumentsAt) \
V(AddI) \
V(ApplyArguments) \
V(ArgumentsElements) \
V(ArgumentsLength) \
V(ArithmeticD) \
V(ArithmeticT) \
V(ArrayLiteral) \
V(BitI) \
V(BitNotI) \
V(BoundsCheck) \
V(Branch) \
V(CallConstantFunction) \
V(CallFunction) \
V(CallGlobal) \
V(CallKeyed) \
V(CallKnownGlobal) \
V(CallNamed) \
V(CallNew) \
V(CallRuntime) \
V(CallStub) \
V(CheckFunction) \
V(CheckInstanceType) \
V(CheckMap) \
V(CheckNonSmi) \
V(CheckPrototypeMaps) \
V(CheckSmi) \
V(ClampDToUint8) \
V(ClampIToUint8) \
V(ClampTToUint8) \
V(ClassOfTestAndBranch) \
V(CmpConstantEqAndBranch) \
V(CmpIDAndBranch) \
V(CmpObjectEqAndBranch) \
V(CmpMapAndBranch) \
V(CmpT) \
V(ConstantD) \
V(ConstantI) \
V(ConstantT) \
V(Context) \
V(DeleteProperty) \
V(Deoptimize) \
V(DivI) \
V(DoubleToI) \
V(ElementsKind) \
V(FixedArrayBaseLength) \
V(FunctionLiteral) \
V(GetCachedArrayIndex) \
V(GlobalObject) \
V(GlobalReceiver) \
V(Goto) \
V(HasCachedArrayIndexAndBranch) \
V(HasInstanceTypeAndBranch) \
V(In) \
V(InstanceOf) \
V(InstanceOfKnownGlobal) \
V(InstructionGap) \
V(Integer32ToDouble) \
V(InvokeFunction) \
V(IsConstructCallAndBranch) \
V(IsNilAndBranch) \
V(IsObjectAndBranch) \
V(IsSmiAndBranch) \
V(IsUndetectableAndBranch) \
V(JSArrayLength) \
V(Label) \
V(LazyBailout) \
V(LoadContextSlot) \
V(LoadElements) \
V(LoadExternalArrayPointer) \
V(LoadFunctionPrototype) \
V(LoadGlobalCell) \
V(LoadGlobalGeneric) \
V(LoadKeyedFastDoubleElement) \
V(LoadKeyedFastElement) \
V(LoadKeyedGeneric) \
V(LoadKeyedSpecializedArrayElement) \
V(LoadNamedField) \
V(LoadNamedFieldPolymorphic) \
V(LoadNamedGeneric) \
V(ModI) \
V(MulI) \
V(NumberTagD) \
V(NumberTagI) \
V(NumberUntagD) \
V(ObjectLiteral) \
V(OsrEntry) \
V(OuterContext) \
V(Parameter) \
V(Power) \
V(PushArgument) \
V(RegExpLiteral) \
V(Return) \
V(ShiftI) \
V(SmiTag) \
V(SmiUntag) \
V(StackCheck) \
V(StoreContextSlot) \
V(StoreGlobalCell) \
V(StoreGlobalGeneric) \
V(StoreKeyedFastDoubleElement) \
V(StoreKeyedFastElement) \
V(StoreKeyedGeneric) \
V(StoreKeyedSpecializedArrayElement) \
V(StoreNamedField) \
V(StoreNamedGeneric) \
V(StringAdd) \
V(StringCharCodeAt) \
V(StringCharFromCode) \
V(StringLength) \
V(SubI) \
V(TaggedToI) \
V(ThisFunction) \
V(Throw) \
V(ToFastProperties) \
V(TransitionElementsKind) \
V(Typeof) \
V(TypeofIsAndBranch) \
V(UnaryMathOperation) \
V(UnknownOSRValue) \
V(ValueOf)
#define DECLARE_CONCRETE_INSTRUCTION(type, mnemonic) \
virtual Opcode opcode() const { return LInstruction::k##type; } \
virtual void CompileToNative(LCodeGen* generator); \
virtual const char* Mnemonic() const { return mnemonic; } \
static L##type* cast(LInstruction* instr) { \
ASSERT(instr->Is##type()); \
return reinterpret_cast<L##type*>(instr); \
}
#define DECLARE_HYDROGEN_ACCESSOR(type) \
H##type* hydrogen() const { \
return H##type::cast(hydrogen_value()); \
}
class LInstruction: public ZoneObject {
public:
LInstruction()
: environment_(NULL),
hydrogen_value_(NULL),
is_call_(false),
is_save_doubles_(false) { }
virtual ~LInstruction() { }
virtual void CompileToNative(LCodeGen* generator) = 0;
virtual const char* Mnemonic() const = 0;
virtual void PrintTo(StringStream* stream);
virtual void PrintDataTo(StringStream* stream) = 0;
virtual void PrintOutputOperandTo(StringStream* stream) = 0;
enum Opcode {
// Declare a unique enum value for each instruction.
#define DECLARE_OPCODE(type) k##type,
LITHIUM_CONCRETE_INSTRUCTION_LIST(DECLARE_OPCODE)
kNumberOfInstructions
#undef DECLARE_OPCODE
};
virtual Opcode opcode() const = 0;
// Declare non-virtual type testers for all leaf IR classes.
#define DECLARE_PREDICATE(type) \
bool Is##type() const { return opcode() == k##type; }
LITHIUM_CONCRETE_INSTRUCTION_LIST(DECLARE_PREDICATE)
#undef DECLARE_PREDICATE
// Declare virtual predicates for instructions that don't have
// an opcode.
virtual bool IsGap() const { return false; }
virtual bool IsControl() const { return false; }
void set_environment(LEnvironment* env) { environment_ = env; }
LEnvironment* environment() const { return environment_; }
bool HasEnvironment() const { return environment_ != NULL; }
void set_pointer_map(LPointerMap* p) { pointer_map_.set(p); }
LPointerMap* pointer_map() const { return pointer_map_.get(); }
bool HasPointerMap() const { return pointer_map_.is_set(); }
void set_hydrogen_value(HValue* value) { hydrogen_value_ = value; }
HValue* hydrogen_value() const { return hydrogen_value_; }
void set_deoptimization_environment(LEnvironment* env) {
deoptimization_environment_.set(env);
}
LEnvironment* deoptimization_environment() const {
return deoptimization_environment_.get();
}
bool HasDeoptimizationEnvironment() const {
return deoptimization_environment_.is_set();
}
void MarkAsCall() { is_call_ = true; }
void MarkAsSaveDoubles() { is_save_doubles_ = true; }
// Interface to the register allocator and iterators.
bool IsMarkedAsCall() const { return is_call_; }
bool IsMarkedAsSaveDoubles() const { return is_save_doubles_; }
virtual bool HasResult() const = 0;
virtual LOperand* result() = 0;
virtual int InputCount() = 0;
virtual LOperand* InputAt(int i) = 0;
virtual int TempCount() = 0;
virtual LOperand* TempAt(int i) = 0;
LOperand* FirstInput() { return InputAt(0); }
LOperand* Output() { return HasResult() ? result() : NULL; }
#ifdef DEBUG
void VerifyCall();
#endif
private:
LEnvironment* environment_;
SetOncePointer<LPointerMap> pointer_map_;
HValue* hydrogen_value_;
SetOncePointer<LEnvironment> deoptimization_environment_;
bool is_call_;
bool is_save_doubles_;
};
// R = number of result operands (0 or 1).
// I = number of input operands.
// T = number of temporary operands.
template<int R, int I, int T>
class LTemplateInstruction: public LInstruction {
public:
// Allow 0 or 1 output operands.
STATIC_ASSERT(R == 0 || R == 1);
virtual bool HasResult() const { return R != 0; }
void set_result(LOperand* operand) { results_[0] = operand; }
LOperand* result() { return results_[0]; }
int InputCount() { return I; }
LOperand* InputAt(int i) { return inputs_[i]; }
int TempCount() { return T; }
LOperand* TempAt(int i) { return temps_[i]; }
virtual void PrintDataTo(StringStream* stream);
virtual void PrintOutputOperandTo(StringStream* stream);
protected:
EmbeddedContainer<LOperand*, R> results_;
EmbeddedContainer<LOperand*, I> inputs_;
EmbeddedContainer<LOperand*, T> temps_;
};
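// For example, LAddI below is an LTemplateInstruction<1, 2, 0>: it produces
// one result and consumes two inputs (left and right) with no temporaries.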
class LGap: public LTemplateInstruction<0, 0, 0> {
public:
explicit LGap(HBasicBlock* block)
: block_(block) {
parallel_moves_[BEFORE] = NULL;
parallel_moves_[START] = NULL;
parallel_moves_[END] = NULL;
parallel_moves_[AFTER] = NULL;
}
// Can't use the DECLARE-macro here because of sub-classes.
virtual bool IsGap() const { return true; }
virtual void PrintDataTo(StringStream* stream);
static LGap* cast(LInstruction* instr) {
ASSERT(instr->IsGap());
return reinterpret_cast<LGap*>(instr);
}
bool IsRedundant() const;
HBasicBlock* block() const { return block_; }
enum InnerPosition {
BEFORE,
START,
END,
AFTER,
FIRST_INNER_POSITION = BEFORE,
LAST_INNER_POSITION = AFTER
};
LParallelMove* GetOrCreateParallelMove(InnerPosition pos) {
if (parallel_moves_[pos] == NULL) parallel_moves_[pos] = new LParallelMove;
return parallel_moves_[pos];
}
LParallelMove* GetParallelMove(InnerPosition pos) {
return parallel_moves_[pos];
}
private:
LParallelMove* parallel_moves_[LAST_INNER_POSITION + 1];
HBasicBlock* block_;
};
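// The four inner positions give the register allocator distinct points at
// which to insert parallel moves around the instruction owning this gap.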
class LInstructionGap: public LGap {
public:
explicit LInstructionGap(HBasicBlock* block) : LGap(block) { }
DECLARE_CONCRETE_INSTRUCTION(InstructionGap, "gap")
};
class LGoto: public LTemplateInstruction<0, 0, 0> {
public:
explicit LGoto(int block_id) : block_id_(block_id) { }
DECLARE_CONCRETE_INSTRUCTION(Goto, "goto")
virtual void PrintDataTo(StringStream* stream);
virtual bool IsControl() const { return true; }
int block_id() const { return block_id_; }
private:
int block_id_;
};
class LLazyBailout: public LTemplateInstruction<0, 0, 0> {
public:
LLazyBailout() : gap_instructions_size_(0) { }
DECLARE_CONCRETE_INSTRUCTION(LazyBailout, "lazy-bailout")
void set_gap_instructions_size(int gap_instructions_size) {
gap_instructions_size_ = gap_instructions_size;
}
int gap_instructions_size() { return gap_instructions_size_; }
private:
int gap_instructions_size_;
};
class LDeoptimize: public LTemplateInstruction<0, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(Deoptimize, "deoptimize")
};
class LLabel: public LGap {
public:
explicit LLabel(HBasicBlock* block)
: LGap(block), replacement_(NULL) { }
DECLARE_CONCRETE_INSTRUCTION(Label, "label")
virtual void PrintDataTo(StringStream* stream);
int block_id() const { return block()->block_id(); }
bool is_loop_header() const { return block()->IsLoopHeader(); }
Label* label() { return &label_; }
LLabel* replacement() const { return replacement_; }
void set_replacement(LLabel* label) { replacement_ = label; }
bool HasReplacement() const { return replacement_ != NULL; }
private:
Label label_;
LLabel* replacement_;
};
class LParameter: public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(Parameter, "parameter")
};
class LCallStub: public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(CallStub, "call-stub")
DECLARE_HYDROGEN_ACCESSOR(CallStub)
TranscendentalCache::Type transcendental_type() {
return hydrogen()->transcendental_type();
}
};
class LUnknownOSRValue: public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(UnknownOSRValue, "unknown-osr-value")
};
template<int I, int T>
class LControlInstruction: public LTemplateInstruction<0, I, T> {
public:
virtual bool IsControl() const { return true; }
int SuccessorCount() { return hydrogen()->SuccessorCount(); }
HBasicBlock* SuccessorAt(int i) { return hydrogen()->SuccessorAt(i); }
int true_block_id() { return hydrogen()->SuccessorAt(0)->block_id(); }
int false_block_id() { return hydrogen()->SuccessorAt(1)->block_id(); }
private:
HControlInstruction* hydrogen() {
return HControlInstruction::cast(this->hydrogen_value());
}
};
class LApplyArguments: public LTemplateInstruction<1, 4, 0> {
public:
LApplyArguments(LOperand* function,
LOperand* receiver,
LOperand* length,
LOperand* elements) {
inputs_[0] = function;
inputs_[1] = receiver;
inputs_[2] = length;
inputs_[3] = elements;
}
DECLARE_CONCRETE_INSTRUCTION(ApplyArguments, "apply-arguments")
LOperand* function() { return inputs_[0]; }
LOperand* receiver() { return inputs_[1]; }
LOperand* length() { return inputs_[2]; }
LOperand* elements() { return inputs_[3]; }
};
class LAccessArgumentsAt: public LTemplateInstruction<1, 3, 0> {
public:
LAccessArgumentsAt(LOperand* arguments, LOperand* length, LOperand* index) {
inputs_[0] = arguments;
inputs_[1] = length;
inputs_[2] = index;
}
DECLARE_CONCRETE_INSTRUCTION(AccessArgumentsAt, "access-arguments-at")
LOperand* arguments() { return inputs_[0]; }
LOperand* length() { return inputs_[1]; }
LOperand* index() { return inputs_[2]; }
virtual void PrintDataTo(StringStream* stream);
};
class LArgumentsLength: public LTemplateInstruction<1, 1, 0> {
public:
explicit LArgumentsLength(LOperand* elements) {
inputs_[0] = elements;
}
DECLARE_CONCRETE_INSTRUCTION(ArgumentsLength, "arguments-length")
};
class LArgumentsElements: public LTemplateInstruction<1, 0, 0> {
public:
LArgumentsElements() { }
DECLARE_CONCRETE_INSTRUCTION(ArgumentsElements, "arguments-elements")
};
class LModI: public LTemplateInstruction<1, 2, 3> {
public:
// Used when the right hand is a constant power of 2.
LModI(LOperand* left,
LOperand* right) {
inputs_[0] = left;
inputs_[1] = right;
temps_[0] = NULL;
temps_[1] = NULL;
temps_[2] = NULL;
}
// Used for the standard case.
LModI(LOperand* left,
LOperand* right,
LOperand* temp1,
LOperand* temp2,
LOperand* temp3) {
inputs_[0] = left;
inputs_[1] = right;
temps_[0] = temp1;
temps_[1] = temp2;
temps_[2] = temp3;
}
DECLARE_CONCRETE_INSTRUCTION(ModI, "mod-i")
DECLARE_HYDROGEN_ACCESSOR(Mod)
};
class LDivI: public LTemplateInstruction<1, 2, 0> {
public:
LDivI(LOperand* left, LOperand* right) {
inputs_[0] = left;
inputs_[1] = right;
}
DECLARE_CONCRETE_INSTRUCTION(DivI, "div-i")
DECLARE_HYDROGEN_ACCESSOR(Div)
};
class LMulI: public LTemplateInstruction<1, 2, 1> {
public:
LMulI(LOperand* left, LOperand* right, LOperand* temp) {
inputs_[0] = left;
inputs_[1] = right;
temps_[0] = temp;
}
DECLARE_CONCRETE_INSTRUCTION(MulI, "mul-i")
DECLARE_HYDROGEN_ACCESSOR(Mul)
};
class LCmpIDAndBranch: public LControlInstruction<2, 0> {
public:
LCmpIDAndBranch(LOperand* left, LOperand* right) {
inputs_[0] = left;
inputs_[1] = right;
}
DECLARE_CONCRETE_INSTRUCTION(CmpIDAndBranch, "cmp-id-and-branch")
DECLARE_HYDROGEN_ACCESSOR(CompareIDAndBranch)
Token::Value op() const { return hydrogen()->token(); }
bool is_double() const {
return hydrogen()->GetInputRepresentation().IsDouble();
}
virtual void PrintDataTo(StringStream* stream);
};
class LUnaryMathOperation: public LTemplateInstruction<1, 1, 1> {
public:
LUnaryMathOperation(LOperand* value, LOperand* temp) {
inputs_[0] = value;
temps_[0] = temp;
}
DECLARE_CONCRETE_INSTRUCTION(UnaryMathOperation, "unary-math-operation")
DECLARE_HYDROGEN_ACCESSOR(UnaryMathOperation)
virtual void PrintDataTo(StringStream* stream);
BuiltinFunctionId op() const { return hydrogen()->op(); }
};
class LCmpObjectEqAndBranch: public LControlInstruction<2, 0> {
public:
LCmpObjectEqAndBranch(LOperand* left, LOperand* right) {
inputs_[0] = left;
inputs_[1] = right;
}
DECLARE_CONCRETE_INSTRUCTION(CmpObjectEqAndBranch,
"cmp-object-eq-and-branch")
DECLARE_HYDROGEN_ACCESSOR(CompareObjectEqAndBranch)
};
class LCmpConstantEqAndBranch: public LControlInstruction<1, 0> {
public:
explicit LCmpConstantEqAndBranch(LOperand* left) {
inputs_[0] = left;
}
DECLARE_CONCRETE_INSTRUCTION(CmpConstantEqAndBranch,
"cmp-constant-eq-and-branch")
DECLARE_HYDROGEN_ACCESSOR(CompareConstantEqAndBranch)
};
class LIsNilAndBranch: public LControlInstruction<1, 0> {
public:
explicit LIsNilAndBranch(LOperand* value) {
inputs_[0] = value;
}
DECLARE_CONCRETE_INSTRUCTION(IsNilAndBranch, "is-nil-and-branch")
DECLARE_HYDROGEN_ACCESSOR(IsNilAndBranch)
EqualityKind kind() const { return hydrogen()->kind(); }
NilValue nil() const { return hydrogen()->nil(); }
virtual void PrintDataTo(StringStream* stream);
};
class LIsObjectAndBranch: public LControlInstruction<1, 1> {
public:
LIsObjectAndBranch(LOperand* value, LOperand* temp) {
inputs_[0] = value;
temps_[0] = temp;
}
DECLARE_CONCRETE_INSTRUCTION(IsObjectAndBranch, "is-object-and-branch")
DECLARE_HYDROGEN_ACCESSOR(IsObjectAndBranch)
virtual void PrintDataTo(StringStream* stream);
};
class LIsSmiAndBranch: public LControlInstruction<1, 0> {
public:
explicit LIsSmiAndBranch(LOperand* value) {
inputs_[0] = value;
}
DECLARE_CONCRETE_INSTRUCTION(IsSmiAndBranch, "is-smi-and-branch")
DECLARE_HYDROGEN_ACCESSOR(IsSmiAndBranch)
virtual void PrintDataTo(StringStream* stream);
};
class LIsUndetectableAndBranch: public LControlInstruction<1, 1> {
public:
explicit LIsUndetectableAndBranch(LOperand* value, LOperand* temp) {
inputs_[0] = value;
temps_[0] = temp;
}
DECLARE_CONCRETE_INSTRUCTION(IsUndetectableAndBranch,
"is-undetectable-and-branch")
DECLARE_HYDROGEN_ACCESSOR(IsUndetectableAndBranch)
virtual void PrintDataTo(StringStream* stream);
};
class LHasInstanceTypeAndBranch: public LControlInstruction<1, 0> {
public:
explicit LHasInstanceTypeAndBranch(LOperand* value) {
inputs_[0] = value;
}
DECLARE_CONCRETE_INSTRUCTION(HasInstanceTypeAndBranch,
"has-instance-type-and-branch")
DECLARE_HYDROGEN_ACCESSOR(HasInstanceTypeAndBranch)
virtual void PrintDataTo(StringStream* stream);
};
class LGetCachedArrayIndex: public LTemplateInstruction<1, 1, 0> {
public:
explicit LGetCachedArrayIndex(LOperand* value) {
inputs_[0] = value;
}
DECLARE_CONCRETE_INSTRUCTION(GetCachedArrayIndex, "get-cached-array-index")
DECLARE_HYDROGEN_ACCESSOR(GetCachedArrayIndex)
};
class LHasCachedArrayIndexAndBranch: public LControlInstruction<1, 0> {
public:
explicit LHasCachedArrayIndexAndBranch(LOperand* value) {
inputs_[0] = value;
}
DECLARE_CONCRETE_INSTRUCTION(HasCachedArrayIndexAndBranch,
"has-cached-array-index-and-branch")
DECLARE_HYDROGEN_ACCESSOR(HasCachedArrayIndexAndBranch)
virtual void PrintDataTo(StringStream* stream);
};
class LClassOfTestAndBranch: public LControlInstruction<1, 1> {
public:
LClassOfTestAndBranch(LOperand* value, LOperand* temp) {
inputs_[0] = value;
temps_[0] = temp;
}
DECLARE_CONCRETE_INSTRUCTION(ClassOfTestAndBranch,
"class-of-test-and-branch")
DECLARE_HYDROGEN_ACCESSOR(ClassOfTestAndBranch)
virtual void PrintDataTo(StringStream* stream);
};
class LCmpT: public LTemplateInstruction<1, 2, 0> {
public:
LCmpT(LOperand* left, LOperand* right) {
inputs_[0] = left;
inputs_[1] = right;
}
DECLARE_CONCRETE_INSTRUCTION(CmpT, "cmp-t")
DECLARE_HYDROGEN_ACCESSOR(CompareGeneric)
Token::Value op() const { return hydrogen()->token(); }
};
class LInstanceOf: public LTemplateInstruction<1, 2, 0> {
public:
LInstanceOf(LOperand* left, LOperand* right) {
inputs_[0] = left;
inputs_[1] = right;
}
DECLARE_CONCRETE_INSTRUCTION(InstanceOf, "instance-of")
};
class LInstanceOfKnownGlobal: public LTemplateInstruction<1, 1, 1> {
public:
LInstanceOfKnownGlobal(LOperand* value, LOperand* temp) {
inputs_[0] = value;
temps_[0] = temp;
}
DECLARE_CONCRETE_INSTRUCTION(InstanceOfKnownGlobal,
"instance-of-known-global")
DECLARE_HYDROGEN_ACCESSOR(InstanceOfKnownGlobal)
Handle<JSFunction> function() const { return hydrogen()->function(); }
};
class LBoundsCheck: public LTemplateInstruction<0, 2, 0> {
public:
LBoundsCheck(LOperand* index, LOperand* length) {
inputs_[0] = index;
inputs_[1] = length;
}
LOperand* index() { return inputs_[0]; }
LOperand* length() { return inputs_[1]; }
DECLARE_CONCRETE_INSTRUCTION(BoundsCheck, "bounds-check")
};
class LBitI: public LTemplateInstruction<1, 2, 0> {
public:
LBitI(Token::Value op, LOperand* left, LOperand* right)
: op_(op) {
inputs_[0] = left;
inputs_[1] = right;
}
Token::Value op() const { return op_; }
DECLARE_CONCRETE_INSTRUCTION(BitI, "bit-i")
private:
Token::Value op_;
};
class LShiftI: public LTemplateInstruction<1, 2, 0> {
public:
LShiftI(Token::Value op, LOperand* left, LOperand* right, bool can_deopt)
: op_(op), can_deopt_(can_deopt) {
inputs_[0] = left;
inputs_[1] = right;
}
Token::Value op() const { return op_; }
bool can_deopt() const { return can_deopt_; }
DECLARE_CONCRETE_INSTRUCTION(ShiftI, "shift-i")
private:
Token::Value op_;
bool can_deopt_;
};
class LSubI: public LTemplateInstruction<1, 2, 0> {
public:
LSubI(LOperand* left, LOperand* right) {
inputs_[0] = left;
inputs_[1] = right;
}
DECLARE_CONCRETE_INSTRUCTION(SubI, "sub-i")
DECLARE_HYDROGEN_ACCESSOR(Sub)
};
class LConstantI: public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(ConstantI, "constant-i")
DECLARE_HYDROGEN_ACCESSOR(Constant)
int32_t value() const { return hydrogen()->Integer32Value(); }
};
class LConstantD: public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(ConstantD, "constant-d")
DECLARE_HYDROGEN_ACCESSOR(Constant)
double value() const { return hydrogen()->DoubleValue(); }
};
class LConstantT: public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(ConstantT, "constant-t")
DECLARE_HYDROGEN_ACCESSOR(Constant)
Handle<Object> value() const { return hydrogen()->handle(); }
};
class LBranch: public LControlInstruction<1, 0> {
public:
explicit LBranch(LOperand* value) {
inputs_[0] = value;
}
DECLARE_CONCRETE_INSTRUCTION(Branch, "branch")
DECLARE_HYDROGEN_ACCESSOR(Branch)
virtual void PrintDataTo(StringStream* stream);
};
class LCmpMapAndBranch: public LTemplateInstruction<0, 1, 1> {
public:
LCmpMapAndBranch(LOperand* value, LOperand* temp) {
inputs_[0] = value;
temps_[0] = temp;
}
DECLARE_CONCRETE_INSTRUCTION(CmpMapAndBranch, "cmp-map-and-branch")
DECLARE_HYDROGEN_ACCESSOR(CompareMap)
virtual bool IsControl() const { return true; }
Handle<Map> map() const { return hydrogen()->map(); }
int true_block_id() const {
return hydrogen()->FirstSuccessor()->block_id();
}
int false_block_id() const {
return hydrogen()->SecondSuccessor()->block_id();
}
};
class LJSArrayLength: public LTemplateInstruction<1, 1, 0> {
public:
explicit LJSArrayLength(LOperand* value) {
inputs_[0] = value;
}
DECLARE_CONCRETE_INSTRUCTION(JSArrayLength, "js-array-length")
DECLARE_HYDROGEN_ACCESSOR(JSArrayLength)
};
class LFixedArrayBaseLength: public LTemplateInstruction<1, 1, 0> {
public:
explicit LFixedArrayBaseLength(LOperand* value) {
inputs_[0] = value;
}
DECLARE_CONCRETE_INSTRUCTION(FixedArrayBaseLength,
"fixed-array-base-length")
DECLARE_HYDROGEN_ACCESSOR(FixedArrayBaseLength)
};
class LElementsKind: public LTemplateInstruction<1, 1, 0> {
public:
explicit LElementsKind(LOperand* value) {
inputs_[0] = value;
}
DECLARE_CONCRETE_INSTRUCTION(ElementsKind, "elements-kind")
DECLARE_HYDROGEN_ACCESSOR(ElementsKind)
};
class LValueOf: public LTemplateInstruction<1, 1, 1> {
public:
LValueOf(LOperand* value, LOperand* temp) {
inputs_[0] = value;
temps_[0] = temp;
}
DECLARE_CONCRETE_INSTRUCTION(ValueOf, "value-of")
DECLARE_HYDROGEN_ACCESSOR(ValueOf)
};
class LThrow: public LTemplateInstruction<0, 1, 0> {
public:
explicit LThrow(LOperand* value) {
inputs_[0] = value;
}
DECLARE_CONCRETE_INSTRUCTION(Throw, "throw")
};
class LBitNotI: public LTemplateInstruction<1, 1, 0> {
public:
explicit LBitNotI(LOperand* value) {
inputs_[0] = value;
}
DECLARE_CONCRETE_INSTRUCTION(BitNotI, "bit-not-i")
};
class LAddI: public LTemplateInstruction<1, 2, 0> {
public:
LAddI(LOperand* left, LOperand* right) {
inputs_[0] = left;
inputs_[1] = right;
}
DECLARE_CONCRETE_INSTRUCTION(AddI, "add-i")
DECLARE_HYDROGEN_ACCESSOR(Add)
};
class LPower: public LTemplateInstruction<1, 2, 0> {
public:
LPower(LOperand* left, LOperand* right) {
inputs_[0] = left;
inputs_[1] = right;
}
DECLARE_CONCRETE_INSTRUCTION(Power, "power")
DECLARE_HYDROGEN_ACCESSOR(Power)
};
class LArithmeticD: public LTemplateInstruction<1, 2, 0> {
public:
LArithmeticD(Token::Value op, LOperand* left, LOperand* right)
: op_(op) {
inputs_[0] = left;
inputs_[1] = right;
}
Token::Value op() const { return op_; }
virtual Opcode opcode() const { return LInstruction::kArithmeticD; }
virtual void CompileToNative(LCodeGen* generator);
virtual const char* Mnemonic() const;
private:
Token::Value op_;
};
class LArithmeticT: public LTemplateInstruction<1, 2, 0> {
public:
LArithmeticT(Token::Value op, LOperand* left, LOperand* right)
: op_(op) {
inputs_[0] = left;
inputs_[1] = right;
}
virtual Opcode opcode() const { return LInstruction::kArithmeticT; }
virtual void CompileToNative(LCodeGen* generator);
virtual const char* Mnemonic() const;
Token::Value op() const { return op_; }
private:
Token::Value op_;
};
class LReturn: public LTemplateInstruction<0, 1, 0> {
public:
explicit LReturn(LOperand* value) {
inputs_[0] = value;
}
DECLARE_CONCRETE_INSTRUCTION(Return, "return")
};
class LLoadNamedField: public LTemplateInstruction<1, 1, 0> {
public:
explicit LLoadNamedField(LOperand* object) {
inputs_[0] = object;
}
DECLARE_CONCRETE_INSTRUCTION(LoadNamedField, "load-named-field")
DECLARE_HYDROGEN_ACCESSOR(LoadNamedField)
};
class LLoadNamedFieldPolymorphic: public LTemplateInstruction<1, 1, 0> {
public:
explicit LLoadNamedFieldPolymorphic(LOperand* object) {
inputs_[0] = object;
}
  DECLARE_CONCRETE_INSTRUCTION(LoadNamedFieldPolymorphic,
                               "load-named-field-polymorphic")
DECLARE_HYDROGEN_ACCESSOR(LoadNamedFieldPolymorphic)
LOperand* object() { return inputs_[0]; }
};
class LLoadNamedGeneric: public LTemplateInstruction<1, 1, 0> {
public:
explicit LLoadNamedGeneric(LOperand* object) {
inputs_[0] = object;
}
DECLARE_CONCRETE_INSTRUCTION(LoadNamedGeneric, "load-named-generic")
DECLARE_HYDROGEN_ACCESSOR(LoadNamedGeneric)
LOperand* object() { return inputs_[0]; }
Handle<Object> name() const { return hydrogen()->name(); }
};
class LLoadFunctionPrototype: public LTemplateInstruction<1, 1, 0> {
public:
explicit LLoadFunctionPrototype(LOperand* function) {
inputs_[0] = function;
}
DECLARE_CONCRETE_INSTRUCTION(LoadFunctionPrototype, "load-function-prototype")
DECLARE_HYDROGEN_ACCESSOR(LoadFunctionPrototype)
LOperand* function() { return inputs_[0]; }
};
class LLoadElements: public LTemplateInstruction<1, 1, 0> {
public:
explicit LLoadElements(LOperand* object) {
inputs_[0] = object;
}
DECLARE_CONCRETE_INSTRUCTION(LoadElements, "load-elements")
};
class LLoadExternalArrayPointer: public LTemplateInstruction<1, 1, 0> {
public:
explicit LLoadExternalArrayPointer(LOperand* object) {
inputs_[0] = object;
}
DECLARE_CONCRETE_INSTRUCTION(LoadExternalArrayPointer,
"load-external-array-pointer")
};
class LLoadKeyedFastElement: public LTemplateInstruction<1, 2, 0> {
public:
LLoadKeyedFastElement(LOperand* elements, LOperand* key) {
inputs_[0] = elements;
inputs_[1] = key;
}
DECLARE_CONCRETE_INSTRUCTION(LoadKeyedFastElement, "load-keyed-fast-element")
DECLARE_HYDROGEN_ACCESSOR(LoadKeyedFastElement)
LOperand* elements() { return inputs_[0]; }
LOperand* key() { return inputs_[1]; }
};
class LLoadKeyedFastDoubleElement: public LTemplateInstruction<1, 2, 0> {
public:
LLoadKeyedFastDoubleElement(LOperand* elements, LOperand* key) {
inputs_[0] = elements;
inputs_[1] = key;
}
DECLARE_CONCRETE_INSTRUCTION(LoadKeyedFastDoubleElement,
"load-keyed-fast-double-element")
DECLARE_HYDROGEN_ACCESSOR(LoadKeyedFastDoubleElement)
LOperand* elements() { return inputs_[0]; }
LOperand* key() { return inputs_[1]; }
};
class LLoadKeyedSpecializedArrayElement: public LTemplateInstruction<1, 2, 0> {
public:
LLoadKeyedSpecializedArrayElement(LOperand* external_pointer,
LOperand* key) {
inputs_[0] = external_pointer;
inputs_[1] = key;
}
DECLARE_CONCRETE_INSTRUCTION(LoadKeyedSpecializedArrayElement,
"load-keyed-specialized-array-element")
DECLARE_HYDROGEN_ACCESSOR(LoadKeyedSpecializedArrayElement)
LOperand* external_pointer() { return inputs_[0]; }
LOperand* key() { return inputs_[1]; }
ElementsKind elements_kind() const {
return hydrogen()->elements_kind();
}
};
class LLoadKeyedGeneric: public LTemplateInstruction<1, 2, 0> {
public:
LLoadKeyedGeneric(LOperand* obj, LOperand* key) {
inputs_[0] = obj;
inputs_[1] = key;
}
DECLARE_CONCRETE_INSTRUCTION(LoadKeyedGeneric, "load-keyed-generic")
LOperand* object() { return inputs_[0]; }
LOperand* key() { return inputs_[1]; }
};
class LLoadGlobalCell: public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(LoadGlobalCell, "load-global-cell")
DECLARE_HYDROGEN_ACCESSOR(LoadGlobalCell)
};
class LLoadGlobalGeneric: public LTemplateInstruction<1, 1, 0> {
public:
explicit LLoadGlobalGeneric(LOperand* global_object) {
inputs_[0] = global_object;
}
DECLARE_CONCRETE_INSTRUCTION(LoadGlobalGeneric, "load-global-generic")
DECLARE_HYDROGEN_ACCESSOR(LoadGlobalGeneric)
LOperand* global_object() { return inputs_[0]; }
Handle<Object> name() const { return hydrogen()->name(); }
bool for_typeof() const { return hydrogen()->for_typeof(); }
};
class LStoreGlobalCell: public LTemplateInstruction<0, 1, 1> {
public:
LStoreGlobalCell(LOperand* value, LOperand* temp) {
inputs_[0] = value;
temps_[0] = temp;
}
DECLARE_CONCRETE_INSTRUCTION(StoreGlobalCell, "store-global-cell")
DECLARE_HYDROGEN_ACCESSOR(StoreGlobalCell)
};
class LStoreGlobalGeneric: public LTemplateInstruction<0, 2, 0> {
public:
explicit LStoreGlobalGeneric(LOperand* global_object,
LOperand* value) {
inputs_[0] = global_object;
inputs_[1] = value;
}
DECLARE_CONCRETE_INSTRUCTION(StoreGlobalGeneric, "store-global-generic")
DECLARE_HYDROGEN_ACCESSOR(StoreGlobalGeneric)
LOperand* global_object() { return InputAt(0); }
Handle<Object> name() const { return hydrogen()->name(); }
LOperand* value() { return InputAt(1); }
bool strict_mode() { return hydrogen()->strict_mode(); }
};
class LLoadContextSlot: public LTemplateInstruction<1, 1, 0> {
public:
explicit LLoadContextSlot(LOperand* context) {
inputs_[0] = context;
}
DECLARE_CONCRETE_INSTRUCTION(LoadContextSlot, "load-context-slot")
DECLARE_HYDROGEN_ACCESSOR(LoadContextSlot)
LOperand* context() { return InputAt(0); }
int slot_index() { return hydrogen()->slot_index(); }
virtual void PrintDataTo(StringStream* stream);
};
class LStoreContextSlot: public LTemplateInstruction<0, 2, 0> {
public:
LStoreContextSlot(LOperand* context, LOperand* value) {
inputs_[0] = context;
inputs_[1] = value;
}
DECLARE_CONCRETE_INSTRUCTION(StoreContextSlot, "store-context-slot")
DECLARE_HYDROGEN_ACCESSOR(StoreContextSlot)
LOperand* context() { return InputAt(0); }
LOperand* value() { return InputAt(1); }
int slot_index() { return hydrogen()->slot_index(); }
virtual void PrintDataTo(StringStream* stream);
};
class LPushArgument: public LTemplateInstruction<0, 1, 0> {
public:
explicit LPushArgument(LOperand* value) {
inputs_[0] = value;
}
DECLARE_CONCRETE_INSTRUCTION(PushArgument, "push-argument")
};
class LThisFunction: public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(ThisFunction, "this-function")
DECLARE_HYDROGEN_ACCESSOR(ThisFunction)
};
class LContext: public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(Context, "context")
};
class LOuterContext: public LTemplateInstruction<1, 1, 0> {
public:
explicit LOuterContext(LOperand* context) {
inputs_[0] = context;
}
DECLARE_CONCRETE_INSTRUCTION(OuterContext, "outer-context")
LOperand* context() { return InputAt(0); }
};
class LGlobalObject: public LTemplateInstruction<1, 1, 0> {
public:
explicit LGlobalObject(LOperand* context) {
inputs_[0] = context;
}
DECLARE_CONCRETE_INSTRUCTION(GlobalObject, "global-object")
LOperand* context() { return InputAt(0); }
};
class LGlobalReceiver: public LTemplateInstruction<1, 1, 0> {
public:
explicit LGlobalReceiver(LOperand* global_object) {
inputs_[0] = global_object;
}
DECLARE_CONCRETE_INSTRUCTION(GlobalReceiver, "global-receiver")
LOperand* global() { return InputAt(0); }
};
class LCallConstantFunction: public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(CallConstantFunction, "call-constant-function")
DECLARE_HYDROGEN_ACCESSOR(CallConstantFunction)
virtual void PrintDataTo(StringStream* stream);
Handle<JSFunction> function() { return hydrogen()->function(); }
int arity() const { return hydrogen()->argument_count() - 1; }
};
class LInvokeFunction: public LTemplateInstruction<1, 1, 0> {
public:
explicit LInvokeFunction(LOperand* function) {
inputs_[0] = function;
}
DECLARE_CONCRETE_INSTRUCTION(InvokeFunction, "invoke-function")
DECLARE_HYDROGEN_ACCESSOR(InvokeFunction)
LOperand* function() { return inputs_[0]; }
virtual void PrintDataTo(StringStream* stream);
int arity() const { return hydrogen()->argument_count() - 1; }
};
class LCallKeyed: public LTemplateInstruction<1, 1, 0> {
public:
explicit LCallKeyed(LOperand* key) {
inputs_[0] = key;
}
DECLARE_CONCRETE_INSTRUCTION(CallKeyed, "call-keyed")
DECLARE_HYDROGEN_ACCESSOR(CallKeyed)
virtual void PrintDataTo(StringStream* stream);
int arity() const { return hydrogen()->argument_count() - 1; }
};
class LCallNamed: public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(CallNamed, "call-named")
DECLARE_HYDROGEN_ACCESSOR(CallNamed)
virtual void PrintDataTo(StringStream* stream);
Handle<String> name() const { return hydrogen()->name(); }
int arity() const { return hydrogen()->argument_count() - 1; }
};
class LCallFunction: public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(CallFunction, "call-function")
DECLARE_HYDROGEN_ACCESSOR(CallFunction)
int arity() const { return hydrogen()->argument_count() - 2; }
};
class LCallGlobal: public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(CallGlobal, "call-global")
DECLARE_HYDROGEN_ACCESSOR(CallGlobal)
virtual void PrintDataTo(StringStream* stream);
  Handle<String> name() const { return hydrogen()->name(); }
int arity() const { return hydrogen()->argument_count() - 1; }
};
class LCallKnownGlobal: public LTemplateInstruction<1, 0, 0> {
 public:
  DECLARE_CONCRETE_INSTRUCTION(CallKnownGlobal, "call-known-global")
  DECLARE_HYDROGEN_ACCESSOR(CallKnownGlobal)
  virtual void PrintDataTo(StringStream* stream);
  Handle<JSFunction> target() const { return hydrogen()->target(); }
  int arity() const { return hydrogen()->argument_count() - 1; }
};
class LCallNew: public LTemplateInstruction<1, 1, 0> {
 public:
  explicit LCallNew(LOperand* constructor) {
    inputs_[0] = constructor;
  }
DECLARE_CONCRETE_INSTRUCTION(CallNew, "call-new")
DECLARE_HYDROGEN_ACCESSOR(CallNew)
virtual void PrintDataTo(StringStream* stream);
int arity() const { return hydrogen()->argument_count() - 1; }
};
class LCallRuntime: public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(CallRuntime, "call-runtime")
DECLARE_HYDROGEN_ACCESSOR(CallRuntime)
const Runtime::Function* function() const { return hydrogen()->function(); }
int arity() const { return hydrogen()->argument_count(); }
};
class LInteger32ToDouble: public LTemplateInstruction<1, 1, 0> {
public:
explicit LInteger32ToDouble(LOperand* value) {
inputs_[0] = value;
}
DECLARE_CONCRETE_INSTRUCTION(Integer32ToDouble, "int32-to-double")
};
class LNumberTagI: public LTemplateInstruction<1, 1, 0> {
public:
explicit LNumberTagI(LOperand* value) {
inputs_[0] = value;
}
DECLARE_CONCRETE_INSTRUCTION(NumberTagI, "number-tag-i")
};
class LNumberTagD: public LTemplateInstruction<1, 1, 2> {
public:
LNumberTagD(LOperand* value, LOperand* temp1, LOperand* temp2) {
inputs_[0] = value;
temps_[0] = temp1;
temps_[1] = temp2;
}
DECLARE_CONCRETE_INSTRUCTION(NumberTagD, "number-tag-d")
};
// Sometimes truncating conversion from a double to an int32.
class LDoubleToI: public LTemplateInstruction<1, 1, 2> {
public:
LDoubleToI(LOperand* value, LOperand* temp1, LOperand* temp2) {
inputs_[0] = value;
temps_[0] = temp1;
temps_[1] = temp2;
}
DECLARE_CONCRETE_INSTRUCTION(DoubleToI, "double-to-i")
DECLARE_HYDROGEN_ACCESSOR(UnaryOperation)
bool truncating() { return hydrogen()->CanTruncateToInt32(); }
};
// Truncating conversion from a tagged value to an int32.
class LTaggedToI: public LTemplateInstruction<1, 1, 3> {
public:
LTaggedToI(LOperand* value,
LOperand* temp1,
LOperand* temp2,
LOperand* temp3) {
inputs_[0] = value;
temps_[0] = temp1;
temps_[1] = temp2;
temps_[2] = temp3;
}
DECLARE_CONCRETE_INSTRUCTION(TaggedToI, "tagged-to-i")
DECLARE_HYDROGEN_ACCESSOR(UnaryOperation)
bool truncating() { return hydrogen()->CanTruncateToInt32(); }
};
class LSmiTag: public LTemplateInstruction<1, 1, 0> {
public:
explicit LSmiTag(LOperand* value) {
inputs_[0] = value;
}
DECLARE_CONCRETE_INSTRUCTION(SmiTag, "smi-tag")
};
class LNumberUntagD: public LTemplateInstruction<1, 1, 0> {
public:
explicit LNumberUntagD(LOperand* value) {
inputs_[0] = value;
}
DECLARE_CONCRETE_INSTRUCTION(NumberUntagD, "double-untag")
DECLARE_HYDROGEN_ACCESSOR(Change)
};
class LSmiUntag: public LTemplateInstruction<1, 1, 0> {
public:
LSmiUntag(LOperand* value, bool needs_check)
: needs_check_(needs_check) {
inputs_[0] = value;
}
DECLARE_CONCRETE_INSTRUCTION(SmiUntag, "smi-untag")
bool needs_check() const { return needs_check_; }
private:
bool needs_check_;
};
class LStoreNamedField: public LTemplateInstruction<0, 2, 0> {
public:
LStoreNamedField(LOperand* obj, LOperand* val) {
inputs_[0] = obj;
inputs_[1] = val;
}
DECLARE_CONCRETE_INSTRUCTION(StoreNamedField, "store-named-field")
DECLARE_HYDROGEN_ACCESSOR(StoreNamedField)
virtual void PrintDataTo(StringStream* stream);
LOperand* object() { return inputs_[0]; }
LOperand* value() { return inputs_[1]; }
Handle<Object> name() const { return hydrogen()->name(); }
bool is_in_object() { return hydrogen()->is_in_object(); }
int offset() { return hydrogen()->offset(); }
Handle<Map> transition() const { return hydrogen()->transition(); }
};
class LStoreNamedGeneric: public LTemplateInstruction<0, 2, 0> {
public:
LStoreNamedGeneric(LOperand* obj, LOperand* val) {
inputs_[0] = obj;
inputs_[1] = val;
}
DECLARE_CONCRETE_INSTRUCTION(StoreNamedGeneric, "store-named-generic")
DECLARE_HYDROGEN_ACCESSOR(StoreNamedGeneric)
virtual void PrintDataTo(StringStream* stream);
LOperand* object() { return inputs_[0]; }
LOperand* value() { return inputs_[1]; }
Handle<Object> name() const { return hydrogen()->name(); }
StrictModeFlag strict_mode_flag() { return hydrogen()->strict_mode_flag(); }
bool strict_mode() { return strict_mode_flag() == kStrictMode; }
};
class LStoreKeyedFastElement: public LTemplateInstruction<0, 3, 0> {
public:
LStoreKeyedFastElement(LOperand* obj, LOperand* key, LOperand* val) {
inputs_[0] = obj;
inputs_[1] = key;
inputs_[2] = val;
}
DECLARE_CONCRETE_INSTRUCTION(StoreKeyedFastElement,
"store-keyed-fast-element")
DECLARE_HYDROGEN_ACCESSOR(StoreKeyedFastElement)
virtual void PrintDataTo(StringStream* stream);
LOperand* object() { return inputs_[0]; }
LOperand* key() { return inputs_[1]; }
LOperand* value() { return inputs_[2]; }
};
class LStoreKeyedFastDoubleElement: public LTemplateInstruction<0, 3, 0> {
public:
LStoreKeyedFastDoubleElement(LOperand* elements,
LOperand* key,
LOperand* val) {
inputs_[0] = elements;
inputs_[1] = key;
inputs_[2] = val;
}
DECLARE_CONCRETE_INSTRUCTION(StoreKeyedFastDoubleElement,
"store-keyed-fast-double-element")
DECLARE_HYDROGEN_ACCESSOR(StoreKeyedFastDoubleElement)
virtual void PrintDataTo(StringStream* stream);
LOperand* elements() { return inputs_[0]; }
LOperand* key() { return inputs_[1]; }
LOperand* value() { return inputs_[2]; }
};
class LStoreKeyedGeneric: public LTemplateInstruction<0, 3, 0> {
public:
LStoreKeyedGeneric(LOperand* obj, LOperand* key, LOperand* val) {
inputs_[0] = obj;
inputs_[1] = key;
inputs_[2] = val;
}
DECLARE_CONCRETE_INSTRUCTION(StoreKeyedGeneric, "store-keyed-generic")
DECLARE_HYDROGEN_ACCESSOR(StoreKeyedGeneric)
virtual void PrintDataTo(StringStream* stream);
LOperand* object() { return inputs_[0]; }
LOperand* key() { return inputs_[1]; }
LOperand* value() { return inputs_[2]; }
bool strict_mode() { return hydrogen()->strict_mode(); }
};
class LStoreKeyedSpecializedArrayElement: public LTemplateInstruction<0, 3, 0> {
public:
LStoreKeyedSpecializedArrayElement(LOperand* external_pointer,
LOperand* key,
LOperand* val) {
inputs_[0] = external_pointer;
inputs_[1] = key;
inputs_[2] = val;
}
DECLARE_CONCRETE_INSTRUCTION(StoreKeyedSpecializedArrayElement,
"store-keyed-specialized-array-element")
DECLARE_HYDROGEN_ACCESSOR(StoreKeyedSpecializedArrayElement)
LOperand* external_pointer() { return inputs_[0]; }
LOperand* key() { return inputs_[1]; }
LOperand* value() { return inputs_[2]; }
ElementsKind elements_kind() const {
return hydrogen()->elements_kind();
}
};
class LTransitionElementsKind: public LTemplateInstruction<1, 1, 2> {
public:
LTransitionElementsKind(LOperand* object,
LOperand* new_map_temp,
LOperand* temp_reg) {
inputs_[0] = object;
temps_[0] = new_map_temp;
temps_[1] = temp_reg;
}
DECLARE_CONCRETE_INSTRUCTION(TransitionElementsKind,
"transition-elements-kind")
DECLARE_HYDROGEN_ACCESSOR(TransitionElementsKind)
virtual void PrintDataTo(StringStream* stream);
LOperand* object() { return inputs_[0]; }
LOperand* new_map_reg() { return temps_[0]; }
LOperand* temp_reg() { return temps_[1]; }
Handle<Map> original_map() { return hydrogen()->original_map(); }
Handle<Map> transitioned_map() { return hydrogen()->transitioned_map(); }
};
class LStringAdd: public LTemplateInstruction<1, 2, 0> {
 public:
LStringAdd(LOperand* left, LOperand* right) {
inputs_[0] = left;
inputs_[1] = right;
}
DECLARE_CONCRETE_INSTRUCTION(StringAdd, "string-add")
DECLARE_HYDROGEN_ACCESSOR(StringAdd)
LOperand* left() { return inputs_[0]; }
LOperand* right() { return inputs_[1]; }
};
class LStringCharCodeAt: public LTemplateInstruction<1, 2, 0> {
public:
LStringCharCodeAt(LOperand* string, LOperand* index) {
inputs_[0] = string;
inputs_[1] = index;
}
DECLARE_CONCRETE_INSTRUCTION(StringCharCodeAt, "string-char-code-at")
DECLARE_HYDROGEN_ACCESSOR(StringCharCodeAt)
LOperand* string() { return inputs_[0]; }
LOperand* index() { return inputs_[1]; }
};
class LStringCharFromCode: public LTemplateInstruction<1, 1, 0> {
public:
explicit LStringCharFromCode(LOperand* char_code) {
inputs_[0] = char_code;
}
DECLARE_CONCRETE_INSTRUCTION(StringCharFromCode, "string-char-from-code")
DECLARE_HYDROGEN_ACCESSOR(StringCharFromCode)
LOperand* char_code() { return inputs_[0]; }
};
class LStringLength: public LTemplateInstruction<1, 1, 0> {
 public:
explicit LStringLength(LOperand* string) {
inputs_[0] = string;
}
DECLARE_CONCRETE_INSTRUCTION(StringLength, "string-length")
DECLARE_HYDROGEN_ACCESSOR(StringLength)
LOperand* string() { return inputs_[0]; }
};
class LCheckFunction: public LTemplateInstruction<0, 1, 0> {
 public:
explicit LCheckFunction(LOperand* value) {
inputs_[0] = value;
}
DECLARE_CONCRETE_INSTRUCTION(CheckFunction, "check-function")
DECLARE_HYDROGEN_ACCESSOR(CheckFunction)
};
class LCheckInstanceType: public LTemplateInstruction<0, 1, 0> {
public:
explicit LCheckInstanceType(LOperand* value) {
inputs_[0] = value;
}
DECLARE_CONCRETE_INSTRUCTION(CheckInstanceType, "check-instance-type")
DECLARE_HYDROGEN_ACCESSOR(CheckInstanceType)
};
class LCheckMap: public LTemplateInstruction<0, 1, 0> {
public:
explicit LCheckMap(LOperand* value) {
inputs_[0] = value;
}
DECLARE_CONCRETE_INSTRUCTION(CheckMap, "check-map")
DECLARE_HYDROGEN_ACCESSOR(CheckMap)
};
class LCheckPrototypeMaps: public LTemplateInstruction<0, 0, 2> {
public:
LCheckPrototypeMaps(LOperand* temp1, LOperand* temp2) {
temps_[0] = temp1;
temps_[1] = temp2;
}
DECLARE_CONCRETE_INSTRUCTION(CheckPrototypeMaps, "check-prototype-maps")
DECLARE_HYDROGEN_ACCESSOR(CheckPrototypeMaps)
Handle<JSObject> prototype() const { return hydrogen()->prototype(); }
Handle<JSObject> holder() const { return hydrogen()->holder(); }
};
class LCheckSmi: public LTemplateInstruction<0, 1, 0> {
public:
explicit LCheckSmi(LOperand* value) {
inputs_[0] = value;
}
DECLARE_CONCRETE_INSTRUCTION(CheckSmi, "check-smi")
};
class LCheckNonSmi: public LTemplateInstruction<0, 1, 0> {
 public:
explicit LCheckNonSmi(LOperand* value) {
inputs_[0] = value;
}
DECLARE_CONCRETE_INSTRUCTION(CheckNonSmi, "check-non-smi")
};
class LClampDToUint8: public LTemplateInstruction<1, 1, 1> {
public:
LClampDToUint8(LOperand* value, LOperand* temp) {
inputs_[0] = value;
temps_[0] = temp;
}
LOperand* unclamped() { return inputs_[0]; }
DECLARE_CONCRETE_INSTRUCTION(ClampDToUint8, "clamp-d-to-uint8")
};
class LClampIToUint8: public LTemplateInstruction<1, 1, 0> {
public:
explicit LClampIToUint8(LOperand* value) {
inputs_[0] = value;
}
LOperand* unclamped() { return inputs_[0]; }
DECLARE_CONCRETE_INSTRUCTION(ClampIToUint8, "clamp-i-to-uint8")
};
class LClampTToUint8: public LTemplateInstruction<1, 1, 1> {
public:
LClampTToUint8(LOperand* value, LOperand* temp) {
inputs_[0] = value;
temps_[0] = temp;
}
LOperand* unclamped() { return inputs_[0]; }
DECLARE_CONCRETE_INSTRUCTION(ClampTToUint8, "clamp-t-to-uint8")
};
class LArrayLiteral: public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(ArrayLiteral, "array-literal")
DECLARE_HYDROGEN_ACCESSOR(ArrayLiteral)
};
class LObjectLiteral: public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(ObjectLiteral, "object-literal")
DECLARE_HYDROGEN_ACCESSOR(ObjectLiteral)
};
class LRegExpLiteral: public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(RegExpLiteral, "regexp-literal")
DECLARE_HYDROGEN_ACCESSOR(RegExpLiteral)
};
class LFunctionLiteral: public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(FunctionLiteral, "function-literal")
DECLARE_HYDROGEN_ACCESSOR(FunctionLiteral)
Handle<SharedFunctionInfo> shared_info() { return hydrogen()->shared_info(); }
};
class LToFastProperties: public LTemplateInstruction<1, 1, 0> {
public:
explicit LToFastProperties(LOperand* value) {
inputs_[0] = value;
}
DECLARE_CONCRETE_INSTRUCTION(ToFastProperties, "to-fast-properties")
DECLARE_HYDROGEN_ACCESSOR(ToFastProperties)
};
class LTypeof: public LTemplateInstruction<1, 1, 0> {
public:
explicit LTypeof(LOperand* value) {
inputs_[0] = value;
}
DECLARE_CONCRETE_INSTRUCTION(Typeof, "typeof")
};
class LTypeofIsAndBranch: public LControlInstruction<1, 0> {
public:
explicit LTypeofIsAndBranch(LOperand* value) {
inputs_[0] = value;
}
DECLARE_CONCRETE_INSTRUCTION(TypeofIsAndBranch, "typeof-is-and-branch")
DECLARE_HYDROGEN_ACCESSOR(TypeofIsAndBranch)
Handle<String> type_literal() { return hydrogen()->type_literal(); }
virtual void PrintDataTo(StringStream* stream);
};
class LIsConstructCallAndBranch: public LControlInstruction<0, 1> {
public:
explicit LIsConstructCallAndBranch(LOperand* temp) {
temps_[0] = temp;
}
DECLARE_CONCRETE_INSTRUCTION(IsConstructCallAndBranch,
"is-construct-call-and-branch")
};
class LDeleteProperty: public LTemplateInstruction<1, 2, 0> {
public:
LDeleteProperty(LOperand* obj, LOperand* key) {
inputs_[0] = obj;
inputs_[1] = key;
}
DECLARE_CONCRETE_INSTRUCTION(DeleteProperty, "delete-property")
LOperand* object() { return inputs_[0]; }
LOperand* key() { return inputs_[1]; }
};
class LOsrEntry: public LTemplateInstruction<0, 0, 0> {
public:
LOsrEntry();
DECLARE_CONCRETE_INSTRUCTION(OsrEntry, "osr-entry")
LOperand** SpilledRegisterArray() { return register_spills_; }
LOperand** SpilledDoubleRegisterArray() { return double_register_spills_; }
void MarkSpilledRegister(int allocation_index, LOperand* spill_operand);
void MarkSpilledDoubleRegister(int allocation_index,
LOperand* spill_operand);
private:
// Arrays of spill slot operands for registers with an assigned spill
// slot, i.e., that must also be restored to the spill slot on OSR entry.
// NULL if the register has no assigned spill slot. Indexed by allocation
// index.
LOperand* register_spills_[Register::kNumAllocatableRegisters];
LOperand* double_register_spills_[DoubleRegister::kNumAllocatableRegisters];
};
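// A minimal sketch of the spill bookkeeping MarkSpilledRegister is expected
// to do (the real definitions live in lithium-mips.cc; this ASSERT-based
// body is an illustration, not the checked-in code):
//
//   void LOsrEntry::MarkSpilledRegister(int allocation_index,
//                                       LOperand* spill_operand) {
//     ASSERT(spill_operand->IsStackSlot());
//     ASSERT(register_spills_[allocation_index] == NULL);
//     register_spills_[allocation_index] = spill_operand;
//   }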
class LStackCheck: public LTemplateInstruction<0, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(StackCheck, "stack-check")
DECLARE_HYDROGEN_ACCESSOR(StackCheck)
Label* done_label() { return &done_label_; }
private:
Label done_label_;
};
class LIn: public LTemplateInstruction<1, 2, 0> {
public:
LIn(LOperand* key, LOperand* object) {
inputs_[0] = key;
inputs_[1] = object;
}
LOperand* key() { return inputs_[0]; }
LOperand* object() { return inputs_[1]; }
DECLARE_CONCRETE_INSTRUCTION(In, "in")
};
class LChunkBuilder;
class LChunk: public ZoneObject {
public:
explicit LChunk(CompilationInfo* info, HGraph* graph);
void AddInstruction(LInstruction* instruction, HBasicBlock* block);
LConstantOperand* DefineConstantOperand(HConstant* constant);
Handle<Object> LookupLiteral(LConstantOperand* operand) const;
Representation LookupLiteralRepresentation(LConstantOperand* operand) const;
int GetNextSpillIndex(bool is_double);
LOperand* GetNextSpillSlot(bool is_double);
int ParameterAt(int index);
int GetParameterStackSlot(int index) const;
int spill_slot_count() const { return spill_slot_count_; }
CompilationInfo* info() const { return info_; }
HGraph* graph() const { return graph_; }
const ZoneList<LInstruction*>* instructions() const { return &instructions_; }
void AddGapMove(int index, LOperand* from, LOperand* to);
LGap* GetGapAt(int index) const;
bool IsGapAt(int index) const;
int NearestGapPos(int index) const;
void MarkEmptyBlocks();
const ZoneList<LPointerMap*>* pointer_maps() const { return &pointer_maps_; }
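// Returns the label instruction that begins the given basic block; the
// first instruction of every block is expected to be an LLabel.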
LLabel* GetLabel(int block_id) const {
HBasicBlock* block = graph_->blocks()->at(block_id);
int first_instruction = block->first_instruction_index();
return LLabel::cast(instructions_[first_instruction]);
}
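// Follows the chain of replacement labels installed when empty blocks are
// eliminated (see MarkEmptyBlocks) to find the block that actually emits
// code.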
int LookupDestination(int block_id) const {
LLabel* cur = GetLabel(block_id);
while (cur->replacement() != NULL) {
cur = cur->replacement();
}
return cur->block_id();
}
Label* GetAssemblyLabel(int block_id) const {
LLabel* label = GetLabel(block_id);
ASSERT(!label->HasReplacement());
return label->label();
}
const ZoneList<Handle<JSFunction> >* inlined_closures() const {
return &inlined_closures_;
}
void AddInlinedClosure(Handle<JSFunction> closure) {
inlined_closures_.Add(closure);
}
#ifdef DEBUG
void Verify() { UNIMPLEMENTED(); }
#endif
private:
int spill_slot_count_;
CompilationInfo* info_;
HGraph* const graph_;
ZoneList<LInstruction*> instructions_;
ZoneList<LPointerMap*> pointer_maps_;
ZoneList<Handle<JSFunction> > inlined_closures_;
};
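// Sketch of how a code generator might walk a finished chunk (illustrative
// only; everything except the LChunk members used here is an assumption):
//
//   const ZoneList<LInstruction*>* instrs = chunk->instructions();
//   for (int i = 0; i < instrs->length(); i++) {
//     if (chunk->IsGapAt(i)) {
//       // Emit the parallel moves the register allocator put in this gap.
//     }
//     // ... emit native code for instrs->at(i) ...
//   }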
class LChunkBuilder BASE_EMBEDDED {
public:
LChunkBuilder(CompilationInfo* info, HGraph* graph, LAllocator* allocator)
: chunk_(NULL),
info_(info),
graph_(graph),
status_(UNUSED),
current_instruction_(NULL),
current_block_(NULL),
next_block_(NULL),
argument_count_(0),
allocator_(allocator),
position_(RelocInfo::kNoPosition),
instruction_pending_deoptimization_environment_(NULL),
pending_deoptimization_ast_id_(AstNode::kNoNumber) { }
// Build the sequence for the graph.
LChunk* Build();
// Declare methods that deal with the individual node types.
#define DECLARE_DO(type) LInstruction* Do##type(H##type* node);
HYDROGEN_CONCRETE_INSTRUCTION_LIST(DECLARE_DO)
#undef DECLARE_DO
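// For each entry of HYDROGEN_CONCRETE_INSTRUCTION_LIST the macro above
// expands to a declaration such as:
//
//   LInstruction* DoAdd(HAdd* node);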
private:
enum Status {
UNUSED,
BUILDING,
DONE,
ABORTED
};
LChunk* chunk() const { return chunk_; }
CompilationInfo* info() const { return info_; }
HGraph* graph() const { return graph_; }
bool is_unused() const { return status_ == UNUSED; }
bool is_building() const { return status_ == BUILDING; }
bool is_done() const { return status_ == DONE; }
bool is_aborted() const { return status_ == ABORTED; }
void Abort(const char* format, ...);
// Methods for getting operands for Use / Define / Temp.
LRegister* ToOperand(Register reg);
LUnallocated* ToUnallocated(Register reg);
LUnallocated* ToUnallocated(DoubleRegister reg);
// Methods for setting up define-use relationships.
MUST_USE_RESULT LOperand* Use(HValue* value, LUnallocated* operand);
MUST_USE_RESULT LOperand* UseFixed(HValue* value, Register fixed_register);
MUST_USE_RESULT LOperand* UseFixedDouble(HValue* value,
DoubleRegister fixed_register);
// A value that is guaranteed to be allocated to a register.
// An operand created by UseRegister is guaranteed to be live until the end
// of the instruction. This means that the register allocator will not reuse
// its register for any other operand inside the instruction.
// An operand created by UseRegisterAtStart is guaranteed to be live only at
// the instruction start. The register allocator is free to assign the same
// register to some other operand used inside the instruction (i.e. a
// temporary or an output).
MUST_USE_RESULT LOperand* UseRegister(HValue* value);
MUST_USE_RESULT LOperand* UseRegisterAtStart(HValue* value);
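// Illustrative use of the two lifetimes in a hypothetical builder method
// (LDivI and the operand names are assumptions, not this port's code):
//
//   LOperand* dividend = UseRegister(instr->left());         // live to end
//   LOperand* divisor = UseRegisterAtStart(instr->right());  // start only
//   return AssignEnvironment(DefineAsRegister(new LDivI(dividend, divisor)));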
// An input operand in a register that may be trashed.
MUST_USE_RESULT LOperand* UseTempRegister(HValue* value);
// An input operand in a register or stack slot.
MUST_USE_RESULT LOperand* Use(HValue* value);
MUST_USE_RESULT LOperand* UseAtStart(HValue* value);
// An input operand in a register, a stack slot, or a constant operand.
MUST_USE_RESULT LOperand* UseOrConstant(HValue* value);
MUST_USE_RESULT LOperand* UseOrConstantAtStart(HValue* value);
// An input operand in a register or a constant operand.
MUST_USE_RESULT LOperand* UseRegisterOrConstant(HValue* value);
MUST_USE_RESULT LOperand* UseRegisterOrConstantAtStart(HValue* value);
// An input operand in a register, a stack slot, or a constant operand.
// Will not be moved to a register even if one is freely available.
MUST_USE_RESULT LOperand* UseAny(HValue* value);
// Temporary operand that must be in a register.
MUST_USE_RESULT LUnallocated* TempRegister();
MUST_USE_RESULT LOperand* FixedTemp(Register reg);
MUST_USE_RESULT LOperand* FixedTemp(DoubleRegister reg);
// Methods for defining the result of an instruction.
// They return the same instruction that they are passed.
template<int I, int T>
LInstruction* Define(LTemplateInstruction<1, I, T>* instr,
LUnallocated* result);
template<int I, int T>
LInstruction* Define(LTemplateInstruction<1, I, T>* instr);
template<int I, int T>
LInstruction* DefineAsRegister(LTemplateInstruction<1, I, T>* instr);
template<int I, int T>
LInstruction* DefineAsSpilled(LTemplateInstruction<1, I, T>* instr,
int index);
template<int I, int T>
LInstruction* DefineSameAsFirst(LTemplateInstruction<1, I, T>* instr);
template<int I, int T>
LInstruction* DefineFixed(LTemplateInstruction<1, I, T>* instr,
Register reg);
template<int I, int T>
LInstruction* DefineFixedDouble(LTemplateInstruction<1, I, T>* instr,
DoubleRegister reg);
LInstruction* AssignEnvironment(LInstruction* instr);
LInstruction* AssignPointerMap(LInstruction* instr);
enum CanDeoptimize { CAN_DEOPTIMIZE_EAGERLY, CANNOT_DEOPTIMIZE_EAGERLY };
// By default we assume that instruction sequences generated for calls
// cannot deoptimize eagerly, and we do not attach an environment to such
// an instruction.
LInstruction* MarkAsCall(
LInstruction* instr,
HInstruction* hinstr,
CanDeoptimize can_deoptimize = CANNOT_DEOPTIMIZE_EAGERLY);
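// Typical call-site shape in a builder method (hypothetical; the choice of
// v0, the MIPS return-value register, is an assumption):
//
//   return MarkAsCall(DefineFixed(result, v0), instr);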
LInstruction* MarkAsSaveDoubles(LInstruction* instr);
LInstruction* SetInstructionPendingDeoptimizationEnvironment(
LInstruction* instr, int ast_id);
void ClearInstructionPendingDeoptimizationEnvironment();
LEnvironment* CreateEnvironment(HEnvironment* hydrogen_env,
int* argument_index_accumulator);
void VisitInstruction(HInstruction* current);
void DoBasicBlock(HBasicBlock* block, HBasicBlock* next_block);
LInstruction* DoBit(Token::Value op, HBitwiseBinaryOperation* instr);
LInstruction* DoShift(Token::Value op, HBitwiseBinaryOperation* instr);
LInstruction* DoArithmeticD(Token::Value op,
HArithmeticBinaryOperation* instr);
LInstruction* DoArithmeticT(Token::Value op,
HArithmeticBinaryOperation* instr);
LChunk* chunk_;
CompilationInfo* info_;
HGraph* const graph_;
Status status_;
HInstruction* current_instruction_;
HBasicBlock* current_block_;
HBasicBlock* next_block_;
int argument_count_;
LAllocator* allocator_;
int position_;
LInstruction* instruction_pending_deoptimization_environment_;
int pending_deoptimization_ast_id_;
DISALLOW_COPY_AND_ASSIGN(LChunkBuilder);
};
#undef DECLARE_HYDROGEN_ACCESSOR
#undef DECLARE_CONCRETE_INSTRUCTION
} } // namespace v8::internal
......