Commit b510dc58 authored by danno@chromium.org

MIPS: Re-land Crankshaft-generated KeyedLoad stubs.

Port r13236 (cd9236c5)

BUG=
TEST=

Review URL: https://codereview.chromium.org/11801002
Patch from Akos Palfi <palfia@homejinni.com>.

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@13322 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
parent ab301a54
@@ -80,9 +80,36 @@ bool Operand::is_reg() const {
}
int Register::NumAllocatableRegisters() {
if (CpuFeatures::IsSupported(FPU)) {
return kMaxNumAllocatableRegisters;
} else {
return kMaxNumAllocatableRegisters - kGPRsPerNonFPUDouble;
}
}
int DoubleRegister::NumRegisters() {
if (CpuFeatures::IsSupported(FPU)) {
return FPURegister::kNumRegisters;
} else {
return 1;
}
}
int DoubleRegister::NumAllocatableRegisters() {
if (CpuFeatures::IsSupported(FPU)) {
return FPURegister::kMaxNumAllocatableRegisters;
} else {
return 1;
}
}
int FPURegister::ToAllocationIndex(FPURegister reg) {
ASSERT(reg.code() % 2 == 0);
- ASSERT(reg.code() / 2 < kNumAllocatableRegisters);
+ ASSERT(reg.code() / 2 < kMaxNumAllocatableRegisters);
ASSERT(reg.is_valid());
ASSERT(!reg.is(kDoubleRegZero));
ASSERT(!reg.is(kLithiumScratchDouble));
......
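The hunk above is the core of the FPU-less support: allocatable-register counts become runtime queries instead of compile-time constants, so one binary serves CPUs with and without an FPU. A minimal standalone sketch of the pattern follows; the feature probe and the counts are illustrative stand-ins, not V8's actual API or values.

#include <cstdio>

// Stand-ins for V8's CpuFeatures probe and register constants (assumed).
static bool HasFPU() { return false; }            // imagine a runtime CPU probe
static const int kMaxNumAllocatableDoubles = 14;  // compile-time upper bound

// Static tables stay sized by the maximum; iteration uses the runtime count.
static const char* allocation_state[kMaxNumAllocatableDoubles];

int NumAllocatableDoubles() {
  return HasFPU() ? kMaxNumAllocatableDoubles : 1;  // one software "double"
}

int main() {
  for (int i = 0; i < NumAllocatableDoubles(); ++i) allocation_state[i] = "free";
  std::printf("%d allocatable double register(s)\n", NumAllocatableDoubles());
  return 0;
}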
@@ -73,6 +73,33 @@ static uint64_t CpuFeaturesImpliedByCompiler() {
}
const char* DoubleRegister::AllocationIndexToString(int index) {
if (CpuFeatures::IsSupported(FPU)) {
ASSERT(index >= 0 && index < kMaxNumAllocatableRegisters);
const char* const names[] = {
"f0",
"f2",
"f4",
"f6",
"f8",
"f10",
"f12",
"f14",
"f16",
"f18",
"f20",
"f22",
"f24",
"f26"
};
return names[index];
} else {
ASSERT(index == 0);
return "sfpd0";
}
}
void CpuFeatures::Probe() {
unsigned standard_features = (OS::CpuFeaturesImpliedByPlatform() |
CpuFeaturesImpliedByCompiler());
......
@@ -72,20 +72,23 @@ namespace internal {
// Core register.
struct Register {
static const int kNumRegisters = v8::internal::kNumRegisters;
- static const int kNumAllocatableRegisters = 14; // v0 through t7.
+ static const int kMaxNumAllocatableRegisters = 14; // v0 through t7.
static const int kSizeInBytes = 4;
static const int kGPRsPerNonFPUDouble = 2;
inline static int NumAllocatableRegisters();
static int ToAllocationIndex(Register reg) {
return reg.code() - 2; // zero_reg and 'at' are skipped.
}
static Register FromAllocationIndex(int index) {
- ASSERT(index >= 0 && index < kNumAllocatableRegisters);
+ ASSERT(index >= 0 && index < kMaxNumAllocatableRegisters);
return from_code(index + 2); // zero_reg and 'at' are skipped.
}
static const char* AllocationIndexToString(int index) {
- ASSERT(index >= 0 && index < kNumAllocatableRegisters);
+ ASSERT(index >= 0 && index < kMaxNumAllocatableRegisters);
const char* const names[] = {
"v0",
"v1",
@@ -197,38 +200,19 @@ struct FPURegister {
// f28: 0.0
// f30: scratch register.
static const int kNumReservedRegisters = 2;
- static const int kNumAllocatableRegisters = kNumRegisters / 2 -
+ static const int kMaxNumAllocatableRegisters = kNumRegisters / 2 -
kNumReservedRegisters;
inline static int NumRegisters();
inline static int NumAllocatableRegisters();
inline static int ToAllocationIndex(FPURegister reg);
static const char* AllocationIndexToString(int index);
static FPURegister FromAllocationIndex(int index) {
- ASSERT(index >= 0 && index < kNumAllocatableRegisters);
+ ASSERT(index >= 0 && index < kMaxNumAllocatableRegisters);
return from_code(index * 2);
}
- static const char* AllocationIndexToString(int index) {
- ASSERT(index >= 0 && index < kNumAllocatableRegisters);
- const char* const names[] = {
- "f0",
- "f2",
- "f4",
- "f6",
- "f8",
- "f10",
- "f12",
- "f14",
- "f16",
- "f18",
- "f20",
- "f22",
- "f24",
- "f26"
- };
- return names[index];
- }
static FPURegister from_code(int code) {
FPURegister r = { code };
return r;
@@ -316,6 +300,9 @@ const FPURegister f29 = { 29 };
const FPURegister f30 = { 30 };
const FPURegister f31 = { 31 };
const Register sfpd_lo = { kRegister_t6_Code };
const Register sfpd_hi = { kRegister_t7_Code };
// Register aliases.
// cp is assumed to be a callee saved register.
// Defined using #define instead of "static const Register&" because Clang
......
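The new sfpd_lo/sfpd_hi pair (t6/t7) gives software floating point two GPRs to hold the halves of a 64-bit double, which is also why kGPRsPerNonFPUDouble shrinks the allocatable GPR count by two when the FPU is absent. A sketch of the hi/lo split in plain C++; the word order shown assumes little-endian MIPS.

#include <cstdint>
#include <cstring>

// Split a double into the two 32-bit halves an sfpd_hi:sfpd_lo pair holds.
void SplitDouble(double d, uint32_t* hi, uint32_t* lo) {
  uint64_t bits;
  std::memcpy(&bits, &d, sizeof bits);      // type-pun safely via memcpy
  *lo = static_cast<uint32_t>(bits);        // low mantissa word
  *hi = static_cast<uint32_t>(bits >> 32);  // sign, exponent, high mantissa
}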
@@ -1297,6 +1297,26 @@ CODE_AGE_LIST(DEFINE_CODE_AGE_BUILTIN_GENERATOR)
#undef DEFINE_CODE_AGE_BUILTIN_GENERATOR
void Builtins::Generate_NotifyICMiss(MacroAssembler* masm) {
{
FrameScope scope(masm, StackFrame::INTERNAL);
// Preserve registers across notification, this is important for compiled
// stubs that tail call the runtime on deopts passing their parameters in
// registers.
__ MultiPush(kJSCallerSaved | kCalleeSaved);
// Pass the function and deoptimization type to the runtime system.
__ CallRuntime(Runtime::kNotifyICMiss, 0);
__ MultiPop(kJSCallerSaved | kCalleeSaved);
}
__ mov(at, ra); // Stash the miss continuation
__ Addu(sp, sp, Operand(kPointerSize)); // Ignore state
__ pop(ra); // Restore RA to continuation in JSFunction
__ Jump(at); // Jump to miss handler
}
static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
Deoptimizer::BailoutType type) {
{
......
@@ -38,6 +38,17 @@ namespace v8 {
namespace internal {
void KeyedLoadFastElementStub::InitializeInterfaceDescriptor(
Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
static Register registers[] = { a1, a0 };
descriptor->register_param_count_ = 2;
descriptor->register_params_ = registers;
descriptor->deoptimization_handler_ =
isolate->builtins()->KeyedLoadIC_Miss();
}
#define __ ACCESS_MASM(masm)
static void EmitIdenticalObjectComparison(MacroAssembler* masm,
@@ -500,7 +511,7 @@ void FastCloneShallowObjectStub::Generate(MacroAssembler* masm) {
// 52 fraction bits (20 in the first word, 32 in the second). Zeros is a
// scratch register. Destroys the source register. No GC occurs during this
// stub so you don't have to set up the frame.
- class ConvertToDoubleStub : public CodeStub {
+ class ConvertToDoubleStub : public PlatformCodeStub {
public:
ConvertToDoubleStub(Register result_reg_1,
Register result_reg_2,
@@ -3893,12 +3904,29 @@ void CodeStub::GenerateStubsAheadOfTime() {
void CodeStub::GenerateFPStubs() {
- CEntryStub save_doubles(1, kSaveFPRegs);
- Handle<Code> code = save_doubles.GetCode();
- code->set_is_pregenerated(true);
- StoreBufferOverflowStub stub(kSaveFPRegs);
- stub.GetCode()->set_is_pregenerated(true);
- code->GetIsolate()->set_fp_stubs_generated(true);
+ SaveFPRegsMode mode = CpuFeatures::IsSupported(FPU)
+ ? kSaveFPRegs
+ : kDontSaveFPRegs;
+ CEntryStub save_doubles(1, mode);
+ StoreBufferOverflowStub stub(mode);
+ // These stubs might already be in the snapshot, detect that and don't
+ // regenerate, which would lead to code stub initialization state being messed
+ // up.
+ Code* save_doubles_code = NULL;
+ Code* store_buffer_overflow_code = NULL;
+ if (!save_doubles.FindCodeInCache(&save_doubles_code, ISOLATE)) {
+ if (CpuFeatures::IsSupported(FPU)) {
+ CpuFeatures::Scope scope2(FPU);
+ save_doubles_code = *save_doubles.GetCode();
+ store_buffer_overflow_code = *stub.GetCode();
+ } else {
+ save_doubles_code = *save_doubles.GetCode();
+ store_buffer_overflow_code = *stub.GetCode();
+ }
+ save_doubles_code->set_is_pregenerated(true);
+ store_buffer_overflow_code->set_is_pregenerated(true);
+ }
+ ISOLATE->set_fp_stubs_generated(true);
}
......
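The rewritten GenerateFPStubs only emits code when FindCodeInCache misses: after deserializing a snapshot the stubs may already exist, and regenerating them would corrupt stub-initialization state, as the new comment warns. The guard has the shape of plain memoization; the sketch below uses stand-in types, not V8's stub-cache API.

#include <map>

using CodeHandle = int;  // stand-in for a handle to generated stub code

static std::map<int, CodeHandle> stub_cache;  // plays the role of the cache

// Generate each stub at most once; a cache hit means the snapshot has it.
CodeHandle GetStub(int major_key) {
  auto it = stub_cache.find(major_key);
  if (it == stub_cache.end()) {
    CodeHandle fresh = major_key;  // pretend this line runs the codegen
    it = stub_cache.emplace(major_key, fresh).first;
  }
  return it->second;
}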
@@ -37,7 +37,7 @@ namespace internal {
// Compute a transcendental math function natively, or call the
// TranscendentalCache runtime function.
- class TranscendentalCacheStub: public CodeStub {
+ class TranscendentalCacheStub: public PlatformCodeStub {
public:
enum ArgumentType {
TAGGED = 0 << TranscendentalCache::kTranscendentalTypeBits,
@@ -59,7 +59,7 @@ class TranscendentalCacheStub: public CodeStub {
};
- class StoreBufferOverflowStub: public CodeStub {
+ class StoreBufferOverflowStub: public PlatformCodeStub {
public:
explicit StoreBufferOverflowStub(SaveFPRegsMode save_fp)
: save_doubles_(save_fp) { }
@@ -78,7 +78,7 @@ class StoreBufferOverflowStub: public CodeStub {
};
- class UnaryOpStub: public CodeStub {
+ class UnaryOpStub: public PlatformCodeStub {
public:
UnaryOpStub(Token::Value op,
UnaryOverwriteMode mode,
@@ -220,7 +220,7 @@ enum StringAddFlags {
};
- class StringAddStub: public CodeStub {
+ class StringAddStub: public PlatformCodeStub {
public:
explicit StringAddStub(StringAddFlags flags) : flags_(flags) {}
@@ -243,7 +243,7 @@ class StringAddStub: public CodeStub {
};
- class SubStringStub: public CodeStub {
+ class SubStringStub: public PlatformCodeStub {
public:
SubStringStub() {}
@@ -255,7 +255,7 @@ class SubStringStub: public CodeStub {
};
- class StringCompareStub: public CodeStub {
+ class StringCompareStub: public PlatformCodeStub {
public:
StringCompareStub() { }
@@ -296,7 +296,7 @@ class StringCompareStub: public CodeStub {
// This stub can convert a signed int32 to a heap number (double). It does
// not work for int32s that are in Smi range! No GC occurs during this stub
// so you don't have to set up the frame.
- class WriteInt32ToHeapNumberStub : public CodeStub {
+ class WriteInt32ToHeapNumberStub : public PlatformCodeStub {
public:
WriteInt32ToHeapNumberStub(Register the_int,
Register the_heap_number,
@@ -340,7 +340,7 @@ class WriteInt32ToHeapNumberStub : public CodeStub {
};
- class NumberToStringStub: public CodeStub {
+ class NumberToStringStub: public PlatformCodeStub {
public:
NumberToStringStub() { }
@@ -366,7 +366,7 @@ class NumberToStringStub: public CodeStub {
};
- class RecordWriteStub: public CodeStub {
+ class RecordWriteStub: public PlatformCodeStub {
public:
RecordWriteStub(Register object,
Register value,
@@ -512,7 +512,7 @@ class RecordWriteStub: public CodeStub {
Register GetRegThatIsNotOneOf(Register r1,
Register r2,
Register r3) {
- for (int i = 0; i < Register::kNumAllocatableRegisters; i++) {
+ for (int i = 0; i < Register::NumAllocatableRegisters(); i++) {
Register candidate = Register::FromAllocationIndex(i);
if (candidate.is(r1)) continue;
if (candidate.is(r2)) continue;
@@ -571,7 +571,7 @@ class RecordWriteStub: public CodeStub {
// Enter C code from generated RegExp code in a way that allows
// the C code to fix the return address in case of a GC.
// Currently only needed on ARM and MIPS.
- class RegExpCEntryStub: public CodeStub {
+ class RegExpCEntryStub: public PlatformCodeStub {
public:
RegExpCEntryStub() {}
virtual ~RegExpCEntryStub() {}
@@ -589,7 +589,7 @@ class RegExpCEntryStub: public CodeStub {
// keep the code which called into native pinned in the memory. Currently the
// simplest approach is to generate such stub early enough so it can never be
// moved by GC
- class DirectCEntryStub: public CodeStub {
+ class DirectCEntryStub: public PlatformCodeStub {
public:
DirectCEntryStub() {}
void Generate(MacroAssembler* masm);
@@ -739,7 +739,7 @@ class FloatingPointHelper : public AllStatic {
};
- class StringDictionaryLookupStub: public CodeStub {
+ class StringDictionaryLookupStub: public PlatformCodeStub {
public:
enum LookupMode { POSITIVE_LOOKUP, NEGATIVE_LOOKUP };
......
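Every hand-written stub above re-parents from CodeStub to PlatformCodeStub because r13236 split the hierarchy: PlatformCodeStub keeps the per-architecture MacroAssembler path, while Hydrogen-based stubs such as KeyedLoadFastElementStub are compiled through Crankshaft and deopt to the miss handler named in their interface descriptor. A rough sketch of the split, with bodies and signatures simplified rather than copied from V8:

class CodeStub {                       // shared key/caching machinery
 public:
  virtual ~CodeStub() {}
};

class PlatformCodeStub : public CodeStub {
 public:
  // Hand-written per architecture, like the MIPS stubs in this patch.
  virtual void Generate(/* MacroAssembler* masm */) = 0;
};

class HydrogenCodeStub : public CodeStub {
  // Built from a Hydrogen graph and optimized by Crankshaft; on a miss it
  // falls back to the deoptimization handler from its descriptor.
};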
@@ -46,6 +46,10 @@ enum TypeofState { INSIDE_TYPEOF, NOT_INSIDE_TYPEOF };
class CodeGenerator: public AstVisitor {
public:
CodeGenerator() {
InitializeAstVisitor();
}
static bool MakeCode(CompilationInfo* info);
// Printing of AST, etc. as requested by flags.
@@ -70,7 +74,7 @@ class CodeGenerator: public AstVisitor {
int pos,
bool right_here = false);
- DEFINE_AST_VISITOR_SUBCLASS_METHODS();
+ DEFINE_AST_VISITOR_SUBCLASS_MEMBERS();
private:
DISALLOW_COPY_AND_ASSIGN(CodeGenerator);
......
@@ -206,7 +206,7 @@ static int LookupBailoutId(DeoptimizationInputData* data, BailoutId ast_id) {
void Deoptimizer::DoComputeOsrOutputFrame() {
DeoptimizationInputData* data = DeoptimizationInputData::cast(
- optimized_code_->deoptimization_data());
+ compiled_code_->deoptimization_data());
unsigned ast_id = data->OsrAstId()->value();
int bailout_id = LookupBailoutId(data, BailoutId(ast_id));
@@ -240,7 +240,7 @@ void Deoptimizer::DoComputeOsrOutputFrame() {
unsigned input_frame_size = input_->GetFrameSize();
ASSERT(fixed_size + height_in_bytes == input_frame_size);
- unsigned stack_slot_size = optimized_code_->stack_slots() * kPointerSize;
+ unsigned stack_slot_size = compiled_code_->stack_slots() * kPointerSize;
unsigned outgoing_height = data->ArgumentsStackHeight(bailout_id)->value();
unsigned outgoing_size = outgoing_height * kPointerSize;
unsigned output_frame_size = fixed_size + stack_slot_size + outgoing_size;
@@ -332,7 +332,7 @@ void Deoptimizer::DoComputeOsrOutputFrame() {
unsigned pc_offset = data->OsrPcOffset()->value();
uint32_t pc = reinterpret_cast<uint32_t>(
- optimized_code_->entry() + pc_offset);
+ compiled_code_->entry() + pc_offset);
output_[0]->SetPc(pc);
}
Code* continuation = isolate_->builtins()->builtin(Builtins::kNotifyOSR);
@@ -445,6 +445,70 @@ void Deoptimizer::DoComputeArgumentsAdaptorFrame(TranslationIterator* iterator,
}
void Deoptimizer::DoCompiledStubFrame(TranslationIterator* iterator,
int frame_index) {
//
// FROM TO <-fp
// | .... | | .... |
// +-------------------------+ +-------------------------+
// | JSFunction continuation | | JSFunction continuation |
// +-------------------------+ +-------------------------+<-sp
// | | saved frame (fp) |
// | +=========================+<-fp
// | | JSFunction context |
// v +-------------------------+
// | COMPILED_STUB marker | fp = saved frame
// +-------------------------+ f8 = JSFunction context
// | |
// | ... |
// | |
// +-------------------------+<-sp
//
//
int output_frame_size = 1 * kPointerSize;
FrameDescription* output_frame =
new(output_frame_size) FrameDescription(output_frame_size, 0);
Code* notify_miss =
isolate_->builtins()->builtin(Builtins::kNotifyICMiss);
output_frame->SetState(Smi::FromInt(FullCodeGenerator::NO_REGISTERS));
output_frame->SetContinuation(
reinterpret_cast<intptr_t>(notify_miss->entry()));
ASSERT(compiled_code_->kind() == Code::COMPILED_STUB);
int major_key = compiled_code_->major_key();
CodeStubInterfaceDescriptor* descriptor =
isolate_->code_stub_interface_descriptor(major_key);
Handle<Code> miss_ic(descriptor->deoptimization_handler_);
output_frame->SetPc(reinterpret_cast<intptr_t>(miss_ic->instruction_start()));
unsigned input_frame_size = input_->GetFrameSize();
intptr_t value = input_->GetFrameSlot(input_frame_size - kPointerSize);
output_frame->SetFrameSlot(0, value);
value = input_->GetFrameSlot(input_frame_size - 2 * kPointerSize);
output_frame->SetRegister(fp.code(), value);
output_frame->SetFp(value);
value = input_->GetFrameSlot(input_frame_size - 3 * kPointerSize);
output_frame->SetRegister(cp.code(), value);
Translation::Opcode opcode =
static_cast<Translation::Opcode>(iterator->Next());
ASSERT(opcode == Translation::REGISTER);
USE(opcode);
int input_reg = iterator->Next();
intptr_t input_value = input_->GetRegister(input_reg);
output_frame->SetRegister(a1.code(), input_value);
int32_t next = iterator->Next();
opcode = static_cast<Translation::Opcode>(next);
ASSERT(opcode == Translation::REGISTER);
input_reg = iterator->Next();
input_value = input_->GetRegister(input_reg);
output_frame->SetRegister(a0.code(), input_value);
ASSERT(frame_index == 0);
output_[frame_index] = output_frame;
}
void Deoptimizer::DoComputeConstructStubFrame(TranslationIterator* iterator,
int frame_index) {
Builtins* builtins = isolate_->builtins();
@@ -868,7 +932,7 @@ void Deoptimizer::FillInputFrame(Address tos, JavaScriptFrame* frame) {
}
input_->SetRegister(sp.code(), reinterpret_cast<intptr_t>(frame->sp()));
input_->SetRegister(fp.code(), reinterpret_cast<intptr_t>(frame->fp()));
- for (int i = 0; i < DoubleRegister::kNumAllocatableRegisters; i++) {
+ for (int i = 0; i < DoubleRegister::NumAllocatableRegisters(); i++) {
input_->SetDoubleRegister(i, 0.0);
}
@@ -889,7 +953,6 @@ void Deoptimizer::EntryGenerator::Generate() {
Isolate* isolate = masm()->isolate();
- CpuFeatures::Scope scope(FPU);
// Unlike on ARM we don't save all the registers, just the useful ones.
// For the rest, there are gaps on the stack, so the offsets remain the same.
const int kNumberOfRegisters = Register::kNumRegisters;
@@ -898,14 +961,19 @@ void Deoptimizer::EntryGenerator::Generate() {
RegList saved_regs = restored_regs | sp.bit() | ra.bit();
const int kDoubleRegsSize =
- kDoubleSize * FPURegister::kNumAllocatableRegisters;
- // Save all FPU registers before messing with them.
- __ Subu(sp, sp, Operand(kDoubleRegsSize));
- for (int i = 0; i < FPURegister::kNumAllocatableRegisters; ++i) {
- FPURegister fpu_reg = FPURegister::FromAllocationIndex(i);
- int offset = i * kDoubleSize;
- __ sdc1(fpu_reg, MemOperand(sp, offset));
+ kDoubleSize * FPURegister::kMaxNumAllocatableRegisters;
+ if (CpuFeatures::IsSupported(FPU)) {
+ CpuFeatures::Scope scope(FPU);
+ // Save all FPU registers before messing with them.
+ __ Subu(sp, sp, Operand(kDoubleRegsSize));
+ for (int i = 0; i < FPURegister::kMaxNumAllocatableRegisters; ++i) {
+ FPURegister fpu_reg = FPURegister::FromAllocationIndex(i);
+ int offset = i * kDoubleSize;
+ __ sdc1(fpu_reg, MemOperand(sp, offset));
+ }
+ } else {
+ __ Subu(sp, sp, Operand(kDoubleRegsSize));
+ }
// Push saved_regs (needed to populate FrameDescription::registers_).
@@ -977,14 +1045,17 @@ void Deoptimizer::EntryGenerator::Generate() {
}
}
- // Copy FPU registers to
- // double_registers_[DoubleRegister::kNumAllocatableRegisters]
- int double_regs_offset = FrameDescription::double_registers_offset();
- for (int i = 0; i < FPURegister::kNumAllocatableRegisters; ++i) {
- int dst_offset = i * kDoubleSize + double_regs_offset;
- int src_offset = i * kDoubleSize + kNumberOfRegisters * kPointerSize;
- __ ldc1(f0, MemOperand(sp, src_offset));
- __ sdc1(f0, MemOperand(a1, dst_offset));
+ if (CpuFeatures::IsSupported(FPU)) {
+ CpuFeatures::Scope scope(FPU);
+ // Copy FPU registers to
+ // double_registers_[DoubleRegister::kNumAllocatableRegisters]
+ int double_regs_offset = FrameDescription::double_registers_offset();
+ for (int i = 0; i < FPURegister::NumAllocatableRegisters(); ++i) {
+ int dst_offset = i * kDoubleSize + double_regs_offset;
+ int src_offset = i * kDoubleSize + kNumberOfRegisters * kPointerSize;
+ __ ldc1(f0, MemOperand(sp, src_offset));
+ __ sdc1(f0, MemOperand(a1, dst_offset));
+ }
+ }
// Remove the bailout id, eventually return address, and the saved registers
@@ -1005,11 +1076,14 @@ void Deoptimizer::EntryGenerator::Generate() {
// frame description.
__ Addu(a3, a1, Operand(FrameDescription::frame_content_offset()));
Label pop_loop;
+ Label pop_loop_header;
+ __ Branch(&pop_loop_header);
__ bind(&pop_loop);
__ pop(t0);
__ sw(t0, MemOperand(a3, 0));
- __ Branch(USE_DELAY_SLOT, &pop_loop, ne, a2, Operand(sp));
- __ addiu(a3, a3, sizeof(uint32_t)); // In delay slot.
+ __ addiu(a3, a3, sizeof(uint32_t));
+ __ bind(&pop_loop_header);
+ __ Branch(&pop_loop, ne, a2, Operand(sp));
// Compute the output frame in the deoptimizer.
__ push(a0); // Preserve deoptimizer object across call.
@@ -1024,25 +1098,30 @@ void Deoptimizer::EntryGenerator::Generate() {
__ pop(a0); // Restore deoptimizer object (class Deoptimizer).
// Replace the current (input) frame with the output frames.
- Label outer_push_loop, inner_push_loop;
+ Label outer_push_loop, inner_push_loop,
+ outer_loop_header, inner_loop_header;
// Outer loop state: a0 = current "FrameDescription** output_",
// a1 = one past the last FrameDescription**.
__ lw(a1, MemOperand(a0, Deoptimizer::output_count_offset()));
__ lw(a0, MemOperand(a0, Deoptimizer::output_offset())); // a0 is output_.
__ sll(a1, a1, kPointerSizeLog2); // Count to offset.
__ addu(a1, a0, a1); // a1 = one past the last FrameDescription**.
__ jmp(&outer_loop_header);
__ bind(&outer_push_loop);
// Inner loop state: a2 = current FrameDescription*, a3 = loop index.
__ lw(a2, MemOperand(a0, 0)); // output_[ix]
__ lw(a3, MemOperand(a2, FrameDescription::frame_size_offset()));
__ jmp(&inner_loop_header);
__ bind(&inner_push_loop);
__ Subu(a3, a3, Operand(sizeof(uint32_t)));
__ Addu(t2, a2, Operand(a3));
__ lw(t3, MemOperand(t2, FrameDescription::frame_content_offset()));
__ push(t3);
__ bind(&inner_loop_header);
__ Branch(&inner_push_loop, ne, a3, Operand(zero_reg));
__ Addu(a0, a0, Operand(kPointerSize));
__ bind(&outer_loop_header);
__ Branch(&outer_push_loop, lt, a0, Operand(a1));
......
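All of the rewritten loops gained a *_loop_header label that is branched to before the body, turning do-while loops into test-first loops: a Crankshaft stub frame can have empty content, and the old form always executed the body once (it also leaned on a MIPS branch delay slot, which the new form drops). The control-flow change, restated in C++:

// Old shape: body first, test at the bottom. Pops once even for an empty
// range, and the increment lived in the branch delay slot.
void PopLoopOld(char* end, char* sp) {
  do {
    sp += 4;  // pop one word and store it
  } while (end != sp);
}

// New shape: jump straight to the test, so an empty range pops nothing.
void PopLoopNew(char* end, char* sp) {
  while (end != sp) {
    sp += 4;  // pop one word and store it
  }
}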
@@ -61,6 +61,7 @@ class LCodeGen BASE_EMBEDDED {
deferred_(8, info->zone()),
osr_pc_offset_(-1),
last_lazy_deopt_pc_(0),
frame_is_built_(false),
safepoints_(info->zone()),
resolver_(this),
expected_safepoint_kind_(Safepoint::kSimple) {
@@ -76,6 +77,15 @@ class LCodeGen BASE_EMBEDDED {
Heap* heap() const { return isolate()->heap(); }
Zone* zone() const { return zone_; }
bool NeedsEagerFrame() const {
return GetStackSlotCount() > 0 ||
info()->is_non_deferred_calling() ||
!info()->IsStub();
}
bool NeedsDeferredFrame() const {
return !NeedsEagerFrame() && info()->is_deferred_calling();
}
// Support for converting LOperands to assembler types.
// LOperand must be a register.
Register ToRegister(LOperand* op) const;
@@ -189,7 +199,7 @@ class LCodeGen BASE_EMBEDDED {
Register temporary2);
int GetStackSlotCount() const { return chunk()->spill_slot_count(); }
- int GetParameterCount() const { return scope()->num_parameters(); }
+ int GetParameterCount() const { return info()->num_parameters(); }
void Abort(const char* reason);
void Comment(const char* format, ...);
@@ -368,11 +378,15 @@ class LCodeGen BASE_EMBEDDED {
int* offset);
struct JumpTableEntry {
- explicit inline JumpTableEntry(Address entry)
+ inline JumpTableEntry(Address entry, bool frame, bool is_lazy)
: label(),
- address(entry) { }
+ address(entry),
+ needs_frame(frame),
+ is_lazy_deopt(is_lazy) { }
Label label;
Address address;
bool needs_frame;
bool is_lazy_deopt;
};
void EnsureSpaceForLazyDeopt();
@@ -401,6 +415,7 @@ class LCodeGen BASE_EMBEDDED {
ZoneList<LDeferredCode*> deferred_;
int osr_pc_offset_;
int last_lazy_deopt_pc_;
bool frame_is_built_;
// Builder that keeps track of safepoints in the code. The table
// itself is emitted at the end of the generated code.
@@ -416,6 +431,7 @@ class LCodeGen BASE_EMBEDDED {
PushSafepointRegistersScope(LCodeGen* codegen,
Safepoint::Kind kind)
: codegen_(codegen) {
ASSERT(codegen_->info()->is_calling());
ASSERT(codegen_->expected_safepoint_kind_ == Safepoint::kSimple);
codegen_->expected_safepoint_kind_ = kind;
......
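NeedsEagerFrame() and NeedsDeferredFrame() encode the new frame policy: an optimized JS function always builds a stack frame, but a Crankshaft-compiled stub can skip it unless it spills or calls on its main path, and a stub that only calls from deferred code builds the frame lazily (tracked by the new frame_is_built_ flag). The decision, restated as a standalone sketch with a stand-in for CompilationInfo:

struct Info {                     // stand-in for V8's CompilationInfo
  bool is_stub;
  bool is_non_deferred_calling;   // calls on the fast path
  bool is_deferred_calling;       // calls only from deferred code
};

bool NeedsEagerFrame(const Info& info, int stack_slot_count) {
  // Non-stubs always get a frame; stubs only if they spill or call eagerly.
  return stack_slot_count > 0 || info.is_non_deferred_calling || !info.is_stub;
}

bool NeedsDeferredFrame(const Info& info, int stack_slot_count) {
  return !NeedsEagerFrame(info, stack_slot_count) && info.is_deferred_calling;
}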
@@ -172,8 +172,10 @@ void LGapResolver::BreakCycle(int index) {
} else if (source->IsStackSlot()) {
__ lw(kLithiumScratchReg, cgen_->ToMemOperand(source));
} else if (source->IsDoubleRegister()) {
CpuFeatures::Scope scope(FPU);
__ mov_d(kLithiumScratchDouble, cgen_->ToDoubleRegister(source));
} else if (source->IsDoubleStackSlot()) {
CpuFeatures::Scope scope(FPU);
__ ldc1(kLithiumScratchDouble, cgen_->ToMemOperand(source));
} else {
UNREACHABLE();
@@ -193,9 +195,11 @@ void LGapResolver::RestoreValue() {
} else if (saved_destination_->IsStackSlot()) {
__ sw(kLithiumScratchReg, cgen_->ToMemOperand(saved_destination_));
} else if (saved_destination_->IsDoubleRegister()) {
CpuFeatures::Scope scope(FPU);
__ mov_d(cgen_->ToDoubleRegister(saved_destination_),
kLithiumScratchDouble);
} else if (saved_destination_->IsDoubleStackSlot()) {
CpuFeatures::Scope scope(FPU);
__ sdc1(kLithiumScratchDouble,
cgen_->ToMemOperand(saved_destination_));
} else {
@@ -232,6 +236,7 @@ void LGapResolver::EmitMove(int index) {
MemOperand destination_operand = cgen_->ToMemOperand(destination);
if (in_cycle_) {
if (!destination_operand.OffsetIsInt16Encodable()) {
CpuFeatures::Scope scope(FPU);
// 'at' is overwritten while saving the value to the destination.
// Therefore we can't use 'at'. It is OK if the read from the source
// destroys 'at', since that happens before the value is read.
@@ -271,6 +276,7 @@ void LGapResolver::EmitMove(int index) {
}
} else if (source->IsDoubleRegister()) {
CpuFeatures::Scope scope(FPU);
DoubleRegister source_register = cgen_->ToDoubleRegister(source);
if (destination->IsDoubleRegister()) {
__ mov_d(cgen_->ToDoubleRegister(destination), source_register);
@@ -281,6 +287,7 @@ void LGapResolver::EmitMove(int index) {
}
} else if (source->IsDoubleStackSlot()) {
CpuFeatures::Scope scope(FPU);
MemOperand source_operand = cgen_->ToMemOperand(source);
if (destination->IsDoubleRegister()) {
__ ldc1(cgen_->ToDoubleRegister(destination), source_operand);
......
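Each double move in the gap resolver is now wrapped in a CpuFeatures::Scope, so FPU instructions (mov_d, ldc1, sdc1) are only emitted inside a region that asserts FPU support. A minimal model of that RAII guard; V8's real scope also toggles the assembler's enabled-feature set, which is omitted here.

#include <cassert>

enum Feature { FPU };
static bool IsSupported(Feature) { return true; }  // runtime probe stand-in

class FeatureScope {         // assert-on-entry guard, like CpuFeatures::Scope
 public:
  explicit FeatureScope(Feature f) { assert(IsSupported(f)); }
};

void EmitDoubleMove() {
  FeatureScope scope(FPU);   // any FPU instruction below is now guarded
  // ... emit mov_d / ldc1 / sdc1 here ...
}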
@@ -42,10 +42,10 @@ LITHIUM_CONCRETE_INSTRUCTION_LIST(DEFINE_COMPILE)
#undef DEFINE_COMPILE
LOsrEntry::LOsrEntry() {
- for (int i = 0; i < Register::kNumAllocatableRegisters; ++i) {
+ for (int i = 0; i < Register::NumAllocatableRegisters(); ++i) {
register_spills_[i] = NULL;
}
- for (int i = 0; i < DoubleRegister::kNumAllocatableRegisters; ++i) {
+ for (int i = 0; i < DoubleRegister::NumAllocatableRegisters(); ++i) {
double_register_spills_[i] = NULL;
}
}
@@ -616,6 +616,7 @@ LInstruction* LChunkBuilder::AssignEnvironment(LInstruction* instr) {
LInstruction* LChunkBuilder::MarkAsCall(LInstruction* instr,
HInstruction* hinstr,
CanDeoptimize can_deoptimize) {
info()->MarkAsNonDeferredCalling();
#ifdef DEBUG
instr->VerifyCall();
#endif
@@ -1583,6 +1584,7 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) {
Representation to = instr->to();
if (from.IsTagged()) {
if (to.IsDouble()) {
info()->MarkAsDeferredCalling();
LOperand* value = UseRegister(instr->value());
LNumberUntagD* res = new(zone()) LNumberUntagD(value);
return AssignEnvironment(DefineAsRegister(res));
@@ -1607,6 +1609,7 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) {
}
} else if (from.IsDouble()) {
if (to.IsTagged()) {
info()->MarkAsDeferredCalling();
LOperand* value = UseRegister(instr->value());
LOperand* temp1 = TempRegister();
LOperand* temp2 = TempRegister();
@@ -1626,6 +1629,7 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) {
return AssignEnvironment(DefineAsRegister(res));
}
} else if (from.IsInteger32()) {
info()->MarkAsDeferredCalling();
if (to.IsTagged()) {
HValue* val = instr->value();
LOperand* value = UseRegisterAtStart(val);
@@ -1864,7 +1868,16 @@ LInstruction* LChunkBuilder::DoLoadKeyed(HLoadKeyed* instr) {
(instr->representation().IsDouble() &&
((elements_kind == EXTERNAL_FLOAT_ELEMENTS) ||
(elements_kind == EXTERNAL_DOUBLE_ELEMENTS))));
- LOperand* external_pointer = UseRegister(instr->elements());
+ // float->double conversion on non-VFP2 requires an extra scratch
+ // register. For convenience, just mark the elements register as "UseTemp"
+ // so that it can be used as a temp during the float->double conversion
+ // after it's no longer needed after the float load.
+ bool needs_temp =
+ !CpuFeatures::IsSupported(FPU) &&
+ (elements_kind == EXTERNAL_FLOAT_ELEMENTS);
+ LOperand* external_pointer = needs_temp
+ ? UseTempRegister(instr->elements())
+ : UseRegister(instr->elements());
result = new(zone()) LLoadKeyed(external_pointer, key);
}
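The extra temp register exists because, without an FPU, widening a loaded float to a double happens in integer registers. The underlying IEEE-754 manipulation is sketched below in portable C++: rebias the exponent from 127 to 1023 and left-align the mantissa. This shows the general technique, not the exact instruction sequence V8 emits.

#include <cstdint>

uint64_t FloatBitsToDoubleBits(uint32_t f) {
  uint64_t sign = static_cast<uint64_t>(f >> 31) << 63;
  const int exp = static_cast<int>((f >> 23) & 0xFF);
  uint64_t frac = f & 0x7FFFFFu;
  if (exp == 0xFF)                 // Inf or NaN: all-ones exponent, keep frac
    return sign | (0x7FFull << 52) | (frac << 29);
  if (exp == 0) {
    if (frac == 0) return sign;    // signed zero
    int e = -126;                  // subnormal float: normalize by hand
    while (!(frac & 0x800000)) { frac <<= 1; --e; }
    return sign | (static_cast<uint64_t>(e + 1023) << 52)
                | ((frac & 0x7FFFFFu) << 29);
  }
  // Normal number: rebias the exponent and widen the 23-bit mantissa to 52.
  return sign | (static_cast<uint64_t>(exp - 127 + 1023) << 52) | (frac << 29);
}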
@@ -2083,8 +2096,17 @@ LInstruction* LChunkBuilder::DoOsrEntry(HOsrEntry* instr) {
LInstruction* LChunkBuilder::DoParameter(HParameter* instr) {
- int spill_index = chunk()->GetParameterStackSlot(instr->index());
- return DefineAsSpilled(new(zone()) LParameter, spill_index);
+ LParameter* result = new(zone()) LParameter;
+ if (info()->IsOptimizing()) {
+ int spill_index = chunk()->GetParameterStackSlot(instr->index());
+ return DefineAsSpilled(result, spill_index);
+ } else {
+ ASSERT(info()->IsStub());
+ CodeStubInterfaceDescriptor* descriptor =
+ info()->code_stub()->GetInterfaceDescriptor(info()->isolate());
+ Register reg = descriptor->register_params_[instr->index()];
+ return DefineFixed(result, reg);
+ }
}
......
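DoParameter now distinguishes the two compilation modes: an optimizing compile still reads parameters from their stack spill slots, while a stub compile pins each parameter to the fixed register named by the stub's interface descriptor (a1 and a0 for KeyedLoadFastElementStub, per the code-stubs hunk above). Schematically, with stand-in types for the descriptor and location:

struct Descriptor {
  const char* register_params[2];  // e.g. {"a1", "a0"} for the KeyedLoad stub
  int register_param_count;
};

// Stand-in for the DoParameter decision: where does parameter |index| live?
const char* ParameterLocation(bool is_stub, const Descriptor& d, int index) {
  if (is_stub) return d.register_params[index];  // DefineFixed path
  return "stack spill slot";                     // DefineAsSpilled path
}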
@@ -251,6 +251,11 @@ class LInstruction: public ZoneObject {
void MarkAsCall() { is_call_ = true; }
// Interface to the register allocator and iterators.
bool ClobbersTemps() const { return is_call_; }
bool ClobbersRegisters() const { return is_call_; }
bool ClobbersDoubleRegisters() const { return is_call_; }
// Interface to the register allocator and iterators.
bool IsMarkedAsCall() const { return is_call_; }
@@ -2304,8 +2309,9 @@ class LOsrEntry: public LTemplateInstruction<0, 0, 0> {
// slot, i.e., that must also be restored to the spill slot on OSR entry.
// NULL if the register has no assigned spill slot. Indexed by allocation
// index.
- LOperand* register_spills_[Register::kNumAllocatableRegisters];
- LOperand* double_register_spills_[DoubleRegister::kNumAllocatableRegisters];
+ LOperand* register_spills_[Register::kMaxNumAllocatableRegisters];
+ LOperand* double_register_spills_[
+ DoubleRegister::kMaxNumAllocatableRegisters];
};
......
@@ -118,8 +118,8 @@ void MacroAssembler::PopSafepointRegisters() {
void MacroAssembler::PushSafepointRegistersAndDoubles() {
PushSafepointRegisters();
- Subu(sp, sp, Operand(FPURegister::kNumAllocatableRegisters * kDoubleSize));
- for (int i = 0; i < FPURegister::kNumAllocatableRegisters; i+=2) {
+ Subu(sp, sp, Operand(FPURegister::NumAllocatableRegisters() * kDoubleSize));
+ for (int i = 0; i < FPURegister::NumAllocatableRegisters(); i+=2) {
FPURegister reg = FPURegister::FromAllocationIndex(i);
sdc1(reg, MemOperand(sp, i * kDoubleSize));
}
@@ -127,11 +127,11 @@ void MacroAssembler::PushSafepointRegistersAndDoubles() {
void MacroAssembler::PopSafepointRegistersAndDoubles() {
- for (int i = 0; i < FPURegister::kNumAllocatableRegisters; i+=2) {
+ for (int i = 0; i < FPURegister::NumAllocatableRegisters(); i+=2) {
FPURegister reg = FPURegister::FromAllocationIndex(i);
ldc1(reg, MemOperand(sp, i * kDoubleSize));
}
- Addu(sp, sp, Operand(FPURegister::kNumAllocatableRegisters * kDoubleSize));
+ Addu(sp, sp, Operand(FPURegister::NumAllocatableRegisters() * kDoubleSize));
PopSafepointRegisters();
}
@@ -167,7 +167,7 @@ MemOperand MacroAssembler::SafepointRegisterSlot(Register reg) {
MemOperand MacroAssembler::SafepointRegistersAndDoublesSlot(Register reg) {
UNIMPLEMENTED_MIPS();
// General purpose registers are pushed last on the stack.
- int doubles_size = FPURegister::kNumAllocatableRegisters * kDoubleSize;
+ int doubles_size = FPURegister::NumAllocatableRegisters() * kDoubleSize;
int register_offset = SafepointRegisterStackIndex(reg.code()) * kPointerSize;
return MemOperand(sp, doubles_size + register_offset);
}
@@ -4250,7 +4250,10 @@ void MacroAssembler::CallRuntimeSaveDoubles(Runtime::FunctionId id) {
const Runtime::Function* function = Runtime::FunctionForId(id);
PrepareCEntryArgs(function->nargs);
PrepareCEntryFunction(ExternalReference(function, isolate()));
- CEntryStub stub(1, kSaveFPRegs);
+ SaveFPRegsMode mode = CpuFeatures::IsSupported(FPU)
+ ? kSaveFPRegs
+ : kDontSaveFPRegs;
+ CEntryStub stub(1, mode);
CallStub(&stub);
}
......
@@ -1514,9 +1514,9 @@ class MacroAssembler: public Assembler {
// This handle will be patched with the code object on installation.
Handle<Object> code_object_;
- // Needs access to SafepointRegisterStackIndex for optimized frame
+ // Needs access to SafepointRegisterStackIndex for compiled frame
// traversal.
- friend class OptimizedFrame;
+ friend class StandardFrame;
};
......