Commit b510dc58 authored by danno@chromium.org

MIPS: Re-land Crankshaft-generated KeyedLoad stubs.

Port r13236 (cd9236c5)

BUG=
TEST=

Review URL: https://codereview.chromium.org/11801002
Patch from Akos Palfi <palfia@homejinni.com>.

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@13322 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
parent ab301a54
@@ -80,9 +80,36 @@ bool Operand::is_reg() const {
 }
 
+int Register::NumAllocatableRegisters() {
+  if (CpuFeatures::IsSupported(FPU)) {
+    return kMaxNumAllocatableRegisters;
+  } else {
+    return kMaxNumAllocatableRegisters - kGPRsPerNonFPUDouble;
+  }
+}
+
+
+int DoubleRegister::NumRegisters() {
+  if (CpuFeatures::IsSupported(FPU)) {
+    return FPURegister::kNumRegisters;
+  } else {
+    return 1;
+  }
+}
+
+
+int DoubleRegister::NumAllocatableRegisters() {
+  if (CpuFeatures::IsSupported(FPU)) {
+    return FPURegister::kMaxNumAllocatableRegisters;
+  } else {
+    return 1;
+  }
+}
+
+
 int FPURegister::ToAllocationIndex(FPURegister reg) {
   ASSERT(reg.code() % 2 == 0);
-  ASSERT(reg.code() / 2 < kNumAllocatableRegisters);
+  ASSERT(reg.code() / 2 < kMaxNumAllocatableRegisters);
   ASSERT(reg.is_valid());
   ASSERT(!reg.is(kDoubleRegZero));
   ASSERT(!reg.is(kLithiumScratchDouble));
...
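For context on the split above: kMaxNumAllocatableRegisters stays a compile-time constant so arrays can still be statically sized, while NumAllocatableRegisters() answers at run time and shrinks the set when FPU is absent and two GPRs are reserved to carry a double. A minimal call-site sketch (the helper name is illustrative only; the pattern mirrors the migration in code-stubs-mips.h below):

    // Iterate the allocatable set at run time rather than using the
    // compile-time maximum, so the loop stays in range on FPU-less hardware.
    void VisitAllocatableRegisters() {  // hypothetical helper, not in the patch
      for (int i = 0; i < Register::NumAllocatableRegisters(); i++) {
        Register reg = Register::FromAllocationIndex(i);
        // ... arrays sized with kMaxNumAllocatableRegisters still fit reg.
      }
    }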
@@ -73,6 +73,33 @@ static uint64_t CpuFeaturesImpliedByCompiler() {
 }
 
+const char* DoubleRegister::AllocationIndexToString(int index) {
+  if (CpuFeatures::IsSupported(FPU)) {
+    ASSERT(index >= 0 && index < kMaxNumAllocatableRegisters);
+    const char* const names[] = {
+      "f0",
+      "f2",
+      "f4",
+      "f6",
+      "f8",
+      "f10",
+      "f12",
+      "f14",
+      "f16",
+      "f18",
+      "f20",
+      "f22",
+      "f24",
+      "f26"
+    };
+    return names[index];
+  } else {
+    ASSERT(index == 0);
+    return "sfpd0";
+  }
+}
+
+
 void CpuFeatures::Probe() {
   unsigned standard_features = (OS::CpuFeaturesImpliedByPlatform() |
                                 CpuFeaturesImpliedByCompiler());
...
@@ -72,20 +72,23 @@ namespace internal {
 // Core register.
 struct Register {
   static const int kNumRegisters = v8::internal::kNumRegisters;
-  static const int kNumAllocatableRegisters = 14;  // v0 through t7.
+  static const int kMaxNumAllocatableRegisters = 14;  // v0 through t7.
   static const int kSizeInBytes = 4;
+  static const int kGPRsPerNonFPUDouble = 2;
+
+  inline static int NumAllocatableRegisters();
 
   static int ToAllocationIndex(Register reg) {
     return reg.code() - 2;  // zero_reg and 'at' are skipped.
   }
 
   static Register FromAllocationIndex(int index) {
-    ASSERT(index >= 0 && index < kNumAllocatableRegisters);
+    ASSERT(index >= 0 && index < kMaxNumAllocatableRegisters);
    return from_code(index + 2);  // zero_reg and 'at' are skipped.
   }
 
   static const char* AllocationIndexToString(int index) {
-    ASSERT(index >= 0 && index < kNumAllocatableRegisters);
+    ASSERT(index >= 0 && index < kMaxNumAllocatableRegisters);
     const char* const names[] = {
       "v0",
       "v1",
@@ -197,38 +200,19 @@ struct FPURegister {
   // f28: 0.0
   // f30: scratch register.
   static const int kNumReservedRegisters = 2;
-  static const int kNumAllocatableRegisters = kNumRegisters / 2 -
+  static const int kMaxNumAllocatableRegisters = kNumRegisters / 2 -
       kNumReservedRegisters;
 
+  inline static int NumRegisters();
+  inline static int NumAllocatableRegisters();
   inline static int ToAllocationIndex(FPURegister reg);
+  static const char* AllocationIndexToString(int index);
 
   static FPURegister FromAllocationIndex(int index) {
-    ASSERT(index >= 0 && index < kNumAllocatableRegisters);
+    ASSERT(index >= 0 && index < kMaxNumAllocatableRegisters);
     return from_code(index * 2);
   }
 
-  static const char* AllocationIndexToString(int index) {
-    ASSERT(index >= 0 && index < kNumAllocatableRegisters);
-    const char* const names[] = {
-      "f0",
-      "f2",
-      "f4",
-      "f6",
-      "f8",
-      "f10",
-      "f12",
-      "f14",
-      "f16",
-      "f18",
-      "f20",
-      "f22",
-      "f24",
-      "f26"
-    };
-    return names[index];
-  }
-
   static FPURegister from_code(int code) {
     FPURegister r = { code };
     return r;
@@ -316,6 +300,9 @@ const FPURegister f29 = { 29 };
 const FPURegister f30 = { 30 };
 const FPURegister f31 = { 31 };
 
+const Register sfpd_lo = { kRegister_t6_Code };
+const Register sfpd_hi = { kRegister_t7_Code };
+
 // Register aliases.
 // cp is assumed to be a callee saved register.
 // Defined using #define instead of "static const Register&" because Clang
...
@@ -1297,6 +1297,26 @@ CODE_AGE_LIST(DEFINE_CODE_AGE_BUILTIN_GENERATOR)
 #undef DEFINE_CODE_AGE_BUILTIN_GENERATOR
 
+void Builtins::Generate_NotifyICMiss(MacroAssembler* masm) {
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
+
+    // Preserve registers across notification; this is important for compiled
+    // stubs that tail call the runtime on deopts, passing their parameters in
+    // registers.
+    __ MultiPush(kJSCallerSaved | kCalleeSaved);
+    // Pass the function and deoptimization type to the runtime system.
+    __ CallRuntime(Runtime::kNotifyICMiss, 0);
+    __ MultiPop(kJSCallerSaved | kCalleeSaved);
+  }
+
+  __ mov(at, ra);  // Stash the miss continuation.
+  __ Addu(sp, sp, Operand(kPointerSize));  // Ignore state.
+  __ pop(ra);  // Restore RA to continuation in JSFunction.
+  __ Jump(at);  // Jump to miss handler.
+}
+
+
 static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
                                              Deoptimizer::BailoutType type) {
   {
...
@@ -38,6 +38,17 @@ namespace v8 {
 namespace internal {
 
+void KeyedLoadFastElementStub::InitializeInterfaceDescriptor(
+    Isolate* isolate,
+    CodeStubInterfaceDescriptor* descriptor) {
+  static Register registers[] = { a1, a0 };
+  descriptor->register_param_count_ = 2;
+  descriptor->register_params_ = registers;
+  descriptor->deoptimization_handler_ =
+      isolate->builtins()->KeyedLoadIC_Miss();
+}
+
+
 #define __ ACCESS_MASM(masm)
 
 static void EmitIdenticalObjectComparison(MacroAssembler* masm,
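The descriptor above is the contract Crankshaft compiles against: a1 carries the receiver, a0 the key, and deoptimization_handler_ is where a deopting stub frame resumes. A sketch of the consuming side, mirroring Deoptimizer::DoCompiledStubFrame later in this patch:

    // Sketch: the deoptimizer recovers the same descriptor by the stub's
    // major key and resumes at its registered miss handler.
    int major_key = compiled_code_->major_key();
    CodeStubInterfaceDescriptor* descriptor =
        isolate_->code_stub_interface_descriptor(major_key);
    Handle<Code> miss_ic(descriptor->deoptimization_handler_);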
@@ -500,7 +511,7 @@ void FastCloneShallowObjectStub::Generate(MacroAssembler* masm) {
 // 52 fraction bits (20 in the first word, 32 in the second). Zeros is a
 // scratch register. Destroys the source register. No GC occurs during this
 // stub so you don't have to set up the frame.
-class ConvertToDoubleStub : public CodeStub {
+class ConvertToDoubleStub : public PlatformCodeStub {
  public:
   ConvertToDoubleStub(Register result_reg_1,
                       Register result_reg_2,
@@ -3893,12 +3904,29 @@ void CodeStub::GenerateStubsAheadOfTime() {
 void CodeStub::GenerateFPStubs() {
-  CEntryStub save_doubles(1, kSaveFPRegs);
-  Handle<Code> code = save_doubles.GetCode();
-  code->set_is_pregenerated(true);
-  StoreBufferOverflowStub stub(kSaveFPRegs);
-  stub.GetCode()->set_is_pregenerated(true);
-  code->GetIsolate()->set_fp_stubs_generated(true);
+  SaveFPRegsMode mode = CpuFeatures::IsSupported(FPU)
+      ? kSaveFPRegs
+      : kDontSaveFPRegs;
+  CEntryStub save_doubles(1, mode);
+  StoreBufferOverflowStub stub(mode);
+  // These stubs might already be in the snapshot, detect that and don't
+  // regenerate, which would lead to code stub initialization state being
+  // messed up.
+  Code* save_doubles_code = NULL;
+  Code* store_buffer_overflow_code = NULL;
+  if (!save_doubles.FindCodeInCache(&save_doubles_code, ISOLATE)) {
+    if (CpuFeatures::IsSupported(FPU)) {
+      CpuFeatures::Scope scope2(FPU);
+      save_doubles_code = *save_doubles.GetCode();
+      store_buffer_overflow_code = *stub.GetCode();
+    } else {
+      save_doubles_code = *save_doubles.GetCode();
+      store_buffer_overflow_code = *stub.GetCode();
+    }
+    save_doubles_code->set_is_pregenerated(true);
+    store_buffer_overflow_code->set_is_pregenerated(true);
+  }
+  ISOLATE->set_fp_stubs_generated(true);
 }
...
@@ -37,7 +37,7 @@ namespace internal {
 // Compute a transcendental math function natively, or call the
 // TranscendentalCache runtime function.
-class TranscendentalCacheStub: public CodeStub {
+class TranscendentalCacheStub: public PlatformCodeStub {
  public:
   enum ArgumentType {
     TAGGED = 0 << TranscendentalCache::kTranscendentalTypeBits,
@@ -59,7 +59,7 @@ class TranscendentalCacheStub: public CodeStub {
 };
 
-class StoreBufferOverflowStub: public CodeStub {
+class StoreBufferOverflowStub: public PlatformCodeStub {
  public:
   explicit StoreBufferOverflowStub(SaveFPRegsMode save_fp)
       : save_doubles_(save_fp) { }
@@ -78,7 +78,7 @@ class StoreBufferOverflowStub: public CodeStub {
 };
 
-class UnaryOpStub: public CodeStub {
+class UnaryOpStub: public PlatformCodeStub {
  public:
   UnaryOpStub(Token::Value op,
               UnaryOverwriteMode mode,
@@ -220,7 +220,7 @@ enum StringAddFlags {
 };
 
-class StringAddStub: public CodeStub {
+class StringAddStub: public PlatformCodeStub {
  public:
   explicit StringAddStub(StringAddFlags flags) : flags_(flags) {}
 
@@ -243,7 +243,7 @@ class StringAddStub: public CodeStub {
 };
 
-class SubStringStub: public CodeStub {
+class SubStringStub: public PlatformCodeStub {
  public:
   SubStringStub() {}
 
@@ -255,7 +255,7 @@ class SubStringStub: public CodeStub {
 };
 
-class StringCompareStub: public CodeStub {
+class StringCompareStub: public PlatformCodeStub {
  public:
   StringCompareStub() { }
 
@@ -296,7 +296,7 @@ class StringCompareStub: public CodeStub {
 // This stub can convert a signed int32 to a heap number (double). It does
 // not work for int32s that are in Smi range! No GC occurs during this stub
 // so you don't have to set up the frame.
-class WriteInt32ToHeapNumberStub : public CodeStub {
+class WriteInt32ToHeapNumberStub : public PlatformCodeStub {
  public:
   WriteInt32ToHeapNumberStub(Register the_int,
                              Register the_heap_number,
@@ -340,7 +340,7 @@ class WriteInt32ToHeapNumberStub : public CodeStub {
 };
 
-class NumberToStringStub: public CodeStub {
+class NumberToStringStub: public PlatformCodeStub {
  public:
   NumberToStringStub() { }
 
@@ -366,7 +366,7 @@ class NumberToStringStub: public CodeStub {
 };
 
-class RecordWriteStub: public CodeStub {
+class RecordWriteStub: public PlatformCodeStub {
  public:
   RecordWriteStub(Register object,
                   Register value,
@@ -512,7 +512,7 @@ class RecordWriteStub: public CodeStub {
   Register GetRegThatIsNotOneOf(Register r1,
                                 Register r2,
                                 Register r3) {
-    for (int i = 0; i < Register::kNumAllocatableRegisters; i++) {
+    for (int i = 0; i < Register::NumAllocatableRegisters(); i++) {
       Register candidate = Register::FromAllocationIndex(i);
       if (candidate.is(r1)) continue;
       if (candidate.is(r2)) continue;
@@ -571,7 +571,7 @@ class RecordWriteStub: public CodeStub {
 // Enter C code from generated RegExp code in a way that allows
 // the C code to fix the return address in case of a GC.
 // Currently only needed on ARM and MIPS.
-class RegExpCEntryStub: public CodeStub {
+class RegExpCEntryStub: public PlatformCodeStub {
  public:
   RegExpCEntryStub() {}
   virtual ~RegExpCEntryStub() {}
@@ -589,7 +589,7 @@ class RegExpCEntryStub: public CodeStub {
 // keep the code which called into native pinned in the memory. Currently the
 // simplest approach is to generate such stub early enough so it can never be
 // moved by GC
-class DirectCEntryStub: public CodeStub {
+class DirectCEntryStub: public PlatformCodeStub {
  public:
   DirectCEntryStub() {}
   void Generate(MacroAssembler* masm);
@@ -739,7 +739,7 @@ class FloatingPointHelper : public AllStatic {
 };
 
-class StringDictionaryLookupStub: public CodeStub {
+class StringDictionaryLookupStub: public PlatformCodeStub {
  public:
   enum LookupMode { POSITIVE_LOOKUP, NEGATIVE_LOOKUP };
...
@@ -46,6 +46,10 @@ enum TypeofState { INSIDE_TYPEOF, NOT_INSIDE_TYPEOF };
 class CodeGenerator: public AstVisitor {
  public:
+  CodeGenerator() {
+    InitializeAstVisitor();
+  }
+
   static bool MakeCode(CompilationInfo* info);
 
   // Printing of AST, etc. as requested by flags.
@@ -70,7 +74,7 @@ class CodeGenerator: public AstVisitor {
               int pos,
               bool right_here = false);
 
-  DEFINE_AST_VISITOR_SUBCLASS_METHODS();
+  DEFINE_AST_VISITOR_SUBCLASS_MEMBERS();
 
  private:
   DISALLOW_COPY_AND_ASSIGN(CodeGenerator);
...
@@ -206,7 +206,7 @@ static int LookupBailoutId(DeoptimizationInputData* data, BailoutId ast_id) {
 void Deoptimizer::DoComputeOsrOutputFrame() {
   DeoptimizationInputData* data = DeoptimizationInputData::cast(
-      optimized_code_->deoptimization_data());
+      compiled_code_->deoptimization_data());
   unsigned ast_id = data->OsrAstId()->value();
 
   int bailout_id = LookupBailoutId(data, BailoutId(ast_id));
@@ -240,7 +240,7 @@ void Deoptimizer::DoComputeOsrOutputFrame() {
   unsigned input_frame_size = input_->GetFrameSize();
   ASSERT(fixed_size + height_in_bytes == input_frame_size);
 
-  unsigned stack_slot_size = optimized_code_->stack_slots() * kPointerSize;
+  unsigned stack_slot_size = compiled_code_->stack_slots() * kPointerSize;
   unsigned outgoing_height = data->ArgumentsStackHeight(bailout_id)->value();
   unsigned outgoing_size = outgoing_height * kPointerSize;
   unsigned output_frame_size = fixed_size + stack_slot_size + outgoing_size;
@@ -332,7 +332,7 @@ void Deoptimizer::DoComputeOsrOutputFrame() {
     unsigned pc_offset = data->OsrPcOffset()->value();
     uint32_t pc = reinterpret_cast<uint32_t>(
-        optimized_code_->entry() + pc_offset);
+        compiled_code_->entry() + pc_offset);
     output_[0]->SetPc(pc);
   }
   Code* continuation = isolate_->builtins()->builtin(Builtins::kNotifyOSR);
@@ -445,6 +445,70 @@ void Deoptimizer::DoComputeArgumentsAdaptorFrame(TranslationIterator* iterator,
 }
 
+void Deoptimizer::DoCompiledStubFrame(TranslationIterator* iterator,
+                                      int frame_index) {
+  //
+  //               FROM                                  TO             <-fp
+  //    |          ....           |          |          ....           |
+  //    +-------------------------+          +-------------------------+
+  //    | JSFunction continuation |          | JSFunction continuation |
+  //    +-------------------------+          +-------------------------+<-sp
+  //    |                         |          |    saved frame (fp)     |
+  //    |                         |          +=========================+<-fp
+  //    |                         |          |   JSFunction context    |
+  //    v                         |          +-------------------------+
+  //    |  COMPILED_STUB marker   |          fp = saved frame
+  //    +-------------------------+          f8 = JSFunction context
+  //    |                         |
+  //    |          ...            |
+  //    |                         |
+  //    +-------------------------+<-sp
+  //
+  //
+  int output_frame_size = 1 * kPointerSize;
+  FrameDescription* output_frame =
+      new(output_frame_size) FrameDescription(output_frame_size, 0);
+  Code* notify_miss =
+      isolate_->builtins()->builtin(Builtins::kNotifyICMiss);
+  output_frame->SetState(Smi::FromInt(FullCodeGenerator::NO_REGISTERS));
+  output_frame->SetContinuation(
+      reinterpret_cast<intptr_t>(notify_miss->entry()));
+
+  ASSERT(compiled_code_->kind() == Code::COMPILED_STUB);
+  int major_key = compiled_code_->major_key();
+  CodeStubInterfaceDescriptor* descriptor =
+      isolate_->code_stub_interface_descriptor(major_key);
+  Handle<Code> miss_ic(descriptor->deoptimization_handler_);
+  output_frame->SetPc(reinterpret_cast<intptr_t>(miss_ic->instruction_start()));
+  unsigned input_frame_size = input_->GetFrameSize();
+  intptr_t value = input_->GetFrameSlot(input_frame_size - kPointerSize);
+  output_frame->SetFrameSlot(0, value);
+  value = input_->GetFrameSlot(input_frame_size - 2 * kPointerSize);
+  output_frame->SetRegister(fp.code(), value);
+  output_frame->SetFp(value);
+  value = input_->GetFrameSlot(input_frame_size - 3 * kPointerSize);
+  output_frame->SetRegister(cp.code(), value);
+
+  Translation::Opcode opcode =
+      static_cast<Translation::Opcode>(iterator->Next());
+  ASSERT(opcode == Translation::REGISTER);
+  USE(opcode);
+  int input_reg = iterator->Next();
+  intptr_t input_value = input_->GetRegister(input_reg);
+  output_frame->SetRegister(a1.code(), input_value);
+
+  int32_t next = iterator->Next();
+  opcode = static_cast<Translation::Opcode>(next);
+  ASSERT(opcode == Translation::REGISTER);
+  input_reg = iterator->Next();
+  input_value = input_->GetRegister(input_reg);
+  output_frame->SetRegister(a0.code(), input_value);
+
+  ASSERT(frame_index == 0);
+  output_[frame_index] = output_frame;
+}
+
+
 void Deoptimizer::DoComputeConstructStubFrame(TranslationIterator* iterator,
                                               int frame_index) {
   Builtins* builtins = isolate_->builtins();
@@ -868,7 +932,7 @@ void Deoptimizer::FillInputFrame(Address tos, JavaScriptFrame* frame) {
   }
   input_->SetRegister(sp.code(), reinterpret_cast<intptr_t>(frame->sp()));
   input_->SetRegister(fp.code(), reinterpret_cast<intptr_t>(frame->fp()));
-  for (int i = 0; i < DoubleRegister::kNumAllocatableRegisters; i++) {
+  for (int i = 0; i < DoubleRegister::NumAllocatableRegisters(); i++) {
     input_->SetDoubleRegister(i, 0.0);
   }
 
@@ -889,7 +953,6 @@ void Deoptimizer::EntryGenerator::Generate() {
   Isolate* isolate = masm()->isolate();
 
-  CpuFeatures::Scope scope(FPU);
   // Unlike on ARM we don't save all the registers, just the useful ones.
   // For the rest, there are gaps on the stack, so the offsets remain the same.
   const int kNumberOfRegisters = Register::kNumRegisters;
@@ -898,14 +961,19 @@ void Deoptimizer::EntryGenerator::Generate() {
   RegList saved_regs = restored_regs | sp.bit() | ra.bit();
 
   const int kDoubleRegsSize =
-      kDoubleSize * FPURegister::kNumAllocatableRegisters;
+      kDoubleSize * FPURegister::kMaxNumAllocatableRegisters;
 
-  // Save all FPU registers before messing with them.
-  __ Subu(sp, sp, Operand(kDoubleRegsSize));
-  for (int i = 0; i < FPURegister::kNumAllocatableRegisters; ++i) {
-    FPURegister fpu_reg = FPURegister::FromAllocationIndex(i);
-    int offset = i * kDoubleSize;
-    __ sdc1(fpu_reg, MemOperand(sp, offset));
+  if (CpuFeatures::IsSupported(FPU)) {
+    CpuFeatures::Scope scope(FPU);
+    // Save all FPU registers before messing with them.
+    __ Subu(sp, sp, Operand(kDoubleRegsSize));
+    for (int i = 0; i < FPURegister::kMaxNumAllocatableRegisters; ++i) {
+      FPURegister fpu_reg = FPURegister::FromAllocationIndex(i);
+      int offset = i * kDoubleSize;
+      __ sdc1(fpu_reg, MemOperand(sp, offset));
+    }
+  } else {
+    __ Subu(sp, sp, Operand(kDoubleRegsSize));
   }
 
   // Push saved_regs (needed to populate FrameDescription::registers_).
@@ -977,14 +1045,17 @@ void Deoptimizer::EntryGenerator::Generate() {
     }
   }
 
-  // Copy FPU registers to
-  // double_registers_[DoubleRegister::kNumAllocatableRegisters]
-  int double_regs_offset = FrameDescription::double_registers_offset();
-  for (int i = 0; i < FPURegister::kNumAllocatableRegisters; ++i) {
-    int dst_offset = i * kDoubleSize + double_regs_offset;
-    int src_offset = i * kDoubleSize + kNumberOfRegisters * kPointerSize;
-    __ ldc1(f0, MemOperand(sp, src_offset));
-    __ sdc1(f0, MemOperand(a1, dst_offset));
+  if (CpuFeatures::IsSupported(FPU)) {
+    CpuFeatures::Scope scope(FPU);
+    // Copy FPU registers to
+    // double_registers_[DoubleRegister::kNumAllocatableRegisters]
+    int double_regs_offset = FrameDescription::double_registers_offset();
+    for (int i = 0; i < FPURegister::NumAllocatableRegisters(); ++i) {
+      int dst_offset = i * kDoubleSize + double_regs_offset;
+      int src_offset = i * kDoubleSize + kNumberOfRegisters * kPointerSize;
+      __ ldc1(f0, MemOperand(sp, src_offset));
+      __ sdc1(f0, MemOperand(a1, dst_offset));
+    }
   }
 
   // Remove the bailout id, eventually return address, and the saved registers
@@ -1005,11 +1076,14 @@ void Deoptimizer::EntryGenerator::Generate() {
   // frame description.
   __ Addu(a3, a1, Operand(FrameDescription::frame_content_offset()));
   Label pop_loop;
+  Label pop_loop_header;
+  __ Branch(&pop_loop_header);
   __ bind(&pop_loop);
   __ pop(t0);
   __ sw(t0, MemOperand(a3, 0));
-  __ Branch(USE_DELAY_SLOT, &pop_loop, ne, a2, Operand(sp));
-  __ addiu(a3, a3, sizeof(uint32_t));  // In delay slot.
+  __ addiu(a3, a3, sizeof(uint32_t));
+  __ bind(&pop_loop_header);
+  __ Branch(&pop_loop, ne, a2, Operand(sp));
 
   // Compute the output frame in the deoptimizer.
   __ push(a0);  // Preserve deoptimizer object across call.
@@ -1024,25 +1098,30 @@ void Deoptimizer::EntryGenerator::Generate() {
   __ pop(a0);  // Restore deoptimizer object (class Deoptimizer).
 
   // Replace the current (input) frame with the output frames.
-  Label outer_push_loop, inner_push_loop;
+  Label outer_push_loop, inner_push_loop,
+      outer_loop_header, inner_loop_header;
   // Outer loop state: a0 = current "FrameDescription** output_",
   // a1 = one past the last FrameDescription**.
   __ lw(a1, MemOperand(a0, Deoptimizer::output_count_offset()));
   __ lw(a0, MemOperand(a0, Deoptimizer::output_offset()));  // a0 is output_.
   __ sll(a1, a1, kPointerSizeLog2);  // Count to offset.
   __ addu(a1, a0, a1);  // a1 = one past the last FrameDescription**.
+  __ jmp(&outer_loop_header);
   __ bind(&outer_push_loop);
   // Inner loop state: a2 = current FrameDescription*, a3 = loop index.
   __ lw(a2, MemOperand(a0, 0));  // output_[ix]
   __ lw(a3, MemOperand(a2, FrameDescription::frame_size_offset()));
+  __ jmp(&inner_loop_header);
   __ bind(&inner_push_loop);
   __ Subu(a3, a3, Operand(sizeof(uint32_t)));
   __ Addu(t2, a2, Operand(a3));
   __ lw(t3, MemOperand(t2, FrameDescription::frame_content_offset()));
   __ push(t3);
+  __ bind(&inner_loop_header);
   __ Branch(&inner_push_loop, ne, a3, Operand(zero_reg));
 
   __ Addu(a0, a0, Operand(kPointerSize));
+  __ bind(&outer_loop_header);
   __ Branch(&outer_push_loop, lt, a0, Operand(a1));
...
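One detail worth calling out in the entry generator above: the non-FPU path still subtracts kDoubleRegsSize, so the stack layout (and thus every FrameDescription offset) is identical whether or not doubles were actually saved. Rough arithmetic, assuming kDoubleSize == 8 and 14 allocatable doubles as in this port:

    // Sketch of the offset math (constants assumed from this port):
    // kDoubleRegsSize = 8 * 14 = 112 bytes, reserved in both configurations.
    // A double is later read back from the stack at
    //   src_offset = i * kDoubleSize + kNumberOfRegisters * kPointerSize;
    // which only stays valid because the else-branch reserves the same gap.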
@@ -77,6 +77,7 @@ bool LCodeGen::GenerateCode() {
   return GeneratePrologue() &&
       GenerateBody() &&
       GenerateDeferredCode() &&
+      GenerateDeoptJumpTable() &&
       GenerateSafepointTable();
 }
 
@@ -116,42 +117,48 @@ void LCodeGen::Comment(const char* format, ...) {
 bool LCodeGen::GeneratePrologue() {
   ASSERT(is_generating());
 
-  ProfileEntryHookStub::MaybeCallEntryHook(masm_);
+  if (info()->IsOptimizing()) {
+    ProfileEntryHookStub::MaybeCallEntryHook(masm_);
 
 #ifdef DEBUG
     if (strlen(FLAG_stop_at) > 0 &&
         info_->function()->name()->IsEqualTo(CStrVector(FLAG_stop_at))) {
       __ stop("stop_at");
     }
 #endif
 
     // a1: Callee's JS function.
     // cp: Callee's context.
     // fp: Caller's frame pointer.
     // lr: Caller's pc.
 
     // Strict mode functions and builtins need to replace the receiver
     // with undefined when called as functions (without an explicit
     // receiver object). r5 is zero for method calls and non-zero for
     // function calls.
     if (!info_->is_classic_mode() || info_->is_native()) {
       Label ok;
       __ Branch(&ok, eq, t1, Operand(zero_reg));
 
       int receiver_offset = scope()->num_parameters() * kPointerSize;
       __ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
       __ sw(a2, MemOperand(sp, receiver_offset));
       __ bind(&ok);
     }
+  }
 
   info()->set_prologue_offset(masm_->pc_offset());
-  // The following three instructions must remain together and unmodified for
-  // code aging to work properly.
-  __ Push(ra, fp, cp, a1);
-  // Add unused load of ip to ensure prologue sequence is identical for
-  // full-codegen and lithium-codegen.
-  __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
-  __ Addu(fp, sp, Operand(2 * kPointerSize));  // Adj. FP to point to saved FP.
+  if (NeedsEagerFrame()) {
+    // The following three instructions must remain together and unmodified for
+    // code aging to work properly.
+    __ Push(ra, fp, cp, a1);
+    // Add unused load of ip to ensure prologue sequence is identical for
+    // full-codegen and lithium-codegen.
+    __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
+    // Adj. FP to point to saved FP.
+    __ Addu(fp, sp, Operand(2 * kPointerSize));
+    frame_is_built_ = true;
+  }
 
   // Reserve space for the stack slots needed by the code.
   int slots = GetStackSlotCount();
@@ -170,7 +177,7 @@ bool LCodeGen::GeneratePrologue() {
   }
 
   // Possibly allocate a local context.
-  int heap_slots = scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
+  int heap_slots = info()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
   if (heap_slots > 0) {
     Comment(";;; Allocate local context");
     // Argument to NewContext is the function, which is in a1.
@@ -206,7 +213,7 @@ bool LCodeGen::GeneratePrologue() {
   }
 
   // Trace the call.
-  if (FLAG_trace) {
+  if (FLAG_trace && info()->IsOptimizing()) {
     __ CallRuntime(Runtime::kTraceEnter, 0);
   }
   EnsureSpaceForLazyDeopt();
@@ -264,10 +271,31 @@ bool LCodeGen::GenerateDeferredCode() {
     for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
       LDeferredCode* code = deferred_[i];
       __ bind(code->entry());
+      if (NeedsDeferredFrame()) {
+        Comment(";;; Deferred build frame",
+                code->instruction_index(),
+                code->instr()->Mnemonic());
+        ASSERT(!frame_is_built_);
+        ASSERT(info()->IsStub());
+        frame_is_built_ = true;
+        __ MultiPush(cp.bit() | fp.bit() | ra.bit());
+        __ li(scratch0(), Operand(Smi::FromInt(StackFrame::STUB)));
+        __ push(scratch0());
+        __ Addu(fp, sp, Operand(2 * kPointerSize));
+      }
       Comment(";;; Deferred code @%d: %s.",
               code->instruction_index(),
              code->instr()->Mnemonic());
       code->Generate();
+      if (NeedsDeferredFrame()) {
+        Comment(";;; Deferred destroy frame",
+                code->instruction_index(),
+                code->instr()->Mnemonic());
+        ASSERT(frame_is_built_);
+        __ pop(at);
+        __ MultiPop(cp.bit() | fp.bit() | ra.bit());
+        frame_is_built_ = false;
+      }
       __ jmp(code->exit());
     }
   }
@@ -279,10 +307,72 @@ bool LCodeGen::GenerateDeferredCode() {
 bool LCodeGen::GenerateDeoptJumpTable() {
-  // TODO(plind): not clear that this will have advantage for MIPS.
-  // Skipping it for now. Raised issue #100 for this.
-  Abort("Unimplemented: GenerateDeoptJumpTable");
-  return false;
+  // Check that the jump table is accessible from everywhere in the function
+  // code, i.e. that offsets to the table can be encoded in the 16bit signed
+  // immediate of a branch instruction.
+  // To simplify we consider the code size from the first instruction to the
+  // end of the jump table.
+  if (!is_int16((masm()->pc_offset() / Assembler::kInstrSize) +
+      deopt_jump_table_.length() * 12)) {
+    Abort("Generated code is too large");
+  }
+
+  Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
+  __ RecordComment("[ Deoptimization jump table");
+  Label table_start;
+  __ bind(&table_start);
+  Label needs_frame_not_call;
+  Label needs_frame_is_call;
+  for (int i = 0; i < deopt_jump_table_.length(); i++) {
+    __ bind(&deopt_jump_table_[i].label);
+    Address entry = deopt_jump_table_[i].address;
+    __ li(t9, Operand(ExternalReference::ForDeoptEntry(entry)));
+    if (deopt_jump_table_[i].needs_frame) {
+      if (deopt_jump_table_[i].is_lazy_deopt) {
+        if (needs_frame_is_call.is_bound()) {
+          __ Branch(&needs_frame_is_call);
+        } else {
+          __ bind(&needs_frame_is_call);
+          __ MultiPush(cp.bit() | fp.bit() | ra.bit());
+          // This variant of deopt can only be used with stubs. Since we don't
+          // have a function pointer to install in the stack frame that we're
+          // building, install a special marker there instead.
+          ASSERT(info()->IsStub());
+          __ li(scratch0(), Operand(Smi::FromInt(StackFrame::STUB)));
+          __ push(scratch0());
+          __ Addu(fp, sp, Operand(2 * kPointerSize));
+          __ Call(t9);
+        }
+      } else {
+        if (needs_frame_not_call.is_bound()) {
+          __ Branch(&needs_frame_not_call);
+        } else {
+          __ bind(&needs_frame_not_call);
+          __ MultiPush(cp.bit() | fp.bit() | ra.bit());
+          // This variant of deopt can only be used with stubs. Since we don't
+          // have a function pointer to install in the stack frame that we're
+          // building, install a special marker there instead.
+          ASSERT(info()->IsStub());
+          __ li(scratch0(), Operand(Smi::FromInt(StackFrame::STUB)));
+          __ push(scratch0());
+          __ Addu(fp, sp, Operand(2 * kPointerSize));
+          __ Jump(t9);
+        }
+      }
+    } else {
+      if (deopt_jump_table_[i].is_lazy_deopt) {
+        __ Call(t9);
+      } else {
+        __ Jump(t9);
+      }
+    }
+  }
+  __ RecordComment("]");
+
+  // The deoptimization jump table is the last part of the instruction
+  // sequence. Mark the generated code as done unless we bailed out.
+  if (!is_aborted()) status_ = DONE;
+  return !is_aborted();
 }
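To make the size check at the top of GenerateDeoptJumpTable concrete: a MIPS branch immediate is a signed 16-bit count of instructions, and each table entry is budgeted at 12 instructions here. A worked bound, assuming Assembler::kInstrSize == 4:

    // pc_offset()/kInstrSize + 12 * N must fit in a signed 16-bit immediate:
    //   pc_offset() / 4 + 12 * N <= 32767
    // e.g. a function body of 20000 instructions leaves room for at most
    //   (32767 - 20000) / 12 = 1063 deopt jump table entries.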
@@ -482,7 +572,9 @@ void LCodeGen::WriteTranslation(LEnvironment* environment,
                       translation,
                       arguments_index,
                       arguments_count);
-  int closure_id = *info()->closure() != *environment->closure()
+  bool has_closure_id = !info()->closure().is_null() &&
+      *info()->closure() != *environment->closure();
+  int closure_id = has_closure_id
       ? DefineDeoptimizationLiteral(environment->closure())
       : Translation::kSelfLiteralId;
 
@@ -503,6 +595,9 @@ void LCodeGen::WriteTranslation(LEnvironment* environment,
       ASSERT(height == 0);
       translation->BeginSetterStubFrame(closure_id);
       break;
+    case STUB:
+      translation->BeginCompiledStubFrame();
+      break;
    case ARGUMENTS_ADAPTOR:
       translation->BeginArgumentsAdaptorFrame(closure_id, translation_size);
       break;
@@ -689,7 +784,11 @@ void LCodeGen::DeoptimizeIf(Condition cc,
   RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
   ASSERT(environment->HasBeenRegistered());
   int id = environment->deoptimization_index();
-  Address entry = Deoptimizer::GetDeoptimizationEntry(id, Deoptimizer::EAGER);
+
+  Deoptimizer::BailoutType bailout_type = info()->IsStub()
+      ? Deoptimizer::LAZY
+      : Deoptimizer::EAGER;
+  Address entry = Deoptimizer::GetDeoptimizationEntry(id, bailout_type);
   if (entry == NULL) {
     Abort("bailout was not prepared");
     return;
@@ -712,9 +811,22 @@ void LCodeGen::DeoptimizeIf(Condition cc,
     __ bind(&skip);
   }
 
-  // TODO(plind): The Arm port is a little different here, due to their
-  // DeOpt jump table, which is not used for Mips yet.
-  __ Jump(entry, RelocInfo::RUNTIME_ENTRY, cc, src1, src2);
+  bool needs_lazy_deopt = info()->IsStub();
+  ASSERT(info()->IsStub() || frame_is_built_);
+  if (cc == al && !needs_lazy_deopt) {
+    __ Jump(entry, RelocInfo::RUNTIME_ENTRY, cc, src1, src2);
+  } else {
+    // We often have several deopts to the same entry, reuse the last
+    // jump entry if this is the case.
+    if (deopt_jump_table_.is_empty() ||
+        (deopt_jump_table_.last().address != entry) ||
+        (deopt_jump_table_.last().is_lazy_deopt != needs_lazy_deopt) ||
+        (deopt_jump_table_.last().needs_frame != !frame_is_built_)) {
+      JumpTableEntry table_entry(entry, !frame_is_built_, needs_lazy_deopt);
+      deopt_jump_table_.Add(table_entry, zone());
+    }
+    __ Branch(&deopt_jump_table_.last().label, cc, src1, src2);
+  }
 }
@@ -1289,6 +1401,7 @@ void LCodeGen::DoConstantI(LConstantI* instr) {
 void LCodeGen::DoConstantD(LConstantD* instr) {
   ASSERT(instr->result()->IsDoubleRegister());
   DoubleRegister result = ToDoubleRegister(instr->result());
+  CpuFeatures::Scope scope(FPU);
   double v = instr->value();
   __ Move(result, v);
 }
@@ -1486,6 +1599,7 @@ void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
     __ bind(&done);
   } else {
     ASSERT(instr->hydrogen()->representation().IsDouble());
+    CpuFeatures::Scope scope(FPU);
     FPURegister left_reg = ToDoubleRegister(left);
     FPURegister right_reg = ToDoubleRegister(right);
     FPURegister result_reg = ToDoubleRegister(instr->result());
@@ -1526,6 +1640,7 @@ void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
 void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
+  CpuFeatures::Scope scope(FPU);
   DoubleRegister left = ToDoubleRegister(instr->left());
   DoubleRegister right = ToDoubleRegister(instr->right());
   DoubleRegister result = ToDoubleRegister(instr->result());
@@ -1635,6 +1750,7 @@ void LCodeGen::DoBranch(LBranch* instr) {
     Register reg = ToRegister(instr->value());
     EmitBranch(true_block, false_block, ne, reg, Operand(zero_reg));
   } else if (r.IsDouble()) {
+    CpuFeatures::Scope scope(FPU);
     DoubleRegister reg = ToDoubleRegister(instr->value());
     // Test the double value. Zero and NaN are false.
     EmitBranchF(true_block, false_block, ne, reg, kDoubleRegZero);
@@ -1712,6 +1828,7 @@ void LCodeGen::DoBranch(LBranch* instr) {
       }
 
       if (expected.Contains(ToBooleanStub::HEAP_NUMBER)) {
+        CpuFeatures::Scope scope(FPU);
         // heap number -> false iff +0, -0, or NaN.
         DoubleRegister dbl_scratch = double_scratch0();
         Label not_heap_number;
@@ -1791,6 +1908,7 @@ void LCodeGen::DoCmpIDAndBranch(LCmpIDAndBranch* instr) {
     EmitGoto(next_block);
   } else {
     if (instr->is_double()) {
+      CpuFeatures::Scope scope(FPU);
       // Compare left and right as doubles and load the
       // resulting flags into the normal status register.
       FPURegister left_reg = ToDoubleRegister(left);
@@ -2338,16 +2456,21 @@ void LCodeGen::DoCmpT(LCmpT* instr) {
 void LCodeGen::DoReturn(LReturn* instr) {
-  if (FLAG_trace) {
+  if (FLAG_trace && info()->IsOptimizing()) {
     // Push the return value on the stack as the parameter.
     // Runtime::TraceExit returns its parameter in v0.
     __ push(v0);
     __ CallRuntime(Runtime::kTraceExit, 1);
   }
-  int32_t sp_delta = (GetParameterCount() + 1) * kPointerSize;
-  __ mov(sp, fp);
-  __ Pop(ra, fp);
-  __ Addu(sp, sp, Operand(sp_delta));
+  if (NeedsEagerFrame()) {
+    int32_t sp_delta = (GetParameterCount() + 1) * kPointerSize;
+    __ mov(sp, fp);
+    __ Pop(ra, fp);
+    __ Addu(sp, sp, Operand(sp_delta));
+  }
+  if (info()->IsStub()) {
+    __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+  }
   __ Jump(ra);
 }
@@ -2706,12 +2829,61 @@ void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
       __ sll(scratch0(), key, shift_size);
       __ Addu(scratch0(), scratch0(), external_pointer);
     }
-
-    if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
-      __ lwc1(result, MemOperand(scratch0(), additional_offset));
-      __ cvt_d_s(result, result);
-    } else {  // i.e. elements_kind == EXTERNAL_DOUBLE_ELEMENTS
-      __ ldc1(result, MemOperand(scratch0(), additional_offset));
+    if (CpuFeatures::IsSupported(FPU)) {
+      CpuFeatures::Scope scope(FPU);
+      if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
+        __ lwc1(result, MemOperand(scratch0(), additional_offset));
+        __ cvt_d_s(result, result);
+      } else {  // i.e. elements_kind == EXTERNAL_DOUBLE_ELEMENTS
+        __ ldc1(result, MemOperand(scratch0(), additional_offset));
+      }
+    } else {
+      if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
+        Register value = external_pointer;
+        __ lw(value, MemOperand(scratch0(), additional_offset));
+        __ And(sfpd_lo, value, Operand(kBinary32MantissaMask));
+
+        __ srl(scratch0(), value, kBinary32MantissaBits);
+        __ And(scratch0(), scratch0(),
+               Operand(kBinary32ExponentMask >> kBinary32MantissaBits));
+
+        Label exponent_rebiased;
+        __ Xor(at, scratch0(), Operand(0x00));
+        __ Branch(&exponent_rebiased, eq, at, Operand(zero_reg));
+
+        __ Xor(at, scratch0(), Operand(0xff));
+        Label skip;
+        __ Branch(&skip, ne, at, Operand(zero_reg));
+        __ li(scratch0(), Operand(0x7ff));
+        __ bind(&skip);
+        __ Branch(&exponent_rebiased, eq, at, Operand(zero_reg));
+
+        // Rebias exponent.
+        __ Addu(scratch0(),
+                scratch0(),
+                Operand(-kBinary32ExponentBias + HeapNumber::kExponentBias));
+
+        __ bind(&exponent_rebiased);
+        __ And(sfpd_hi, value, Operand(kBinary32SignMask));
+        __ sll(at, scratch0(), HeapNumber::kMantissaBitsInTopWord);
+        __ Or(sfpd_hi, sfpd_hi, at);
+
+        // Shift mantissa.
+        static const int kMantissaShiftForHiWord =
+            kBinary32MantissaBits - HeapNumber::kMantissaBitsInTopWord;
+        static const int kMantissaShiftForLoWord =
+            kBitsPerInt - kMantissaShiftForHiWord;
+
+        __ srl(at, sfpd_lo, kMantissaShiftForHiWord);
+        __ Or(sfpd_hi, sfpd_hi, at);
+        __ sll(sfpd_lo, sfpd_lo, kMantissaShiftForLoWord);
+      } else {
+        __ lw(sfpd_lo, MemOperand(scratch0(), additional_offset));
+        __ lw(sfpd_hi, MemOperand(scratch0(),
+                                  additional_offset + kPointerSize));
+      }
     }
   } else {
     Register result = ToRegister(instr->result());
@@ -2780,25 +2952,28 @@ void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) {
     key = ToRegister(instr->key());
   }
 
-  if (key_is_constant) {
-    __ Addu(elements, elements,
-        Operand(((constant_key + instr->additional_index()) <<
-            element_size_shift) +
-            FixedDoubleArray::kHeaderSize - kHeapObjectTag));
-  } else {
+  int base_offset = (FixedDoubleArray::kHeaderSize - kHeapObjectTag) +
+      ((constant_key + instr->additional_index()) << element_size_shift);
+  if (!key_is_constant) {
     __ sll(scratch, key, shift_size);
-    __ Addu(elements, elements, Operand(scratch));
-    __ Addu(elements, elements,
-        Operand((FixedDoubleArray::kHeaderSize - kHeapObjectTag) +
-            (instr->additional_index() << element_size_shift)));
+    __ Addu(elements, elements, scratch);
   }
-
-  if (instr->hydrogen()->RequiresHoleCheck()) {
-    __ lw(scratch, MemOperand(elements, sizeof(kHoleNanLower32)));
-    DeoptimizeIf(eq, instr->environment(), scratch, Operand(kHoleNanUpper32));
+  if (CpuFeatures::IsSupported(FPU)) {
+    CpuFeatures::Scope scope(FPU);
+    __ Addu(elements, elements, Operand(base_offset));
+    __ ldc1(result, MemOperand(elements));
+    if (instr->hydrogen()->RequiresHoleCheck()) {
+      __ lw(scratch, MemOperand(elements, sizeof(kHoleNanLower32)));
+      DeoptimizeIf(eq, instr->environment(), scratch, Operand(kHoleNanUpper32));
+    }
+  } else {
+    __ lw(sfpd_hi, MemOperand(elements, base_offset + kPointerSize));
+    __ lw(sfpd_lo, MemOperand(elements, base_offset));
+    if (instr->hydrogen()->RequiresHoleCheck()) {
+      ASSERT(kPointerSize == sizeof(kHoleNanLower32));
+      DeoptimizeIf(eq, instr->environment(), sfpd_hi, Operand(kHoleNanUpper32));
+    }
   }
-
-  __ ldc1(result, MemOperand(elements));
 }
@@ -3241,6 +3416,7 @@ void LCodeGen::EmitIntegerMathAbs(LUnaryMathOperation* instr) {
 void LCodeGen::DoMathAbs(LUnaryMathOperation* instr) {
+  CpuFeatures::Scope scope(FPU);
   // Class for deferred case.
   class DeferredMathAbsTaggedHeapNumber: public LDeferredCode {
    public:
@@ -3277,6 +3453,7 @@ void LCodeGen::DoMathAbs(LUnaryMathOperation* instr) {
 void LCodeGen::DoMathFloor(LUnaryMathOperation* instr) {
+  CpuFeatures::Scope scope(FPU);
   DoubleRegister input = ToDoubleRegister(instr->value());
   Register result = ToRegister(instr->result());
   Register scratch1 = scratch0();
@@ -3305,6 +3482,7 @@ void LCodeGen::DoMathFloor(LUnaryMathOperation* instr) {
 void LCodeGen::DoMathRound(LUnaryMathOperation* instr) {
+  CpuFeatures::Scope scope(FPU);
   DoubleRegister input = ToDoubleRegister(instr->value());
   Register result = ToRegister(instr->result());
   DoubleRegister double_scratch1 = ToDoubleRegister(instr->temp());
@@ -3381,6 +3559,7 @@ void LCodeGen::DoMathRound(LUnaryMathOperation* instr) {
 void LCodeGen::DoMathSqrt(LUnaryMathOperation* instr) {
+  CpuFeatures::Scope scope(FPU);
   DoubleRegister input = ToDoubleRegister(instr->value());
   DoubleRegister result = ToDoubleRegister(instr->result());
   __ sqrt_d(result, input);
@@ -3388,6 +3567,7 @@ void LCodeGen::DoMathSqrt(LUnaryMathOperation* instr) {
 void LCodeGen::DoMathPowHalf(LUnaryMathOperation* instr) {
+  CpuFeatures::Scope scope(FPU);
   DoubleRegister input = ToDoubleRegister(instr->value());
   DoubleRegister result = ToDoubleRegister(instr->result());
   DoubleRegister temp = ToDoubleRegister(instr->temp());
@@ -3412,6 +3592,7 @@ void LCodeGen::DoMathPowHalf(LUnaryMathOperation* instr) {
 void LCodeGen::DoPower(LPower* instr) {
+  CpuFeatures::Scope scope(FPU);
   Representation exponent_type = instr->hydrogen()->right()->representation();
   // Having marked this as a call, we can use any registers.
   // Just make sure that the input/output registers are the expected ones.
@@ -3442,6 +3623,7 @@ void LCodeGen::DoPower(LPower* instr) {
 void LCodeGen::DoRandom(LRandom* instr) {
+  CpuFeatures::Scope scope(FPU);
   class DeferredDoRandom: public LDeferredCode {
    public:
     DeferredDoRandom(LCodeGen* codegen, LRandom* instr)
@@ -3518,6 +3700,7 @@ void LCodeGen::DoDeferredRandom(LRandom* instr) {
 void LCodeGen::DoMathExp(LMathExp* instr) {
+  CpuFeatures::Scope scope(FPU);
   DoubleRegister input = ToDoubleRegister(instr->value());
   DoubleRegister result = ToDoubleRegister(instr->result());
   DoubleRegister double_scratch1 = ToDoubleRegister(instr->double_temp());
@@ -3813,6 +3996,7 @@ void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
 void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
+  CpuFeatures::Scope scope(FPU);
   Register external_pointer = ToRegister(instr->elements());
   Register key = no_reg;
   ElementsKind elements_kind = instr->elements_kind();
@@ -3886,6 +4070,7 @@ void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
 void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
+  CpuFeatures::Scope scope(FPU);
   DoubleRegister value = ToDoubleRegister(instr->value());
   Register elements = ToRegister(instr->elements());
   Register key = no_reg;
@@ -4169,6 +4354,7 @@ void LCodeGen::DoStringLength(LStringLength* instr) {
 void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
+  CpuFeatures::Scope scope(FPU);
   LOperand* input = instr->value();
   ASSERT(input->IsRegister() || input->IsStackSlot());
   LOperand* output = instr->result();
@@ -4186,6 +4372,7 @@ void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
 void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) {
+  CpuFeatures::Scope scope(FPU);
   LOperand* input = instr->value();
   LOperand* output = instr->result();
 
@@ -4247,13 +4434,51 @@ void LCodeGen::DoNumberTagU(LNumberTagU* instr) {
 }
 
// Convert unsigned integer with specified number of leading zeroes in binary
// representation to IEEE 754 double.
// Integer to convert is passed in register hiword.
// Resulting double is returned in registers hiword:loword.
// This functions does not work correctly for 0.
static void GenerateUInt2Double(MacroAssembler* masm,
Register hiword,
Register loword,
Register scratch,
int leading_zeroes) {
const int meaningful_bits = kBitsPerInt - leading_zeroes - 1;
const int biased_exponent = HeapNumber::kExponentBias + meaningful_bits;
const int mantissa_shift_for_hi_word =
meaningful_bits - HeapNumber::kMantissaBitsInTopWord;
const int mantissa_shift_for_lo_word =
kBitsPerInt - mantissa_shift_for_hi_word;
masm->li(scratch, Operand(biased_exponent << HeapNumber::kExponentShift));
if (mantissa_shift_for_hi_word > 0) {
masm->sll(loword, hiword, mantissa_shift_for_lo_word);
masm->srl(hiword, hiword, mantissa_shift_for_hi_word);
masm->Or(hiword, scratch, hiword);
} else {
masm->mov(loword, zero_reg);
masm->sll(hiword, hiword, mantissa_shift_for_hi_word);
masm->Or(hiword, scratch, hiword);
}
// If the least significant bit of the biased exponent was not 1, it was
// corrupted by the most significant bit of the mantissa, so fix it up.
if (!(biased_exponent & 1)) {
masm->li(scratch, 1 << HeapNumber::kExponentShift);
masm->nor(scratch, scratch, scratch);
masm->and_(hiword, hiword, scratch);
}
}
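// Worked example (an illustration added for clarity; not part of the patch):
// converting 0x80000001 with leading_zeroes == 0 gives meaningful_bits == 31
// and biased_exponent == 1023 + 31 == 1054 (0x41E, even), with shifts of 11
// (hi word) and 21 (lo word):
//   loword  = 0x80000001 << 21  == 0x00200000   (low mantissa bits)
//   hiword  = 0x80000001 >> 11  == 0x00100000   (implicit 1 lands at bit 20)
//   hiword |= 1054 << 20        == 0x41F00000   (exponent LSB corrupted)
//   clear bit 20 (the fix-up)   == 0x41E00000
// yielding 0x41E00000:00200000, i.e. 2^31 + 1 as an IEEE 754 double.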
void LCodeGen::DoDeferredNumberTagI(LInstruction* instr,
LOperand* value,
IntegerSignedness signedness) {
Label slow;
Register src = ToRegister(value);
Register dst = ToRegister(instr->result());
DoubleRegister dbl_scratch = double_scratch0();
// Preserve the value of all registers.
PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
@@ -4267,16 +4492,40 @@ void LCodeGen::DoDeferredNumberTagI(LInstruction* instr,
__ SmiUntag(src, dst);
__ Xor(src, src, Operand(0x80000000));
}
if (CpuFeatures::IsSupported(FPU)) {
CpuFeatures::Scope scope(FPU);
__ mtc1(src, dbl_scratch);
__ cvt_d_w(dbl_scratch, dbl_scratch);
} else {
FloatingPointHelper::Destination dest =
FloatingPointHelper::kCoreRegisters;
FloatingPointHelper::ConvertIntToDouble(masm(), src, dest, f0,
sfpd_lo, sfpd_hi,
scratch0(), f2);
}
} else {
if (CpuFeatures::IsSupported(FPU)) {
CpuFeatures::Scope scope(FPU);
__ mtc1(src, dbl_scratch);
__ Cvt_d_uw(dbl_scratch, dbl_scratch, f22);
} else {
Label no_leading_zero, done;
__ And(at, src, Operand(0x80000000));
__ Branch(&no_leading_zero, ne, at, Operand(zero_reg));
// Integer has one leading zero.
GenerateUInt2Double(masm(), sfpd_hi, sfpd_lo, t0, 1);
__ Branch(&done);
__ bind(&no_leading_zero);
GenerateUInt2Double(masm(), sfpd_hi, sfpd_lo, t0, 0);
__ Branch(&done);
}
}
if (FLAG_inline_new) {
__ LoadRoot(scratch0(), Heap::kHeapNumberMapRootIndex);
__ AllocateHeapNumber(t1, a3, t0, scratch0(), &slow, DONT_TAG_RESULT);
__ Move(dst, t1);
__ Branch(&done);
}
@@ -4295,7 +4544,13 @@ void LCodeGen::DoDeferredNumberTagI(LInstruction* instr,
// Done. Put the value in dbl_scratch into the value of the allocated heap
// number.
__ bind(&done);
if (CpuFeatures::IsSupported(FPU)) {
CpuFeatures::Scope scope(FPU);
__ sdc1(dbl_scratch, MemOperand(dst, HeapNumber::kValueOffset));
} else {
__ sw(sfpd_lo, MemOperand(dst, HeapNumber::kMantissaOffset));
__ sw(sfpd_hi, MemOperand(dst, HeapNumber::kExponentOffset));
}
__ Addu(dst, dst, kHeapObjectTag);
__ StoreToSafepointRegisterSlot(dst, dst);
}
@@ -4328,7 +4583,13 @@ void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
__ Branch(deferred->entry());
}
__ bind(deferred->exit());
if (CpuFeatures::IsSupported(FPU)) {
CpuFeatures::Scope scope(FPU);
__ sdc1(input_reg, MemOperand(reg, HeapNumber::kValueOffset));
} else {
__ sw(sfpd_lo, MemOperand(reg, HeapNumber::kValueOffset));
__ sw(sfpd_hi, MemOperand(reg, HeapNumber::kValueOffset + kPointerSize));
}
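// (An inference, not from the patch: on little-endian MIPS the mantissa
// word of a HeapNumber sits at the lower address, so sfpd_lo/sfpd_hi act
// as the mantissa and exponent words here, mirroring the explicit
// kMantissaOffset/kExponentOffset stores in DoDeferredNumberTagI above.)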
// Now that we have finished with the object's real address, tag it.
__ Addu(reg, reg, kHeapObjectTag);
}
@@ -4376,6 +4637,7 @@ void LCodeGen::EmitNumberUntagD(Register input_reg,
bool deoptimize_on_minus_zero,
LEnvironment* env) {
Register scratch = scratch0();
CpuFeatures::Scope scope(FPU);
Label load_smi, heap_number, done;
@@ -4440,6 +4702,7 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
// of the if.
if (instr->truncating()) {
CpuFeatures::Scope scope(FPU);
Register scratch3 = ToRegister(instr->temp2());
FPURegister single_scratch = double_scratch.low();
ASSERT(!scratch3.is(input_reg) &&
@@ -4676,6 +4939,7 @@ void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) {
CpuFeatures::Scope vfp_scope(FPU);
DoubleRegister value_reg = ToDoubleRegister(instr->unclamped());
Register result_reg = ToRegister(instr->result());
DoubleRegister temp_reg = ToDoubleRegister(instr->temp());
@@ -4684,6 +4948,7 @@ void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) {
void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) {
CpuFeatures::Scope vfp_scope(FPU);
Register unclamped_reg = ToRegister(instr->unclamped());
Register result_reg = ToRegister(instr->result());
__ ClampUint8(result_reg, unclamped_reg);
@@ -4691,6 +4956,7 @@ void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) {
void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
CpuFeatures::Scope vfp_scope(FPU);
Register scratch = scratch0();
Register input_reg = ToRegister(instr->unclamped());
Register result_reg = ToRegister(instr->result());
@@ -5314,6 +5580,7 @@ void LCodeGen::EmitIsConstructCall(Register temp1, Register temp2) {
void LCodeGen::EnsureSpaceForLazyDeopt() {
if (info()->IsStub()) return;
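// (Presumably stub code is never patched for lazy bailout, so the padding
// below is only needed when compiling an ordinary optimized function.)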
// Ensure that we have enough space after the previous lazy-bailout
// instruction for patching the code here.
int current_pc = masm()->pc_offset();
...
@@ -61,6 +61,7 @@ class LCodeGen BASE_EMBEDDED {
deferred_(8, info->zone()),
osr_pc_offset_(-1),
last_lazy_deopt_pc_(0),
frame_is_built_(false),
safepoints_(info->zone()),
resolver_(this),
expected_safepoint_kind_(Safepoint::kSimple) {
@@ -76,6 +77,15 @@ class LCodeGen BASE_EMBEDDED {
Heap* heap() const { return isolate()->heap(); }
Zone* zone() const { return zone_; }
bool NeedsEagerFrame() const {
return GetStackSlotCount() > 0 ||
info()->is_non_deferred_calling() ||
!info()->IsStub();
}
bool NeedsDeferredFrame() const {
return !NeedsEagerFrame() && info()->is_deferred_calling();
}
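// (A summary inferred from the two predicates above, not from the CL text:
// ordinary optimized code always builds its frame eagerly; a stub may run
// frameless unless it spills or makes non-deferred calls, and a frameless
// stub that calls only from deferred code builds a frame lazily there.)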
// Support for converting LOperands to assembler types.
// LOperand must be a register.
Register ToRegister(LOperand* op) const;
@@ -189,7 +199,7 @@ class LCodeGen BASE_EMBEDDED {
Register temporary2);
int GetStackSlotCount() const { return chunk()->spill_slot_count(); }
int GetParameterCount() const { return info()->num_parameters(); }
void Abort(const char* reason);
void Comment(const char* format, ...);
@@ -368,11 +378,15 @@ class LCodeGen BASE_EMBEDDED {
int* offset);
struct JumpTableEntry {
inline JumpTableEntry(Address entry, bool frame, bool is_lazy)
: label(),
address(entry),
needs_frame(frame),
is_lazy_deopt(is_lazy) { }
Label label;
Address address;
bool needs_frame;
bool is_lazy_deopt;
};
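// (A reading of the two new flags, inferred rather than stated: a deopt
// taken from frameless stub code must construct a frame before jumping to
// the deoptimization entry, and lazy deopts are dispatched differently, so
// both properties are recorded per jump-table entry.)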
void EnsureSpaceForLazyDeopt();
@@ -401,6 +415,7 @@ class LCodeGen BASE_EMBEDDED {
ZoneList<LDeferredCode*> deferred_;
int osr_pc_offset_;
int last_lazy_deopt_pc_;
bool frame_is_built_;
// Builder that keeps track of safepoints in the code. The table
// itself is emitted at the end of the generated code.
@@ -416,6 +431,7 @@ class LCodeGen BASE_EMBEDDED {
PushSafepointRegistersScope(LCodeGen* codegen,
Safepoint::Kind kind)
: codegen_(codegen) {
ASSERT(codegen_->info()->is_calling());
ASSERT(codegen_->expected_safepoint_kind_ == Safepoint::kSimple);
codegen_->expected_safepoint_kind_ = kind;
...
@@ -172,8 +172,10 @@ void LGapResolver::BreakCycle(int index) {
} else if (source->IsStackSlot()) {
__ lw(kLithiumScratchReg, cgen_->ToMemOperand(source));
} else if (source->IsDoubleRegister()) {
CpuFeatures::Scope scope(FPU);
__ mov_d(kLithiumScratchDouble, cgen_->ToDoubleRegister(source));
} else if (source->IsDoubleStackSlot()) {
CpuFeatures::Scope scope(FPU);
__ ldc1(kLithiumScratchDouble, cgen_->ToMemOperand(source));
} else {
UNREACHABLE();
@@ -193,9 +195,11 @@ void LGapResolver::RestoreValue() {
} else if (saved_destination_->IsStackSlot()) {
__ sw(kLithiumScratchReg, cgen_->ToMemOperand(saved_destination_));
} else if (saved_destination_->IsDoubleRegister()) {
CpuFeatures::Scope scope(FPU);
__ mov_d(cgen_->ToDoubleRegister(saved_destination_),
kLithiumScratchDouble);
} else if (saved_destination_->IsDoubleStackSlot()) {
CpuFeatures::Scope scope(FPU);
__ sdc1(kLithiumScratchDouble,
cgen_->ToMemOperand(saved_destination_));
} else {
@@ -232,6 +236,7 @@ void LGapResolver::EmitMove(int index) {
MemOperand destination_operand = cgen_->ToMemOperand(destination);
if (in_cycle_) {
if (!destination_operand.OffsetIsInt16Encodable()) {
CpuFeatures::Scope scope(FPU);
// 'at' is overwritten while saving the value to the destination.
// Therefore we can't use 'at'. It is OK if the read from the source
// destroys 'at', since that happens before the value is read.
@@ -271,6 +276,7 @@ void LGapResolver::EmitMove(int index) {
}
} else if (source->IsDoubleRegister()) {
CpuFeatures::Scope scope(FPU);
DoubleRegister source_register = cgen_->ToDoubleRegister(source);
if (destination->IsDoubleRegister()) {
__ mov_d(cgen_->ToDoubleRegister(destination), source_register);
@@ -281,6 +287,7 @@ void LGapResolver::EmitMove(int index) {
}
} else if (source->IsDoubleStackSlot()) {
CpuFeatures::Scope scope(FPU);
MemOperand source_operand = cgen_->ToMemOperand(source);
if (destination->IsDoubleRegister()) {
__ ldc1(cgen_->ToDoubleRegister(destination), source_operand);
...
@@ -42,10 +42,10 @@ LITHIUM_CONCRETE_INSTRUCTION_LIST(DEFINE_COMPILE)
#undef DEFINE_COMPILE
LOsrEntry::LOsrEntry() {
for (int i = 0; i < Register::NumAllocatableRegisters(); ++i) {
register_spills_[i] = NULL;
}
for (int i = 0; i < DoubleRegister::NumAllocatableRegisters(); ++i) {
double_register_spills_[i] = NULL;
}
}
@@ -616,6 +616,7 @@ LInstruction* LChunkBuilder::AssignEnvironment(LInstruction* instr) {
LInstruction* LChunkBuilder::MarkAsCall(LInstruction* instr,
HInstruction* hinstr,
CanDeoptimize can_deoptimize) {
info()->MarkAsNonDeferredCalling();
#ifdef DEBUG
instr->VerifyCall();
#endif
@@ -1583,6 +1584,7 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) {
Representation to = instr->to();
if (from.IsTagged()) {
if (to.IsDouble()) {
info()->MarkAsDeferredCalling();
LOperand* value = UseRegister(instr->value());
LNumberUntagD* res = new(zone()) LNumberUntagD(value);
return AssignEnvironment(DefineAsRegister(res));
@@ -1607,6 +1609,7 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) {
}
} else if (from.IsDouble()) {
if (to.IsTagged()) {
info()->MarkAsDeferredCalling();
LOperand* value = UseRegister(instr->value());
LOperand* temp1 = TempRegister();
LOperand* temp2 = TempRegister();
@@ -1626,6 +1629,7 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) {
return AssignEnvironment(DefineAsRegister(res));
}
} else if (from.IsInteger32()) {
info()->MarkAsDeferredCalling();
if (to.IsTagged()) {
HValue* val = instr->value();
LOperand* value = UseRegisterAtStart(val);
@@ -1864,7 +1868,16 @@ LInstruction* LChunkBuilder::DoLoadKeyed(HLoadKeyed* instr) {
(instr->representation().IsDouble() &&
((elements_kind == EXTERNAL_FLOAT_ELEMENTS) ||
(elements_kind == EXTERNAL_DOUBLE_ELEMENTS))));
// float->double conversion on non-FPU MIPS requires an extra scratch
// register. For convenience, just mark the elements register as "UseTemp"
// so that it can be used as a temp during the float->double conversion
// once it is no longer needed after the float load.
bool needs_temp =
!CpuFeatures::IsSupported(FPU) &&
(elements_kind == EXTERNAL_FLOAT_ELEMENTS);
LOperand* external_pointer = needs_temp
? UseTempRegister(instr->elements())
: UseRegister(instr->elements());
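// (Aside, inferred: unlike UseRegister, UseTempRegister tells the
// allocator the operand may be clobbered, which is what allows reusing
// the elements register as scratch once the float has been loaded.)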
result = new(zone()) LLoadKeyed(external_pointer, key);
}
@@ -2083,8 +2096,17 @@ LInstruction* LChunkBuilder::DoOsrEntry(HOsrEntry* instr) {
LInstruction* LChunkBuilder::DoParameter(HParameter* instr) {
LParameter* result = new(zone()) LParameter;
if (info()->IsOptimizing()) {
int spill_index = chunk()->GetParameterStackSlot(instr->index());
return DefineAsSpilled(result, spill_index);
} else {
ASSERT(info()->IsStub());
CodeStubInterfaceDescriptor* descriptor =
info()->code_stub()->GetInterfaceDescriptor(info()->isolate());
Register reg = descriptor->register_params_[instr->index()];
return DefineFixed(result, reg);
}
}
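// (Illustration, not from the patch: for the keyed-load stub the
// descriptor's register_params_ would hold something like { a1, a0 },
// i.e. receiver in a1 and key in a0, matching the register-state comments
// in the stub-cache code further down.)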
...
@@ -251,6 +251,11 @@ class LInstruction: public ZoneObject {
void MarkAsCall() { is_call_ = true; }
// Interface to the register allocator and iterators.
bool ClobbersTemps() const { return is_call_; }
bool ClobbersRegisters() const { return is_call_; }
bool ClobbersDoubleRegisters() const { return is_call_; }
// Interface to the register allocator and iterators.
bool IsMarkedAsCall() const { return is_call_; }
@@ -2304,8 +2309,9 @@ class LOsrEntry: public LTemplateInstruction<0, 0, 0> {
// slot, i.e., that must also be restored to the spill slot on OSR entry.
// NULL if the register has no assigned spill slot. Indexed by allocation
// index.
LOperand* register_spills_[Register::kMaxNumAllocatableRegisters];
LOperand* double_register_spills_[
DoubleRegister::kMaxNumAllocatableRegisters];
};
...
@@ -118,8 +118,8 @@ void MacroAssembler::PopSafepointRegisters() {
void MacroAssembler::PushSafepointRegistersAndDoubles() {
PushSafepointRegisters();
Subu(sp, sp, Operand(FPURegister::NumAllocatableRegisters() * kDoubleSize));
for (int i = 0; i < FPURegister::NumAllocatableRegisters(); i += 2) {
FPURegister reg = FPURegister::FromAllocationIndex(i);
sdc1(reg, MemOperand(sp, i * kDoubleSize));
}
@@ -127,11 +127,11 @@ void MacroAssembler::PushSafepointRegistersAndDoubles() {
void MacroAssembler::PopSafepointRegistersAndDoubles() {
for (int i = 0; i < FPURegister::NumAllocatableRegisters(); i += 2) {
FPURegister reg = FPURegister::FromAllocationIndex(i);
ldc1(reg, MemOperand(sp, i * kDoubleSize));
}
Addu(sp, sp, Operand(FPURegister::NumAllocatableRegisters() * kDoubleSize));
PopSafepointRegisters();
}
@@ -167,7 +167,7 @@ MemOperand MacroAssembler::SafepointRegisterSlot(Register reg) {
MemOperand MacroAssembler::SafepointRegistersAndDoublesSlot(Register reg) {
UNIMPLEMENTED_MIPS();
// General purpose registers are pushed last on the stack.
int doubles_size = FPURegister::NumAllocatableRegisters() * kDoubleSize;
int register_offset = SafepointRegisterStackIndex(reg.code()) * kPointerSize;
return MemOperand(sp, doubles_size + register_offset);
}
@@ -4250,7 +4250,10 @@ void MacroAssembler::CallRuntimeSaveDoubles(Runtime::FunctionId id) {
const Runtime::Function* function = Runtime::FunctionForId(id);
PrepareCEntryArgs(function->nargs);
PrepareCEntryFunction(ExternalReference(function, isolate()));
SaveFPRegsMode mode = CpuFeatures::IsSupported(FPU)
? kSaveFPRegs
: kDontSaveFPRegs;
CEntryStub stub(1, mode);
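// (Presumably there are no FPU registers to preserve on FPU-less cores,
// so falling back to kDontSaveFPRegs keeps this path usable there.)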
CallStub(&stub);
}
...
@@ -1514,9 +1514,9 @@ class MacroAssembler: public Assembler {
// This handle will be patched with the code object on installation.
Handle<Object> code_object_;
// Needs access to SafepointRegisterStackIndex for compiled frame
// traversal.
friend class StandardFrame;
};
...
@@ -1054,46 +1054,6 @@ static void StoreIntAsFloat(MacroAssembler* masm,
}
// Converts an unsigned integer with the specified number of leading zeroes
// in its binary representation to an IEEE 754 double.
// The integer to convert is passed in register hiword.
// The resulting double is returned in register pair hiword:loword.
// This function does not work correctly for 0.
static void GenerateUInt2Double(MacroAssembler* masm,
Register hiword,
Register loword,
Register scratch,
int leading_zeroes) {
const int meaningful_bits = kBitsPerInt - leading_zeroes - 1;
const int biased_exponent = HeapNumber::kExponentBias + meaningful_bits;
const int mantissa_shift_for_hi_word =
meaningful_bits - HeapNumber::kMantissaBitsInTopWord;
const int mantissa_shift_for_lo_word =
kBitsPerInt - mantissa_shift_for_hi_word;
__ li(scratch, biased_exponent << HeapNumber::kExponentShift);
if (mantissa_shift_for_hi_word > 0) {
__ sll(loword, hiword, mantissa_shift_for_lo_word);
__ srl(hiword, hiword, mantissa_shift_for_hi_word);
__ or_(hiword, scratch, hiword);
} else {
__ mov(loword, zero_reg);
__ sll(hiword, hiword, mantissa_shift_for_hi_word);
__ or_(hiword, scratch, hiword);
}
// If the least significant bit of the biased exponent was not 1, it was
// corrupted by the most significant bit of the mantissa, so fix it up.
if (!(biased_exponent & 1)) {
__ li(scratch, 1 << HeapNumber::kExponentShift);
__ nor(scratch, scratch, scratch);
__ and_(hiword, hiword, scratch);
}
}
#undef __
#define __ ACCESS_MASM(masm())
@@ -3316,9 +3276,17 @@ Handle<Code> KeyedLoadStubCompiler::CompileLoadElement(
// -- a1 : receiver
// -----------------------------------
ElementsKind elements_kind = receiver_map->elements_kind();
if (receiver_map->has_fast_elements() ||
receiver_map->has_external_array_elements()) {
Handle<Code> stub = KeyedLoadFastElementStub(
receiver_map->instance_type() == JS_ARRAY_TYPE,
elements_kind).GetCode();
__ DispatchMap(a1, a2, receiver_map, stub, DO_SMI_CHECK);
} else {
Handle<Code> stub =
KeyedLoadDictionaryElementStub().GetCode();
__ DispatchMap(a1, a2, receiver_map, stub, DO_SMI_CHECK);
}
Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Miss();
__ Jump(ic, RelocInfo::CODE_TARGET);
@@ -3730,355 +3698,6 @@ static void GenerateSmiKeyCheck(MacroAssembler* masm,
}
void KeyedLoadStubCompiler::GenerateLoadExternalArray(
MacroAssembler* masm,
ElementsKind elements_kind) {
// ---------- S t a t e --------------
// -- ra : return address
// -- a0 : key
// -- a1 : receiver
// -----------------------------------
Label miss_force_generic, slow, failed_allocation;
Register key = a0;
Register receiver = a1;
// This stub is meant to be tail-jumped to; the caller must already have
// verified that the receiver is not a smi.
// Check that the key is a smi or a heap number convertible to a smi.
GenerateSmiKeyCheck(masm, key, t0, t1, f2, f4, &miss_force_generic);
__ lw(a3, FieldMemOperand(receiver, JSObject::kElementsOffset));
// a3: elements array
// Check that the index is in range.
__ lw(t1, FieldMemOperand(a3, ExternalArray::kLengthOffset));
__ sra(t2, key, kSmiTagSize);
// Unsigned comparison catches both negative and too-large values.
__ Branch(&miss_force_generic, Ugreater_equal, key, Operand(t1));
__ lw(a3, FieldMemOperand(a3, ExternalArray::kExternalPointerOffset));
// a3: base pointer of external storage
// We do not untag the smi key; instead we work with it
// as if it were premultiplied by 2.
STATIC_ASSERT((kSmiTag == 0) && (kSmiTagSize == 1));
Register value = a2;
switch (elements_kind) {
case EXTERNAL_BYTE_ELEMENTS:
__ srl(t2, key, 1);
__ addu(t3, a3, t2);
__ lb(value, MemOperand(t3, 0));
break;
case EXTERNAL_PIXEL_ELEMENTS:
case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
__ srl(t2, key, 1);
__ addu(t3, a3, t2);
__ lbu(value, MemOperand(t3, 0));
break;
case EXTERNAL_SHORT_ELEMENTS:
__ addu(t3, a3, key);
__ lh(value, MemOperand(t3, 0));
break;
case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
__ addu(t3, a3, key);
__ lhu(value, MemOperand(t3, 0));
break;
case EXTERNAL_INT_ELEMENTS:
case EXTERNAL_UNSIGNED_INT_ELEMENTS:
__ sll(t2, key, 1);
__ addu(t3, a3, t2);
__ lw(value, MemOperand(t3, 0));
break;
case EXTERNAL_FLOAT_ELEMENTS:
__ sll(t3, t2, 2);
__ addu(t3, a3, t3);
if (CpuFeatures::IsSupported(FPU)) {
CpuFeatures::Scope scope(FPU);
__ lwc1(f0, MemOperand(t3, 0));
} else {
__ lw(value, MemOperand(t3, 0));
}
break;
case EXTERNAL_DOUBLE_ELEMENTS:
__ sll(t2, key, 2);
__ addu(t3, a3, t2);
if (CpuFeatures::IsSupported(FPU)) {
CpuFeatures::Scope scope(FPU);
__ ldc1(f0, MemOperand(t3, 0));
} else {
// t3: pointer to the beginning of the double we want to load.
__ lw(a2, MemOperand(t3, 0));
__ lw(a3, MemOperand(t3, Register::kSizeInBytes));
}
break;
case FAST_ELEMENTS:
case FAST_SMI_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
case FAST_HOLEY_ELEMENTS:
case FAST_HOLEY_SMI_ELEMENTS:
case FAST_HOLEY_DOUBLE_ELEMENTS:
case DICTIONARY_ELEMENTS:
case NON_STRICT_ARGUMENTS_ELEMENTS:
UNREACHABLE();
break;
}
// For integer array types:
// a2: value
// For float array type:
// f0: value (if FPU is supported)
// a2: value (if FPU is not supported)
// For double array type:
// f0: value (if FPU is supported)
// a2/a3: value (if FPU is not supported)
if (elements_kind == EXTERNAL_INT_ELEMENTS) {
// For the Int and UnsignedInt array types, we need to see whether
// the value can be represented in a Smi. If not, we need to convert
// it to a HeapNumber.
Label box_int;
__ Subu(t3, value, Operand(0xC0000000)); // Non-smi value gives neg result.
__ Branch(&box_int, lt, t3, Operand(zero_reg));
// Tag integer as smi and return it.
__ sll(v0, value, kSmiTagSize);
__ Ret();
__ bind(&box_int);
if (CpuFeatures::IsSupported(FPU)) {
CpuFeatures::Scope scope(FPU);
// Allocate a HeapNumber for the result and perform int-to-double
// conversion.
// The arm version uses a temporary here to save r0, but we don't need to
// (a0 is not modified).
__ LoadRoot(t1, Heap::kHeapNumberMapRootIndex);
__ AllocateHeapNumber(v0, a3, t0, t1, &slow, DONT_TAG_RESULT);
__ mtc1(value, f0);
__ cvt_d_w(f0, f0);
__ sdc1(f0, MemOperand(v0, HeapNumber::kValueOffset));
__ Addu(v0, v0, kHeapObjectTag);
__ Ret();
} else {
// Allocate a HeapNumber for the result and perform int-to-double
// conversion.
// The arm version uses a temporary here to save r0, but we don't need to
// (a0 is not modified).
__ LoadRoot(t1, Heap::kHeapNumberMapRootIndex);
__ AllocateHeapNumber(v0, a3, t0, t1, &slow, TAG_RESULT);
Register dst_mantissa = t2;
Register dst_exponent = t3;
FloatingPointHelper::Destination dest =
FloatingPointHelper::kCoreRegisters;
FloatingPointHelper::ConvertIntToDouble(masm,
value,
dest,
f0,
dst_mantissa,
dst_exponent,
t1,
f2);
__ sw(dst_mantissa, FieldMemOperand(v0, HeapNumber::kMantissaOffset));
__ sw(dst_exponent, FieldMemOperand(v0, HeapNumber::kExponentOffset));
__ Ret();
}
} else if (elements_kind == EXTERNAL_UNSIGNED_INT_ELEMENTS) {
// The test is different for unsigned int values. Since we need
// the value to be in the range of a positive smi, we can't
// handle either of the top two bits being set in the value.
if (CpuFeatures::IsSupported(FPU)) {
CpuFeatures::Scope scope(FPU);
Label pl_box_int;
__ And(t2, value, Operand(0xC0000000));
__ Branch(&pl_box_int, ne, t2, Operand(zero_reg));
// It can fit in an Smi.
// Tag integer as smi and return it.
__ sll(v0, value, kSmiTagSize);
__ Ret();
__ bind(&pl_box_int);
// Allocate a HeapNumber for the result and perform int-to-double
// conversion. Don't use a0 and a1 as AllocateHeapNumber clobbers all
// registers - also when jumping due to exhausted young space.
__ LoadRoot(t6, Heap::kHeapNumberMapRootIndex);
__ AllocateHeapNumber(v0, t2, t3, t6, &slow, DONT_TAG_RESULT);
// This is replaced by a macro:
// __ mtc1(value, f0); // LS 32-bits.
// __ mtc1(zero_reg, f1); // MS 32-bits are all zero.
// __ cvt_d_l(f0, f0); // Use 64 bit conv to get correct unsigned 32-bit.
__ Cvt_d_uw(f0, value, f22);
__ sdc1(f0, MemOperand(v0, HeapNumber::kValueOffset));
__ Addu(v0, v0, kHeapObjectTag);
__ Ret();
} else {
// Check whether unsigned integer fits into smi.
Label box_int_0, box_int_1, done;
__ And(t2, value, Operand(0x80000000));
__ Branch(&box_int_0, ne, t2, Operand(zero_reg));
__ And(t2, value, Operand(0x40000000));
__ Branch(&box_int_1, ne, t2, Operand(zero_reg));
// Tag integer as smi and return it.
__ sll(v0, value, kSmiTagSize);
__ Ret();
Register hiword = value; // a2.
Register loword = a3;
__ bind(&box_int_0);
// Integer does not have leading zeros.
GenerateUInt2Double(masm, hiword, loword, t0, 0);
__ Branch(&done);
__ bind(&box_int_1);
// Integer has one leading zero.
GenerateUInt2Double(masm, hiword, loword, t0, 1);
__ bind(&done);
// Integer was converted to double in registers hiword:loword.
// Wrap it into a HeapNumber. Don't use a0 and a1 as AllocateHeapNumber
// clobbers all registers - also when jumping due to exhausted young
// space.
__ LoadRoot(t6, Heap::kHeapNumberMapRootIndex);
__ AllocateHeapNumber(t2, t3, t5, t6, &slow, TAG_RESULT);
__ sw(hiword, FieldMemOperand(t2, HeapNumber::kExponentOffset));
__ sw(loword, FieldMemOperand(t2, HeapNumber::kMantissaOffset));
__ mov(v0, t2);
__ Ret();
}
} else if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
// For the floating-point array type, we need to always allocate a
// HeapNumber.
if (CpuFeatures::IsSupported(FPU)) {
CpuFeatures::Scope scope(FPU);
// Allocate a HeapNumber for the result. Don't use a0 and a1 as
// AllocateHeapNumber clobbers all registers - also when jumping due to
// exhausted young space.
__ LoadRoot(t6, Heap::kHeapNumberMapRootIndex);
__ AllocateHeapNumber(v0, t3, t5, t6, &slow, DONT_TAG_RESULT);
// The float (single) value is already in fpu reg f0 (if we use float).
__ cvt_d_s(f0, f0);
__ sdc1(f0, MemOperand(v0, HeapNumber::kValueOffset));
__ Addu(v0, v0, kHeapObjectTag);
__ Ret();
} else {
// Allocate a HeapNumber for the result. Don't use a0 and a1 as
// AllocateHeapNumber clobbers all registers - also when jumping due to
// exhausted young space.
__ LoadRoot(t6, Heap::kHeapNumberMapRootIndex);
__ AllocateHeapNumber(v0, t3, t5, t6, &slow, TAG_RESULT);
// FPU is not available, do manual single to double conversion.
// a2: floating point value (binary32).
// v0: heap number for result
// Extract mantissa to t4.
__ And(t4, value, Operand(kBinary32MantissaMask));
// Extract exponent to t5.
__ srl(t5, value, kBinary32MantissaBits);
__ And(t5, t5, Operand(kBinary32ExponentMask >> kBinary32MantissaBits));
Label exponent_rebiased;
__ Branch(&exponent_rebiased, eq, t5, Operand(zero_reg));
__ li(t0, 0x7ff);
__ Xor(t1, t5, Operand(0xFF));
__ Movz(t5, t0, t1); // Set t5 to 0x7ff only if t5 is equal to 0xff.
__ Branch(&exponent_rebiased, eq, t1, Operand(zero_reg));
// Rebias exponent.
__ Addu(t5,
t5,
Operand(-kBinary32ExponentBias + HeapNumber::kExponentBias));
__ bind(&exponent_rebiased);
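// (Illustrative note, not in the original: the rebias just above adds
// HeapNumber::kExponentBias - kBinary32ExponentBias == 1023 - 127 == 896,
// while the Movz above maps an all-ones binary32 exponent (0xFF) to 0x7FF
// so Infinity and NaN keep an all-ones exponent in the binary64 result.)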
__ And(a2, value, Operand(kBinary32SignMask));
value = no_reg;
__ sll(t0, t5, HeapNumber::kMantissaBitsInTopWord);
__ or_(a2, a2, t0);
// Shift mantissa.
static const int kMantissaShiftForHiWord =
kBinary32MantissaBits - HeapNumber::kMantissaBitsInTopWord;
static const int kMantissaShiftForLoWord =
kBitsPerInt - kMantissaShiftForHiWord;
__ srl(t0, t4, kMantissaShiftForHiWord);
__ or_(a2, a2, t0);
__ sll(a0, t4, kMantissaShiftForLoWord);
__ sw(a2, FieldMemOperand(v0, HeapNumber::kExponentOffset));
__ sw(a0, FieldMemOperand(v0, HeapNumber::kMantissaOffset));
__ Ret();
}
} else if (elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
if (CpuFeatures::IsSupported(FPU)) {
CpuFeatures::Scope scope(FPU);
// Allocate a HeapNumber for the result. Don't use a0 and a1 as
// AllocateHeapNumber clobbers all registers - also when jumping due to
// exhausted young space.
__ LoadRoot(t6, Heap::kHeapNumberMapRootIndex);
__ AllocateHeapNumber(v0, t3, t5, t6, &slow, DONT_TAG_RESULT);
// The double value is already in f0
__ sdc1(f0, MemOperand(v0, HeapNumber::kValueOffset));
__ Addu(v0, v0, kHeapObjectTag);
__ Ret();
} else {
// Allocate a HeapNumber for the result. Don't use a0 and a1 as
// AllocateHeapNumber clobbers all registers - also when jumping due to
// exhausted young space.
__ LoadRoot(t6, Heap::kHeapNumberMapRootIndex);
__ AllocateHeapNumber(v0, t3, t5, t6, &slow, TAG_RESULT);
__ sw(a2, FieldMemOperand(v0, HeapNumber::kMantissaOffset));
__ sw(a3, FieldMemOperand(v0, HeapNumber::kExponentOffset));
__ Ret();
}
} else {
// Tag integer as smi and return it.
__ sll(v0, value, kSmiTagSize);
__ Ret();
}
// Slow case, key and receiver still in a0 and a1.
__ bind(&slow);
__ IncrementCounter(
masm->isolate()->counters()->keyed_load_external_array_slow(),
1, a2, a3);
// ---------- S t a t e --------------
// -- ra : return address
// -- a0 : key
// -- a1 : receiver
// -----------------------------------
__ Push(a1, a0);
__ TailCallRuntime(Runtime::kKeyedGetProperty, 2, 1);
__ bind(&miss_force_generic);
Handle<Code> stub =
masm->isolate()->builtins()->KeyedLoadIC_MissForceGeneric();
__ Jump(stub, RelocInfo::CODE_TARGET);
}
void KeyedStoreStubCompiler::GenerateStoreExternalArray(
MacroAssembler* masm,
ElementsKind elements_kind) {
@@ -4478,115 +4097,6 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray(
}
void KeyedLoadStubCompiler::GenerateLoadFastElement(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- ra : return address
// -- a0 : key
// -- a1 : receiver
// -----------------------------------
Label miss_force_generic;
// This stub is meant to be tail-jumped to, the receiver must already
// have been verified by the caller to not be a smi.
// Check that the key is a smi or a heap number convertible to a smi.
GenerateSmiKeyCheck(masm, a0, t0, t1, f2, f4, &miss_force_generic);
// Get the elements array.
__ lw(a2, FieldMemOperand(a1, JSObject::kElementsOffset));
__ AssertFastElements(a2);
// Check that the key is within bounds.
__ lw(a3, FieldMemOperand(a2, FixedArray::kLengthOffset));
__ Branch(USE_DELAY_SLOT, &miss_force_generic, hs, a0, Operand(a3));
// Load the result and make sure it's not the hole.
__ Addu(a3, a2, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2);
__ sll(t0, a0, kPointerSizeLog2 - kSmiTagSize);
__ Addu(t0, t0, a3);
__ lw(t0, MemOperand(t0));
__ LoadRoot(t1, Heap::kTheHoleValueRootIndex);
__ Branch(&miss_force_generic, eq, t0, Operand(t1));
__ Ret(USE_DELAY_SLOT);
__ mov(v0, t0);
__ bind(&miss_force_generic);
Handle<Code> stub =
masm->isolate()->builtins()->KeyedLoadIC_MissForceGeneric();
__ Jump(stub, RelocInfo::CODE_TARGET);
}
void KeyedLoadStubCompiler::GenerateLoadFastDoubleElement(
MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- ra : return address
// -- a0 : key
// -- a1 : receiver
// -----------------------------------
Label miss_force_generic, slow_allocate_heapnumber;
Register key_reg = a0;
Register receiver_reg = a1;
Register elements_reg = a2;
Register heap_number_reg = a2;
Register indexed_double_offset = a3;
Register scratch = t0;
Register scratch2 = t1;
Register scratch3 = t2;
Register heap_number_map = t3;
// This stub is meant to be tail-jumped to; the caller must already have
// verified that the receiver is not a smi.
// Check that the key is a smi or a heap number convertible to a smi.
GenerateSmiKeyCheck(masm, key_reg, t0, t1, f2, f4, &miss_force_generic);
// Get the elements array.
__ lw(elements_reg,
FieldMemOperand(receiver_reg, JSObject::kElementsOffset));
// Check that the key is within bounds.
__ lw(scratch, FieldMemOperand(elements_reg, FixedArray::kLengthOffset));
__ Branch(&miss_force_generic, hs, key_reg, Operand(scratch));
// Load the upper word of the double in the fixed array and test for NaN.
__ sll(scratch2, key_reg, kDoubleSizeLog2 - kSmiTagSize);
__ Addu(indexed_double_offset, elements_reg, Operand(scratch2));
uint32_t upper_32_offset = FixedArray::kHeaderSize + sizeof(kHoleNanLower32);
__ lw(scratch, FieldMemOperand(indexed_double_offset, upper_32_offset));
__ Branch(&miss_force_generic, eq, scratch, Operand(kHoleNanUpper32));
// Non-NaN. Allocate a new heap number and copy the double value into it.
__ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
__ AllocateHeapNumber(heap_number_reg, scratch2, scratch3,
heap_number_map, &slow_allocate_heapnumber, TAG_RESULT);
// Don't need to reload the upper 32 bits of the double, it's already in
// scratch.
__ sw(scratch, FieldMemOperand(heap_number_reg,
HeapNumber::kExponentOffset));
__ lw(scratch, FieldMemOperand(indexed_double_offset,
FixedArray::kHeaderSize));
__ sw(scratch, FieldMemOperand(heap_number_reg,
HeapNumber::kMantissaOffset));
__ mov(v0, heap_number_reg);
__ Ret();
__ bind(&slow_allocate_heapnumber);
Handle<Code> slow_ic =
masm->isolate()->builtins()->KeyedLoadIC_Slow();
__ Jump(slow_ic, RelocInfo::CODE_TARGET);
__ bind(&miss_force_generic);
Handle<Code> miss_ic =
masm->isolate()->builtins()->KeyedLoadIC_MissForceGeneric();
__ Jump(miss_ic, RelocInfo::CODE_TARGET);
}
void KeyedStoreStubCompiler::GenerateStoreFastElement(
MacroAssembler* masm,
bool is_js_array,
...