Commit fc1dadce authored by yangguo@chromium.org's avatar yangguo@chromium.org

Use register allocator for context on x64.

BUG=
R=mvstanton@chromium.org

Review URL: https://codereview.chromium.org/50863002

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@17585 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
parent ba24c3a7
......@@ -6393,18 +6393,6 @@ bool HOptimizedGraphBuilder::TryInline(CallKind call_kind,
return false;
}
#if !V8_TARGET_ARCH_IA32 && !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_MIPS
// Target must be able to use caller's context.
CompilationInfo* outer_info = current_info();
if (target->context() != outer_info->closure()->context() ||
outer_info->scope()->contains_with() ||
outer_info->scope()->num_heap_slots() > 0) {
TraceInline(target, caller, "target requires context change");
return false;
}
#endif
// Don't inline deeper than the maximum number of inlining levels.
HEnvironment* env = environment();
int current_level = 1;
......@@ -6542,15 +6530,9 @@ bool HOptimizedGraphBuilder::TryInline(CallKind call_kind,
undefined,
function_state()->inlining_kind(),
undefined_receiver);
#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_MIPS
// IA32, ARM and MIPS only, overwrite the caller's context in the
// deoptimization environment with the correct one.
//
// TODO(kmillikin): implement the same inlining on other platforms so we
// can remove the unsightly ifdefs in this function.
HConstant* context = Add<HConstant>(Handle<Context>(target->context()));
inner_env->BindContext(context);
#endif
Add<HSimulate>(return_id);
current_block()->UpdateEnvironment(inner_env);
......
......@@ -4445,6 +4445,7 @@ void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
void LCodeGen::DoCallRuntime(LCallRuntime* instr) {
ASSERT(ToRegister(instr->context()).is(esi));
CallRuntime(instr->function(), instr->arity(), instr, instr->save_doubles());
}
......@@ -5018,6 +5019,7 @@ void LCodeGen::DoDeferredStringCharFromCode(LStringCharFromCode* instr) {
void LCodeGen::DoStringAdd(LStringAdd* instr) {
ASSERT(ToRegister(instr->context()).is(esi));
EmitPushTaggedOperand(instr->left());
EmitPushTaggedOperand(instr->right());
StringAddStub stub(instr->hydrogen()->flags());
......@@ -6122,6 +6124,7 @@ void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
void LCodeGen::DoTypeof(LTypeof* instr) {
ASSERT(ToRegister(instr->context()).is(esi));
LOperand* input = instr->value();
EmitPushTaggedOperand(input);
CallRuntime(Runtime::kTypeof, 1, instr);
......@@ -6366,6 +6369,7 @@ void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
ASSERT(ToRegister(instr->context()).is(esi));
__ cmp(eax, isolate()->factory()->undefined_value());
DeoptimizeIf(equal, instr->environment());
......
......@@ -1290,10 +1290,9 @@ LInstruction* LChunkBuilder::DoMathFloor(HUnaryMathOperation* instr) {
LInstruction* LChunkBuilder::DoMathRound(HUnaryMathOperation* instr) {
LOperand* context = UseAny(instr->context());
LOperand* input = UseRegister(instr->value());
LOperand* temp = FixedTemp(xmm4);
LMathRound* result = new(zone()) LMathRound(context, input, temp);
LMathRound* result = new(zone()) LMathRound(input, temp);
return AssignEnvironment(DefineAsRegister(result));
}
......@@ -1355,10 +1354,9 @@ LInstruction* LChunkBuilder::DoMathSqrt(HUnaryMathOperation* instr) {
LInstruction* LChunkBuilder::DoMathPowHalf(HUnaryMathOperation* instr) {
LOperand* context = UseAny(instr->context());
LOperand* input = UseRegisterAtStart(instr->value());
LOperand* temp = TempRegister();
LMathPowHalf* result = new(zone()) LMathPowHalf(context, input, temp);
LMathPowHalf* result = new(zone()) LMathPowHalf(input, temp);
return DefineSameAsFirst(result);
}
......@@ -2116,12 +2114,10 @@ LInstruction* LChunkBuilder::DoClampToUint8(HClampToUint8* instr) {
LInstruction* LChunkBuilder::DoReturn(HReturn* instr) {
LOperand* context = info()->IsStub()
? UseFixed(instr->context(), esi)
: NULL;
LOperand* context = info()->IsStub() ? UseFixed(instr->context(), esi) : NULL;
LOperand* parameter_count = UseRegisterOrConstant(instr->parameter_count());
return new(zone()) LReturn(UseFixed(instr->value(), eax), context,
parameter_count);
return new(zone()) LReturn(
UseFixed(instr->value(), eax), context, parameter_count);
}
......
......@@ -742,15 +742,13 @@ class LMathFloor V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
class LMathRound V8_FINAL : public LTemplateInstruction<1, 2, 1> {
class LMathRound V8_FINAL : public LTemplateInstruction<1, 1, 1> {
public:
LMathRound(LOperand* context, LOperand* value, LOperand* temp) {
inputs_[1] = context;
LMathRound(LOperand* value, LOperand* temp) {
inputs_[0] = value;
temps_[0] = temp;
}
LOperand* context() { return inputs_[1]; }
LOperand* value() { return inputs_[0]; }
LOperand* temp() { return temps_[0]; }
......@@ -853,15 +851,13 @@ class LMathSqrt V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
class LMathPowHalf V8_FINAL : public LTemplateInstruction<1, 2, 1> {
class LMathPowHalf V8_FINAL : public LTemplateInstruction<1, 1, 1> {
public:
LMathPowHalf(LOperand* context, LOperand* value, LOperand* temp) {
inputs_[1] = context;
LMathPowHalf(LOperand* value, LOperand* temp) {
inputs_[0] = value;
temps_[0] = temp;
}
LOperand* context() { return inputs_[1]; }
LOperand* value() { return inputs_[0]; }
LOperand* temp() { return temps_[0]; }
......@@ -970,6 +966,7 @@ class LStringCompareAndBranch V8_FINAL : public LControlInstruction<3, 0> {
inputs_[2] = right;
}
LOperand* context() { return inputs_[1]; }
LOperand* left() { return inputs_[1]; }
LOperand* right() { return inputs_[2]; }
......@@ -1074,6 +1071,7 @@ class LCmpT V8_FINAL : public LTemplateInstruction<1, 3, 0> {
DECLARE_CONCRETE_INSTRUCTION(CmpT, "cmp-t")
DECLARE_HYDROGEN_ACCESSOR(CompareGeneric)
LOperand* context() { return inputs_[0]; }
Token::Value op() const { return hydrogen()->token(); }
};
......@@ -1100,6 +1098,7 @@ class LInstanceOfKnownGlobal V8_FINAL : public LTemplateInstruction<1, 2, 1> {
temps_[0] = temp;
}
LOperand* context() { return inputs_[0]; }
LOperand* value() { return inputs_[1]; }
LOperand* temp() { return temps_[0]; }
......@@ -1513,7 +1512,8 @@ class LArithmeticT V8_FINAL : public LTemplateInstruction<1, 3, 0> {
class LReturn V8_FINAL : public LTemplateInstruction<0, 3, 0> {
public:
explicit LReturn(LOperand* value, LOperand* context,
explicit LReturn(LOperand* value,
LOperand* context,
LOperand* parameter_count) {
inputs_[0] = value;
inputs_[1] = context;
......
......@@ -140,12 +140,12 @@ void RelocInfo::PatchCode(byte* instructions, int instruction_count) {
const int
Register::kRegisterCodeByAllocationIndex[kMaxNumAllocatableRegisters] = {
// rax, rbx, rdx, rcx, rdi, r8, r9, r11, r14, r15
0, 3, 2, 1, 7, 8, 9, 11, 14, 15
// rax, rbx, rdx, rcx, rsi, rdi, r8, r9, r11, r14, r15
0, 3, 2, 1, 6, 7, 8, 9, 11, 14, 15
};
const int Register::kAllocationIndexByRegisterCode[kNumRegisters] = {
0, 3, 2, 1, -1, -1, -1, 4, 5, 6, -1, 7, -1, -1, 8, 9
0, 3, 2, 1, -1, -1, 4, 5, 6, 7, -1, 8, -1, -1, 9, 10
};
......
......@@ -91,11 +91,10 @@ struct Register {
// The non-allocatable registers are:
// rsp - stack pointer
// rbp - frame pointer
// rsi - context register
// r10 - fixed scratch register
// r12 - smi constant register
// r13 - root register
static const int kMaxNumAllocatableRegisters = 10;
static const int kMaxNumAllocatableRegisters = 11;
static int NumAllocatableRegisters() {
return kMaxNumAllocatableRegisters;
}
......@@ -118,6 +117,7 @@ struct Register {
"rbx",
"rdx",
"rcx",
"rsi",
"rdi",
"r8",
"r9",
......
......@@ -1136,8 +1136,6 @@ void MathPowStub::Generate(MacroAssembler* masm) {
}
// Return value is in xmm0.
__ movsd(double_result, xmm0);
// Restore context register.
__ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
__ bind(&done);
__ IncrementCounter(counters->math_pow(), 1);
......
......@@ -3366,8 +3366,8 @@ void FullCodeGenerator::EmitDateField(CallRuntime* expr) {
}
__ bind(&runtime);
__ PrepareCallCFunction(2);
__ movq(arg_reg_1, object);
__ movq(arg_reg_2, index, RelocInfo::NONE64);
__ movq(arg_reg_1, object);
__ movq(arg_reg_2, index, RelocInfo::NONE64);
__ CallCFunction(ExternalReference::get_date_field_function(isolate()), 2);
__ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
__ jmp(&done);
......
This diff is collapsed.
......@@ -193,7 +193,10 @@ class LCodeGen: public LCodeGenBase {
void CallRuntimeFromDeferred(Runtime::FunctionId id,
int argc,
LInstruction* instr);
LInstruction* instr,
LOperand* context);
void LoadContextFromDeferred(LOperand* context);
enum RDIState {
RDI_UNINITIALIZED,
......
This diff is collapsed.
This diff is collapsed.
......@@ -311,11 +311,6 @@ void MacroAssembler::RecordWriteField(
SaveFPRegsMode save_fp,
RememberedSetAction remembered_set_action,
SmiCheck smi_check) {
// The compiled code assumes that record write doesn't change the
// context register, so we check that none of the clobbered
// registers are rsi.
ASSERT(!value.is(rsi) && !dst.is(rsi));
// First, check if a write barrier is even needed. The tests below
// catch stores of Smis.
Label done;
......@@ -392,11 +387,6 @@ void MacroAssembler::RecordWrite(Register object,
SaveFPRegsMode fp_mode,
RememberedSetAction remembered_set_action,
SmiCheck smi_check) {
// The compiled code assumes that record write doesn't change the
// context register, so we check that none of the clobbered
// registers are rsi.
ASSERT(!value.is(rsi) && !address.is(rsi));
ASSERT(!object.is(value));
ASSERT(!object.is(address));
ASSERT(!value.is(address));
......
......@@ -73,7 +73,7 @@ ConvertDToIFunc MakeConvertDToIFuncTrampoline(Isolate* isolate,
// Save registers make sure they don't get clobbered.
int reg_num = 0;
for (;reg_num < Register::NumAllocatableRegisters(); ++reg_num) {
Register reg = Register::from_code(reg_num);
Register reg = Register::FromAllocationIndex(reg_num);
if (!reg.is(esp) && !reg.is(ebp) && !reg.is(destination_reg)) {
__ push(reg);
param_offset += kPointerSize;
......@@ -91,7 +91,7 @@ ConvertDToIFunc MakeConvertDToIFuncTrampoline(Isolate* isolate,
// Make sure no registers have been unexpectedly clobbered
for (--reg_num; reg_num >= 0; --reg_num) {
Register reg = Register::from_code(reg_num);
Register reg = Register::FromAllocationIndex(reg_num);
if (!reg.is(esp) && !reg.is(ebp) && !reg.is(destination_reg)) {
__ cmp(reg, MemOperand(esp, 0));
__ Assert(equal, kRegisterWasClobbered);
......
......@@ -65,21 +65,25 @@ ConvertDToIFunc MakeConvertDToIFuncTrampoline(Isolate* isolate,
__ push(rdi);
if (!source_reg.is(rsp)) {
__ lea(source_reg, MemOperand(rsp, -8 * kPointerSize - offset));
// The argument we pass to the stub is not a heap number, but instead
// stack-allocated and offset-wise made to look like a heap number for
// the stub. We create that "heap number" after pushing all allocatable
// registers.
int double_argument_slot =
(Register::NumAllocatableRegisters() - 1) * kPointerSize + kDoubleSize;
__ lea(source_reg, MemOperand(rsp, -double_argument_slot - offset));
}
int param_offset = 7 * kPointerSize;
// Save registers make sure they don't get clobbered.
int reg_num = 0;
for (;reg_num < Register::NumAllocatableRegisters(); ++reg_num) {
Register reg = Register::from_code(reg_num);
Register reg = Register::FromAllocationIndex(reg_num);
if (!reg.is(rsp) && !reg.is(rbp) && !reg.is(destination_reg)) {
__ push(reg);
param_offset += kPointerSize;
}
}
// Re-push the double argument
// Put the double argument into the designated double argument slot.
__ subq(rsp, Immediate(kDoubleSize));
__ movsd(MemOperand(rsp, 0), xmm0);
......@@ -90,7 +94,7 @@ ConvertDToIFunc MakeConvertDToIFuncTrampoline(Isolate* isolate,
// Make sure no registers have been unexpectedly clobbered
for (--reg_num; reg_num >= 0; --reg_num) {
Register reg = Register::from_code(reg_num);
Register reg = Register::FromAllocationIndex(reg_num);
if (!reg.is(rsp) && !reg.is(rbp) && !reg.is(destination_reg)) {
__ cmpq(reg, MemOperand(rsp, 0));
__ Assert(equal, kRegisterWasClobbered);
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment