Commit 1a122359 authored by haitao.feng@intel.com's avatar haitao.feng@intel.com

Revert some movq->movp changes under the _WIN64 macro for X64

R=verwaest@chromium.org

Review URL: https://codereview.chromium.org/132233027

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@18803 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
parent 10e04142
...@@ -131,10 +131,10 @@ ModuloFunction CreateModuloFunction() { ...@@ -131,10 +131,10 @@ ModuloFunction CreateModuloFunction() {
// Compute x mod y. // Compute x mod y.
// Load y and x (use argument backing store as temporary storage). // Load y and x (use argument backing store as temporary storage).
__ movsd(Operand(rsp, kPointerSize * 2), xmm1); __ movsd(Operand(rsp, kRegisterSize * 2), xmm1);
__ movsd(Operand(rsp, kPointerSize), xmm0); __ movsd(Operand(rsp, kRegisterSize), xmm0);
__ fld_d(Operand(rsp, kPointerSize * 2)); __ fld_d(Operand(rsp, kRegisterSize * 2));
__ fld_d(Operand(rsp, kPointerSize)); __ fld_d(Operand(rsp, kRegisterSize));
// Clear exception flags before operation. // Clear exception flags before operation.
{ {
...@@ -170,14 +170,14 @@ ModuloFunction CreateModuloFunction() { ...@@ -170,14 +170,14 @@ ModuloFunction CreateModuloFunction() {
__ fstp(0); // Drop result in st(0). __ fstp(0); // Drop result in st(0).
int64_t kNaNValue = V8_INT64_C(0x7ff8000000000000); int64_t kNaNValue = V8_INT64_C(0x7ff8000000000000);
__ movq(rcx, kNaNValue); __ movq(rcx, kNaNValue);
__ movp(Operand(rsp, kPointerSize), rcx); __ movq(Operand(rsp, kRegisterSize), rcx);
__ movsd(xmm0, Operand(rsp, kPointerSize)); __ movsd(xmm0, Operand(rsp, kRegisterSize));
__ jmp(&return_result); __ jmp(&return_result);
// If result is valid, return that. // If result is valid, return that.
__ bind(&valid_result); __ bind(&valid_result);
__ fstp_d(Operand(rsp, kPointerSize)); __ fstp_d(Operand(rsp, kRegisterSize));
__ movsd(xmm0, Operand(rsp, kPointerSize)); __ movsd(xmm0, Operand(rsp, kRegisterSize));
// Clean up FPU stack and exceptions and return xmm0 // Clean up FPU stack and exceptions and return xmm0
__ bind(&return_result); __ bind(&return_result);
......
...@@ -186,9 +186,9 @@ void Deoptimizer::EntryGenerator::Generate() { ...@@ -186,9 +186,9 @@ void Deoptimizer::EntryGenerator::Generate() {
// On windows put the arguments on the stack (PrepareCallCFunction // On windows put the arguments on the stack (PrepareCallCFunction
// has created space for this). On linux pass the arguments in r8 and r9. // has created space for this). On linux pass the arguments in r8 and r9.
#ifdef _WIN64 #ifdef _WIN64
__ movp(Operand(rsp, 4 * kPointerSize), arg5); __ movq(Operand(rsp, 4 * kRegisterSize), arg5);
__ LoadAddress(arg5, ExternalReference::isolate_address(isolate())); __ LoadAddress(arg5, ExternalReference::isolate_address(isolate()));
__ movp(Operand(rsp, 5 * kPointerSize), arg5); __ movq(Operand(rsp, 5 * kRegisterSize), arg5);
#else #else
__ movp(r8, arg5); __ movp(r8, arg5);
__ LoadAddress(r9, ExternalReference::isolate_address(isolate())); __ LoadAddress(r9, ExternalReference::isolate_address(isolate()));
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment