Commit 6c8a6c26 authored by erik.corry@gmail.com's avatar erik.corry@gmail.com

Cosmetic (ARM/MIPS): Use FieldMemOperand where appropriate.

Review URL: https://chromiumcodereview.appspot.com/9963051

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@11207 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
parent b5bf01ec
...@@ -1730,7 +1730,7 @@ Handle<Code> CallStubCompiler::CompileArrayPopCall( ...@@ -1730,7 +1730,7 @@ Handle<Code> CallStubCompiler::CompileArrayPopCall(
// We can't address the last element in one operation. Compute the more // We can't address the last element in one operation. Compute the more
// expensive shift first, and use an offset later on. // expensive shift first, and use an offset later on.
__ add(elements, elements, Operand(r4, LSL, kPointerSizeLog2 - kSmiTagSize)); __ add(elements, elements, Operand(r4, LSL, kPointerSizeLog2 - kSmiTagSize));
__ ldr(r0, MemOperand(elements, FixedArray::kHeaderSize - kHeapObjectTag)); __ ldr(r0, FieldMemOperand(elements, FixedArray::kHeaderSize));
__ cmp(r0, r6); __ cmp(r0, r6);
__ b(eq, &call_builtin); __ b(eq, &call_builtin);
...@@ -1738,7 +1738,7 @@ Handle<Code> CallStubCompiler::CompileArrayPopCall( ...@@ -1738,7 +1738,7 @@ Handle<Code> CallStubCompiler::CompileArrayPopCall(
__ str(r4, FieldMemOperand(receiver, JSArray::kLengthOffset)); __ str(r4, FieldMemOperand(receiver, JSArray::kLengthOffset));
// Fill with the hole. // Fill with the hole.
__ str(r6, MemOperand(elements, FixedArray::kHeaderSize - kHeapObjectTag)); __ str(r6, FieldMemOperand(elements, FixedArray::kHeaderSize));
__ Drop(argc + 1); __ Drop(argc + 1);
__ Ret(); __ Ret();
......
...@@ -2977,7 +2977,7 @@ void FullCodeGenerator::EmitRandomHeapNumber(CallRuntime* expr) { ...@@ -2977,7 +2977,7 @@ void FullCodeGenerator::EmitRandomHeapNumber(CallRuntime* expr) {
__ Move(f14, zero_reg, a1); __ Move(f14, zero_reg, a1);
// Subtract and store the result in the heap number. // Subtract and store the result in the heap number.
__ sub_d(f0, f12, f14); __ sub_d(f0, f12, f14);
__ sdc1(f0, MemOperand(s0, HeapNumber::kValueOffset - kHeapObjectTag)); __ sdc1(f0, FieldMemOperand(s0, HeapNumber::kValueOffset));
__ mov(v0, s0); __ mov(v0, s0);
} else { } else {
__ PrepareCallCFunction(2, a0); __ PrepareCallCFunction(2, a0);
......
...@@ -513,7 +513,7 @@ void RegExpMacroAssemblerMIPS::CheckBitInTable( ...@@ -513,7 +513,7 @@ void RegExpMacroAssemblerMIPS::CheckBitInTable(
__ Addu(a0, a0, current_character()); __ Addu(a0, a0, current_character());
} }
__ lbu(a0, MemOperand(a0, ByteArray::kHeaderSize - kHeapObjectTag)); __ lbu(a0, FieldMemOperand(a0, ByteArray::kHeaderSize));
BranchOrBacktrack(on_bit_set, ne, a0, Operand(zero_reg)); BranchOrBacktrack(on_bit_set, ne, a0, Operand(zero_reg));
} }
......
...@@ -1737,14 +1737,14 @@ Handle<Code> CallStubCompiler::CompileArrayPopCall( ...@@ -1737,14 +1737,14 @@ Handle<Code> CallStubCompiler::CompileArrayPopCall(
// expensive shift first, and use an offset later on. // expensive shift first, and use an offset later on.
__ sll(t1, t0, kPointerSizeLog2 - kSmiTagSize); __ sll(t1, t0, kPointerSizeLog2 - kSmiTagSize);
__ Addu(elements, elements, t1); __ Addu(elements, elements, t1);
__ lw(v0, MemOperand(elements, FixedArray::kHeaderSize - kHeapObjectTag)); __ lw(v0, FieldMemOperand(elements, FixedArray::kHeaderSize));
__ Branch(&call_builtin, eq, v0, Operand(t2)); __ Branch(&call_builtin, eq, v0, Operand(t2));
// Set the array's length. // Set the array's length.
__ sw(t0, FieldMemOperand(receiver, JSArray::kLengthOffset)); __ sw(t0, FieldMemOperand(receiver, JSArray::kLengthOffset));
// Fill with the hole. // Fill with the hole.
__ sw(t2, MemOperand(elements, FixedArray::kHeaderSize - kHeapObjectTag)); __ sw(t2, FieldMemOperand(elements, FixedArray::kHeaderSize));
__ Drop(argc + 1); __ Drop(argc + 1);
__ Ret(); __ Ret();
...@@ -3496,7 +3496,7 @@ void KeyedLoadStubCompiler::GenerateLoadExternalArray( ...@@ -3496,7 +3496,7 @@ void KeyedLoadStubCompiler::GenerateLoadExternalArray(
CpuFeatures::Scope scope(FPU); CpuFeatures::Scope scope(FPU);
__ mtc1(value, f0); __ mtc1(value, f0);
__ cvt_d_w(f0, f0); __ cvt_d_w(f0, f0);
__ sdc1(f0, MemOperand(v0, HeapNumber::kValueOffset - kHeapObjectTag)); __ sdc1(f0, FieldMemOperand(v0, HeapNumber::kValueOffset));
__ Ret(); __ Ret();
} else { } else {
Register dst1 = t2; Register dst1 = t2;
...@@ -3544,7 +3544,7 @@ void KeyedLoadStubCompiler::GenerateLoadExternalArray( ...@@ -3544,7 +3544,7 @@ void KeyedLoadStubCompiler::GenerateLoadExternalArray(
__ Cvt_d_uw(f0, value, f22); __ Cvt_d_uw(f0, value, f22);
__ sdc1(f0, MemOperand(v0, HeapNumber::kValueOffset - kHeapObjectTag)); __ sdc1(f0, FieldMemOperand(v0, HeapNumber::kValueOffset));
__ Ret(); __ Ret();
} else { } else {
...@@ -3598,7 +3598,7 @@ void KeyedLoadStubCompiler::GenerateLoadExternalArray( ...@@ -3598,7 +3598,7 @@ void KeyedLoadStubCompiler::GenerateLoadExternalArray(
__ AllocateHeapNumber(v0, t3, t5, t6, &slow); __ AllocateHeapNumber(v0, t3, t5, t6, &slow);
// The float (single) value is already in fpu reg f0 (if we use float). // The float (single) value is already in fpu reg f0 (if we use float).
__ cvt_d_s(f0, f0); __ cvt_d_s(f0, f0);
__ sdc1(f0, MemOperand(v0, HeapNumber::kValueOffset - kHeapObjectTag)); __ sdc1(f0, FieldMemOperand(v0, HeapNumber::kValueOffset));
__ Ret(); __ Ret();
} else { } else {
// Allocate a HeapNumber for the result. Don't use a0 and a1 as // Allocate a HeapNumber for the result. Don't use a0 and a1 as
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment