Commit b99802f5 authored by ulan@chromium.org

Out-of-line constant pool on Arm: Stage 1 - Free up r7 for use as constant pool pointer register

First stage of implementing an out-of-line constant pool on Arm.  This CL
frees up register r7 for use as a constant pool pointer in later stages.

BUG=
R=ulan@chromium.org

Review URL: https://chromiumcodereview.appspot.com/21063002

Patch from Ross McIlroy <rmcilroy@chromium.org>.

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@16898 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
parent c47cc38b
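
Background on the change: with the inline constant pools V8 emits today, constants live in the instruction stream and are loaded pc-relative; an out-of-line pool moves them into a separate backing store that generated code addresses through a dedicated pointer register, the role r7 (aliased as pp below) is being reserved for. A minimal sketch of the idea, assuming nothing about V8's eventual implementation (all names illustrative):

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    // Illustrative out-of-line constant pool: constants are stored in a pool
    // object instead of the instruction stream, and code reaches a slot with
    // a single "ldr rN, [pp, #offset]" once pp points at the pool.
    class ConstantPool {
     public:
      // Returns the byte offset of |value|, appending it on first use so
      // that repeated constants share one slot.
      int OffsetOf(uint32_t value) {
        for (std::size_t i = 0; i < entries_.size(); ++i) {
          if (entries_[i] == value) {
            return static_cast<int>(i * sizeof(uint32_t));
          }
        }
        entries_.push_back(value);
        return static_cast<int>((entries_.size() - 1) * sizeof(uint32_t));
      }

      // What the emitted load would observe at run time.
      uint32_t At(int offset) const {
        return entries_[offset / sizeof(uint32_t)];
      }

     private:
      std::vector<uint32_t> entries_;
    };
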
@@ -118,7 +118,8 @@ class CpuFeatures : public AllStatic {
 // Core register
 struct Register {
   static const int kNumRegisters = 16;
-  static const int kMaxNumAllocatableRegisters = 8;
+  static const int kMaxNumAllocatableRegisters =
+      FLAG_enable_ool_constant_pool ? 7 : 8;
   static const int kSizeInBytes = 4;
   inline static int NumAllocatableRegisters();
@@ -201,6 +202,7 @@ const Register r3 = { kRegister_r3_Code };
 const Register r4 = { kRegister_r4_Code };
 const Register r5 = { kRegister_r5_Code };
 const Register r6 = { kRegister_r6_Code };
+// Used as constant pool pointer register if FLAG_enable_ool_constant_pool.
 const Register r7 = { kRegister_r7_Code };
 // Used as context register.
 const Register r8 = { kRegister_r8_Code };
......
@@ -445,9 +445,8 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
     // r3: object size (in words)
     // r4: JSObject (not tagged)
     // r5: First in-object property of JSObject (not tagged)
-    __ add(r6, r4, Operand(r3, LSL, kPointerSizeLog2));  // End of object.
     ASSERT_EQ(3 * kPointerSize, JSObject::kHeaderSize);
-    __ LoadRoot(r7, Heap::kUndefinedValueRootIndex);
+    __ LoadRoot(r6, Heap::kUndefinedValueRootIndex);
     if (count_constructions) {
       __ ldr(r0, FieldMemOperand(r2, Map::kInstanceSizesOffset));
       __ Ubfx(r0, r0, Map::kPreAllocatedPropertyFieldsByte * kBitsPerByte,
@@ -455,14 +454,16 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
       __ add(r0, r5, Operand(r0, LSL, kPointerSizeLog2));
       // r0: offset of first field after pre-allocated fields
       if (FLAG_debug_code) {
-        __ cmp(r0, r6);
+        __ add(ip, r4, Operand(r3, LSL, kPointerSizeLog2));  // End of object.
+        __ cmp(r0, ip);
         __ Assert(le, kUnexpectedNumberOfPreAllocatedPropertyFields);
       }
-      __ InitializeFieldsWithFiller(r5, r0, r7);
+      __ InitializeFieldsWithFiller(r5, r0, r6);
       // To allow for truncation.
-      __ LoadRoot(r7, Heap::kOnePointerFillerMapRootIndex);
+      __ LoadRoot(r6, Heap::kOnePointerFillerMapRootIndex);
     }
-    __ InitializeFieldsWithFiller(r5, r6, r7);
+    __ add(r0, r4, Operand(r3, LSL, kPointerSizeLog2));  // End of object.
+    __ InitializeFieldsWithFiller(r5, r0, r6);
     // Add the object tag to make the JSObject real, so that we can continue
     // and jump into the continuation code at any time from now on. Any
@@ -527,16 +528,10 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
     __ add(r6, r2, Operand(r3, LSL, kPointerSizeLog2));  // End of object.
     ASSERT_EQ(2 * kPointerSize, FixedArray::kHeaderSize);
     { Label loop, entry;
-      if (count_constructions) {
-        __ LoadRoot(r7, Heap::kUndefinedValueRootIndex);
-      } else if (FLAG_debug_code) {
-        __ LoadRoot(r8, Heap::kUndefinedValueRootIndex);
-        __ cmp(r7, r8);
-        __ Assert(eq, kUndefinedValueNotLoaded);
-      }
+      __ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
       __ b(&entry);
       __ bind(&loop);
-      __ str(r7, MemOperand(r2, kPointerSize, PostIndex));
+      __ str(r0, MemOperand(r2, kPointerSize, PostIndex));
       __ bind(&entry);
       __ cmp(r2, r6);
       __ b(lt, &loop);
@@ -700,7 +695,7 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
   // r2: receiver
   // r3: argc
   // r4: argv
-  // r5-r7, cp may be clobbered
+  // r5-r6, r7 (if not FLAG_enable_ool_constant_pool) and cp may be clobbered
   ProfileEntryHookStub::MaybeCallEntryHook(masm);
   // Clear the context before we push it when entering the internal frame.
@@ -740,7 +735,9 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
     __ LoadRoot(r4, Heap::kUndefinedValueRootIndex);
     __ mov(r5, Operand(r4));
     __ mov(r6, Operand(r4));
-    __ mov(r7, Operand(r4));
+    if (!FLAG_enable_ool_constant_pool) {
+      __ mov(r7, Operand(r4));
+    }
     if (kR9Available == 1) {
       __ mov(r9, Operand(r4));
     }
......
@@ -106,7 +106,6 @@ class StringHelper : public AllStatic {
                                      Register scratch2,
                                      Register scratch3,
                                      Register scratch4,
-                                     Register scratch5,
                                      int flags);
......
@@ -444,15 +444,16 @@ void ElementsTransitionGenerator::GenerateSmiToDouble(
   __ push(lr);
   __ ldr(r5, FieldMemOperand(r4, FixedArray::kLengthOffset));
-  // r4: source FixedArray
   // r5: number of elements (smi-tagged)
   // Allocate new FixedDoubleArray.
   // Use lr as a temporary register.
   __ mov(lr, Operand(r5, LSL, 2));
   __ add(lr, lr, Operand(FixedDoubleArray::kHeaderSize));
-  __ Allocate(lr, r6, r7, r9, &gc_required, DOUBLE_ALIGNMENT);
+  __ Allocate(lr, r6, r4, r9, &gc_required, DOUBLE_ALIGNMENT);
   // r6: destination FixedDoubleArray, not tagged as heap object.
+  __ ldr(r4, FieldMemOperand(r2, JSObject::kElementsOffset));
+  // r4: source FixedArray.
   // Set destination FixedDoubleArray's length and map.
   __ LoadRoot(r9, Heap::kFixedDoubleArrayMapRootIndex);
@@ -483,15 +484,15 @@ void ElementsTransitionGenerator::GenerateSmiToDouble(
   // Prepare for conversion loop.
   __ add(r3, r4, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
-  __ add(r7, r6, Operand(FixedDoubleArray::kHeaderSize));
-  __ add(r6, r7, Operand(r5, LSL, 2));
+  __ add(r9, r6, Operand(FixedDoubleArray::kHeaderSize));
+  __ add(r6, r9, Operand(r5, LSL, 2));
   __ mov(r4, Operand(kHoleNanLower32));
   __ mov(r5, Operand(kHoleNanUpper32));
   // r3: begin of source FixedArray element fields, not tagged
   // r4: kHoleNanLower32
   // r5: kHoleNanUpper32
   // r6: end of destination FixedDoubleArray, not tagged
-  // r7: begin of FixedDoubleArray element fields, not tagged
+  // r9: begin of FixedDoubleArray element fields, not tagged
   __ b(&entry);
@@ -514,30 +515,30 @@ void ElementsTransitionGenerator::GenerateSmiToDouble(
   // Convert and copy elements.
   __ bind(&loop);
-  __ ldr(r9, MemOperand(r3, 4, PostIndex));
-  // r9: current element
-  __ UntagAndJumpIfNotSmi(r9, r9, &convert_hole);
+  __ ldr(lr, MemOperand(r3, 4, PostIndex));
+  // lr: current element
+  __ UntagAndJumpIfNotSmi(lr, lr, &convert_hole);
   // Normal smi, convert to double and store.
-  __ vmov(s0, r9);
+  __ vmov(s0, lr);
   __ vcvt_f64_s32(d0, s0);
-  __ vstr(d0, r7, 0);
-  __ add(r7, r7, Operand(8));
+  __ vstr(d0, r9, 0);
+  __ add(r9, r9, Operand(8));
   __ b(&entry);
   // Hole found, store the-hole NaN.
   __ bind(&convert_hole);
   if (FLAG_debug_code) {
     // Restore a "smi-untagged" heap object.
-    __ SmiTag(r9);
-    __ orr(r9, r9, Operand(1));
-    __ CompareRoot(r9, Heap::kTheHoleValueRootIndex);
+    __ SmiTag(lr);
+    __ orr(lr, lr, Operand(1));
+    __ CompareRoot(lr, Heap::kTheHoleValueRootIndex);
     __ Assert(eq, kObjectFoundInSmiOnlyArray);
   }
-  __ Strd(r4, r5, MemOperand(r7, 8, PostIndex));
+  __ Strd(r4, r5, MemOperand(r9, 8, PostIndex));
   __ bind(&entry);
-  __ cmp(r7, r6);
+  __ cmp(r9, r6);
   __ b(lt, &loop);
   __ pop(lr);
@@ -577,7 +578,7 @@ void ElementsTransitionGenerator::GenerateDoubleToObject(
   // Allocate new FixedArray.
   __ mov(r0, Operand(FixedDoubleArray::kHeaderSize));
   __ add(r0, r0, Operand(r5, LSL, 1));
-  __ Allocate(r0, r6, r7, r9, &gc_required, NO_ALLOCATION_FLAGS);
+  __ Allocate(r0, r6, r3, r9, &gc_required, NO_ALLOCATION_FLAGS);
   // r6: destination FixedArray, not tagged as heap object
   // Set destination FixedDoubleArray's length and map.
   __ LoadRoot(r9, Heap::kFixedArrayMapRootIndex);
@@ -589,14 +590,12 @@ void ElementsTransitionGenerator::GenerateDoubleToObject(
   __ add(r3, r6, Operand(FixedArray::kHeaderSize));
   __ add(r6, r6, Operand(kHeapObjectTag));
   __ add(r5, r3, Operand(r5, LSL, 1));
-  __ LoadRoot(r7, Heap::kTheHoleValueRootIndex);
   __ LoadRoot(r9, Heap::kHeapNumberMapRootIndex);
   // Using offsetted addresses in r4 to fully take advantage of post-indexing.
   // r3: begin of destination FixedArray element fields, not tagged
   // r4: begin of source FixedDoubleArray element fields, not tagged, +4
   // r5: end of destination FixedArray, not tagged
   // r6: destination FixedArray
-  // r7: the-hole pointer
   // r9: heap number map
   __ b(&entry);
@@ -608,7 +607,7 @@ void ElementsTransitionGenerator::GenerateDoubleToObject(
   __ bind(&loop);
   __ ldr(r1, MemOperand(r4, 8, PostIndex));
-  // lr: current element's upper 32 bit
+  // r1: current element's upper 32 bit
   // r4: address of next element's upper 32 bit
   __ cmp(r1, Operand(kHoleNanUpper32));
   __ b(eq, &convert_hole);
@@ -631,7 +630,8 @@ void ElementsTransitionGenerator::GenerateDoubleToObject(
   // Replace the-hole NaN with the-hole pointer.
   __ bind(&convert_hole);
-  __ str(r7, MemOperand(r3, 4, PostIndex));
+  __ LoadRoot(r0, Heap::kTheHoleValueRootIndex);
+  __ str(r0, MemOperand(r3, 4, PostIndex));
   __ bind(&entry);
   __ cmp(r3, r5);
......
@@ -268,8 +268,8 @@ void Deoptimizer::EntryGenerator::Generate() {
   __ bind(&inner_push_loop);
   __ sub(r3, r3, Operand(sizeof(uint32_t)));
   __ add(r6, r2, Operand(r3));
-  __ ldr(r7, MemOperand(r6, FrameDescription::frame_content_offset()));
-  __ push(r7);
+  __ ldr(r6, MemOperand(r6, FrameDescription::frame_content_offset()));
+  __ push(r6);
   __ bind(&inner_loop_header);
   __ cmp(r3, Operand::Zero());
   __ b(ne, &inner_push_loop);  // test for gt?
@@ -315,9 +315,9 @@ void Deoptimizer::EntryGenerator::Generate() {
   __ InitializeRootRegister();
   __ pop(ip);  // remove pc
-  __ pop(r7);  // get continuation, leave pc on stack
+  __ pop(ip);  // get continuation, leave pc on stack
   __ pop(lr);
-  __ Jump(r7);
+  __ Jump(ip);
   __ stop("Unreachable.");
 }
......
@@ -64,7 +64,7 @@ const RegList kCalleeSaved =
   1 <<  4 |  //  r4 v1
   1 <<  5 |  //  r5 v2
   1 <<  6 |  //  r6 v3
-  1 <<  7 |  //  r7 v4
+  1 <<  7 |  //  r7 v4 (pp in JavaScript code)
   1 <<  8 |  //  r8 v5 (cp in JavaScript code)
   kR9Available << 9 |  //  r9 v6
   1 << 10 |  // r10 v7
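
kCalleeSaved is a bitmask with one bit per register code, so r7's new annotation above is purely documentary; the register stays callee-saved either way. A compilable sketch of the idiom, assuming RegList is a plain 32-bit mask (this mirrors the pattern, not V8's actual typedef):

    #include <cstdint>
    #include <cstdio>

    typedef uint32_t RegList;

    const int kR9Available = 1;  // as in the surrounding file

    const RegList kCalleeSaved =
        1 << 4 |             // r4
        1 << 5 |             // r5
        1 << 6 |             // r6
        1 << 7 |             // r7 (pp in JavaScript code)
        1 << 8 |             // r8 (cp in JavaScript code)
        kR9Available << 9 |  // r9
        1 << 10;             // r10

    bool IsCalleeSaved(int code) { return (kCalleeSaved >> code) & 1; }

    int main() {
      printf("r7 callee-saved: %d\n", IsCalleeSaved(7));  // prints 1
      printf("r0 callee-saved: %d\n", IsCalleeSaved(0));  // prints 0
      return 0;
    }
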
......
@@ -3962,9 +3962,8 @@ void FullCodeGenerator::EmitGetCachedArrayIndex(CallRuntime* expr) {
 void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
-  Label bailout, done, one_char_separator, long_separator,
-      non_trivial_array, not_size_one_array, loop,
-      empty_separator_loop, one_char_separator_loop,
+  Label bailout, done, one_char_separator, long_separator, non_trivial_array,
+      not_size_one_array, loop, empty_separator_loop, one_char_separator_loop,
       one_char_separator_loop_entry, long_separator_loop;
   ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 2);
@@ -3982,19 +3981,18 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
   Register string = r4;
   Register element = r5;
   Register elements_end = r6;
-  Register scratch1 = r7;
-  Register scratch2 = r9;
+  Register scratch = r9;
   // Separator operand is on the stack.
   __ pop(separator);
   // Check that the array is a JSArray.
   __ JumpIfSmi(array, &bailout);
-  __ CompareObjectType(array, scratch1, scratch2, JS_ARRAY_TYPE);
+  __ CompareObjectType(array, scratch, array_length, JS_ARRAY_TYPE);
   __ b(ne, &bailout);
   // Check that the array has fast elements.
-  __ CheckFastElements(scratch1, scratch2, &bailout);
+  __ CheckFastElements(scratch, array_length, &bailout);
   // If the array has length zero, return the empty string.
   __ ldr(array_length, FieldMemOperand(array, JSArray::kLengthOffset));
@@ -4031,11 +4029,11 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
   __ bind(&loop);
   __ ldr(string, MemOperand(element, kPointerSize, PostIndex));
   __ JumpIfSmi(string, &bailout);
-  __ ldr(scratch1, FieldMemOperand(string, HeapObject::kMapOffset));
-  __ ldrb(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
-  __ JumpIfInstanceTypeIsNotSequentialAscii(scratch1, scratch2, &bailout);
-  __ ldr(scratch1, FieldMemOperand(string, SeqOneByteString::kLengthOffset));
-  __ add(string_length, string_length, Operand(scratch1), SetCC);
+  __ ldr(scratch, FieldMemOperand(string, HeapObject::kMapOffset));
+  __ ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
+  __ JumpIfInstanceTypeIsNotSequentialAscii(scratch, scratch, &bailout);
+  __ ldr(scratch, FieldMemOperand(string, SeqOneByteString::kLengthOffset));
+  __ add(string_length, string_length, Operand(scratch), SetCC);
   __ b(vs, &bailout);
   __ cmp(element, elements_end);
   __ b(lt, &loop);
@@ -4056,23 +4054,23 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
   // Check that the separator is a flat ASCII string.
   __ JumpIfSmi(separator, &bailout);
-  __ ldr(scratch1, FieldMemOperand(separator, HeapObject::kMapOffset));
-  __ ldrb(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
-  __ JumpIfInstanceTypeIsNotSequentialAscii(scratch1, scratch2, &bailout);
+  __ ldr(scratch, FieldMemOperand(separator, HeapObject::kMapOffset));
+  __ ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
+  __ JumpIfInstanceTypeIsNotSequentialAscii(scratch, scratch, &bailout);
   // Add (separator length times array_length) - separator length to the
   // string_length to get the length of the result string. array_length is not
   // smi but the other values are, so the result is a smi
-  __ ldr(scratch1, FieldMemOperand(separator, SeqOneByteString::kLengthOffset));
-  __ sub(string_length, string_length, Operand(scratch1));
-  __ smull(scratch2, ip, array_length, scratch1);
+  __ ldr(scratch, FieldMemOperand(separator, SeqOneByteString::kLengthOffset));
+  __ sub(string_length, string_length, Operand(scratch));
+  __ smull(scratch, ip, array_length, scratch);
   // Check for smi overflow. No overflow if higher 33 bits of 64-bit result are
   // zero.
   __ cmp(ip, Operand::Zero());
   __ b(ne, &bailout);
-  __ tst(scratch2, Operand(0x80000000));
+  __ tst(scratch, Operand(0x80000000));
   __ b(ne, &bailout);
-  __ add(string_length, string_length, Operand(scratch2), SetCC);
+  __ add(string_length, string_length, Operand(scratch), SetCC);
   __ b(vs, &bailout);
   __ SmiUntag(string_length);
@@ -4089,9 +4087,9 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
   // array_length: Length of the array.
   __ AllocateAsciiString(result,
                          string_length,
-                         scratch1,
-                         scratch2,
-                         elements_end,
+                         scratch,
+                         string,        // used as scratch
+                         elements_end,  // used as scratch
                          &bailout);
   // Prepare for looping. Set up elements_end to end of the array. Set
   // result_pos to the position of the result where to write the first
@@ -4104,8 +4102,8 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
          Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
   // Check the length of the separator.
-  __ ldr(scratch1, FieldMemOperand(separator, SeqOneByteString::kLengthOffset));
-  __ cmp(scratch1, Operand(Smi::FromInt(1)));
+  __ ldr(scratch, FieldMemOperand(separator, SeqOneByteString::kLengthOffset));
+  __ cmp(scratch, Operand(Smi::FromInt(1)));
   __ b(eq, &one_char_separator);
   __ b(gt, &long_separator);
@@ -4123,7 +4121,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
   __ add(string,
          string,
          Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
-  __ CopyBytes(string, result_pos, string_length, scratch1);
+  __ CopyBytes(string, result_pos, string_length, scratch);
   __ cmp(element, elements_end);
   __ b(lt, &empty_separator_loop);  // End while (element < elements_end).
   ASSERT(result.is(r0));
@@ -4155,7 +4153,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
   __ add(string,
          string,
          Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
-  __ CopyBytes(string, result_pos, string_length, scratch1);
+  __ CopyBytes(string, result_pos, string_length, scratch);
   __ cmp(element, elements_end);
   __ b(lt, &one_char_separator_loop);  // End while (element < elements_end).
   ASSERT(result.is(r0));
@@ -4176,7 +4174,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
   __ add(string,
          separator,
          Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
-  __ CopyBytes(string, result_pos, string_length, scratch1);
+  __ CopyBytes(string, result_pos, string_length, scratch);
   __ bind(&long_separator);
   __ ldr(string, MemOperand(element, kPointerSize, PostIndex));
@@ -4185,7 +4183,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
   __ add(string,
          string,
          Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
-  __ CopyBytes(string, result_pos, string_length, scratch1);
+  __ CopyBytes(string, result_pos, string_length, scratch);
   __ cmp(element, elements_end);
   __ b(lt, &long_separator_loop);  // End while (element < elements_end).
   ASSERT(result.is(r0));
......
@@ -1394,7 +1394,7 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
   Register receiver = r2;
   Register receiver_map = r3;
   Register elements_map = r6;
-  Register elements = r7;  // Elements array of the receiver.
+  Register elements = r9;  // Elements array of the receiver.
   // r4 and r5 are used as general scratch registers.
   // Check that the key is a smi.
......
@@ -3879,9 +3879,9 @@ void LCodeGen::DoPower(LPower* instr) {
   } else if (exponent_type.IsTagged()) {
     Label no_deopt;
     __ JumpIfSmi(r2, &no_deopt);
-    __ ldr(r7, FieldMemOperand(r2, HeapObject::kMapOffset));
+    __ ldr(r6, FieldMemOperand(r2, HeapObject::kMapOffset));
     __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
-    __ cmp(r7, Operand(ip));
+    __ cmp(r6, Operand(ip));
     DeoptimizeIf(ne, instr->environment());
     __ bind(&no_deopt);
     MathPowStub stub(MathPowStub::TAGGED);
@@ -5386,24 +5386,24 @@ void LCodeGen::DoToFastProperties(LToFastProperties* instr) {
 void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
   Label materialized;
   // Registers will be used as follows:
-  // r7 = literals array.
+  // r6 = literals array.
   // r1 = regexp literal.
   // r0 = regexp literal clone.
-  // r2 and r4-r6 are used as temporaries.
+  // r2-r5 are used as temporaries.
   int literal_offset =
       FixedArray::OffsetOfElementAt(instr->hydrogen()->literal_index());
-  __ LoadHeapObject(r7, instr->hydrogen()->literals());
-  __ ldr(r1, FieldMemOperand(r7, literal_offset));
+  __ LoadHeapObject(r6, instr->hydrogen()->literals());
+  __ ldr(r1, FieldMemOperand(r6, literal_offset));
   __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
   __ cmp(r1, ip);
   __ b(ne, &materialized);
   // Create regexp literal using runtime function
   // Result will be in r0.
-  __ mov(r6, Operand(Smi::FromInt(instr->hydrogen()->literal_index())));
-  __ mov(r5, Operand(instr->hydrogen()->pattern()));
-  __ mov(r4, Operand(instr->hydrogen()->flags()));
-  __ Push(r7, r6, r5, r4);
+  __ mov(r5, Operand(Smi::FromInt(instr->hydrogen()->literal_index())));
+  __ mov(r4, Operand(instr->hydrogen()->pattern()));
+  __ mov(r3, Operand(instr->hydrogen()->flags()));
+  __ Push(r6, r5, r4, r3);
   CallRuntime(Runtime::kMaterializeRegExpLiteral, 4, instr);
   __ mov(r1, r0);
......
@@ -1337,7 +1337,7 @@ void MacroAssembler::PushTryHandler(StackHandler::Kind kind,
   STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
   STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
-  // For the JSEntry handler, we must preserve r0-r4, r5-r7 are available.
+  // For the JSEntry handler, we must preserve r0-r4, r5-r6 are available.
   // We will build up the handler from the bottom by pushing on the stack.
   // Set up the code object (r5) and the state (r6) for pushing.
   unsigned state =
@@ -1348,9 +1348,9 @@ void MacroAssembler::PushTryHandler(StackHandler::Kind kind,
   // Push the frame pointer, context, state, and code object.
   if (kind == StackHandler::JS_ENTRY) {
-    mov(r7, Operand(Smi::FromInt(0)));  // Indicates no context.
+    mov(cp, Operand(Smi::FromInt(0)));  // Indicates no context.
     mov(ip, Operand::Zero());  // NULL frame pointer.
-    stm(db_w, sp, r5.bit() | r6.bit() | r7.bit() | ip.bit());
+    stm(db_w, sp, r5.bit() | r6.bit() | cp.bit() | ip.bit());
   } else {
     stm(db_w, sp, r5.bit() | r6.bit() | cp.bit() | fp.bit());
   }
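
The JS_ENTRY path previously parked the "no context" marker in r7; it now reuses cp, which is safe here because a JSEntry frame has no JavaScript context to preserve. A sketch of the handler slot layout implied by the STATIC_ASSERTs above (the context and fp offsets come from the source; the first three slots are assumptions for illustration):

    #include <stdint.h>

    const int kPointerSize = 4;

    // Hypothetical layout; only the context and fp offsets are confirmed
    // by the STATIC_ASSERTs in the surrounding code.
    struct StackHandlerLayout {
      uint32_t next;     // 0 * kPointerSize (assumed)
      uint32_t code;     // 1 * kPointerSize (assumed, r5 above)
      uint32_t state;    // 2 * kPointerSize (assumed, r6 above)
      uint32_t context;  // 3 * kPointerSize: Smi 0 in cp marks "no context"
      uint32_t fp;       // 4 * kPointerSize: NULL frame pointer for JS_ENTRY
    };
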
@@ -2305,13 +2305,15 @@ void MacroAssembler::CallApiFunctionAndReturn(
       ExternalReference::handle_scope_level_address(isolate()),
       next_address);
+  ASSERT(!thunk_last_arg.is(r3));
   // Allocate HandleScope in callee-save registers.
-  mov(r7, Operand(next_address));
-  ldr(r4, MemOperand(r7, kNextOffset));
-  ldr(r5, MemOperand(r7, kLimitOffset));
-  ldr(r6, MemOperand(r7, kLevelOffset));
+  mov(r9, Operand(next_address));
+  ldr(r4, MemOperand(r9, kNextOffset));
+  ldr(r5, MemOperand(r9, kLimitOffset));
+  ldr(r6, MemOperand(r9, kLevelOffset));
   add(r6, r6, Operand(1));
-  str(r6, MemOperand(r7, kLevelOffset));
+  str(r6, MemOperand(r9, kLevelOffset));
   if (FLAG_log_timer_events) {
     FrameScope frame(this, StackFrame::MANUAL);
@@ -2322,7 +2324,6 @@ void MacroAssembler::CallApiFunctionAndReturn(
     PopSafepointRegisters();
   }
-  ASSERT(!thunk_last_arg.is(r3));
   Label profiler_disabled;
   Label end_profiler_check;
   bool* is_profiling_flag =
@@ -2368,15 +2369,15 @@ void MacroAssembler::CallApiFunctionAndReturn(
   bind(&return_value_loaded);
   // No more valid handles (the result handle was the last one). Restore
   // previous handle scope.
-  str(r4, MemOperand(r7, kNextOffset));
+  str(r4, MemOperand(r9, kNextOffset));
   if (emit_debug_code()) {
-    ldr(r1, MemOperand(r7, kLevelOffset));
+    ldr(r1, MemOperand(r9, kLevelOffset));
     cmp(r1, r6);
     Check(eq, kUnexpectedLevelAfterReturnFromApiCall);
   }
   sub(r6, r6, Operand(1));
-  str(r6, MemOperand(r7, kLevelOffset));
-  ldr(ip, MemOperand(r7, kLimitOffset));
+  str(r6, MemOperand(r9, kLevelOffset));
+  ldr(ip, MemOperand(r9, kLimitOffset));
   cmp(r5, ip);
   b(ne, &delete_allocated_handles);
@@ -2409,7 +2410,7 @@ void MacroAssembler::CallApiFunctionAndReturn(
   // HandleScope limit has changed. Delete allocated extensions.
   bind(&delete_allocated_handles);
-  str(r5, MemOperand(r7, kLimitOffset));
+  str(r5, MemOperand(r9, kLimitOffset));
   mov(r4, r0);
   PrepareCallCFunction(1, r5);
   mov(r0, Operand(ExternalReference::isolate_address(isolate())));
......
@@ -45,8 +45,9 @@ inline MemOperand FieldMemOperand(Register object, int offset) {
 // Give alias names to registers
-const Register cp = { 8 };  // JavaScript context pointer
-const Register kRootRegister = { 10 };  // Roots array pointer.
+const Register pp = { kRegister_r7_Code };  // Constant pool pointer.
+const Register cp = { kRegister_r8_Code };  // JavaScript context pointer.
+const Register kRootRegister = { kRegister_r10_Code };  // Roots array pointer.
 // Flags used for AllocateHeapNumber
 enum TaggingMode {
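
The aliases above are aggregate initializations of the Register struct, so pp, cp, and kRootRegister are alternate names for the same physical registers rather than new storage. A minimal sketch, assuming Register is a POD wrapper around the numeric register code, as the kRegister_*_Code names suggest (not the real header):

    struct Register {
      int code_;
      bool is(Register other) const { return code_ == other.code_; }
    };

    const int kRegister_r7_Code = 7;
    const int kRegister_r8_Code = 8;
    const int kRegister_r10_Code = 10;

    const Register pp = { kRegister_r7_Code };              // constant pool pointer
    const Register cp = { kRegister_r8_Code };              // JavaScript context
    const Register kRootRegister = { kRegister_r10_Code };  // roots array
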
......
@@ -874,8 +874,8 @@ static void GenerateFastApiDirectCall(MacroAssembler* masm,
   // Store call data.
   __ str(r6, MemOperand(sp, 3 * kPointerSize));
   // Store isolate.
-  __ mov(r7, Operand(ExternalReference::isolate_address(masm->isolate())));
-  __ str(r7, MemOperand(sp, 4 * kPointerSize));
+  __ mov(r5, Operand(ExternalReference::isolate_address(masm->isolate())));
+  __ str(r5, MemOperand(sp, 4 * kPointerSize));
   // Store ReturnValue default and ReturnValue.
   __ LoadRoot(r5, Heap::kUndefinedValueRootIndex);
   __ str(r5, MemOperand(sp, 5 * kPointerSize));
@@ -1855,15 +1855,15 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(
     if (FLAG_smi_only_arrays && !FLAG_trace_elements_transitions) {
       Label fast_object, not_fast_object;
-      __ CheckFastObjectElements(r3, r7, &not_fast_object);
+      __ CheckFastObjectElements(r3, r9, &not_fast_object);
       __ jmp(&fast_object);
       // In case of fast smi-only, convert to fast object, otherwise bail out.
       __ bind(&not_fast_object);
-      __ CheckFastSmiElements(r3, r7, &call_builtin);
-      __ ldr(r7, FieldMemOperand(r4, HeapObject::kMapOffset));
+      __ CheckFastSmiElements(r3, r9, &call_builtin);
+      __ ldr(r9, FieldMemOperand(r4, HeapObject::kMapOffset));
       __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
-      __ cmp(r7, ip);
+      __ cmp(r9, ip);
       __ b(eq, &call_builtin);
       // edx: receiver
       // r3: map
@@ -1871,7 +1871,7 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(
       __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
                                              FAST_ELEMENTS,
                                              r3,
-                                             r7,
+                                             r9,
                                              &try_holey_map);
       __ mov(r2, receiver);
       ElementsTransitionGenerator::
@@ -1884,7 +1884,7 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(
       __ LoadTransitionedArrayMapConditional(FAST_HOLEY_SMI_ELEMENTS,
                                              FAST_HOLEY_ELEMENTS,
                                              r3,
-                                             r7,
+                                             r9,
                                              &call_builtin);
       __ mov(r2, receiver);
       ElementsTransitionGenerator::
@@ -1917,7 +1917,6 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(
       __ bind(&attempt_to_grow_elements);
       // r0: array's length + 1.
-      // r4: elements' length.
       if (!FLAG_inline_new) {
         __ b(&call_builtin);
@@ -1928,8 +1927,8 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(
       // the new element is non-Smi. For now, delegate to the builtin.
       Label no_fast_elements_check;
       __ JumpIfSmi(r2, &no_fast_elements_check);
-      __ ldr(r7, FieldMemOperand(receiver, HeapObject::kMapOffset));
-      __ CheckFastObjectElements(r7, r7, &call_builtin);
+      __ ldr(r9, FieldMemOperand(receiver, HeapObject::kMapOffset));
+      __ CheckFastObjectElements(r9, r9, &call_builtin);
       __ bind(&no_fast_elements_check);
       ExternalReference new_space_allocation_top =
@@ -1941,8 +1940,8 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(
       // Load top and check if it is the end of elements.
       __ add(end_elements, elements, Operand::PointerOffsetFromSmiKey(r0));
       __ add(end_elements, end_elements, Operand(kEndElementsOffset));
-      __ mov(r7, Operand(new_space_allocation_top));
-      __ ldr(r3, MemOperand(r7));
+      __ mov(r4, Operand(new_space_allocation_top));
+      __ ldr(r3, MemOperand(r4));
       __ cmp(end_elements, r3);
       __ b(ne, &call_builtin);
@@ -1954,7 +1953,7 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(
       // We fit and could grow elements.
       // Update new_space_allocation_top.
-      __ str(r3, MemOperand(r7));
+      __ str(r3, MemOperand(r4));
       // Push the argument.
       __ str(r2, MemOperand(end_elements));
       // Fill the rest with holes.
@@ -1965,6 +1964,7 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(
       // Update elements' and array's sizes.
       __ str(r0, FieldMemOperand(receiver, JSArray::kLengthOffset));
+      __ ldr(r4, FieldMemOperand(elements, FixedArray::kLengthOffset));
       __ add(r4, r4, Operand(Smi::FromInt(kAllocationDelta)));
       __ str(r4, FieldMemOperand(elements, FixedArray::kLengthOffset));
......
@@ -826,6 +826,16 @@ DEFINE_implication(print_all_code, trace_codegen)
 #endif
 #endif
+//
+// Read-only flags
+//
+#undef FLAG
+#define FLAG FLAG_READONLY
+
+// assembler-arm.h
+DEFINE_bool(enable_ool_constant_pool, false,
+            "enable use of out-of-line constant pools (ARM only)")
+
 // Cleanup...
 #undef FLAG_FULL
 #undef FLAG_READONLY
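
Declaring enable_ool_constant_pool in the read-only section is what makes the in-class initializer of Register::kMaxNumAllocatableRegisters legal: under FLAG_READONLY the flag is a compile-time constant, not a mutable global. A hedged sketch of the mechanism (the macro spelling is an assumption, not V8's exact definition):

    // Approximation of the read-only flag expansion.
    #define DEFINE_READONLY_BOOL(nam, def) static const bool FLAG_##nam = def;

    DEFINE_READONLY_BOOL(enable_ool_constant_pool, false)

    struct Register {
      // Legal as an in-class initializer only because the flag is a
      // compile-time constant; a runtime-settable flag could not be used here.
      static const int kMaxNumAllocatableRegisters =
          FLAG_enable_ool_constant_pool ? 7 : 8;
    };
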
......