Commit 16a89e82 authored by danno@chromium.org's avatar danno@chromium.org

MIPS: port all relevant commits since the new-gc was landed.

This ports the following list of commits to mips. It is provided in this form
so that the mips port of the new-gc can be easily tested on your end.

This must be downloaded or landed after http://codereview.chromium.org/8106002
This is based on r9585. With these two mips commits, all tests are passing.

9319    Fix a harmless assert and a genuine bug in the GC-safety of stub generation ...
9329    ARM: Pregenerate some stubs that we call from other stubs.
9335    Initialize pre-allocated fields of JSObject with undefined.
9344    Put back the asserts in RememberedSetHelper, but correct this time
9370    Reorganize object type enum, such that proxies are no longer in the middle...
9392    Basic support for tracking smi-only arrays on ia32.
9402    Notify collector about lazily deoptimized code objects.
9411    Porting r9392 to arm (smi-only arrays).
9418    Small refactor to KeyedStoreIC::GenerateGeneric to make it slightly faster.
9447    Tighten up assertions checking GC-safety of stub calls.
9449    Record function call targets, use them for inlining.
9459    Make sure we don't flush the pregenerated stubs, since they need to be ...
9461    Fix the build on ARM
9466    Move the is_pregenerated flag so it does not overlap other flags....
9468    Fix the no-VFP3 build on ARM.
9475    Pass correct anchor_slot for EMBEDDED_OBJECT pointers from ...
9490    Adjust assertions in UpdateSlot to match UpdatePointer in PointersUpdatingVisitor.
9511    Clean list of external references from internal objects like the hole value.

9514    Simplify compares in KeyedStoreIC::GenerateGeneric.
9531    Porting r9456 to arm (Optimize KeyedStoreGeneric for Smi arrays).
9541    Fix load of potentially eval-shadowed let bindings.
9542    Fast allocation of block contexts.
9553    Activate smi-only optimizations for large array literals.

9575    Move declaration of SerializedScopeInfo from variables.h to objects.h
9577    Track elements_kind transitions in KeyedStoreICs.
9583    Fixing a bug in arm as pointed out in issue 1759.
9584    Refactor and fix polymorphic KeyedStoreIC creation.

BUG=
TEST=

Review URL: http://codereview.chromium.org/8112008
Patch from Paul Lind <pling44@gmail.com>.

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@9601 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
parent 4f7d11f9
......@@ -271,9 +271,9 @@ void RelocInfo::Visit(ObjectVisitor* visitor) {
#ifdef ENABLE_DEBUGGER_SUPPORT
// TODO(isolates): Get a cached isolate below.
} else if (((RelocInfo::IsJSReturn(mode) &&
IsPatchedReturnSequence()) ||
(RelocInfo::IsDebugBreakSlot(mode) &&
IsPatchedDebugBreakSlotSequence())) &&
Isolate::Current()->debug()->has_break_points()) {
visitor->VisitDebugTarget(this);
#endif
......
......@@ -770,20 +770,23 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ sll(t0, a3, kPointerSizeLog2);
__ addu(t6, t4, t0); // End of object.
ASSERT_EQ(3 * kPointerSize, JSObject::kHeaderSize);
-{ Label loop, entry;
-if (count_constructions) {
-// To allow for truncation.
-__ LoadRoot(t7, Heap::kOnePointerFillerMapRootIndex);
-} else {
-__ LoadRoot(t7, Heap::kUndefinedValueRootIndex);
+__ LoadRoot(t7, Heap::kUndefinedValueRootIndex);
+if (count_constructions) {
+__ lw(a0, FieldMemOperand(a2, Map::kInstanceSizesOffset));
+__ Ext(a0, a0, Map::kPreAllocatedPropertyFieldsByte * kBitsPerByte,
+kBitsPerByte);
+__ sll(t0, a0, kPointerSizeLog2);
+__ addu(a0, t5, t0);
+// a0: offset of first field after pre-allocated fields
+if (FLAG_debug_code) {
+__ Assert(le, "Unexpected number of pre-allocated property fields.",
+a0, Operand(t6));
}
-__ jmp(&entry);
-__ bind(&loop);
-__ sw(t7, MemOperand(t5, 0));
-__ addiu(t5, t5, kPointerSize);
-__ bind(&entry);
-__ Branch(&loop, Uless, t5, Operand(t6));
+__ InitializeFieldsWithFiller(t5, a0, t7);
+// To allow for truncation.
+__ LoadRoot(t7, Heap::kOnePointerFillerMapRootIndex);
}
+__ InitializeFieldsWithFiller(t5, t6, t7);
// Add the object tag to make the JSObject real, so that we can continue
// and jump into the continuation code at any time from now on. Any
......@@ -800,14 +803,12 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// The field instance sizes contains both pre-allocated property fields
// and in-object properties.
__ lw(a0, FieldMemOperand(a2, Map::kInstanceSizesOffset));
-__ And(t6,
-a0,
-Operand(0x000000FF << Map::kPreAllocatedPropertyFieldsByte * 8));
-__ srl(t0, t6, Map::kPreAllocatedPropertyFieldsByte * 8);
-__ Addu(a3, a3, Operand(t0));
-__ And(t6, a0, Operand(0x000000FF << Map::kInObjectPropertiesByte * 8));
-__ srl(t0, t6, Map::kInObjectPropertiesByte * 8);
-__ subu(a3, a3, t0);
+__ Ext(t6, a0, Map::kPreAllocatedPropertyFieldsByte * kBitsPerByte,
+kBitsPerByte);
+__ Addu(a3, a3, Operand(t6));
+__ Ext(t6, a0, Map::kInObjectPropertiesByte * kBitsPerByte,
+kBitsPerByte);
+__ subu(a3, a3, t6);
// Done if no extra properties are to be allocated.
__ Branch(&allocated, eq, a3, Operand(zero_reg));
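
Note: the Ext-based sequence above replaces an And/srl pair with a single MIPS bit-field extract; both read one byte out of the packed instance-sizes word. A minimal C++ sketch of the equivalence (helper names are illustrative, not V8 code):

#include <cassert>
#include <cstdint>
#include <initializer_list>

// Old form: mask the byte in place, then shift it down (And + srl).
uint32_t ExtractByteMaskShift(uint32_t word, int byte_index) {
  uint32_t mask = 0x000000FFu << (byte_index * 8);
  return (word & mask) >> (byte_index * 8);
}

// New form: Ext(rt, rs, pos, size) copies bits [pos, pos + size) of rs
// into the low bits of rt; here pos = byte_index * kBitsPerByte, size = 8.
uint32_t ExtractByteExt(uint32_t word, int byte_index) {
  return (word >> (byte_index * 8)) & 0xFFu;
}

int main() {
  for (uint32_t w : {0u, 0x12345678u, 0xFFFFFFFFu}) {
    for (int b = 0; b < 4; b++) {
      assert(ExtractByteMaskShift(w, b) == ExtractByteExt(w, b));
    }
  }
  return 0;
}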
......@@ -1392,7 +1393,8 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
const int kFunctionOffset = 4 * kPointerSize;
{
-FrameScope frame_scope(masm, StackFrame::INTERNAL);
+FrameScope scope(masm, StackFrame::INTERNAL);
__ lw(a0, MemOperand(fp, kFunctionOffset)); // Get the function.
__ push(a0);
__ lw(a0, MemOperand(fp, kArgsOffset)); // Get the args array.
......@@ -1526,7 +1528,8 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
__ InvokeFunction(a1, actual, CALL_FUNCTION,
NullCallWrapper(), CALL_AS_METHOD);
-frame_scope.GenerateLeaveFrame();
+scope.GenerateLeaveFrame();
__ Ret(USE_DELAY_SLOT);
__ Addu(sp, sp, Operand(3 * kPointerSize)); // In delay slot.
......@@ -1539,7 +1542,6 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
__ GetBuiltinEntry(a3, Builtins::CALL_FUNCTION_PROXY);
__ Call(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
RelocInfo::CODE_TARGET);
-// Tear down the internal frame and remove function, receiver and args.
}
......
This diff is collapsed.
......@@ -66,7 +66,7 @@ class StoreBufferOverflowStub: public CodeStub {
void Generate(MacroAssembler* masm);
-virtual bool CompilingCallsToThisStubIsGCSafe() { return true; }
+virtual bool IsPregenerated();
static void GenerateFixedRegStubsAheadOfTime();
virtual bool SometimesSetsUpAFrame() { return false; }
......@@ -350,7 +350,7 @@ class WriteInt32ToHeapNumberStub : public CodeStub {
ASSERT(SignRegisterBits::is_valid(sign_.code()));
}
-bool CompilingCallsToThisStubIsGCSafe();
+bool IsPregenerated();
static void GenerateFixedRegStubsAheadOfTime();
private:
......@@ -363,13 +363,15 @@ class WriteInt32ToHeapNumberStub : public CodeStub {
class IntRegisterBits: public BitField<int, 0, 4> {};
class HeapNumberRegisterBits: public BitField<int, 4, 4> {};
class ScratchRegisterBits: public BitField<int, 8, 4> {};
class SignRegisterBits: public BitField<int, 12, 4> {};
Major MajorKey() { return WriteInt32ToHeapNumber; }
int MinorKey() {
// Encode the parameters in a unique 16 bit value.
return IntRegisterBits::encode(the_int_.code())
| HeapNumberRegisterBits::encode(the_heap_number_.code())
-| ScratchRegisterBits::encode(scratch_.code());
+| ScratchRegisterBits::encode(scratch_.code())
+| SignRegisterBits::encode(sign_.code());
}
void Generate(MacroAssembler* masm);
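
Note: the four BitField slots pack the register codes into disjoint 4-bit nibbles of the 16-bit key; a sketch of what the encode chain computes (assuming register codes fit in 4 bits, which the ASSERTs in the constructor check):

#include <cstdint>

// BitField<int, pos, 4>::encode(v) is essentially (v << pos) for values
// that fit in 4 bits, so MinorKey() ORs four codes into disjoint nibbles.
uint32_t MinorKeySketch(int int_reg, int heap_number_reg,
                        int scratch_reg, int sign_reg) {
  return (uint32_t(int_reg)         << 0)    // IntRegisterBits: bits 0..3.
       | (uint32_t(heap_number_reg) << 4)    // HeapNumberRegisterBits: 4..7.
       | (uint32_t(scratch_reg)     << 8)    // ScratchRegisterBits: 8..11.
       | (uint32_t(sign_reg)        << 12);  // SignRegisterBits: 12..15.
}

Without the SignRegisterBits term, two stubs differing only in the sign register would presumably collide on the same minor key, which matters once stubs are pregenerated and looked up by key.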
......@@ -425,7 +427,7 @@ class RecordWriteStub: public CodeStub {
INCREMENTAL_COMPACTION
};
-virtual bool CompilingCallsToThisStubIsGCSafe();
+virtual bool IsPregenerated();
static void GenerateFixedRegStubsAheadOfTime();
virtual bool SometimesSetsUpAFrame() { return false; }
......
This diff is collapsed.
......@@ -1205,116 +1205,144 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
// -- a2 : receiver
// -- ra : return address
// -----------------------------------
-Label slow, fast, array, extra, exit;
+Label slow, array, extra, check_if_double_array;
+Label fast_object_with_map_check, fast_object_without_map_check;
+Label fast_double_with_map_check, fast_double_without_map_check;
// Register usage.
Register value = a0;
Register key = a1;
Register receiver = a2;
Register elements = a3; // Elements array of the receiver.
-// t0 is used as ip in the arm version.
-// t3-t4 are used as temporaries.
+Register elements_map = t2;
+Register receiver_map = t3;
+// t0 and t1 are used as general scratch registers.
// Check that the key is a smi.
__ JumpIfNotSmi(key, &slow);
// Check that the object isn't a smi.
__ JumpIfSmi(receiver, &slow);
// Get the map of the object.
-__ lw(t3, FieldMemOperand(receiver, HeapObject::kMapOffset));
+__ lw(receiver_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
// Check that the receiver does not require access checks. We need
// to do this because this generic stub does not perform map checks.
-__ lbu(t0, FieldMemOperand(t3, Map::kBitFieldOffset));
+__ lbu(t0, FieldMemOperand(receiver_map, Map::kBitFieldOffset));
__ And(t0, t0, Operand(1 << Map::kIsAccessCheckNeeded));
__ Branch(&slow, ne, t0, Operand(zero_reg));
// Check if the object is a JS array or not.
-__ lbu(t3, FieldMemOperand(t3, Map::kInstanceTypeOffset));
-__ Branch(&array, eq, t3, Operand(JS_ARRAY_TYPE));
+__ lbu(t0, FieldMemOperand(receiver_map, Map::kInstanceTypeOffset));
+__ Branch(&array, eq, t0, Operand(JS_ARRAY_TYPE));
// Check that the object is some kind of JSObject.
-__ Branch(&slow, lt, t3, Operand(FIRST_JS_RECEIVER_TYPE));
-__ Branch(&slow, eq, t3, Operand(JS_PROXY_TYPE));
-__ Branch(&slow, eq, t3, Operand(JS_FUNCTION_PROXY_TYPE));
+__ Branch(&slow, lt, t0, Operand(FIRST_JS_OBJECT_TYPE));
// Object case: Check key against length in the elements array.
__ lw(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
-// Check that the object is in fast mode and writable.
-__ lw(t3, FieldMemOperand(elements, HeapObject::kMapOffset));
-__ LoadRoot(t0, Heap::kFixedArrayMapRootIndex);
-__ Branch(&slow, ne, t3, Operand(t0));
// Check array bounds. Both the key and the length of FixedArray are smis.
__ lw(t0, FieldMemOperand(elements, FixedArray::kLengthOffset));
-__ Branch(&fast, lo, key, Operand(t0));
-// Fall thru to slow if un-tagged index >= length.
+__ Branch(&fast_object_with_map_check, lo, key, Operand(t0));
// Slow case, handle jump to runtime.
__ bind(&slow);
// Entry registers are intact.
// a0: value.
// a1: key.
// a2: receiver.
GenerateRuntimeSetProperty(masm, strict_mode);
// Extra capacity case: Check if there is extra capacity to
// perform the store and update the length. Used for adding one
// element to the array by writing to array[array.length].
__ bind(&extra);
-// Condition code from comparing key and array length is still available.
+// Only support writing to array[array.length].
__ Branch(&slow, ne, key, Operand(t0));
// Check for room in the elements backing store.
// Both the key and the length of FixedArray are smis.
__ lw(t0, FieldMemOperand(elements, FixedArray::kLengthOffset));
__ Branch(&slow, hs, key, Operand(t0));
+__ lw(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
+__ Branch(&check_if_double_array, ne, elements_map,
+Operand(masm->isolate()->factory()->fixed_array_map()));
// Calculate key + 1 as smi.
-STATIC_ASSERT(0 == kSmiTag);
-__ Addu(t3, key, Operand(Smi::FromInt(1)));
-__ sw(t3, FieldMemOperand(receiver, JSArray::kLengthOffset));
-__ Branch(&fast);
+STATIC_ASSERT(kSmiTag == 0);
+__ Addu(t0, key, Operand(Smi::FromInt(1)));
+__ sw(t0, FieldMemOperand(receiver, JSArray::kLengthOffset));
+__ Branch(&fast_object_without_map_check);
+__ bind(&check_if_double_array);
+__ Branch(&slow, ne, elements_map,
+Operand(masm->isolate()->factory()->fixed_double_array_map()));
+// Add 1 to key, and go to common element store code for doubles.
+STATIC_ASSERT(kSmiTag == 0);
+__ Addu(t0, key, Operand(Smi::FromInt(1)));
+__ sw(t0, FieldMemOperand(receiver, JSArray::kLengthOffset));
+__ jmp(&fast_double_without_map_check);
// Array case: Get the length and the elements array from the JS
// array. Check that the array is in fast mode (and writable); if it
// is the length is always a smi.
__ bind(&array);
__ lw(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
-__ lw(t3, FieldMemOperand(elements, HeapObject::kMapOffset));
-__ LoadRoot(t0, Heap::kFixedArrayMapRootIndex);
-__ Branch(&slow, ne, t3, Operand(t0));
// Check the key against the length in the array.
__ lw(t0, FieldMemOperand(receiver, JSArray::kLengthOffset));
__ Branch(&extra, hs, key, Operand(t0));
// Fall through to fast case.
-__ bind(&fast);
+__ bind(&fast_object_with_map_check);
Register scratch_value = t0;
Register address = t1;
// Fast case, store the value to the elements backing store.
+__ lw(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
+__ Branch(&fast_double_with_map_check, ne, elements_map,
+Operand(masm->isolate()->factory()->fixed_array_map()));
+__ bind(&fast_object_without_map_check);
+// Smi stores don't require further checks.
+Label non_smi_value;
+__ JumpIfNotSmi(value, &non_smi_value);
+// It's irrelevant whether array is smi-only or not when writing a smi.
__ Addu(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
__ sll(scratch_value, key, kPointerSizeLog2 - kSmiTagSize);
__ Addu(address, address, scratch_value);
__ sw(value, MemOperand(address));
-// Skip write barrier if the written value is a smi.
-__ JumpIfSmi(value, &exit);
+__ Ret(USE_DELAY_SLOT);
+__ mov(v0, value);
+__ bind(&non_smi_value);
+// Escape to slow case when writing non-smi into smi-only array.
+__ CheckFastObjectElements(receiver_map, scratch_value, &slow);
+// Fast elements array, store the value to the elements backing store.
+__ Addu(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+__ sll(scratch_value, key, kPointerSizeLog2 - kSmiTagSize);
+__ Addu(address, address, scratch_value);
+__ sw(value, MemOperand(address));
// Update write barrier for the elements array address.
-__ mov(scratch_value, value); // Preserve the value which is returned.
+__ mov(v0, value); // Preserve the value which is returned.
__ RecordWrite(elements,
address,
-scratch_value,
+value,
kRAHasNotBeenSaved,
kDontSaveFPRegs,
EMIT_REMEMBERED_SET,
OMIT_SMI_CHECK);
-__ bind(&exit);
-__ mov(v0, a0); // Return the value written.
__ Ret();
+__ bind(&fast_double_with_map_check);
+// Check for fast double array case. If this fails, call through to the
+// runtime.
+__ Branch(&slow, ne, elements_map,
+Operand(masm->isolate()->factory()->fixed_double_array_map()));
+__ bind(&fast_double_without_map_check);
+__ StoreNumberToDoubleElements(value,
+key,
+receiver,
+elements,
+t0,
+t1,
+t2,
+t3,
+&slow);
+__ Ret(USE_DELAY_SLOT);
+__ mov(v0, value);
}
......
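
Note: the fast paths above turn a smi key directly into a byte offset; a sketch of the address arithmetic, assuming the 32-bit MIPS values of the constants (4-byte pointers, smi tag size 1, heap-object tag 1, 8-byte FixedArray header):

#include <cstdint>

// address = elements + FixedArray::kHeaderSize - kHeapObjectTag
//           + (key << (kPointerSizeLog2 - kSmiTagSize))
// A smi key already holds value << 1, so one more left shift scales it to
// value * 4, the byte offset of the element in the backing store.
const int kSmiTagSize = 1;
const int kPointerSizeLog2 = 2;
const int kHeapObjectTag = 1;
const int kFixedArrayHeaderSize = 8;  // Map word + length word.

uintptr_t ElementAddress(uintptr_t tagged_elements, int32_t smi_key) {
  uintptr_t untagged_base =
      tagged_elements + kFixedArrayHeaderSize - kHeapObjectTag;
  return untagged_base +
         (static_cast<uintptr_t>(smi_key) << (kPointerSizeLog2 - kSmiTagSize));
}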
......@@ -279,11 +279,18 @@ void MacroAssembler::RecordWrite(Register object,
}
-void MacroAssembler::RememberedSetHelper(Register address,
+void MacroAssembler::RememberedSetHelper(Register object, // For debug tests.
+Register address,
Register scratch,
SaveFPRegsMode fp_mode,
RememberedSetFinalAction and_then) {
Label done;
+if (FLAG_debug_code) {
+Label ok;
+JumpIfNotInNewSpace(object, scratch, &ok);
+stop("Remembered set pointer is in new space");
+bind(&ok);
+}
// Load store buffer top.
ExternalReference store_buffer =
ExternalReference::store_buffer_top(isolate());
......@@ -301,7 +308,7 @@ void MacroAssembler::RememberedSetHelper(Register address,
Branch(&done, eq, t8, Operand(zero_reg));
} else {
ASSERT(and_then == kReturnAtEnd);
-Ret(ne, t8, Operand(zero_reg));
+Ret(eq, t8, Operand(zero_reg));
}
push(ra);
StoreBufferOverflowStub store_buffer_overflow =
......@@ -809,7 +816,7 @@ void MacroAssembler::MultiPushFPU(RegList regs) {
int16_t stack_offset = num_to_push * kDoubleSize;
Subu(sp, sp, Operand(stack_offset));
-for (int16_t i = kNumRegisters; i > 0; i--) {
+for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
if ((regs & (1 << i)) != 0) {
stack_offset -= kDoubleSize;
sdc1(FPURegister::from_code(i), MemOperand(sp, stack_offset));
......@@ -851,7 +858,7 @@ void MacroAssembler::MultiPopReversedFPU(RegList regs) {
CpuFeatures::Scope scope(FPU);
int16_t stack_offset = 0;
-for (int16_t i = kNumRegisters; i > 0; i--) {
+for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
if ((regs & (1 << i)) != 0) {
ldc1(FPURegister::from_code(i), MemOperand(sp, stack_offset));
stack_offset += kDoubleSize;
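
Note: the one-line loop changes in MultiPushFPU and MultiPopReversedFPU above fix an off-by-one: with kNumRegisters = 32 the old bounds scanned bit positions 32 down to 1, skipping register f0 and reading an out-of-range bit. A standalone demonstration (a 64-bit mask keeps the old out-of-range iteration well-defined):

#include <cstdint>
#include <cstdio>

int main() {
  const int kNumRegisters = 32;
  uint64_t regs = (1ull << 0) | (1ull << 31);    // Request f0 and f31.

  int old_hits = 0, new_hits = 0;
  for (int i = kNumRegisters; i > 0; i--) {      // Old (buggy) bounds: 32..1.
    if (regs & (1ull << i)) old_hits++;
  }
  for (int i = kNumRegisters - 1; i >= 0; i--) { // Fixed bounds: 31..0.
    if (regs & (1ull << i)) new_hits++;
  }

  // Prints "old=1 fixed=2": the old loop never saved/restored f0.
  printf("old=%d fixed=%d\n", old_hits, new_hits);
  return 0;
}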
......@@ -3203,6 +3210,19 @@ void MacroAssembler::CopyBytes(Register src,
}
+void MacroAssembler::InitializeFieldsWithFiller(Register start_offset,
+Register end_offset,
+Register filler) {
+Label loop, entry;
+Branch(&entry);
+bind(&loop);
+sw(filler, MemOperand(start_offset));
+Addu(start_offset, start_offset, kPointerSize);
+bind(&entry);
+Branch(&loop, lt, start_offset, Operand(end_offset));
+}
void MacroAssembler::CheckFastElements(Register map,
Register scratch,
Label* fail) {
......@@ -3213,6 +3233,117 @@ void MacroAssembler::CheckFastElements(Register map,
}
+void MacroAssembler::CheckFastObjectElements(Register map,
+Register scratch,
+Label* fail) {
+STATIC_ASSERT(FAST_SMI_ONLY_ELEMENTS == 0);
+STATIC_ASSERT(FAST_ELEMENTS == 1);
+lbu(scratch, FieldMemOperand(map, Map::kBitField2Offset));
+Branch(fail, ls, scratch,
+Operand(Map::kMaximumBitField2FastSmiOnlyElementValue));
+Branch(fail, hi, scratch,
+Operand(Map::kMaximumBitField2FastElementValue));
+}
+void MacroAssembler::CheckFastSmiOnlyElements(Register map,
+Register scratch,
+Label* fail) {
+STATIC_ASSERT(FAST_SMI_ONLY_ELEMENTS == 0);
+lbu(scratch, FieldMemOperand(map, Map::kBitField2Offset));
+Branch(fail, hi, scratch,
+Operand(Map::kMaximumBitField2FastSmiOnlyElementValue));
+}
+void MacroAssembler::StoreNumberToDoubleElements(Register value_reg,
+Register key_reg,
+Register receiver_reg,
+Register elements_reg,
+Register scratch1,
+Register scratch2,
+Register scratch3,
+Register scratch4,
+Label* fail) {
+Label smi_value, maybe_nan, have_double_value, is_nan, done;
+Register mantissa_reg = scratch2;
+Register exponent_reg = scratch3;
+// Handle smi values specially.
+JumpIfSmi(value_reg, &smi_value);
+// Ensure that the object is a heap number.
+CheckMap(value_reg,
+scratch1,
+isolate()->factory()->heap_number_map(),
+fail,
+DONT_DO_SMI_CHECK);
+// Check for NaN: all NaN values have a value greater (signed) than 0x7ff00000
+// in the exponent.
+li(scratch1, Operand(kNaNOrInfinityLowerBoundUpper32));
+lw(exponent_reg, FieldMemOperand(value_reg, HeapNumber::kExponentOffset));
+Branch(&maybe_nan, ge, exponent_reg, Operand(scratch1));
+lw(mantissa_reg, FieldMemOperand(value_reg, HeapNumber::kMantissaOffset));
+bind(&have_double_value);
+sll(scratch1, key_reg, kDoubleSizeLog2 - kSmiTagSize);
+Addu(scratch1, scratch1, elements_reg);
+sw(mantissa_reg, FieldMemOperand(scratch1, FixedDoubleArray::kHeaderSize));
+uint32_t offset = FixedDoubleArray::kHeaderSize + sizeof(kHoleNanLower32);
+sw(exponent_reg, FieldMemOperand(scratch1, offset));
+jmp(&done);
+bind(&maybe_nan);
+// Could be NaN or Infinity. If fraction is not zero, it's NaN, otherwise
+// it's an Infinity, and the non-NaN code path applies.
+Branch(&is_nan, gt, exponent_reg, Operand(scratch1));
+lw(mantissa_reg, FieldMemOperand(value_reg, HeapNumber::kMantissaOffset));
+Branch(&have_double_value, eq, mantissa_reg, Operand(zero_reg));
+bind(&is_nan);
+// Load canonical NaN for storing into the double array.
+uint64_t nan_int64 = BitCast<uint64_t>(
+FixedDoubleArray::canonical_not_the_hole_nan_as_double());
+li(mantissa_reg, Operand(static_cast<uint32_t>(nan_int64)));
+li(exponent_reg, Operand(static_cast<uint32_t>(nan_int64 >> 32)));
+jmp(&have_double_value);
+bind(&smi_value);
+Addu(scratch1, elements_reg,
+Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag));
+sll(scratch2, key_reg, kDoubleSizeLog2 - kSmiTagSize);
+Addu(scratch1, scratch1, scratch2);
+// scratch1 is now effective address of the double element.
+FloatingPointHelper::Destination destination;
+if (CpuFeatures::IsSupported(FPU)) {
+destination = FloatingPointHelper::kFPURegisters;
+} else {
+destination = FloatingPointHelper::kCoreRegisters;
+}
+Register untagged_value = receiver_reg;
+SmiUntag(untagged_value, value_reg);
+FloatingPointHelper::ConvertIntToDouble(this,
+untagged_value,
+destination,
+f0,
+mantissa_reg,
+exponent_reg,
+scratch4,
+f2);
+if (destination == FloatingPointHelper::kFPURegisters) {
+CpuFeatures::Scope scope(FPU);
+sdc1(f0, MemOperand(scratch1, 0));
+} else {
+sw(mantissa_reg, MemOperand(scratch1, 0));
+sw(exponent_reg, MemOperand(scratch1, Register::kSizeInBytes));
+}
+bind(&done);
+}
void MacroAssembler::CheckMap(Register obj,
Register scratch,
Handle<Map> map,
......@@ -4573,23 +4704,15 @@ void MacroAssembler::PrepareCallCFunction(int num_reg_arguments,
void MacroAssembler::CallCFunction(ExternalReference function,
int num_reg_arguments,
int num_double_arguments) {
-CallCFunctionHelper(no_reg,
-function,
-t8,
-num_reg_arguments,
-num_double_arguments);
+li(t8, Operand(function));
+CallCFunctionHelper(t8, num_reg_arguments, num_double_arguments);
}
void MacroAssembler::CallCFunction(Register function,
-Register scratch,
int num_reg_arguments,
int num_double_arguments) {
-CallCFunctionHelper(function,
-ExternalReference::the_hole_value_location(isolate()),
-scratch,
-num_reg_arguments,
-num_double_arguments);
+CallCFunctionHelper(function, num_reg_arguments, num_double_arguments);
}
......@@ -4600,15 +4723,12 @@ void MacroAssembler::CallCFunction(ExternalReference function,
void MacroAssembler::CallCFunction(Register function,
-Register scratch,
int num_arguments) {
-CallCFunction(function, scratch, num_arguments, 0);
+CallCFunction(function, num_arguments, 0);
}
void MacroAssembler::CallCFunctionHelper(Register function,
-ExternalReference function_reference,
-Register scratch,
int num_reg_arguments,
int num_double_arguments) {
ASSERT(has_frame());
......@@ -4639,10 +4759,7 @@ void MacroAssembler::CallCFunctionHelper(Register function,
// allow preemption, so the return address in the link register
// stays correct.
-if (function.is(no_reg)) {
-function = t9;
-li(function, Operand(function_reference));
-} else if (!function.is(t9)) {
+if (!function.is(t9)) {
mov(t9, function);
function = t9;
}
......
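
Note: the Ret condition flip in RememberedSetHelper above is a behavioral fix: after masking the new store-buffer top, the helper must return while the overflow bit is clear and only fall through to StoreBufferOverflowStub when it is set. A sketch of that fast path (the buffer layout and overflow-bit constant are illustrative, not V8's actual values):

#include <cstdint>

const uintptr_t kStoreBufferOverflowBit = 1u << 16;  // Illustrative value.

// Append a slot address to the store buffer; return early while the buffer
// has room (Ret(eq, t8, zero_reg)), otherwise let the overflow stub run.
void RememberedSetHelperSketch(uintptr_t slot_address, uintptr_t*& top,
                               void (*overflow_stub)()) {
  *top = slot_address;  // Record the slot that now holds a new-space pointer.
  ++top;
  uintptr_t masked =
      reinterpret_cast<uintptr_t>(top) & kStoreBufferOverflowBit;
  if (masked == 0) return;  // No overflow yet: the common, fast case.
  overflow_stub();          // Buffer full: drain/compact it.
}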
......@@ -279,7 +279,8 @@ class MacroAssembler: public Assembler {
// Record in the remembered set the fact that we have a pointer to new space
// at the address pointed to by the addr register. Only works if addr is not
// in new space.
-void RememberedSetHelper(Register addr,
+void RememberedSetHelper(Register object, // Used for debug code.
+Register addr,
Register scratch,
SaveFPRegsMode save_fp,
RememberedSetFinalAction and_then);
......@@ -299,7 +300,7 @@ class MacroAssembler: public Assembler {
}
// Check if object is in new space. Jumps if the object is in new space.
-// The register scratch can be object itself, but it will be clobbered.
+// The register scratch can be object itself, but scratch will be clobbered.
void JumpIfInNewSpace(Register object,
Register scratch,
Label* branch) {
......@@ -868,6 +869,13 @@ class MacroAssembler: public Assembler {
Register length,
Register scratch);
+// Initialize fields with filler values. Fields from |start_offset| up to
+// but not including |end_offset| are overwritten with the value in
+// |filler|. At the end of the loop, |start_offset| takes the value of
+// |end_offset|.
+void InitializeFieldsWithFiller(Register start_offset,
+Register end_offset,
+Register filler);
// -------------------------------------------------------------------------
// Support functions.
......@@ -891,6 +899,31 @@ class MacroAssembler: public Assembler {
Register scratch,
Label* fail);
+// Check if a map for a JSObject indicates that the object can have both smi
+// and HeapObject elements. Jump to the specified label if it does not.
+void CheckFastObjectElements(Register map,
+Register scratch,
+Label* fail);
+// Check if a map for a JSObject indicates that the object has fast smi only
+// elements. Jump to the specified label if it does not.
+void CheckFastSmiOnlyElements(Register map,
+Register scratch,
+Label* fail);
+// Check to see if maybe_number can be stored as a double in
+// FastDoubleElements. If it can, store it at the index specified by key in
+// the FastDoubleElements array elements, otherwise jump to fail.
+void StoreNumberToDoubleElements(Register value_reg,
+Register key_reg,
+Register receiver_reg,
+Register elements_reg,
+Register scratch1,
+Register scratch2,
+Register scratch3,
+Register scratch4,
+Label* fail);
// Check if the map of an object is equal to a specified map (either
// given directly or as an index into the root list) and branch to
// label if not. Skip the smi check if not required (object is known
......@@ -1088,11 +1121,11 @@ class MacroAssembler: public Assembler {
// return address (unless this is somehow accounted for by the called
// function).
void CallCFunction(ExternalReference function, int num_arguments);
-void CallCFunction(Register function, Register scratch, int num_arguments);
+void CallCFunction(Register function, int num_arguments);
void CallCFunction(ExternalReference function,
int num_reg_arguments,
int num_double_arguments);
-void CallCFunction(Register function, Register scratch,
+void CallCFunction(Register function,
int num_reg_arguments,
int num_double_arguments);
void GetCFunctionDoubleResult(const DoubleRegister dst);
......@@ -1316,8 +1349,6 @@ class MacroAssembler: public Assembler {
private:
void CallCFunctionHelper(Register function,
-ExternalReference function_reference,
-Register scratch,
int num_reg_arguments,
int num_double_arguments);
......
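
Note: the new InitializeFieldsWithFiller declaration above documents the same contract the macro-assembler loop implements; in plain C++ terms (a sketch, with the register triple modeled as pointers):

#include <cstdint>

// Every word in [start, end) is set to filler; on return, start == end,
// mirroring how the generated loop advances the start_offset register.
void InitializeFieldsWithFillerSketch(uintptr_t*& start, uintptr_t* end,
                                      uintptr_t filler) {
  while (start < end) {
    *start = filler;  // sw(filler, MemOperand(start_offset));
    ++start;          // Addu(start_offset, start_offset, kPointerSize);
  }
}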
This diff is collapsed.