Commit 26e692af authored by lrn@chromium.org

X64: Change some smi operations to work on untagged integers instead.

Use direct reading and writing of integers from smi fields.
Change RecordWrite with offset 0 to take an untagged int32 index instead
of a smi-tagged index.

Review URL: http://codereview.chromium.org/2872005

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@4893 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
parent 93387f27
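
Background for readers of the diff below: on x64, V8 stores a smi as its
32-bit integer payload shifted into the upper half of a 64-bit word
(kSmiShift == 32). The platform is little-endian, so the payload of a smi
field in memory is an ordinary int32 at byte offset
kSmiShift / kBitsPerByte == 4. The new Integer32ToSmiField,
SmiToInteger64(Register, const Operand&) and SmiCompareInteger32 helpers
introduced below exploit exactly this with a single movl, movsxlq or cmpl.
A minimal standalone C++ sketch of that layout (illustrative only, not V8
code):

#include <cassert>
#include <cstdint>
#include <cstring>

const int kSmiShift = 32;    // same constants as V8's x64 port
const int kBitsPerByte = 8;

int64_t SmiFromInt(int32_t value) {
  return static_cast<int64_t>(value) << kSmiShift;
}

// What SmiToInteger32(reg, FieldOperand(...)) compiles to: one 4-byte load
// at offset kSmiShift / kBitsPerByte == 4 (the little-endian upper half).
int32_t ReadSmiFieldAsInt32(const int64_t* field) {
  int32_t result;
  std::memcpy(&result,
              reinterpret_cast<const char*>(field) + kSmiShift / kBitsPerByte,
              sizeof(result));
  return result;
}

// What Integer32ToSmiField does: overwrite only the payload half. The low
// half must already be zero, i.e. the field must already hold a smi.
void WriteInt32ToSmiField(int64_t* field, int32_t value) {
  std::memcpy(reinterpret_cast<char*>(field) + kSmiShift / kBitsPerByte,
              &value, sizeof(value));
}

int main() {
  int64_t field = SmiFromInt(42);
  assert(ReadSmiFieldAsInt32(&field) == 42);
  WriteInt32ToSmiField(&field, 7);
  assert(field == SmiFromInt(7));
}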
@@ -1148,6 +1148,15 @@ void Assembler::incl(const Operand& dst) {
 }
 
 
+void Assembler::incl(Register dst) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit_optional_rex_32(dst);
+  emit(0xFF);
+  emit_modrm(0, dst);
+}
+
+
 void Assembler::int3() {
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
...
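
A note on the new Assembler::incl(Register): ia32's one-byte 0x40+reg INC
encodings were repurposed as REX prefixes on x64, so a 32-bit register
increment has to use the FF /0 form emitted above. Expected byte sequences,
derived from the instruction-set manual rather than from this commit:

// incl(rax)  emits  FF C0      (opcode FF, modrm /0 with rm = rax)
// incl(r9)   emits  41 FF C1   (REX.B prefix extends rm to select r9)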
@@ -765,6 +765,7 @@ class Assembler : public Malloced {
   void incq(Register dst);
   void incq(const Operand& dst);
+  void incl(Register dst);
   void incl(const Operand& dst);
 
   void lea(Register dst, const Operand& src);
...
@@ -4769,8 +4769,8 @@ void DeferredSearchCache::Generate() {
   __ cmpq(ArrayElement(cache_, dst_), key_);
   __ j(not_equal, &first_loop);
 
-  __ Integer32ToSmi(scratch_, dst_);
-  __ movq(FieldOperand(cache_, JSFunctionResultCache::kFingerOffset), scratch_);
+  __ Integer32ToSmiField(
+      FieldOperand(cache_, JSFunctionResultCache::kFingerOffset), dst_);
   __ movq(dst_, ArrayElement(cache_, dst_, 1));
 
   __ jmp(exit_label());
@@ -4791,8 +4791,8 @@ void DeferredSearchCache::Generate() {
   __ cmpq(ArrayElement(cache_, dst_), key_);
   __ j(not_equal, &second_loop);
 
-  __ Integer32ToSmi(scratch_, dst_);
-  __ movq(FieldOperand(cache_, JSFunctionResultCache::kFingerOffset), scratch_);
+  __ Integer32ToSmiField(
+      FieldOperand(cache_, JSFunctionResultCache::kFingerOffset), dst_);
   __ movq(dst_, ArrayElement(cache_, dst_, 1));
 
   __ jmp(exit_label());
@@ -4814,50 +4814,50 @@ void DeferredSearchCache::Generate() {
   // cache miss this optimization would hardly matter much.
 
   // Check if we could add new entry to cache.
-  __ movq(rbx, FieldOperand(rcx, FixedArray::kLengthOffset));
-  __ movq(r9, FieldOperand(rcx, JSFunctionResultCache::kCacheSizeOffset));
-  __ SmiCompare(rbx, r9);
+  __ SmiToInteger32(rbx, FieldOperand(rcx, FixedArray::kLengthOffset));
+  __ SmiToInteger32(r9,
+                    FieldOperand(rcx, JSFunctionResultCache::kCacheSizeOffset));
+  __ cmpl(rbx, r9);
   __ j(greater, &add_new_entry);
 
   // Check if we could evict entry after finger.
-  __ movq(rdx, FieldOperand(rcx, JSFunctionResultCache::kFingerOffset));
-  __ SmiToInteger32(rdx, rdx);
-  __ SmiToInteger32(rbx, rbx);
-  __ addq(rdx, kEntrySizeImm);
+  __ SmiToInteger32(rdx,
+                    FieldOperand(rcx, JSFunctionResultCache::kFingerOffset));
+  __ addl(rdx, kEntrySizeImm);
   Label forward;
-  __ cmpq(rbx, rdx);
+  __ cmpl(rbx, rdx);
   __ j(greater, &forward);
   // Need to wrap over the cache.
   __ movl(rdx, kEntriesIndexImm);
   __ bind(&forward);
-  __ Integer32ToSmi(r9, rdx);
+  __ movl(r9, rdx);
   __ jmp(&update_cache);
 
   __ bind(&add_new_entry);
-  // r9 holds cache size as smi.
-  __ SmiToInteger32(rdx, r9);
-  __ SmiAddConstant(rbx, r9, Smi::FromInt(JSFunctionResultCache::kEntrySize));
-  __ movq(FieldOperand(rcx, JSFunctionResultCache::kCacheSizeOffset), rbx);
+  // r9 holds cache size as int32.
+  __ leal(rbx, Operand(r9, JSFunctionResultCache::kEntrySize));
+  __ Integer32ToSmiField(
+      FieldOperand(rcx, JSFunctionResultCache::kCacheSizeOffset), rbx);
 
   // Update the cache itself.
-  // rdx holds the index as int.
-  // r9 holds the index as smi.
+  // r9 holds the index as int32.
   __ bind(&update_cache);
   __ pop(rbx);  // restore the key
-  __ movq(FieldOperand(rcx, JSFunctionResultCache::kFingerOffset), r9);
+  __ Integer32ToSmiField(
+      FieldOperand(rcx, JSFunctionResultCache::kFingerOffset), r9);
   // Store key.
-  __ movq(ArrayElement(rcx, rdx), rbx);
+  __ movq(ArrayElement(rcx, r9), rbx);
   __ RecordWrite(rcx, 0, rbx, r9);
 
   // Store value.
   __ pop(rcx);  // restore the cache.
-  __ movq(rdx, FieldOperand(rcx, JSFunctionResultCache::kFingerOffset));
-  __ SmiAddConstant(rdx, rdx, Smi::FromInt(1));
-  __ movq(r9, rdx);
-  __ SmiToInteger32(rdx, rdx);
+  __ SmiToInteger32(rdx,
+                    FieldOperand(rcx, JSFunctionResultCache::kFingerOffset));
+  __ incl(rdx);
+  // Backup rax, because the RecordWrite macro clobbers its arguments.
   __ movq(rbx, rax);
-  __ movq(ArrayElement(rcx, rdx), rbx);
-  __ RecordWrite(rcx, 0, rbx, r9);
+  __ movq(ArrayElement(rcx, rdx), rax);
+  __ RecordWrite(rcx, 0, rbx, rdx);
 
   if (!dst_.is(rax)) {
     __ movq(dst_, rax);
@@ -8551,18 +8551,18 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
   // rcx: RegExp data (FixedArray)
   // Check the type of the RegExp. Only continue if type is JSRegExp::IRREGEXP.
-  __ movq(rbx, FieldOperand(rcx, JSRegExp::kDataTagOffset));
-  __ SmiCompare(rbx, Smi::FromInt(JSRegExp::IRREGEXP));
+  __ SmiToInteger32(rbx, FieldOperand(rcx, JSRegExp::kDataTagOffset));
+  __ cmpl(rbx, Immediate(JSRegExp::IRREGEXP));
   __ j(not_equal, &runtime);
 
   // rcx: RegExp data (FixedArray)
   // Check that the number of captures fit in the static offsets vector buffer.
-  __ movq(rdx, FieldOperand(rcx, JSRegExp::kIrregexpCaptureCountOffset));
+  __ SmiToInteger32(rdx,
+                    FieldOperand(rcx, JSRegExp::kIrregexpCaptureCountOffset));
   // Calculate number of capture registers (number_of_captures + 1) * 2.
-  __ PositiveSmiTimesPowerOfTwoToInteger64(rdx, rdx, 1);
-  __ addq(rdx, Immediate(2));  // rdx was number_of_captures * 2.
+  __ leal(rdx, Operand(rdx, rdx, times_1, 2));
   // Check that the static offsets vector buffer is large enough.
-  __ cmpq(rdx, Immediate(OffsetsVector::kStaticOffsetsVectorSize));
+  __ cmpl(rdx, Immediate(OffsetsVector::kStaticOffsetsVectorSize));
   __ j(above, &runtime);
   // rcx: RegExp data (FixedArray)
@@ -8572,17 +8572,15 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
   __ JumpIfSmi(rax, &runtime);
   Condition is_string = masm->IsObjectStringType(rax, rbx, rbx);
   __ j(NegateCondition(is_string), &runtime);
-  // Get the length of the string to rbx.
-  __ movq(rbx, FieldOperand(rax, String::kLengthOffset));
 
-  // rbx: Length of subject string as smi
-  // rcx: RegExp data (FixedArray)
-  // rdx: Number of capture registers
+  // rax: Subject string.
+  // rcx: RegExp data (FixedArray).
+  // rdx: Number of capture registers.
   // Check that the third argument is a positive smi less than the string
   // length. A negative value will be greater (unsigned comparison).
-  __ movq(rax, Operand(rsp, kPreviousIndexOffset));
-  __ JumpIfNotSmi(rax, &runtime);
-  __ SmiCompare(rax, rbx);
+  __ movq(rbx, Operand(rsp, kPreviousIndexOffset));
+  __ JumpIfNotSmi(rbx, &runtime);
+  __ SmiCompare(rbx, FieldOperand(rax, String::kLengthOffset));
   __ j(above_equal, &runtime);
 
   // rcx: RegExp data (FixedArray)
@@ -8600,8 +8598,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
   // Check that the last match info has space for the capture registers and the
   // additional information. Ensure no overflow in add.
   ASSERT(FixedArray::kMaxLength < kMaxInt - FixedArray::kLengthOffset);
-  __ movq(rax, FieldOperand(rbx, FixedArray::kLengthOffset));
-  __ SmiToInteger32(rax, rax);
+  __ SmiToInteger32(rax, FieldOperand(rbx, FixedArray::kLengthOffset));
   __ addl(rdx, Immediate(RegExpImpl::kLastMatchOverhead));
   __ cmpl(rdx, rax);
   __ j(greater, &runtime);
@@ -8674,8 +8671,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
   // r12: code
   // Load used arguments before starting to push arguments for call to native
   // RegExp code to avoid handling changing stack height.
-  __ movq(rbx, Operand(rsp, kPreviousIndexOffset));
-  __ SmiToInteger64(rbx, rbx);  // Previous index from smi.
+  __ SmiToInteger64(rbx, Operand(rsp, kPreviousIndexOffset));
 
   // rax: subject string
   // rbx: previous index
@@ -8787,10 +8783,10 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
   __ bind(&success);
   __ movq(rax, Operand(rsp, kJSRegExpOffset));
   __ movq(rcx, FieldOperand(rax, JSRegExp::kDataOffset));
-  __ movq(rdx, FieldOperand(rcx, JSRegExp::kIrregexpCaptureCountOffset));
+  __ SmiToInteger32(rax,
+                    FieldOperand(rcx, JSRegExp::kIrregexpCaptureCountOffset));
   // Calculate number of capture registers (number_of_captures + 1) * 2.
-  __ PositiveSmiTimesPowerOfTwoToInteger64(rdx, rdx, 1);
-  __ addq(rdx, Immediate(2));  // rdx was number_of_captures * 2.
+  __ leal(rdx, Operand(rax, rax, times_1, 2));
   // rdx: Number of capture registers
   // Load last_match_info which is still known to be a fast case JSArray.
@@ -8833,7 +8829,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
 ...
                          rdx,
                          times_pointer_size,
                          RegExpImpl::kFirstCaptureOffset),
           rdi);
   __ jmp(&next_capture);
   __ bind(&done);
@@ -8877,9 +8873,9 @@ void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm,
   // Make the hash mask from the length of the number string cache. It
   // contains two elements (number and string) for each cache entry.
-  __ movq(mask, FieldOperand(number_string_cache, FixedArray::kLengthOffset));
-  // Divide smi tagged length by two.
-  __ PositiveSmiDivPowerOfTwoToInteger32(mask, mask, 1);
+  __ SmiToInteger32(
+      mask, FieldOperand(number_string_cache, FixedArray::kLengthOffset));
+  __ shrl(mask, Immediate(1));
   __ subq(mask, Immediate(1));  // Make mask.
 
   // Calculate the entry in the number string cache. The hash value in the
@@ -8916,8 +8912,7 @@ void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm,
   }
 
   __ bind(&is_smi);
-  __ movq(scratch, object);
-  __ SmiToInteger32(scratch, scratch);
+  __ SmiToInteger32(scratch, object);
   GenerateConvertHashCodeToIndex(masm, scratch, mask);
 
   Register index = scratch;
@@ -9344,29 +9339,30 @@ void ArgumentsAccessStub::GenerateNewObject(MacroAssembler* masm) {
   __ j(equal, &adaptor_frame);
 
   // Get the length from the frame.
-  __ movq(rcx, Operand(rsp, 1 * kPointerSize));
+  __ SmiToInteger32(rcx, Operand(rsp, 1 * kPointerSize));
   __ jmp(&try_allocate);
 
   // Patch the arguments.length and the parameters pointer.
   __ bind(&adaptor_frame);
-  __ movq(rcx, Operand(rdx, ArgumentsAdaptorFrameConstants::kLengthOffset));
-  __ movq(Operand(rsp, 1 * kPointerSize), rcx);
+  __ SmiToInteger32(rcx,
+                    Operand(rdx,
+                            ArgumentsAdaptorFrameConstants::kLengthOffset));
+  // Space on stack must already hold a smi.
+  __ Integer32ToSmiField(Operand(rsp, 1 * kPointerSize), rcx);
   // Do not clobber the length index for the indexing operation since
   // it is used compute the size for allocation later.
-  SmiIndex index = masm->SmiToIndex(rbx, rcx, kPointerSizeLog2);
-  __ lea(rdx, Operand(rdx, index.reg, index.scale, kDisplacement));
+  __ lea(rdx, Operand(rdx, rcx, times_pointer_size, kDisplacement));
   __ movq(Operand(rsp, 2 * kPointerSize), rdx);
 
   // Try the new space allocation. Start out with computing the size of
   // the arguments object and the elements array.
   Label add_arguments_object;
   __ bind(&try_allocate);
-  __ testq(rcx, rcx);
+  __ testl(rcx, rcx);
   __ j(zero, &add_arguments_object);
-  index = masm->SmiToIndex(rcx, rcx, kPointerSizeLog2);
-  __ lea(rcx, Operand(index.reg, index.scale, FixedArray::kHeaderSize));
+  __ leal(rcx, Operand(rcx, times_pointer_size, FixedArray::kHeaderSize));
   __ bind(&add_arguments_object);
-  __ addq(rcx, Immediate(Heap::kArgumentsObjectSize));
+  __ addl(rcx, Immediate(Heap::kArgumentsObjectSize));
 
   // Do the allocation of both objects in one go.
   __ AllocateInNewSpace(rcx, rax, rdx, rbx, &runtime, TAG_OBJECT);
@@ -9378,10 +9374,13 @@ void ArgumentsAccessStub::GenerateNewObject(MacroAssembler* masm) {
   __ movq(rdi, Operand(rdi, offset));
 
   // Copy the JS object part.
-  for (int i = 0; i < JSObject::kHeaderSize; i += kPointerSize) {
-    __ movq(kScratchRegister, FieldOperand(rdi, i));
-    __ movq(FieldOperand(rax, i), kScratchRegister);
-  }
+  STATIC_ASSERT(JSObject::kHeaderSize == 3 * kPointerSize);
+  __ movq(kScratchRegister, FieldOperand(rdi, 0 * kPointerSize));
+  __ movq(rdx, FieldOperand(rdi, 1 * kPointerSize));
+  __ movq(rbx, FieldOperand(rdi, 2 * kPointerSize));
+  __ movq(FieldOperand(rax, 0 * kPointerSize), kScratchRegister);
+  __ movq(FieldOperand(rax, 1 * kPointerSize), rdx);
+  __ movq(FieldOperand(rax, 2 * kPointerSize), rbx);
 
   // Setup the callee in-object property.
   ASSERT(Heap::arguments_callee_index == 0);
@@ -9395,7 +9394,7 @@ void ArgumentsAccessStub::GenerateNewObject(MacroAssembler* masm) {
   // If there are no actual arguments, we're done.
   Label done;
-  __ testq(rcx, rcx);
+  __ SmiTest(rcx);
   __ j(zero, &done);
 
   // Get the parameters pointer from the stack and untag the length.
@@ -9417,7 +9416,7 @@ void ArgumentsAccessStub::GenerateNewObject(MacroAssembler* masm) {
   __ movq(FieldOperand(rdi, FixedArray::kHeaderSize), kScratchRegister);
   __ addq(rdi, Immediate(kPointerSize));
   __ subq(rdx, Immediate(kPointerSize));
-  __ decq(rcx);
+  __ decl(rcx);
   __ j(not_zero, &loop);
 
   // Return and remove the on-stack parameters.
@@ -10832,19 +10831,13 @@ void GenericBinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
   __ push(rax);
 
   // Push this stub's key.
-  __ movq(rax, Immediate(MinorKey()));
-  __ Integer32ToSmi(rax, rax);
-  __ push(rax);
+  __ Push(Smi::FromInt(MinorKey()));
 
   // Although the operation and the type info are encoded into the key,
   // the encoding is opaque, so push them too.
-  __ movq(rax, Immediate(op_));
-  __ Integer32ToSmi(rax, rax);
-  __ push(rax);
+  __ Push(Smi::FromInt(op_));
 
-  __ movq(rax, Immediate(runtime_operands_type_));
-  __ Integer32ToSmi(rax, rax);
-  __ push(rax);
+  __ Push(Smi::FromInt(runtime_operands_type_));
 
   __ push(rcx);
...
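
Two arithmetic rewrites in this file are worth spelling out: the capture
count (number_of_captures + 1) * 2 is folded into a single lea as
n + n * 1 + 2, and the number-string-cache mask becomes
untagged_length / 2 - 1 (two elements per cache entry). A small
self-checking sketch (illustrative only, not V8 code):

#include <cassert>

// leal(rdx, Operand(rdx, rdx, times_1, 2)) computes rdx + rdx * 1 + 2.
int CaptureRegisters(int number_of_captures) {
  return number_of_captures + number_of_captures * 1 + 2;
}

// SmiToInteger32 + shrl(1) + subq(1): two elements per cache entry.
int CacheMask(int untagged_length) {
  return (untagged_length >> 1) - 1;
}

int main() {
  assert(CaptureRegisters(0) == 2);   // (0 + 1) * 2
  assert(CaptureRegisters(3) == 8);   // (3 + 1) * 2
  assert(CacheMask(128) == 63);       // 64 entries -> mask 0x3f
}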
@@ -1013,6 +1013,7 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) {
   __ movq(FieldOperand(rbx, index2.reg, index2.scale, FixedArray::kHeaderSize),
           rax);
   __ movq(rdx, rax);
+  __ SmiToInteger32(rcx, rcx);
   __ RecordWriteNonSmi(rbx, 0, rdx, rcx);
   __ ret(0);
 }
...
@@ -96,8 +96,8 @@ void MacroAssembler::RecordWriteHelper(Register object,
   // Compute number of region covering addr. See Page::GetRegionNumberForAddress
   // method for more details.
-  and_(addr, Immediate(Page::kPageAlignmentMask));
   shrl(addr, Immediate(Page::kRegionSizeLog2));
+  andl(addr, Immediate(Page::kPageAlignmentMask >> Page::kRegionSizeLog2));
 
   // Set dirty mark for region.
   bts(Operand(object, Page::kDirtyFlagOffset), addr);
@@ -106,25 +106,25 @@ void MacroAssembler::RecordWriteHelper(Register object,
 // For page containing |object| mark region covering [object+offset] dirty.
 // object is the object being stored into, value is the object being stored.
-// If offset is zero, then the smi_index register contains the array index into
-// the elements array represented as a smi. Otherwise it can be used as a
-// scratch register.
+// If offset is zero, then the index register contains the array index into
+// the elements array represented a zero extended int32. Otherwise it can be
+// used as a scratch register.
 // All registers are clobbered by the operation.
 void MacroAssembler::RecordWrite(Register object,
                                  int offset,
                                  Register value,
-                                 Register smi_index) {
+                                 Register index) {
   // The compiled code assumes that record write doesn't change the
   // context register, so we check that none of the clobbered
   // registers are rsi.
-  ASSERT(!object.is(rsi) && !value.is(rsi) && !smi_index.is(rsi));
+  ASSERT(!object.is(rsi) && !value.is(rsi) && !index.is(rsi));
 
   // First, check if a write barrier is even needed. The tests below
   // catch stores of Smis and stores into young gen.
   Label done;
   JumpIfSmi(value, &done);
 
-  RecordWriteNonSmi(object, offset, value, smi_index);
+  RecordWriteNonSmi(object, offset, value, index);
   bind(&done);
 
   // Clobber all input registers when running with the debug-code flag
@@ -135,7 +135,7 @@ void MacroAssembler::RecordWrite(Register object,
   if (FLAG_debug_code) {
     movq(object, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
     movq(value, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
-    movq(smi_index, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
+    movq(index, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
   }
 }
@@ -143,7 +143,7 @@ void MacroAssembler::RecordWrite(Register object,
 void MacroAssembler::RecordWriteNonSmi(Register object,
                                        int offset,
                                        Register scratch,
-                                       Register smi_index) {
+                                       Register index) {
   Label done;
 
   if (FLAG_debug_code) {
@@ -151,6 +151,16 @@ void MacroAssembler::RecordWriteNonSmi(Register object,
     JumpIfNotSmi(object, &okay);
     Abort("MacroAssembler::RecordWriteNonSmi cannot deal with smis");
     bind(&okay);
+
+    if (offset == 0) {
+      // index must be int32.
+      Register tmp = index.is(rax) ? rbx : rax;
+      push(tmp);
+      movl(tmp, index);
+      cmpq(tmp, index);
+      Check(equal, "Index register for RecordWrite must be untagged int32.");
+      pop(tmp);
+    }
   }
 
   // Test that the object address is not in the new space. We cannot
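
Why the movl/cmpq pair in the debug check above detects a tagged index:
movl zero-extends its 32-bit result into the upper half of the destination,
so the cmpq succeeds only when the original value had no upper bits set. A
smi carries its payload in the upper 32 bits, so any nonzero smi fails the
check. In C++ terms (a sketch, not V8 code):

#include <cassert>
#include <cstdint>

bool IsUntaggedInt32(int64_t index) {
  int64_t tmp = static_cast<uint32_t>(index);  // movl(tmp, index)
  return tmp == index;                         // cmpq(tmp, index)
}

int main() {
  assert(IsUntaggedInt32(5));                  // plain array index passes
  assert(!IsUntaggedInt32(int64_t{5} << 32));  // smi-tagged 5 fails
}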
@@ -163,16 +173,15 @@ void MacroAssembler::RecordWriteNonSmi(Register object,
   ASSERT(IsAligned(offset, kPointerSize) ||
          IsAligned(offset + kHeapObjectTag, kPointerSize));
 
-  Register dst = smi_index;
+  Register dst = index;
   if (offset != 0) {
     lea(dst, Operand(object, offset));
   } else {
     // array access: calculate the destination address in the same manner as
     // KeyedStoreIC::GenerateGeneric.
-    SmiIndex index = SmiToIndex(smi_index, smi_index, kPointerSizeLog2);
     lea(dst, FieldOperand(object,
-                          index.reg,
-                          index.scale,
+                          index,
+                          times_pointer_size,
                           FixedArray::kHeaderSize));
   }
   RecordWriteHelper(object, dst, scratch);
@@ -184,7 +193,7 @@ void MacroAssembler::RecordWriteNonSmi(Register object,
   if (FLAG_debug_code) {
     movq(object, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
     movq(scratch, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
-    movq(smi_index, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
+    movq(index, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
   }
 }
@@ -485,6 +494,23 @@ void MacroAssembler::Integer32ToSmi(Register dst,
 }
 
 
+void MacroAssembler::Integer32ToSmiField(const Operand& dst, Register src) {
+  if (FLAG_debug_code) {
+    testb(dst, Immediate(0x01));
+    Label ok;
+    j(zero, &ok);
+    if (allow_stub_calls()) {
+      Abort("Integer32ToSmiField writing to non-smi location");
+    } else {
+      int3();
+    }
+    bind(&ok);
+  }
+  ASSERT(kSmiShift % kBitsPerByte == 0);
+  movl(Operand(dst, kSmiShift / kBitsPerByte), src);
+}
+
+
 void MacroAssembler::Integer64PlusConstantToSmi(Register dst,
                                                 Register src,
                                                 int constant) {
@@ -520,6 +546,11 @@ void MacroAssembler::SmiToInteger64(Register dst, Register src) {
 }
 
 
+void MacroAssembler::SmiToInteger64(Register dst, const Operand& src) {
+  movsxlq(dst, Operand(src, kSmiShift / kBitsPerByte));
+}
+
+
 void MacroAssembler::SmiTest(Register src) {
   testq(src, src);
 }
@@ -556,6 +587,11 @@ void MacroAssembler::SmiCompare(const Operand& dst, Smi* src) {
 }
 
 
+void MacroAssembler::SmiCompareInteger32(const Operand& dst, Register src) {
+  cmpl(Operand(dst, kSmiShift / kBitsPerByte), src);
+}
+
+
 void MacroAssembler::PositiveSmiTimesPowerOfTwoToInteger64(Register dst,
                                                            Register src,
                                                            int power) {
...
@@ -203,6 +203,9 @@ class MacroAssembler: public Assembler {
   // NOTICE: Destroys the dst register even if unsuccessful!
   void Integer32ToSmi(Register dst, Register src, Label* on_overflow);
 
+  // Stores an integer32 value into a memory field that already holds a smi.
+  void Integer32ToSmiField(const Operand& dst, Register src);
+
   // Adds constant to src and tags the result as a smi.
   // Result must be a valid smi.
   void Integer64PlusConstantToSmi(Register dst, Register src, int constant);
@@ -214,6 +217,7 @@ class MacroAssembler: public Assembler {
   // Convert smi to 64-bit integer (sign extended if necessary).
   void SmiToInteger64(Register dst, Register src);
+  void SmiToInteger64(Register dst, const Operand& src);
 
   // Multiply a positive smi's integer value by a power of two.
   // Provides result as 64-bit integer value.
@@ -234,6 +238,8 @@ class MacroAssembler: public Assembler {
   void SmiCompare(Register dst, const Operand& src);
   void SmiCompare(const Operand& dst, Register src);
   void SmiCompare(const Operand& dst, Smi* src);
+  // Compare the int32 in src register to the value of the smi stored at dst.
+  void SmiCompareInteger32(const Operand& dst, Register src);
 
   // Sets sign and zero flags depending on value of smi in register.
   void SmiTest(Register src);
...
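
Finally, note the contract stated for Integer32ToSmiField above: since the
store writes only the upper half of the field, the destination must already
contain a smi (low 32 bits zero); the debug-mode testb in
macro-assembler-x64.cc guards exactly this. A sketch of how a stale
heap-object tag would corrupt the result (illustrative only, not V8 code):

#include <cassert>
#include <cstdint>

const int kSmiShift = 32;

// Models Integer32ToSmiField: only the payload half is overwritten.
int64_t WriteUpperHalf(int64_t field, int32_t value) {
  return (field & 0xFFFFFFFFLL) | (static_cast<int64_t>(value) << kSmiShift);
}

int main() {
  int64_t smi_field = int64_t{42} << kSmiShift;  // already holds a smi
  assert(WriteUpperHalf(smi_field, 7) == int64_t{7} << kSmiShift);

  int64_t tagged = 0x12345671;  // low bit set: a HeapObject pointer
  // The stale low bits survive, so the result is not a valid smi.
  assert(WriteUpperHalf(tagged, 7) != int64_t{7} << kSmiShift);
}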