Commit 0e11fbcd authored by lrn@chromium.org

Removed unsafe optimization in RecordWrite.

The optimization was unsafe only if new-space was in the low half of memory
and an object could be located in the top half at an address that differs
from a new-space address only by the high bit.
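
In concrete terms, the removed fast path folded the value's smi-tag bit into
the sign bit of the masked object address and tested both conditions with one
signed comparison. A minimal standalone model of that sequence (kNewSpaceStart
and kNewSpaceMask are made-up stand-ins for
ExternalReference::new_space_start() and Heap::NewSpaceMask()) reproduces the
false positive described above:

// Model of the deleted shl/or/and/xor/j(less_equal) sequence; a result
// <= 0 was taken to mean "value is a smi, or object is in new space".
#include <cstdint>
#include <cstdio>

const uint32_t kNewSpaceMask = 0xFFF00000;   // assumed page-selector mask
const uint32_t kNewSpaceStart = 0x25500000;  // new space in the low half

bool OldCheckSkipsBarrier(uint32_t object, uint32_t value) {
  uint32_t v = value << 31;           // smi tag bit -> sign bit
  v |= object;                        // fold in the object address
  v &= kNewSpaceMask | (1u << 31);    // keep page bits and the sign bit
  v ^= kNewSpaceStart | (1u << 31);   // both become zero iff "safe"
  return static_cast<int32_t>(v) <= 0;
}

int main() {
  // A heap pointer (tag bit set) in the top half of memory whose address
  // differs from a new-space address only by the high bit:
  uint32_t old_space_object = kNewSpaceStart | (1u << 31) | 0x1235;
  // Prints 1: the barrier is wrongly skipped for an old-space object.
  printf("skipped: %d\n",
         OldCheckSkipsBarrier(old_space_object, old_space_object));
  return 0;
}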

Review URL: http://codereview.chromium.org/159784


git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@2608 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
parent fd8b3769
@@ -226,7 +226,9 @@ enum ScaleFactor {
   times_1 = 0,
   times_2 = 1,
   times_4 = 2,
-  times_8 = 3
+  times_8 = 3,
+  times_pointer_size = times_4,
+  times_half_pointer_size = times_2
 };
@@ -146,43 +146,30 @@ void MacroAssembler::RecordWrite(Register object, int offset,
   // for the remembered set bits.
   Label done;
-  // This optimization cannot survive serialization and deserialization,
-  // so we disable as long as serialization can take place.
-  int32_t new_space_start =
-      reinterpret_cast<int32_t>(ExternalReference::new_space_start().address());
-  if (Serializer::enabled() || new_space_start < 0) {
-    // Cannot do smart bit-twiddling. Need to do two consecutive checks.
-    // Check for Smi first.
-    test(value, Immediate(kSmiTagMask));
-    j(zero, &done);
-    // Test that the object address is not in the new space. We cannot
-    // set remembered set bits in the new space.
+  // Skip barrier if writing a smi.
+  ASSERT_EQ(0, kSmiTag);
+  test(value, Immediate(kSmiTagMask));
+  j(zero, &done);
+  if (Serializer::enabled()) {
+    // Can't do arithmetic on external references if it might get serialized.
     mov(value, Operand(object));
     and_(value, Heap::NewSpaceMask());
     cmp(Operand(value), Immediate(ExternalReference::new_space_start()));
     j(equal, &done);
   } else {
-    // move the value SmiTag into the sign bit
-    shl(value, 31);
-    // combine the object with value SmiTag
-    or_(value, Operand(object));
-    // remove the uninteresting bits inside the page
-    and_(value, Heap::NewSpaceMask() | (1 << 31));
-    // xor has two effects:
-    // - if the value was a smi, then the result will be negative
-    // - if the object is pointing into new space area the page bits will
-    //   all be zero
-    xor_(value, new_space_start | (1 << 31));
-    // Check for both conditions in one branch
-    j(less_equal, &done);
+    int32_t new_space_start = reinterpret_cast<int32_t>(
+        ExternalReference::new_space_start().address());
+    lea(value, Operand(object, -new_space_start));
+    and_(value, Heap::NewSpaceMask());
+    j(equal, &done);
   }
   if ((offset > 0) && (offset < Page::kMaxHeapObjectSize)) {
     // Compute the bit offset in the remembered set, leave it in 'value'.
-    mov(value, Operand(object));
+    lea(value, Operand(object, offset));
     and_(value, Page::kPageAlignmentMask);
-    add(Operand(value), Immediate(offset));
-    shr(value, kObjectAlignmentBits);
+    shr(value, kPointerSizeLog2);
     // Compute the page address from the heap object pointer, leave it in
     // 'object'.
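
The replacement check subtracts new_space_start before masking, so the test is
exact in every half of the address space; the Serializer::enabled() branch
keeps the mov/and/cmp form because, as its comment says, an external address
folded into an lea displacement could not be relocated when a snapshot is
serialized. A sketch of the arithmetic, with the same assumed constants as
above:

#include <cassert>
#include <cstdint>
#include <cstdio>

const uint32_t kNewSpaceStart = 0x25500000;  // assumed, as above
const uint32_t kNewSpaceMask = 0xFFF00000;   // ~(reservation size - 1)

// lea(value, Operand(object, -new_space_start));  // value = object - start
// and_(value, Heap::NewSpaceMask());              // sets ZF iff result == 0
// j(equal, &done);                                // zero => in new space
bool InNewSpace(uint32_t object) {
  return ((object - kNewSpaceStart) & kNewSpaceMask) == 0;
}

int main() {
  assert(InNewSpace(kNewSpaceStart + 0x1234));   // inside the reservation
  assert(!InNewSpace(kNewSpaceStart - 4));       // just below it
  // The aliasing address that fooled the old trick is classified correctly:
  assert(!InNewSpace(kNewSpaceStart | (1u << 31)));
  puts("ok");
  return 0;
}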
@@ -192,7 +179,7 @@ void MacroAssembler::RecordWrite(Register object, int offset,
     // to limit code size. We should probably evaluate this decision by
     // measuring the performance of an equivalent implementation using
     // "simpler" instructions
-    bts(Operand(object, 0), value);
+    bts(Operand(object, Page::kRSetOffset), value);
   } else {
     Register dst = scratch;
     if (offset != 0) {
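
For the in-object case the patch also tightens the bit-offset computation (one
lea instead of mov+add, and a shift by kPointerSizeLog2 rather than
kObjectAlignmentBits, both 2 on ia32, since remembered-set bits cover
pointer-sized slots) and replaces the magic displacement 0 in the bts with the
named Page::kRSetOffset. A sketch of the index arithmetic, with assumed page
constants in place of Page::kPageAlignmentMask:

#include <cstdint>
#include <cstdio>

// Assumed stand-ins for Page::kPageAlignmentMask and kPointerSizeLog2.
const uint32_t kPageAlignmentMask = (1u << 20) - 1;  // hypothetical 1MB pages
const int kPointerSizeLog2 = 2;                      // ia32: 4-byte words

// Mirrors the patched lea/and/shr sequence: the remembered-set bit index of
// the slot at object+offset is that slot's word index within its page.
uint32_t RSetBitIndex(uint32_t object, int offset) {
  uint32_t value = object + offset;   // lea(value, Operand(object, offset))
  value &= kPageAlignmentMask;        // and_(value, Page::kPageAlignmentMask)
  return value >> kPointerSizeLog2;   // shr(value, kPointerSizeLog2)
}

int main() {
  // A slot 0x40 bytes into an object that starts 0x1000 bytes into its page
  // maps to bit 0x1040 >> 2 == 0x410; bts then sets that bit in the bitmap
  // at page_start + Page::kRSetOffset.
  printf("bit index: 0x%x\n", RSetBitIndex(0x08101000u, 0x40));
  return 0;
}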
@@ -201,7 +188,9 @@ void MacroAssembler::RecordWrite(Register object, int offset,
       // array access: calculate the destination address in the same manner as
       // KeyedStoreIC::GenerateGeneric. Multiply a smi by 2 to get an offset
       // into an array of words.
-      lea(dst, Operand(object, dst, times_2,
+      ASSERT_EQ(1, kSmiTagSize);
+      ASSERT_EQ(0, kSmiTag);
+      lea(dst, Operand(object, dst, times_half_pointer_size,
                        FixedArray::kHeaderSize - kHeapObjectTag));
     }
     // If we are already generating a shared stub, not inlining the
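
The times_half_pointer_size alias introduced in the assembler-ia32.h hunk
(equal to times_2) spells out why that scale is the right one: a smi already
carries its value shifted left by kSmiTagSize, so scaling the tagged word by
half the 4-byte element size addresses the element directly, with no untagging
instruction. A small check of that identity, assuming V8's smi encoding
(kSmiTag == 0, kSmiTagSize == 1):

#include <cassert>
#include <cstdint>
#include <cstdio>

const int kSmiTagSize = 1;   // smi == value << 1, tag bit 0
const int kPointerSize = 4;  // ia32

int main() {
  for (int32_t index = 0; index < 1000; ++index) {
    int32_t smi = index << kSmiTagSize;  // tagged array index
    // Untag, then scale by the element size...
    int32_t byte_offset = (smi >> kSmiTagSize) * kPointerSize;
    // ...which equals scaling the tagged value by times_half_pointer_size
    // (times_2), exactly what the patched lea computes in one instruction.
    assert(byte_offset == smi * (kPointerSize / 2));
  }
  puts("ok");
  return 0;
}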