Commit 76833936 authored by whesse@chromium.org

Make Array::kHeaderSize protected, and only use kHeaderSize of its subclasses.

Many X64 bugs have been due to the difference between (unaligned) Array::kHeaderSize and (aligned) FixedArray::kHeaderSize.
Review URL: http://codereview.chromium.org/155687

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@2502 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
parent 21f44e8f
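
Note on the bug class this fixes (an illustration, not part of the diff): Array::kHeaderSize is the raw, unaligned header size, while FixedArray::kHeaderSize is that size rounded up to pointer alignment. On ia32 the two coincide, so mixed usage went unnoticed; on x64 they differ by 4 bytes. A minimal sketch with simplified stand-ins for the objects.h constants:

```cpp
// Illustrative sketch only: simplified stand-ins for the objects.h constants,
// showing why Array::kHeaderSize and FixedArray::kHeaderSize diverge on x64.
#include <cstdio>

int main() {
  const int kIntSize = 4;
  const int kPointerSize = 8;                      // x64; 4 on ia32/arm
  const int kHeapObjectHeaderSize = kPointerSize;  // the map word

  // Array::kHeaderSize: map word plus the 32-bit length field (unaligned).
  int array_header = kHeapObjectHeaderSize + kIntSize;          // 12 on x64
  // FixedArray::kHeaderSize: the same size rounded up to a pointer boundary,
  // which is what POINTER_SIZE_ALIGN does in objects.h.
  int fixed_array_header =
      (array_header + kPointerSize - 1) & ~(kPointerSize - 1);  // 16 on x64

  printf("Array header:      %d\n", array_header);
  printf("FixedArray header: %d\n", fixed_array_header);
  // With kPointerSize == 4 both expressions yield 8, which is why code that
  // mixed the two constants still computed correct offsets on ia32.
  return 0;
}
```
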
@@ -2897,7 +2897,7 @@ void CodeGenerator::VisitArrayLiteral(ArrayLiteral* node) {
   __ ldr(r1, FieldMemOperand(r1, JSObject::kElementsOffset));
   // Write to the indexed properties array.
-  int offset = i * kPointerSize + Array::kHeaderSize;
+  int offset = i * kPointerSize + FixedArray::kHeaderSize;
   __ str(r0, FieldMemOperand(r1, offset));
   // Update the write barrier for the array address.
...
@@ -91,14 +91,14 @@ static void GenerateDictionaryLoad(MacroAssembler* masm,
   __ b(ne, miss);
   // Compute the capacity mask.
-  const int kCapacityOffset =
-      Array::kHeaderSize + StringDictionary::kCapacityIndex * kPointerSize;
+  const int kCapacityOffset = StringDictionary::kHeaderSize +
+      StringDictionary::kCapacityIndex * kPointerSize;
   __ ldr(r3, FieldMemOperand(t0, kCapacityOffset));
   __ mov(r3, Operand(r3, ASR, kSmiTagSize));  // convert smi to int
   __ sub(r3, r3, Operand(1));
-  const int kElementsStartOffset =
-      Array::kHeaderSize + StringDictionary::kElementsStartIndex * kPointerSize;
+  const int kElementsStartOffset = StringDictionary::kHeaderSize +
+      StringDictionary::kElementsStartIndex * kPointerSize;
   // Generate an unrolled loop that performs a few probes before
   // giving up. Measurements done on Gmail indicate that 2 probes
@@ -599,7 +599,7 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
   // Fast case: Do the load.
   __ bind(&fast);
-  __ add(r3, r1, Operand(Array::kHeaderSize - kHeapObjectTag));
+  __ add(r3, r1, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
   __ ldr(r0, MemOperand(r3, r0, LSL, kPointerSizeLog2));
   __ cmp(r0, Operand(Factory::the_hole_value()));
   // In case the loaded value is the_hole we have to consult GetProperty
@@ -666,9 +666,9 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) {
   // Untag the key (for checking against untagged length in the fixed array).
   __ mov(r1, Operand(r1, ASR, kSmiTagSize));
   // Compute address to store into and check array bounds.
-  __ add(r2, r3, Operand(Array::kHeaderSize - kHeapObjectTag));
+  __ add(r2, r3, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
   __ add(r2, r2, Operand(r1, LSL, kPointerSizeLog2));
-  __ ldr(ip, FieldMemOperand(r3, Array::kLengthOffset));
+  __ ldr(ip, FieldMemOperand(r3, FixedArray::kLengthOffset));
   __ cmp(r1, Operand(ip));
   __ b(lo, &fast);
@@ -696,7 +696,7 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) {
   __ mov(r3, Operand(r2));
   // NOTE: Computing the address to store into must take the fact
   // that the key has been incremented into account.
-  int displacement = Array::kHeaderSize - kHeapObjectTag -
+  int displacement = FixedArray::kHeaderSize - kHeapObjectTag -
       ((1 << kSmiTagSize) * 2);
   __ add(r2, r2, Operand(displacement));
   __ add(r2, r2, Operand(r1, LSL, kPointerSizeLog2 - kSmiTagSize));
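
The displacement arithmetic in the hunk above is easy to misread. Here is a sketch of the address computation it performs, assuming the 32-bit constants kSmiTagSize == 1, kPointerSizeLog2 == 2, and kHeapObjectTag == 1 (the helper and its names are illustrative, not V8 code):

```cpp
// Hypothetical helper mirroring the generated code above. The key register
// holds a smi that has already been incremented, i.e. its raw bits are
// 2 * (key + 1) when kSmiTagSize == 1.
int ElementAddress(int elements_reg, int incremented_smi_key) {
  const int kSmiTagSize = 1;
  const int kPointerSizeLog2 = 2;       // 32-bit target
  const int kHeapObjectTag = 1;
  const int kFixedArrayHeaderSize = 8;  // aligned header on a 32-bit target

  // Shifting the smi left by (kPointerSizeLog2 - kSmiTagSize) scales it to a
  // byte offset: (2 * (key + 1)) << 1 == 4 * key + 4.
  int scaled = incremented_smi_key << (kPointerSizeLog2 - kSmiTagSize);

  // The displacement strips the heap-object tag and cancels the extra
  // (1 << kSmiTagSize) * 2 == 4 introduced by the incremented key.
  int displacement = kFixedArrayHeaderSize - kHeapObjectTag -
      ((1 << kSmiTagSize) * 2);

  // Net result: (elements - tag) + header + 4 * key.
  return elements_reg + displacement + scaled;
}
```
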
@@ -721,7 +721,7 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) {
   __ cmp(r1, Operand(ip));
   __ b(hs, &extra);
   __ mov(r3, Operand(r2));
-  __ add(r2, r2, Operand(Array::kHeaderSize - kHeapObjectTag));
+  __ add(r2, r2, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
   __ add(r2, r2, Operand(r1, LSL, kPointerSizeLog2 - kSmiTagSize));
...
@@ -226,7 +226,7 @@ void MacroAssembler::RecordWrite(Register object, Register offset,
   // Add the page header (including remembered set), array header, and array
   // body size to the page address.
   add(object, object, Operand(Page::kObjectStartOffset
-                              + Array::kHeaderSize));
+                              + FixedArray::kHeaderSize));
   add(object, object, Operand(scratch));
   bind(&fast);
...
@@ -1187,7 +1187,7 @@ bool Heap::CreateInitialMaps() {
   set_undetectable_long_ascii_string_map(Map::cast(obj));
   Map::cast(obj)->set_is_undetectable();
-  obj = AllocateMap(BYTE_ARRAY_TYPE, Array::kAlignedSize);
+  obj = AllocateMap(BYTE_ARRAY_TYPE, ByteArray::kAlignedSize);
   if (obj->IsFailure()) return false;
   set_byte_array_map(Map::cast(obj));
...
@@ -4351,7 +4351,7 @@ void CodeGenerator::VisitArrayLiteral(ArrayLiteral* node) {
          FieldOperand(elements.reg(), JSObject::kElementsOffset));
   // Write to the indexed properties array.
-  int offset = i * kPointerSize + Array::kHeaderSize;
+  int offset = i * kPointerSize + FixedArray::kHeaderSize;
   __ mov(FieldOperand(elements.reg(), offset), prop_value.reg());
   // Update the write barrier for the array address.
@@ -6309,7 +6309,7 @@ void Reference::GetValue(TypeofState typeof_state) {
   __ mov(index.reg(), key.reg());
   __ sar(index.reg(), kSmiTagSize);
   __ cmp(index.reg(),
-         FieldOperand(elements.reg(), Array::kLengthOffset));
+         FieldOperand(elements.reg(), FixedArray::kLengthOffset));
   deferred->Branch(above_equal);
   // Load and check that the result is not the hole. We could
@@ -6323,7 +6323,7 @@ void Reference::GetValue(TypeofState typeof_state) {
   __ mov(value.reg(), Operand(elements.reg(),
                               index.reg(),
                               times_4,
-                              Array::kHeaderSize - kHeapObjectTag));
+                              FixedArray::kHeaderSize - kHeapObjectTag));
   elements.Unuse();
   index.Unuse();
   __ cmp(Operand(value.reg()), Immediate(Factory::the_hole_value()));
@@ -6495,7 +6495,7 @@ void Reference::SetValue(InitState init_state) {
   __ mov(Operand(tmp.reg(),
                  key.reg(),
                  times_2,
-                 Array::kHeaderSize - kHeapObjectTag),
+                 FixedArray::kHeaderSize - kHeapObjectTag),
          value.reg());
   __ IncrementCounter(&Counters::keyed_store_inline, 1);
...
@@ -89,7 +89,8 @@ static void GenerateDictionaryLoad(MacroAssembler* masm, Label* miss_label,
   // Compute the capacity mask.
   const int kCapacityOffset =
-      Array::kHeaderSize + StringDictionary::kCapacityIndex * kPointerSize;
+      StringDictionary::kHeaderSize +
+      StringDictionary::kCapacityIndex * kPointerSize;
   __ mov(r2, FieldOperand(r0, kCapacityOffset));
   __ shr(r2, kSmiTagSize);  // convert smi to int
   __ dec(r2);
@@ -99,7 +100,8 @@ static void GenerateDictionaryLoad(MacroAssembler* masm, Label* miss_label,
   // cover ~93% of loads from dictionaries.
   static const int kProbes = 4;
   const int kElementsStartOffset =
-      Array::kHeaderSize + StringDictionary::kElementsStartIndex * kPointerSize;
+      StringDictionary::kHeaderSize +
+      StringDictionary::kElementsStartIndex * kPointerSize;
   for (int i = 0; i < kProbes; i++) {
     // Compute the masked index: (hash + i + i * i) & mask.
     __ mov(r1, FieldOperand(name, String::kLengthOffset));
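
Both the ARM and ia32 versions of GenerateDictionaryLoad emit an unrolled form of the same quadratic probe sequence over a power-of-two table. A minimal sketch of that sequence (hypothetical helper with simplified types; the real generated code compares keys inline):

```cpp
#include <functional>

// Sketch of the probe sequence the unrolled loop generates. `mask` is
// capacity - 1, derived from the value at kCapacityOffset above; `matches`
// stands in for the key comparison performed at each probed entry.
int FindEntry(int hash, int mask, const std::function<bool(int)>& matches) {
  static const int kProbes = 4;  // per the comment above, 2 probes cover ~93%
  for (int i = 0; i < kProbes; i++) {
    // Compute the masked index: (hash + i + i * i) & mask.
    int index = (hash + i + i * i) & mask;
    if (matches(index)) return index;
  }
  return -1;  // corresponds to falling through to the miss label
}
```
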
@@ -266,7 +268,7 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
          Immediate(Factory::hash_table_map()));
   __ j(equal, &slow, not_taken);
   // Check that the key (index) is within bounds.
-  __ cmp(eax, FieldOperand(ecx, Array::kLengthOffset));
+  __ cmp(eax, FieldOperand(ecx, FixedArray::kLengthOffset));
   __ j(below, &fast, taken);
   // Slow case: Load name and receiver from stack and jump to runtime.
   __ bind(&slow);
@@ -304,7 +306,8 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
   __ jmp(&index_int);
   // Fast case: Do the load.
   __ bind(&fast);
-  __ mov(eax, Operand(ecx, eax, times_4, Array::kHeaderSize - kHeapObjectTag));
+  __ mov(eax,
+         Operand(ecx, eax, times_4, FixedArray::kHeaderSize - kHeapObjectTag));
   __ cmp(Operand(eax), Immediate(Factory::the_hole_value()));
   // In case the loaded value is the_hole we have to consult GetProperty
   // to ensure the prototype chain is searched.
@@ -422,7 +425,8 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) {
   // eax: value
   // ecx: FixedArray
   // ebx: index (as a smi)
-  __ mov(Operand(ecx, ebx, times_2, Array::kHeaderSize - kHeapObjectTag), eax);
+  __ mov(Operand(ecx, ebx, times_2, FixedArray::kHeaderSize - kHeapObjectTag),
+         eax);
   // Update write barrier for the elements array address.
   __ mov(edx, Operand(eax));
   __ RecordWrite(ecx, 0, edx, ebx);
...
@@ -79,7 +79,7 @@ static void RecordWriteHelper(MacroAssembler* masm,
   // Add the page header, array header, and array body size to the page
   // address.
   masm->add(Operand(object), Immediate(Page::kObjectStartOffset
-                                       + Array::kHeaderSize));
+                                       + FixedArray::kHeaderSize));
   masm->add(object, Operand(scratch));
@@ -199,9 +199,10 @@ void MacroAssembler::RecordWrite(Register object, int offset,
     lea(dst, Operand(object, offset));
   } else {
     // array access: calculate the destination address in the same manner as
-    // KeyedStoreIC::GenerateGeneric
-    lea(dst,
-        Operand(object, dst, times_2, Array::kHeaderSize - kHeapObjectTag));
+    // KeyedStoreIC::GenerateGeneric. Multiply a smi by 2 to get an offset
+    // into an array of words.
+    lea(dst, Operand(object, dst, times_2,
+                     FixedArray::kHeaderSize - kHeapObjectTag));
   }
   // If we are already generating a shared stub, not inlining the
   // record write code isn't going to save us any memory.
...
@@ -1718,6 +1718,10 @@ class Array: public HeapObject {
   // Layout descriptor.
   static const int kLengthOffset = HeapObject::kHeaderSize;

+ protected:
+  // No code should use the Array class directly, only its subclasses.
+  // Use the kHeaderSize of the appropriate subclass, which may be aligned.
   static const int kHeaderSize = kLengthOffset + kIntSize;
   static const int kAlignedSize = POINTER_SIZE_ALIGN(kHeaderSize);
@@ -2427,6 +2431,10 @@ class ByteArray: public Array {
   void ByteArrayVerify();
 #endif

+  // ByteArray headers are not quadword aligned.
+  static const int kHeaderSize = Array::kHeaderSize;
+  static const int kAlignedSize = Array::kAlignedSize;

  private:
   DISALLOW_IMPLICIT_CONSTRUCTORS(ByteArray);
 };
...
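
The objects.h hunk above is the core of the change: the raw header size becomes invisible outside the Array hierarchy, so every use site must pick a subclass constant. A simplified sketch of the resulting pattern (names and values are stand-ins; FixedArray's declaration is not shown in this diff, so its line below is inferred from the commit message):

```cpp
// Simplified sketch of the layout-constant pattern after this change.
class ArraySketch {
 protected:
  // Hidden from unrelated code: only subclasses may use the raw header size.
  static const int kHeaderSize = 12;                       // unaligned on x64
  static const int kAlignedSize = (kHeaderSize + 7) & ~7;  // POINTER_SIZE_ALIGN
};

class FixedArraySketch : public ArraySketch {
 public:
  // Inferred from the commit message: FixedArray exposes the aligned value.
  static const int kHeaderSize = ArraySketch::kAlignedSize;  // 16 on x64
};

class ByteArraySketch : public ArraySketch {
 public:
  // As in the hunk above: ByteArray headers are not quadword aligned.
  static const int kHeaderSize = ArraySketch::kHeaderSize;   // 12 on x64
};

// int bad = ArraySketch::kHeaderSize;      // no longer compiles: protected
int good = FixedArraySketch::kHeaderSize;   // callers must choose explicitly
```

Making the base constant protected turns the formerly silent 4-byte x64 offset bug into a compile error at every remaining use site.
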
@@ -5321,6 +5321,7 @@ void Reference::GetValue(TypeofState typeof_state) {
   // patch the map check if appropriate.
   // TODO(x64): Implement inlined loads for keyed properties.
+  // Make sure to load length field as a 32-bit quantity.
   // Comment cmnt(masm, "[ Load from keyed Property");
   RelocInfo::Mode mode = is_global
...
@@ -36,7 +36,7 @@ TEST(HeapMaps) {
   InitializeVM();
   CheckMap(Heap::meta_map(), MAP_TYPE, Map::kSize);
   CheckMap(Heap::heap_number_map(), HEAP_NUMBER_TYPE, HeapNumber::kSize);
-  CheckMap(Heap::fixed_array_map(), FIXED_ARRAY_TYPE, Array::kAlignedSize);
+  CheckMap(Heap::fixed_array_map(), FIXED_ARRAY_TYPE, FixedArray::kHeaderSize);
   CheckMap(Heap::long_string_map(), LONG_STRING_TYPE,
            SeqTwoByteString::kAlignedSize);
 }
...
@@ -86,7 +86,8 @@ TEST(Promotion) {
   v8::HandleScope sc;
   // Allocate a fixed array in the new space.
-  int array_size = (Heap::MaxObjectSizeInPagedSpace() - Array::kHeaderSize) /
+  int array_size =
+      (Heap::MaxObjectSizeInPagedSpace() - FixedArray::kHeaderSize) /
       (kPointerSize * 4);
   Object* obj = Heap::AllocateFixedArray(array_size);
   CHECK(!obj->IsFailure());
@@ -118,7 +119,7 @@ TEST(NoPromotion) {
   CHECK(Heap::CollectGarbage(0, OLD_POINTER_SPACE));
   // Allocate a big Fixed array in the new space.
-  int size = (Heap::MaxObjectSizeInPagedSpace() - Array::kHeaderSize) /
+  int size = (Heap::MaxObjectSizeInPagedSpace() - FixedArray::kHeaderSize) /
       kPointerSize;
   Object* obj = Heap::AllocateFixedArray(size);
...