Commit d300350f authored by Sigurd Schneider, committed by Commit Bot

Reland "Enable alignment checks when reading object fields"

This is a reland of 5ce68669

TBR=ishell@chromium.org

Original change's description:
> Enable alignment checks when reading object fields
>
> Drive-by: Fix alignment bugs caused by DCHECKs.
>
> Bug: v8:9264
>
> Change-Id: I0836b1d08fea2ce11d8f7929e12f303b6ae06efe
> Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/1617676
> Commit-Queue: Sigurd Schneider <sigurds@chromium.org>
> Reviewed-by: Igor Sheludko <ishell@chromium.org>
> Reviewed-by: Jakob Kummerow <jkummerow@chromium.org>
> Reviewed-by: Ulan Degenbaev <ulan@chromium.org>
> Cr-Commit-Position: refs/heads/master@{#61643}

Bug: v8:9264
Change-Id: Ice9b819cc29eec0c341f16ef35fad4867f5df85b
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/1619754
Reviewed-by: Sigurd Schneider <sigurds@chromium.org>
Reviewed-by: Jakob Kummerow <jkummerow@chromium.org>
Reviewed-by: Ulan Degenbaev <ulan@chromium.org>
Commit-Queue: Sigurd Schneider <sigurds@chromium.org>
Cr-Commit-Position: refs/heads/master@{#61664}
parent 36634adc
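
[Editor's note] Before the diff, a standalone sketch (illustrative only, not V8 source) of the two access idioms this change separates. Memory<T> and ReadUnalignedValue exist in V8 with these semantics; everything else here is simplified.

#include <cassert>
#include <cstdint>
#include <cstring>

using Address = uintptr_t;

// Aligned access: assert alignment, then dereference directly. This models
// the alignment check this change enables in V8's Memory<T>.
template <class T>
T& Memory(Address addr) {
  assert(addr % alignof(T) == 0);
  return *reinterpret_cast<T*>(addr);
}

// Unaligned access: memcpy is well-defined for any address and compiles to a
// plain load on architectures that tolerate misalignment.
template <class V>
V ReadUnalignedValue(Address p) {
  V r;
  memcpy(&r, reinterpret_cast<const void*>(p), sizeof(V));
  return r;
}

int main() {
  alignas(8) unsigned char buf[16] = {};
  Address base = reinterpret_cast<Address>(buf);
  Memory<uint64_t>(base) = 42;                          // aligned: OK
  uint64_t v = ReadUnalignedValue<uint64_t>(base + 4);  // misaligned: still OK
  (void)v;
  return 0;
}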
@@ -5690,7 +5690,7 @@ void VerifySmisVisitor::VisitRootPointers(Root root, const char* description,
   }
 }
 
-bool Heap::AllowedToBeMigrated(HeapObject obj, AllocationSpace dst) {
+bool Heap::AllowedToBeMigrated(Map map, HeapObject obj, AllocationSpace dst) {
   // Object migration is governed by the following rules:
   //
   // 1) Objects in new-space can be migrated to the old space
@@ -5703,8 +5703,8 @@ bool Heap::AllowedToBeMigrated(HeapObject obj, AllocationSpace dst) {
   //
   // Since this function is used for debugging only, we do not place
   // asserts here, but check everything explicitly.
-  if (obj->map() == ReadOnlyRoots(this).one_pointer_filler_map()) return false;
-  InstanceType type = obj->map()->instance_type();
+  if (map == ReadOnlyRoots(this).one_pointer_filler_map()) return false;
+  InstanceType type = map->instance_type();
   MemoryChunk* chunk = MemoryChunk::FromHeapObject(obj);
   AllocationSpace src = chunk->owner()->identity();
   switch (src) {
...
@@ -464,7 +464,7 @@ class Heap {
   // Checks whether the given object is allowed to be migrated from it's
   // current space into the given destination space. Used for debugging.
-  bool AllowedToBeMigrated(HeapObject object, AllocationSpace dest);
+  bool AllowedToBeMigrated(Map map, HeapObject object, AllocationSpace dest);
 
   void CheckHandleCount();
...
@@ -1231,7 +1231,7 @@ class EvacuateVisitorBase : public HeapObjectVisitor {
                                HeapObject src, int size, AllocationSpace dest) {
     Address dst_addr = dst->address();
     Address src_addr = src->address();
-    DCHECK(base->heap_->AllowedToBeMigrated(src, dest));
+    DCHECK(base->heap_->AllowedToBeMigrated(src->map(), src, dest));
     DCHECK_NE(dest, LO_SPACE);
     DCHECK_NE(dest, CODE_LO_SPACE);
     if (dest == OLD_SPACE) {
...
@@ -135,7 +135,7 @@ CopyAndForwardResult Scavenger::SemiSpaceCopyObject(
   static_assert(std::is_same<THeapObjectSlot, FullHeapObjectSlot>::value ||
                     std::is_same<THeapObjectSlot, HeapObjectSlot>::value,
                 "Only FullHeapObjectSlot and HeapObjectSlot are expected here");
-  DCHECK(heap()->AllowedToBeMigrated(object, NEW_SPACE));
+  DCHECK(heap()->AllowedToBeMigrated(map, object, NEW_SPACE));
   AllocationAlignment alignment = HeapObject::RequiredAlignment(map);
   AllocationResult allocation =
       allocator_.Allocate(NEW_SPACE, object_size, alignment);
...
@@ -463,7 +463,6 @@ void Scavenger::Process(OneshotBarrier* barrier) {
       struct PromotionListEntry entry;
       while (promotion_list_.Pop(&entry)) {
         HeapObject target = entry.heap_object;
-        DCHECK(!target->IsMap());
         IterateAndScavengePromotedObject(target, entry.map, entry.size);
         done = false;
         if (have_barrier && ((++objects % kInterruptThreshold) == 0)) {
...
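[Editor's note] The recurring pattern in the hunks above is that callers now pass the Map they already loaded, instead of letting the callee re-read obj->map(). A minimal sketch (not V8 code; the forwarding-tag scheme is invented for illustration) of why that matters during evacuation:

#include <cassert>
#include <cstdint>

struct Map { int instance_type; };

struct HeapObject {
  // First word: either a Map*, or, mid-evacuation, a forwarding address
  // with its low bit tagged (scheme chosen for this sketch only).
  uintptr_t map_word;
  bool IsForwarded() const { return map_word & 1; }
  Map* map() const {
    assert(!IsForwarded());  // racy if the evacuator runs concurrently
    return reinterpret_cast<Map*>(map_word);
  }
};

// Before: re-reads the map word and can observe a forwarding pointer.
bool AllowedToBeMigratedOld(HeapObject* obj) { return obj->map() != nullptr; }

// After: the caller passes the Map it already loaded exactly once.
bool AllowedToBeMigrated(Map* map, HeapObject* obj) {
  return map != nullptr && obj != nullptr;
}

int main() {
  Map m{1};
  HeapObject o{reinterpret_cast<uintptr_t>(&m)};
  Map* map = o.map();                    // single load of the map word
  assert(AllowedToBeMigrated(map, &o));  // safe even if o is forwarded later
  return 0;
}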
@@ -66,8 +66,14 @@ CAST_ACCESSOR(WeakArrayList)
 SYNCHRONIZED_SMI_ACCESSORS(FixedArrayBase, length, kLengthOffset)
 
 int FixedArrayBase::length() const {
-  DCHECK(!IsInRange(map()->instance_type(), FIRST_FIXED_TYPED_ARRAY_TYPE,
-                    LAST_FIXED_TYPED_ARRAY_TYPE));
+#ifdef DEBUG
+  // Only read the map word once to avoid race with evacuator.
+  MapWord mw = map_word();
+  if (!mw.IsForwardingAddress()) {
+    DCHECK(!IsInRange(mw.ToMap()->instance_type(), FIRST_FIXED_TYPED_ARRAY_TYPE,
+                      LAST_FIXED_TYPED_ARRAY_TYPE));
+  }
+#endif
   Object value = READ_FIELD(*this, kLengthOffset);
   return Smi::ToInt(value);
 }
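[Editor's note] A simplified model (not V8 source; the type constants and forwarding tag are hypothetical, and V8 compiles the check only in DEBUG builds) of the read-the-map-word-once pattern introduced in this accessor:

#include <cassert>
#include <cstdint>

struct Map { int instance_type; };

// Hypothetical type range; V8 uses FIRST/LAST_FIXED_TYPED_ARRAY_TYPE.
constexpr int kFirstTypedArrayType = 100;
constexpr int kLastTypedArrayType = 110;

struct FixedArrayBase {
  uintptr_t map_word;  // Map*, or a low-bit-tagged forwarding address (sketch)
  int length_field;

  int length() const {
    // Read the map word exactly once: a second load could observe a
    // forwarding pointer installed by a concurrent evacuator.
    uintptr_t mw = map_word;
    if (!(mw & 1)) {  // only interpret it as a Map if it is not forwarded
      int type = reinterpret_cast<Map*>(mw)->instance_type;
      assert(type < kFirstTypedArrayType || type > kLastTypedArrayType);
    }
    return length_field;  // the length itself is safe to read either way
  }
};

int main() {
  Map m{1};
  FixedArrayBase a{reinterpret_cast<uintptr_t>(&m), 3};
  assert(a.length() == 3);
  return 0;
}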
@@ -360,7 +366,8 @@ uint64_t FixedDoubleArray::get_representation(int index) {
          map() != GetReadOnlyRoots().fixed_array_map());
   DCHECK(index >= 0 && index < this->length());
   int offset = kHeaderSize + index * kDoubleSize;
-  return READ_UINT64_FIELD(*this, offset);
+  // Bug(v8:8875): Doubles may be unaligned.
+  return ReadUnalignedValue<uint64_t>(FIELD_ADDR(*this, offset));
 }
 
 Handle<Object> FixedDoubleArray::get(FixedDoubleArray array, int index,
...
@@ -32,7 +32,8 @@ void HeapNumberBase::set_value(double value) {
 }
 
 uint64_t HeapNumberBase::value_as_bits() const {
-  return READ_UINT64_FIELD(*this, kValueOffset);
+  // Bug(v8:8875): HeapNumber's double may be unaligned.
+  return ReadUnalignedValue<uint64_t>(FIELD_ADDR(*this, kValueOffset));
 }
 
 void HeapNumberBase::set_value_as_bits(uint64_t bits) {
...
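[Editor's note] Both of the preceding hunks switch an 8-byte field read to ReadUnalignedValue. A self-contained sketch of the idiom; the 4-byte offset stands in for layouts (e.g. with pointer compression) where a double field is only 4-byte aligned:

#include <cstdint>
#include <cstdio>
#include <cstring>

template <class V>
V ReadUnalignedValue(const void* p) {
  V r;
  // memcpy avoids undefined behaviour; compilers lower it to a plain load
  // where the hardware permits.
  memcpy(&r, p, sizeof(V));
  return r;
}

int main() {
  alignas(8) unsigned char object[16] = {};
  double pi = 3.14159;
  memcpy(object + 4, &pi, sizeof(pi));  // value field at a 4-byte offset
  uint64_t bits = ReadUnalignedValue<uint64_t>(object + 4);
  printf("%016llx\n", static_cast<unsigned long long>(bits));
  return 0;
}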
@@ -336,13 +336,14 @@
   }                                                       \
   } while (false)
 
-#define READ_DOUBLE_FIELD(p, offset) ReadDoubleValue(FIELD_ADDR(p, offset))
+// BUG(v8:8875): Double fields may be unaligned.
+#define READ_DOUBLE_FIELD(p, offset) \
+  ReadUnalignedValue<double>(FIELD_ADDR(p, offset))
 
 #define WRITE_DOUBLE_FIELD(p, offset, value) \
-  WriteDoubleValue(FIELD_ADDR(p, offset), value)
+  WriteUnalignedValue<double>(FIELD_ADDR(p, offset), value)
 
-#define READ_INT_FIELD(p, offset) \
-  (*reinterpret_cast<const int*>(FIELD_ADDR(p, offset)))
+#define READ_INT_FIELD(p, offset) (Memory<const int>(FIELD_ADDR(p, offset)))
 
 #define WRITE_INT_FIELD(p, offset, value) \
   (*reinterpret_cast<int*>(FIELD_ADDR(p, offset)) = value)
@@ -352,7 +353,7 @@
       reinterpret_cast<const base::Atomic32*>(FIELD_ADDR(p, offset))))
 
 #define READ_UINT8_FIELD(p, offset) \
-  (*reinterpret_cast<const uint8_t*>(FIELD_ADDR(p, offset)))
+  (Memory<const uint8_t>(FIELD_ADDR(p, offset)))
 
 #define WRITE_UINT8_FIELD(p, offset, value) \
   (*reinterpret_cast<uint8_t*>(FIELD_ADDR(p, offset)) = value)
@@ -361,8 +362,7 @@
   base::Relaxed_Store(reinterpret_cast<base::Atomic8*>(FIELD_ADDR(p, offset)), \
                       static_cast<base::Atomic8>(value));
 
-#define READ_INT8_FIELD(p, offset) \
-  (*reinterpret_cast<const int8_t*>(FIELD_ADDR(p, offset)))
+#define READ_INT8_FIELD(p, offset) (Memory<const int8_t>(FIELD_ADDR(p, offset)))
 
 #define RELAXED_READ_INT8_FIELD(p, offset) \
   static_cast<int8_t>(base::Relaxed_Load( \
@@ -372,13 +372,13 @@
   (*reinterpret_cast<int8_t*>(FIELD_ADDR(p, offset)) = value)
 
 #define READ_UINT16_FIELD(p, offset) \
-  (*reinterpret_cast<const uint16_t*>(FIELD_ADDR(p, offset)))
+  (Memory<const uint16_t>(FIELD_ADDR(p, offset)))
 
 #define WRITE_UINT16_FIELD(p, offset, value) \
   (*reinterpret_cast<uint16_t*>(FIELD_ADDR(p, offset)) = value)
 
 #define READ_INT16_FIELD(p, offset) \
-  (*reinterpret_cast<const int16_t*>(FIELD_ADDR(p, offset)))
+  (Memory<const int16_t>(FIELD_ADDR(p, offset)))
 
 #define WRITE_INT16_FIELD(p, offset, value) \
   (*reinterpret_cast<int16_t*>(FIELD_ADDR(p, offset)) = value)
@@ -392,8 +392,7 @@
       reinterpret_cast<base::Atomic16*>(FIELD_ADDR(p, offset)), \
       static_cast<base::Atomic16>(value));
 
-#define READ_UINT32_FIELD(p, offset) \
-  (*reinterpret_cast<const uint32_t*>(FIELD_ADDR(p, offset)))
+#define READ_UINT32_FIELD(p, offset) (Memory<uint32_t>(FIELD_ADDR(p, offset)))
 
 #define RELAXED_READ_UINT32_FIELD(p, offset) \
   static_cast<uint32_t>(base::Relaxed_Load( \
@@ -407,8 +406,7 @@
       reinterpret_cast<base::Atomic32*>(FIELD_ADDR(p, offset)), \
       static_cast<base::Atomic32>(value));
 
-#define READ_INT32_FIELD(p, offset) \
-  (*reinterpret_cast<const int32_t*>(FIELD_ADDR(p, offset)))
+#define READ_INT32_FIELD(p, offset) (Memory<int32_t>(FIELD_ADDR(p, offset)))
 
 #define RELAXED_READ_INT32_FIELD(p, offset) \
   static_cast<int32_t>(base::Relaxed_Load( \
@@ -427,8 +425,7 @@
       reinterpret_cast<base::Atomic32*>(FIELD_ADDR(p, offset)), \
       static_cast<base::Atomic32>(value));
 
-#define READ_FLOAT_FIELD(p, offset) \
-  (*reinterpret_cast<const float*>(FIELD_ADDR(p, offset)))
+#define READ_FLOAT_FIELD(p, offset) (Memory<const float>(FIELD_ADDR(p, offset)))
 
 #define WRITE_FLOAT_FIELD(p, offset, value) \
   (*reinterpret_cast<float*>(FIELD_ADDR(p, offset)) = value)
@@ -459,28 +456,24 @@
 #else  // V8_COMPRESS_POINTERS
 
-#define READ_INTPTR_FIELD(p, offset) \
-  (*reinterpret_cast<const intptr_t*>(FIELD_ADDR(p, offset)))
+#define READ_INTPTR_FIELD(p, offset) (Memory<intptr_t>(FIELD_ADDR(p, offset)))
 
 #define WRITE_INTPTR_FIELD(p, offset, value) \
   (*reinterpret_cast<intptr_t*>(FIELD_ADDR(p, offset)) = value)
 
-#define READ_UINTPTR_FIELD(p, offset) \
-  (*reinterpret_cast<const uintptr_t*>(FIELD_ADDR(p, offset)))
+#define READ_UINTPTR_FIELD(p, offset) (Memory<uintptr_t>(FIELD_ADDR(p, offset)))
 
 #define WRITE_UINTPTR_FIELD(p, offset, value) \
   (*reinterpret_cast<uintptr_t*>(FIELD_ADDR(p, offset)) = value)
 
-#define READ_UINT64_FIELD(p, offset) \
-  (*reinterpret_cast<const uint64_t*>(FIELD_ADDR(p, offset)))
+#define READ_UINT64_FIELD(p, offset) (Memory<uint64_t>(FIELD_ADDR(p, offset)))
 
 #define WRITE_UINT64_FIELD(p, offset, value) \
   (*reinterpret_cast<uint64_t*>(FIELD_ADDR(p, offset)) = value)
 
 #endif  // V8_COMPRESS_POINTERS
 
-#define READ_BYTE_FIELD(p, offset) \
-  (*reinterpret_cast<const byte*>(FIELD_ADDR(p, offset)))
+#define READ_BYTE_FIELD(p, offset) (Memory<byte>(FIELD_ADDR(p, offset)))
 
 #define RELAXED_READ_BYTE_FIELD(p, offset) \
   static_cast<byte>(base::Relaxed_Load( \
...
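[Editor's note] What the macro migration above buys, in sketch form (standalone, not V8 source): routing READ_*_FIELD through a checked Memory<T> helper makes every aligned field read assert its alignment in debug builds, instead of silently dereferencing a reinterpret_cast pointer.

#include <cassert>
#include <cstdint>

using Address = uintptr_t;

template <class T>
inline T& Memory(Address addr) {
  assert(addr % alignof(T) == 0);  // stand-in for V8's DCHECK(IsAligned(...))
  return *reinterpret_cast<T*>(addr);
}

#define FIELD_ADDR(p, offset) ((p) + (offset))
// Old style: silent on misalignment.
#define READ_INT32_FIELD_OLD(p, offset) \
  (*reinterpret_cast<const int32_t*>(FIELD_ADDR(p, offset)))
// New style: asserts alignment before dereferencing.
#define READ_INT32_FIELD(p, offset) (Memory<int32_t>(FIELD_ADDR(p, offset)))

int main() {
  alignas(8) unsigned char buf[16] = {};
  Address p = reinterpret_cast<Address>(buf);
  Memory<int32_t>(p + 4) = 7;
  assert(READ_INT32_FIELD(p, 4) == 7);  // aligned: check passes
  // READ_INT32_FIELD(p, 2) would fire the assert in a debug build.
  return 0;
}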
@@ -5,6 +5,7 @@
 #ifndef V8_V8MEMORY_H_
 #define V8_V8MEMORY_H_
 
+#include "src/base/macros.h"
 #include "src/globals.h"
 
 namespace v8 {
@@ -15,13 +16,12 @@ namespace internal {
 // Note that this class currently relies on undefined behaviour. There is a
 // proposal (http://wg21.link/p0593r2) to make it defined behaviour though.
 template <class T>
-T& Memory(Address addr) {
-  // {addr} must be aligned.
-  DCHECK_EQ(0, addr & (alignof(T) - 1));
+inline T& Memory(Address addr) {
+  DCHECK(IsAligned(addr, alignof(T)));
   return *reinterpret_cast<T*>(addr);
 }
 template <class T>
-T& Memory(byte* addr) {
+inline T& Memory(byte* addr) {
   return Memory<T>(reinterpret_cast<Address>(addr));
 }
@@ -39,18 +39,6 @@ static inline void WriteUnalignedValue(Address p, V value) {
   memcpy(reinterpret_cast<void*>(p), &value, sizeof(V));
 }
 
-static inline double ReadFloatValue(Address p) {
-  return ReadUnalignedValue<float>(p);
-}
-
-static inline double ReadDoubleValue(Address p) {
-  return ReadUnalignedValue<double>(p);
-}
-
-static inline void WriteDoubleValue(Address p, double value) {
-  WriteUnalignedValue(p, value);
-}
-
 static inline uint16_t ReadUnalignedUInt16(Address p) {
   return ReadUnalignedValue<uint16_t>(p);
 }
...
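[Editor's note] With ReadFloatValue/ReadDoubleValue/WriteDoubleValue deleted, call sites spell out ReadUnalignedValue<T>/WriteUnalignedValue<T> directly, so the (un)alignment assumption is visible at each use. A minimal sketch of the surviving memcpy-based helpers:

#include <cassert>
#include <cstdint>
#include <cstring>

using Address = uintptr_t;

template <class V>
V ReadUnalignedValue(Address p) {
  V r;
  memcpy(&r, reinterpret_cast<const void*>(p), sizeof(V));
  return r;
}

template <class V>
void WriteUnalignedValue(Address p, V value) {
  memcpy(reinterpret_cast<void*>(p), &value, sizeof(V));
}

int main() {
  alignas(8) unsigned char buf[16] = {};
  Address p = reinterpret_cast<Address>(buf) + 4;  // deliberately only 4-aligned
  WriteUnalignedValue<double>(p, 2.5);             // was: WriteDoubleValue(p, 2.5)
  assert(ReadUnalignedValue<double>(p) == 2.5);    // was: ReadDoubleValue(p)
  return 0;
}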