Commit e10e149b authored by Peter Marshall, committed by Commit Bot

[debug] Convert some slow dchecks to dchecks

For bounds accesses, we definitely want to see these dchecks fire in
all configurations.

Change-Id: I54516794e32aaf0b00a2b1d88ffaf449f5a20b7b
Reviewed-on: https://chromium-review.googlesource.com/1109835
Commit-Queue: Peter Marshall <petermarshall@chromium.org>
Reviewed-by: Jaroslav Sevcik <jarin@chromium.org>
Reviewed-by: Jakob Gruber <jgruber@chromium.org>
Cr-Commit-Position: refs/heads/master@{#53923}
parent 1eef0262
...@@ -1882,9 +1882,9 @@ TNode<MaybeObject> CodeStubAssembler::LoadArrayElement( ...@@ -1882,9 +1882,9 @@ TNode<MaybeObject> CodeStubAssembler::LoadArrayElement(
SloppyTNode<HeapObject> array, int array_header_size, Node* index_node, SloppyTNode<HeapObject> array, int array_header_size, Node* index_node,
int additional_offset, ParameterMode parameter_mode, int additional_offset, ParameterMode parameter_mode,
LoadSensitivity needs_poisoning) { LoadSensitivity needs_poisoning) {
CSA_SLOW_ASSERT(this, IntPtrGreaterThanOrEqual( CSA_ASSERT(this, IntPtrGreaterThanOrEqual(
ParameterToIntPtr(index_node, parameter_mode), ParameterToIntPtr(index_node, parameter_mode),
IntPtrConstant(0))); IntPtrConstant(0)));
DCHECK_EQ(additional_offset % kPointerSize, 0); DCHECK_EQ(additional_offset % kPointerSize, 0);
int32_t header_size = array_header_size + additional_offset - kHeapObjectTag; int32_t header_size = array_header_size + additional_offset - kHeapObjectTag;
TNode<IntPtrT> offset = ElementOffsetFromIndex(index_node, HOLEY_ELEMENTS, TNode<IntPtrT> offset = ElementOffsetFromIndex(index_node, HOLEY_ELEMENTS,
...@@ -2228,11 +2228,11 @@ TNode<Int32T> CodeStubAssembler::LoadAndUntagToWord32ArrayElement( ...@@ -2228,11 +2228,11 @@ TNode<Int32T> CodeStubAssembler::LoadAndUntagToWord32ArrayElement(
STATIC_ASSERT(FixedArrayBase::kLengthOffset == WeakFixedArray::kLengthOffset); STATIC_ASSERT(FixedArrayBase::kLengthOffset == WeakFixedArray::kLengthOffset);
// Check that index_node + additional_offset <= object.length. // Check that index_node + additional_offset <= object.length.
// TODO(cbruni): Use proper LoadXXLength helpers // TODO(cbruni): Use proper LoadXXLength helpers
CSA_SLOW_ASSERT( CSA_ASSERT(this,
this, IsOffsetInBounds( IsOffsetInBounds(
offset, offset,
LoadAndUntagObjectField(object, FixedArrayBase::kLengthOffset), LoadAndUntagObjectField(object, FixedArrayBase::kLengthOffset),
FixedArray::kHeaderSize + endian_correction)); FixedArray::kHeaderSize + endian_correction));
if (SmiValuesAre32Bits()) { if (SmiValuesAre32Bits()) {
return UncheckedCast<Int32T>(Load(MachineType::Int32(), object, offset)); return UncheckedCast<Int32T>(Load(MachineType::Int32(), object, offset));
} else { } else {
...@@ -2267,9 +2267,8 @@ TNode<Float64T> CodeStubAssembler::LoadFixedDoubleArrayElement( ...@@ -2267,9 +2267,8 @@ TNode<Float64T> CodeStubAssembler::LoadFixedDoubleArrayElement(
FixedDoubleArray::kHeaderSize + additional_offset - kHeapObjectTag; FixedDoubleArray::kHeaderSize + additional_offset - kHeapObjectTag;
TNode<IntPtrT> offset = ElementOffsetFromIndex( TNode<IntPtrT> offset = ElementOffsetFromIndex(
index_node, HOLEY_DOUBLE_ELEMENTS, parameter_mode, header_size); index_node, HOLEY_DOUBLE_ELEMENTS, parameter_mode, header_size);
CSA_SLOW_ASSERT( CSA_ASSERT(this, IsOffsetInBounds(
this, offset, LoadAndUntagFixedArrayBaseLength(object),
IsOffsetInBounds(offset, LoadAndUntagFixedArrayBaseLength(object),
FixedDoubleArray::kHeaderSize, HOLEY_DOUBLE_ELEMENTS)); FixedDoubleArray::kHeaderSize, HOLEY_DOUBLE_ELEMENTS));
return LoadDoubleWithHoleCheck(object, offset, if_hole, machine_type); return LoadDoubleWithHoleCheck(object, offset, if_hole, machine_type);
} }
...@@ -4164,7 +4163,7 @@ Node* CodeStubAssembler::LoadElementAndPrepareForStore(Node* array, ...@@ -4164,7 +4163,7 @@ Node* CodeStubAssembler::LoadElementAndPrepareForStore(Node* array,
ElementsKind from_kind, ElementsKind from_kind,
ElementsKind to_kind, ElementsKind to_kind,
Label* if_hole) { Label* if_hole) {
CSA_SLOW_ASSERT(this, IsFixedArrayWithKind(array, from_kind)); CSA_ASSERT(this, IsFixedArrayWithKind(array, from_kind));
if (IsDoubleElementsKind(from_kind)) { if (IsDoubleElementsKind(from_kind)) {
Node* value = Node* value =
LoadDoubleWithHoleCheck(array, offset, if_hole, MachineType::Float64()); LoadDoubleWithHoleCheck(array, offset, if_hole, MachineType::Float64());
......
...@@ -56,7 +56,7 @@ bool FixedArray::ContainsOnlySmisOrHoles() { ...@@ -56,7 +56,7 @@ bool FixedArray::ContainsOnlySmisOrHoles() {
} }
Object* FixedArray::get(int index) const { Object* FixedArray::get(int index) const {
SLOW_DCHECK(index >= 0 && index < this->length()); DCHECK(index >= 0 && index < this->length());
return RELAXED_READ_FIELD(this, kHeaderSize + index * kPointerSize); return RELAXED_READ_FIELD(this, kHeaderSize + index * kPointerSize);
} }
...@@ -221,7 +221,7 @@ void FixedDoubleArray::FillWithHoles(int from, int to) { ...@@ -221,7 +221,7 @@ void FixedDoubleArray::FillWithHoles(int from, int to) {
} }
MaybeObject* WeakFixedArray::Get(int index) const { MaybeObject* WeakFixedArray::Get(int index) const {
SLOW_DCHECK(index >= 0 && index < this->length()); DCHECK(index >= 0 && index < this->length());
return RELAXED_READ_WEAK_FIELD(this, OffsetOfElementAt(index)); return RELAXED_READ_WEAK_FIELD(this, OffsetOfElementAt(index));
} }
...@@ -255,7 +255,7 @@ MaybeObject** WeakFixedArray::GetFirstElementAddress() { ...@@ -255,7 +255,7 @@ MaybeObject** WeakFixedArray::GetFirstElementAddress() {
} }
MaybeObject* WeakArrayList::Get(int index) const { MaybeObject* WeakArrayList::Get(int index) const {
SLOW_DCHECK(index >= 0 && index < this->capacity()); DCHECK(index >= 0 && index < this->capacity());
return RELAXED_READ_WEAK_FIELD(this, OffsetOfElementAt(index)); return RELAXED_READ_WEAK_FIELD(this, OffsetOfElementAt(index));
} }
...@@ -406,12 +406,12 @@ byte* ByteArray::GetDataStartAddress() { ...@@ -406,12 +406,12 @@ byte* ByteArray::GetDataStartAddress() {
template <class T> template <class T>
PodArray<T>* PodArray<T>::cast(Object* object) { PodArray<T>* PodArray<T>::cast(Object* object) {
SLOW_DCHECK(object->IsByteArray()); DCHECK(object->IsByteArray());
return reinterpret_cast<PodArray<T>*>(object); return reinterpret_cast<PodArray<T>*>(object);
} }
template <class T> template <class T>
const PodArray<T>* PodArray<T>::cast(const Object* object) { const PodArray<T>* PodArray<T>::cast(const Object* object) {
SLOW_DCHECK(object->IsByteArray()); DCHECK(object->IsByteArray());
return reinterpret_cast<const PodArray<T>*>(object); return reinterpret_cast<const PodArray<T>*>(object);
} }
...@@ -762,18 +762,18 @@ STATIC_CONST_MEMBER_DEFINITION const InstanceType ...@@ -762,18 +762,18 @@ STATIC_CONST_MEMBER_DEFINITION const InstanceType
template <class Traits> template <class Traits>
FixedTypedArray<Traits>* FixedTypedArray<Traits>::cast(Object* object) { FixedTypedArray<Traits>* FixedTypedArray<Traits>::cast(Object* object) {
SLOW_DCHECK(object->IsHeapObject() && DCHECK(object->IsHeapObject() &&
HeapObject::cast(object)->map()->instance_type() == HeapObject::cast(object)->map()->instance_type() ==
Traits::kInstanceType); Traits::kInstanceType);
return reinterpret_cast<FixedTypedArray<Traits>*>(object); return reinterpret_cast<FixedTypedArray<Traits>*>(object);
} }
template <class Traits> template <class Traits>
const FixedTypedArray<Traits>* FixedTypedArray<Traits>::cast( const FixedTypedArray<Traits>* FixedTypedArray<Traits>::cast(
const Object* object) { const Object* object) {
SLOW_DCHECK(object->IsHeapObject() && DCHECK(object->IsHeapObject() &&
HeapObject::cast(object)->map()->instance_type() == HeapObject::cast(object)->map()->instance_type() ==
Traits::kInstanceType); Traits::kInstanceType);
return reinterpret_cast<FixedTypedArray<Traits>*>(object); return reinterpret_cast<FixedTypedArray<Traits>*>(object);
} }
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment