Commit 74b8ef6c authored by ivica.bogosavljevic, committed by Commit bot

MIPS: Fix `[builtins] Reland of Port TypedArrayInitialize to CodeStubAssembler.`

Fix ff8b1abb

This fixes a problem with the alignment of typed arrays in TurboFan. Namely,
Float64 typed arrays weren't properly aligned on 32-bit architectures, which
caused crashes on architectures that do not support misaligned memory access.

TEST=mjsunit/es6/typedarray-*
BUG=v8:6075

Review-Url: https://codereview.chromium.org/2784253002
Cr-Commit-Position: refs/heads/master@{#44366}
parent 45e3c56d
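
As background for the diff below: on 32-bit hosts the new-space bump pointer is only guaranteed kPointerSize (4-byte) alignment, so the backing store of a Float64 typed array could start at an address of the form 8k+4, and MIPS faults on 8-byte loads and stores at such addresses. The patch therefore reserves one extra word when the allocation top is misaligned and fills the gap with a one-pointer filler object before returning the now 8-byte-aligned start. The following is a minimal standalone C++ sketch of that padding logic, not the CodeStubAssembler code itself; AllocateAligned and the constants here are illustrative names, not V8 API.

#include <cassert>
#include <cstdint>
#include <cstdio>

// Illustrative constants; the real values come from V8's heap configuration.
constexpr uintptr_t kPointerSize = 4;      // word size on a 32-bit host
constexpr uintptr_t kDoubleAlignment = 8;  // required for Float64 accesses
constexpr uintptr_t kDoubleAlignmentMask = kDoubleAlignment - 1;

// Bump-pointer allocation with optional double alignment: if the current top
// is not 8-byte aligned, reserve one extra word (where a filler object would
// be written) and return the aligned address after it.
uintptr_t AllocateAligned(uintptr_t* top, uintptr_t limit, uintptr_t size,
                          bool needs_double_alignment) {
  uintptr_t adjusted_size = size;
  if (needs_double_alignment && (*top & kDoubleAlignmentMask) != 0) {
    adjusted_size += kPointerSize;  // room for the one-word filler
  }
  if (*top + adjusted_size > limit) return 0;  // would fall back to the runtime

  uintptr_t result = *top;
  *top += adjusted_size;
  if (adjusted_size != size) {
    result += kPointerSize;  // skip the filler; the object starts here
  }
  assert(!needs_double_alignment || (result & kDoubleAlignmentMask) == 0);
  return result;
}

int main() {
  uintptr_t top = 0x1004;  // deliberately not 8-byte aligned
  uintptr_t p = AllocateAligned(&top, 0x2000, 16, true);
  std::printf("allocated at 0x%llx, new top 0x%llx\n",
              (unsigned long long)p, (unsigned long long)top);  // 0x1008, 0x1018
}

In the actual change this logic lives in CodeStubAssembler::AllocateRaw, and the filler word is stamped with Heap::kOnePointerFillerMapRootIndex so the heap stays iterable.
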
@@ -214,7 +214,7 @@ void TypedArrayBuiltinsAssembler::DoInitialize(Node* const holder, Node* length,
   // Allocate a FixedTypedArray and set the length, base pointer and external
   // pointer.
   CSA_ASSERT(this, IsRegularHeapObjectSize(total_size.value()));
-  Node* elements = Allocate(total_size.value());
+  Node* elements = AllocateInNewSpace(total_size.value(), kDoubleAlignment);
   StoreMapNoWriteBarrier(elements, fixed_typed_map.value());
   StoreObjectFieldNoWriteBarrier(elements, FixedArray::kLengthOffset, length);
...
@@ -680,10 +680,8 @@ void CodeStubAssembler::BranchIfFastJSArray(
   BranchIfPrototypesHaveNoElements(map, if_true, if_false);
 }

-Node* CodeStubAssembler::AllocateRawUnaligned(Node* size_in_bytes,
-                                              AllocationFlags flags,
-                                              Node* top_address,
-                                              Node* limit_address) {
+Node* CodeStubAssembler::AllocateRaw(Node* size_in_bytes, AllocationFlags flags,
+                                     Node* top_address, Node* limit_address) {
   Node* top = Load(MachineType::Pointer(), top_address);
   Node* limit = Load(MachineType::Pointer(), limit_address);
@@ -692,12 +690,14 @@ Node* CodeStubAssembler::AllocateRawUnaligned(Node* size_in_bytes,
   Label runtime_call(this, Label::kDeferred), no_runtime_call(this);
   Label merge_runtime(this, &result);

+  bool needs_double_alignment = flags & kDoubleAlignment;
+
   if (flags & kAllowLargeObjectAllocation) {
     Label next(this);
     GotoIf(IsRegularHeapObjectSize(size_in_bytes), &next);

     Node* runtime_flags = SmiConstant(
-        Smi::FromInt(AllocateDoubleAlignFlag::encode(false) |
+        Smi::FromInt(AllocateDoubleAlignFlag::encode(needs_double_alignment) |
                      AllocateTargetSpace::encode(AllocationSpace::LO_SPACE)));
     Node* const runtime_result =
         CallRuntime(Runtime::kAllocateInTargetSpace, NoContextConstant(),
@@ -708,7 +708,25 @@ Node* CodeStubAssembler::AllocateRawUnaligned(Node* size_in_bytes,
     Bind(&next);
   }

-  Node* new_top = IntPtrAdd(top, size_in_bytes);
+  Variable adjusted_size(this, MachineType::PointerRepresentation(),
+                         size_in_bytes);
+  if (needs_double_alignment) {
+    Label not_aligned(this), done_alignment(this, &adjusted_size);
+    Branch(WordAnd(top, IntPtrConstant(kDoubleAlignmentMask)), &not_aligned,
+           &done_alignment);
+
+    Bind(&not_aligned);
+    Node* not_aligned_size = IntPtrAdd(size_in_bytes, IntPtrConstant(4));
+    adjusted_size.Bind(not_aligned_size);
+    Goto(&done_alignment);
+
+    Bind(&done_alignment);
+  }
+
+  Node* new_top = IntPtrAdd(top, adjusted_size.value());
+
   Branch(UintPtrGreaterThanOrEqual(new_top, limit), &runtime_call,
          &no_runtime_call);
@@ -716,7 +734,7 @@ Node* CodeStubAssembler::AllocateRawUnaligned(Node* size_in_bytes,
   Node* runtime_result;
   if (flags & kPretenured) {
     Node* runtime_flags = SmiConstant(
-        Smi::FromInt(AllocateDoubleAlignFlag::encode(false) |
+        Smi::FromInt(AllocateDoubleAlignFlag::encode(needs_double_alignment) |
                      AllocateTargetSpace::encode(AllocationSpace::OLD_SPACE)));
     runtime_result =
         CallRuntime(Runtime::kAllocateInTargetSpace, NoContextConstant(),
@@ -733,57 +751,58 @@ Node* CodeStubAssembler::AllocateRawUnaligned(Node* size_in_bytes,
   Node* no_runtime_result = top;
   StoreNoWriteBarrier(MachineType::PointerRepresentation(), top_address,
                       new_top);
-  no_runtime_result = BitcastWordToTagged(
-      IntPtrAdd(no_runtime_result, IntPtrConstant(kHeapObjectTag)));
-  result.Bind(no_runtime_result);
-  Goto(&merge_runtime);

-  Bind(&merge_runtime);
-  return result.value();
-}
+  Variable address(this, MachineType::PointerRepresentation(),
+                   no_runtime_result);

-Node* CodeStubAssembler::AllocateRawAligned(Node* size_in_bytes,
-                                            AllocationFlags flags,
-                                            Node* top_address,
-                                            Node* limit_address) {
-  Node* top = Load(MachineType::Pointer(), top_address);
-  Variable adjusted_size(this, MachineType::PointerRepresentation(),
-                         size_in_bytes);
-  if (flags & kDoubleAlignment) {
-    Label not_aligned(this), done_alignment(this, &adjusted_size);
-    Branch(WordAnd(top, IntPtrConstant(kDoubleAlignmentMask)), &not_aligned,
-           &done_alignment);
+  if (needs_double_alignment) {
+    Label needs_filler(this), done_filling(this, &address);
+    Branch(IntPtrEqual(adjusted_size.value(), size_in_bytes), &done_filling,
+           &needs_filler);

-    Bind(&not_aligned);
-    Node* not_aligned_size =
-        IntPtrAdd(size_in_bytes, IntPtrConstant(kPointerSize));
-    adjusted_size.Bind(not_aligned_size);
-    Goto(&done_alignment);
+    Bind(&needs_filler);
+    // Store a filler and increase the address by kPointerSize.
+    StoreNoWriteBarrier(MachineRepresentation::kTagged, top,
+                        LoadRoot(Heap::kOnePointerFillerMapRootIndex));
+    address.Bind(IntPtrAdd(no_runtime_result, IntPtrConstant(4)));

-    Bind(&done_alignment);
+    Goto(&done_filling);
+    Bind(&done_filling);
   }

-  Variable address(this, MachineRepresentation::kTagged,
-                   AllocateRawUnaligned(adjusted_size.value(), kNone,
-                                        top_address, limit_address));
+  no_runtime_result = BitcastWordToTagged(
+      IntPtrAdd(address.value(), IntPtrConstant(kHeapObjectTag)));

-  Label needs_filler(this), done_filling(this, &address);
-  Branch(IntPtrEqual(adjusted_size.value(), size_in_bytes), &done_filling,
-         &needs_filler);
+  result.Bind(no_runtime_result);
+  Goto(&merge_runtime);

-  Bind(&needs_filler);
-  // Store a filler and increase the address by kPointerSize.
-  StoreNoWriteBarrier(MachineType::PointerRepresentation(), top,
-                      LoadRoot(Heap::kOnePointerFillerMapRootIndex));
-  address.Bind(BitcastWordToTagged(
-      IntPtrAdd(address.value(), IntPtrConstant(kPointerSize))));
-  Goto(&done_filling);
+  Bind(&merge_runtime);
+  return result.value();
+}

-  Bind(&done_filling);
-  // Update the top.
-  StoreNoWriteBarrier(MachineType::PointerRepresentation(), top_address,
-                      IntPtrAdd(top, adjusted_size.value()));
-  return address.value();
+Node* CodeStubAssembler::AllocateRawUnaligned(Node* size_in_bytes,
+                                              AllocationFlags flags,
+                                              Node* top_address,
+                                              Node* limit_address) {
+  DCHECK((flags & kDoubleAlignment) == 0);
+  return AllocateRaw(size_in_bytes, flags, top_address, limit_address);
+}
+
+Node* CodeStubAssembler::AllocateRawDoubleAligned(Node* size_in_bytes,
+                                                  AllocationFlags flags,
+                                                  Node* top_address,
+                                                  Node* limit_address) {
+#if defined(V8_HOST_ARCH_32_BIT)
+  return AllocateRaw(size_in_bytes, flags | kDoubleAlignment, top_address,
+                     limit_address);
+#elif defined(V8_HOST_ARCH_64_BIT)
+  // Allocation on 64 bit machine is naturally double aligned
+  return AllocateRaw(size_in_bytes, flags & ~kDoubleAlignment, top_address,
+                     limit_address);
+#else
+#error Architecture not supported
+#endif
 }

 Node* CodeStubAssembler::AllocateInNewSpace(Node* size_in_bytes,
@@ -812,13 +831,13 @@ Node* CodeStubAssembler::Allocate(Node* size_in_bytes, AllocationFlags flags) {
                   .address());
   Node* limit_address = IntPtrAdd(top_address, IntPtrConstant(kPointerSize));

-#ifdef V8_HOST_ARCH_32_BIT
   if (flags & kDoubleAlignment) {
-    return AllocateRawAligned(size_in_bytes, flags, top_address, limit_address);
+    return AllocateRawDoubleAligned(size_in_bytes, flags, top_address,
+                                    limit_address);
+  } else {
+    return AllocateRawUnaligned(size_in_bytes, flags, top_address,
+                                limit_address);
   }
-#endif
-
-  return AllocateRawUnaligned(size_in_bytes, flags, top_address, limit_address);
 }

 Node* CodeStubAssembler::AllocateInNewSpace(int size_in_bytes,
...
@@ -1329,10 +1329,12 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
   void HandleBreakOnNode();

-  Node* AllocateRawAligned(Node* size_in_bytes, AllocationFlags flags,
-                           Node* top_address, Node* limit_address);
+  Node* AllocateRawDoubleAligned(Node* size_in_bytes, AllocationFlags flags,
+                                 Node* top_address, Node* limit_address);
   Node* AllocateRawUnaligned(Node* size_in_bytes, AllocationFlags flags,
                              Node* top_adddress, Node* limit_address);
+  Node* AllocateRaw(Node* size_in_bytes, AllocationFlags flags,
+                    Node* top_address, Node* limit_address);

   // Allocate and return a JSArray of given total size in bytes with header
   // fields initialized.
   Node* AllocateUninitializedJSArray(ElementsKind kind, Node* array_map,
...