Commit 74b8ef6c authored by ivica.bogosavljevic, committed by Commit bot

MIPS: Fix `[builtins] Reland of Port TypedArrayInitialize to CodeStubAssembler.`

Fix ff8b1abb

This fixes a problem with the alignment of typed arrays in TurboFan: Float64
typed arrays were not properly double-aligned on 32-bit architectures, which
caused crashes on architectures that do not support misaligned memory
access.

TEST=mjsunit/es6/typedarray-*
BUG=v8:6075

Review-Url: https://codereview.chromium.org/2784253002
Cr-Commit-Position: refs/heads/master@{#44366}
parent 45e3c56d
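
For readers outside V8, here is a minimal plain-C++ sketch (not V8 code; the
constants are reused purely for illustration) of the bump-pointer alignment
problem this CL fixes: on a 32-bit host the allocation top is only guaranteed
to be 4-byte aligned, so a Float64 backing store handed out without padding
can land on an address where an 8-byte floating-point load traps.

#include <cstdint>
#include <cstdio>

// Illustrative constants; the real values live in V8's heap code.
constexpr uintptr_t kDoubleAlignment = 8;
constexpr uintptr_t kDoubleAlignmentMask = kDoubleAlignment - 1;
constexpr size_t kPointerSize = 4;  // 32-bit host

alignas(8) static unsigned char new_space[256];
// Start the simulated allocation top deliberately misaligned.
static uintptr_t top = reinterpret_cast<uintptr_t>(new_space) + kPointerSize;

// Bump-pointer allocation that pads to an 8-byte boundary, as the fixed
// allocation path now does for kDoubleAlignment requests.
void* AllocateDoubleAligned(size_t size_in_bytes) {
  uintptr_t result = top;
  size_t adjusted_size = size_in_bytes;
  if (top & kDoubleAlignmentMask) {
    adjusted_size += kPointerSize;  // reserve room for a filler word
    result += kPointerSize;         // the object starts after the filler
  }
  top += adjusted_size;
  return reinterpret_cast<void*>(result);
}

int main() {
  double* elements =
      static_cast<double*>(AllocateDoubleAligned(4 * sizeof(double)));
  // Without the padding above, this store would be a misaligned 8-byte
  // access and would trap on MIPS/ARM cores without unaligned FP support.
  elements[0] = 1.5;
  printf("8-byte aligned: %d\n",
         static_cast<int>(reinterpret_cast<uintptr_t>(elements) % 8 == 0));
  return 0;
}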
@@ -214,7 +214,7 @@ void TypedArrayBuiltinsAssembler::DoInitialize(Node* const holder, Node* length,
   // Allocate a FixedTypedArray and set the length, base pointer and external
   // pointer.
   CSA_ASSERT(this, IsRegularHeapObjectSize(total_size.value()));
-  Node* elements = Allocate(total_size.value());
+  Node* elements = AllocateInNewSpace(total_size.value(), kDoubleAlignment);
   StoreMapNoWriteBarrier(elements, fixed_typed_map.value());
   StoreObjectFieldNoWriteBarrier(elements, FixedArray::kLengthOffset, length);
@@ -680,10 +680,8 @@ void CodeStubAssembler::BranchIfFastJSArray(
   BranchIfPrototypesHaveNoElements(map, if_true, if_false);
 }
 
-Node* CodeStubAssembler::AllocateRawUnaligned(Node* size_in_bytes,
-                                              AllocationFlags flags,
-                                              Node* top_address,
-                                              Node* limit_address) {
+Node* CodeStubAssembler::AllocateRaw(Node* size_in_bytes, AllocationFlags flags,
+                                     Node* top_address, Node* limit_address) {
   Node* top = Load(MachineType::Pointer(), top_address);
   Node* limit = Load(MachineType::Pointer(), limit_address);
@@ -692,12 +690,14 @@ Node* CodeStubAssembler::AllocateRawUnaligned(Node* size_in_bytes,
   Label runtime_call(this, Label::kDeferred), no_runtime_call(this);
   Label merge_runtime(this, &result);
 
+  bool needs_double_alignment = flags & kDoubleAlignment;
+
   if (flags & kAllowLargeObjectAllocation) {
     Label next(this);
     GotoIf(IsRegularHeapObjectSize(size_in_bytes), &next);
 
     Node* runtime_flags = SmiConstant(
-        Smi::FromInt(AllocateDoubleAlignFlag::encode(false) |
+        Smi::FromInt(AllocateDoubleAlignFlag::encode(needs_double_alignment) |
                      AllocateTargetSpace::encode(AllocationSpace::LO_SPACE)));
     Node* const runtime_result =
         CallRuntime(Runtime::kAllocateInTargetSpace, NoContextConstant(),
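
The encode(false) -> encode(needs_double_alignment) change matters because
the large-object path bails out to the runtime, and before this CL the
runtime allocator was never told about the caller's alignment request. A
self-contained sketch of this kind of BitField-style flag packing (the field
layout below is illustrative, not V8's actual encoding, which is further
wrapped in a Smi):

#include <cstdint>
#include <cstdio>

// Minimal stand-in for a BitField helper: packs a bool and a small enum
// into one integer, as AllocateDoubleAlignFlag / AllocateTargetSpace do.
template <typename T, int kShift, int kBits>
struct BitField {
  static constexpr uint32_t kMask = ((1u << kBits) - 1u) << kShift;
  static constexpr uint32_t encode(T value) {
    return static_cast<uint32_t>(value) << kShift;
  }
  static constexpr T decode(uint32_t packed) {
    return static_cast<T>((packed & kMask) >> kShift);
  }
};

enum AllocationSpace { NEW_SPACE = 0, OLD_SPACE = 1, LO_SPACE = 2 };
using AllocateDoubleAlignFlag = BitField<bool, 0, 1>;
using AllocateTargetSpace = BitField<AllocationSpace, 1, 3>;

int main() {
  // With this CL, the stub passes the caller's kDoubleAlignment request
  // through instead of hard-coding false.
  bool needs_double_alignment = true;
  uint32_t flags = AllocateDoubleAlignFlag::encode(needs_double_alignment) |
                   AllocateTargetSpace::encode(LO_SPACE);
  printf("double_align=%d space=%d\n",
         AllocateDoubleAlignFlag::decode(flags),
         AllocateTargetSpace::decode(flags));
  return 0;
}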
@@ -708,7 +708,25 @@ Node* CodeStubAssembler::AllocateRawUnaligned(Node* size_in_bytes,
     Bind(&next);
   }
 
-  Node* new_top = IntPtrAdd(top, size_in_bytes);
+  Variable adjusted_size(this, MachineType::PointerRepresentation(),
+                         size_in_bytes);
+
+  if (needs_double_alignment) {
+    Label not_aligned(this), done_alignment(this, &adjusted_size);
+
+    Branch(WordAnd(top, IntPtrConstant(kDoubleAlignmentMask)), &not_aligned,
+           &done_alignment);
+
+    Bind(&not_aligned);
+    Node* not_aligned_size = IntPtrAdd(size_in_bytes, IntPtrConstant(4));
+    adjusted_size.Bind(not_aligned_size);
+    Goto(&done_alignment);
+
+    Bind(&done_alignment);
+  }
+
+  Node* new_top = IntPtrAdd(top, adjusted_size.value());
+
   Branch(UintPtrGreaterThanOrEqual(new_top, limit), &runtime_call,
          &no_runtime_call);
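
Two details of the hunk above, restated as a plain-C++ sketch under the CSA
semantics shown (this is illustrative code, not V8's): the size is adjusted
before new_top is compared against limit, so a single limit check covers both
the filler word and the object; and the literal 4 is the pointer size on the
32-bit hosts that are the only ones to take this path.

#include <cstddef>
#include <cstdint>
#include <cstdio>

// Returns false when the bump would cross the limit, mirroring the branch
// to runtime_call above. `top` is only guaranteed 4-byte aligned.
bool TryBumpAllocate(uintptr_t* top, uintptr_t limit, size_t size_in_bytes,
                     bool needs_double_alignment, uintptr_t* result) {
  size_t adjusted_size = size_in_bytes;
  if (needs_double_alignment && (*top & 7)) {
    adjusted_size += 4;  // one extra pointer-sized word for the filler
  }
  uintptr_t new_top = *top + adjusted_size;
  // Checked after the adjustment, so the filler word is covered too.
  if (new_top >= limit) return false;
  *result = *top;  // the filler is written and the pointer tagged later
  *top = new_top;
  return true;
}

int main() {
  alignas(8) static unsigned char space[64];
  uintptr_t top = reinterpret_cast<uintptr_t>(space) + 4;  // misaligned
  uintptr_t limit = reinterpret_cast<uintptr_t>(space) + sizeof(space);
  uintptr_t start = 0;
  bool ok = TryBumpAllocate(&top, limit, 16, /*needs_double_alignment=*/true,
                            &start);
  printf("ok=%d reserved=%zu\n", ok, static_cast<size_t>(top - start));
  return 0;
}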
@@ -716,7 +734,7 @@ Node* CodeStubAssembler::AllocateRawUnaligned(Node* size_in_bytes,
   Node* runtime_result;
   if (flags & kPretenured) {
     Node* runtime_flags = SmiConstant(
-        Smi::FromInt(AllocateDoubleAlignFlag::encode(false) |
+        Smi::FromInt(AllocateDoubleAlignFlag::encode(needs_double_alignment) |
                      AllocateTargetSpace::encode(AllocationSpace::OLD_SPACE)));
     runtime_result =
         CallRuntime(Runtime::kAllocateInTargetSpace, NoContextConstant(),
@@ -733,57 +751,58 @@ Node* CodeStubAssembler::AllocateRawUnaligned(Node* size_in_bytes,
   Node* no_runtime_result = top;
   StoreNoWriteBarrier(MachineType::PointerRepresentation(), top_address,
                       new_top);
-  no_runtime_result = BitcastWordToTagged(
-      IntPtrAdd(no_runtime_result, IntPtrConstant(kHeapObjectTag)));
-  result.Bind(no_runtime_result);
-  Goto(&merge_runtime);
 
-  Bind(&merge_runtime);
-  return result.value();
-}
+  Variable address(this, MachineType::PointerRepresentation(),
+                   no_runtime_result);
 
-Node* CodeStubAssembler::AllocateRawAligned(Node* size_in_bytes,
-                                            AllocationFlags flags,
-                                            Node* top_address,
-                                            Node* limit_address) {
-  Node* top = Load(MachineType::Pointer(), top_address);
-  Variable adjusted_size(this, MachineType::PointerRepresentation(),
-                         size_in_bytes);
-  if (flags & kDoubleAlignment) {
-    Label not_aligned(this), done_alignment(this, &adjusted_size);
-    Branch(WordAnd(top, IntPtrConstant(kDoubleAlignmentMask)), &not_aligned,
-           &done_alignment);
+  if (needs_double_alignment) {
+    Label needs_filler(this), done_filling(this, &address);
+    Branch(IntPtrEqual(adjusted_size.value(), size_in_bytes), &done_filling,
+           &needs_filler);
 
-    Bind(&not_aligned);
-    Node* not_aligned_size =
-        IntPtrAdd(size_in_bytes, IntPtrConstant(kPointerSize));
-    adjusted_size.Bind(not_aligned_size);
-    Goto(&done_alignment);
+    Bind(&needs_filler);
+    // Store a filler and increase the address by kPointerSize.
+    StoreNoWriteBarrier(MachineRepresentation::kTagged, top,
+                        LoadRoot(Heap::kOnePointerFillerMapRootIndex));
+    address.Bind(IntPtrAdd(no_runtime_result, IntPtrConstant(4)));
+    Goto(&done_filling);
 
-    Bind(&done_alignment);
+    Bind(&done_filling);
   }
 
-  Variable address(this, MachineRepresentation::kTagged,
-                   AllocateRawUnaligned(adjusted_size.value(), kNone,
-                                        top_address, limit_address));
+  no_runtime_result = BitcastWordToTagged(
+      IntPtrAdd(address.value(), IntPtrConstant(kHeapObjectTag)));
 
-  Label needs_filler(this), done_filling(this, &address);
-  Branch(IntPtrEqual(adjusted_size.value(), size_in_bytes), &done_filling,
-         &needs_filler);
+  result.Bind(no_runtime_result);
+  Goto(&merge_runtime);
 
-  Bind(&needs_filler);
-  // Store a filler and increase the address by kPointerSize.
-  StoreNoWriteBarrier(MachineType::PointerRepresentation(), top,
-                      LoadRoot(Heap::kOnePointerFillerMapRootIndex));
-  address.Bind(BitcastWordToTagged(
-      IntPtrAdd(address.value(), IntPtrConstant(kPointerSize))));
-
-  Goto(&done_filling);
+  Bind(&merge_runtime);
+  return result.value();
+}
 
-  Bind(&done_filling);
-  // Update the top.
-  StoreNoWriteBarrier(MachineType::PointerRepresentation(), top_address,
-                      IntPtrAdd(top, adjusted_size.value()));
-  return address.value();
+Node* CodeStubAssembler::AllocateRawUnaligned(Node* size_in_bytes,
+                                              AllocationFlags flags,
+                                              Node* top_address,
+                                              Node* limit_address) {
+  DCHECK((flags & kDoubleAlignment) == 0);
+  return AllocateRaw(size_in_bytes, flags, top_address, limit_address);
+}
+
+Node* CodeStubAssembler::AllocateRawDoubleAligned(Node* size_in_bytes,
+                                                  AllocationFlags flags,
+                                                  Node* top_address,
+                                                  Node* limit_address) {
+#if defined(V8_HOST_ARCH_32_BIT)
+  return AllocateRaw(size_in_bytes, flags | kDoubleAlignment, top_address,
+                     limit_address);
+#elif defined(V8_HOST_ARCH_64_BIT)
+  // Allocation on 64 bit machine is naturally double aligned
+  return AllocateRaw(size_in_bytes, flags & ~kDoubleAlignment, top_address,
+                     limit_address);
+#else
+#error Architecture not supported
+#endif
+}
 
 Node* CodeStubAssembler::AllocateInNewSpace(Node* size_in_bytes,
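
The filler protocol at the tail of the rewritten AllocateRaw, restated as a
short plain-C++ sketch: when the raw start was misaligned, the padding word
is stamped with the map of a one-pointer filler object so the GC can still
walk the heap linearly, the object start shifts one word, and the pointer is
then tagged. kFillerMarker below is invented for illustration; the
kHeapObjectTag value of 1 matches V8's pointer tagging.

#include <cstdint>
#include <cstdio>

constexpr uint32_t kFillerMarker = 0xF1F1F1F1;  // stand-in for the filler map
constexpr uintptr_t kHeapObjectTag = 1;  // V8 tags heap pointers with bit 0

// Mirrors the tail of AllocateRaw above: write a filler when the raw start
// was misaligned, shift the object start past it, then tag the pointer.
uintptr_t FinishAllocation(uintptr_t allocation_start, bool was_misaligned) {
  uintptr_t address = allocation_start;
  if (was_misaligned) {
    *reinterpret_cast<uint32_t*>(address) = kFillerMarker;  // heap stays walkable
    address += 4;  // object begins after the filler word
  }
  return address + kHeapObjectTag;
}

int main() {
  alignas(8) static unsigned char space[64];
  uintptr_t start = reinterpret_cast<uintptr_t>(space) + 4;  // misaligned
  uintptr_t tagged = FinishAllocation(start, /*was_misaligned=*/true);
  printf("untagged address 8-byte aligned: %d\n",
         static_cast<int>((tagged - kHeapObjectTag) % 8 == 0));
  return 0;
}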
@@ -812,13 +831,13 @@ Node* CodeStubAssembler::Allocate(Node* size_in_bytes, AllocationFlags flags) {
                     .address());
   Node* limit_address = IntPtrAdd(top_address, IntPtrConstant(kPointerSize));
 
 #ifdef V8_HOST_ARCH_32_BIT
   if (flags & kDoubleAlignment) {
-    return AllocateRawAligned(size_in_bytes, flags, top_address, limit_address);
+    return AllocateRawDoubleAligned(size_in_bytes, flags, top_address,
+                                    limit_address);
   } else {
     return AllocateRawUnaligned(size_in_bytes, flags, top_address,
                                 limit_address);
   }
 #endif
 
   return AllocateRawUnaligned(size_in_bytes, flags, top_address, limit_address);
 }
 
 Node* CodeStubAssembler::AllocateInNewSpace(int size_in_bytes,
@@ -1329,10 +1329,12 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
   void HandleBreakOnNode();
 
-  Node* AllocateRawAligned(Node* size_in_bytes, AllocationFlags flags,
-                           Node* top_address, Node* limit_address);
+  Node* AllocateRawDoubleAligned(Node* size_in_bytes, AllocationFlags flags,
+                                 Node* top_address, Node* limit_address);
   Node* AllocateRawUnaligned(Node* size_in_bytes, AllocationFlags flags,
                              Node* top_adddress, Node* limit_address);
+  Node* AllocateRaw(Node* size_in_bytes, AllocationFlags flags,
+                    Node* top_address, Node* limit_address);
+
   // Allocate and return a JSArray of given total size in bytes with header
   // fields initialized.
   Node* AllocateUninitializedJSArray(ElementsKind kind, Node* array_map,