Commit 6346cdb6 authored by Benedikt Meurer, committed by Commit Bot

[turbofan] Initial Word64 support in representation selection.

This adds support to TurboFan's representation selection for the Word64
representation, and makes use of that to handle indices for memory access
and allocation instructions (i.e. LoadElement, StoreElement, Allocate,
etc.). These instructions had previously used Word32 as representation
for the indices / sizes, and then internally converted it to the correct
representation (aka Word64 on 64-bit architectures) later on, but that
was kind of brittle, and sometimes led to weird generated code.

The change thus only adds support to convert integer values in the safe
integer range from all kinds of representations to Word64 (on 64-bit
architectures). We don't yet handle the opposite direction and none of
the representation selection heuristics for the numeric operations were
changed so far. This will be done in follow-up CLs.

This CL itself is supposed to be neutral wrt. functionality, and only
serves as a starting point, and a cleanup for the (weird) implicit
Word64 index/size handling.

Bug: v8:7881, v8:8015, v8:8171
Design-Document: http://bit.ly/turbofan-word64
Change-Id: I3c6961a0e96cbc3fb8ac9d3e1be8f2e5c89bfd25
Cq-Include-Trybots: luci.chromium.try:linux_chromium_headless_rel
Reviewed-on: https://chromium-review.googlesource.com/1224932
Commit-Queue: Benedikt Meurer <bmeurer@chromium.org>
Reviewed-by: Jaroslav Sevcik <jarin@chromium.org>
Cr-Commit-Position: refs/heads/master@{#55886}
parent 52fc1552
......@@ -1113,20 +1113,20 @@ TF_BUILTIN(RunMicrotasks, InternalBuiltinsAssembler) {
}
TF_BUILTIN(AllocateInNewSpace, CodeStubAssembler) {
TNode<Int32T> requested_size =
UncheckedCast<Int32T>(Parameter(Descriptor::kRequestedSize));
TNode<IntPtrT> requested_size =
UncheckedCast<IntPtrT>(Parameter(Descriptor::kRequestedSize));
TailCallRuntime(Runtime::kAllocateInNewSpace, NoContextConstant(),
SmiFromInt32(requested_size));
SmiFromIntPtr(requested_size));
}
TF_BUILTIN(AllocateInOldSpace, CodeStubAssembler) {
TNode<Int32T> requested_size =
UncheckedCast<Int32T>(Parameter(Descriptor::kRequestedSize));
TNode<IntPtrT> requested_size =
UncheckedCast<IntPtrT>(Parameter(Descriptor::kRequestedSize));
int flags = AllocateTargetSpace::encode(OLD_SPACE);
TailCallRuntime(Runtime::kAllocateInTargetSpace, NoContextConstant(),
SmiFromInt32(requested_size), SmiConstant(flags));
SmiFromIntPtr(requested_size), SmiConstant(flags));
}
TF_BUILTIN(Abort, CodeStubAssembler) {
......
......@@ -1198,6 +1198,7 @@ void InstructionSelector::VisitWord64Ror(Node* node) {
V(ChangeUint32ToFloat64, kArm64Uint32ToFloat64) \
V(TruncateFloat32ToInt32, kArm64Float32ToInt32) \
V(ChangeFloat64ToInt32, kArm64Float64ToInt32) \
V(ChangeFloat64ToInt64, kArm64Float64ToInt64) \
V(TruncateFloat32ToUint32, kArm64Float32ToUint32) \
V(ChangeFloat64ToUint32, kArm64Float64ToUint32) \
V(ChangeFloat64ToUint64, kArm64Float64ToUint64) \
......
......@@ -640,6 +640,9 @@ bool EffectControlLinearizer::TryWireInStateEffect(Node* node,
case IrOpcode::kChangeTaggedSignedToInt32:
result = LowerChangeTaggedSignedToInt32(node);
break;
case IrOpcode::kChangeTaggedSignedToInt64:
result = LowerChangeTaggedSignedToInt64(node);
break;
case IrOpcode::kChangeTaggedToBit:
result = LowerChangeTaggedToBit(node);
break;
......@@ -649,6 +652,9 @@ bool EffectControlLinearizer::TryWireInStateEffect(Node* node,
case IrOpcode::kChangeTaggedToUint32:
result = LowerChangeTaggedToUint32(node);
break;
case IrOpcode::kChangeTaggedToInt64:
result = LowerChangeTaggedToInt64(node);
break;
case IrOpcode::kChangeTaggedToFloat64:
result = LowerChangeTaggedToFloat64(node);
break;
......@@ -1147,6 +1153,11 @@ Node* EffectControlLinearizer::LowerChangeTaggedSignedToInt32(Node* node) {
return ChangeSmiToInt32(value);
}
Node* EffectControlLinearizer::LowerChangeTaggedSignedToInt64(Node* node) {
Node* value = node->InputAt(0);
return ChangeSmiToInt64(value);
}
Node* EffectControlLinearizer::LowerChangeTaggedToBit(Node* node) {
Node* value = node->InputAt(0);
return __ WordEqual(value, __ TrueConstant());
......@@ -1283,6 +1294,26 @@ Node* EffectControlLinearizer::LowerChangeTaggedToUint32(Node* node) {
return done.PhiAt(0);
}
Node* EffectControlLinearizer::LowerChangeTaggedToInt64(Node* node) {
Node* value = node->InputAt(0);
auto if_not_smi = __ MakeDeferredLabel();
auto done = __ MakeLabel(MachineRepresentation::kWord64);
Node* check = ObjectIsSmi(value);
__ GotoIfNot(check, &if_not_smi);
__ Goto(&done, ChangeSmiToInt64(value));
__ Bind(&if_not_smi);
STATIC_ASSERT(HeapNumber::kValueOffset == Oddball::kToNumberRawOffset);
Node* vfalse = __ LoadField(AccessBuilder::ForHeapNumberValue(), value);
vfalse = __ ChangeFloat64ToInt64(vfalse);
__ Goto(&done, vfalse);
__ Bind(&done);
return done.PhiAt(0);
}
Node* EffectControlLinearizer::LowerChangeTaggedToFloat64(Node* node) {
return LowerTruncateTaggedToFloat64(node);
}
......@@ -2855,9 +2886,9 @@ Node* EffectControlLinearizer::LowerStringCharCodeAt(Node* node) {
// We need a loop here to properly deal with indirect strings
// (SlicedString, ConsString and ThinString).
auto loop = __ MakeLoopLabel(MachineRepresentation::kTagged,
MachineRepresentation::kWord32);
MachineType::PointerRepresentation());
auto loop_next = __ MakeLabel(MachineRepresentation::kTagged,
MachineRepresentation::kWord32);
MachineType::PointerRepresentation());
auto loop_done = __ MakeLabel(MachineRepresentation::kWord32);
__ Goto(&loop, receiver, position);
__ Bind(&loop);
......@@ -2944,16 +2975,14 @@ Node* EffectControlLinearizer::LowerStringCharCodeAt(Node* node) {
__ Bind(&if_onebyte);
{
Node* result = __ Load(MachineType::Uint8(), receiver_data,
ChangeInt32ToIntPtr(position));
Node* result = __ Load(MachineType::Uint8(), receiver_data, position);
__ Goto(&loop_done, result);
}
__ Bind(&if_twobyte);
{
Node* result = __ Load(
MachineType::Uint16(), receiver_data,
__ Word32Shl(ChangeInt32ToIntPtr(position), __ Int32Constant(1)));
Node* result = __ Load(MachineType::Uint16(), receiver_data,
__ WordShl(position, __ IntPtrConstant(1)));
__ Goto(&loop_done, result);
}
}
......@@ -2965,7 +2994,7 @@ Node* EffectControlLinearizer::LowerStringCharCodeAt(Node* node) {
Node* receiver_parent =
__ LoadField(AccessBuilder::ForSlicedStringParent(), receiver);
__ Goto(&loop_next, receiver_parent,
__ Int32Add(position, ChangeSmiToInt32(receiver_offset)));
__ IntAdd(position, ChangeSmiToIntPtr(receiver_offset)));
}
__ Bind(&if_runtime);
......@@ -2975,7 +3004,7 @@ Node* EffectControlLinearizer::LowerStringCharCodeAt(Node* node) {
auto call_descriptor = Linkage::GetRuntimeCallDescriptor(
graph()->zone(), id, 2, properties, CallDescriptor::kNoFlags);
Node* result = __ Call(call_descriptor, __ CEntryStubConstant(1),
receiver, ChangeInt32ToSmi(position),
receiver, ChangeIntPtrToSmi(position),
__ ExternalConstant(ExternalReference::Create(id)),
__ Int32Constant(2), __ NoContextConstant());
__ Goto(&loop_done, ChangeSmiToInt32(result));
......@@ -3474,8 +3503,8 @@ Node* EffectControlLinearizer::AllocateHeapNumberWithValue(Node* value) {
return result;
}
Node* EffectControlLinearizer::ChangeInt32ToSmi(Node* value) {
return __ WordShl(ChangeInt32ToIntPtr(value), SmiShiftBitsConstant());
Node* EffectControlLinearizer::ChangeIntPtrToSmi(Node* value) {
return __ WordShl(value, SmiShiftBitsConstant());
}
Node* EffectControlLinearizer::ChangeInt32ToIntPtr(Node* value) {
......@@ -3492,6 +3521,10 @@ Node* EffectControlLinearizer::ChangeIntPtrToInt32(Node* value) {
return value;
}
Node* EffectControlLinearizer::ChangeInt32ToSmi(Node* value) {
return ChangeIntPtrToSmi(ChangeInt32ToIntPtr(value));
}
Node* EffectControlLinearizer::ChangeUint32ToUintPtr(Node* value) {
if (machine()->Is64()) {
value = __ ChangeUint32ToUint64(value);
......@@ -3516,6 +3549,11 @@ Node* EffectControlLinearizer::ChangeSmiToInt32(Node* value) {
return value;
}
Node* EffectControlLinearizer::ChangeSmiToInt64(Node* value) {
CHECK(machine()->Is64());
return ChangeSmiToIntPtr(value);
}
Node* EffectControlLinearizer::ObjectIsSmi(Node* value) {
return __ WordEqual(__ WordAnd(value, __ IntPtrConstant(kSmiTagMask)),
__ IntPtrConstant(kSmiTag));
......@@ -3866,12 +3904,6 @@ Node* EffectControlLinearizer::LowerLoadDataViewElement(Node* node) {
Node* index = node->InputAt(2);
Node* is_little_endian = node->InputAt(3);
// On 64-bit platforms, we need to feed a Word64 index to the Load and
// Store operators.
if (machine()->Is64()) {
index = __ ChangeUint32ToUint64(index);
}
// We need to keep the {buffer} alive so that the GC will not release the
// ArrayBuffer (if there's any) as long as we are still operating on it.
__ Retain(buffer);
......@@ -3914,12 +3946,6 @@ void EffectControlLinearizer::LowerStoreDataViewElement(Node* node) {
Node* value = node->InputAt(3);
Node* is_little_endian = node->InputAt(4);
// On 64-bit platforms, we need to feed a Word64 index to the Load and
// Store operators.
if (machine()->Is64()) {
index = __ ChangeUint32ToUint64(index);
}
// We need to keep the {buffer} alive so that the GC will not release the
// ArrayBuffer (if there's any) as long as we are still operating on it.
__ Retain(buffer);
......
......@@ -53,9 +53,11 @@ class V8_EXPORT_PRIVATE EffectControlLinearizer {
Node* LowerChangeFloat64ToTagged(Node* node);
Node* LowerChangeFloat64ToTaggedPointer(Node* node);
Node* LowerChangeTaggedSignedToInt32(Node* node);
Node* LowerChangeTaggedSignedToInt64(Node* node);
Node* LowerChangeTaggedToBit(Node* node);
Node* LowerChangeTaggedToInt32(Node* node);
Node* LowerChangeTaggedToUint32(Node* node);
Node* LowerChangeTaggedToInt64(Node* node);
Node* LowerChangeTaggedToTaggedSigned(Node* node);
Node* LowerCheckBounds(Node* node, Node* frame_state);
Node* LowerPoisonIndex(Node* node);
......@@ -190,10 +192,12 @@ class V8_EXPORT_PRIVATE EffectControlLinearizer {
Node* ChangeInt32ToSmi(Node* value);
Node* ChangeInt32ToIntPtr(Node* value);
Node* ChangeIntPtrToInt32(Node* value);
Node* ChangeIntPtrToSmi(Node* value);
Node* ChangeUint32ToUintPtr(Node* value);
Node* ChangeUint32ToSmi(Node* value);
Node* ChangeSmiToIntPtr(Node* value);
Node* ChangeSmiToInt32(Node* value);
Node* ChangeSmiToInt64(Node* value);
Node* ObjectIsSmi(Node* value);
Node* LoadFromSeqString(Node* receiver, Node* position, Node* is_one_byte);
......
......@@ -26,8 +26,10 @@ Node* GraphAssembler::Int32Constant(int32_t value) {
return jsgraph()->Int32Constant(value);
}
Node* GraphAssembler::UniqueInt32Constant(int32_t value) {
return graph()->NewNode(common()->Int32Constant(value));
Node* GraphAssembler::UniqueIntPtrConstant(intptr_t value) {
return graph()->NewNode(
machine()->Is64() ? common()->Int64Constant(value)
: common()->Int32Constant(static_cast<int32_t>(value)));
}
Node* GraphAssembler::SmiConstant(int32_t value) {
......
......@@ -24,6 +24,7 @@ namespace compiler {
V(ChangeUint32ToFloat64) \
V(ChangeUint32ToUint64) \
V(ChangeFloat64ToInt32) \
V(ChangeFloat64ToInt64) \
V(ChangeFloat64ToUint32) \
V(TruncateInt64ToInt32) \
V(RoundFloat64ToInt32) \
......@@ -176,7 +177,7 @@ class GraphAssembler {
Node* IntPtrConstant(intptr_t value);
Node* Uint32Constant(int32_t value);
Node* Int32Constant(int32_t value);
Node* UniqueInt32Constant(int32_t value);
Node* UniqueIntPtrConstant(intptr_t value);
Node* SmiConstant(int32_t value);
Node* Float64Constant(double value);
Node* Projection(int index, Node* value);
......
......@@ -1472,6 +1472,8 @@ void InstructionSelector::VisitNode(Node* node) {
return MarkAsFloat64(node), VisitChangeUint32ToFloat64(node);
case IrOpcode::kChangeFloat64ToInt32:
return MarkAsWord32(node), VisitChangeFloat64ToInt32(node);
case IrOpcode::kChangeFloat64ToInt64:
return MarkAsWord64(node), VisitChangeFloat64ToInt64(node);
case IrOpcode::kChangeFloat64ToUint32:
return MarkAsWord32(node), VisitChangeFloat64ToUint32(node);
case IrOpcode::kChangeFloat64ToUint64:
......@@ -2290,6 +2292,10 @@ void InstructionSelector::VisitChangeUint32ToUint64(Node* node) {
UNIMPLEMENTED();
}
void InstructionSelector::VisitChangeFloat64ToInt64(Node* node) {
UNIMPLEMENTED();
}
void InstructionSelector::VisitChangeFloat64ToUint64(Node* node) {
UNIMPLEMENTED();
}
......
......@@ -618,6 +618,11 @@ Reduction MachineOperatorReducer::Reduce(Node* node) {
if (m.IsChangeInt32ToFloat64()) return Replace(m.node()->InputAt(0));
break;
}
case IrOpcode::kChangeFloat64ToInt64: {
Float64Matcher m(node->InputAt(0));
if (m.HasValue()) return ReplaceInt64(static_cast<int64_t>(m.Value()));
break;
}
case IrOpcode::kChangeFloat64ToUint32: {
Float64Matcher m(node->InputAt(0));
if (m.HasValue()) return ReplaceInt32(FastD2UI(m.Value()));
......
......@@ -145,6 +145,7 @@ MachineType AtomicOpType(Operator const* op) {
V(TruncateFloat64ToWord32, Operator::kNoProperties, 1, 0, 1) \
V(ChangeFloat32ToFloat64, Operator::kNoProperties, 1, 0, 1) \
V(ChangeFloat64ToInt32, Operator::kNoProperties, 1, 0, 1) \
V(ChangeFloat64ToInt64, Operator::kNoProperties, 1, 0, 1) \
V(ChangeFloat64ToUint32, Operator::kNoProperties, 1, 0, 1) \
V(ChangeFloat64ToUint64, Operator::kNoProperties, 1, 0, 1) \
V(TruncateFloat64ToUint32, Operator::kNoProperties, 1, 0, 1) \
......
......@@ -319,6 +319,7 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final
// the input value is representable in the target value.
const Operator* ChangeFloat32ToFloat64();
const Operator* ChangeFloat64ToInt32(); // narrowing
const Operator* ChangeFloat64ToInt64();
const Operator* ChangeFloat64ToUint32(); // narrowing
const Operator* ChangeFloat64ToUint64();
const Operator* TruncateFloat64ToUint32();
......
......@@ -67,7 +67,7 @@ MemoryOptimizer::AllocationState::AllocationState(AllocationGroup* group)
: group_(group), size_(std::numeric_limits<int>::max()), top_(nullptr) {}
MemoryOptimizer::AllocationState::AllocationState(AllocationGroup* group,
int size, Node* top)
intptr_t size, Node* top)
: group_(group), size_(size), top_(top) {}
bool MemoryOptimizer::AllocationState::IsNewSpaceAllocation() const {
......@@ -175,27 +175,35 @@ void MemoryOptimizer::VisitAllocateRaw(Node* node,
// Check if we can fold this allocation into a previous allocation represented
// by the incoming {state}.
Int32Matcher m(size);
if (m.HasValue() && m.Value() < kMaxRegularHeapObjectSize) {
int32_t const object_size = m.Value();
IntPtrMatcher m(size);
if (m.IsInRange(0, kMaxRegularHeapObjectSize)) {
intptr_t const object_size = m.Value();
if (allocation_folding_ == AllocationFolding::kDoAllocationFolding &&
state->size() <= kMaxRegularHeapObjectSize - object_size &&
state->group()->pretenure() == pretenure) {
// We can fold this Allocate {node} into the allocation {group}
// represented by the given {state}. Compute the upper bound for
// the new {state}.
int32_t const state_size = state->size() + object_size;
intptr_t const state_size = state->size() + object_size;
// Update the reservation check to the actual maximum upper bound.
AllocationGroup* const group = state->group();
if (OpParameter<int32_t>(group->size()->op()) < state_size) {
NodeProperties::ChangeOp(group->size(),
common()->Int32Constant(state_size));
if (machine()->Is64()) {
if (OpParameter<int64_t>(group->size()->op()) < state_size) {
NodeProperties::ChangeOp(group->size(),
common()->Int64Constant(state_size));
}
} else {
if (OpParameter<int32_t>(group->size()->op()) < state_size) {
NodeProperties::ChangeOp(
group->size(),
common()->Int32Constant(static_cast<int32_t>(state_size)));
}
}
// Update the allocation top with the new object allocation.
// TODO(bmeurer): Defer writing back top as much as possible.
Node* top = __ IntAdd(state->top(), __ IntPtrConstant(object_size));
Node* top = __ IntAdd(state->top(), size);
__ Store(StoreRepresentation(MachineType::PointerRepresentation(),
kNoWriteBarrier),
top_address, __ IntPtrConstant(0), top);
......@@ -213,7 +221,7 @@ void MemoryOptimizer::VisitAllocateRaw(Node* node,
// Setup a mutable reservation size node; will be patched as we fold
// additional allocations into this new group.
Node* size = __ UniqueInt32Constant(object_size);
Node* size = __ UniqueIntPtrConstant(object_size);
// Load allocation top and limit.
Node* top =
......@@ -223,10 +231,7 @@ void MemoryOptimizer::VisitAllocateRaw(Node* node,
// Check if we need to collect garbage before we can start bump pointer
// allocation (always done for folded allocations).
Node* check = __ UintLessThan(
__ IntAdd(top,
machine()->Is64() ? __ ChangeInt32ToInt64(size) : size),
limit);
Node* check = __ UintLessThan(__ IntAdd(top, size), limit);
__ GotoIfNot(check, &call_runtime);
__ Goto(&done, top);
......@@ -277,8 +282,7 @@ void MemoryOptimizer::VisitAllocateRaw(Node* node,
__ Load(MachineType::Pointer(), limit_address, __ IntPtrConstant(0));
// Compute the new top.
Node* new_top =
__ IntAdd(top, machine()->Is64() ? __ ChangeInt32ToInt64(size) : size);
Node* new_top = __ IntAdd(top, size);
// Check if we can do bump pointer allocation here.
Node* check = __ UintLessThan(new_top, limit);
......@@ -423,19 +427,7 @@ void MemoryOptimizer::VisitOtherEffect(Node* node,
EnqueueUses(node, state);
}
Node* MemoryOptimizer::ComputeIndex(ElementAccess const& access, Node* key) {
Node* index;
if (machine()->Is64()) {
// On 64-bit platforms, we need to feed a Word64 index to the Load and
// Store operators. Since LoadElement or StoreElement don't do any bounds
// checking themselves, we can be sure that the {key} was already checked
// and is in valid range, so we can do the further address computation on
// Word64 below, which ideally allows us to fuse the address computation
// with the actual memory access operation on Intel platforms.
index = graph()->NewNode(machine()->ChangeUint32ToUint64(), key);
} else {
index = key;
}
Node* MemoryOptimizer::ComputeIndex(ElementAccess const& access, Node* index) {
int const element_size_shift =
ElementSizeLog2Of(access.machine_type.representation());
if (element_size_shift) {
......
......@@ -74,7 +74,7 @@ class MemoryOptimizer final {
static AllocationState const* Closed(AllocationGroup* group, Zone* zone) {
return new (zone) AllocationState(group);
}
static AllocationState const* Open(AllocationGroup* group, int size,
static AllocationState const* Open(AllocationGroup* group, intptr_t size,
Node* top, Zone* zone) {
return new (zone) AllocationState(group, size, top);
}
......@@ -83,17 +83,17 @@ class MemoryOptimizer final {
AllocationGroup* group() const { return group_; }
Node* top() const { return top_; }
int size() const { return size_; }
intptr_t size() const { return size_; }
private:
AllocationState();
explicit AllocationState(AllocationGroup* group);
AllocationState(AllocationGroup* group, int size, Node* top);
AllocationState(AllocationGroup* group, intptr_t size, Node* top);
AllocationGroup* const group_;
// The upper bound of the combined allocated object size on the current path
// (max int if allocation folding is impossible on this path).
int const size_;
intptr_t const size_;
Node* const top_;
DISALLOW_COPY_AND_ASSIGN(AllocationState);
......
......@@ -225,7 +225,9 @@
// Opcodes for VirtualMachine-level operators.
#define SIMPLIFIED_CHANGE_OP_LIST(V) \
V(ChangeTaggedSignedToInt32) \
V(ChangeTaggedSignedToInt64) \
V(ChangeTaggedToInt32) \
V(ChangeTaggedToInt64) \
V(ChangeTaggedToUint32) \
V(ChangeTaggedToFloat64) \
V(ChangeTaggedToTaggedSigned) \
......@@ -605,6 +607,7 @@
V(TruncateFloat64ToWord32) \
V(ChangeFloat32ToFloat64) \
V(ChangeFloat64ToInt32) \
V(ChangeFloat64ToInt64) \
V(ChangeFloat64ToUint32) \
V(ChangeFloat64ToUint64) \
V(Float64SilenceNaN) \
......
......@@ -611,6 +611,9 @@ class V8_EXPORT_PRIVATE RawMachineAssembler {
Node* ChangeFloat64ToInt32(Node* a) {
return AddNode(machine()->ChangeFloat64ToInt32(), a);
}
Node* ChangeFloat64ToInt64(Node* a) {
return AddNode(machine()->ChangeFloat64ToInt64(), a);
}
Node* ChangeFloat64ToUint32(Node* a) {
return AddNode(machine()->ChangeFloat64ToUint32(), a);
}
......
......@@ -10,6 +10,7 @@
#include "src/code-factory.h"
#include "src/compiler/machine-operator.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/type-cache.h"
#include "src/heap/factory-inl.h"
namespace v8 {
......@@ -124,6 +125,13 @@ bool IsWord(MachineRepresentation rep) {
} // namespace
RepresentationChanger::RepresentationChanger(JSGraph* jsgraph, Isolate* isolate)
: cache_(TypeCache::Get()),
jsgraph_(jsgraph),
isolate_(isolate),
testing_type_errors_(false),
type_error_(false) {}
// Changes representation from {output_rep} to {use_rep}. The {truncation}
// parameter is only used for sanity checking - if the changer cannot figure
// out signedness for the word32->float64 conversion, then we check that the
......@@ -620,6 +628,7 @@ Node* RepresentationChanger::GetWord32RepresentationFor(
// Eagerly fold representation changes for constants.
switch (node->opcode()) {
case IrOpcode::kInt32Constant:
case IrOpcode::kInt64Constant:
case IrOpcode::kFloat32Constant:
case IrOpcode::kFloat64Constant:
UNREACHABLE();
......@@ -838,16 +847,78 @@ Node* RepresentationChanger::GetBitRepresentationFor(
Node* RepresentationChanger::GetWord64RepresentationFor(
Node* node, MachineRepresentation output_rep, Type output_type) {
// Eagerly fold representation changes for constants.
switch (node->opcode()) {
case IrOpcode::kInt32Constant:
case IrOpcode::kInt64Constant:
case IrOpcode::kFloat32Constant:
case IrOpcode::kFloat64Constant:
UNREACHABLE();
break;
case IrOpcode::kNumberConstant: {
double const fv = OpParameter<double>(node->op());
int64_t const iv = static_cast<int64_t>(fv);
if (static_cast<double>(iv) == fv) {
return jsgraph()->Int64Constant(iv);
}
break;
}
default:
break;
}
// Select the correct X -> Word64 operator.
const Operator* op;
if (output_type.Is(Type::None())) {
// This is an impossible value; it should not be used at runtime.
return jsgraph()->graph()->NewNode(
jsgraph()->common()->DeadValue(MachineRepresentation::kWord32), node);
jsgraph()->common()->DeadValue(MachineRepresentation::kWord64), node);
} else if (output_rep == MachineRepresentation::kBit) {
return node; // Sloppy comparison -> word64
} else if (output_rep == MachineRepresentation::kWord32) {
if (output_type.Is(Type::Unsigned32())) {
op = machine()->ChangeUint32ToUint64();
} else if (output_type.Is(Type::Signed32())) {
op = machine()->ChangeInt32ToInt64();
} else {
return TypeError(node, output_rep, output_type,
MachineRepresentation::kWord64);
}
} else if (output_rep == MachineRepresentation::kFloat32) {
if (output_type.Is(cache_.kSafeInteger)) {
// float32 -> float64 -> int64
node = InsertChangeFloat32ToFloat64(node);
op = machine()->ChangeFloat64ToInt64();
} else {
return TypeError(node, output_rep, output_type,
MachineRepresentation::kWord64);
}
} else if (output_rep == MachineRepresentation::kFloat64) {
if (output_type.Is(cache_.kSafeInteger)) {
op = machine()->ChangeFloat64ToInt64();
} else {
return TypeError(node, output_rep, output_type,
MachineRepresentation::kWord64);
}
} else if (output_rep == MachineRepresentation::kTaggedSigned) {
if (output_type.Is(Type::SignedSmall())) {
op = simplified()->ChangeTaggedSignedToInt64();
} else {
return TypeError(node, output_rep, output_type,
MachineRepresentation::kWord64);
}
} else if (CanBeTaggedPointer(output_rep)) {
if (output_type.Is(cache_.kSafeInteger)) {
op = simplified()->ChangeTaggedToInt64();
} else {
return TypeError(node, output_rep, output_type,
MachineRepresentation::kWord64);
}
} else {
return TypeError(node, output_rep, output_type,
MachineRepresentation::kWord64);
}
// Can't really convert Word64 to anything else. Purported to be internal.
return TypeError(node, output_rep, output_type,
MachineRepresentation::kWord64);
return jsgraph()->graph()->NewNode(op, node);
}
const Operator* RepresentationChanger::Int32OperatorFor(
......
......@@ -12,6 +12,9 @@ namespace v8 {
namespace internal {
namespace compiler {
// Forward declarations.
class TypeCache;
enum IdentifyZeros { kIdentifyZeros, kDistinguishZeros };
class Truncation final {
......@@ -255,11 +258,7 @@ class UseInfo {
// Eagerly folds any representation changes for constants.
class RepresentationChanger final {
public:
RepresentationChanger(JSGraph* jsgraph, Isolate* isolate)
: jsgraph_(jsgraph),
isolate_(isolate),
testing_type_errors_(false),
type_error_(false) {}
RepresentationChanger(JSGraph* jsgraph, Isolate* isolate);
// Changes representation from {output_type} to {use_rep}. The {truncation}
// parameter is only used for sanity checking - if the changer cannot figure
......@@ -286,6 +285,7 @@ class RepresentationChanger final {
}
private:
TypeCache const& cache_;
JSGraph* jsgraph_;
Isolate* isolate_;
......
......@@ -158,6 +158,10 @@ UseInfo UseInfoForBasePointer(const ElementAccess& access) {
return access.tag() != 0 ? UseInfo::AnyTagged() : UseInfo::Word();
}
UseInfo UseInfoForIndex() {
return UseInfo(MachineType::PointerRepresentation(), Truncation::Word32());
}
void ReplaceEffectControlUses(Node* node, Node* effect, Node* control) {
for (Edge edge : node->use_edges()) {
if (NodeProperties::IsControlEdge(edge)) {
......@@ -2363,13 +2367,11 @@ class RepresentationSelector {
MachineRepresentation::kTaggedPointer);
}
case IrOpcode::kStringCharCodeAt: {
return VisitBinop(node, UseInfo::AnyTagged(),
UseInfo::TruncatingWord32(),
return VisitBinop(node, UseInfo::AnyTagged(), UseInfoForIndex(),
MachineRepresentation::kWord32);
}
case IrOpcode::kStringCodePointAt: {
return VisitBinop(node, UseInfo::AnyTagged(),
UseInfo::TruncatingWord32(),
return VisitBinop(node, UseInfo::AnyTagged(), UseInfoForIndex(),
MachineRepresentation::kTaggedSigned);
}
case IrOpcode::kStringFromSingleCharCode: {
......@@ -2503,7 +2505,7 @@ class RepresentationSelector {
}
case IrOpcode::kAllocate: {
ProcessInput(node, 0, UseInfo::TruncatingWord32());
ProcessInput(node, 0, UseInfoForIndex());
ProcessRemainingInputs(node, 1);
SetOutput(node, MachineRepresentation::kTaggedPointer);
return;
......@@ -2555,8 +2557,7 @@ class RepresentationSelector {
case IrOpcode::kLoadElement: {
if (truncation.IsUnused()) return VisitUnused(node);
ElementAccess access = ElementAccessOf(node->op());
VisitBinop(node, UseInfoForBasePointer(access),
UseInfo::TruncatingWord32(),
VisitBinop(node, UseInfoForBasePointer(access), UseInfoForIndex(),
access.machine_type.representation());
return;
}
......@@ -2576,7 +2577,7 @@ class RepresentationSelector {
access.base_is_tagged, element_representation, access.type,
input_info->representation(), value_node);
ProcessInput(node, 0, UseInfoForBasePointer(access)); // base
ProcessInput(node, 1, UseInfo::TruncatingWord32()); // index
ProcessInput(node, 1, UseInfoForIndex()); // index
ProcessInput(node, 2,
TruncatingUseInfoFromRepresentation(
element_representation)); // value
......@@ -2599,8 +2600,8 @@ class RepresentationSelector {
case IrOpcode::kTransitionAndStoreElement: {
Type value_type = TypeOf(node->InputAt(2));
ProcessInput(node, 0, UseInfo::AnyTagged()); // array
ProcessInput(node, 1, UseInfo::TruncatingWord32()); // index
ProcessInput(node, 0, UseInfo::AnyTagged()); // array
ProcessInput(node, 1, UseInfoForIndex()); // index
if (value_type.Is(Type::SignedSmall())) {
ProcessInput(node, 2, UseInfo::TruncatingWord32()); // value
......@@ -2635,10 +2636,10 @@ class RepresentationSelector {
case IrOpcode::kLoadTypedElement: {
MachineRepresentation const rep =
MachineRepresentationFromArrayType(ExternalArrayTypeOf(node->op()));
ProcessInput(node, 0, UseInfo::AnyTagged()); // buffer
ProcessInput(node, 1, UseInfo::AnyTagged()); // base pointer
ProcessInput(node, 2, UseInfo::Word()); // external pointer
ProcessInput(node, 3, UseInfo::TruncatingWord32()); // index
ProcessInput(node, 0, UseInfo::AnyTagged()); // buffer
ProcessInput(node, 1, UseInfo::AnyTagged()); // base pointer
ProcessInput(node, 2, UseInfo::Word()); // external pointer
ProcessInput(node, 3, UseInfoForIndex()); // index
ProcessRemainingInputs(node, 4);
SetOutput(node, rep);
return;
......@@ -2646,10 +2647,10 @@ class RepresentationSelector {
case IrOpcode::kLoadDataViewElement: {
MachineRepresentation const rep =
MachineRepresentationFromArrayType(ExternalArrayTypeOf(node->op()));
ProcessInput(node, 0, UseInfo::AnyTagged()); // buffer
ProcessInput(node, 1, UseInfo::Word()); // external pointer
ProcessInput(node, 2, UseInfo::TruncatingWord32()); // index
ProcessInput(node, 3, UseInfo::Bool()); // little-endian
ProcessInput(node, 0, UseInfo::AnyTagged()); // buffer
ProcessInput(node, 1, UseInfo::Word()); // external pointer
ProcessInput(node, 2, UseInfoForIndex()); // index
ProcessInput(node, 3, UseInfo::Bool()); // little-endian
ProcessRemainingInputs(node, 4);
SetOutput(node, rep);
return;
......@@ -2657,10 +2658,10 @@ class RepresentationSelector {
case IrOpcode::kStoreTypedElement: {
MachineRepresentation const rep =
MachineRepresentationFromArrayType(ExternalArrayTypeOf(node->op()));
ProcessInput(node, 0, UseInfo::AnyTagged()); // buffer
ProcessInput(node, 1, UseInfo::AnyTagged()); // base pointer
ProcessInput(node, 2, UseInfo::Word()); // external pointer
ProcessInput(node, 3, UseInfo::TruncatingWord32()); // index
ProcessInput(node, 0, UseInfo::AnyTagged()); // buffer
ProcessInput(node, 1, UseInfo::AnyTagged()); // base pointer
ProcessInput(node, 2, UseInfo::Word()); // external pointer
ProcessInput(node, 3, UseInfoForIndex()); // index
ProcessInput(node, 4,
TruncatingUseInfoFromRepresentation(rep)); // value
ProcessRemainingInputs(node, 5);
......@@ -2672,7 +2673,7 @@ class RepresentationSelector {
MachineRepresentationFromArrayType(ExternalArrayTypeOf(node->op()));
ProcessInput(node, 0, UseInfo::AnyTagged()); // buffer
ProcessInput(node, 1, UseInfo::Word()); // external pointer
ProcessInput(node, 2, UseInfo::TruncatingWord32()); // index
ProcessInput(node, 2, UseInfoForIndex()); // index
ProcessInput(node, 3,
TruncatingUseInfoFromRepresentation(rep)); // value
ProcessInput(node, 4, UseInfo::Bool()); // little-endian
......
......@@ -721,7 +721,9 @@ bool operator==(CheckMinusZeroParameters const& lhs,
V(PlainPrimitiveToWord32, Operator::kNoProperties, 1, 0) \
V(PlainPrimitiveToFloat64, Operator::kNoProperties, 1, 0) \
V(ChangeTaggedSignedToInt32, Operator::kNoProperties, 1, 0) \
V(ChangeTaggedSignedToInt64, Operator::kNoProperties, 1, 0) \
V(ChangeTaggedToInt32, Operator::kNoProperties, 1, 0) \
V(ChangeTaggedToInt64, Operator::kNoProperties, 1, 0) \
V(ChangeTaggedToUint32, Operator::kNoProperties, 1, 0) \
V(ChangeTaggedToFloat64, Operator::kNoProperties, 1, 0) \
V(ChangeTaggedToTaggedSigned, Operator::kNoProperties, 1, 0) \
......
......@@ -642,7 +642,9 @@ class V8_EXPORT_PRIVATE SimplifiedOperatorBuilder final
const Operator* PlainPrimitiveToFloat64();
const Operator* ChangeTaggedSignedToInt32();
const Operator* ChangeTaggedSignedToInt64();
const Operator* ChangeTaggedToInt32();
const Operator* ChangeTaggedToInt64();
const Operator* ChangeTaggedToUint32();
const Operator* ChangeTaggedToFloat64();
const Operator* ChangeTaggedToTaggedSigned();
......
......@@ -1285,6 +1285,8 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) {
// CheckTypeIs(node, to));
break;
}
case IrOpcode::kChangeTaggedSignedToInt64:
break;
case IrOpcode::kChangeTaggedToInt32: {
// Signed32 /\ Tagged -> Signed32 /\ UntaggedInt32
// TODO(neis): Activate once ChangeRepresentation works in typer.
......@@ -1294,6 +1296,8 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) {
// CheckTypeIs(node, to));
break;
}
case IrOpcode::kChangeTaggedToInt64:
break;
case IrOpcode::kChangeTaggedToUint32: {
// Unsigned32 /\ Tagged -> Unsigned32 /\ UntaggedInt32
// TODO(neis): Activate once ChangeRepresentation works in typer.
......@@ -1701,6 +1705,7 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) {
case IrOpcode::kChangeUint32ToFloat64:
case IrOpcode::kChangeFloat32ToFloat64:
case IrOpcode::kChangeFloat64ToInt32:
case IrOpcode::kChangeFloat64ToInt64:
case IrOpcode::kChangeFloat64ToUint32:
case IrOpcode::kChangeFloat64ToUint64:
case IrOpcode::kFloat64SilenceNaN:
......
......@@ -1273,6 +1273,7 @@ void VisitFloatUnop(InstructionSelector* selector, Node* node, Node* input,
V(Float64Sqrt, kSSEFloat64Sqrt) \
V(Float32Sqrt, kSSEFloat32Sqrt) \
V(ChangeFloat64ToInt32, kSSEFloat64ToInt32) \
V(ChangeFloat64ToInt64, kSSEFloat64ToInt64) \
V(ChangeFloat64ToUint32, kSSEFloat64ToUint32 | MiscField::encode(1)) \
V(TruncateFloat64ToUint32, kSSEFloat64ToUint32 | MiscField::encode(0)) \
V(ChangeFloat64ToUint64, kSSEFloat64ToUint64) \
......
......@@ -441,7 +441,7 @@ class AllocateDescriptor : public CallInterfaceDescriptor {
public:
DEFINE_PARAMETERS_NO_CONTEXT(kRequestedSize)
DEFINE_RESULT_AND_PARAMETER_TYPES(MachineType::TaggedPointer(), // result 1
MachineType::Int32()) // kRequestedSize
MachineType::IntPtr()) // kRequestedSize
DECLARE_DESCRIPTOR(AllocateDescriptor, CallInterfaceDescriptor)
};
......
......@@ -6,6 +6,7 @@
#include "src/compiler/node-matchers.h"
#include "src/compiler/representation-change.h"
#include "src/compiler/type-cache.h"
#include "src/objects-inl.h"
#include "test/cctest/cctest.h"
#include "test/cctest/compiler/codegen-tester.h"
......@@ -46,6 +47,12 @@ class RepresentationChangerTester : public HandleAndZoneScope,
CHECK_EQ(expected, m.Value());
}
// Asserts that |n| is an Int64Constant node carrying exactly |expected|.
void CheckInt64Constant(Node* n, int64_t expected) {
  Int64Matcher matcher(n);
  CHECK(matcher.HasValue());
  CHECK_EQ(expected, matcher.Value());
}
void CheckUint32Constant(Node* n, uint32_t expected) {
Uint32Matcher m(n);
CHECK(m.HasValue());
......@@ -267,6 +274,18 @@ TEST(ToUint32_constant) {
}
}
// Tagged constants in the safe-integer range must lower directly to
// Int64Constant nodes when a Word64 representation is requested.
TEST(ToInt64_constant) {
  RepresentationChangerTester r;
  FOR_INT32_INPUTS(i) {
    Node* input = r.jsgraph()->Constant(*i);
    Node* use = r.Return(input);
    Node* converted = r.changer()->GetRepresentationFor(
        input, MachineRepresentation::kTagged, TypeCache::Get().kSafeInteger,
        use, UseInfo(MachineRepresentation::kWord64, Truncation::None()));
    r.CheckInt64Constant(converted, *i);
  }
}
static void CheckChange(IrOpcode::Value expected, MachineRepresentation from,
Type from_type, UseInfo use_info) {
RepresentationChangerTester r;
......@@ -328,6 +347,39 @@ static void CheckChange(IrOpcode::Value expected, MachineRepresentation from,
CHECK_EQ(n, c->InputAt(0));
}
TEST(Word64) {
  // Word32 -> Word64: sign- vs. zero-extension is picked from the type.
  CheckChange(IrOpcode::kChangeInt32ToInt64, MachineRepresentation::kWord32,
              Type::Signed32(), MachineRepresentation::kWord64);
  CheckChange(IrOpcode::kChangeUint32ToUint64, MachineRepresentation::kWord32,
              Type::Unsigned32(), MachineRepresentation::kWord64);

  // Float64 -> Word64: a single truncating change suffices for any value
  // known to be in the safe-integer range (Signed32/Unsigned32 included).
  CheckChange(IrOpcode::kChangeFloat64ToInt64, MachineRepresentation::kFloat64,
              Type::Signed32(), MachineRepresentation::kWord64);
  CheckChange(IrOpcode::kChangeFloat64ToInt64, MachineRepresentation::kFloat64,
              Type::Unsigned32(), MachineRepresentation::kWord64);
  CheckChange(IrOpcode::kChangeFloat64ToInt64, MachineRepresentation::kFloat64,
              TypeCache::Get().kSafeInteger, MachineRepresentation::kWord64);

  // Float32 -> Word64: goes through Float64 first, hence two changes.
  CheckTwoChanges(IrOpcode::kChangeFloat32ToFloat64,
                  IrOpcode::kChangeFloat64ToInt64,
                  MachineRepresentation::kFloat32, Type::Signed32(),
                  MachineRepresentation::kWord64);
  CheckTwoChanges(IrOpcode::kChangeFloat32ToFloat64,
                  IrOpcode::kChangeFloat64ToInt64,
                  MachineRepresentation::kFloat32, Type::Unsigned32(),
                  MachineRepresentation::kWord64);

  // Tagged -> Word64: one ChangeTaggedToInt64 for safe-integer-typed values.
  CheckChange(IrOpcode::kChangeTaggedToInt64, MachineRepresentation::kTagged,
              Type::Signed32(), MachineRepresentation::kWord64);
  CheckChange(IrOpcode::kChangeTaggedToInt64, MachineRepresentation::kTagged,
              Type::Unsigned32(), MachineRepresentation::kWord64);
  CheckChange(IrOpcode::kChangeTaggedToInt64, MachineRepresentation::kTagged,
              TypeCache::Get().kSafeInteger, MachineRepresentation::kWord64);

  // TaggedSigned (Smi) -> Word64: the cheaper Smi-only untagging operator.
  CheckChange(IrOpcode::kChangeTaggedSignedToInt64,
              MachineRepresentation::kTaggedSigned, Type::SignedSmall(),
              MachineRepresentation::kWord64);
}
TEST(SingleChanges) {
CheckChange(IrOpcode::kChangeTaggedToBit, MachineRepresentation::kTagged,
Type::Boolean(), MachineRepresentation::kBit);
......@@ -523,16 +575,10 @@ TEST(TypeErrors) {
MachineRepresentation::kWord64);
r.CheckTypeError(MachineRepresentation::kTagged, Type::Boolean(),
MachineRepresentation::kWord64);
// Word64 / Word32 shouldn't be implicitly converted.
r.CheckTypeError(MachineRepresentation::kWord64, Type::Internal(),
MachineRepresentation::kWord32);
r.CheckTypeError(MachineRepresentation::kWord32, Type::Number(),
MachineRepresentation::kWord64);
r.CheckTypeError(MachineRepresentation::kWord32, Type::Signed32(),
MachineRepresentation::kWord64);
r.CheckTypeError(MachineRepresentation::kWord32, Type::Unsigned32(),
MachineRepresentation::kWord64);
}
} // namespace compiler
......
......@@ -4174,7 +4174,6 @@ TEST(RunChangeFloat64ToInt32_B) {
}
}
TEST(RunChangeFloat64ToUint32) {
BufferedRawMachineAssemblerTester<uint32_t> m(MachineType::Float64());
m.Return(m.ChangeFloat64ToUint32(m.Parameter(0)));
......@@ -6340,6 +6339,18 @@ TEST(RunCallCFunction9) {
#if V8_TARGET_ARCH_64_BIT
// TODO(titzer): run int64 tests on all platforms when supported.
// Exercises the ChangeFloat64ToInt64 machine operator end-to-end: for every
// int64 input that round-trips exactly through double, the generated code
// must recover the original integer.
TEST(RunChangeFloat64ToInt64) {
  BufferedRawMachineAssemblerTester<int64_t> m(MachineType::Float64());
  m.Return(m.ChangeFloat64ToInt64(m.Parameter(0)));

  FOR_INT64_INPUTS(i) {
    const double as_double = static_cast<double>(*i);
    // Skip values that lose precision when widened to double.
    if (static_cast<int64_t>(as_double) != *i) continue;
    CHECK_EQ(static_cast<int64_t>(as_double), m.Call(as_double));
  }
}
TEST(RunBitcastInt64ToFloat64) {
int64_t input = 1;
Float64 output;
......
......@@ -344,6 +344,17 @@ TEST_F(MachineOperatorReducerTest, ChangeFloat64ToInt32WithConstant) {
}
}
// -----------------------------------------------------------------------------
// ChangeFloat64ToInt64

TEST_F(MachineOperatorReducerTest, ChangeFloat64ToInt64WithConstant) {
  // ChangeFloat64ToInt64 applied to a float64 constant that holds an exact
  // integer must fold to the corresponding Int64Constant.
  TRACED_FOREACH(int32_t, x, kInt32Values) {
    Node* const node = graph()->NewNode(machine()->ChangeFloat64ToInt64(),
                                        Float64Constant(FastI2D(x)));
    Reduction const r = Reduce(node);
    ASSERT_TRUE(r.Changed());
    EXPECT_THAT(r.replacement(), IsInt64Constant(x));
  }
}
// -----------------------------------------------------------------------------
// ChangeFloat64ToUint32
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment