Commit e06001f2 authored by Leszek Swirski, committed by V8 LUCI CQ

[maglev] Add number/string->index conversion

Add a conversion to int32 index for Numbers and Strings containing
indices, and change the element bounds check / lookup nodes to take an
int32 rather than a Smi. While we're at it, also convert the index node
to an int32 index differently, depending on its known representation.

Bug: v8:7700, v8:13287
Change-Id: Ie98502d58f789873d42f8801499e78bf777db70f
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3900012
Auto-Submit: Leszek Swirski <leszeks@chromium.org>
Reviewed-by: Toon Verwaest <verwaest@chromium.org>
Commit-Queue: Leszek Swirski <leszeks@chromium.org>
Cr-Commit-Position: refs/heads/main@{#83291}
parent 1f329e07
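
In short, the element fast path now accepts any key that already denotes a valid index: a Smi, a HeapNumber holding an exact int32, or a String spelling a canonical array index. The sketch below condenses that contract into self-contained C++; the names (KeyToIndex, TaggedKey) are illustrative stand-ins rather than V8 API, and the string rule only approximates what string_to_array_index_function enforces.

#include <cstdint>
#include <optional>
#include <string>
#include <variant>

// Stand-in for a tagged key: Smi, HeapNumber, or String.
using TaggedKey = std::variant<int32_t, double, std::string>;

// Returns the int32 element index, or nullopt where the Maglev node deopts.
std::optional<int32_t> KeyToIndex(const TaggedKey& key) {
  if (const int32_t* smi = std::get_if<int32_t>(&key)) {
    return *smi;  // Smi fast path: just untag.
  }
  if (const double* num = std::get_if<double>(&key)) {
    // HeapNumber path: the float64 must survive an int32 round-trip.
    if (!(*num >= INT32_MIN && *num <= INT32_MAX)) return std::nullopt;
    int32_t truncated = static_cast<int32_t>(*num);
    if (static_cast<double>(truncated) != *num) return std::nullopt;
    return truncated;
  }
  // String path: canonical non-negative decimal, no leading zeros.
  const std::string& s = std::get<std::string>(key);
  if (s.empty() || s.size() > 10) return std::nullopt;
  if (s.size() > 1 && s[0] == '0') return std::nullopt;
  uint64_t acc = 0;
  for (char c : s) {
    if (c < '0' || c > '9') return std::nullopt;
    acc = acc * 10 + static_cast<uint64_t>(c - '0');
  }
  if (acc > INT32_MAX) return std::nullopt;  // result must fit an int32
  return static_cast<int32_t>(acc);
}

Keys like 3, 3.0, and "3" all reach the element load; 3.5 or "03" deopt (and the IC eventually goes generic). A negative Smi passes conversion here, as in the node itself, and is rejected later by the bounds check.
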
@@ -1184,7 +1184,32 @@ bool MaglevGraphBuilder::TryBuildMonomorphicElementLoadFromSmiHandler(
DCHECK(!LoadHandler::ConvertHoleBits::decode(handler));
BuildMapCheck(object, map);
-  BuildCheckSmi(index);
+  switch (index->properties().value_representation()) {
+    case ValueRepresentation::kTagged: {
+      if (CheckedSmiTag* checked_untag = index->TryCast<CheckedSmiTag>()) {
+        index = checked_untag->input().node();
+      } else if (SmiConstant* constant = index->TryCast<SmiConstant>()) {
+        index = GetInt32Constant(constant->value().value());
+      } else if (NodeInfo::IsSmi(known_node_aspects().GetInfoFor(index))) {
+        // TODO(leszeks): This could be unchecked.
+        index = AddNewNode<CheckedSmiUntag>({index});
+      } else {
+        index = AddNewNode<CheckedObjectToIndex>({index});
+      }
+      break;
+    }
+    case ValueRepresentation::kInt32: {
+      // Already good.
+      break;
+    }
+    case ValueRepresentation::kFloat64: {
+      // TODO(leszeks): Pass in the index register (probably the
+      // accumulator), so that we can save this truncation on there as a
+      // conversion node.
+      index = AddNewNode<CheckedTruncateFloat64ToInt32>({index});
+      break;
+    }
+  }
if (LoadHandler::IsJsArrayBits::decode(handler)) {
DCHECK(map.IsJSArrayMap());
@@ -1302,7 +1327,6 @@ void MaglevGraphBuilder::VisitGetKeyedProperty() {
ValueNode* object = LoadRegisterTagged(0);
// TODO(leszeks): We don't need to tag the key if it's an Int32 and a simple
// monomorphic element load.
-  ValueNode* key = GetAccumulatorTagged();
FeedbackSlot slot = GetSlotOperand(1);
compiler::FeedbackSource feedback_source{feedback(), slot};
@@ -1329,6 +1353,10 @@ void MaglevGraphBuilder::VisitGetKeyedProperty() {
MaybeObjectHandle handler =
FeedbackNexusForSlot(slot).FindHandlerForMap(map.object());
+
+      // Get the accumulator without conversion. TryBuildMonomorphicElementLoad
+      // will try to pick the best representation.
+      ValueNode* key = current_interpreter_frame_.accumulator();
if (TryBuildMonomorphicElementLoad(object, key, map, handler)) return;
break;
}
@@ -1339,6 +1367,7 @@ void MaglevGraphBuilder::VisitGetKeyedProperty() {
// Create a generic store in the fallthrough.
ValueNode* context = GetContext();
+  ValueNode* key = GetAccumulatorTagged();
SetAccumulator(
AddNewNode<GetKeyedGeneric>({context, object, key}, feedback_source));
}
......
@@ -122,6 +122,7 @@ class MaglevGraphVerifier {
case Opcode::kCheckString:
case Opcode::kCheckSymbol:
case Opcode::kCheckedInternalizedString:
+    case Opcode::kCheckedObjectToIndex:
// TODO(victorgomes): Can we check that the input is Boolean?
case Opcode::kBranchIfToBooleanTrue:
case Opcode::kBranchIfRootConstant:
@@ -177,10 +178,6 @@ class MaglevGraphVerifier {
case Opcode::kGenericLessThan:
case Opcode::kGenericLessThanOrEqual:
case Opcode::kGenericStrictEqual:
-    case Opcode::kCheckJSArrayBounds:
-    case Opcode::kCheckJSObjectElementsBounds:
-    case Opcode::kLoadTaggedElement:
-    case Opcode::kLoadDoubleElement:
case Opcode::kGetIterator:
case Opcode::kTaggedEqual:
case Opcode::kTaggedNotEqual:
@@ -276,6 +273,14 @@ class MaglevGraphVerifier {
CheckValueInputIs(node, i, ValueRepresentation::kTagged);
}
break;
+    case Opcode::kCheckJSArrayBounds:
+    case Opcode::kCheckJSObjectElementsBounds:
+    case Opcode::kLoadTaggedElement:
+    case Opcode::kLoadDoubleElement:
+      DCHECK_EQ(node->input_count(), 2);
+      CheckValueInputIs(node, 0, ValueRepresentation::kTagged);
+      CheckValueInputIs(node, 1, ValueRepresentation::kInt32);
+      break;
case Opcode::kCallBuiltin: {
CallBuiltin* call_builtin = node->Cast<CallBuiltin>();
auto descriptor =
......
@@ -181,6 +181,7 @@ void AllocateRaw(MaglevAssembler* masm, RegisterSnapshot& register_snapshot,
// Remove {object} from snapshot, since it is the returned allocated
// HeapObject.
register_snapshot.live_registers.clear(object);
+  register_snapshot.live_tagged_registers.clear(object);
{
SaveRegisterStateForCall save_register_state(masm, register_snapshot);
using D = AllocateDescriptor;
@@ -1385,15 +1386,14 @@ void CheckJSArrayBounds::GenerateCode(MaglevAssembler* masm,
Register object = ToRegister(receiver_input());
Register index = ToRegister(index_input());
__ AssertNotSmi(object);
-  __ AssertSmi(index);
if (v8_flags.debug_code) {
__ CmpObjectType(object, JS_ARRAY_TYPE, kScratchRegister);
__ Assert(equal, AbortReason::kUnexpectedValue);
}
-  TaggedRegister length(kScratchRegister);
-  __ LoadAnyTaggedField(length, FieldOperand(object, JSArray::kLengthOffset));
-  __ cmp_tagged(index, length.reg());
+  __ SmiUntagField(kScratchRegister,
+                   FieldOperand(object, JSArray::kLengthOffset));
+  __ cmpl(index, kScratchRegister);
__ EmitEagerDeoptIf(above_equal, DeoptimizeReason::kOutOfBounds, this);
}
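
Worth noting about the new compare: above_equal is an unsigned condition, so a negative int32 index wraps around to a large unsigned value and also deopts. In portable terms (an illustrative sketch, assuming length is the untagged, nonnegative Smi length):

#include <cstdint>

// Mirrors `cmpl(index, length); j(above_equal, deopt)`: one unsigned
// comparison covers both index >= length and index < 0.
bool InBounds(int32_t index, int32_t length) {
  return static_cast<uint32_t>(index) < static_cast<uint32_t>(length);
}
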
@@ -1407,7 +1407,6 @@ void CheckJSObjectElementsBounds::GenerateCode(MaglevAssembler* masm,
Register object = ToRegister(receiver_input());
Register index = ToRegister(index_input());
__ AssertNotSmi(object);
-  __ AssertSmi(index);
if (v8_flags.debug_code) {
__ CmpObjectType(object, FIRST_JS_OBJECT_TYPE, kScratchRegister);
@@ -1418,10 +1417,9 @@ void CheckJSObjectElementsBounds::GenerateCode(MaglevAssembler* masm,
if (v8_flags.debug_code) {
__ AssertNotSmi(kScratchRegister);
}
-  TaggedRegister length(kScratchRegister);
-  __ LoadAnyTaggedField(
-      length, FieldOperand(kScratchRegister, FixedArray::kLengthOffset));
-  __ cmp_tagged(index, length.reg());
+  __ SmiUntagField(kScratchRegister,
+                   FieldOperand(kScratchRegister, FixedArray::kLengthOffset));
+  __ cmpl(index, kScratchRegister);
__ EmitEagerDeoptIf(above_equal, DeoptimizeReason::kOutOfBounds, this);
}
@@ -1479,6 +1477,81 @@ void CheckedInternalizedString::GenerateCode(MaglevAssembler* masm,
object, this, eager_deopt_info(), map_tmp);
}
+
+void CheckedObjectToIndex::AllocateVreg(MaglevVregAllocationState* vreg_state) {
+  UseRegister(object_input());
+  DefineAsRegister(vreg_state, this);
+  set_double_temporaries_needed(1);
+}
+
+void CheckedObjectToIndex::GenerateCode(MaglevAssembler* masm,
+                                        const ProcessingState& state) {
+  Register object = ToRegister(object_input());
+  Register result_reg = ToRegister(result());
+
+  ZoneLabelRef done(masm->compilation_info()->zone());
+  Condition is_smi = __ CheckSmi(object);
+  __ JumpToDeferredIf(
+      NegateCondition(is_smi),
+      [](MaglevAssembler* masm, Label* return_label, Register object,
+         Register result_reg, ZoneLabelRef done, CheckedObjectToIndex* node) {
+        Label is_string;
+        __ LoadMap(kScratchRegister, object);
+        __ CmpInstanceTypeRange(kScratchRegister, kScratchRegister,
+                                FIRST_STRING_TYPE, LAST_STRING_TYPE);
+        __ j(below_equal, &is_string);
+
+        __ cmpl(kScratchRegister, Immediate(HEAP_NUMBER_TYPE));
+        // The IC will go generic if it encounters something other than a
+        // Number or String key.
+        __ EmitEagerDeoptIf(not_equal, DeoptimizeReason::kNotInt32, node);
+
+        // Heap Number.
+        {
+          DoubleRegister number_value = node->double_temporaries().first();
+          DoubleRegister converted_back = kScratchDoubleReg;
+          // Load the heap number's float64 payload.
+          __ Movsd(number_value,
+                   FieldOperand(object, HeapNumber::kValueOffset));
+          // Convert the input float64 value to int32.
+          __ Cvttsd2si(result_reg, number_value);
+          // Convert that int32 value back to float64.
+          __ Cvtlsi2sd(converted_back, result_reg);
+          // Check that the result of the float64->int32->float64 round-trip
+          // is equal to the input (i.e. that the conversion didn't truncate).
+          __ Ucomisd(number_value, converted_back);
+          __ j(equal, *done);
+          __ EmitEagerDeopt(node, DeoptimizeReason::kNotInt32);
+        }
+
+        // String.
+        __ bind(&is_string);
+        {
+          RegisterSnapshot snapshot = node->register_snapshot();
+          snapshot.live_registers.clear(result_reg);
+          DCHECK(!snapshot.live_tagged_registers.has(result_reg));
+          {
+            SaveRegisterStateForCall save_register_state(masm, snapshot);
+            AllowExternalCallThatCantCauseGC scope(masm);
+            __ PrepareCallCFunction(1);
+            __ Move(arg_reg_1, object);
+            __ CallCFunction(
+                ExternalReference::string_to_array_index_function(), 1);
+            // No need for safepoint since this is a fast C call.
+            __ Move(result_reg, kReturnRegister0);
+          }
+          __ cmpl(result_reg, Immediate(0));
+          __ j(greater_equal, *done);
+          __ EmitEagerDeopt(node, DeoptimizeReason::kNotInt32);
+        }
+      },
+      object, result_reg, done, this);
+
+  // If we didn't enter the deferred block, we're a Smi.
+  if (result_reg == object) {
+    __ SmiUntag(object);
+  } else {
+    __ SmiUntag(result_reg, object);
+  }
+
+  __ bind(*done);
+}
+
void LoadTaggedField::AllocateVreg(MaglevVregAllocationState* vreg_state) {
UseRegister(object_input());
DefineAsRegister(vreg_state, this);
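
Two fine points of the deferred HeapNumber path above, shown in self-contained C++ (an approximation of the Cvttsd2si/Cvtlsi2sd/Ucomisd sequence, not the generated code): NaN fails the equality and deopts, while -0.0 compares equal to 0.0 and is accepted as index 0. The string branch, for its part, relies on the C callee returning a negative value for non-index strings, hence the greater_equal test on the result.

#include <cmath>
#include <cstdint>
#include <cstdio>

// Approximates the float64 -> int32 -> float64 round-trip check. The range
// guard stands in for Cvttsd2si's out-of-range sentinel behavior.
bool RoundTripsToInt32(double v, int32_t* out) {
  if (!(v >= INT32_MIN && v <= INT32_MAX)) return false;  // NaN/inf/range
  int32_t i = static_cast<int32_t>(v);
  if (static_cast<double>(i) != v) return false;  // truncation lost bits
  *out = i;
  return true;
}

int main() {
  int32_t i;
  std::printf("%d\n", RoundTripsToInt32(7.0, &i));   // 1, i == 7
  std::printf("%d\n", RoundTripsToInt32(7.25, &i));  // 0: deopt kNotInt32
  std::printf("%d\n", RoundTripsToInt32(-0.0, &i));  // 1: -0.0 == 0.0 here
  std::printf("%d\n", RoundTripsToInt32(NAN, &i));   // 0: deopt kNotInt32
}
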
@@ -1538,20 +1611,9 @@ void LoadTaggedElement::GenerateCode(MaglevAssembler* masm,
__ DecompressAnyTagged(kScratchRegister,
FieldOperand(object, JSObject::kElementsOffset));
}
-  __ AssertSmi(index);
-  // Zero out top bits of index reg (these were previously either zero already,
-  // or the cage base). This technically mutates it, but since it's a Smi, that
-  // doesn't matter.
-  __ movl(index, index);
-  static_assert(kSmiTagSize + kSmiShiftSize < times_tagged_size,
-                "Folding the Smi shift into the FixedArray entry size shift "
-                "only works if the shift is small");
  __ DecompressAnyTagged(
-      result_reg,
-      FieldOperand(kScratchRegister, index,
-                   static_cast<ScaleFactor>(times_tagged_size -
-                                            (kSmiTagSize + kSmiShiftSize)),
-                   FixedArray::kHeaderSize));
+      result_reg, FieldOperand(kScratchRegister, index, times_tagged_size,
+                               FixedArray::kHeaderSize));
}
void LoadDoubleElement::AllocateVreg(MaglevVregAllocationState* vreg_state) {
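
The deleted ScaleFactor arithmetic existed because a Smi register holds the real index pre-shifted left by kSmiTagSize + kSmiShiftSize, so the old code folded that shift into the x64 addressing scale (legal only while the folded scale stayed within 1/2/4/8, which is what the static_assert guaranteed). With an int32 index the scale is simply the element size. A self-contained sketch of the arithmetic, where the constants are assumptions for illustration (smi shift of 1, 8-byte double elements):

#include <cstdint>

constexpr int kSmiShift = 1;         // kSmiTagSize + kSmiShiftSize (assumed)
constexpr int kElementSizeLog2 = 3;  // times_8: 8-byte double elements

// Old scheme: the register holds (index << kSmiShift), so scale by the
// remaining shift only.
constexpr uintptr_t OldOffset(uintptr_t smi_index) {
  return smi_index << (kElementSizeLog2 - kSmiShift);
}

// New scheme: the register holds the plain int32 index.
constexpr uintptr_t NewOffset(uintptr_t index) {
  return index << kElementSizeLog2;
}

// Both address the same element: (index << 1) scaled by 4 == index * 8.
static_assert(OldOffset(5u << kSmiShift) == NewOffset(5u), "same element");
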
@@ -1579,19 +1641,8 @@ void LoadDoubleElement::GenerateCode(MaglevAssembler* masm,
__ DecompressAnyTagged(kScratchRegister,
FieldOperand(object, JSObject::kElementsOffset));
}
-  __ AssertSmi(index);
-  // Zero out top bits of index reg (these were previously either zero already,
-  // or the cage base). This technically mutates it, but since it's a Smi, that
-  // doesn't matter.
-  __ movl(index, index);
-  static_assert(kSmiTagSize + kSmiShiftSize < times_8,
-                "Folding the Smi shift into the FixedArray entry size shift "
-                "only works if the shift is small");
-  __ Movsd(result_reg,
-           FieldOperand(kScratchRegister, index,
-                        static_cast<ScaleFactor>(times_8 -
-                                                 (kSmiTagSize + kSmiShiftSize)),
-                        FixedDoubleArray::kHeaderSize));
+  __ Movsd(result_reg, FieldOperand(kScratchRegister, index, times_8,
+                                    FixedDoubleArray::kHeaderSize));
}
void StoreTaggedFieldNoWriteBarrier::AllocateVreg(
......
@@ -161,6 +161,7 @@ class CompactInterpreterFrameState;
V(CheckedSmiTag) \
V(CheckedSmiUntag) \
V(CheckedInternalizedString) \
+  V(CheckedObjectToIndex) \
V(ChangeInt32ToFloat64) \
V(CheckedTruncateFloat64ToInt32) \
V(Float64Box) \
@@ -2775,6 +2776,23 @@ class CheckedInternalizedString
const CheckType check_type_;
};
+class CheckedObjectToIndex
+    : public FixedInputValueNodeT<1, CheckedObjectToIndex> {
+  using Base = FixedInputValueNodeT<1, CheckedObjectToIndex>;
+
+ public:
+  explicit CheckedObjectToIndex(uint64_t bitfield) : Base(bitfield) {}
+
+  static constexpr OpProperties kProperties =
+      OpProperties::EagerDeopt() | OpProperties::Int32() |
+      OpProperties::DeferredCall() | OpProperties::ConversionNode();
+
+  static constexpr int kObjectIndex = 0;
+  Input& object_input() { return Node::input(kObjectIndex); }
+
+  DECL_NODE_INTERFACE_WITH_EMPTY_PRINT_PARAMS()
+};
+
class GetTemplateObject : public FixedInputValueNodeT<1, GetTemplateObject> {
using Base = FixedInputValueNodeT<1, GetTemplateObject>;
......
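
A closing note on the kProperties line in the new node class: EagerDeopt records that the node can bail out before any side effect, Int32 fixes the result's value representation, DeferredCall tells the register allocator to keep a RegisterSnapshot for the out-of-line C call, and ConversionNode lets the graph builder cache the conversion per input node. The composition itself is ordinary constexpr bit arithmetic, roughly as follows (a sketch, not V8's actual OpProperties implementation):

#include <cstdint>

// Sketch of bitmask-style property composition (illustrative only).
struct Props {
  uint32_t bits;
  constexpr Props operator|(Props other) const { return {bits | other.bits}; }
  constexpr bool has(Props other) const { return (bits & other.bits) != 0; }
};

constexpr Props kEagerDeopt{1u << 0};
constexpr Props kInt32{1u << 1};
constexpr Props kDeferredCall{1u << 2};
constexpr Props kConversionNode{1u << 3};

constexpr Props kProperties =
    kEagerDeopt | kInt32 | kDeferredCall | kConversionNode;
static_assert(kProperties.has(kDeferredCall), "needs a register snapshot");
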