Commit 08c76f21 authored by Igor Sheludko, committed by Commit Bot

[ptr-compr][csa] Use TaggedIndex nodes in IC builtins

This CL simplifies IC code since we no longer need to keep
feedback slot indices in both Smi and IntPtr form and as
a result it should improve overall performance of --no-opt
mode on Octane by ~1%.

Bug: v8:10047
Change-Id: Ib717697cdb805c9f93286e9c62ee8a63361d3560
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/1965586
Commit-Queue: Igor Sheludko <ishell@chromium.org>
Reviewed-by: Toon Verwaest <verwaest@chromium.org>
Reviewed-by: Tobias Tebbi <tebbi@chromium.org>
Reviewed-by: Ross McIlroy <rmcilroy@chromium.org>
Cr-Commit-Position: refs/heads/master@{#66585}
parent 578c3a3b
......@@ -29,6 +29,8 @@ type Tagged generates 'TNode<MaybeObject>' constexpr 'MaybeObject';
type StrongTagged extends Tagged
generates 'TNode<Object>' constexpr 'ObjectPtr';
type Smi extends StrongTagged generates 'TNode<Smi>' constexpr 'Smi';
type TaggedIndex extends StrongTagged
generates 'TNode<TaggedIndex>' constexpr 'TaggedIndex';
// A possibly cleared weak pointer with a bit pattern that distinguishes it from
// strong HeapObject pointers and Smi values.
type WeakHeapObject extends Tagged;
......@@ -498,7 +500,8 @@ extern transitioning builtin HasProperty(implicit context: Context)(
JSAny, JSAny): Boolean;
extern transitioning macro HasProperty_Inline(implicit context: Context)(
JSReceiver, JSAny): Boolean;
extern builtin LoadIC(Context, JSAny, JSAny, Smi, FeedbackVector): JSAny;
extern builtin LoadIC(
Context, JSAny, JSAny, TaggedIndex, FeedbackVector): JSAny;
extern macro CollectCallFeedback(
JSAny, Context, Undefined | FeedbackVector, uintptr);
......@@ -880,6 +883,10 @@ macro SmiUntag<T: type>(value: SmiTagged<T>): T {
return %RawDownCast<T>(Unsigned(SmiToInt32(Convert<Smi>(value))));
}
extern macro SmiToInt32(Smi): int32;
extern macro TaggedIndexToIntPtr(TaggedIndex): intptr;
extern macro IntPtrToTaggedIndex(intptr): TaggedIndex;
extern macro TaggedIndexToSmi(TaggedIndex): Smi;
extern macro SmiToTaggedIndex(Smi): TaggedIndex;
extern macro RoundIntPtrToFloat64(intptr): float64;
extern macro ChangeFloat32ToFloat64(float32): float64;
extern macro ChangeNumberToFloat64(Number): float64;
......
......@@ -284,7 +284,7 @@ TF_BUILTIN(FastNewFunctionContextFunction, ConstructorBuiltinsAssembler) {
}
TNode<JSRegExp> ConstructorBuiltinsAssembler::EmitCreateRegExpLiteral(
TNode<HeapObject> maybe_feedback_vector, TNode<UintPtrT> slot,
TNode<HeapObject> maybe_feedback_vector, TNode<TaggedIndex> slot,
TNode<Object> pattern, TNode<Smi> flags, TNode<Context> context) {
Label call_runtime(this, Label::kDeferred), end(this);
......@@ -311,8 +311,7 @@ TNode<JSRegExp> ConstructorBuiltinsAssembler::EmitCreateRegExpLiteral(
BIND(&call_runtime);
{
result = CAST(CallRuntime(Runtime::kCreateRegExpLiteral, context,
maybe_feedback_vector, SmiTag(Signed(slot)),
pattern, flags));
maybe_feedback_vector, slot, pattern, flags));
Goto(&end);
}
......@@ -323,7 +322,7 @@ TNode<JSRegExp> ConstructorBuiltinsAssembler::EmitCreateRegExpLiteral(
TF_BUILTIN(CreateRegExpLiteral, ConstructorBuiltinsAssembler) {
TNode<HeapObject> maybe_feedback_vector =
CAST(Parameter(Descriptor::kFeedbackVector));
TNode<UintPtrT> slot = Unsigned(SmiUntag(Parameter(Descriptor::kSlot)));
TNode<TaggedIndex> slot = CAST(Parameter(Descriptor::kSlot));
TNode<Object> pattern = CAST(Parameter(Descriptor::kPattern));
TNode<Smi> flags = CAST(Parameter(Descriptor::kFlags));
TNode<Context> context = CAST(Parameter(Descriptor::kContext));
......@@ -333,7 +332,7 @@ TF_BUILTIN(CreateRegExpLiteral, ConstructorBuiltinsAssembler) {
}
TNode<JSArray> ConstructorBuiltinsAssembler::EmitCreateShallowArrayLiteral(
TNode<FeedbackVector> feedback_vector, TNode<UintPtrT> slot,
TNode<FeedbackVector> feedback_vector, TNode<TaggedIndex> slot,
TNode<Context> context, Label* call_runtime,
AllocationSiteMode allocation_site_mode) {
Label zero_capacity(this), cow_elements(this), fast_elements(this),
......@@ -356,7 +355,7 @@ TNode<JSArray> ConstructorBuiltinsAssembler::EmitCreateShallowArrayLiteral(
TF_BUILTIN(CreateShallowArrayLiteral, ConstructorBuiltinsAssembler) {
TNode<FeedbackVector> feedback_vector =
CAST(Parameter(Descriptor::kFeedbackVector));
TNode<UintPtrT> slot = Unsigned(SmiUntag(Parameter(Descriptor::kSlot)));
TNode<TaggedIndex> slot = CAST(Parameter(Descriptor::kSlot));
TNode<ArrayBoilerplateDescription> constant_elements =
CAST(Parameter(Descriptor::kConstantElements));
TNode<Context> context = CAST(Parameter(Descriptor::kContext));
......@@ -371,13 +370,12 @@ TF_BUILTIN(CreateShallowArrayLiteral, ConstructorBuiltinsAssembler) {
int const flags =
AggregateLiteral::kDisableMementos | AggregateLiteral::kIsShallow;
Return(CallRuntime(Runtime::kCreateArrayLiteral, context, feedback_vector,
SmiTag(Signed(slot)), constant_elements,
SmiConstant(flags)));
slot, constant_elements, SmiConstant(flags)));
}
}
TNode<JSArray> ConstructorBuiltinsAssembler::EmitCreateEmptyArrayLiteral(
TNode<FeedbackVector> feedback_vector, TNode<UintPtrT> slot,
TNode<FeedbackVector> feedback_vector, TNode<TaggedIndex> slot,
TNode<Context> context) {
// Array literals always have a valid AllocationSite to properly track
// elements transitions.
......@@ -395,8 +393,10 @@ TNode<JSArray> ConstructorBuiltinsAssembler::EmitCreateEmptyArrayLiteral(
// TODO(cbruni): create the AllocationSite in CSA.
BIND(&initialize_allocation_site);
{
allocation_site =
CreateAllocationSiteInFeedbackVector(feedback_vector, slot);
allocation_site = CreateAllocationSiteInFeedbackVector(
feedback_vector,
// TODO(v8:10047): pass slot as TaggedIndex here
Unsigned(TaggedIndexToIntPtr(slot)));
Goto(&create_empty_array);
}
......@@ -421,7 +421,7 @@ TNode<JSArray> ConstructorBuiltinsAssembler::EmitCreateEmptyArrayLiteral(
TF_BUILTIN(CreateEmptyArrayLiteral, ConstructorBuiltinsAssembler) {
TNode<FeedbackVector> feedback_vector =
CAST(Parameter(Descriptor::kFeedbackVector));
TNode<UintPtrT> slot = Unsigned(SmiUntag(Parameter(Descriptor::kSlot)));
TNode<TaggedIndex> slot = CAST(Parameter(Descriptor::kSlot));
TNode<Context> context = CAST(Parameter(Descriptor::kContext));
TNode<JSArray> result =
EmitCreateEmptyArrayLiteral(feedback_vector, slot, context);
......@@ -429,7 +429,7 @@ TF_BUILTIN(CreateEmptyArrayLiteral, ConstructorBuiltinsAssembler) {
}
TNode<HeapObject> ConstructorBuiltinsAssembler::EmitCreateShallowObjectLiteral(
TNode<FeedbackVector> feedback_vector, TNode<UintPtrT> slot,
TNode<FeedbackVector> feedback_vector, TNode<TaggedIndex> slot,
Label* call_runtime) {
TNode<Object> maybe_allocation_site =
CAST(LoadFeedbackVectorSlot(feedback_vector, slot));
......@@ -609,7 +609,7 @@ TF_BUILTIN(CreateShallowObjectLiteral, ConstructorBuiltinsAssembler) {
Label call_runtime(this);
TNode<FeedbackVector> feedback_vector =
CAST(Parameter(Descriptor::kFeedbackVector));
TNode<UintPtrT> slot = Unsigned(SmiUntag(Parameter(Descriptor::kSlot)));
TNode<TaggedIndex> slot = CAST(Parameter(Descriptor::kSlot));
TNode<HeapObject> copy =
EmitCreateShallowObjectLiteral(feedback_vector, slot, &call_runtime);
Return(copy);
......@@ -619,8 +619,8 @@ TF_BUILTIN(CreateShallowObjectLiteral, ConstructorBuiltinsAssembler) {
CAST(Parameter(Descriptor::kObjectBoilerplateDescription));
TNode<Smi> flags = CAST(Parameter(Descriptor::kFlags));
TNode<Context> context = CAST(Parameter(Descriptor::kContext));
TailCallRuntime(Runtime::kCreateObjectLiteral, context, feedback_vector,
SmiTag(Signed(slot)), object_boilerplate_description, flags);
TailCallRuntime(Runtime::kCreateObjectLiteral, context, feedback_vector, slot,
object_boilerplate_description, flags);
}
// Used by the CreateEmptyObjectLiteral bytecode and the Object constructor.
......
......@@ -21,20 +21,20 @@ class ConstructorBuiltinsAssembler : public CodeStubAssembler {
ScopeType scope_type);
TNode<JSRegExp> EmitCreateRegExpLiteral(
TNode<HeapObject> maybe_feedback_vector, TNode<UintPtrT> slot,
TNode<HeapObject> maybe_feedback_vector, TNode<TaggedIndex> slot,
TNode<Object> pattern, TNode<Smi> flags, TNode<Context> context);
TNode<JSArray> EmitCreateShallowArrayLiteral(
TNode<FeedbackVector> feedback_vector, TNode<UintPtrT> slot,
TNode<FeedbackVector> feedback_vector, TNode<TaggedIndex> slot,
TNode<Context> context, Label* call_runtime,
AllocationSiteMode allocation_site_mode);
TNode<JSArray> EmitCreateEmptyArrayLiteral(
TNode<FeedbackVector> feedback_vector, TNode<UintPtrT> slot,
TNode<FeedbackVector> feedback_vector, TNode<TaggedIndex> slot,
TNode<Context> context);
TNode<HeapObject> EmitCreateShallowObjectLiteral(
TNode<FeedbackVector> feedback_vector, TNode<UintPtrT> slot,
TNode<FeedbackVector> feedback_vector, TNode<TaggedIndex> slot,
Label* call_runtime);
TNode<JSObject> EmitCreateEmptyObjectLiteral(TNode<Context> context);
......
......@@ -440,13 +440,15 @@ TF_BUILTIN(GetIteratorWithFeedbackLazyDeoptContinuation,
IteratorBuiltinsAssembler) {
TNode<Context> context = CAST(Parameter(Descriptor::kContext));
TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
TNode<Smi> callSlot = CAST(Parameter(Descriptor::kCallSlot));
// TODO(v8:10047): Use TaggedIndex here once TurboFan supports it.
TNode<Smi> call_slot_smi = CAST(Parameter(Descriptor::kCallSlot));
TNode<TaggedIndex> call_slot = SmiToTaggedIndex(call_slot_smi);
TNode<FeedbackVector> feedback = CAST(Parameter(Descriptor::kFeedback));
TNode<Object> iteratorMethod = CAST(Parameter(Descriptor::kResult));
TNode<Object> iterator_method = CAST(Parameter(Descriptor::kResult));
TNode<Object> result =
CallBuiltin(Builtins::kCallIteratorWithFeedback, context, receiver,
iteratorMethod, callSlot, feedback);
iterator_method, call_slot, feedback);
Return(result);
}
......
......@@ -168,6 +168,12 @@ Convert<intptr, Smi>(s: Smi): intptr {
Convert<uintptr, PositiveSmi>(ps: PositiveSmi): uintptr {
return Unsigned(SmiUntag(ps));
}
// TaggedIndex stores its payload pre-shifted by the tag bit (see
// IntPtrToTaggedIndex, which only does a WordShl by kSmiTagSize), so these
// conversions are cheap shifts rather than full Smi (un)tagging.
Convert<intptr, TaggedIndex>(ti: TaggedIndex): intptr {
  return TaggedIndexToIntPtr(ti);
}
Convert<TaggedIndex, intptr>(i: intptr): TaggedIndex {
  return IntPtrToTaggedIndex(i);
}
Convert<intptr, uintptr>(ui: uintptr): intptr {
const i = Signed(ui);
assert(i >= 0);
......
......@@ -52,8 +52,8 @@ namespace iterator {
Context)(JSAny);
transitioning builtin GetIteratorWithFeedback(
context: Context, receiver: JSAny, loadSlot: Smi, callSlot: Smi,
feedback: Undefined|FeedbackVector): JSAny {
context: Context, receiver: JSAny, loadSlot: TaggedIndex,
callSlot: TaggedIndex, feedback: Undefined|FeedbackVector): JSAny {
let iteratorMethod: JSAny;
typeswitch (feedback) {
case (Undefined): {
......@@ -64,8 +64,10 @@ namespace iterator {
context, receiver, IteratorSymbolConstant(), loadSlot, feedback);
}
}
// TODO(v8:10047): Use TaggedIndex here once TurboFan supports it.
const callSlotSmi: Smi = TaggedIndexToSmi(callSlot);
return CallIteratorWithFeedback(
context, receiver, iteratorMethod, callSlot, feedback);
context, receiver, iteratorMethod, callSlotSmi, feedback);
}
transitioning builtin CallIteratorWithFeedback(
......
......@@ -772,6 +772,37 @@ TNode<TaggedIndex> CodeStubAssembler::IntPtrToTaggedIndex(
BitcastWordToTaggedSigned(WordShl(value, IntPtrConstant(kSmiTagSize))));
}
// Converts a TaggedIndex into a Smi in the current Smi encoding.
// A TaggedIndex always keeps its payload in the lower 32 bits (shifted by
// kSmiTagSize only), independent of the Smi encoding in use.
TNode<Smi> CodeStubAssembler::TaggedIndexToSmi(TNode<TaggedIndex> value) {
  if (SmiValuesAre32Bits()) {
    // 32-bit Smi values live in the upper half word: shift the TaggedIndex
    // payload up by kSmiShiftSize (31) to produce a valid Smi bit pattern.
    DCHECK_EQ(kSmiShiftSize, 31);
    return BitcastWordToTaggedSigned(
        WordShl(BitcastTaggedToWordForTagAndSmiBits(value),
                IntPtrConstant(kSmiShiftSize)));
  }
  // With 31-bit Smis (kSmiShiftSize == 0) the two encodings coincide, so a
  // plain reinterpretation suffices.
  DCHECK(SmiValuesAre31Bits());
  DCHECK_EQ(kSmiShiftSize, 0);
  return ReinterpretCast<Smi>(value);
}
// Converts a Smi into a TaggedIndex. TaggedIndex keeps its (sign-extended)
// payload in the lower 32 bits regardless of Smi encoding, so this is the
// inverse of TaggedIndexToSmi above.
TNode<TaggedIndex> CodeStubAssembler::SmiToTaggedIndex(TNode<Smi> value) {
  if (kSystemPointerSize == kInt32Size) {
    // On 32-bit targets Smi and TaggedIndex share the same bit pattern.
    return ReinterpretCast<TaggedIndex>(value);
  }
  if (SmiValuesAre32Bits()) {
    // 32-bit Smi values live in the upper half word: arithmetic-shift them
    // down by kSmiShiftSize (31) into TaggedIndex position, preserving sign.
    DCHECK_EQ(kSmiShiftSize, 31);
    return ReinterpretCast<TaggedIndex>(BitcastWordToTaggedSigned(
        WordSar(BitcastTaggedToWordForTagAndSmiBits(value),
                IntPtrConstant(kSmiShiftSize))));
  }
  DCHECK(SmiValuesAre31Bits());
  DCHECK_EQ(kSmiShiftSize, 0);
  // Just sign-extend the lower 32 bits.
  // (With pointer compression the upper half of a Smi word may be garbage,
  // so truncate first, then sign-extend back to word width.)
  TNode<Int32T> raw =
      TruncateWordToInt32(BitcastTaggedToWordForTagAndSmiBits(value));
  return ReinterpretCast<TaggedIndex>(
      BitcastWordToTaggedSigned(ChangeInt32ToIntPtr(raw)));
}
TNode<Smi> CodeStubAssembler::NormalizeSmiIndex(TNode<Smi> smi_index) {
if (COMPRESS_POINTERS_BOOL) {
TNode<Int32T> raw =
......@@ -2494,7 +2525,7 @@ TNode<MaybeObject> CodeStubAssembler::LoadFeedbackVectorSlot(
}
template TNode<MaybeObject> CodeStubAssembler::LoadFeedbackVectorSlot(
TNode<FeedbackVector> feedback_vector, TNode<Smi> slot,
TNode<FeedbackVector> feedback_vector, TNode<TaggedIndex> slot,
int additional_offset);
template TNode<MaybeObject> CodeStubAssembler::LoadFeedbackVectorSlot(
TNode<FeedbackVector> feedback_vector, TNode<IntPtrT> slot,
......@@ -9277,17 +9308,18 @@ TNode<IntPtrT> CodeStubAssembler::ElementOffsetFromIndex(
TNode<TIndex> index_node, ElementsKind kind, int base_size) {
// TODO(v8:9708): Remove IntPtrT variant in favor of UintPtrT.
static_assert(std::is_same<TIndex, Smi>::value ||
std::is_same<TIndex, TaggedIndex>::value ||
std::is_same<TIndex, IntPtrT>::value ||
std::is_same<TIndex, UintPtrT>::value,
"Only Smi, UintPtrT or IntPtrT index nodes are allowed");
int element_size_shift = ElementsKindToShiftSize(kind);
int element_size = 1 << element_size_shift;
int const kSmiShiftBits = kSmiShiftSize + kSmiTagSize;
intptr_t index = 0;
TNode<IntPtrT> intptr_index_node;
bool constant_index = false;
if (std::is_same<TIndex, Smi>::value) {
TNode<Smi> smi_index_node = ReinterpretCast<Smi>(index_node);
int const kSmiShiftBits = kSmiShiftSize + kSmiTagSize;
element_size_shift -= kSmiShiftBits;
Smi smi_index;
constant_index = ToSmiConstant(smi_index_node, &smi_index);
......@@ -9299,6 +9331,12 @@ TNode<IntPtrT> CodeStubAssembler::ElementOffsetFromIndex(
}
}
intptr_index_node = BitcastTaggedToWordForTagAndSmiBits(smi_index_node);
} else if (std::is_same<TIndex, TaggedIndex>::value) {
TNode<TaggedIndex> tagged_index_node =
ReinterpretCast<TaggedIndex>(index_node);
element_size_shift -= kSmiTagSize;
intptr_index_node = BitcastTaggedToWordForTagAndSmiBits(tagged_index_node);
constant_index = ToIntPtrConstant(intptr_index_node, &index);
} else {
intptr_index_node = ReinterpretCast<IntPtrT>(index_node);
constant_index = ToIntPtrConstant(intptr_index_node, &index);
......@@ -9324,6 +9362,9 @@ CodeStubAssembler::ElementOffsetFromIndex<Smi>(TNode<Smi> index_node,
ElementsKind kind,
int base_size);
template V8_EXPORT_PRIVATE TNode<IntPtrT>
CodeStubAssembler::ElementOffsetFromIndex<TaggedIndex>(
TNode<TaggedIndex> index_node, ElementsKind kind, int base_size);
template V8_EXPORT_PRIVATE TNode<IntPtrT>
CodeStubAssembler::ElementOffsetFromIndex<IntPtrT>(TNode<IntPtrT> index_node,
ElementsKind kind,
int base_size);
......
......@@ -389,6 +389,9 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
TNode<IntPtrT> TaggedIndexToIntPtr(TNode<TaggedIndex> value);
TNode<TaggedIndex> IntPtrToTaggedIndex(TNode<IntPtrT> value);
// TODO(v8:10047): Get rid of these conversions eventually.
TNode<Smi> TaggedIndexToSmi(TNode<TaggedIndex> value);
TNode<TaggedIndex> SmiToTaggedIndex(TNode<Smi> value);
// Pointer compression specific. Returns true if the upper 32 bits of a Smi
// contain the sign of a lower 32 bits (i.e. not corrupted) so that the Smi
......
......@@ -11,6 +11,7 @@
#include "src/compiler/common-operator.h"
#include "src/compiler/linkage.h"
#include "src/compiler/schedule.h"
#include "src/objects/tagged-index.h"
namespace v8 {
namespace internal {
......@@ -308,6 +309,19 @@ class OperandGenerator {
return Constant(OpParameter<int32_t>(node->op()));
case IrOpcode::kInt64Constant:
return Constant(OpParameter<int64_t>(node->op()));
case IrOpcode::kTaggedIndexConstant: {
// Unencoded index value.
intptr_t value =
static_cast<intptr_t>(OpParameter<int32_t>(node->op()));
DCHECK(TaggedIndex::IsValid(value));
// Generate it as 32/64-bit constant in a tagged form.
Address tagged_index = TaggedIndex::FromIntptr(value).ptr();
if (kSystemPointerSize == kInt32Size) {
return Constant(static_cast<int32_t>(tagged_index));
} else {
return Constant(static_cast<int64_t>(tagged_index));
}
}
case IrOpcode::kFloat32Constant:
return Constant(OpParameter<float>(node->op()));
case IrOpcode::kRelocatableInt32Constant:
......
......@@ -1335,6 +1335,7 @@ void InstructionSelector::VisitNode(Node* node) {
return VisitProjection(node);
case IrOpcode::kInt32Constant:
case IrOpcode::kInt64Constant:
case IrOpcode::kTaggedIndexConstant:
case IrOpcode::kExternalConstant:
case IrOpcode::kRelocatableInt32Constant:
case IrOpcode::kRelocatableInt64Constant:
......
......@@ -33,6 +33,10 @@ class CommonNodeCache final {
return int64_constants_.Find(zone(), value);
}
// Returns the cache slot for a TaggedIndexConstant node keyed by its
// unencoded int32 index value; the slot holds nullptr until a node for
// this value has been created and stored by the caller.
Node** FindTaggedIndexConstant(int32_t value) {
  return tagged_index_constants_.Find(zone(), value);
}
Node** FindFloat32Constant(float value) {
// We canonicalize float constants at the bit representation level.
return float32_constants_.Find(zone(), bit_cast<int32_t>(value));
......@@ -74,6 +78,7 @@ class CommonNodeCache final {
private:
Int32NodeCache int32_constants_;
Int64NodeCache int64_constants_;
Int32NodeCache tagged_index_constants_;
Int32NodeCache float32_constants_;
Int64NodeCache float64_constants_;
IntPtrNodeCache external_constants_;
......
......@@ -1158,6 +1158,13 @@ const Operator* CommonOperatorBuilder::Int64Constant(int64_t value) {
value); // parameter
}
// Creates the pure TaggedIndexConstant operator. The int32_t parameter is
// the raw (unencoded) index value; the node produces exactly one value
// output and takes no inputs.
const Operator* CommonOperatorBuilder::TaggedIndexConstant(int32_t value) {
  return new (zone()) Operator1<int32_t>(IrOpcode::kTaggedIndexConstant,
                                         Operator::kPure,        // properties
                                         "TaggedIndexConstant",  // name
                                         0, 0, 0, 1, 0, 0,       // counts
                                         value);                 // parameter
}
const Operator* CommonOperatorBuilder::Float32Constant(volatile float value) {
return new (zone()) Operator1<float>( // --
......
......@@ -491,6 +491,7 @@ class V8_EXPORT_PRIVATE CommonOperatorBuilder final
const Operator* Int32Constant(int32_t);
const Operator* Int64Constant(int64_t);
const Operator* TaggedIndexConstant(int32_t value);
const Operator* Float32Constant(volatile float);
const Operator* Float64Constant(volatile double);
const Operator* ExternalConstant(const ExternalReference&);
......
......@@ -168,7 +168,8 @@ void JSGenericLowering::LowerJSLoadProperty(Node* node) {
const PropertyAccess& p = PropertyAccessOf(node->op());
Node* frame_state = NodeProperties::GetFrameStateInput(node);
Node* outer_state = frame_state->InputAt(kFrameStateOuterStateInput);
node->InsertInput(zone(), 2, jsgraph()->SmiConstant(p.feedback().index()));
node->InsertInput(zone(), 2,
jsgraph()->TaggedIndexConstant(p.feedback().index()));
if (outer_state->opcode() != IrOpcode::kFrameState) {
Callable callable = Builtins::CallableFor(
isolate(), ShouldUseMegamorphicLoadBuiltin(p.feedback(), broker())
......@@ -198,7 +199,8 @@ void JSGenericLowering::LowerJSLoadNamed(Node* node) {
ReplaceWithStubCall(node, callable, flags);
return;
}
node->InsertInput(zone(), 2, jsgraph()->SmiConstant(p.feedback().index()));
node->InsertInput(zone(), 2,
jsgraph()->TaggedIndexConstant(p.feedback().index()));
if (outer_state->opcode() != IrOpcode::kFrameState) {
Callable callable = Builtins::CallableFor(
isolate(), ShouldUseMegamorphicLoadBuiltin(p.feedback(), broker())
......@@ -222,7 +224,8 @@ void JSGenericLowering::LowerJSLoadGlobal(Node* node) {
Node* frame_state = NodeProperties::GetFrameStateInput(node);
Node* outer_state = frame_state->InputAt(kFrameStateOuterStateInput);
node->InsertInput(zone(), 0, jsgraph()->HeapConstant(p.name()));
node->InsertInput(zone(), 1, jsgraph()->SmiConstant(p.feedback().index()));
node->InsertInput(zone(), 1,
jsgraph()->TaggedIndexConstant(p.feedback().index()));
if (outer_state->opcode() != IrOpcode::kFrameState) {
Callable callable = CodeFactory::LoadGlobalIC(isolate(), p.typeof_mode());
ReplaceWithStubCall(node, callable, flags);
......@@ -252,7 +255,8 @@ void JSGenericLowering::LowerJSStoreProperty(Node* node) {
PropertyAccess const& p = PropertyAccessOf(node->op());
Node* frame_state = NodeProperties::GetFrameStateInput(node);
Node* outer_state = frame_state->InputAt(kFrameStateOuterStateInput);
node->InsertInput(zone(), 3, jsgraph()->SmiConstant(p.feedback().index()));
node->InsertInput(zone(), 3,
jsgraph()->TaggedIndexConstant(p.feedback().index()));
if (outer_state->opcode() != IrOpcode::kFrameState) {
Callable callable =
Builtins::CallableFor(isolate(), Builtins::kKeyedStoreICTrampoline);
......@@ -276,7 +280,8 @@ void JSGenericLowering::LowerJSStoreNamed(Node* node) {
ReplaceWithRuntimeCall(node, Runtime::kSetNamedProperty);
return;
}
node->InsertInput(zone(), 3, jsgraph()->SmiConstant(p.feedback().index()));
node->InsertInput(zone(), 3,
jsgraph()->TaggedIndexConstant(p.feedback().index()));
if (outer_state->opcode() != IrOpcode::kFrameState) {
Callable callable =
Builtins::CallableFor(isolate(), Builtins::kStoreICTrampoline);
......@@ -295,7 +300,8 @@ void JSGenericLowering::LowerJSStoreNamedOwn(Node* node) {
Node* frame_state = NodeProperties::GetFrameStateInput(node);
Node* outer_state = frame_state->InputAt(kFrameStateOuterStateInput);
node->InsertInput(zone(), 1, jsgraph()->HeapConstant(p.name()));
node->InsertInput(zone(), 3, jsgraph()->SmiConstant(p.feedback().index()));
node->InsertInput(zone(), 3,
jsgraph()->TaggedIndexConstant(p.feedback().index()));
if (outer_state->opcode() != IrOpcode::kFrameState) {
Callable callable = CodeFactory::StoreOwnIC(isolate());
ReplaceWithStubCall(node, callable, flags);
......@@ -313,7 +319,8 @@ void JSGenericLowering::LowerJSStoreGlobal(Node* node) {
Node* frame_state = NodeProperties::GetFrameStateInput(node);
Node* outer_state = frame_state->InputAt(kFrameStateOuterStateInput);
node->InsertInput(zone(), 0, jsgraph()->HeapConstant(p.name()));
node->InsertInput(zone(), 2, jsgraph()->SmiConstant(p.feedback().index()));
node->InsertInput(zone(), 2,
jsgraph()->TaggedIndexConstant(p.feedback().index()));
if (outer_state->opcode() != IrOpcode::kFrameState) {
Callable callable =
Builtins::CallableFor(isolate(), Builtins::kStoreGlobalICTrampoline);
......@@ -332,7 +339,7 @@ void JSGenericLowering::LowerJSStoreDataPropertyInLiteral(Node* node) {
RelaxControls(node);
node->InsertInputs(zone(), 4, 2);
node->ReplaceInput(4, jsgraph()->HeapConstant(p.feedback().vector));
node->ReplaceInput(5, jsgraph()->SmiConstant(p.feedback().index()));
node->ReplaceInput(5, jsgraph()->TaggedIndexConstant(p.feedback().index()));
ReplaceWithRuntimeCall(node, Runtime::kDefineDataPropertyInLiteral);
}
......@@ -342,7 +349,8 @@ void JSGenericLowering::LowerJSStoreInArrayLiteral(Node* node) {
CallDescriptor::Flags flags = FrameStateFlagForCall(node);
FeedbackParameter const& p = FeedbackParameterOf(node->op());
RelaxControls(node);
node->InsertInput(zone(), 3, jsgraph()->SmiConstant(p.feedback().index()));
node->InsertInput(zone(), 3,
jsgraph()->TaggedIndexConstant(p.feedback().index()));
node->InsertInput(zone(), 4, jsgraph()->HeapConstant(p.feedback().vector));
ReplaceWithStubCall(node, callable, flags);
}
......@@ -550,7 +558,8 @@ void JSGenericLowering::LowerJSCreateLiteralArray(Node* node) {
CreateLiteralParameters const& p = CreateLiteralParametersOf(node->op());
CallDescriptor::Flags flags = FrameStateFlagForCall(node);
node->InsertInput(zone(), 0, jsgraph()->HeapConstant(p.feedback().vector));
node->InsertInput(zone(), 1, jsgraph()->SmiConstant(p.feedback().index()));
node->InsertInput(zone(), 1,
jsgraph()->TaggedIndexConstant(p.feedback().index()));
node->InsertInput(zone(), 2, jsgraph()->HeapConstant(p.constant()));
// Use the CreateShallowArrayLiteral builtin only for shallow boilerplates
......@@ -574,7 +583,8 @@ void JSGenericLowering::LowerJSCreateEmptyLiteralArray(Node* node) {
CallDescriptor::Flags flags = FrameStateFlagForCall(node);
FeedbackParameter const& p = FeedbackParameterOf(node->op());
node->InsertInput(zone(), 0, jsgraph()->HeapConstant(p.feedback().vector));
node->InsertInput(zone(), 1, jsgraph()->SmiConstant(p.feedback().index()));
node->InsertInput(zone(), 1,
jsgraph()->TaggedIndexConstant(p.feedback().index()));
node->RemoveInput(4); // control
Callable callable =
Builtins::CallableFor(isolate(), Builtins::kCreateEmptyArrayLiteral);
......@@ -592,7 +602,8 @@ void JSGenericLowering::LowerJSCreateLiteralObject(Node* node) {
CreateLiteralParameters const& p = CreateLiteralParametersOf(node->op());
CallDescriptor::Flags flags = FrameStateFlagForCall(node);
node->InsertInput(zone(), 0, jsgraph()->HeapConstant(p.feedback().vector));
node->InsertInput(zone(), 1, jsgraph()->SmiConstant(p.feedback().index()));
node->InsertInput(zone(), 1,
jsgraph()->TaggedIndexConstant(p.feedback().index()));
node->InsertInput(zone(), 2, jsgraph()->HeapConstant(p.constant()));
node->InsertInput(zone(), 3, jsgraph()->SmiConstant(p.flags()));
......@@ -615,7 +626,8 @@ void JSGenericLowering::LowerJSCloneObject(Node* node) {
Callable callable =
Builtins::CallableFor(isolate(), Builtins::kCloneObjectIC);
node->InsertInput(zone(), 1, jsgraph()->SmiConstant(p.flags()));
node->InsertInput(zone(), 2, jsgraph()->SmiConstant(p.feedback().index()));
node->InsertInput(zone(), 2,
jsgraph()->TaggedIndexConstant(p.feedback().index()));
node->InsertInput(zone(), 3, jsgraph()->HeapConstant(p.feedback().vector));
ReplaceWithStubCall(node, callable, flags);
}
......@@ -630,7 +642,8 @@ void JSGenericLowering::LowerJSCreateLiteralRegExp(Node* node) {
Callable callable =
Builtins::CallableFor(isolate(), Builtins::kCreateRegExpLiteral);
node->InsertInput(zone(), 0, jsgraph()->HeapConstant(p.feedback().vector));
node->InsertInput(zone(), 1, jsgraph()->SmiConstant(p.feedback().index()));
node->InsertInput(zone(), 1,
jsgraph()->TaggedIndexConstant(p.feedback().index()));
node->InsertInput(zone(), 2, jsgraph()->HeapConstant(p.constant()));
node->InsertInput(zone(), 3, jsgraph()->SmiConstant(p.flags()));
ReplaceWithStubCall(node, callable, flags);
......
......@@ -1375,6 +1375,7 @@ Reduction JSNativeContextSpecialization::ReduceJSGetIterator(Node* node) {
javascript()->LoadNamed(iterator_symbol, p.loadFeedback());
// Lazy deopt of the load iterator property
// TODO(v8:10047): Use TaggedIndexConstant here once deoptimizer supports it.
Node* call_slot = jsgraph()->SmiConstant(p.callFeedback().slot.ToInt());
Node* call_feedback = jsgraph()->HeapConstant(p.callFeedback().vector);
Node* lazy_deopt_parameters[] = {receiver, call_slot, call_feedback};
......
......@@ -32,6 +32,15 @@ Node* MachineGraph::IntPtrConstant(intptr_t value) {
: Int64Constant(static_cast<int64_t>(value));
}
// Returns a (canonicalized) TaggedIndexConstant node for |value|, creating
// and caching it on first use. The cache is keyed by the low 32 bits, which
// is sufficient because TaggedIndex payloads fit in 31 bits.
Node* MachineGraph::TaggedIndexConstant(intptr_t value) {
  const int32_t value32 = static_cast<int32_t>(value);
  Node** cached = cache_.FindTaggedIndexConstant(value32);
  if (*cached == nullptr) {
    *cached = graph()->NewNode(common()->TaggedIndexConstant(value32));
  }
  return *cached;
}
Node* MachineGraph::RelocatableInt32Constant(int32_t value,
RelocInfo::Mode rmode) {
Node** loc = cache_.FindRelocatableInt32Constant(
......
......@@ -44,6 +44,8 @@ class V8_EXPORT_PRIVATE MachineGraph : public NON_EXPORTED_BASE(ZoneObject) {
// constants is probably not serializable.
Node* IntPtrConstant(intptr_t value);
Node* TaggedIndexConstant(intptr_t value);
Node* RelocatableInt32Constant(int32_t value, RelocInfo::Mode rmode);
Node* RelocatableInt64Constant(int64_t value, RelocInfo::Mode rmode);
Node* RelocatableIntPtrConstant(intptr_t value, RelocInfo::Mode rmode);
......
......@@ -37,6 +37,7 @@
#define CONSTANT_OP_LIST(V) \
V(Int32Constant) \
V(Int64Constant) \
V(TaggedIndexConstant) \
V(Float32Constant) \
V(Float64Constant) \
V(ExternalConstant) \
......
......@@ -770,6 +770,8 @@ Type Typer::Visitor::TypeInt32Constant(Node* node) { UNREACHABLE(); }
Type Typer::Visitor::TypeInt64Constant(Node* node) { UNREACHABLE(); }
Type Typer::Visitor::TypeTaggedIndexConstant(Node* node) { UNREACHABLE(); }
Type Typer::Visitor::TypeRelocatableInt32Constant(Node* node) { UNREACHABLE(); }
Type Typer::Visitor::TypeRelocatableInt64Constant(Node* node) { UNREACHABLE(); }
......
......@@ -406,6 +406,7 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) {
}
case IrOpcode::kInt32Constant: // TODO(turbofan): rename Word32Constant?
case IrOpcode::kInt64Constant: // TODO(turbofan): rename Word64Constant?
case IrOpcode::kTaggedIndexConstant:
case IrOpcode::kFloat32Constant:
case IrOpcode::kFloat64Constant:
case IrOpcode::kRelocatableInt32Constant:
......
......@@ -9,6 +9,7 @@
#include "src/handles/handles-inl.h"
#include "src/objects/objects-inl.h" // TODO(jkummerow): Just smi-inl.h.
#include "src/objects/tagged-index.h"
namespace v8 {
namespace internal {
......@@ -18,6 +19,12 @@ int Arguments<T>::smi_at(int index) const {
return Smi::ToInt(Object(*address_of_arg_at(index)));
}
// Reads the runtime argument at |index| as a TaggedIndex and returns its
// decoded integer value. The caller must guarantee that this slot actually
// holds a TaggedIndex (IC callers now pass feedback slots in this form).
template <ArgumentsType T>
int Arguments<T>::tagged_index_at(int index) const {
  Address raw = *address_of_arg_at(index);
  return static_cast<int>(TaggedIndex(raw).value());
}
template <ArgumentsType T>
double Arguments<T>::number_at(int index) const {
return (*this)[index].Number();
......
......@@ -47,6 +47,8 @@ class Arguments {
inline int smi_at(int index) const;
inline int tagged_index_at(int index) const;
inline double number_at(int index) const;
inline void set_at(int index, Object value) {
......
This diff is collapsed.
......@@ -71,7 +71,7 @@ class V8_EXPORT_PRIVATE AccessorAssembler : public CodeStubAssembler {
struct LoadICParameters {
LoadICParameters(TNode<Context> context,
base::Optional<TNode<Object>> receiver, TNode<Object> name,
TNode<Smi> slot, TNode<HeapObject> vector,
TNode<TaggedIndex> slot, TNode<HeapObject> vector,
base::Optional<TNode<Object>> holder = base::nullopt)
: context_(context),
receiver_(receiver),
......@@ -91,7 +91,7 @@ class V8_EXPORT_PRIVATE AccessorAssembler : public CodeStubAssembler {
TNode<Context> context() const { return context_; }
TNode<Object> receiver() const { return receiver_.value(); }
TNode<Object> name() const { return name_; }
TNode<Smi> slot() const { return slot_; }
TNode<TaggedIndex> slot() const { return slot_; }
TNode<HeapObject> vector() const { return vector_; }
TNode<Object> holder() const { return holder_.value(); }
bool receiver_is_null() const { return !receiver_.has_value(); }
......@@ -100,14 +100,14 @@ class V8_EXPORT_PRIVATE AccessorAssembler : public CodeStubAssembler {
TNode<Context> context_;
base::Optional<TNode<Object>> receiver_;
TNode<Object> name_;
TNode<Smi> slot_;
TNode<TaggedIndex> slot_;
TNode<HeapObject> vector_;
base::Optional<TNode<Object>> holder_;
};
struct LazyLoadICParameters {
LazyLoadICParameters(LazyNode<Context> context, TNode<Object> receiver,
LazyNode<Object> name, LazyNode<Smi> slot,
LazyNode<Object> name, LazyNode<TaggedIndex> slot,
TNode<HeapObject> vector,
base::Optional<TNode<Object>> holder = base::nullopt)
: context_(context),
......@@ -129,7 +129,7 @@ class V8_EXPORT_PRIVATE AccessorAssembler : public CodeStubAssembler {
TNode<Context> context() const { return context_(); }
TNode<Object> receiver() const { return receiver_; }
TNode<Object> name() const { return name_(); }
TNode<Smi> slot() const { return slot_(); }
TNode<TaggedIndex> slot() const { return slot_(); }
TNode<HeapObject> vector() const { return vector_; }
TNode<Object> holder() const { return holder_; }
......@@ -137,14 +137,13 @@ class V8_EXPORT_PRIVATE AccessorAssembler : public CodeStubAssembler {
LazyNode<Context> context_;
TNode<Object> receiver_;
LazyNode<Object> name_;
LazyNode<Smi> slot_;
LazyNode<TaggedIndex> slot_;
TNode<HeapObject> vector_;
TNode<Object> holder_;
};
void LoadGlobalIC(TNode<HeapObject> maybe_feedback_vector,
const LazyNode<Smi>& lazy_smi_slot,
const LazyNode<UintPtrT>& lazy_slot,
const LazyNode<TaggedIndex>& lazy_slot,
const LazyNode<Context>& lazy_context,
const LazyNode<Name>& lazy_name, TypeofMode typeof_mode,
ExitPoint* exit_point);
......@@ -162,8 +161,8 @@ class V8_EXPORT_PRIVATE AccessorAssembler : public CodeStubAssembler {
struct StoreICParameters : public LoadICParameters {
// Bundles the operands of a store IC; forwards everything except the stored
// value to the LoadICParameters base. Slot is a TaggedIndex.
StoreICParameters(TNode<Context> context,
                  base::Optional<TNode<Object>> receiver,
                  TNode<Object> name, TNode<Object> value,
                  TNode<TaggedIndex> slot, TNode<HeapObject> vector)
    : LoadICParameters(context, receiver, name, slot, vector),
      value_(value) {}
......@@ -243,9 +242,12 @@ class V8_EXPORT_PRIVATE AccessorAssembler : public CodeStubAssembler {
// IC dispatcher behavior.
// Checks monomorphic case. Returns {feedback} entry of the vector.
TNode<MaybeObject> TryMonomorphicCase(TNode<TaggedIndex> slot,
                                      TNode<FeedbackVector> vector,
                                      TNode<Map> receiver_map,
                                      Label* if_handler,
                                      TVariable<MaybeObject>* var_handler,
                                      Label* if_miss);
void HandlePolymorphicCase(TNode<Map> receiver_map,
TNode<WeakFixedArray> feedback, Label* if_handler,
TVariable<MaybeObject>* var_handler,
......@@ -309,14 +311,13 @@ class V8_EXPORT_PRIVATE AccessorAssembler : public CodeStubAssembler {
// LoadGlobalIC implementation.
void LoadGlobalIC_TryPropertyCellCase(TNode<FeedbackVector> vector,
                                      TNode<TaggedIndex> slot,
                                      const LazyNode<Context>& lazy_context,
                                      ExitPoint* exit_point,
                                      Label* try_handler, Label* miss);
void LoadGlobalIC_TryHandlerCase(TNode<FeedbackVector> vector,
TNode<TaggedIndex> slot,
const LazyNode<Context>& lazy_context,
const LazyNode<Name>& lazy_name,
TypeofMode typeof_mode,
......
......@@ -2196,7 +2196,7 @@ RUNTIME_FUNCTION(Runtime_LoadIC_Miss) {
// Runtime functions don't follow the IC's calling convention.
Handle<Object> receiver = args.at(0);
Handle<Name> key = args.at<Name>(1);
Handle<TaggedIndex> slot = args.at<TaggedIndex>(2);
Handle<FeedbackVector> vector = args.at<FeedbackVector>(3);
FeedbackSlot vector_slot = FeedbackVector::ToSlot(slot->value());
......@@ -2248,7 +2248,7 @@ RUNTIME_FUNCTION(Runtime_LoadGlobalIC_Miss) {
// Runtime functions don't follow the IC's calling convention.
Handle<JSGlobalObject> global = isolate->global_object();
Handle<String> name = args.at<String>(0);
Handle<TaggedIndex> slot = args.at<TaggedIndex>(1);
Handle<HeapObject> maybe_vector = args.at<HeapObject>(2);
CONVERT_INT32_ARG_CHECKED(typeof_value, 3);
TypeofMode typeof_mode = static_cast<TypeofMode>(typeof_value);
......@@ -2276,7 +2276,7 @@ RUNTIME_FUNCTION(Runtime_LoadGlobalIC_Slow) {
DCHECK_EQ(3, args.length());
CONVERT_ARG_HANDLE_CHECKED(String, name, 0);
Handle<TaggedIndex> slot = args.at<TaggedIndex>(1);
Handle<FeedbackVector> vector = args.at<FeedbackVector>(2);
FeedbackSlot vector_slot = FeedbackVector::ToSlot(slot->value());
FeedbackSlotKind kind = vector->GetKind(vector_slot);
......@@ -2293,7 +2293,7 @@ RUNTIME_FUNCTION(Runtime_KeyedLoadIC_Miss) {
// Runtime functions don't follow the IC's calling convention.
Handle<Object> receiver = args.at(0);
Handle<Object> key = args.at(1);
Handle<TaggedIndex> slot = args.at<TaggedIndex>(2);
Handle<HeapObject> maybe_vector = args.at<HeapObject>(3);
Handle<FeedbackVector> vector = Handle<FeedbackVector>();
......@@ -2312,7 +2312,7 @@ RUNTIME_FUNCTION(Runtime_StoreIC_Miss) {
DCHECK_EQ(5, args.length());
// Runtime functions don't follow the IC's calling convention.
Handle<Object> value = args.at(0);
Handle<TaggedIndex> slot = args.at<TaggedIndex>(1);
Handle<HeapObject> maybe_vector = args.at<HeapObject>(2);
Handle<Object> receiver = args.at(3);
Handle<Name> key = args.at<Name>(4);
......@@ -2342,7 +2342,7 @@ RUNTIME_FUNCTION(Runtime_StoreGlobalIC_Miss) {
DCHECK_EQ(4, args.length());
// Runtime functions don't follow the IC's calling convention.
Handle<Object> value = args.at(0);
Handle<TaggedIndex> slot = args.at<TaggedIndex>(1);
Handle<FeedbackVector> vector = args.at<FeedbackVector>(2);
Handle<Name> key = args.at<Name>(3);
......@@ -2378,7 +2378,7 @@ RUNTIME_FUNCTION(Runtime_StoreGlobalIC_Slow) {
#ifdef DEBUG
{
Handle<TaggedIndex> slot = args.at<TaggedIndex>(1);
Handle<FeedbackVector> vector = args.at<FeedbackVector>(2);
FeedbackSlot vector_slot = FeedbackVector::ToSlot(slot->value());
FeedbackSlotKind slot_kind = vector->GetKind(vector_slot);
......@@ -2426,7 +2426,7 @@ RUNTIME_FUNCTION(Runtime_KeyedStoreIC_Miss) {
DCHECK_EQ(5, args.length());
// Runtime functions don't follow the IC's calling convention.
Handle<Object> value = args.at(0);
Handle<TaggedIndex> slot = args.at<TaggedIndex>(1);
Handle<HeapObject> maybe_vector = args.at<HeapObject>(2);
Handle<Object> receiver = args.at(3);
Handle<Object> key = args.at(4);
......@@ -2468,7 +2468,7 @@ RUNTIME_FUNCTION(Runtime_StoreInArrayLiteralIC_Miss) {
DCHECK_EQ(5, args.length());
// Runtime functions don't follow the IC's calling convention.
Handle<Object> value = args.at(0);
Handle<TaggedIndex> slot = args.at<TaggedIndex>(1);
Handle<HeapObject> maybe_vector = args.at<HeapObject>(2);
Handle<Object> receiver = args.at(3);
Handle<Object> key = args.at(4);
......@@ -2516,7 +2516,7 @@ RUNTIME_FUNCTION(Runtime_ElementsTransitionAndStoreIC_Miss) {
Handle<Object> key = args.at(1);
Handle<Object> value = args.at(2);
Handle<Map> map = args.at<Map>(3);
Handle<TaggedIndex> slot = args.at<TaggedIndex>(4);
Handle<FeedbackVector> vector = args.at<FeedbackVector>(5);
FeedbackSlot vector_slot = FeedbackVector::ToSlot(slot->value());
FeedbackSlotKind kind = vector->GetKind(vector_slot);
......@@ -2639,10 +2639,11 @@ RUNTIME_FUNCTION(Runtime_CloneObjectIC_Miss) {
HandleScope scope(isolate);
DCHECK_EQ(4, args.length());
Handle<Object> source = args.at<Object>(0);
CONVERT_SMI_ARG_CHECKED(flags, 1);
if (!MigrateDeprecated(isolate, source)) {
// The feedback slot index arrives as a TaggedIndex argument.
CONVERT_TAGGED_INDEX_ARG_CHECKED(index, 2);
FeedbackSlot slot = FeedbackVector::ToSlot(index);
Handle<HeapObject> maybe_vector = args.at<HeapObject>(3);
if (maybe_vector->IsFeedbackVector()) {
FeedbackNexus nexus(Handle<FeedbackVector>::cast(maybe_vector), slot);
......@@ -2726,7 +2727,7 @@ RUNTIME_FUNCTION(Runtime_LoadPropertyWithInterceptor) {
if (it.IsFound()) return *result;
Handle<TaggedIndex> slot = args.at<TaggedIndex>(3);
Handle<FeedbackVector> vector = args.at<FeedbackVector>(4);
FeedbackSlot vector_slot = FeedbackVector::ToSlot(slot->value());
FeedbackSlotKind slot_kind = vector->GetKind(vector_slot);
......@@ -2819,7 +2820,7 @@ RUNTIME_FUNCTION(Runtime_KeyedHasIC_Miss) {
// Runtime functions don't follow the IC's calling convention.
Handle<Object> receiver = args.at(0);
Handle<Object> key = args.at(1);
Handle<TaggedIndex> slot = args.at<TaggedIndex>(2);
Handle<HeapObject> maybe_vector = args.at<HeapObject>(3);
Handle<FeedbackVector> vector = Handle<FeedbackVector>();
......
......@@ -1053,7 +1053,7 @@ void KeyedStoreGenericAssembler::StoreIC_NoFeedback() {
TNode<Object> receiver_maybe_smi = CAST(Parameter(Descriptor::kReceiver));
TNode<Object> name = CAST(Parameter(Descriptor::kName));
TNode<Object> value = CAST(Parameter(Descriptor::kValue));
TNode<TaggedIndex> slot = CAST(Parameter(Descriptor::kSlot));
TNode<Context> context = CAST(Parameter(Descriptor::kContext));
Label miss(this, Label::kDeferred), store_property(this);
......
......@@ -624,6 +624,13 @@ TNode<Smi> InterpreterAssembler::BytecodeOperandIdxSmi(int operand_index) {
return SmiTag(Signed(BytecodeOperandIdx(operand_index)));
}
TNode<TaggedIndex> InterpreterAssembler::BytecodeOperandIdxTaggedIndex(
    int operand_index) {
  // Widen the raw 32-bit index operand to pointer width, then wrap it in a
  // TaggedIndex node for use as an IC feedback slot.
  TNode<Int32T> raw_index = Signed(BytecodeOperandIdxInt32(operand_index));
  return IntPtrToTaggedIndex(ChangeInt32ToIntPtr(raw_index));
}
TNode<UintPtrT> InterpreterAssembler::BytecodeOperandConstantPoolIdx(
int operand_index, LoadSensitivity needs_poisoning) {
DCHECK_EQ(OperandType::kIdx,
......
......@@ -38,6 +38,9 @@ class V8_EXPORT_PRIVATE InterpreterAssembler : public CodeStubAssembler {
// Returns the smi index immediate for bytecode operand |operand_index|
// in the current bytecode.
TNode<Smi> BytecodeOperandIdxSmi(int operand_index);
// Returns the TaggedIndex immediate for bytecode operand |operand_index|
// in the current bytecode.
TNode<TaggedIndex> BytecodeOperandIdxTaggedIndex(int operand_index);
// Returns the 32-bit unsigned immediate for bytecode operand |operand_index|
// in the current bytecode.
TNode<Uint32T> BytecodeOperandUImm(int operand_index);
......
This diff is collapsed.
......@@ -149,9 +149,10 @@ bool FeedbackVector::has_optimization_marker() const {
// Conversion from an integer index to either a slot or an ic slot.
// static
FeedbackSlot FeedbackVector::ToSlot(intptr_t index) {
  // Slot indices now arrive as pointer-sized TaggedIndex values, while
  // FeedbackSlot stores an int. The unsigned comparison also rejects
  // negative indices (they wrap to large uintptr_t values).
  DCHECK_LE(static_cast<uintptr_t>(index),
            static_cast<uintptr_t>(std::numeric_limits<int>::max()));
  return FeedbackSlot(static_cast<int>(index));
}
MaybeObject FeedbackVector::Get(FeedbackSlot slot) const {
......
......@@ -232,7 +232,7 @@ class FeedbackVector : public HeapObject {
static int GetIndex(FeedbackSlot slot) { return slot.ToInt(); }
// Conversion from an integer index into the underlying array to a slot.
// Takes intptr_t so TaggedIndex-derived indices can be passed directly.
static inline FeedbackSlot ToSlot(intptr_t index);
inline MaybeObject Get(FeedbackSlot slot) const;
inline MaybeObject Get(const Isolate* isolate, FeedbackSlot slot) const;
inline MaybeObject get(int index) const;
......
......@@ -59,13 +59,7 @@ class TaggedIndex : public Object {
// Returns whether value can be represented in a TaggedIndex.
static inline bool constexpr IsValid(intptr_t value) {
  // Is value in range [kMinValue, kMaxValue].
  return kMinValue <= value && value <= kMaxValue;
}
DECL_CAST(TaggedIndex)
......
......@@ -589,7 +589,7 @@ RUNTIME_FUNCTION(Runtime_CreateObjectLiteral) {
HandleScope scope(isolate);
DCHECK_EQ(4, args.length());
CONVERT_ARG_HANDLE_CHECKED(HeapObject, maybe_vector, 0);
CONVERT_TAGGED_INDEX_ARG_CHECKED(literals_index, 1);
CONVERT_ARG_HANDLE_CHECKED(ObjectBoilerplateDescription, description, 2);
CONVERT_SMI_ARG_CHECKED(flags, 3);
Handle<FeedbackVector> vector;
......@@ -627,7 +627,7 @@ RUNTIME_FUNCTION(Runtime_CreateArrayLiteral) {
HandleScope scope(isolate);
DCHECK_EQ(4, args.length());
CONVERT_ARG_HANDLE_CHECKED(HeapObject, maybe_vector, 0);
CONVERT_TAGGED_INDEX_ARG_CHECKED(literals_index, 1);
CONVERT_ARG_HANDLE_CHECKED(ArrayBoilerplateDescription, elements, 2);
CONVERT_SMI_ARG_CHECKED(flags, 3);
Handle<FeedbackVector> vector;
......@@ -645,7 +645,7 @@ RUNTIME_FUNCTION(Runtime_CreateRegExpLiteral) {
HandleScope scope(isolate);
DCHECK_EQ(4, args.length());
CONVERT_ARG_HANDLE_CHECKED(HeapObject, maybe_vector, 0);
CONVERT_TAGGED_INDEX_ARG_CHECKED(index, 1);
CONVERT_ARG_HANDLE_CHECKED(String, pattern, 2);
CONVERT_SMI_ARG_CHECKED(flags, 3);
......
......@@ -885,7 +885,7 @@ RUNTIME_FUNCTION(Runtime_DefineDataPropertyInLiteral) {
CONVERT_ARG_HANDLE_CHECKED(Object, value, 2);
CONVERT_SMI_ARG_CHECKED(flag, 3);
CONVERT_ARG_HANDLE_CHECKED(HeapObject, maybe_vector, 4);
CONVERT_TAGGED_INDEX_ARG_CHECKED(index, 5);
if (!maybe_vector->IsUndefined()) {
DCHECK(maybe_vector->IsFeedbackVector());
......
......@@ -36,9 +36,19 @@ namespace internal {
// Cast the given argument to a Smi and store its value in an int variable
// with the given name. If the argument is not a Smi we crash safely.
#define CONVERT_SMI_ARG_CHECKED(name, index)                 \
  CHECK(args[index].IsSmi());                                \
  int name = args.smi_at(index);                             \
  /* Ensure we have a Smi and not a TaggedIndex */           \
  DCHECK_IMPLIES(args[index].IsTaggedIndex(),                \
                 name == TaggedIndex(args[index].ptr()).value());

// Cast the given argument to a TaggedIndex and store its value in an int
// variable with the given name. If the argument is not a TaggedIndex we crash
// safely.
#define CONVERT_TAGGED_INDEX_ARG_CHECKED(name, index) \
  CHECK(args[index].IsTaggedIndex());                 \
  int name = args.tagged_index_at(index);
// Cast the given argument to a double and store it in a variable with
// the given name. If the argument is not a number (as opposed to
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment