Commit 67c951fa authored by Camillo, committed by V8 LUCI CQ

Reland "[maglev] Add internalized string compare fast-path"

This is a reland of commit c4301c04:
- Fix thin string in string-compare.js with low gc interval

Original change:
- Rename TryBuildCompareOperationBranch to TryBuildCompareOperation
- Add CheckedInternalizedString conversion Node that checks for string
  inputs and extracts internalized Strings from ThinStrings
- Add BranchIfReferenceCompare Node
- Add runtime functions to create internalized and thin Strings
- Add deopt check to test/mjsunit/maglev/int32-branch.js

Bug: v8:7700
Change-Id: I9221253f6bbeef12297419495c6eaf5096e06278
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3755152
Reviewed-by: Leszek Swirski <leszeks@chromium.org>
Commit-Queue: Camillo Bruni <cbruni@chromium.org>
Cr-Commit-Position: refs/heads/main@{#81680}
parent 1e7e8530
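
Why a plain reference compare is sound here: V8 keeps at most one internalized string per content, so two internalized strings are equal exactly when they are the same heap object. A minimal d8 sketch of that invariant (assumes --allow-natives-syntax; %ConstructInternalizedString is the helper added by this change, %IsSameHeapObject is an existing test intrinsic):

// Sketch: equal internalized strings are one heap object, so == / ===
// between internalized inputs is decidable by identity alone.
let a = %ConstructInternalizedString("1234123412341234");
let b = "1234123412341234";      // string literals are internalized
print(%IsSameHeapObject(a, b));  // true: one string-table entry per content
print(a === b);                  // true, by reference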
@@ -417,9 +417,10 @@ void MaglevGraphBuilder::VisitBinarySmiOperation() {
   BuildGenericBinarySmiOperationNode<kOperation>();
 }
 
-bool MaglevGraphBuilder::TryBuildCompareOperationBranch(Operation operation,
-                                                        ValueNode* left,
-                                                        ValueNode* right) {
+template <typename CompareControlNode>
+bool MaglevGraphBuilder::TryBuildCompareOperation(Operation operation,
+                                                  ValueNode* left,
+                                                  ValueNode* right) {
   // Don't emit the shortcut branch if the next bytecode is a merge target.
   if (IsOffsetAMergePoint(next_offset())) return false;
@@ -457,7 +458,7 @@ bool MaglevGraphBuilder::TryBuildCompareOperationBranch(Operation operation,
     return false;
   }
 
-  BasicBlock* block = FinishBlock<BranchIfInt32Compare>(
+  BasicBlock* block = FinishBlock<CompareControlNode>(
       next_offset(), {left, right}, operation, &jump_targets_[true_offset],
      &jump_targets_[false_offset]);
   MergeIntoFrameState(block, iterator_.GetJumpTargetOffset());
@@ -477,7 +478,8 @@ void MaglevGraphBuilder::VisitCompareOperation() {
         left = LoadRegisterInt32(0);
         right = GetAccumulatorInt32();
       }
-      if (TryBuildCompareOperationBranch(kOperation, left, right)) {
+      if (TryBuildCompareOperation<BranchIfInt32Compare>(kOperation, left,
+                                                         right)) {
         return;
       }
       SetAccumulator(
@@ -497,6 +499,21 @@ void MaglevGraphBuilder::VisitCompareOperation() {
      //   return;
      }
      break;
+    case CompareOperationHint::kInternalizedString:
+      DCHECK(kOperation == Operation::kEqual ||
+             kOperation == Operation::kStrictEqual);
+      ValueNode *left, *right;
+      if (IsRegisterEqualToAccumulator(0)) {
+        left = right = LoadRegister<CheckedInternalizedString>(0);
+      } else {
+        left = LoadRegister<CheckedInternalizedString>(0);
+        right = GetAccumulator<CheckedInternalizedString>();
+      }
+      if (TryBuildCompareOperation<BranchIfReferenceCompare>(kOperation, left,
+                                                             right)) {
+        return;
+      }
+      break;
     default:
       // Fallback to generic node.
       break;
...
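
The new kInternalizedString case is only reached when the collected compare feedback carries exactly that hint. A sketch of a warm-up that gets there (assumes d8 with --allow-natives-syntax --maglev, mirroring the new test file further down):

function cmp(x, y) { return x === y; }
%PrepareFunctionForOptimization(cmp);
cmp("a", "b");  // literal operands are internalized strings,
cmp("a", "a");  // so feedback stays CompareOperationHint::kInternalizedString
%OptimizeMaglevOnNextCall(cmp);
cmp("a", "b");  // compiled with CheckedInternalizedString inputs
                // feeding BranchIfReferenceCompare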
@@ -401,6 +401,12 @@ class MaglevGraphBuilder {
     return GetTaggedValueHelper(reg, value);
   }
 
+  template <typename ConversionNodeT>
+  ValueNode* GetValue(interpreter::Register reg) {
+    ValueNode* value = current_interpreter_frame_.get(reg);
+    return AddNewConversionNode<ConversionNodeT>(reg, value);
+  }
+
   ValueNode* GetInt32(interpreter::Register reg) {
     ValueNode* value = current_interpreter_frame_.get(reg);
     switch (value->properties().value_representation()) {
@@ -447,6 +453,12 @@ class MaglevGraphBuilder {
     UNREACHABLE();
   }
 
+  template <typename ConversionNodeT>
+  ValueNode* GetAccumulator() {
+    return GetValue<ConversionNodeT>(
+        interpreter::Register::virtual_accumulator());
+  }
+
   ValueNode* GetAccumulatorTagged() {
     return GetTaggedValue(interpreter::Register::virtual_accumulator());
   }
@@ -465,6 +477,12 @@ class MaglevGraphBuilder {
            current_interpreter_frame_.accumulator();
   }
 
+  template <typename ConversionNodeT>
+  ValueNode* LoadRegister(int operand_index) {
+    return GetValue<ConversionNodeT>(
+        iterator_.GetRegisterOperand(operand_index));
+  }
+
   ValueNode* LoadRegisterTagged(int operand_index) {
     return GetTaggedValue(iterator_.GetRegisterOperand(operand_index));
   }
@@ -669,8 +687,9 @@ class MaglevGraphBuilder {
   template <Operation kOperation>
   void VisitBinarySmiOperation();
 
-  bool TryBuildCompareOperationBranch(Operation operation, ValueNode* left,
-                                      ValueNode* right);
+  template <typename CompareControlNode>
+  bool TryBuildCompareOperation(Operation operation, ValueNode* left,
+                                ValueNode* right);
 
   template <Operation kOperation>
   void VisitCompareOperation();
...
@@ -93,6 +93,7 @@ class MaglevGraphVerifier {
     case Opcode::kCheckMaps:
     case Opcode::kCheckMapsWithMigration:
     case Opcode::kCheckSmi:
+    case Opcode::kCheckedInternalizedString:
     // TODO(victorgomes): Can we check that the input is Boolean?
     case Opcode::kBranchIfToBooleanTrue:
     case Opcode::kBranchIfTrue:
@@ -173,6 +174,11 @@ class MaglevGraphVerifier {
       CheckValueInputIs(node, 0, ValueRepresentation::kInt32);
      CheckValueInputIs(node, 1, ValueRepresentation::kInt32);
       break;
+    case Opcode::kBranchIfReferenceCompare:
+      DCHECK_EQ(node->input_count(), 2);
+      CheckValueInputIs(node, 0, ValueRepresentation::kTagged);
+      CheckValueInputIs(node, 1, ValueRepresentation::kTagged);
+      break;
     case Opcode::kFloat64Add:
     case Opcode::kFloat64Subtract:
     case Opcode::kFloat64Multiply:
...
@@ -773,6 +773,51 @@ void CheckMapsWithMigration::PrintParams(
   os << "(" << *map().object() << ")";
 }
 
+void CheckedInternalizedString::AllocateVreg(
+    MaglevVregAllocationState* vreg_state) {
+  UseRegister(object_input());
+  set_temporaries_needed(1);
+  DefineSameAsFirst(vreg_state, this);
+}
+
+void CheckedInternalizedString::GenerateCode(MaglevCodeGenState* code_gen_state,
+                                             const ProcessingState& state) {
+  Register object = ToRegister(object_input());
+  RegList temps = temporaries();
+  Register map_tmp = temps.PopFirst();
+
+  Condition is_smi = __ CheckSmi(object);
+  EmitEagerDeoptIf(is_smi, code_gen_state, this);
+
+  __ LoadMap(map_tmp, object);
+  __ RecordComment("Test IsInternalizedString");
+  __ testw(FieldOperand(map_tmp, Map::kInstanceTypeOffset),
+           Immediate(kIsNotStringMask | kIsNotInternalizedMask));
+  static_assert((kStringTag | kInternalizedTag) == 0);
+  JumpToDeferredIf(
+      not_zero, code_gen_state,
+      [](MaglevCodeGenState* code_gen_state, Label* return_label,
+         Register object, CheckedInternalizedString* node,
+         EagerDeoptInfo* deopt_info, Register map_tmp) {
+        __ RecordComment("Deferred Test IsThinString");
+        __ movw(map_tmp, FieldOperand(map_tmp, Map::kInstanceTypeOffset));
+        static_assert(kThinStringTagBit > 0);
+        __ testb(map_tmp, Immediate(kThinStringTagBit));
+        __ j(zero, &deopt_info->deopt_entry_label);
+        __ LoadTaggedPointerField(
+            object, FieldOperand(object, ThinString::kActualOffset));
+        if (FLAG_debug_code) {
+          __ RecordComment("DCHECK IsInternalizedString");
+          __ LoadMap(map_tmp, object);
+          __ testw(FieldOperand(map_tmp, Map::kInstanceTypeOffset),
+                   Immediate(kIsNotStringMask | kIsNotInternalizedMask));
+          static_assert((kStringTag | kInternalizedTag) == 0);
+          __ Check(zero, AbortReason::kUnexpectedValue);
+        }
+        __ jmp(return_label);
+      },
+      object, this, eager_deopt_info(), map_tmp);
+}
+
 void LoadTaggedField::AllocateVreg(MaglevVregAllocationState* vreg_state) {
   UseRegister(object_input());
   DefineAsRegister(vreg_state, this);
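
In observable terms, the deferred path means a ThinString input gets unwrapped to its internalized actual string instead of bailing out, while a non-internalized, non-thin string still takes the eager deopt. A sketch using the runtime helpers added further down (assumes d8 with --allow-natives-syntax --maglev):

function eq(a, b) { return a === b; }
%PrepareFunctionForOptimization(eq);
eq("1234123412341234", "1234123412341234");
%OptimizeMaglevOnNextCall(eq);
eq("1234123412341234", "1234123412341234");
let thin = %ConstructThinString("1234" + "1234" + "1234" + "1234");
eq(thin, "1234123412341234");   // ThinString: unwrapped, stays maglevved
let plain = "1234" + "1234" + "1234" + "1234";
eq(plain, "1234123412341234");  // neither internalized nor thin: eager deopt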
@@ -1867,6 +1912,37 @@ void BranchIfInt32Compare::PrintParams(
   os << "(" << operation_ << ")";
 }
 
+void BranchIfReferenceCompare::AllocateVreg(
+    MaglevVregAllocationState* vreg_state) {
+  UseRegister(left_input());
+  UseRegister(right_input());
+}
+
+void BranchIfReferenceCompare::GenerateCode(MaglevCodeGenState* code_gen_state,
+                                            const ProcessingState& state) {
+  Register left = ToRegister(left_input());
+  Register right = ToRegister(right_input());
+
+  auto* next_block = state.next_block();
+
+  __ cmp_tagged(left, right);
+
+  // We don't have any branch probability information, so try to jump
+  // over whatever the next block emitted is.
+  if (if_false() == next_block) {
+    // Jump over the false block if true, otherwise fall through into it.
+    __ j(ConditionFor(operation_), if_true()->label());
+  } else {
+    // Jump to the false block if true.
+    __ j(NegateCondition(ConditionFor(operation_)), if_false()->label());
+    // Jump to the true block if it's not the next block.
+    if (if_true() != next_block) {
+      __ jmp(if_true()->label());
+    }
+  }
+}
+
+void BranchIfReferenceCompare::PrintParams(
+    std::ostream& os, MaglevGraphLabeller* graph_labeller) const {
+  os << "(" << operation_ << ")";
+}
+
 void BranchIfToBooleanTrue::AllocateVreg(
     MaglevVregAllocationState* vreg_state) {
   UseFixed(condition_input(),
...
@@ -133,6 +133,7 @@ class CompactInterpreterFrameState;
   V(RegisterInput)             \
   V(CheckedSmiTag)             \
   V(CheckedSmiUntag)           \
+  V(CheckedInternalizedString) \
   V(ChangeInt32ToFloat64)      \
   V(Float64Box)                \
   V(CheckedFloat64Unbox)       \
@@ -160,6 +161,7 @@ class CompactInterpreterFrameState;
 #define CONDITIONAL_CONTROL_NODE_LIST(V) \
   V(BranchIfTrue)                        \
   V(BranchIfToBooleanTrue)               \
+  V(BranchIfReferenceCompare)            \
   V(BranchIfInt32Compare)                \
   V(BranchIfFloat64Compare)
@@ -1803,9 +1805,6 @@ class CheckMapsWithMigration
     DCHECK(map.is_migration_target());
   }
 
-  // TODO(verwaest): This just calls in deferred code, so probably we'll need to
-  // mark that to generate stack maps. Mark as call so we at least clear the
-  // registers since we currently don't properly spill either.
   static constexpr OpProperties kProperties =
       OpProperties::EagerDeopt() | OpProperties::DeferredCall();
@@ -1822,6 +1821,27 @@ class CheckMapsWithMigration
   const compiler::MapRef map_;
 };
 
+class CheckedInternalizedString
+    : public FixedInputValueNodeT<1, CheckedInternalizedString> {
+  using Base = FixedInputValueNodeT<1, CheckedInternalizedString>;
+
+ public:
+  explicit CheckedInternalizedString(uint64_t bitfield) : Base(bitfield) {
+    CHECK_EQ(properties().value_representation(),
+             ValueRepresentation::kTagged);
+  }
+
+  static constexpr OpProperties kProperties = OpProperties::EagerDeopt() |
+                                              OpProperties::TaggedValue() |
+                                              OpProperties::ConversionNode();
+
+  static constexpr int kObjectIndex = 0;
+  Input& object_input() { return Node::input(kObjectIndex); }
+
+  void AllocateVreg(MaglevVregAllocationState*);
+  void GenerateCode(MaglevCodeGenState*, const ProcessingState&);
+  void PrintParams(std::ostream&, MaglevGraphLabeller*) const {}
+};
+
 class LoadTaggedField : public FixedInputValueNodeT<1, LoadTaggedField> {
   using Base = FixedInputValueNodeT<1, LoadTaggedField>;
@@ -2638,6 +2658,29 @@ class BranchIfFloat64Compare
   Operation operation_;
 };
 
+class BranchIfReferenceCompare
+    : public ConditionalControlNodeT<2, BranchIfReferenceCompare> {
+  using Base = ConditionalControlNodeT<2, BranchIfReferenceCompare>;
+
+ public:
+  static constexpr int kLeftIndex = 0;
+  static constexpr int kRightIndex = 1;
+  Input& left_input() { return NodeBase::input(kLeftIndex); }
+  Input& right_input() { return NodeBase::input(kRightIndex); }
+
+  explicit BranchIfReferenceCompare(uint64_t bitfield, Operation operation,
+                                    BasicBlockRef* if_true_refs,
+                                    BasicBlockRef* if_false_refs)
+      : Base(bitfield, if_true_refs, if_false_refs), operation_(operation) {}
+
+  void AllocateVreg(MaglevVregAllocationState*);
+  void GenerateCode(MaglevCodeGenState*, const ProcessingState&);
+  void PrintParams(std::ostream&, MaglevGraphLabeller*) const;
+
+ private:
+  Operation operation_;
+};
+
 }  // namespace maglev
 }  // namespace internal
 }  // namespace v8
...
@@ -39,6 +39,13 @@ static_assert((kConsStringTag & kIsIndirectStringMask) == kIsIndirectStringTag);
 static_assert((kSlicedStringTag & kIsIndirectStringMask) ==
               kIsIndirectStringTag);
 static_assert((kThinStringTag & kIsIndirectStringMask) == kIsIndirectStringTag);
 
+const uint32_t kThinStringTagBit = 1 << 2;
+// Assert that the kThinStringTagBit is only used in kThinStringTag.
+static_assert((kSeqStringTag & kThinStringTagBit) == 0);
+static_assert((kConsStringTag & kThinStringTagBit) == 0);
+static_assert((kExternalStringTag & kThinStringTagBit) == 0);
+static_assert((kSlicedStringTag & kThinStringTagBit) == 0);
+static_assert((kThinStringTag & kThinStringTagBit) == kThinStringTagBit);
+
 // For strings, bit 3 indicates whether the string consists of two-byte
 // characters or one-byte characters.
...
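
For reference, a quick self-contained check of the bit layout these asserts pin down; the concrete tag values are an assumption based on the string representation constants of this era (kSeq 0x0, kCons 0x1, kExternal 0x2, kSliced 0x3, kThin 0x5):

// Assumed tag values; the invariant is that bit 2 is set only for ThinString.
const tags = { kSeqStringTag: 0x0, kConsStringTag: 0x1,
               kExternalStringTag: 0x2, kSlicedStringTag: 0x3,
               kThinStringTag: 0x5 };
const kThinStringTagBit = 1 << 2;
for (const [name, tag] of Object.entries(tags)) {
  const hasBit = (tag & kThinStringTagBit) !== 0;
  if (hasBit !== (name === "kThinStringTag")) throw new Error(name);
}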
@@ -136,6 +136,34 @@ RUNTIME_FUNCTION(Runtime_ConstructSlicedString) {
   return *sliced_string;
 }
 
+RUNTIME_FUNCTION(Runtime_ConstructInternalizedString) {
+  HandleScope scope(isolate);
+  DCHECK_EQ(1, args.length());
+  Handle<String> string = args.at<String>(0);
+  CHECK(string->IsOneByteRepresentation());
+  Handle<String> internalized = isolate->factory()->InternalizeString(string);
+  CHECK(string->IsInternalizedString());
+  return *internalized;
+}
+
+RUNTIME_FUNCTION(Runtime_ConstructThinString) {
+  HandleScope scope(isolate);
+  DCHECK_EQ(1, args.length());
+  Handle<String> string = args.at<String>(0);
+  CHECK(string->IsOneByteRepresentation());
+  if (!string->IsConsString()) {
+    const bool kIsOneByte = true;
+    string =
+        isolate->factory()->NewConsString(isolate->factory()->empty_string(),
+                                          string, string->length(),
+                                          kIsOneByte);
+  }
+  CHECK(string->IsConsString());
+  Handle<String> internalized = isolate->factory()->InternalizeString(string);
+  CHECK_NE(*internalized, *string);
+  CHECK(string->IsThinString());
+  return *string;
+}
+
 RUNTIME_FUNCTION(Runtime_DeoptimizeFunction) {
   HandleScope scope(isolate);
   DCHECK_EQ(1, args.length());
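
Together, the two helpers let a test pin a string's shape deterministically: ConstructInternalizedString returns the canonical copy, while ConstructThinString internalizes a fresh cons string, which rewrites the cons into a ThinString forwarding to that copy. A usage sketch (d8 with --allow-natives-syntax; %IsInternalizedString is added below):

let internalized = %ConstructInternalizedString("1234123412341234");
print(%IsInternalizedString(internalized));  // true
let thin = %ConstructThinString("1234" + "1234" + "1234" + "1234");
print(%IsInternalizedString(thin));          // false: a ThinString merely
                                             // forwards to the internalized copy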
@@ -1241,8 +1269,8 @@ RUNTIME_FUNCTION(Runtime_TraceExit) {
 RUNTIME_FUNCTION(Runtime_HaveSameMap) {
   SealHandleScope shs(isolate);
   DCHECK_EQ(2, args.length());
-  auto obj1 = JSObject::cast(args[0]);
-  auto obj2 = JSObject::cast(args[1]);
+  auto obj1 = HeapObject::cast(args[0]);
+  auto obj2 = HeapObject::cast(args[1]);
   return isolate->heap()->ToBoolean(obj1.map() == obj2.map());
 }
@@ -1628,6 +1656,13 @@ RUNTIME_FUNCTION(Runtime_IsSharedString) {
                                     Handle<String>::cast(obj)->IsShared());
 }
 
+RUNTIME_FUNCTION(Runtime_IsInternalizedString) {
+  HandleScope scope(isolate);
+  DCHECK_EQ(1, args.length());
+  Handle<HeapObject> obj = args.at<HeapObject>(0);
+  return isolate->heap()->ToBoolean(obj->IsInternalizedString());
+}
+
 RUNTIME_FUNCTION(Runtime_SharedGC) {
   SealHandleScope scope(isolate);
   isolate->heap()->CollectSharedGarbage(GarbageCollectionReason::kTesting);
...
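
The new predicate makes the internalization rules observable from JS; roughly, literals are internalized at creation while runtime concatenations are not. A sketch (d8 with --allow-natives-syntax):

print(%IsInternalizedString("1234123412341234"));  // true: literal
let s = "1234" + "1234" + "1234" + "1234";
print(%IsInternalizedString(s));                   // false: runtime concat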
@@ -485,7 +485,9 @@ namespace internal {
   F(CompleteInobjectSlackTracking, 1, 1)      \
   F(ConstructConsString, 2, 1)                \
   F(ConstructDouble, 2, 1)                    \
+  F(ConstructInternalizedString, 1, 1)        \
   F(ConstructSlicedString, 2, 1)              \
+  F(ConstructThinString, 1, 1)                \
   F(DebugPrint, 1, 1)                         \
   F(DebugPrintPtr, 1, 1)                      \
   F(DebugTrace, 0, 1)                         \
@@ -536,6 +538,7 @@ namespace internal {
   F(IsConcatSpreadableProtector, 0, 1)        \
   F(IsConcurrentRecompilationSupported, 0, 1) \
   F(IsDictPropertyConstTrackingEnabled, 0, 1) \
+  F(IsInternalizedString, 1, 1)               \
   F(IsSameHeapObject, 2, 1)                   \
   F(IsSharedString, 1, 1)                     \
   F(MapIteratorProtector, 0, 1)               \
...
@@ -2,7 +2,7 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
-// Flags: --allow-natives-syntax --maglev --no-stress-opt
+// Flags: --allow-natives-syntax --maglev --no-stress-opt --no-always-turbofan
 
 function foo(x,y) {
   if (x < y) {
@@ -17,6 +17,10 @@ assertEquals(24, foo(6,2));
 %OptimizeMaglevOnNextCall(foo);
 assertEquals(42, foo(1,2));
 assertEquals(24, foo(6,2));
+assertTrue(isMaglevved(foo));
+assertEquals(42, foo(1.1, 2.2));
+assertEquals(24, foo(6.6, 2.2));
+assertUnoptimized(foo);
 
 function bar(x,y) {
@@ -32,3 +36,7 @@ assertEquals(24, bar(6,2));
 %OptimizeMaglevOnNextCall(bar);
 assertEquals(42, bar(1,2));
 assertEquals(24, bar(6,2));
+assertTrue(isMaglevved(bar));
+assertEquals(42, bar(1.1, 2.2));
+assertEquals(24, bar(6.6, 2.2));
+assertUnoptimized(bar);
...
// Copyright 2022 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// Flags: --allow-natives-syntax --maglev --no-stress-opt --no-always-turbofan
// Flags: --no-always-use-string-forwarding-table
let internalized1234 = %ConstructInternalizedString("1234123412341234");
let nonInternalized1234 = "1234" + "1234" + "1234" + "1234";
let uniqueId = 0;
function warmUpMaglevTestFn(test_fn_src) {
// Create a fresh and uncached function
test_fn_src = test_fn_src.toString();
const pattern = '(x, y) {\n';
assertTrue(test_fn_src.includes(pattern))
parts = test_fn_src.split(pattern)
assertEquals(parts.length, 2)
let test_fn = new Function('x', 'y', `{/*${uniqueId++}*/\n${parts[1]}`);
assertUnoptimized(test_fn);
%PrepareFunctionForOptimization(test_fn);
// Warm up with internalized strings
assertEquals(0, test_fn("1","2"));
assertEquals(1, test_fn("2", "2"));
assertEquals(1, test_fn(internalized1234, internalized1234));
%OptimizeMaglevOnNextCall(test_fn);
assertEquals(0, test_fn("1", "2"));
assertTrue(isMaglevved(test_fn));
return test_fn;
}
function test(test_fn_src, is_strict=false) {
assertEquals(internalized1234, nonInternalized1234);
assertTrue(1234123412341234 == nonInternalized1234);
assertTrue(%IsInternalizedString(internalized1234));
assertFalse(%IsInternalizedString(nonInternalized1234));
assertTrue(%IsInternalizedString(internalized1234));
let test_fn = warmUpMaglevTestFn(test_fn_src);
assertEquals(1, test_fn("1", "1"));
assertTrue(isMaglevved(test_fn));
assertEquals(0, test_fn(internalized1234, "1"));
assertTrue(isMaglevved(test_fn));
// The GC might have already migrated the thin string, create a new one
let thin1234 = %ConstructThinString( "1234" + "1234" + "1234" + "1234");
assertFalse(%IsInternalizedString(thin1234));
assertEquals(1, test_fn(thin1234, "1234123412341234"));
assertTrue(isMaglevved(test_fn));
assertEquals(1, test_fn(thin1234, thin1234));
assertTrue(isMaglevved(test_fn));
assertEquals(1, test_fn(internalized1234, "1234123412341234"));
assertTrue(isMaglevved(test_fn));
if (is_strict) {
assertEquals(0, test_fn(internalized1234, 1234123412341234));
assertUnoptimized(test_fn);
} else {
assertEquals(1, test_fn(internalized1234, 1234123412341234));
assertUnoptimized(test_fn);
}
assertEquals(1, test_fn(internalized1234, "1234123412341234"));
test_fn = warmUpMaglevTestFn(test_fn_src);
assertEquals(1, test_fn(nonInternalized1234, "1234123412341234"));
assertUnoptimized(test_fn);
assertEquals(1, test_fn(nonInternalized1234, "1234123412341234"));
test_fn = warmUpMaglevTestFn(test_fn_src);
assertEquals(1, test_fn(nonInternalized1234, nonInternalized1234));
assertUnoptimized(test_fn);
assertEquals(1, test_fn(nonInternalized1234, nonInternalized1234));
test_fn = warmUpMaglevTestFn(test_fn_src);
assertEquals(0, test_fn(nonInternalized1234, "1"));
assertUnoptimized(test_fn);
assertEquals(0, test_fn(nonInternalized1234, "1"));
}
function equals_fn(x, y) {
if (x == y) {
return 1;
}
return 0;
}
test(equals_fn);
function not_equals_fn(x, y) {
if (x != y) {
return 0;
}
return 1;
}
test(not_equals_fn);
function strict_equals_fn(x, y) {
if (x === y) {
return 1;
}
return 0;
}
test(strict_equals_fn, true);
function strict_not_equals_fn(x, y) {
if (x !== y) {
return 0;
}
return 1;
}
test(strict_not_equals_fn, true);