Commit 28e3467a authored by georgia.kouveli's avatar georgia.kouveli Committed by Commit bot

[stubs,interpreter] Optimise SMI loading for 64-bit targets.

Adding new methods to the code stub assembler and interpreter
assembler to combine loading and untagging SMIs, so that on 64-bit
architectures we can avoid loading the full 64 bits and load the
32 interesting bits directly instead.

Review-Url: https://codereview.chromium.org/2183923003
Cr-Commit-Position: refs/heads/master@{#38361}
parent f00b42ae
......@@ -51,11 +51,8 @@ void Builtins::Generate_StringFromCharCode(CodeStubAssembler* assembler) {
assembler->Bind(&if_notoneargument);
{
// Determine the resulting string length.
Node* parent_frame_length =
assembler->Load(MachineType::Pointer(), parent_frame_pointer,
assembler->IntPtrConstant(
ArgumentsAdaptorFrameConstants::kLengthOffset));
Node* length = assembler->SmiToWord(parent_frame_length);
Node* length = assembler->LoadAndUntagSmi(
parent_frame_pointer, ArgumentsAdaptorFrameConstants::kLengthOffset);
// Assume that the resulting string contains only one-byte characters.
Node* result = assembler->AllocateSeqOneByteString(context, length);
......
......@@ -74,7 +74,7 @@ Node* CodeStubAssembler::TheHoleConstant() {
}
// Returns the heap's hash seed root as an untagged Word32 value.
Node* CodeStubAssembler::HashSeed() {
// NOTE(review): diff-rendering artifact — both the pre-change statement
// (SmiToWord32(LoadRoot(...))) and its replacement are shown here without
// +/- markers; only the second return should exist in the final source.
return SmiToWord32(LoadRoot(Heap::kHashSeedRootIndex));
return LoadAndUntagToWord32Root(Heap::kHashSeedRootIndex);
}
Node* CodeStubAssembler::StaleRegisterConstant() {
......@@ -914,6 +914,60 @@ Node* CodeStubAssembler::LoadObjectField(Node* object, Node* offset,
return Load(rep, object, IntPtrSub(offset, IntPtrConstant(kHeapObjectTag)));
}
// Loads the Smi field at |offset| within |object| and returns it untagged
// as a word-sized value.
Node* CodeStubAssembler::LoadAndUntagObjectField(Node* object, int offset) {
if (Is64()) {
// On 64-bit targets only the 32 interesting bits of the Smi are loaded
// and sign-extended, avoiding a full 64-bit load plus shift.
#if V8_TARGET_LITTLE_ENDIAN
// On little-endian the Smi payload occupies the half-word at the higher
// address, so skip over the low (tag) half.
offset += kPointerSize / 2;
#endif
return ChangeInt32ToInt64(
LoadObjectField(object, offset, MachineType::Int32()));
} else {
// 32-bit targets: load the whole tagged word and untag it.
return SmiToWord(LoadObjectField(object, offset, MachineType::AnyTagged()));
}
}
// Loads the Smi field at |offset| within |object|, untags it, and returns
// the result as a Word32 value.
Node* CodeStubAssembler::LoadAndUntagToWord32ObjectField(Node* object,
int offset) {
if (Is64()) {
// 64-bit: a 32-bit load of the payload half already yields the untagged
// Word32 value — no conversion needed.
#if V8_TARGET_LITTLE_ENDIAN
// Payload half-word is at the higher address on little-endian.
offset += kPointerSize / 2;
#endif
return LoadObjectField(object, offset, MachineType::Int32());
} else {
// 32-bit targets: load the tagged word, then untag to Word32.
return SmiToWord32(
LoadObjectField(object, offset, MachineType::AnyTagged()));
}
}
// Loads the Smi at byte offset |index| from untagged pointer |base| and
// returns it untagged as a word-sized value.
Node* CodeStubAssembler::LoadAndUntagSmi(Node* base, int index) {
if (Is64()) {
// 64-bit: load only the 32-bit payload and sign-extend it.
#if V8_TARGET_LITTLE_ENDIAN
// Payload half-word is at the higher address on little-endian.
index += kPointerSize / 2;
#endif
return ChangeInt32ToInt64(
Load(MachineType::Int32(), base, IntPtrConstant(index)));
} else {
// 32-bit targets: load the tagged word and untag it.
return SmiToWord(
Load(MachineType::AnyTagged(), base, IntPtrConstant(index)));
}
}
// Loads the Smi root at |root_index| from the roots array, untags it, and
// returns it as a Word32 value.
Node* CodeStubAssembler::LoadAndUntagToWord32Root(
Heap::RootListIndex root_index) {
Node* roots_array_start =
ExternalConstant(ExternalReference::roots_array_start(isolate()));
// Byte offset of the requested root within the roots array.
int index = root_index * kPointerSize;
if (Is64()) {
// 64-bit: a 32-bit load of the payload half yields the untagged value.
#if V8_TARGET_LITTLE_ENDIAN
// Payload half-word is at the higher address on little-endian.
index += kPointerSize / 2;
#endif
return Load(MachineType::Int32(), roots_array_start, IntPtrConstant(index));
} else {
// 32-bit targets: load the tagged word, then untag to Word32.
return SmiToWord32(Load(MachineType::AnyTagged(), roots_array_start,
IntPtrConstant(index)));
}
}
Node* CodeStubAssembler::LoadHeapNumberValue(Node* object) {
return LoadObjectField(object, HeapNumber::kValueOffset,
MachineType::Float64());
......@@ -940,8 +994,8 @@ Node* CodeStubAssembler::LoadElements(Node* object) {
return LoadObjectField(object, JSObject::kElementsOffset);
}
// NOTE(review): diff-rendering artifact — the removed LoadFixedArrayBaseLength
// (first two lines) and its replacement LoadAndUntagFixedArrayBaseLength are
// shown interleaved without +/- markers; only the latter exists after this
// change.
Node* CodeStubAssembler::LoadFixedArrayBaseLength(Node* array) {
return LoadObjectField(array, FixedArrayBase::kLengthOffset);
// Loads the length field of a FixedArrayBase and returns it untagged.
Node* CodeStubAssembler::LoadAndUntagFixedArrayBaseLength(Node* array) {
return LoadAndUntagObjectField(array, FixedArrayBase::kLengthOffset);
}
Node* CodeStubAssembler::LoadMapBitField(Node* map) {
......@@ -1056,6 +1110,25 @@ Node* CodeStubAssembler::LoadFixedArrayElement(Node* object, Node* index_node,
return Load(MachineType::AnyTagged(), object, offset);
}
// Loads the Smi element at |index_node| from FixedArray |object|, untags it,
// and returns it as a Word32 value. |additional_offset| is an extra byte
// offset applied on top of the array header; |parameter_mode| describes how
// |index_node| is encoded (see ElementOffsetFromIndex).
Node* CodeStubAssembler::LoadAndUntagToWord32FixedArrayElement(
Node* object, Node* index_node, int additional_offset,
ParameterMode parameter_mode) {
int32_t header_size =
FixedArray::kHeaderSize + additional_offset - kHeapObjectTag;
#if V8_TARGET_LITTLE_ENDIAN
// On 64-bit little-endian, bias the offset so the 32-bit load below reads
// the Smi payload half-word (at the higher address).
if (Is64()) {
header_size += kPointerSize / 2;
}
#endif
Node* offset = ElementOffsetFromIndex(index_node, FAST_HOLEY_ELEMENTS,
parameter_mode, header_size);
if (Is64()) {
// 64-bit: the 32-bit payload load is already the untagged Word32 value.
return Load(MachineType::Int32(), object, offset);
} else {
// 32-bit targets: load the tagged word, then untag to Word32.
return SmiToWord32(Load(MachineType::AnyTagged(), object, offset));
}
}
Node* CodeStubAssembler::LoadFixedDoubleArrayElement(
Node* object, Node* index_node, MachineType machine_type,
int additional_offset, ParameterMode parameter_mode) {
......@@ -1915,7 +1988,7 @@ Node* CodeStubAssembler::StringCharCodeAt(Node* string, Node* index) {
{
// The {string} is a SlicedString, continue with its parent.
Node* string_offset =
SmiToWord(LoadObjectField(string, SlicedString::kOffsetOffset));
LoadAndUntagObjectField(string, SlicedString::kOffsetOffset);
Node* string_parent =
LoadObjectField(string, SlicedString::kParentOffset);
var_index.Bind(IntPtrAdd(index, string_offset));
......@@ -2086,8 +2159,8 @@ void CodeStubAssembler::NameDictionaryLookup(Node* dictionary,
DCHECK_EQ(MachineRepresentation::kWord32, var_name_index->rep());
Comment("NameDictionaryLookup");
Node* capacity = SmiToWord32(LoadFixedArrayElement(
dictionary, Int32Constant(Dictionary::kCapacityIndex)));
Node* capacity = LoadAndUntagToWord32FixedArrayElement(
dictionary, Int32Constant(Dictionary::kCapacityIndex));
Node* mask = Int32Sub(capacity, Int32Constant(1));
Node* hash = LoadNameHash(unique_name);
......@@ -2166,8 +2239,8 @@ void CodeStubAssembler::NumberDictionaryLookup(Node* dictionary, Node* key,
DCHECK_EQ(MachineRepresentation::kWord32, var_entry->rep());
Comment("NumberDictionaryLookup");
Node* capacity = SmiToWord32(LoadFixedArrayElement(
dictionary, Int32Constant(Dictionary::kCapacityIndex)));
Node* capacity = LoadAndUntagToWord32FixedArrayElement(
dictionary, Int32Constant(Dictionary::kCapacityIndex));
Node* mask = Int32Sub(capacity, Int32Constant(1));
Node* seed;
......@@ -2358,8 +2431,8 @@ void CodeStubAssembler::LoadPropertyFromFastObject(Node* object, Node* map,
(DescriptorArray::kDescriptorValue - DescriptorArray::kDescriptorKey) *
kPointerSize;
Node* details = SmiToWord32(
LoadFixedArrayElement(descriptors, name_index, name_to_details_offset));
Node* details = LoadAndUntagToWord32FixedArrayElement(descriptors, name_index,
name_to_details_offset);
var_details->Bind(details);
Node* location = BitFieldDecode<PropertyDetails::LocationField>(details);
......@@ -2465,8 +2538,8 @@ void CodeStubAssembler::LoadPropertyFromNameDictionary(Node* dictionary,
(NameDictionary::kEntryValueIndex - NameDictionary::kEntryKeyIndex) *
kPointerSize;
Node* details = SmiToWord32(
LoadFixedArrayElement(dictionary, name_index, name_to_details_offset));
Node* details = LoadAndUntagToWord32FixedArrayElement(dictionary, name_index,
name_to_details_offset);
var_details->Bind(details);
var_value->Bind(
......@@ -2494,8 +2567,8 @@ void CodeStubAssembler::LoadPropertyFromGlobalDictionary(Node* dictionary,
var_value->Bind(value);
Node* details =
SmiToWord32(LoadObjectField(property_cell, PropertyCell::kDetailsOffset));
Node* details = LoadAndUntagToWord32ObjectField(property_cell,
PropertyCell::kDetailsOffset);
var_details->Bind(details);
Comment("] LoadPropertyFromGlobalDictionary");
......@@ -2633,9 +2706,9 @@ void CodeStubAssembler::TryLookupElement(Node* object, Node* map,
Bind(&if_isobjectorsmi);
{
Node* elements = LoadElements(object);
Node* length = LoadFixedArrayBaseLength(elements);
Node* length = LoadAndUntagFixedArrayBaseLength(elements);
GotoUnless(Uint32LessThan(index, SmiToWord32(length)), if_not_found);
GotoUnless(Uint32LessThan(index, length), if_not_found);
Node* element = LoadFixedArrayElement(elements, index);
Node* the_hole = TheHoleConstant();
......@@ -2644,9 +2717,9 @@ void CodeStubAssembler::TryLookupElement(Node* object, Node* map,
Bind(&if_isdouble);
{
Node* elements = LoadElements(object);
Node* length = LoadFixedArrayBaseLength(elements);
Node* length = LoadAndUntagFixedArrayBaseLength(elements);
GotoUnless(Uint32LessThan(index, SmiToWord32(length)), if_not_found);
GotoUnless(Uint32LessThan(index, length), if_not_found);
if (kPointerSize == kDoubleSize) {
Node* element =
......@@ -3053,7 +3126,7 @@ void CodeStubAssembler::HandlePolymorphicCase(
Bind(&next_entry);
}
Node* length = SmiToWord32(LoadFixedArrayBaseLength(feedback));
Node* length = LoadAndUntagFixedArrayBaseLength(feedback);
// Loop from {unroll_count}*kEntrySize to {length}.
Variable var_index(this, MachineRepresentation::kWord32);
......
......@@ -156,6 +156,15 @@ class CodeStubAssembler : public compiler::CodeAssembler {
compiler::Node* LoadObjectField(compiler::Node* object,
compiler::Node* offset,
MachineType rep = MachineType::AnyTagged());
// Load a SMI field and untag it.
compiler::Node* LoadAndUntagObjectField(compiler::Node* object, int offset);
// Load a SMI field, untag it, and convert to Word32.
compiler::Node* LoadAndUntagToWord32ObjectField(compiler::Node* object,
int offset);
// Load a SMI and untag it.
compiler::Node* LoadAndUntagSmi(compiler::Node* base, int index);
// Load a SMI root, untag it, and convert to Word32.
compiler::Node* LoadAndUntagToWord32Root(Heap::RootListIndex root_index);
// Load the floating point value of a HeapNumber.
compiler::Node* LoadHeapNumberValue(compiler::Node* object);
......@@ -170,7 +179,7 @@ class CodeStubAssembler : public compiler::CodeAssembler {
// Load the elements backing store of a JSObject.
compiler::Node* LoadElements(compiler::Node* object);
// Load the length of a fixed array base instance.
compiler::Node* LoadFixedArrayBaseLength(compiler::Node* array);
compiler::Node* LoadAndUntagFixedArrayBaseLength(compiler::Node* array);
// Load the bit field of a Map.
compiler::Node* LoadMapBitField(compiler::Node* map);
// Load bit field 2 of a map.
......@@ -212,6 +221,11 @@ class CodeStubAssembler : public compiler::CodeAssembler {
compiler::Node* object, compiler::Node* int32_index,
int additional_offset = 0,
ParameterMode parameter_mode = INTEGER_PARAMETERS);
// Load an array element from a FixedArray, untag it and return it as Word32.
compiler::Node* LoadAndUntagToWord32FixedArrayElement(
compiler::Node* object, compiler::Node* int32_index,
int additional_offset = 0,
ParameterMode parameter_mode = INTEGER_PARAMETERS);
// Load an array element from a FixedDoubleArray.
compiler::Node* LoadFixedDoubleArrayElement(
compiler::Node* object, compiler::Node* int32_index,
......
......@@ -1531,6 +1531,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArm64Strh:
__ Strh(i.InputOrZeroRegister64(0), i.MemoryOperand(1));
break;
case kArm64Ldrsw:
__ Ldrsw(i.OutputRegister(), i.MemoryOperand());
break;
case kArm64LdrW:
__ Ldr(i.OutputRegister32(), i.MemoryOperand());
break;
......
......@@ -147,6 +147,7 @@ namespace compiler {
V(Arm64Ldrh) \
V(Arm64Ldrsh) \
V(Arm64Strh) \
V(Arm64Ldrsw) \
V(Arm64LdrW) \
V(Arm64StrW) \
V(Arm64Ldr) \
......
......@@ -144,6 +144,7 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kArm64Ldrsb:
case kArm64Ldrh:
case kArm64Ldrsh:
case kArm64Ldrsw:
case kArm64LdrW:
case kArm64Ldr:
return kIsLoadOperation;
......@@ -236,6 +237,7 @@ int InstructionScheduler::GetInstructionLatency(const Instruction* instr) {
case kArm64Ldrh:
case kArm64Ldrsb:
case kArm64Ldrsh:
case kArm64Ldrsw:
return 11;
case kCheckedLoadInt8:
......
......@@ -161,7 +161,6 @@ void VisitRRO(InstructionSelector* selector, ArchOpcode opcode, Node* node,
g.UseOperand(node->InputAt(1), operand_mode));
}
bool TryMatchAnyShift(InstructionSelector* selector, Node* node,
Node* input_node, InstructionCode* opcode, bool try_ror) {
Arm64OperandGenerator g(selector);
......@@ -458,18 +457,43 @@ int32_t LeftShiftForReducedMultiply(Matcher* m) {
} // namespace
void InstructionSelector::VisitLoad(Node* node) {
LoadRepresentation load_rep = LoadRepresentationOf(node->op());
MachineRepresentation rep = load_rep.representation();
Arm64OperandGenerator g(this);
// Emits a load instruction for |node|, selecting the addressing mode
// (immediate, shifted-register, or register-register) based on the index
// operand. Factored out of VisitLoad so VisitChangeInt32ToInt64 can reuse it
// when merging a sign-extension into the load.
void EmitLoad(InstructionSelector* selector, Node* node, InstructionCode opcode,
ImmediateMode immediate_mode, MachineRepresentation rep,
Node* output = nullptr) {
Arm64OperandGenerator g(selector);
Node* base = node->InputAt(0);
Node* index = node->InputAt(1);
// NOTE(review): diff-rendering artifact — the next two declarations are
// removed lines from the old VisitLoad body; in the final source |opcode|
// and |immediate_mode| come in as parameters and are not redeclared here.
InstructionCode opcode = kArchNop;
ImmediateMode immediate_mode = kNoImmediate;
InstructionOperand inputs[3];
size_t input_count = 0;
InstructionOperand outputs[1];
// If output is not nullptr, use that as the output register. This
// is used when we merge a conversion into the load.
outputs[0] = g.DefineAsRegister(output == nullptr ? node : output);
inputs[0] = g.UseRegister(base);
if (g.CanBeImmediate(index, immediate_mode)) {
// Index fits in the instruction's immediate field: base + immediate.
input_count = 2;
inputs[1] = g.UseImmediate(index);
opcode |= AddressingModeField::encode(kMode_MRI);
} else if (TryMatchLoadStoreShift(&g, selector, rep, node, index, &inputs[1],
&inputs[2])) {
// Index is a shifted value that matches the scaled-register form.
input_count = 3;
opcode |= AddressingModeField::encode(kMode_Operand2_R_LSL_I);
} else {
// Fall back to base + register addressing.
input_count = 2;
inputs[1] = g.UseRegister(index);
opcode |= AddressingModeField::encode(kMode_MRR);
}
selector->Emit(opcode, arraysize(outputs), outputs, input_count, inputs);
}
void InstructionSelector::VisitLoad(Node* node) {
InstructionCode opcode = kArchNop;
ImmediateMode immediate_mode = kNoImmediate;
LoadRepresentation load_rep = LoadRepresentationOf(node->op());
MachineRepresentation rep = load_rep.representation();
switch (rep) {
case MachineRepresentation::kFloat32:
opcode = kArm64LdrS;
......@@ -502,25 +526,7 @@ void InstructionSelector::VisitLoad(Node* node) {
UNREACHABLE();
return;
}
outputs[0] = g.DefineAsRegister(node);
inputs[0] = g.UseRegister(base);
if (g.CanBeImmediate(index, immediate_mode)) {
input_count = 2;
inputs[1] = g.UseImmediate(index);
opcode |= AddressingModeField::encode(kMode_MRI);
} else if (TryMatchLoadStoreShift(&g, this, rep, node, index, &inputs[1],
&inputs[2])) {
input_count = 3;
opcode |= AddressingModeField::encode(kMode_Operand2_R_LSL_I);
} else {
input_count = 2;
inputs[1] = g.UseRegister(index);
opcode |= AddressingModeField::encode(kMode_MRR);
}
Emit(opcode, arraysize(outputs), outputs, input_count, inputs);
EmitLoad(this, node, opcode, immediate_mode, rep);
}
......@@ -959,7 +965,7 @@ void InstructionSelector::VisitWord64Shl(Node* node) {
Arm64OperandGenerator g(this);
Int64BinopMatcher m(node);
if ((m.left().IsChangeInt32ToInt64() || m.left().IsChangeUint32ToUint64()) &&
m.right().IsInRange(32, 63)) {
m.right().IsInRange(32, 63) && CanCover(node, m.left().node())) {
// There's no need to sign/zero-extend to 64-bit if we shift out the upper
// 32 bits anyway.
Emit(kArm64Lsl, g.DefineAsRegister(node),
......@@ -1563,7 +1569,35 @@ void InstructionSelector::VisitTryTruncateFloat64ToUint64(Node* node) {
// Selects instructions for a ChangeInt32ToInt64 node. When the input is a
// load that this node covers, the sign-extension is folded into the load
// (e.g. Ldrsw for a 32-bit load); otherwise a separate Sxtw is emitted.
void InstructionSelector::VisitChangeInt32ToInt64(Node* node) {
// NOTE(review): diff-rendering artifact — the next line is the removed
// pre-change body (unconditional Sxtw) shown without a '-' marker; it is
// not part of the final source.
VisitRR(this, kArm64Sxtw, node);
Node* value = node->InputAt(0);
if (value->opcode() == IrOpcode::kLoad && CanCover(node, value)) {
// Generate sign-extending load.
LoadRepresentation load_rep = LoadRepresentationOf(value->op());
MachineRepresentation rep = load_rep.representation();
InstructionCode opcode = kArchNop;
ImmediateMode immediate_mode = kNoImmediate;
switch (rep) {
case MachineRepresentation::kBit:  // Fall through.
case MachineRepresentation::kWord8:
opcode = load_rep.IsSigned() ? kArm64Ldrsb : kArm64Ldrb;
immediate_mode = kLoadStoreImm8;
break;
case MachineRepresentation::kWord16:
opcode = load_rep.IsSigned() ? kArm64Ldrsh : kArm64Ldrh;
immediate_mode = kLoadStoreImm16;
break;
case MachineRepresentation::kWord32:
// A 32-bit load always sign-extends via Ldrsw, regardless of the
// load's own signedness, because the conversion node is signed.
opcode = kArm64Ldrsw;
immediate_mode = kLoadStoreImm32;
break;
default:
UNREACHABLE();
return;
}
// Emit the load with |node| as the output, merging the conversion.
EmitLoad(this, value, opcode, immediate_mode, rep, node);
} else {
// Cannot merge: emit an explicit sign-extend-word instruction.
VisitRR(this, kArm64Sxtw, node);
}
}
......
......@@ -392,6 +392,26 @@ Node* InterpreterAssembler::LoadConstantPoolEntry(Node* index) {
return Load(MachineType::AnyTagged(), constant_pool, entry_offset);
}
// Loads the Smi constant at |index| in the bytecode array's constant pool
// and returns it untagged as a word-sized value.
Node* InterpreterAssembler::LoadAndUntagConstantPoolEntry(Node* index) {
Node* constant_pool = LoadObjectField(BytecodeArrayTaggedPointer(),
BytecodeArray::kConstantPoolOffset);
// Offset of element 0 relative to the (tagged) constant pool pointer.
int offset = FixedArray::kHeaderSize - kHeapObjectTag;
#if V8_TARGET_LITTLE_ENDIAN
// On 64-bit little-endian, bias the offset so the 32-bit load below reads
// the Smi payload half-word (at the higher address).
if (Is64()) {
offset += kPointerSize / 2;
}
#endif
Node* entry_offset =
IntPtrAdd(IntPtrConstant(offset), WordShl(index, kPointerSizeLog2));
if (Is64()) {
// 64-bit: load only the 32 interesting bits and sign-extend.
return ChangeInt32ToInt64(
Load(MachineType::Int32(), constant_pool, entry_offset));
} else {
// 32-bit targets: load the tagged word and untag it.
return SmiUntag(
Load(MachineType::AnyTagged(), constant_pool, entry_offset));
}
}
Node* InterpreterAssembler::LoadContextSlot(Node* context, int slot_index) {
return Load(MachineType::AnyTagged(), context,
IntPtrConstant(Context::SlotOffset(slot_index)));
......@@ -927,7 +947,7 @@ Node* InterpreterAssembler::RegisterCount() {
Node* InterpreterAssembler::ExportRegisterFile(Node* array) {
if (FLAG_debug_code) {
Node* array_size = SmiUntag(LoadFixedArrayBaseLength(array));
Node* array_size = LoadAndUntagFixedArrayBaseLength(array);
AbortIfWordNotEqual(
array_size, RegisterCount(), kInvalidRegisterFileInGenerator);
}
......@@ -962,7 +982,7 @@ Node* InterpreterAssembler::ExportRegisterFile(Node* array) {
Node* InterpreterAssembler::ImportRegisterFile(Node* array) {
if (FLAG_debug_code) {
Node* array_size = SmiUntag(LoadFixedArrayBaseLength(array));
Node* array_size = LoadAndUntagFixedArrayBaseLength(array);
AbortIfWordNotEqual(
array_size, RegisterCount(), kInvalidRegisterFileInGenerator);
}
......
......@@ -77,6 +77,9 @@ class InterpreterAssembler : public CodeStubAssembler {
// Load constant at |index| in the constant pool.
compiler::Node* LoadConstantPoolEntry(compiler::Node* index);
// Load and untag constant at |index| in the constant pool.
compiler::Node* LoadAndUntagConstantPoolEntry(compiler::Node* index);
// Load |slot_index| from |context|.
compiler::Node* LoadContextSlot(compiler::Node* context, int slot_index);
compiler::Node* LoadContextSlot(compiler::Node* context,
......
......@@ -1402,8 +1402,7 @@ void Interpreter::DoJump(InterpreterAssembler* assembler) {
// Jump by number of bytes in the Smi in the |idx| entry in the constant pool.
// Unconditional jump by the Smi byte count stored at constant pool entry
// |idx| (bytecode operand 0).
void Interpreter::DoJumpConstant(InterpreterAssembler* assembler) {
Node* index = __ BytecodeOperandIdx(0);
// NOTE(review): diff-rendering artifact — the next two lines are the
// removed pre-change code (separate load + SmiUntag); only the combined
// LoadAndUntagConstantPoolEntry call remains in the final source.
Node* constant = __ LoadConstantPoolEntry(index);
Node* relative_jump = __ SmiUntag(constant);
Node* relative_jump = __ LoadAndUntagConstantPoolEntry(index);
__ Jump(relative_jump);
}
......@@ -1425,8 +1424,7 @@ void Interpreter::DoJumpIfTrue(InterpreterAssembler* assembler) {
// Jump by the Smi byte count at constant pool entry |idx| if the
// accumulator holds the boolean true value.
void Interpreter::DoJumpIfTrueConstant(InterpreterAssembler* assembler) {
Node* accumulator = __ GetAccumulator();
Node* index = __ BytecodeOperandIdx(0);
// NOTE(review): diff-rendering artifact — the next two lines are the
// removed pre-change code; only LoadAndUntagConstantPoolEntry remains.
Node* constant = __ LoadConstantPoolEntry(index);
Node* relative_jump = __ SmiUntag(constant);
Node* relative_jump = __ LoadAndUntagConstantPoolEntry(index);
Node* true_value = __ BooleanConstant(true);
__ JumpIfWordEqual(accumulator, true_value, relative_jump);
}
......@@ -1449,8 +1447,7 @@ void Interpreter::DoJumpIfFalse(InterpreterAssembler* assembler) {
// Jump by the Smi byte count at constant pool entry |idx| if the
// accumulator holds the boolean false value.
void Interpreter::DoJumpIfFalseConstant(InterpreterAssembler* assembler) {
Node* accumulator = __ GetAccumulator();
Node* index = __ BytecodeOperandIdx(0);
// NOTE(review): diff-rendering artifact — the next two lines are the
// removed pre-change code; only LoadAndUntagConstantPoolEntry remains.
Node* constant = __ LoadConstantPoolEntry(index);
Node* relative_jump = __ SmiUntag(constant);
Node* relative_jump = __ LoadAndUntagConstantPoolEntry(index);
Node* false_value = __ BooleanConstant(false);
__ JumpIfWordEqual(accumulator, false_value, relative_jump);
}
......@@ -1479,8 +1476,7 @@ void Interpreter::DoJumpIfToBooleanTrueConstant(
InterpreterAssembler* assembler) {
Node* value = __ GetAccumulator();
Node* index = __ BytecodeOperandIdx(0);
Node* constant = __ LoadConstantPoolEntry(index);
Node* relative_jump = __ SmiUntag(constant);
Node* relative_jump = __ LoadAndUntagConstantPoolEntry(index);
Label if_true(assembler), if_false(assembler);
__ BranchIfToBooleanIsTrue(value, &if_true, &if_false);
__ Bind(&if_true);
......@@ -1513,8 +1509,7 @@ void Interpreter::DoJumpIfToBooleanFalseConstant(
InterpreterAssembler* assembler) {
Node* value = __ GetAccumulator();
Node* index = __ BytecodeOperandIdx(0);
Node* constant = __ LoadConstantPoolEntry(index);
Node* relative_jump = __ SmiUntag(constant);
Node* relative_jump = __ LoadAndUntagConstantPoolEntry(index);
Label if_true(assembler), if_false(assembler);
__ BranchIfToBooleanIsTrue(value, &if_true, &if_false);
__ Bind(&if_true);
......@@ -1542,8 +1537,7 @@ void Interpreter::DoJumpIfNullConstant(InterpreterAssembler* assembler) {
Node* accumulator = __ GetAccumulator();
Node* null_value = __ HeapConstant(isolate_->factory()->null_value());
Node* index = __ BytecodeOperandIdx(0);
Node* constant = __ LoadConstantPoolEntry(index);
Node* relative_jump = __ SmiUntag(constant);
Node* relative_jump = __ LoadAndUntagConstantPoolEntry(index);
__ JumpIfWordEqual(accumulator, null_value, relative_jump);
}
......@@ -1568,8 +1562,7 @@ void Interpreter::DoJumpIfUndefinedConstant(InterpreterAssembler* assembler) {
Node* undefined_value =
__ HeapConstant(isolate_->factory()->undefined_value());
Node* index = __ BytecodeOperandIdx(0);
Node* constant = __ LoadConstantPoolEntry(index);
Node* relative_jump = __ SmiUntag(constant);
Node* relative_jump = __ LoadAndUntagConstantPoolEntry(index);
__ JumpIfWordEqual(accumulator, undefined_value, relative_jump);
}
......@@ -1592,8 +1585,7 @@ void Interpreter::DoJumpIfNotHoleConstant(InterpreterAssembler* assembler) {
Node* accumulator = __ GetAccumulator();
Node* the_hole_value = __ HeapConstant(isolate_->factory()->the_hole_value());
Node* index = __ BytecodeOperandIdx(0);
Node* constant = __ LoadConstantPoolEntry(index);
Node* relative_jump = __ SmiUntag(constant);
Node* relative_jump = __ LoadAndUntagConstantPoolEntry(index);
__ JumpIfWordNotEqual(accumulator, the_hole_value, relative_jump);
}
......
......@@ -2396,6 +2396,89 @@ TEST_F(InstructionSelectorTest, ChangeUint32ToUint64AfterLoad) {
}
}
// Verifies that ChangeInt32ToInt64 is merged into an immediately preceding
// load: each case expects a single sign- or zero-extending load instruction
// (Ldrb/Ldrsb/Ldrh/Ldrsh/Ldrsw) instead of a load followed by Sxtw.
TEST_F(InstructionSelectorTest, ChangeInt32ToInt64AfterLoad) {
// For each case, test that the conversion is merged into the load
// operation.
// ChangeInt32ToInt64(Load_Uint8) -> Ldrb
{
StreamBuilder m(this, MachineType::Int64(), MachineType::Pointer(),
MachineType::Int32());
m.Return(m.ChangeInt32ToInt64(
m.Load(MachineType::Uint8(), m.Parameter(0), m.Parameter(1))));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
EXPECT_EQ(kArm64Ldrb, s[0]->arch_opcode());
EXPECT_EQ(kMode_MRR, s[0]->addressing_mode());
EXPECT_EQ(2U, s[0]->InputCount());
EXPECT_EQ(1U, s[0]->OutputCount());
}
// ChangeInt32ToInt64(Load_Int8) -> Ldrsb
{
StreamBuilder m(this, MachineType::Int64(), MachineType::Pointer(),
MachineType::Int32());
m.Return(m.ChangeInt32ToInt64(
m.Load(MachineType::Int8(), m.Parameter(0), m.Parameter(1))));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
EXPECT_EQ(kArm64Ldrsb, s[0]->arch_opcode());
EXPECT_EQ(kMode_MRR, s[0]->addressing_mode());
EXPECT_EQ(2U, s[0]->InputCount());
EXPECT_EQ(1U, s[0]->OutputCount());
}
// ChangeInt32ToInt64(Load_Uint16) -> Ldrh
{
StreamBuilder m(this, MachineType::Int64(), MachineType::Pointer(),
MachineType::Int32());
m.Return(m.ChangeInt32ToInt64(
m.Load(MachineType::Uint16(), m.Parameter(0), m.Parameter(1))));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
EXPECT_EQ(kArm64Ldrh, s[0]->arch_opcode());
EXPECT_EQ(kMode_MRR, s[0]->addressing_mode());
EXPECT_EQ(2U, s[0]->InputCount());
EXPECT_EQ(1U, s[0]->OutputCount());
}
// ChangeInt32ToInt64(Load_Int16) -> Ldrsh
{
StreamBuilder m(this, MachineType::Int64(), MachineType::Pointer(),
MachineType::Int32());
m.Return(m.ChangeInt32ToInt64(
m.Load(MachineType::Int16(), m.Parameter(0), m.Parameter(1))));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
EXPECT_EQ(kArm64Ldrsh, s[0]->arch_opcode());
EXPECT_EQ(kMode_MRR, s[0]->addressing_mode());
EXPECT_EQ(2U, s[0]->InputCount());
EXPECT_EQ(1U, s[0]->OutputCount());
}
// ChangeInt32ToInt64(Load_Uint32) -> Ldrsw
// Note: a 32-bit load feeding a signed conversion selects Ldrsw even for
// an unsigned load representation (the conversion itself sign-extends).
{
StreamBuilder m(this, MachineType::Int64(), MachineType::Pointer(),
MachineType::Int32());
m.Return(m.ChangeInt32ToInt64(
m.Load(MachineType::Uint32(), m.Parameter(0), m.Parameter(1))));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
EXPECT_EQ(kArm64Ldrsw, s[0]->arch_opcode());
EXPECT_EQ(kMode_MRR, s[0]->addressing_mode());
EXPECT_EQ(2U, s[0]->InputCount());
EXPECT_EQ(1U, s[0]->OutputCount());
}
// ChangeInt32ToInt64(Load_Int32) -> Ldrsw
{
StreamBuilder m(this, MachineType::Int64(), MachineType::Pointer(),
MachineType::Int32());
m.Return(m.ChangeInt32ToInt64(
m.Load(MachineType::Int32(), m.Parameter(0), m.Parameter(1))));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
EXPECT_EQ(kArm64Ldrsw, s[0]->arch_opcode());
EXPECT_EQ(kMode_MRR, s[0]->addressing_mode());
EXPECT_EQ(2U, s[0]->InputCount());
EXPECT_EQ(1U, s[0]->OutputCount());
}
}
// -----------------------------------------------------------------------------
// Memory access instructions.
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment