Commit 049341f0 authored by ahaas, committed by Commit bot

[wasm] Implementation of Word32PairShr and Word32PairSar on arm.

Implementation of https://codereview.chromium.org/1768233002 on arm.
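
For orientation, here is a minimal C++ sketch (illustrative only, not part of this commit) of the semantics the new pair operations provide: a 64-bit wasm value is lowered to a (low, high) pair of 32-bit words, Word32PairShr shifts it right logically and Word32PairSar shifts it right arithmetically. The "& 63" reduction of the shift count reflects wasm's mod-64 shift semantics and is an assumption about what the int64 lowering passes down; the names Word32Pair, PairShr and PairSar are invented for the sketch.

#include <cstdint>
#include <cstdio>

struct Word32Pair {
  uint32_t low;
  uint32_t high;
};

// Logical right shift of a 64-bit value held as two 32-bit words.
Word32Pair PairShr(uint32_t low, uint32_t high, uint32_t shift) {
  uint64_t value = (static_cast<uint64_t>(high) << 32) | low;
  uint64_t result = value >> (shift & 63);
  return {static_cast<uint32_t>(result), static_cast<uint32_t>(result >> 32)};
}

// Arithmetic right shift: the sign bit of the high word fills in from the
// left (assumes the usual two's-complement arithmetic shift of int64_t).
Word32Pair PairSar(uint32_t low, uint32_t high, uint32_t shift) {
  int64_t value =
      static_cast<int64_t>((static_cast<uint64_t>(high) << 32) | low);
  int64_t result = value >> (shift & 63);
  return {static_cast<uint32_t>(result),
          static_cast<uint32_t>(static_cast<uint64_t>(result) >> 32)};
}

int main() {
  Word32Pair r = PairSar(0x00000000u, 0x80000000u, 4);
  std::printf("low=0x%08x high=0x%08x\n", r.low, r.high);  // high=0xf8000000
}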

R=titzer@chromium.org, v8-arm-ports@googlegroups.com

Review URL: https://codereview.chromium.org/1778893004

Cr-Commit-Position: refs/heads/master@{#34769}
parent 29e0e8e9
@@ -1147,6 +1147,99 @@ void MacroAssembler::LslPair(Register dst_low, Register dst_high,
}
}

void MacroAssembler::LsrPair(Register dst_low, Register dst_high,
                             Register src_low, Register src_high,
                             Register scratch, Register shift) {
  DCHECK(!AreAliased(dst_low, src_high));
  DCHECK(!AreAliased(dst_low, shift));
  Label less_than_32;
  Label done;
  rsb(scratch, shift, Operand(32), SetCC);
  b(gt, &less_than_32);
  // If shift >= 32
  and_(scratch, shift, Operand(0x1f));
  lsr(dst_low, src_high, Operand(scratch));
  mov(dst_high, Operand(0));
  jmp(&done);
  bind(&less_than_32);
  // If shift < 32
  lsr(dst_low, src_low, Operand(shift));
  orr(dst_low, dst_low, Operand(src_high, LSL, scratch));
  lsr(dst_high, src_high, Operand(shift));
  bind(&done);
}

void MacroAssembler::LsrPair(Register dst_low, Register dst_high,
                             Register src_low, Register src_high,
                             uint32_t shift) {
  DCHECK(!AreAliased(dst_low, src_high));
  Label less_than_32;
  Label done;
  if (shift == 32) {
    mov(dst_low, src_high);
    mov(dst_high, Operand(0));
  } else if (shift > 32) {
    shift &= 0x1f;
    lsr(dst_low, src_high, Operand(shift));
    mov(dst_high, Operand(0));
  } else if (shift == 0) {
    Move(dst_low, src_low);
    Move(dst_high, src_high);
  } else {
    lsr(dst_low, src_low, Operand(shift));
    orr(dst_low, dst_low, Operand(src_high, LSL, 32 - shift));
    lsr(dst_high, src_high, Operand(shift));
  }
}

void MacroAssembler::AsrPair(Register dst_low, Register dst_high,
                             Register src_low, Register src_high,
                             Register scratch, Register shift) {
  DCHECK(!AreAliased(dst_low, src_high));
  DCHECK(!AreAliased(dst_low, shift));
  Label less_than_32;
  Label done;
  rsb(scratch, shift, Operand(32), SetCC);
  b(gt, &less_than_32);
  // If shift >= 32
  and_(scratch, shift, Operand(0x1f));
  asr(dst_low, src_high, Operand(scratch));
  asr(dst_high, src_high, Operand(31));
  jmp(&done);
  bind(&less_than_32);
  // If shift < 32
  lsr(dst_low, src_low, Operand(shift));
  orr(dst_low, dst_low, Operand(src_high, LSL, scratch));
  asr(dst_high, src_high, Operand(shift));
  bind(&done);
}

void MacroAssembler::AsrPair(Register dst_low, Register dst_high,
                             Register src_low, Register src_high,
                             uint32_t shift) {
  DCHECK(!AreAliased(dst_low, src_high));
  Label less_than_32;
  Label done;
  if (shift == 32) {
    mov(dst_low, src_high);
    asr(dst_high, src_high, Operand(31));
  } else if (shift > 32) {
    shift &= 0x1f;
    asr(dst_low, src_high, Operand(shift));
    asr(dst_high, src_high, Operand(31));
  } else if (shift == 0) {
    Move(dst_low, src_low);
    Move(dst_high, src_high);
  } else {
    lsr(dst_low, src_low, Operand(shift));
    orr(dst_low, dst_low, Operand(src_high, LSL, 32 - shift));
    asr(dst_high, src_high, Operand(shift));
  }
}
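
As an aside (not part of the diff), the plain-C++ mirror below of the variable-count LsrPair above makes the branch structure and the aliasing precondition explicit: in the shift < 32 path dst_low is written before src_high and shift are read again, which is why dst_low must not alias src_high or shift. The sketch assumes the shift count is in [0, 63]. Note that the assembly folds shift == 0 into its shift < 32 path, because an ARM register-specified LSL by 32 yields 0, whereas C++ needs an explicit case to avoid an undefined 32-bit shift by 32. AsrPair is analogous, with asr/sign fill in place of lsr/zero fill.

#include <cstdint>

// Reference mirror (illustrative, not V8 code) of the variable-count LsrPair.
void LsrPairRef(uint32_t* dst_low, uint32_t* dst_high,
                uint32_t src_low, uint32_t src_high, uint32_t shift) {
  if (shift >= 32) {
    // "shift >= 32" path: only the masked low five bits of the count matter.
    *dst_low = src_high >> (shift & 0x1f);
    *dst_high = 0;
  } else if (shift == 0) {
    // The assembly handles this inside its "< 32" path; C++ must special-case
    // it to avoid shifting a 32-bit value by 32.
    *dst_low = src_low;
    *dst_high = src_high;
  } else {
    // "shift < 32" path: scratch holds 32 - shift in the assembly.
    *dst_low = (src_low >> shift) | (src_high << (32 - shift));
    *dst_high = src_high >> shift;
  }
}
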
void MacroAssembler::LoadConstantPoolPointerRegisterFromCodeTargetAddress(
Register code_target_address) {
DCHECK(FLAG_enable_embedded_constant_pool);
@@ -553,6 +553,14 @@ class MacroAssembler: public Assembler {
Register src_high, Register scratch, Register shift);
void LslPair(Register dst_low, Register dst_high, Register src_low,
Register src_high, uint32_t shift);
void LsrPair(Register dst_low, Register dst_high, Register src_low,
Register src_high, Register scratch, Register shift);
void LsrPair(Register dst_low, Register dst_high, Register src_low,
Register src_high, uint32_t shift);
void AsrPair(Register dst_low, Register dst_high, Register src_low,
Register src_high, Register scratch, Register shift);
void AsrPair(Register dst_low, Register dst_high, Register src_low,
Register src_high, uint32_t shift);
// Loads the number from object into dst register.
// If |object| is neither smi nor heap number, |not_number| is jumped to
@@ -807,6 +807,24 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
i.InputRegister(1), kScratchReg, i.InputRegister(2));
}
break;
case kArmLsrPair:
if (instr->InputAt(2)->IsImmediate()) {
__ LsrPair(i.OutputRegister(0), i.OutputRegister(1), i.InputRegister(0),
i.InputRegister(1), i.InputInt32(2));
} else {
__ LsrPair(i.OutputRegister(0), i.OutputRegister(1), i.InputRegister(0),
i.InputRegister(1), kScratchReg, i.InputRegister(2));
}
break;
case kArmAsrPair:
if (instr->InputAt(2)->IsImmediate()) {
__ AsrPair(i.OutputRegister(0), i.OutputRegister(1), i.InputRegister(0),
i.InputRegister(1), i.InputInt32(2));
} else {
__ AsrPair(i.OutputRegister(0), i.OutputRegister(1), i.InputRegister(0),
i.InputRegister(1), kScratchReg, i.InputRegister(2));
}
break;
case kArmVcmpF32:
if (instr->InputAt(1)->IsDoubleRegister()) {
__ VFPCompareAndSetFlags(i.InputFloat32Register(0),
@@ -48,6 +48,8 @@ namespace compiler {
V(ArmUxtah) \
V(ArmAddPair) \
V(ArmLslPair) \
V(ArmLsrPair) \
V(ArmAsrPair) \
V(ArmVcmpF32) \
V(ArmVaddF32) \
V(ArmVsubF32) \
@@ -50,6 +50,8 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kArmRbit:
case kArmAddPair:
case kArmLslPair:
case kArmLsrPair:
case kArmAsrPair:
case kArmVcmpF32:
case kArmVaddF32:
case kArmVsubF32:
@@ -806,9 +806,51 @@ void InstructionSelector::VisitWord32PairShl(Node* node) {
Emit(kArmLslPair, 2, outputs, 3, inputs);
}
void InstructionSelector::VisitWord32PairShr(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitWord32PairShr(Node* node) {
  ArmOperandGenerator g(this);
  // We use g.UseUniqueRegister here for InputAt(1) and InputAt(2) to
  // guarantee that there is no register aliasing with the output registers.
  Int32Matcher m(node->InputAt(2));
  InstructionOperand shift_operand;
  if (m.HasValue()) {
    shift_operand = g.UseImmediate(m.node());
  } else {
    shift_operand = g.UseUniqueRegister(m.node());
  }

  InstructionOperand inputs[] = {g.UseRegister(node->InputAt(0)),
                                 g.UseUniqueRegister(node->InputAt(1)),
                                 shift_operand};

  InstructionOperand outputs[] = {
      g.DefineAsRegister(node),
      g.DefineAsRegister(NodeProperties::FindProjection(node, 1))};

  Emit(kArmLsrPair, 2, outputs, 3, inputs);
}
void InstructionSelector::VisitWord32PairSar(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitWord32PairSar(Node* node) {
  ArmOperandGenerator g(this);
  // We use g.UseUniqueRegister here for InputAt(1) and InputAt(2) to
  // guarantee that there is no register aliasing with the output registers.
  Int32Matcher m(node->InputAt(2));
  InstructionOperand shift_operand;
  if (m.HasValue()) {
    shift_operand = g.UseImmediate(m.node());
  } else {
    shift_operand = g.UseUniqueRegister(m.node());
  }

  InstructionOperand inputs[] = {g.UseRegister(node->InputAt(0)),
                                 g.UseUniqueRegister(node->InputAt(1)),
                                 shift_operand};

  InstructionOperand outputs[] = {
      g.DefineAsRegister(node),
      g.DefineAsRegister(NodeProperties::FindProjection(node, 1))};

  Emit(kArmAsrPair, 2, outputs, 3, inputs);
}
void InstructionSelector::VisitWord32Ror(Node* node) {
VisitShift(this, node, TryMatchROR);
@@ -39,52 +39,52 @@
#define MIPS_OR_ARM_OR_X87 false
#endif
#define FOREACH_I64_OPERATOR(V) \
V(DepthFirst, true) \
V(I64Const, true) \
V(I64Return, true) \
V(I64Param, true) \
V(I64LoadStore, true) \
V(I64Add, true) \
V(I64Sub, false) \
V(I64Mul, false) \
V(I64DivS, true) \
V(I64DivU, true) \
V(I64RemS, true) \
V(I64RemU, true) \
V(I64And, true) \
V(I64Ior, true) \
V(I64Xor, true) \
V(I64Shl, !MIPS_OR_X87) \
V(I64ShrU, !MIPS_OR_ARM_OR_X87) \
V(I64ShrS, !MIPS_OR_ARM_OR_X87) \
V(I64Eq, true) \
V(I64Ne, true) \
V(I64LtS, true) \
V(I64LeS, true) \
V(I64LtU, true) \
V(I64LeU, true) \
V(I64GtS, true) \
V(I64GeS, true) \
V(I64GtU, true) \
V(I64GeU, true) \
V(I64Clz, false) \
V(I64Ctz, false) \
V(I64Popcnt, false) \
V(I32ConvertI64, true) \
V(I64SConvertF32, false) \
V(I64SConvertF64, false) \
V(I64UConvertF32, false) \
V(I64UConvertF64, false) \
V(I64SConvertI32, true) \
V(I64UConvertI32, true) \
V(F32SConvertI64, true) \
V(F32UConvertI64, true) \
V(F64SConvertI64, true) \
V(F64UConvertI64, true) \
V(F64ReinterpretI64, false) \
V(I64ReinterpretF64, false) \
V(I64Ror, false) \
#define FOREACH_I64_OPERATOR(V) \
V(DepthFirst, true) \
V(I64Const, true) \
V(I64Return, true) \
V(I64Param, true) \
V(I64LoadStore, true) \
V(I64Add, true) \
V(I64Sub, false) \
V(I64Mul, false) \
V(I64DivS, true) \
V(I64DivU, true) \
V(I64RemS, true) \
V(I64RemU, true) \
V(I64And, true) \
V(I64Ior, true) \
V(I64Xor, true) \
V(I64Shl, !MIPS_OR_X87) \
V(I64ShrU, !MIPS_OR_X87) \
V(I64ShrS, !MIPS_OR_X87) \
V(I64Eq, true) \
V(I64Ne, true) \
V(I64LtS, true) \
V(I64LeS, true) \
V(I64LtU, true) \
V(I64LeU, true) \
V(I64GtS, true) \
V(I64GeS, true) \
V(I64GtU, true) \
V(I64GeU, true) \
V(I64Clz, false) \
V(I64Ctz, false) \
V(I64Popcnt, false) \
V(I32ConvertI64, true) \
V(I64SConvertF32, false) \
V(I64SConvertF64, false) \
V(I64UConvertF32, false) \
V(I64UConvertF64, false) \
V(I64SConvertI32, true) \
V(I64UConvertI32, true) \
V(F32SConvertI64, true) \
V(F32UConvertI64, true) \
V(F64SConvertI64, true) \
V(F64UConvertI64, true) \
V(F64ReinterpretI64, false) \
V(I64ReinterpretF64, false) \
V(I64Ror, false) \
V(I64Rol, false)
#define DECLARE_CONST(name, cond) static const bool kSupported_##name = cond;
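
As a side note (not from the V8 sources), the V(name, cond) entries above follow the X-macro pattern: applying DECLARE_CONST to the list turns each entry into a kSupported_<name> flag that the int64-lowering tests can query. A self-contained sketch of that expansion, using a hypothetical cut-down list named FOREACH_I64_OPERATOR_DEMO:

#include <cstdio>

#define MIPS_OR_X87 false

// Hypothetical two-entry list; the real FOREACH_I64_OPERATOR covers all i64 ops.
#define FOREACH_I64_OPERATOR_DEMO(V) \
  V(I64ShrU, !MIPS_OR_X87)           \
  V(I64ShrS, !MIPS_OR_X87)

#define DECLARE_CONST(name, cond) static const bool kSupported_##name = cond;
FOREACH_I64_OPERATOR_DEMO(DECLARE_CONST)
#undef DECLARE_CONST

int main() {
  std::printf("I64ShrU supported: %d\n", kSupported_I64ShrU);
  std::printf("I64ShrS supported: %d\n", kSupported_I64ShrS);
  return 0;
}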