ARM64: Faster immediate check and fix corner cases

Improve the code used to check for encodable logical immediates, fix some corner
cases associated with moving kWMinInt into W registers, and add tests.

BUG=
R=ulan@chromium.org

Review URL: https://codereview.chromium.org/341123003

git-svn-id: https://v8.googlecode.com/svn/branches/bleeding_edge@22120 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
parent 06e082c8
This diff is collapsed.
......@@ -64,17 +64,23 @@ void MacroAssembler::LogicalMacro(const Register& rd,
} else if (operand.IsImmediate()) {
int64_t immediate = operand.ImmediateValue();
unsigned reg_size = rd.SizeInBits();
ASSERT(rd.Is64Bits() || is_uint32(immediate));
// If the operation is NOT, invert the operation and immediate.
if ((op & NOT) == NOT) {
op = static_cast<LogicalOp>(op & ~NOT);
immediate = ~immediate;
if (rd.Is32Bits()) {
immediate &= kWRegMask;
}
}
// Ignore the top 32 bits of an immediate if we're moving to a W register.
if (rd.Is32Bits()) {
// Check that the top 32 bits are consistent.
ASSERT(((immediate >> kWRegSizeInBits) == 0) ||
((immediate >> kWRegSizeInBits) == -1));
immediate &= kWRegMask;
}
ASSERT(rd.Is64Bits() || is_uint32(immediate));
// Special cases for all set or all clear immediates.
if (immediate == 0) {
switch (op) {
......
......@@ -78,6 +78,11 @@ int CountSetBits(uint64_t value, int width) {
}
// Return the largest power of two that divides |value| exactly — that is,
// the value of its lowest set bit. A |value| of zero yields zero.
uint64_t LargestPowerOf2Divisor(uint64_t value) {
  // ~value + 1 is the two's-complement negation, spelled out explicitly:
  // all bits below the lowest set bit are zero in both operands, the lowest
  // set bit is one in both, and all higher bits differ, so the AND isolates
  // exactly that lowest set bit.
  return value & (~value + 1);
}
int MaskToBit(uint64_t mask) {
ASSERT(CountSetBits(mask, 64) == 1);
return CountTrailingZeros(mask, 64);
......
......@@ -57,6 +57,7 @@ int CountLeadingZeros(uint64_t value, int width);
int CountLeadingSignBits(int64_t value, int width);
int CountTrailingZeros(uint64_t value, int width);
int CountSetBits(uint64_t value, int width);
uint64_t LargestPowerOf2Divisor(uint64_t value);
int MaskToBit(uint64_t mask);
......
......@@ -426,6 +426,9 @@ TEST(mov_imm_w) {
__ Mov(w4, 0x00001234L);
__ Mov(w5, 0x12340000L);
__ Mov(w6, 0x12345678L);
__ Mov(w7, (int32_t)0x80000000);
__ Mov(w8, (int32_t)0xffff0000);
__ Mov(w9, kWMinInt);
END();
RUN();
......@@ -437,6 +440,9 @@ TEST(mov_imm_w) {
ASSERT_EQUAL_64(0x00001234L, x4);
ASSERT_EQUAL_64(0x12340000L, x5);
ASSERT_EQUAL_64(0x12345678L, x6);
ASSERT_EQUAL_64(0x80000000L, x7);
ASSERT_EQUAL_64(0xffff0000L, x8);
ASSERT_EQUAL_32(kWMinInt, w9);
TEARDOWN();
}
......@@ -588,6 +594,9 @@ TEST(bitwise_wide_imm) {
__ Orr(x10, x0, Operand(0x1234567890abcdefUL));
__ Orr(w11, w1, Operand(0x90abcdef));
__ Orr(w12, w0, kWMinInt);
__ Eor(w13, w0, kWMinInt);
END();
RUN();
......@@ -596,6 +605,8 @@ TEST(bitwise_wide_imm) {
ASSERT_EQUAL_64(0xf0f0f0f0f0f0f0f0UL, x1);
ASSERT_EQUAL_64(0x1234567890abcdefUL, x10);
ASSERT_EQUAL_64(0xf0fbfdffUL, x11);
ASSERT_EQUAL_32(kWMinInt, w12);
ASSERT_EQUAL_32(kWMinInt, w13);
TEARDOWN();
}
......@@ -3362,8 +3373,10 @@ TEST(add_sub_wide_imm) {
__ Add(w12, w0, Operand(0x12345678));
__ Add(w13, w1, Operand(0xffffffff));
__ Sub(x20, x0, Operand(0x1234567890abcdefUL));
__ Add(w18, w0, Operand(kWMinInt));
__ Sub(w19, w0, Operand(kWMinInt));
__ Sub(x20, x0, Operand(0x1234567890abcdefUL));
__ Sub(w21, w0, Operand(0x12345678));
END();
......@@ -3375,8 +3388,10 @@ TEST(add_sub_wide_imm) {
ASSERT_EQUAL_32(0x12345678, w12);
ASSERT_EQUAL_64(0x0, x13);
ASSERT_EQUAL_64(-0x1234567890abcdefUL, x20);
ASSERT_EQUAL_32(kWMinInt, w18);
ASSERT_EQUAL_32(kWMinInt, w19);
ASSERT_EQUAL_64(-0x1234567890abcdefUL, x20);
ASSERT_EQUAL_32(-0x12345678, w21);
TEARDOWN();
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment