Commit be35a9e7 authored by yangguo@chromium.org

Revert "ARM64: Faster immediate check and fix corner cases"

This reverts r22120 due to build breakage of the arm64.debug target.

TBR=m.m.capewell@googlemail.com

Review URL: https://codereview.chromium.org/361973002

git-svn-id: https://v8.googlecode.com/svn/branches/bleeding_edge@22123 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
parent d8fb9b9f
@@ -64,23 +64,17 @@ void MacroAssembler::LogicalMacro(const Register& rd,
  } else if (operand.IsImmediate()) {
    int64_t immediate = operand.ImmediateValue();
    unsigned reg_size = rd.SizeInBits();
    ASSERT(rd.Is64Bits() || is_uint32(immediate));
    // If the operation is NOT, invert the operation and immediate.
    if ((op & NOT) == NOT) {
      op = static_cast<LogicalOp>(op & ~NOT);
      immediate = ~immediate;
      if (rd.Is32Bits()) {
        immediate &= kWRegMask;
      }
    }
    // Ignore the top 32 bits of an immediate if we're moving to a W register.
    if (rd.Is32Bits()) {
      // Check that the top 32 bits are consistent.
      ASSERT(((immediate >> kWRegSizeInBits) == 0) ||
             ((immediate >> kWRegSizeInBits) == -1));
      immediate &= kWRegMask;
    }
    ASSERT(rd.Is64Bits() || is_uint32(immediate));
    // Special cases for all set or all clear immediates.
    if (immediate == 0) {
      switch (op) {
......
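For reference, a minimal standalone sketch (not the V8 implementation) of the W-register immediate handling touched in the hunk above: the top 32 bits of the 64-bit immediate must be a consistent zero- or sign-extension of a 32-bit value before they are masked away. kWRegSizeInBits and kWRegMask are redefined locally with their assumed V8 values.

#include <cassert>
#include <cstdint>
#include <cstdio>

// Locally assumed values matching the constants used in the hunk above.
constexpr unsigned kWRegSizeInBits = 32;
constexpr uint64_t kWRegMask = 0xffffffffUL;

// Sketch: normalize a 64-bit immediate for a 32-bit (W register) operation.
uint64_t NormalizeWImmediate(int64_t immediate) {
  // The top 32 bits must be all zeros or all ones, i.e. a consistent
  // zero- or sign-extension of a 32-bit value ...
  assert(((immediate >> kWRegSizeInBits) == 0) ||
         ((immediate >> kWRegSizeInBits) == -1));
  // ... and are then discarded for the 32-bit operation.
  return static_cast<uint64_t>(immediate) & kWRegMask;
}

int main() {
  // 0x80000000 as an int32_t (kWMinInt) sign-extends to 0xffffffff80000000;
  // it passes the consistency check and masks back down to 0x80000000.
  int64_t imm = static_cast<int32_t>(0x80000000);
  printf("0x%llx\n",
         static_cast<unsigned long long>(NormalizeWImmediate(imm)));
  return 0;
}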
@@ -78,11 +78,6 @@ int CountSetBits(uint64_t value, int width) {
}
uint64_t LargestPowerOf2Divisor(uint64_t value) {
  return value & -value;
}
int MaskToBit(uint64_t mask) {
  ASSERT(CountSetBits(mask, 64) == 1);
  return CountTrailingZeros(mask, 64);
......
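The helper touched in this hunk relies on the two's-complement identity value & -value, which isolates the lowest set bit of value and is therefore the largest power of two dividing it. A small self-contained illustration of that assumed semantics (not the V8 sources):

#include <cassert>
#include <cstdint>

// value & -value keeps only the lowest set bit of value; for unsigned
// arithmetic, -value wraps modulo 2^64, so the identity holds for all inputs.
uint64_t LargestPowerOf2Divisor(uint64_t value) {
  return value & -value;
}

int main() {
  assert(LargestPowerOf2Divisor(0x18) == 0x8);   // 24 = 8 * 3
  assert(LargestPowerOf2Divisor(0x10) == 0x10);  // already a power of two
  assert(LargestPowerOf2Divisor(1) == 1);        // odd values divide only by 1
  return 0;
}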
@@ -57,7 +57,6 @@ int CountLeadingZeros(uint64_t value, int width);
int CountLeadingSignBits(int64_t value, int width);
int CountTrailingZeros(uint64_t value, int width);
int CountSetBits(uint64_t value, int width);
uint64_t LargestPowerOf2Divisor(uint64_t value);
int MaskToBit(uint64_t mask);
......
@@ -426,9 +426,6 @@ TEST(mov_imm_w) {
  __ Mov(w4, 0x00001234L);
  __ Mov(w5, 0x12340000L);
  __ Mov(w6, 0x12345678L);
  __ Mov(w7, (int32_t)0x80000000);
  __ Mov(w8, (int32_t)0xffff0000);
  __ Mov(w9, kWMinInt);
  END();
  RUN();
@@ -440,9 +437,6 @@ TEST(mov_imm_w) {
  ASSERT_EQUAL_64(0x00001234L, x4);
  ASSERT_EQUAL_64(0x12340000L, x5);
  ASSERT_EQUAL_64(0x12345678L, x6);
  ASSERT_EQUAL_64(0x80000000L, x7);
  ASSERT_EQUAL_64(0xffff0000L, x8);
  ASSERT_EQUAL_32(kWMinInt, w9);
  TEARDOWN();
}
@@ -594,9 +588,6 @@ TEST(bitwise_wide_imm) {
  __ Orr(x10, x0, Operand(0x1234567890abcdefUL));
  __ Orr(w11, w1, Operand(0x90abcdef));
  __ Orr(w12, w0, kWMinInt);
  __ Eor(w13, w0, kWMinInt);
  END();
  RUN();
@@ -605,8 +596,6 @@ TEST(bitwise_wide_imm) {
  ASSERT_EQUAL_64(0xf0f0f0f0f0f0f0f0UL, x1);
  ASSERT_EQUAL_64(0x1234567890abcdefUL, x10);
  ASSERT_EQUAL_64(0xf0fbfdffUL, x11);
  ASSERT_EQUAL_32(kWMinInt, w12);
  ASSERT_EQUAL_32(kWMinInt, w13);
  TEARDOWN();
}
@@ -3373,10 +3362,8 @@ TEST(add_sub_wide_imm) {
  __ Add(w12, w0, Operand(0x12345678));
  __ Add(w13, w1, Operand(0xffffffff));
  __ Add(w18, w0, Operand(kWMinInt));
  __ Sub(w19, w0, Operand(kWMinInt));
  __ Sub(x20, x0, Operand(0x1234567890abcdefUL));
  __ Sub(w21, w0, Operand(0x12345678));
  END();
@@ -3388,10 +3375,8 @@ TEST(add_sub_wide_imm) {
  ASSERT_EQUAL_32(0x12345678, w12);
  ASSERT_EQUAL_64(0x0, x13);
  ASSERT_EQUAL_32(kWMinInt, w18);
  ASSERT_EQUAL_32(kWMinInt, w19);
  ASSERT_EQUAL_64(-0x1234567890abcdefUL, x20);
  ASSERT_EQUAL_32(-0x12345678, w21);
  TEARDOWN();
......
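The test lines touched above all appear to exercise the same corner case: as an int32_t, kWMinInt (0x80000000) sign-extends to 0xffffffff80000000 when widened to a 64-bit immediate, so a strict is_uint32 check rejects it even though its low 32 bits are exactly what a W-register operation needs. A short sketch of that arithmetic (plain C++, not the V8 test harness):

#include <cstdint>
#include <cstdio>

int main() {
  // kWMinInt mirrors the constant used in the tests: the most negative
  // 32-bit value, written the same way as (int32_t)0x80000000 above.
  const int32_t kWMinInt = static_cast<int32_t>(0x80000000);

  // Sign-extension happens on this widening assignment.
  int64_t immediate = kWMinInt;

  printf("immediate       = 0x%016llx\n",
         static_cast<unsigned long long>(immediate));
  printf("fits in uint32? = %s\n",
         (static_cast<uint64_t>(immediate) >> 32) == 0 ? "yes" : "no");
  printf("low 32 bits     = 0x%08x\n",
         static_cast<uint32_t>(immediate & 0xffffffff));
  return 0;
}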