Commit 2e327e90 authored by Joey Gouly, committed by Commit Bot

[arm64] Don't use x18 (platform register) in assembler tests

This fixes the assembler tests on Windows arm64, which were previously crashing.
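For background: the Windows arm64 ABI reserves x18 as the platform register (it holds the TEB pointer), so generated code that clobbers it corrupts thread state and crashes. The change below retargets each use of x18 in the tests to a register that is otherwise free in that test (x15, x19, x21, x28, or x30). A minimal standalone sketch of the rule being applied; the SafeToClobberInTest helper is illustrative only and is not V8's actual API:

#include <cstdio>

// Illustrative helper (not V8's API): returns whether an arm64 general-purpose
// register may be freely clobbered by generated test code. x18 is the platform
// register on Windows (TEB pointer) and must be left untouched; 31 encodes
// sp/xzr rather than an ordinary register.
bool SafeToClobberInTest(int xreg) {
  return xreg != 18 && xreg != 31;
}

int main() {
  printf("x18 safe: %d\n", SafeToClobberInTest(18));  // 0: reserved platform register
  printf("x28 safe: %d\n", SafeToClobberInTest(28));  // 1: one of the replacements used here
  return 0;
}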

Bug: v8:9235
Change-Id: I616e6eb00bb71d70195787f80f8b54a9ae33abe6
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/1634919
Commit-Queue: Martyn Capewell <martyn.capewell@arm.com>
Reviewed-by: Jakob Gruber <jgruber@chromium.org>
Cr-Commit-Position: refs/heads/master@{#61981}
parent be060ec0
@@ -370,7 +370,7 @@ TEST(mov) {
   __ Mov(w13, Operand(w11, LSL, 1));
   __ Mov(x14, Operand(x12, LSL, 2));
   __ Mov(w15, Operand(w11, LSR, 3));
-  __ Mov(x18, Operand(x12, LSR, 4));
+  __ Mov(x28, Operand(x12, LSR, 4));
   __ Mov(w19, Operand(w11, ASR, 11));
   __ Mov(x20, Operand(x12, ASR, 12));
   __ Mov(w21, Operand(w11, ROR, 13));
@@ -399,7 +399,7 @@ TEST(mov) {
   CHECK_EQUAL_64(0x00001FFE, x13);
   CHECK_EQUAL_64(0x0000000000003FFCUL, x14);
   CHECK_EQUAL_64(0x000001FF, x15);
-  CHECK_EQUAL_64(0x00000000000000FFUL, x18);
+  CHECK_EQUAL_64(0x00000000000000FFUL, x28);
   CHECK_EQUAL_64(0x00000001, x19);
   CHECK_EQUAL_64(0x0, x20);
   CHECK_EQUAL_64(0x7FF80000, x21);
@@ -517,7 +517,7 @@ TEST(mov_imm_x) {
   __ Mov(x13, 0x0000000000001234L);
   __ Mov(x14, 0x0000000012345678L);
   __ Mov(x15, 0x0000123400005678L);
-  __ Mov(x18, 0x1234000000005678L);
+  __ Mov(x30, 0x1234000000005678L);
   __ Mov(x19, 0x1234000056780000L);
   __ Mov(x20, 0x1234567800000000L);
   __ Mov(x21, 0x1234000000000000L);
@@ -547,7 +547,7 @@ TEST(mov_imm_x) {
   CHECK_EQUAL_64(0x0000000000001234L, x13);
   CHECK_EQUAL_64(0x0000000012345678L, x14);
   CHECK_EQUAL_64(0x0000123400005678L, x15);
-  CHECK_EQUAL_64(0x1234000000005678L, x18);
+  CHECK_EQUAL_64(0x1234000000005678L, x30);
   CHECK_EQUAL_64(0x1234000056780000L, x19);
   CHECK_EQUAL_64(0x1234567800000000L, x20);
   CHECK_EQUAL_64(0x1234000000000000L, x21);
@@ -1095,27 +1095,27 @@ TEST(mul) {
   START();
   __ Mov(x16, 0);
   __ Mov(x17, 1);
-  __ Mov(x18, 0xFFFFFFFF);
+  __ Mov(x15, 0xFFFFFFFF);
   __ Mov(x19, 0xFFFFFFFFFFFFFFFFUL);
   __ Mul(w0, w16, w16);
   __ Mul(w1, w16, w17);
-  __ Mul(w2, w17, w18);
-  __ Mul(w3, w18, w19);
+  __ Mul(w2, w17, w15);
+  __ Mul(w3, w15, w19);
   __ Mul(x4, x16, x16);
-  __ Mul(x5, x17, x18);
-  __ Mul(x6, x18, x19);
+  __ Mul(x5, x17, x15);
+  __ Mul(x6, x15, x19);
   __ Mul(x7, x19, x19);
-  __ Smull(x8, w17, w18);
-  __ Smull(x9, w18, w18);
+  __ Smull(x8, w17, w15);
+  __ Smull(x9, w15, w15);
   __ Smull(x10, w19, w19);
   __ Mneg(w11, w16, w16);
   __ Mneg(w12, w16, w17);
-  __ Mneg(w13, w17, w18);
-  __ Mneg(w14, w18, w19);
+  __ Mneg(w13, w17, w15);
+  __ Mneg(w14, w15, w19);
   __ Mneg(x20, x16, x16);
-  __ Mneg(x21, x17, x18);
-  __ Mneg(x22, x18, x19);
+  __ Mneg(x21, x17, x15);
+  __ Mneg(x22, x15, x19);
   __ Mneg(x23, x19, x19);
   END();
@@ -1170,33 +1170,33 @@ TEST(madd) {
   START();
   __ Mov(x16, 0);
   __ Mov(x17, 1);
-  __ Mov(x18, 0xFFFFFFFF);
+  __ Mov(x28, 0xFFFFFFFF);
   __ Mov(x19, 0xFFFFFFFFFFFFFFFFUL);
   __ Madd(w0, w16, w16, w16);
   __ Madd(w1, w16, w16, w17);
-  __ Madd(w2, w16, w16, w18);
+  __ Madd(w2, w16, w16, w28);
   __ Madd(w3, w16, w16, w19);
   __ Madd(w4, w16, w17, w17);
-  __ Madd(w5, w17, w17, w18);
+  __ Madd(w5, w17, w17, w28);
   __ Madd(w6, w17, w17, w19);
-  __ Madd(w7, w17, w18, w16);
-  __ Madd(w8, w17, w18, w18);
-  __ Madd(w9, w18, w18, w17);
-  __ Madd(w10, w18, w19, w18);
+  __ Madd(w7, w17, w28, w16);
+  __ Madd(w8, w17, w28, w28);
+  __ Madd(w9, w28, w28, w17);
+  __ Madd(w10, w28, w19, w28);
   __ Madd(w11, w19, w19, w19);
   __ Madd(x12, x16, x16, x16);
   __ Madd(x13, x16, x16, x17);
-  __ Madd(x14, x16, x16, x18);
+  __ Madd(x14, x16, x16, x28);
   __ Madd(x15, x16, x16, x19);
   __ Madd(x20, x16, x17, x17);
-  __ Madd(x21, x17, x17, x18);
+  __ Madd(x21, x17, x17, x28);
   __ Madd(x22, x17, x17, x19);
-  __ Madd(x23, x17, x18, x16);
-  __ Madd(x24, x17, x18, x18);
-  __ Madd(x25, x18, x18, x17);
-  __ Madd(x26, x18, x19, x18);
+  __ Madd(x23, x17, x28, x16);
+  __ Madd(x24, x17, x28, x28);
+  __ Madd(x25, x28, x28, x17);
+  __ Madd(x26, x28, x19, x28);
   __ Madd(x27, x19, x19, x19);
   END();
@@ -1237,33 +1237,33 @@ TEST(msub) {
   START();
   __ Mov(x16, 0);
   __ Mov(x17, 1);
-  __ Mov(x18, 0xFFFFFFFF);
+  __ Mov(x28, 0xFFFFFFFF);
   __ Mov(x19, 0xFFFFFFFFFFFFFFFFUL);
   __ Msub(w0, w16, w16, w16);
   __ Msub(w1, w16, w16, w17);
-  __ Msub(w2, w16, w16, w18);
+  __ Msub(w2, w16, w16, w28);
   __ Msub(w3, w16, w16, w19);
   __ Msub(w4, w16, w17, w17);
-  __ Msub(w5, w17, w17, w18);
+  __ Msub(w5, w17, w17, w28);
   __ Msub(w6, w17, w17, w19);
-  __ Msub(w7, w17, w18, w16);
-  __ Msub(w8, w17, w18, w18);
-  __ Msub(w9, w18, w18, w17);
-  __ Msub(w10, w18, w19, w18);
+  __ Msub(w7, w17, w28, w16);
+  __ Msub(w8, w17, w28, w28);
+  __ Msub(w9, w28, w28, w17);
+  __ Msub(w10, w28, w19, w28);
   __ Msub(w11, w19, w19, w19);
   __ Msub(x12, x16, x16, x16);
   __ Msub(x13, x16, x16, x17);
-  __ Msub(x14, x16, x16, x18);
+  __ Msub(x14, x16, x16, x28);
   __ Msub(x15, x16, x16, x19);
   __ Msub(x20, x16, x17, x17);
-  __ Msub(x21, x17, x17, x18);
+  __ Msub(x21, x17, x17, x28);
   __ Msub(x22, x17, x17, x19);
-  __ Msub(x23, x17, x18, x16);
-  __ Msub(x24, x17, x18, x18);
-  __ Msub(x25, x18, x18, x17);
-  __ Msub(x26, x18, x19, x18);
+  __ Msub(x23, x17, x28, x16);
+  __ Msub(x24, x17, x28, x28);
+  __ Msub(x25, x28, x28, x17);
+  __ Msub(x26, x28, x19, x28);
   __ Msub(x27, x19, x19, x19);
   END();
@@ -1349,17 +1349,17 @@ TEST(smaddl_umaddl) {
   START();
   __ Mov(x17, 1);
-  __ Mov(x18, 0xFFFFFFFF);
+  __ Mov(x28, 0xFFFFFFFF);
   __ Mov(x19, 0xFFFFFFFFFFFFFFFFUL);
   __ Mov(x20, 4);
   __ Mov(x21, 0x200000000UL);
-  __ Smaddl(x9, w17, w18, x20);
-  __ Smaddl(x10, w18, w18, x20);
+  __ Smaddl(x9, w17, w28, x20);
+  __ Smaddl(x10, w28, w28, x20);
   __ Smaddl(x11, w19, w19, x20);
   __ Smaddl(x12, w19, w19, x21);
-  __ Umaddl(x13, w17, w18, x20);
-  __ Umaddl(x14, w18, w18, x20);
+  __ Umaddl(x13, w17, w28, x20);
+  __ Umaddl(x14, w28, w28, x20);
   __ Umaddl(x15, w19, w19, x20);
   __ Umaddl(x22, w19, w19, x21);
   END();
@@ -1382,17 +1382,17 @@ TEST(smsubl_umsubl) {
   START();
   __ Mov(x17, 1);
-  __ Mov(x18, 0xFFFFFFFF);
+  __ Mov(x28, 0xFFFFFFFF);
   __ Mov(x19, 0xFFFFFFFFFFFFFFFFUL);
   __ Mov(x20, 4);
   __ Mov(x21, 0x200000000UL);
-  __ Smsubl(x9, w17, w18, x20);
-  __ Smsubl(x10, w18, w18, x20);
+  __ Smsubl(x9, w17, w28, x20);
+  __ Smsubl(x10, w28, w28, x20);
   __ Smsubl(x11, w19, w19, x20);
   __ Smsubl(x12, w19, w19, x21);
-  __ Umsubl(x13, w17, w18, x20);
-  __ Umsubl(x14, w18, w18, x20);
+  __ Umsubl(x13, w17, w28, x20);
+  __ Umsubl(x14, w28, w28, x20);
   __ Umsubl(x15, w19, w19, x20);
   __ Umsubl(x22, w19, w19, x21);
   END();
@@ -1416,7 +1416,7 @@ TEST(div) {
   START();
   __ Mov(x16, 1);
   __ Mov(x17, 0xFFFFFFFF);
-  __ Mov(x18, 0xFFFFFFFFFFFFFFFFUL);
+  __ Mov(x30, 0xFFFFFFFFFFFFFFFFUL);
   __ Mov(x19, 0x80000000);
   __ Mov(x20, 0x8000000000000000UL);
   __ Mov(x21, 2);
@@ -1425,13 +1425,13 @@ TEST(div) {
   __ Udiv(w1, w17, w16);
   __ Sdiv(w2, w16, w16);
   __ Sdiv(w3, w16, w17);
-  __ Sdiv(w4, w17, w18);
+  __ Sdiv(w4, w17, w30);
   __ Udiv(x5, x16, x16);
-  __ Udiv(x6, x17, x18);
+  __ Udiv(x6, x17, x30);
   __ Sdiv(x7, x16, x16);
   __ Sdiv(x8, x16, x17);
-  __ Sdiv(x9, x17, x18);
+  __ Sdiv(x9, x17, x30);
   __ Udiv(w10, w19, w21);
   __ Sdiv(w11, w19, w21);
@@ -1442,16 +1442,16 @@ TEST(div) {
   __ Udiv(w22, w19, w17);
   __ Sdiv(w23, w19, w17);
-  __ Udiv(x24, x20, x18);
-  __ Sdiv(x25, x20, x18);
+  __ Udiv(x24, x20, x30);
+  __ Sdiv(x25, x20, x30);
   __ Udiv(x26, x16, x21);
   __ Sdiv(x27, x16, x21);
-  __ Udiv(x28, x18, x21);
-  __ Sdiv(x29, x18, x21);
+  __ Udiv(x28, x30, x21);
+  __ Sdiv(x29, x30, x21);
   __ Mov(x17, 0);
-  __ Udiv(w18, w16, w17);
+  __ Udiv(w30, w16, w17);
   __ Sdiv(w19, w16, w17);
   __ Udiv(x20, x16, x17);
   __ Sdiv(x21, x16, x17);
@@ -1483,7 +1483,7 @@ TEST(div) {
   CHECK_EQUAL_64(0, x27);
   CHECK_EQUAL_64(0x7FFFFFFFFFFFFFFFUL, x28);
   CHECK_EQUAL_64(0, x29);
-  CHECK_EQUAL_64(0, x18);
+  CHECK_EQUAL_64(0, x30);
   CHECK_EQUAL_64(0, x19);
   CHECK_EQUAL_64(0, x20);
   CHECK_EQUAL_64(0, x21);
@@ -1899,17 +1899,17 @@ TEST(compare_branch) {
   __ Mov(x3, 1);
   __ Bind(&nzf_end);
-  __ Mov(x18, 0xFFFFFFFF00000000UL);
+  __ Mov(x19, 0xFFFFFFFF00000000UL);
   Label a, a_end;
-  __ Cbz(w18, &a);
+  __ Cbz(w19, &a);
   __ B(&a_end);
   __ Bind(&a);
   __ Mov(x4, 1);
   __ Bind(&a_end);
   Label b, b_end;
-  __ Cbnz(w18, &b);
+  __ Cbnz(w19, &b);
   __ B(&b_end);
   __ Bind(&b);
   __ Mov(x5, 1);
@@ -2357,17 +2357,17 @@ TEST(ldr_str_offset) {
   START();
   __ Mov(x17, src_base);
-  __ Mov(x18, dst_base);
+  __ Mov(x19, dst_base);
   __ Ldr(w0, MemOperand(x17));
-  __ Str(w0, MemOperand(x18));
+  __ Str(w0, MemOperand(x19));
   __ Ldr(w1, MemOperand(x17, 4));
-  __ Str(w1, MemOperand(x18, 12));
+  __ Str(w1, MemOperand(x19, 12));
   __ Ldr(x2, MemOperand(x17, 8));
-  __ Str(x2, MemOperand(x18, 16));
+  __ Str(x2, MemOperand(x19, 16));
   __ Ldrb(w3, MemOperand(x17, 1));
-  __ Strb(w3, MemOperand(x18, 25));
+  __ Strb(w3, MemOperand(x19, 25));
   __ Ldrh(w4, MemOperand(x17, 2));
-  __ Strh(w4, MemOperand(x18, 33));
+  __ Strh(w4, MemOperand(x19, 33));
   END();
   RUN();
@@ -2383,7 +2383,7 @@ TEST(ldr_str_offset) {
   CHECK_EQUAL_64(0x7654, x4);
   CHECK_EQUAL_64(0x765400, dst[4]);
   CHECK_EQUAL_64(src_base, x17);
-  CHECK_EQUAL_64(dst_base, x18);
+  CHECK_EQUAL_64(dst_base, x19);
 }
 TEST(ldr_str_wide) {
@@ -2443,7 +2443,7 @@ TEST(ldr_str_preindex) {
   START();
   __ Mov(x17, src_base);
-  __ Mov(x18, dst_base);
+  __ Mov(x28, dst_base);
   __ Mov(x19, src_base);
   __ Mov(x20, dst_base);
   __ Mov(x21, src_base + 16);
@@ -2453,7 +2453,7 @@ TEST(ldr_str_preindex) {
   __ Mov(x25, src_base);
   __ Mov(x26, dst_base);
   __ Ldr(w0, MemOperand(x17, 4, PreIndex));
-  __ Str(w0, MemOperand(x18, 12, PreIndex));
+  __ Str(w0, MemOperand(x28, 12, PreIndex));
   __ Ldr(x1, MemOperand(x19, 8, PreIndex));
   __ Str(x1, MemOperand(x20, 16, PreIndex));
   __ Ldr(w2, MemOperand(x21, -4, PreIndex));
@@ -2477,7 +2477,7 @@ TEST(ldr_str_preindex) {
   CHECK_EQUAL_64(0x9876, x4);
   CHECK_EQUAL_64(0x987600, dst[5]);
   CHECK_EQUAL_64(src_base + 4, x17);
-  CHECK_EQUAL_64(dst_base + 12, x18);
+  CHECK_EQUAL_64(dst_base + 12, x28);
   CHECK_EQUAL_64(src_base + 8, x19);
   CHECK_EQUAL_64(dst_base + 16, x20);
   CHECK_EQUAL_64(src_base + 12, x21);
@@ -2499,7 +2499,7 @@ TEST(ldr_str_postindex) {
   START();
   __ Mov(x17, src_base + 4);
-  __ Mov(x18, dst_base + 12);
+  __ Mov(x28, dst_base + 12);
   __ Mov(x19, src_base + 8);
   __ Mov(x20, dst_base + 16);
   __ Mov(x21, src_base + 8);
@@ -2509,7 +2509,7 @@ TEST(ldr_str_postindex) {
   __ Mov(x25, src_base + 3);
   __ Mov(x26, dst_base + 41);
   __ Ldr(w0, MemOperand(x17, 4, PostIndex));
-  __ Str(w0, MemOperand(x18, 12, PostIndex));
+  __ Str(w0, MemOperand(x28, 12, PostIndex));
   __ Ldr(x1, MemOperand(x19, 8, PostIndex));
   __ Str(x1, MemOperand(x20, 16, PostIndex));
   __ Ldr(x2, MemOperand(x21, -8, PostIndex));
@@ -2533,7 +2533,7 @@ TEST(ldr_str_postindex) {
   CHECK_EQUAL_64(0x9876, x4);
   CHECK_EQUAL_64(0x987600, dst[5]);
   CHECK_EQUAL_64(src_base + 8, x17);
-  CHECK_EQUAL_64(dst_base + 24, x18);
+  CHECK_EQUAL_64(dst_base + 24, x28);
   CHECK_EQUAL_64(src_base + 16, x19);
   CHECK_EQUAL_64(dst_base + 32, x20);
   CHECK_EQUAL_64(src_base, x21);
@@ -2591,7 +2591,7 @@ TEST(load_store_regoffset) {
   START();
   __ Mov(x16, src_base);
   __ Mov(x17, dst_base);
-  __ Mov(x18, src_base + 3 * sizeof(src[0]));
+  __ Mov(x21, src_base + 3 * sizeof(src[0]));
   __ Mov(x19, dst_base + 3 * sizeof(dst[0]));
   __ Mov(x20, dst_base + 4 * sizeof(dst[0]));
   __ Mov(x24, 0);
@@ -2603,9 +2603,9 @@ TEST(load_store_regoffset) {
   __ Ldr(w0, MemOperand(x16, x24));
   __ Ldr(x1, MemOperand(x16, x25));
-  __ Ldr(w2, MemOperand(x18, x26));
-  __ Ldr(w3, MemOperand(x18, x27, SXTW));
-  __ Ldr(w4, MemOperand(x18, x28, SXTW, 2));
+  __ Ldr(w2, MemOperand(x21, x26));
+  __ Ldr(w3, MemOperand(x21, x27, SXTW));
+  __ Ldr(w4, MemOperand(x21, x28, SXTW, 2));
   __ Str(w0, MemOperand(x17, x24));
   __ Str(x1, MemOperand(x17, x25));
   __ Str(w2, MemOperand(x20, x29, SXTW, 2));
@@ -2635,13 +2635,13 @@ TEST(load_store_float) {
   START();
   __ Mov(x17, src_base);
-  __ Mov(x18, dst_base);
+  __ Mov(x28, dst_base);
   __ Mov(x19, src_base);
   __ Mov(x20, dst_base);
   __ Mov(x21, src_base);
   __ Mov(x22, dst_base);
   __ Ldr(s0, MemOperand(x17, sizeof(src[0])));
-  __ Str(s0, MemOperand(x18, sizeof(dst[0]), PostIndex));
+  __ Str(s0, MemOperand(x28, sizeof(dst[0]), PostIndex));
   __ Ldr(s1, MemOperand(x19, sizeof(src[0]), PostIndex));
   __ Str(s1, MemOperand(x20, 2 * sizeof(dst[0]), PreIndex));
   __ Ldr(s2, MemOperand(x21, 2 * sizeof(src[0]), PreIndex));
@@ -2657,7 +2657,7 @@ TEST(load_store_float) {
   CHECK_EQUAL_FP32(3.0, s2);
   CHECK_EQUAL_FP32(3.0, dst[1]);
   CHECK_EQUAL_64(src_base, x17);
-  CHECK_EQUAL_64(dst_base + sizeof(dst[0]), x18);
+  CHECK_EQUAL_64(dst_base + sizeof(dst[0]), x28);
   CHECK_EQUAL_64(src_base + sizeof(src[0]), x19);
   CHECK_EQUAL_64(dst_base + 2 * sizeof(dst[0]), x20);
   CHECK_EQUAL_64(src_base + 2 * sizeof(src[0]), x21);
@@ -2675,13 +2675,13 @@ TEST(load_store_double) {
   START();
   __ Mov(x17, src_base);
-  __ Mov(x18, dst_base);
+  __ Mov(x28, dst_base);
   __ Mov(x19, src_base);
   __ Mov(x20, dst_base);
   __ Mov(x21, src_base);
   __ Mov(x22, dst_base);
   __ Ldr(d0, MemOperand(x17, sizeof(src[0])));
-  __ Str(d0, MemOperand(x18, sizeof(dst[0]), PostIndex));
+  __ Str(d0, MemOperand(x28, sizeof(dst[0]), PostIndex));
   __ Ldr(d1, MemOperand(x19, sizeof(src[0]), PostIndex));
   __ Str(d1, MemOperand(x20, 2 * sizeof(dst[0]), PreIndex));
   __ Ldr(d2, MemOperand(x21, 2 * sizeof(src[0]), PreIndex));
@@ -2697,7 +2697,7 @@ TEST(load_store_double) {
   CHECK_EQUAL_FP64(3.0, d2);
   CHECK_EQUAL_FP64(3.0, dst[1]);
   CHECK_EQUAL_64(src_base, x17);
-  CHECK_EQUAL_64(dst_base + sizeof(dst[0]), x18);
+  CHECK_EQUAL_64(dst_base + sizeof(dst[0]), x28);
   CHECK_EQUAL_64(src_base + sizeof(src[0]), x19);
   CHECK_EQUAL_64(dst_base + 2 * sizeof(dst[0]), x20);
   CHECK_EQUAL_64(src_base + 2 * sizeof(src[0]), x21);
@@ -2715,13 +2715,13 @@ TEST(load_store_b) {
   START();
   __ Mov(x17, src_base);
-  __ Mov(x18, dst_base);
+  __ Mov(x28, dst_base);
   __ Mov(x19, src_base);
   __ Mov(x20, dst_base);
   __ Mov(x21, src_base);
   __ Mov(x22, dst_base);
   __ Ldr(b0, MemOperand(x17, sizeof(src[0])));
-  __ Str(b0, MemOperand(x18, sizeof(dst[0]), PostIndex));
+  __ Str(b0, MemOperand(x28, sizeof(dst[0]), PostIndex));
   __ Ldr(b1, MemOperand(x19, sizeof(src[0]), PostIndex));
   __ Str(b1, MemOperand(x20, 2 * sizeof(dst[0]), PreIndex));
   __ Ldr(b2, MemOperand(x21, 2 * sizeof(src[0]), PreIndex));
@@ -2737,7 +2737,7 @@ TEST(load_store_b) {
   CHECK_EQUAL_128(0, 0x34, q2);
   CHECK_EQUAL_64(0x34, dst[1]);
   CHECK_EQUAL_64(src_base, x17);
-  CHECK_EQUAL_64(dst_base + sizeof(dst[0]), x18);
+  CHECK_EQUAL_64(dst_base + sizeof(dst[0]), x28);
   CHECK_EQUAL_64(src_base + sizeof(src[0]), x19);
   CHECK_EQUAL_64(dst_base + 2 * sizeof(dst[0]), x20);
   CHECK_EQUAL_64(src_base + 2 * sizeof(src[0]), x21);
@@ -2755,13 +2755,13 @@ TEST(load_store_h) {
   START();
   __ Mov(x17, src_base);
-  __ Mov(x18, dst_base);
+  __ Mov(x28, dst_base);
   __ Mov(x19, src_base);
   __ Mov(x20, dst_base);
   __ Mov(x21, src_base);
   __ Mov(x22, dst_base);
   __ Ldr(h0, MemOperand(x17, sizeof(src[0])));
-  __ Str(h0, MemOperand(x18, sizeof(dst[0]), PostIndex));
+  __ Str(h0, MemOperand(x28, sizeof(dst[0]), PostIndex));
   __ Ldr(h1, MemOperand(x19, sizeof(src[0]), PostIndex));
   __ Str(h1, MemOperand(x20, 2 * sizeof(dst[0]), PreIndex));
   __ Ldr(h2, MemOperand(x21, 2 * sizeof(src[0]), PreIndex));
@@ -2777,7 +2777,7 @@ TEST(load_store_h) {
   CHECK_EQUAL_128(0, 0x3456, q2);
   CHECK_EQUAL_64(0x3456, dst[1]);
   CHECK_EQUAL_64(src_base, x17);
-  CHECK_EQUAL_64(dst_base + sizeof(dst[0]), x18);
+  CHECK_EQUAL_64(dst_base + sizeof(dst[0]), x28);
   CHECK_EQUAL_64(src_base + sizeof(src[0]), x19);
   CHECK_EQUAL_64(dst_base + 2 * sizeof(dst[0]), x20);
   CHECK_EQUAL_64(src_base + 2 * sizeof(src[0]), x21);
@@ -2800,13 +2800,13 @@ TEST(load_store_q) {
   START();
   __ Mov(x17, src_base);
-  __ Mov(x18, dst_base);
+  __ Mov(x28, dst_base);
   __ Mov(x19, src_base);
   __ Mov(x20, dst_base);
   __ Mov(x21, src_base);
   __ Mov(x22, dst_base);
   __ Ldr(q0, MemOperand(x17, 16));
-  __ Str(q0, MemOperand(x18, 16, PostIndex));
+  __ Str(q0, MemOperand(x28, 16, PostIndex));
   __ Ldr(q1, MemOperand(x19, 16, PostIndex));
   __ Str(q1, MemOperand(x20, 32, PreIndex));
   __ Ldr(q2, MemOperand(x21, 32, PreIndex));
@@ -2825,7 +2825,7 @@ TEST(load_store_q) {
   CHECK_EQUAL_64(0x02E0CEAC8A684624, dst[2]);
   CHECK_EQUAL_64(0x200EECCAA8866442, dst[3]);
   CHECK_EQUAL_64(src_base, x17);
-  CHECK_EQUAL_64(dst_base + 16, x18);
+  CHECK_EQUAL_64(dst_base + 16, x28);
   CHECK_EQUAL_64(src_base + 16, x19);
   CHECK_EQUAL_64(dst_base + 32, x20);
   CHECK_EQUAL_64(src_base + 32, x21);
@@ -2892,7 +2892,7 @@ TEST(neon_ld1_d_postindex) {
   START();
   __ Mov(x17, src_base);
-  __ Mov(x18, src_base + 1);
+  __ Mov(x28, src_base + 1);
   __ Mov(x19, src_base + 2);
   __ Mov(x20, src_base + 3);
   __ Mov(x21, src_base + 4);
@@ -2900,7 +2900,7 @@ TEST(neon_ld1_d_postindex) {
   __ Mov(x23, 1);
   __ Ldr(q2, MemOperand(x17));  // Initialise top 64-bits of Q register.
   __ Ld1(v2.V8B(), MemOperand(x17, x23, PostIndex));
-  __ Ld1(v3.V8B(), v4.V8B(), MemOperand(x18, 16, PostIndex));
+  __ Ld1(v3.V8B(), v4.V8B(), MemOperand(x28, 16, PostIndex));
   __ Ld1(v5.V4H(), v6.V4H(), v7.V4H(), MemOperand(x19, 24, PostIndex));
   __ Ld1(v16.V2S(), v17.V2S(), v18.V2S(), v19.V2S(),
          MemOperand(x20, 32, PostIndex));
@@ -2931,7 +2931,7 @@ TEST(neon_ld1_d_postindex) {
   CHECK_EQUAL_128(0, 0x1C1B1A1918171615, q22);
   CHECK_EQUAL_128(0, 0x24232221201F1E1D, q23);
   CHECK_EQUAL_64(src_base + 1, x17);
-  CHECK_EQUAL_64(src_base + 1 + 16, x18);
+  CHECK_EQUAL_64(src_base + 1 + 16, x28);
   CHECK_EQUAL_64(src_base + 2 + 24, x19);
   CHECK_EQUAL_64(src_base + 3 + 32, x20);
   CHECK_EQUAL_64(src_base + 4 + 32, x21);
@@ -2991,13 +2991,13 @@ TEST(neon_ld1_q_postindex) {
   START();
   __ Mov(x17, src_base);
-  __ Mov(x18, src_base + 1);
+  __ Mov(x28, src_base + 1);
   __ Mov(x19, src_base + 2);
   __ Mov(x20, src_base + 3);
   __ Mov(x21, src_base + 4);
   __ Mov(x22, 1);
   __ Ld1(v2.V16B(), MemOperand(x17, x22, PostIndex));
-  __ Ld1(v3.V16B(), v4.V16B(), MemOperand(x18, 32, PostIndex));
+  __ Ld1(v3.V16B(), v4.V16B(), MemOperand(x28, 32, PostIndex));
   __ Ld1(v5.V8H(), v6.V8H(), v7.V8H(), MemOperand(x19, 48, PostIndex));
   __ Ld1(v16.V4S(), v17.V4S(), v18.V4S(), v19.V4S(),
          MemOperand(x20, 64, PostIndex));
@@ -3022,7 +3022,7 @@ TEST(neon_ld1_q_postindex) {
   CHECK_EQUAL_128(0x333231302F2E2D2C, 0x2B2A292827262524, q0);
   CHECK_EQUAL_128(0x434241403F3E3D3C, 0x3B3A393837363534, q1);
   CHECK_EQUAL_64(src_base + 1, x17);
-  CHECK_EQUAL_64(src_base + 1 + 32, x18);
+  CHECK_EQUAL_64(src_base + 1 + 32, x28);
   CHECK_EQUAL_64(src_base + 2 + 48, x19);
   CHECK_EQUAL_64(src_base + 3 + 64, x20);
   CHECK_EQUAL_64(src_base + 4 + 64, x21);
@@ -3135,13 +3135,13 @@ TEST(neon_ld2_d_postindex) {
   START();
   __ Mov(x17, src_base);
-  __ Mov(x18, src_base + 1);
+  __ Mov(x28, src_base + 1);
   __ Mov(x19, src_base + 2);
   __ Mov(x20, src_base + 3);
   __ Mov(x21, src_base + 4);
   __ Mov(x22, 1);
   __ Ld2(v2.V8B(), v3.V8B(), MemOperand(x17, x22, PostIndex));
-  __ Ld2(v4.V8B(), v5.V8B(), MemOperand(x18, 16, PostIndex));
+  __ Ld2(v4.V8B(), v5.V8B(), MemOperand(x28, 16, PostIndex));
   __ Ld2(v5.V4H(), v6.V4H(), MemOperand(x19, 16, PostIndex));
   __ Ld2(v16.V2S(), v17.V2S(), MemOperand(x20, 16, PostIndex));
   __ Ld2(v31.V2S(), v0.V2S(), MemOperand(x21, 16, PostIndex));
@@ -3160,7 +3160,7 @@ TEST(neon_ld2_d_postindex) {
   CHECK_EQUAL_128(0, 0x131211100B0A0908, q0);
   CHECK_EQUAL_64(src_base + 1, x17);
-  CHECK_EQUAL_64(src_base + 1 + 16, x18);
+  CHECK_EQUAL_64(src_base + 1 + 16, x28);
   CHECK_EQUAL_64(src_base + 2 + 16, x19);
   CHECK_EQUAL_64(src_base + 3 + 16, x20);
   CHECK_EQUAL_64(src_base + 4 + 16, x21);
@@ -3215,13 +3215,13 @@ TEST(neon_ld2_q_postindex) {
   START();
   __ Mov(x17, src_base);
-  __ Mov(x18, src_base + 1);
+  __ Mov(x28, src_base + 1);
   __ Mov(x19, src_base + 2);
   __ Mov(x20, src_base + 3);
   __ Mov(x21, src_base + 4);
   __ Mov(x22, 1);
   __ Ld2(v2.V16B(), v3.V16B(), MemOperand(x17, x22, PostIndex));
-  __ Ld2(v4.V16B(), v5.V16B(), MemOperand(x18, 32, PostIndex));
+  __ Ld2(v4.V16B(), v5.V16B(), MemOperand(x28, 32, PostIndex));
   __ Ld2(v6.V8H(), v7.V8H(), MemOperand(x19, 32, PostIndex));
   __ Ld2(v16.V4S(), v17.V4S(), MemOperand(x20, 32, PostIndex));
   __ Ld2(v31.V2D(), v0.V2D(), MemOperand(x21, 32, PostIndex));
@@ -3241,7 +3241,7 @@ TEST(neon_ld2_q_postindex) {
   CHECK_EQUAL_128(0x232221201F1E1D1C, 0x131211100F0E0D0C, q0);
   CHECK_EQUAL_64(src_base + 1, x17);
-  CHECK_EQUAL_64(src_base + 1 + 32, x18);
+  CHECK_EQUAL_64(src_base + 1 + 32, x28);
   CHECK_EQUAL_64(src_base + 2 + 32, x19);
   CHECK_EQUAL_64(src_base + 3 + 32, x20);
   CHECK_EQUAL_64(src_base + 4 + 32, x21);
@@ -3337,7 +3337,7 @@ TEST(neon_ld2_lane_postindex) {
   START();
   __ Mov(x17, src_base);
-  __ Mov(x18, src_base);
+  __ Mov(x28, src_base);
   __ Mov(x19, src_base);
   __ Mov(x20, src_base);
   __ Mov(x21, src_base);
@@ -3351,7 +3351,7 @@ TEST(neon_ld2_lane_postindex) {
   }
   for (int i = 7; i >= 0; i--) {
-    __ Ld2(v2.H(), v3.H(), i, MemOperand(x18, 4, PostIndex));
+    __ Ld2(v2.H(), v3.H(), i, MemOperand(x28, 4, PostIndex));
   }
   for (int i = 3; i >= 0; i--) {
@@ -3409,7 +3409,7 @@ TEST(neon_ld2_lane_postindex) {
   CHECK_EQUAL_128(0x0F0E0D0C0B0A0908, 0x1716151413121110, q15);
   CHECK_EQUAL_64(src_base + 32, x17);
-  CHECK_EQUAL_64(src_base + 32, x18);
+  CHECK_EQUAL_64(src_base + 32, x28);
   CHECK_EQUAL_64(src_base + 32, x19);
   CHECK_EQUAL_64(src_base + 32, x20);
   CHECK_EQUAL_64(src_base + 1, x21);
@@ -3430,7 +3430,6 @@ TEST(neon_ld2_alllanes) {
   START();
   __ Mov(x17, src_base + 1);
-  __ Mov(x18, 1);
   __ Ld2r(v0.V8B(), v1.V8B(), MemOperand(x17));
   __ Add(x17, x17, 2);
   __ Ld2r(v2.V16B(), v3.V16B(), MemOperand(x17));
@@ -3476,12 +3475,12 @@ TEST(neon_ld2_alllanes_postindex) {
   START();
   __ Mov(x17, src_base + 1);
-  __ Mov(x18, 1);
+  __ Mov(x19, 1);
   __ Ld2r(v0.V8B(), v1.V8B(), MemOperand(x17, 2, PostIndex));
-  __ Ld2r(v2.V16B(), v3.V16B(), MemOperand(x17, x18, PostIndex));
-  __ Ld2r(v4.V4H(), v5.V4H(), MemOperand(x17, x18, PostIndex));
+  __ Ld2r(v2.V16B(), v3.V16B(), MemOperand(x17, x19, PostIndex));
+  __ Ld2r(v4.V4H(), v5.V4H(), MemOperand(x17, x19, PostIndex));
   __ Ld2r(v6.V8H(), v7.V8H(), MemOperand(x17, 4, PostIndex));
-  __ Ld2r(v8_.V2S(), v9.V2S(), MemOperand(x17, x18, PostIndex));
+  __ Ld2r(v8_.V2S(), v9.V2S(), MemOperand(x17, x19, PostIndex));
   __ Ld2r(v10.V4S(), v11.V4S(), MemOperand(x17, 8, PostIndex));
   __ Ld2r(v12.V2D(), v13.V2D(), MemOperand(x17, 16, PostIndex));
   END();
@@ -3554,13 +3553,13 @@ TEST(neon_ld3_d_postindex) {
   START();
   __ Mov(x17, src_base);
-  __ Mov(x18, src_base + 1);
+  __ Mov(x28, src_base + 1);
   __ Mov(x19, src_base + 2);
   __ Mov(x20, src_base + 3);
   __ Mov(x21, src_base + 4);
   __ Mov(x22, 1);
   __ Ld3(v2.V8B(), v3.V8B(), v4.V8B(), MemOperand(x17, x22, PostIndex));
-  __ Ld3(v5.V8B(), v6.V8B(), v7.V8B(), MemOperand(x18, 24, PostIndex));
+  __ Ld3(v5.V8B(), v6.V8B(), v7.V8B(), MemOperand(x28, 24, PostIndex));
   __ Ld3(v8_.V4H(), v9.V4H(), v10.V4H(), MemOperand(x19, 24, PostIndex));
   __ Ld3(v11.V2S(), v12.V2S(), v13.V2S(), MemOperand(x20, 24, PostIndex));
   __ Ld3(v31.V2S(), v0.V2S(), v1.V2S(), MemOperand(x21, 24, PostIndex));
@@ -3585,7 +3584,7 @@ TEST(neon_ld3_d_postindex) {
   CHECK_EQUAL_128(0, 0x1B1A19180F0E0D0C, q1);
   CHECK_EQUAL_64(src_base + 1, x17);
-  CHECK_EQUAL_64(src_base + 1 + 24, x18);
+  CHECK_EQUAL_64(src_base + 1 + 24, x28);
   CHECK_EQUAL_64(src_base + 2 + 24, x19);
   CHECK_EQUAL_64(src_base + 3 + 24, x20);
   CHECK_EQUAL_64(src_base + 4 + 24, x21);
@@ -3645,14 +3644,14 @@ TEST(neon_ld3_q_postindex) {
   START();
   __ Mov(x17, src_base);
-  __ Mov(x18, src_base + 1);
+  __ Mov(x28, src_base + 1);
   __ Mov(x19, src_base + 2);
   __ Mov(x20, src_base + 3);
   __ Mov(x21, src_base + 4);
   __ Mov(x22, 1);
   __ Ld3(v2.V16B(), v3.V16B(), v4.V16B(), MemOperand(x17, x22, PostIndex));
-  __ Ld3(v5.V16B(), v6.V16B(), v7.V16B(), MemOperand(x18, 48, PostIndex));
+  __ Ld3(v5.V16B(), v6.V16B(), v7.V16B(), MemOperand(x28, 48, PostIndex));
   __ Ld3(v8_.V8H(), v9.V8H(), v10.V8H(), MemOperand(x19, 48, PostIndex));
   __ Ld3(v11.V4S(), v12.V4S(), v13.V4S(), MemOperand(x20, 48, PostIndex));
   __ Ld3(v31.V2D(), v0.V2D(), v1.V2D(), MemOperand(x21, 48, PostIndex));
@@ -3677,7 +3676,7 @@ TEST(neon_ld3_q_postindex) {
   CHECK_EQUAL_128(0x333231302F2E2D2C, 0x1B1A191817161514, q1);
   CHECK_EQUAL_64(src_base + 1, x17);
-  CHECK_EQUAL_64(src_base + 1 + 48, x18);
+  CHECK_EQUAL_64(src_base + 1 + 48, x28);
   CHECK_EQUAL_64(src_base + 2 + 48, x19);
   CHECK_EQUAL_64(src_base + 3 + 48, x20);
   CHECK_EQUAL_64(src_base + 4 + 48, x21);
@@ -3781,7 +3780,7 @@ TEST(neon_ld3_lane_postindex) {
   // Test loading whole register by element.
   __ Mov(x17, src_base);
-  __ Mov(x18, src_base);
+  __ Mov(x28, src_base);
   __ Mov(x19, src_base);
   __ Mov(x20, src_base);
   __ Mov(x21, src_base);
@@ -3793,7 +3792,7 @@ TEST(neon_ld3_lane_postindex) {
   }
   for (int i = 7; i >= 0; i--) {
-    __ Ld3(v3.H(), v4.H(), v5.H(), i, MemOperand(x18, 6, PostIndex));
+    __ Ld3(v3.H(), v4.H(), v5.H(), i, MemOperand(x28, 6, PostIndex));
   }
   for (int i = 3; i >= 0; i--) {
@@ -3863,7 +3862,7 @@ TEST(neon_ld3_lane_postindex) {
   CHECK_EQUAL_128(0x1716151413121110, 0x2726252423222120, q23);
   CHECK_EQUAL_64(src_base + 48, x17);
-  CHECK_EQUAL_64(src_base + 48, x18);
+  CHECK_EQUAL_64(src_base + 48, x28);
   CHECK_EQUAL_64(src_base + 48, x19);
   CHECK_EQUAL_64(src_base + 48, x20);
   CHECK_EQUAL_64(src_base + 1, x21);
@@ -3884,7 +3883,6 @@ TEST(neon_ld3_alllanes) {
   START();
   __ Mov(x17, src_base + 1);
-  __ Mov(x18, 1);
   __ Ld3r(v0.V8B(), v1.V8B(), v2.V8B(), MemOperand(x17));
   __ Add(x17, x17, 3);
   __ Ld3r(v3.V16B(), v4.V16B(), v5.V16B(), MemOperand(x17));
@@ -3934,17 +3932,15 @@ TEST(neon_ld3_alllanes_postindex) {
     src[i] = i;
   }
   uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
-  __ Mov(x17, src_base + 1);
-  __ Mov(x18, 1);
   START();
   __ Mov(x17, src_base + 1);
-  __ Mov(x18, 1);
+  __ Mov(x19, 1);
   __ Ld3r(v0.V8B(), v1.V8B(), v2.V8B(), MemOperand(x17, 3, PostIndex));
-  __ Ld3r(v3.V16B(), v4.V16B(), v5.V16B(), MemOperand(x17, x18, PostIndex));
-  __ Ld3r(v6.V4H(), v7.V4H(), v8_.V4H(), MemOperand(x17, x18, PostIndex));
+  __ Ld3r(v3.V16B(), v4.V16B(), v5.V16B(), MemOperand(x17, x19, PostIndex));
+  __ Ld3r(v6.V4H(), v7.V4H(), v8_.V4H(), MemOperand(x17, x19, PostIndex));
   __ Ld3r(v9.V8H(), v10.V8H(), v11.V8H(), MemOperand(x17, 6, PostIndex));
-  __ Ld3r(v12.V2S(), v13.V2S(), v14.V2S(), MemOperand(x17, x18, PostIndex));
+  __ Ld3r(v12.V2S(), v13.V2S(), v14.V2S(), MemOperand(x17, x19, PostIndex));
   __ Ld3r(v15.V4S(), v16.V4S(), v17.V4S(), MemOperand(x17, 12, PostIndex));
   __ Ld3r(v18.V2D(), v19.V2D(), v20.V2D(), MemOperand(x17, 24, PostIndex));
   END();
@@ -4027,7 +4023,7 @@ TEST(neon_ld4_d_postindex) {
   START();
   __ Mov(x17, src_base);
-  __ Mov(x18, src_base + 1);
+  __ Mov(x28, src_base + 1);
   __ Mov(x19, src_base + 2);
   __ Mov(x20, src_base + 3);
   __ Mov(x21, src_base + 4);
@@ -4035,7 +4031,7 @@ TEST(neon_ld4_d_postindex) {
   __ Ld4(v2.V8B(), v3.V8B(), v4.V8B(), v5.V8B(),
          MemOperand(x17, x22, PostIndex));
   __ Ld4(v6.V8B(), v7.V8B(), v8_.V8B(), v9.V8B(),
-         MemOperand(x18, 32, PostIndex));
+         MemOperand(x28, 32, PostIndex));
   __ Ld4(v10.V4H(), v11.V4H(), v12.V4H(), v13.V4H(),
          MemOperand(x19, 32, PostIndex));
   __ Ld4(v14.V2S(), v15.V2S(), v16.V2S(), v17.V2S(),
@@ -4068,7 +4064,7 @@ TEST(neon_ld4_d_postindex) {
   CHECK_EQUAL_128(0, 0x2322212013121110, q1);
   CHECK_EQUAL_64(src_base + 1, x17);
-  CHECK_EQUAL_64(src_base + 1 + 32, x18);
+  CHECK_EQUAL_64(src_base + 1 + 32, x28);
   CHECK_EQUAL_64(src_base + 2 + 32, x19);
   CHECK_EQUAL_64(src_base + 3 + 32, x20);
   CHECK_EQUAL_64(src_base + 4 + 32, x21);
@@ -4133,7 +4129,7 @@ TEST(neon_ld4_q_postindex) {
   START();
   __ Mov(x17, src_base);
-  __ Mov(x18, src_base + 1);
+  __ Mov(x28, src_base + 1);
   __ Mov(x19, src_base + 2);
   __ Mov(x20, src_base + 3);
   __ Mov(x21, src_base + 4);
@@ -4142,7 +4138,7 @@ TEST(neon_ld4_q_postindex) {
   __ Ld4(v2.V16B(), v3.V16B(), v4.V16B(), v5.V16B(),
          MemOperand(x17, x22, PostIndex));
   __ Ld4(v6.V16B(), v7.V16B(), v8_.V16B(), v9.V16B(),
-         MemOperand(x18, 64, PostIndex));
+         MemOperand(x28, 64, PostIndex));
   __ Ld4(v10.V8H(), v11.V8H(), v12.V8H(), v13.V8H(),
          MemOperand(x19, 64, PostIndex));
   __ Ld4(v14.V4S(), v15.V4S(), v16.V4S(), v17.V4S(),
@@ -4175,7 +4171,7 @@ TEST(neon_ld4_q_postindex) {
   CHECK_EQUAL_128(0x434241403F3E3D3C, 0x232221201F1E1D1C, q1);
   CHECK_EQUAL_64(src_base + 1, x17);
-  CHECK_EQUAL_64(src_base + 1 + 64, x18);
+  CHECK_EQUAL_64(src_base + 1 + 64, x28);
   CHECK_EQUAL_64(src_base + 2 + 64, x19);
   CHECK_EQUAL_64(src_base + 3 + 64, x20);
   CHECK_EQUAL_64(src_base + 4 + 64, x21);
@@ -4304,9 +4300,9 @@ TEST(neon_ld4_lane_postindex) {
     __ Ld4(v0.B(), v1.B(), v2.B(), v3.B(), i, MemOperand(x17, 4, PostIndex));
   }
-  __ Mov(x18, src_base);
+  __ Mov(x28, src_base);
   for (int i = 7; i >= 0; i--) {
-    __ Ld4(v4.H(), v5.H(), v6.H(), v7.H(), i, MemOperand(x18, 8, PostIndex));
+    __ Ld4(v4.H(), v5.H(), v6.H(), v7.H(), i, MemOperand(x28, 8, PostIndex));
   }
   __ Mov(x19, src_base);
@@ -4401,7 +4397,7 @@ TEST(neon_ld4_lane_postindex) {
   CHECK_EQUAL_128(0x1F1E1D1C1B1A1918, 0x3736353433323130, q31);
   CHECK_EQUAL_64(src_base + 64, x17);
-  CHECK_EQUAL_64(src_base + 64, x18);
+  CHECK_EQUAL_64(src_base + 64, x28);
   CHECK_EQUAL_64(src_base + 64, x19);
   CHECK_EQUAL_64(src_base + 64, x20);
   CHECK_EQUAL_64(src_base + 1, x21);
@@ -4422,7 +4418,6 @@ TEST(neon_ld4_alllanes) {
   START();
   __ Mov(x17, src_base + 1);
-  __ Mov(x18, 1);
   __ Ld4r(v0.V8B(), v1.V8B(), v2.V8B(), v3.V8B(), MemOperand(x17));
   __ Add(x17, x17, 4);
   __ Ld4r(v4.V16B(), v5.V16B(), v6.V16B(), v7.V16B(), MemOperand(x17));
@@ -4480,22 +4475,20 @@ TEST(neon_ld4_alllanes_postindex) {
     src[i] = i;
   }
   uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
-  __ Mov(x17, src_base + 1);
-  __ Mov(x18, 1);
   START();
   __ Mov(x17, src_base + 1);
-  __ Mov(x18, 1);
+  __ Mov(x19, 1);
   __ Ld4r(v0.V8B(), v1.V8B(), v2.V8B(), v3.V8B(),
           MemOperand(x17, 4, PostIndex));
   __ Ld4r(v4.V16B(), v5.V16B(), v6.V16B(), v7.V16B(),
-          MemOperand(x17, x18, PostIndex));
+          MemOperand(x17, x19, PostIndex));
   __ Ld4r(v8_.V4H(), v9.V4H(), v10.V4H(), v11.V4H(),
-          MemOperand(x17, x18, PostIndex));
+          MemOperand(x17, x19, PostIndex));
   __ Ld4r(v12.V8H(), v13.V8H(), v14.V8H(), v15.V8H(),
           MemOperand(x17, 8, PostIndex));
   __ Ld4r(v16.V2S(), v17.V2S(), v18.V2S(), v19.V2S(),
-          MemOperand(x17, x18, PostIndex));
+          MemOperand(x17, x19, PostIndex));
   __ Ld4r(v20.V4S(), v21.V4S(), v22.V4S(), v23.V4S(),
           MemOperand(x17, 16, PostIndex));
   __ Ld4r(v24.V2D(), v25.V2D(), v26.V2D(), v27.V2D(),
@@ -4547,32 +4540,32 @@ TEST(neon_st1_lane) {
   START();
   __ Mov(x17, src_base);
-  __ Mov(x18, -16);
+  __ Mov(x19, -16);
   __ Ldr(q0, MemOperand(x17));
   for (int i = 15; i >= 0; i--) {
     __ St1(v0.B(), i, MemOperand(x17));
     __ Add(x17, x17, 1);
   }
-  __ Ldr(q1, MemOperand(x17, x18));
+  __ Ldr(q1, MemOperand(x17, x19));
   for (int i = 7; i >= 0; i--) {
     __ St1(v0.H(), i, MemOperand(x17));
     __ Add(x17, x17, 2);
   }
-  __ Ldr(q2, MemOperand(x17, x18));
+  __ Ldr(q2, MemOperand(x17, x19));
   for (int i = 3; i >= 0; i--) {
     __ St1(v0.S(), i, MemOperand(x17));
     __ Add(x17, x17, 4);
   }
-  __ Ldr(q3, MemOperand(x17, x18));
+  __ Ldr(q3, MemOperand(x17, x19));
   for (int i = 1; i >= 0; i--) {
     __ St1(v0.D(), i, MemOperand(x17));
     __ Add(x17, x17, 8);
   }
-  __ Ldr(q4, MemOperand(x17, x18));
+  __ Ldr(q4, MemOperand(x17, x19));
   END();
@@ -4595,17 +4588,17 @@ TEST(neon_st2_lane) {
   START();
   __ Mov(x17, dst_base);
-  __ Mov(x18, dst_base);
+  __ Mov(x19, dst_base);
   __ Movi(v0.V2D(), 0x0001020304050607, 0x08090A0B0C0D0E0F);
   __ Movi(v1.V2D(), 0x1011121314151617, 0x18191A1B1C1D1E1F);
   // Test B stores with and without post index.
   for (int i = 15; i >= 0; i--) {
-    __ St2(v0.B(), v1.B(), i, MemOperand(x18));
-    __ Add(x18, x18, 2);
+    __ St2(v0.B(), v1.B(), i, MemOperand(x19));
+    __ Add(x19, x19, 2);
   }
   for (int i = 15; i >= 0; i--) {
-    __ St2(v0.B(), v1.B(), i, MemOperand(x18, 2, PostIndex));
+    __ St2(v0.B(), v1.B(), i, MemOperand(x19, 2, PostIndex));
   }
   __ Ldr(q2, MemOperand(x17, 0 * 16));
   __ Ldr(q3, MemOperand(x17, 1 * 16));
@@ -4615,11 +4608,11 @@ TEST(neon_st2_lane) {
   // Test H stores with and without post index.
   __ Mov(x0, 4);
   for (int i = 7; i >= 0; i--) {
-    __ St2(v0.H(), v1.H(), i, MemOperand(x18));
-    __ Add(x18, x18, 4);
+    __ St2(v0.H(), v1.H(), i, MemOperand(x19));
+    __ Add(x19, x19, 4);
   }
   for (int i = 7; i >= 0; i--) {
-    __ St2(v0.H(), v1.H(), i, MemOperand(x18, x0, PostIndex));
+    __ St2(v0.H(), v1.H(), i, MemOperand(x19, x0, PostIndex));
   }
   __ Ldr(q6, MemOperand(x17, 4 * 16));
   __ Ldr(q7, MemOperand(x17, 5 * 16));
@@ -4628,11 +4621,11 @@ TEST(neon_st2_lane) {
   // Test S stores with and without post index.
   for (int i = 3; i >= 0; i--) {
-    __ St2(v0.S(), v1.S(), i, MemOperand(x18));
-    __ Add(x18, x18, 8);
+    __ St2(v0.S(), v1.S(), i, MemOperand(x19));
+    __ Add(x19, x19, 8);
   }
   for (int i = 3; i >= 0; i--) {
-    __ St2(v0.S(), v1.S(), i, MemOperand(x18, 8, PostIndex));
+    __ St2(v0.S(), v1.S(), i, MemOperand(x19, 8, PostIndex));
   }
   __ Ldr(q18, MemOperand(x17, 8 * 16));
   __ Ldr(q19, MemOperand(x17, 9 * 16));
@@ -4641,11 +4634,11 @@ TEST(neon_st2_lane) {
   // Test D stores with and without post index.
   __ Mov(x0, 16);
-  __ St2(v0.D(), v1.D(), 1, MemOperand(x18));
-  __ Add(x18, x18, 16);
-  __ St2(v0.D(), v1.D(), 0, MemOperand(x18, 16, PostIndex));
-  __ St2(v0.D(), v1.D(), 1, MemOperand(x18, x0, PostIndex));
-  __ St2(v0.D(), v1.D(), 0, MemOperand(x18, x0, PostIndex));
+  __ St2(v0.D(), v1.D(), 1, MemOperand(x19));
+  __ Add(x19, x19, 16);
+  __ St2(v0.D(), v1.D(), 0, MemOperand(x19, 16, PostIndex));
+  __ St2(v0.D(), v1.D(), 1, MemOperand(x19, x0, PostIndex));
+  __ St2(v0.D(), v1.D(), 0, MemOperand(x19, x0, PostIndex));
   __ Ldr(q22, MemOperand(x17, 12 * 16));
   __ Ldr(q23, MemOperand(x17, 13 * 16));
   __ Ldr(q24, MemOperand(x17, 14 * 16));
@@ -4686,18 +4679,18 @@ TEST(neon_st3_lane) {
   START();
   __ Mov(x17, dst_base);
-  __ Mov(x18, dst_base);
+  __ Mov(x19, dst_base);
   __ Movi(v0.V2D(), 0x0001020304050607, 0x08090A0B0C0D0E0F);
   __ Movi(v1.V2D(), 0x1011121314151617, 0x18191A1B1C1D1E1F);
   __ Movi(v2.V2D(), 0x2021222324252627, 0x28292A2B2C2D2E2F);
   // Test B stores with and without post index.
   for (int i = 15; i >= 0; i--) {
-    __ St3(v0.B(), v1.B(), v2.B(), i, MemOperand(x18));
-    __ Add(x18, x18, 3);
+    __ St3(v0.B(), v1.B(), v2.B(), i, MemOperand(x19));
+    __ Add(x19, x19, 3);
   }
   for (int i = 15; i >= 0; i--) {
-    __ St3(v0.B(), v1.B(), v2.B(), i, MemOperand(x18, 3, PostIndex));
+    __ St3(v0.B(), v1.B(), v2.B(), i, MemOperand(x19, 3, PostIndex));
   }
   __ Ldr(q3, MemOperand(x17, 0 * 16));
   __ Ldr(q4, MemOperand(x17, 1 * 16));
@@ -4709,11 +4702,11 @@ TEST(neon_st3_lane) {
   // Test H stores with and without post index.
   __ Mov(x0, 6);
   for (int i = 7; i >= 0; i--) {
-    __ St3(v0.H(), v1.H(), v2.H(), i, MemOperand(x18));
-    __ Add(x18, x18, 6);
+    __ St3(v0.H(), v1.H(), v2.H(), i, MemOperand(x19));
+    __ Add(x19, x19, 6);
   }
   for (int i = 7; i >= 0; i--) {
-    __ St3(v0.H(), v1.H(), v2.H(), i, MemOperand(x18, x0, PostIndex));
+    __ St3(v0.H(), v1.H(), v2.H(), i, MemOperand(x19, x0, PostIndex));
   }
   __ Ldr(q17, MemOperand(x17, 6 * 16));
   __ Ldr(q18, MemOperand(x17, 7 * 16));
@@ -4724,11 +4717,11 @@ TEST(neon_st3_lane) {
   // Test S stores with and without post index.
   for (int i = 3; i >= 0; i--) {
-    __ St3(v0.S(), v1.S(), v2.S(), i, MemOperand(x18));
-    __ Add(x18, x18, 12);
+    __ St3(v0.S(), v1.S(), v2.S(), i, MemOperand(x19));
+    __ Add(x19, x19, 12);
   }
   for (int i = 3; i >= 0; i--) {
-    __ St3(v0.S(), v1.S(), v2.S(), i, MemOperand(x18, 12, PostIndex));
+    __ St3(v0.S(), v1.S(), v2.S(), i, MemOperand(x19, 12, PostIndex));
   }
   __ Ldr(q23, MemOperand(x17, 12 * 16));
   __ Ldr(q24, MemOperand(x17, 13 * 16));
@@ -4739,10 +4732,10 @@ TEST(neon_st3_lane) {
   // Test D stores with and without post index.
   __ Mov(x0, 24);
-  __ St3(v0.D(), v1.D(), v2.D(), 1, MemOperand(x18));
-  __ Add(x18, x18, 24);
-  __ St3(v0.D(), v1.D(), v2.D(), 0, MemOperand(x18, 24, PostIndex));
-  __ St3(v0.D(), v1.D(), v2.D(), 1, MemOperand(x18, x0, PostIndex));
+  __ St3(v0.D(), v1.D(), v2.D(), 1, MemOperand(x19));
+  __ Add(x19, x19, 24);
+  __ St3(v0.D(), v1.D(), v2.D(), 0, MemOperand(x19, 24, PostIndex));
+  __ St3(v0.D(), v1.D(), v2.D(), 1, MemOperand(x19, x0, PostIndex));
   __ Ldr(q29, MemOperand(x17, 18 * 16));
   __ Ldr(q30, MemOperand(x17, 19 * 16));
   __ Ldr(q31, MemOperand(x17, 20 * 16));
...@@ -4783,7 +4776,7 @@ TEST(neon_st4_lane) { ...@@ -4783,7 +4776,7 @@ TEST(neon_st4_lane) {
START(); START();
__ Mov(x17, dst_base); __ Mov(x17, dst_base);
__ Mov(x18, dst_base); __ Mov(x19, dst_base);
__ Movi(v0.V2D(), 0x0001020304050607, 0x08090A0B0C0D0E0F); __ Movi(v0.V2D(), 0x0001020304050607, 0x08090A0B0C0D0E0F);
__ Movi(v1.V2D(), 0x1011121314151617, 0x18191A1B1C1D1E1F); __ Movi(v1.V2D(), 0x1011121314151617, 0x18191A1B1C1D1E1F);
__ Movi(v2.V2D(), 0x2021222324252627, 0x28292A2B2C2D2E2F); __ Movi(v2.V2D(), 0x2021222324252627, 0x28292A2B2C2D2E2F);
...@@ -4791,8 +4784,8 @@ TEST(neon_st4_lane) { ...@@ -4791,8 +4784,8 @@ TEST(neon_st4_lane) {
// Test B stores without post index. // Test B stores without post index.
for (int i = 15; i >= 0; i--) { for (int i = 15; i >= 0; i--) {
__ St4(v0.B(), v1.B(), v2.B(), v3.B(), i, MemOperand(x18)); __ St4(v0.B(), v1.B(), v2.B(), v3.B(), i, MemOperand(x19));
__ Add(x18, x18, 4); __ Add(x19, x19, 4);
} }
__ Ldr(q4, MemOperand(x17, 0 * 16)); __ Ldr(q4, MemOperand(x17, 0 * 16));
__ Ldr(q5, MemOperand(x17, 1 * 16)); __ Ldr(q5, MemOperand(x17, 1 * 16));
...@@ -4802,7 +4795,7 @@ TEST(neon_st4_lane) { ...@@ -4802,7 +4795,7 @@ TEST(neon_st4_lane) {
// Test H stores with post index. // Test H stores with post index.
__ Mov(x0, 8); __ Mov(x0, 8);
for (int i = 7; i >= 0; i--) { for (int i = 7; i >= 0; i--) {
__ St4(v0.H(), v1.H(), v2.H(), v3.H(), i, MemOperand(x18, x0, PostIndex)); __ St4(v0.H(), v1.H(), v2.H(), v3.H(), i, MemOperand(x19, x0, PostIndex));
} }
__ Ldr(q16, MemOperand(x17, 4 * 16)); __ Ldr(q16, MemOperand(x17, 4 * 16));
__ Ldr(q17, MemOperand(x17, 5 * 16)); __ Ldr(q17, MemOperand(x17, 5 * 16));
...@@ -4811,8 +4804,8 @@ TEST(neon_st4_lane) { ...@@ -4811,8 +4804,8 @@ TEST(neon_st4_lane) {
// Test S stores without post index. // Test S stores without post index.
for (int i = 3; i >= 0; i--) { for (int i = 3; i >= 0; i--) {
__ St4(v0.S(), v1.S(), v2.S(), v3.S(), i, MemOperand(x18)); __ St4(v0.S(), v1.S(), v2.S(), v3.S(), i, MemOperand(x19));
__ Add(x18, x18, 16); __ Add(x19, x19, 16);
} }
__ Ldr(q20, MemOperand(x17, 8 * 16)); __ Ldr(q20, MemOperand(x17, 8 * 16));
__ Ldr(q21, MemOperand(x17, 9 * 16)); __ Ldr(q21, MemOperand(x17, 9 * 16));
...@@ -4821,8 +4814,8 @@ TEST(neon_st4_lane) { ...@@ -4821,8 +4814,8 @@ TEST(neon_st4_lane) {
// Test D stores with post index. // Test D stores with post index.
__ Mov(x0, 32); __ Mov(x0, 32);
__ St4(v0.D(), v1.D(), v2.D(), v3.D(), 0, MemOperand(x18, 32, PostIndex)); __ St4(v0.D(), v1.D(), v2.D(), v3.D(), 0, MemOperand(x19, 32, PostIndex));
__ St4(v0.D(), v1.D(), v2.D(), v3.D(), 1, MemOperand(x18, x0, PostIndex)); __ St4(v0.D(), v1.D(), v2.D(), v3.D(), 1, MemOperand(x19, x0, PostIndex));
__ Ldr(q24, MemOperand(x17, 12 * 16)); __ Ldr(q24, MemOperand(x17, 12 * 16));
__ Ldr(q25, MemOperand(x17, 13 * 16)); __ Ldr(q25, MemOperand(x17, 13 * 16));
@@ -4865,7 +4858,7 @@ TEST(neon_ld1_lane_postindex) {
 START();
 __ Mov(x17, src_base);
-__ Mov(x18, src_base);
+__ Mov(x28, src_base);
 __ Mov(x19, src_base);
 __ Mov(x20, src_base);
 __ Mov(x21, src_base);
@@ -4879,7 +4872,7 @@ TEST(neon_ld1_lane_postindex) {
 }
 for (int i = 7; i >= 0; i--) {
-__ Ld1(v1.H(), i, MemOperand(x18, 2, PostIndex));
+__ Ld1(v1.H(), i, MemOperand(x28, 2, PostIndex));
 }
 for (int i = 3; i >= 0; i--) {
@@ -4920,7 +4913,7 @@ TEST(neon_ld1_lane_postindex) {
 CHECK_EQUAL_128(0x0F0E0D0C03020100, 0x0706050403020100, q6);
 CHECK_EQUAL_128(0x0706050403020100, 0x0706050403020100, q7);
 CHECK_EQUAL_64(src_base + 16, x17);
-CHECK_EQUAL_64(src_base + 16, x18);
+CHECK_EQUAL_64(src_base + 16, x28);
 CHECK_EQUAL_64(src_base + 16, x19);
 CHECK_EQUAL_64(src_base + 16, x20);
 CHECK_EQUAL_64(src_base + 1, x21);
@@ -4941,28 +4934,28 @@ TEST(neon_st1_lane_postindex) {
 START();
 __ Mov(x17, src_base);
-__ Mov(x18, -16);
+__ Mov(x19, -16);
 __ Ldr(q0, MemOperand(x17));
 for (int i = 15; i >= 0; i--) {
 __ St1(v0.B(), i, MemOperand(x17, 1, PostIndex));
 }
-__ Ldr(q1, MemOperand(x17, x18));
+__ Ldr(q1, MemOperand(x17, x19));
 for (int i = 7; i >= 0; i--) {
 __ St1(v0.H(), i, MemOperand(x17, 2, PostIndex));
 }
-__ Ldr(q2, MemOperand(x17, x18));
+__ Ldr(q2, MemOperand(x17, x19));
 for (int i = 3; i >= 0; i--) {
 __ St1(v0.S(), i, MemOperand(x17, 4, PostIndex));
 }
-__ Ldr(q3, MemOperand(x17, x18));
+__ Ldr(q3, MemOperand(x17, x19));
 for (int i = 1; i >= 0; i--) {
 __ St1(v0.D(), i, MemOperand(x17, 8, PostIndex));
 }
-__ Ldr(q4, MemOperand(x17, x18));
+__ Ldr(q4, MemOperand(x17, x19));
 END();
@@ -5027,12 +5020,12 @@ TEST(neon_ld1_alllanes_postindex) {
 START();
 __ Mov(x17, src_base + 1);
-__ Mov(x18, 1);
+__ Mov(x19, 1);
 __ Ld1r(v0.V8B(), MemOperand(x17, 1, PostIndex));
-__ Ld1r(v1.V16B(), MemOperand(x17, x18, PostIndex));
-__ Ld1r(v2.V4H(), MemOperand(x17, x18, PostIndex));
+__ Ld1r(v1.V16B(), MemOperand(x17, x19, PostIndex));
+__ Ld1r(v2.V4H(), MemOperand(x17, x19, PostIndex));
 __ Ld1r(v3.V8H(), MemOperand(x17, 2, PostIndex));
-__ Ld1r(v4.V2S(), MemOperand(x17, x18, PostIndex));
+__ Ld1r(v4.V2S(), MemOperand(x17, x19, PostIndex));
 __ Ld1r(v5.V4S(), MemOperand(x17, 4, PostIndex));
 __ Ld1r(v6.V2D(), MemOperand(x17, 8, PostIndex));
 END();
@@ -5116,7 +5109,7 @@ TEST(neon_st1_d_postindex) {
 START();
 __ Mov(x17, src_base);
-__ Mov(x18, -8);
+__ Mov(x28, -8);
 __ Mov(x19, -16);
 __ Mov(x20, -24);
 __ Mov(x21, -32);
@@ -5127,7 +5120,7 @@ TEST(neon_st1_d_postindex) {
 __ Mov(x17, src_base);
 __ St1(v0.V8B(), MemOperand(x17, 8, PostIndex));
-__ Ldr(d16, MemOperand(x17, x18));
+__ Ldr(d16, MemOperand(x17, x28));
 __ St1(v0.V8B(), v1.V8B(), MemOperand(x17, 16, PostIndex));
 __ Ldr(q17, MemOperand(x17, x19));
@@ -5135,7 +5128,7 @@ TEST(neon_st1_d_postindex) {
 __ St1(v0.V4H(), v1.V4H(), v2.V4H(), MemOperand(x17, 24, PostIndex));
 __ Ldr(d18, MemOperand(x17, x20));
 __ Ldr(d19, MemOperand(x17, x19));
-__ Ldr(d20, MemOperand(x17, x18));
+__ Ldr(d20, MemOperand(x17, x28));
 __ St1(v0.V2S(), v1.V2S(), v2.V2S(), v3.V2S(),
     MemOperand(x17, 32, PostIndex));
@@ -5223,7 +5216,7 @@ TEST(neon_st1_q_postindex) {
 START();
 __ Mov(x17, src_base);
-__ Mov(x18, -16);
+__ Mov(x28, -16);
 __ Mov(x19, -32);
 __ Mov(x20, -48);
 __ Mov(x21, -64);
@@ -5233,23 +5226,23 @@ TEST(neon_st1_q_postindex) {
 __ Ldr(q3, MemOperand(x17, 16, PostIndex));
 __ St1(v0.V16B(), MemOperand(x17, 16, PostIndex));
-__ Ldr(q16, MemOperand(x17, x18));
+__ Ldr(q16, MemOperand(x17, x28));
 __ St1(v0.V8H(), v1.V8H(), MemOperand(x17, 32, PostIndex));
 __ Ldr(q17, MemOperand(x17, x19));
-__ Ldr(q18, MemOperand(x17, x18));
+__ Ldr(q18, MemOperand(x17, x28));
 __ St1(v0.V4S(), v1.V4S(), v2.V4S(), MemOperand(x17, 48, PostIndex));
 __ Ldr(q19, MemOperand(x17, x20));
 __ Ldr(q20, MemOperand(x17, x19));
-__ Ldr(q21, MemOperand(x17, x18));
+__ Ldr(q21, MemOperand(x17, x28));
 __ St1(v0.V2D(), v1.V2D(), v2.V2D(), v3.V2D(),
     MemOperand(x17, 64, PostIndex));
 __ Ldr(q22, MemOperand(x17, x21));
 __ Ldr(q23, MemOperand(x17, x20));
 __ Ldr(q24, MemOperand(x17, x19));
-__ Ldr(q25, MemOperand(x17, x18));
+__ Ldr(q25, MemOperand(x17, x28));
 END();
@@ -5279,15 +5272,15 @@ TEST(neon_st2_d) {
 START();
 __ Mov(x17, src_base);
-__ Mov(x18, src_base);
+__ Mov(x19, src_base);
 __ Ldr(q0, MemOperand(x17, 16, PostIndex));
 __ Ldr(q1, MemOperand(x17, 16, PostIndex));
-__ St2(v0.V8B(), v1.V8B(), MemOperand(x18));
-__ Add(x18, x18, 22);
-__ St2(v0.V4H(), v1.V4H(), MemOperand(x18));
-__ Add(x18, x18, 11);
-__ St2(v0.V2S(), v1.V2S(), MemOperand(x18));
+__ St2(v0.V8B(), v1.V8B(), MemOperand(x19));
+__ Add(x19, x19, 22);
+__ St2(v0.V4H(), v1.V4H(), MemOperand(x19));
+__ Add(x19, x19, 11);
+__ St2(v0.V2S(), v1.V2S(), MemOperand(x19));
 __ Mov(x19, src_base);
 __ Ldr(q0, MemOperand(x19, 16, PostIndex));
@@ -5318,13 +5311,13 @@ TEST(neon_st2_d_postindex) {
 START();
 __ Mov(x22, 5);
 __ Mov(x17, src_base);
-__ Mov(x18, src_base);
+__ Mov(x19, src_base);
 __ Ldr(q0, MemOperand(x17, 16, PostIndex));
 __ Ldr(q1, MemOperand(x17, 16, PostIndex));
-__ St2(v0.V8B(), v1.V8B(), MemOperand(x18, x22, PostIndex));
-__ St2(v0.V4H(), v1.V4H(), MemOperand(x18, 16, PostIndex));
-__ St2(v0.V2S(), v1.V2S(), MemOperand(x18));
+__ St2(v0.V8B(), v1.V8B(), MemOperand(x19, x22, PostIndex));
+__ St2(v0.V4H(), v1.V4H(), MemOperand(x19, 16, PostIndex));
+__ St2(v0.V2S(), v1.V2S(), MemOperand(x19));
 __ Mov(x19, src_base);
 __ Ldr(q0, MemOperand(x19, 16, PostIndex));
@@ -5352,17 +5345,17 @@ TEST(neon_st2_q) {
 START();
 __ Mov(x17, src_base);
-__ Mov(x18, src_base);
+__ Mov(x19, src_base);
 __ Ldr(q0, MemOperand(x17, 16, PostIndex));
 __ Ldr(q1, MemOperand(x17, 16, PostIndex));
-__ St2(v0.V16B(), v1.V16B(), MemOperand(x18));
-__ Add(x18, x18, 8);
-__ St2(v0.V8H(), v1.V8H(), MemOperand(x18));
-__ Add(x18, x18, 22);
-__ St2(v0.V4S(), v1.V4S(), MemOperand(x18));
-__ Add(x18, x18, 2);
-__ St2(v0.V2D(), v1.V2D(), MemOperand(x18));
+__ St2(v0.V16B(), v1.V16B(), MemOperand(x19));
+__ Add(x19, x19, 8);
+__ St2(v0.V8H(), v1.V8H(), MemOperand(x19));
+__ Add(x19, x19, 22);
+__ St2(v0.V4S(), v1.V4S(), MemOperand(x19));
+__ Add(x19, x19, 2);
+__ St2(v0.V2D(), v1.V2D(), MemOperand(x19));
 __ Mov(x19, src_base);
 __ Ldr(q0, MemOperand(x19, 16, PostIndex));
@@ -5393,14 +5386,14 @@ TEST(neon_st2_q_postindex) {
 START();
 __ Mov(x22, 5);
 __ Mov(x17, src_base);
-__ Mov(x18, src_base);
+__ Mov(x19, src_base);
 __ Ldr(q0, MemOperand(x17, 16, PostIndex));
 __ Ldr(q1, MemOperand(x17, 16, PostIndex));
-__ St2(v0.V16B(), v1.V16B(), MemOperand(x18, x22, PostIndex));
-__ St2(v0.V8H(), v1.V8H(), MemOperand(x18, 32, PostIndex));
-__ St2(v0.V4S(), v1.V4S(), MemOperand(x18, x22, PostIndex));
-__ St2(v0.V2D(), v1.V2D(), MemOperand(x18));
+__ St2(v0.V16B(), v1.V16B(), MemOperand(x19, x22, PostIndex));
+__ St2(v0.V8H(), v1.V8H(), MemOperand(x19, 32, PostIndex));
+__ St2(v0.V4S(), v1.V4S(), MemOperand(x19, x22, PostIndex));
+__ St2(v0.V2D(), v1.V2D(), MemOperand(x19));
 __ Mov(x19, src_base);
 __ Ldr(q0, MemOperand(x19, 16, PostIndex));
@@ -5432,16 +5425,16 @@ TEST(neon_st3_d) {
 START();
 __ Mov(x17, src_base);
-__ Mov(x18, src_base);
+__ Mov(x19, src_base);
 __ Ldr(q0, MemOperand(x17, 16, PostIndex));
 __ Ldr(q1, MemOperand(x17, 16, PostIndex));
 __ Ldr(q2, MemOperand(x17, 16, PostIndex));
-__ St3(v0.V8B(), v1.V8B(), v2.V8B(), MemOperand(x18));
-__ Add(x18, x18, 3);
-__ St3(v0.V4H(), v1.V4H(), v2.V4H(), MemOperand(x18));
-__ Add(x18, x18, 2);
-__ St3(v0.V2S(), v1.V2S(), v2.V2S(), MemOperand(x18));
+__ St3(v0.V8B(), v1.V8B(), v2.V8B(), MemOperand(x19));
+__ Add(x19, x19, 3);
+__ St3(v0.V4H(), v1.V4H(), v2.V4H(), MemOperand(x19));
+__ Add(x19, x19, 2);
+__ St3(v0.V2S(), v1.V2S(), v2.V2S(), MemOperand(x19));
 __ Mov(x19, src_base);
 __ Ldr(q0, MemOperand(x19, 16, PostIndex));
@@ -5468,14 +5461,14 @@ TEST(neon_st3_d_postindex) {
 START();
 __ Mov(x22, 5);
 __ Mov(x17, src_base);
-__ Mov(x18, src_base);
+__ Mov(x19, src_base);
 __ Ldr(q0, MemOperand(x17, 16, PostIndex));
 __ Ldr(q1, MemOperand(x17, 16, PostIndex));
 __ Ldr(q2, MemOperand(x17, 16, PostIndex));
-__ St3(v0.V8B(), v1.V8B(), v2.V8B(), MemOperand(x18, x22, PostIndex));
-__ St3(v0.V4H(), v1.V4H(), v2.V4H(), MemOperand(x18, 24, PostIndex));
-__ St3(v0.V2S(), v1.V2S(), v2.V2S(), MemOperand(x18));
+__ St3(v0.V8B(), v1.V8B(), v2.V8B(), MemOperand(x19, x22, PostIndex));
+__ St3(v0.V4H(), v1.V4H(), v2.V4H(), MemOperand(x19, 24, PostIndex));
+__ St3(v0.V2S(), v1.V2S(), v2.V2S(), MemOperand(x19));
 __ Mov(x19, src_base);
 __ Ldr(q0, MemOperand(x19, 16, PostIndex));
@@ -5505,18 +5498,18 @@ TEST(neon_st3_q) {
 START();
 __ Mov(x17, src_base);
-__ Mov(x18, src_base);
+__ Mov(x19, src_base);
 __ Ldr(q0, MemOperand(x17, 16, PostIndex));
 __ Ldr(q1, MemOperand(x17, 16, PostIndex));
 __ Ldr(q2, MemOperand(x17, 16, PostIndex));
-__ St3(v0.V16B(), v1.V16B(), v2.V16B(), MemOperand(x18));
-__ Add(x18, x18, 5);
-__ St3(v0.V8H(), v1.V8H(), v2.V8H(), MemOperand(x18));
-__ Add(x18, x18, 12);
-__ St3(v0.V4S(), v1.V4S(), v2.V4S(), MemOperand(x18));
-__ Add(x18, x18, 22);
-__ St3(v0.V2D(), v1.V2D(), v2.V2D(), MemOperand(x18));
+__ St3(v0.V16B(), v1.V16B(), v2.V16B(), MemOperand(x19));
+__ Add(x19, x19, 5);
+__ St3(v0.V8H(), v1.V8H(), v2.V8H(), MemOperand(x19));
+__ Add(x19, x19, 12);
+__ St3(v0.V4S(), v1.V4S(), v2.V4S(), MemOperand(x19));
+__ Add(x19, x19, 22);
+__ St3(v0.V2D(), v1.V2D(), v2.V2D(), MemOperand(x19));
 __ Mov(x19, src_base);
 __ Ldr(q0, MemOperand(x19, 16, PostIndex));
@@ -5551,15 +5544,15 @@ TEST(neon_st3_q_postindex) {
 START();
 __ Mov(x22, 5);
 __ Mov(x17, src_base);
-__ Mov(x18, src_base);
+__ Mov(x28, src_base);
 __ Ldr(q0, MemOperand(x17, 16, PostIndex));
 __ Ldr(q1, MemOperand(x17, 16, PostIndex));
 __ Ldr(q2, MemOperand(x17, 16, PostIndex));
-__ St3(v0.V16B(), v1.V16B(), v2.V16B(), MemOperand(x18, x22, PostIndex));
-__ St3(v0.V8H(), v1.V8H(), v2.V8H(), MemOperand(x18, 48, PostIndex));
-__ St3(v0.V4S(), v1.V4S(), v2.V4S(), MemOperand(x18, x22, PostIndex));
-__ St3(v0.V2D(), v1.V2D(), v2.V2D(), MemOperand(x18));
+__ St3(v0.V16B(), v1.V16B(), v2.V16B(), MemOperand(x28, x22, PostIndex));
+__ St3(v0.V8H(), v1.V8H(), v2.V8H(), MemOperand(x28, 48, PostIndex));
+__ St3(v0.V4S(), v1.V4S(), v2.V4S(), MemOperand(x28, x22, PostIndex));
+__ St3(v0.V2D(), v1.V2D(), v2.V2D(), MemOperand(x28));
 __ Mov(x19, src_base);
 __ Ldr(q0, MemOperand(x19, 16, PostIndex));
@@ -5595,17 +5588,17 @@ TEST(neon_st4_d) {
 START();
 __ Mov(x17, src_base);
-__ Mov(x18, src_base);
+__ Mov(x28, src_base);
 __ Ldr(q0, MemOperand(x17, 16, PostIndex));
 __ Ldr(q1, MemOperand(x17, 16, PostIndex));
 __ Ldr(q2, MemOperand(x17, 16, PostIndex));
 __ Ldr(q3, MemOperand(x17, 16, PostIndex));
-__ St4(v0.V8B(), v1.V8B(), v2.V8B(), v3.V8B(), MemOperand(x18));
-__ Add(x18, x18, 12);
-__ St4(v0.V4H(), v1.V4H(), v2.V4H(), v3.V4H(), MemOperand(x18));
-__ Add(x18, x18, 15);
-__ St4(v0.V2S(), v1.V2S(), v2.V2S(), v3.V2S(), MemOperand(x18));
+__ St4(v0.V8B(), v1.V8B(), v2.V8B(), v3.V8B(), MemOperand(x28));
+__ Add(x28, x28, 12);
+__ St4(v0.V4H(), v1.V4H(), v2.V4H(), v3.V4H(), MemOperand(x28));
+__ Add(x28, x28, 15);
+__ St4(v0.V2S(), v1.V2S(), v2.V2S(), v3.V2S(), MemOperand(x28));
 __ Mov(x19, src_base);
 __ Ldr(q0, MemOperand(x19, 16, PostIndex));
@@ -5636,17 +5629,17 @@ TEST(neon_st4_d_postindex) {
 START();
 __ Mov(x22, 5);
 __ Mov(x17, src_base);
-__ Mov(x18, src_base);
+__ Mov(x28, src_base);
 __ Ldr(q0, MemOperand(x17, 16, PostIndex));
 __ Ldr(q1, MemOperand(x17, 16, PostIndex));
 __ Ldr(q2, MemOperand(x17, 16, PostIndex));
 __ Ldr(q3, MemOperand(x17, 16, PostIndex));
 __ St4(v0.V8B(), v1.V8B(), v2.V8B(), v3.V8B(),
-    MemOperand(x18, x22, PostIndex));
+    MemOperand(x28, x22, PostIndex));
 __ St4(v0.V4H(), v1.V4H(), v2.V4H(), v3.V4H(),
-    MemOperand(x18, 32, PostIndex));
-__ St4(v0.V2S(), v1.V2S(), v2.V2S(), v3.V2S(), MemOperand(x18));
+    MemOperand(x28, 32, PostIndex));
+__ St4(v0.V2S(), v1.V2S(), v2.V2S(), v3.V2S(), MemOperand(x28));
 __ Mov(x19, src_base);
 __ Ldr(q0, MemOperand(x19, 16, PostIndex));
@@ -5678,20 +5671,20 @@ TEST(neon_st4_q) {
 START();
 __ Mov(x17, src_base);
-__ Mov(x18, src_base);
+__ Mov(x28, src_base);
 __ Ldr(q0, MemOperand(x17, 16, PostIndex));
 __ Ldr(q1, MemOperand(x17, 16, PostIndex));
 __ Ldr(q2, MemOperand(x17, 16, PostIndex));
 __ Ldr(q3, MemOperand(x17, 16, PostIndex));
-__ St4(v0.V16B(), v1.V16B(), v2.V16B(), v3.V16B(), MemOperand(x18));
-__ Add(x18, x18, 5);
-__ St4(v0.V8H(), v1.V8H(), v2.V8H(), v3.V8H(), MemOperand(x18));
-__ Add(x18, x18, 12);
-__ St4(v0.V4S(), v1.V4S(), v2.V4S(), v3.V4S(), MemOperand(x18));
-__ Add(x18, x18, 22);
-__ St4(v0.V2D(), v1.V2D(), v2.V2D(), v3.V2D(), MemOperand(x18));
-__ Add(x18, x18, 10);
+__ St4(v0.V16B(), v1.V16B(), v2.V16B(), v3.V16B(), MemOperand(x28));
+__ Add(x28, x28, 5);
+__ St4(v0.V8H(), v1.V8H(), v2.V8H(), v3.V8H(), MemOperand(x28));
+__ Add(x28, x28, 12);
+__ St4(v0.V4S(), v1.V4S(), v2.V4S(), v3.V4S(), MemOperand(x28));
+__ Add(x28, x28, 22);
+__ St4(v0.V2D(), v1.V2D(), v2.V2D(), v3.V2D(), MemOperand(x28));
+__ Add(x28, x28, 10);
 __ Mov(x19, src_base);
 __ Ldr(q0, MemOperand(x19, 16, PostIndex));
@@ -5728,19 +5721,19 @@ TEST(neon_st4_q_postindex) {
 START();
 __ Mov(x22, 5);
 __ Mov(x17, src_base);
-__ Mov(x18, src_base);
+__ Mov(x28, src_base);
 __ Ldr(q0, MemOperand(x17, 16, PostIndex));
 __ Ldr(q1, MemOperand(x17, 16, PostIndex));
 __ Ldr(q2, MemOperand(x17, 16, PostIndex));
 __ Ldr(q3, MemOperand(x17, 16, PostIndex));
 __ St4(v0.V16B(), v1.V16B(), v2.V16B(), v3.V16B(),
-    MemOperand(x18, x22, PostIndex));
+    MemOperand(x28, x22, PostIndex));
 __ St4(v0.V8H(), v1.V8H(), v2.V8H(), v3.V8H(),
-    MemOperand(x18, 64, PostIndex));
+    MemOperand(x28, 64, PostIndex));
 __ St4(v0.V4S(), v1.V4S(), v2.V4S(), v3.V4S(),
-    MemOperand(x18, x22, PostIndex));
-__ St4(v0.V2D(), v1.V2D(), v2.V2D(), v3.V2D(), MemOperand(x18));
+    MemOperand(x28, x22, PostIndex));
+__ St4(v0.V2D(), v1.V2D(), v2.V2D(), v3.V2D(), MemOperand(x28));
 __ Mov(x19, src_base);
 __ Ldr(q0, MemOperand(x19, 16, PostIndex));
@@ -6065,13 +6058,13 @@ TEST(ldp_stp_offset) {
 START();
 __ Mov(x16, src_base);
 __ Mov(x17, dst_base);
-__ Mov(x18, src_base + 24);
+__ Mov(x28, src_base + 24);
 __ Mov(x19, dst_base + 56);
 __ Ldp(w0, w1, MemOperand(x16));
 __ Ldp(w2, w3, MemOperand(x16, 4));
 __ Ldp(x4, x5, MemOperand(x16, 8));
-__ Ldp(w6, w7, MemOperand(x18, -12));
-__ Ldp(x8, x9, MemOperand(x18, -16));
+__ Ldp(w6, w7, MemOperand(x28, -12));
+__ Ldp(x8, x9, MemOperand(x28, -16));
 __ Stp(w0, w1, MemOperand(x17));
 __ Stp(w2, w3, MemOperand(x17, 8));
 __ Stp(x4, x5, MemOperand(x17, 16));
@@ -6100,7 +6093,7 @@ TEST(ldp_stp_offset) {
 CHECK_EQUAL_64(0xFFEEDDCCBBAA9988UL, dst[6]);
 CHECK_EQUAL_64(src_base, x16);
 CHECK_EQUAL_64(dst_base, x17);
-CHECK_EQUAL_64(src_base + 24, x18);
+CHECK_EQUAL_64(src_base + 24, x28);
 CHECK_EQUAL_64(dst_base + 56, x19);
 }
@@ -6120,13 +6113,13 @@ TEST(ldp_stp_offset_wide) {
 START();
 __ Mov(x20, src_base - base_offset);
 __ Mov(x21, dst_base - base_offset);
-__ Mov(x18, src_base + base_offset + 24);
+__ Mov(x28, src_base + base_offset + 24);
 __ Mov(x19, dst_base + base_offset + 56);
 __ Ldp(w0, w1, MemOperand(x20, base_offset));
 __ Ldp(w2, w3, MemOperand(x20, base_offset + 4));
 __ Ldp(x4, x5, MemOperand(x20, base_offset + 8));
-__ Ldp(w6, w7, MemOperand(x18, -12 - base_offset));
-__ Ldp(x8, x9, MemOperand(x18, -16 - base_offset));
+__ Ldp(w6, w7, MemOperand(x28, -12 - base_offset));
+__ Ldp(x8, x9, MemOperand(x28, -16 - base_offset));
 __ Stp(w0, w1, MemOperand(x21, base_offset));
 __ Stp(w2, w3, MemOperand(x21, base_offset + 8));
 __ Stp(x4, x5, MemOperand(x21, base_offset + 16));
@@ -6155,7 +6148,7 @@ TEST(ldp_stp_offset_wide) {
 CHECK_EQUAL_64(0xFFEEDDCCBBAA9988UL, dst[6]);
 CHECK_EQUAL_64(src_base - base_offset, x20);
 CHECK_EQUAL_64(dst_base - base_offset, x21);
-CHECK_EQUAL_64(src_base + base_offset + 24, x18);
+CHECK_EQUAL_64(src_base + base_offset + 24, x28);
 CHECK_EQUAL_64(dst_base + base_offset + 56, x19);
 }
@@ -6172,7 +6165,7 @@ TEST(ldp_stp_preindex) {
 START();
 __ Mov(x16, src_base);
 __ Mov(x17, dst_base);
-__ Mov(x18, dst_base + 16);
+__ Mov(x28, dst_base + 16);
 __ Ldp(w0, w1, MemOperand(x16, 4, PreIndex));
 __ Mov(x19, x16);
 __ Ldp(w2, w3, MemOperand(x16, -4, PreIndex));
@@ -6182,9 +6175,9 @@ TEST(ldp_stp_preindex) {
 __ Ldp(x4, x5, MemOperand(x16, 8, PreIndex));
 __ Mov(x21, x16);
 __ Ldp(x6, x7, MemOperand(x16, -8, PreIndex));
-__ Stp(x7, x6, MemOperand(x18, 8, PreIndex));
-__ Mov(x22, x18);
-__ Stp(x5, x4, MemOperand(x18, -8, PreIndex));
+__ Stp(x7, x6, MemOperand(x28, 8, PreIndex));
+__ Mov(x22, x28);
+__ Stp(x5, x4, MemOperand(x28, -8, PreIndex));
 END();
 RUN();
@@ -6204,7 +6197,7 @@ TEST(ldp_stp_preindex) {
 CHECK_EQUAL_64(0x0011223344556677UL, dst[4]);
 CHECK_EQUAL_64(src_base, x16);
 CHECK_EQUAL_64(dst_base, x17);
-CHECK_EQUAL_64(dst_base + 16, x18);
+CHECK_EQUAL_64(dst_base + 16, x28);
 CHECK_EQUAL_64(src_base + 4, x19);
 CHECK_EQUAL_64(dst_base + 4, x20);
 CHECK_EQUAL_64(src_base + 8, x21);
@@ -6227,7 +6220,7 @@ TEST(ldp_stp_preindex_wide) {
 START();
 __ Mov(x24, src_base - base_offset);
 __ Mov(x25, dst_base + base_offset);
-__ Mov(x18, dst_base + base_offset + 16);
+__ Mov(x28, dst_base + base_offset + 16);
 __ Ldp(w0, w1, MemOperand(x24, base_offset + 4, PreIndex));
 __ Mov(x19, x24);
 __ Mov(x24, src_base - base_offset + 4);
@@ -6241,10 +6234,10 @@ TEST(ldp_stp_preindex_wide) {
 __ Mov(x21, x24);
 __ Mov(x24, src_base - base_offset + 8);
 __ Ldp(x6, x7, MemOperand(x24, base_offset - 8, PreIndex));
-__ Stp(x7, x6, MemOperand(x18, 8 - base_offset, PreIndex));
-__ Mov(x22, x18);
-__ Mov(x18, dst_base + base_offset + 16 + 8);
-__ Stp(x5, x4, MemOperand(x18, -8 - base_offset, PreIndex));
+__ Stp(x7, x6, MemOperand(x28, 8 - base_offset, PreIndex));
+__ Mov(x22, x28);
+__ Mov(x28, dst_base + base_offset + 16 + 8);
+__ Stp(x5, x4, MemOperand(x28, -8 - base_offset, PreIndex));
 END();
 RUN();
@@ -6264,7 +6257,7 @@ TEST(ldp_stp_preindex_wide) {
 CHECK_EQUAL_64(0x0011223344556677UL, dst[4]);
 CHECK_EQUAL_64(src_base, x24);
 CHECK_EQUAL_64(dst_base, x25);
-CHECK_EQUAL_64(dst_base + 16, x18);
+CHECK_EQUAL_64(dst_base + 16, x28);
 CHECK_EQUAL_64(src_base + 4, x19);
 CHECK_EQUAL_64(dst_base + 4, x20);
 CHECK_EQUAL_64(src_base + 8, x21);
@@ -6284,7 +6277,7 @@ TEST(ldp_stp_postindex) {
 START();
 __ Mov(x16, src_base);
 __ Mov(x17, dst_base);
-__ Mov(x18, dst_base + 16);
+__ Mov(x28, dst_base + 16);
 __ Ldp(w0, w1, MemOperand(x16, 4, PostIndex));
 __ Mov(x19, x16);
 __ Ldp(w2, w3, MemOperand(x16, -4, PostIndex));
@@ -6294,9 +6287,9 @@ TEST(ldp_stp_postindex) {
 __ Ldp(x4, x5, MemOperand(x16, 8, PostIndex));
 __ Mov(x21, x16);
 __ Ldp(x6, x7, MemOperand(x16, -8, PostIndex));
-__ Stp(x7, x6, MemOperand(x18, 8, PostIndex));
-__ Mov(x22, x18);
-__ Stp(x5, x4, MemOperand(x18, -8, PostIndex));
+__ Stp(x7, x6, MemOperand(x28, 8, PostIndex));
+__ Mov(x22, x28);
+__ Stp(x5, x4, MemOperand(x28, -8, PostIndex));
 END();
 RUN();
@@ -6316,7 +6309,7 @@ TEST(ldp_stp_postindex) {
 CHECK_EQUAL_64(0x0011223344556677UL, dst[4]);
 CHECK_EQUAL_64(src_base, x16);
 CHECK_EQUAL_64(dst_base, x17);
-CHECK_EQUAL_64(dst_base + 16, x18);
+CHECK_EQUAL_64(dst_base + 16, x28);
 CHECK_EQUAL_64(src_base + 4, x19);
 CHECK_EQUAL_64(dst_base + 4, x20);
 CHECK_EQUAL_64(src_base + 8, x21);
@@ -6339,7 +6332,7 @@ TEST(ldp_stp_postindex_wide) {
 START();
 __ Mov(x24, src_base);
 __ Mov(x25, dst_base);
-__ Mov(x18, dst_base + 16);
+__ Mov(x28, dst_base + 16);
 __ Ldp(w0, w1, MemOperand(x24, base_offset + 4, PostIndex));
 __ Mov(x19, x24);
 __ Sub(x24, x24, base_offset);
@@ -6353,10 +6346,10 @@ TEST(ldp_stp_postindex_wide) {
 __ Mov(x21, x24);
 __ Sub(x24, x24, base_offset);
 __ Ldp(x6, x7, MemOperand(x24, base_offset - 8, PostIndex));
-__ Stp(x7, x6, MemOperand(x18, 8 - base_offset, PostIndex));
-__ Mov(x22, x18);
-__ Add(x18, x18, base_offset);
-__ Stp(x5, x4, MemOperand(x18, -8 - base_offset, PostIndex));
+__ Stp(x7, x6, MemOperand(x28, 8 - base_offset, PostIndex));
+__ Mov(x22, x28);
+__ Add(x28, x28, base_offset);
+__ Stp(x5, x4, MemOperand(x28, -8 - base_offset, PostIndex));
 END();
 RUN();
@@ -6376,7 +6369,7 @@ TEST(ldp_stp_postindex_wide) {
 CHECK_EQUAL_64(0x0011223344556677UL, dst[4]);
 CHECK_EQUAL_64(src_base + base_offset, x24);
 CHECK_EQUAL_64(dst_base - base_offset, x25);
-CHECK_EQUAL_64(dst_base - base_offset + 16, x18);
+CHECK_EQUAL_64(dst_base - base_offset + 16, x28);
 CHECK_EQUAL_64(src_base + base_offset + 4, x19);
 CHECK_EQUAL_64(dst_base - base_offset + 4, x20);
 CHECK_EQUAL_64(src_base + base_offset + 8, x21);
@@ -6412,14 +6405,14 @@ TEST(ldur_stur) {
 START();
 __ Mov(x17, src_base);
-__ Mov(x18, dst_base);
+__ Mov(x28, dst_base);
 __ Mov(x19, src_base + 16);
 __ Mov(x20, dst_base + 32);
 __ Mov(x21, dst_base + 40);
 __ Ldr(w0, MemOperand(x17, 1));
-__ Str(w0, MemOperand(x18, 2));
+__ Str(w0, MemOperand(x28, 2));
 __ Ldr(x1, MemOperand(x17, 3));
-__ Str(x1, MemOperand(x18, 9));
+__ Str(x1, MemOperand(x28, 9));
 __ Ldr(w2, MemOperand(x19, -9));
 __ Str(w2, MemOperand(x20, -5));
 __ Ldrb(w3, MemOperand(x19, -1));
@@ -6438,7 +6431,7 @@ TEST(ldur_stur) {
 CHECK_EQUAL_64(0x00000001, x3);
 CHECK_EQUAL_64(0x0100000000000000L, dst[4]);
 CHECK_EQUAL_64(src_base, x17);
-CHECK_EQUAL_64(dst_base, x18);
+CHECK_EQUAL_64(dst_base, x28);
 CHECK_EQUAL_64(src_base + 16, x19);
 CHECK_EQUAL_64(dst_base + 32, x20);
 }
@@ -6682,7 +6675,7 @@ TEST(add_sub_wide_imm) {
 __ Add(w12, w0, Operand(0x12345678));
 __ Add(w13, w1, Operand(0xFFFFFFFF));
-__ Add(w18, w0, Operand(kWMinInt));
+__ Add(w28, w0, Operand(kWMinInt));
 __ Sub(w19, w0, Operand(kWMinInt));
 __ Sub(x20, x0, Operand(0x1234567890ABCDEFUL));
@@ -6697,7 +6690,7 @@ TEST(add_sub_wide_imm) {
 CHECK_EQUAL_32(0x12345678, w12);
 CHECK_EQUAL_64(0x0, x13);
-CHECK_EQUAL_32(kWMinInt, w18);
+CHECK_EQUAL_32(kWMinInt, w28);
 CHECK_EQUAL_32(kWMinInt, w19);
 CHECK_EQUAL_64(-0x1234567890ABCDEFLL, x20);
@@ -6720,7 +6713,7 @@ TEST(add_sub_shifted) {
 __ Add(x13, x0, Operand(x1, ASR, 8));
 __ Add(x14, x0, Operand(x2, ASR, 8));
 __ Add(w15, w0, Operand(w1, ASR, 8));
-__ Add(w18, w3, Operand(w1, ROR, 8));
+__ Add(w28, w3, Operand(w1, ROR, 8));
 __ Add(x19, x3, Operand(x1, ROR, 8));
 __ Sub(x20, x3, Operand(x2));
@@ -6741,7 +6734,7 @@ TEST(add_sub_shifted) {
 CHECK_EQUAL_64(0x000123456789ABCDL, x13);
 CHECK_EQUAL_64(0xFFFEDCBA98765432L, x14);
 CHECK_EQUAL_64(0xFF89ABCD, x15);
-CHECK_EQUAL_64(0xEF89ABCC, x18);
+CHECK_EQUAL_64(0xEF89ABCC, x28);
 CHECK_EQUAL_64(0xEF0123456789ABCCL, x19);
 CHECK_EQUAL_64(0x0123456789ABCDEFL, x20);
@@ -6773,7 +6766,7 @@ TEST(add_sub_extended) {
 __ Add(x15, x0, Operand(x1, SXTB, 1));
 __ Add(x16, x0, Operand(x1, SXTH, 2));
 __ Add(x17, x0, Operand(x1, SXTW, 3));
-__ Add(x18, x0, Operand(x2, SXTB, 0));
+__ Add(x4, x0, Operand(x2, SXTB, 0));
 __ Add(x19, x0, Operand(x2, SXTB, 1));
 __ Add(x20, x0, Operand(x2, SXTH, 2));
 __ Add(x21, x0, Operand(x2, SXTW, 3));
@@ -6803,7 +6796,7 @@ TEST(add_sub_extended) {
 CHECK_EQUAL_64(0xFFFFFFFFFFFFFFDEL, x15);
 CHECK_EQUAL_64(0xFFFFFFFFFFFF37BCL, x16);
 CHECK_EQUAL_64(0xFFFFFFFC4D5E6F78L, x17);
-CHECK_EQUAL_64(0x10L, x18);
+CHECK_EQUAL_64(0x10L, x4);
 CHECK_EQUAL_64(0x20L, x19);
 CHECK_EQUAL_64(0xC840L, x20);
 CHECK_EQUAL_64(0x3B2A19080L, x21);
@@ -7430,7 +7423,7 @@ TEST(adc_sbc_shift) {
 // Set the C flag.
 __ Cmp(w0, Operand(w0));
-__ Adc(x18, x2, Operand(x3));
+__ Adc(x28, x2, Operand(x3));
 __ Adc(x19, x0, Operand(x1, LSL, 60));
 __ Sbc(x20, x4, Operand(x3, LSR, 4));
 __ Adc(x21, x2, Operand(x3, ASR, 4));
@@ -7457,7 +7450,7 @@ TEST(adc_sbc_shift) {
 CHECK_EQUAL_32(0x91111110, w13);
 CHECK_EQUAL_32(0x9A222221, w14);
-CHECK_EQUAL_64(0xFFFFFFFFFFFFFFFFLL + 1, x18);
+CHECK_EQUAL_64(0xFFFFFFFFFFFFFFFFLL + 1, x28);
 CHECK_EQUAL_64((1LL << 60) + 1, x19);
 CHECK_EQUAL_64(0xF0123456789ABCDDL + 1, x20);
 CHECK_EQUAL_64(0x0111111111111110L + 1, x21);
@@ -7581,7 +7574,7 @@ TEST(adc_sbc_wide_imm) {
 // Set the C flag.
 __ Cmp(w0, Operand(w0));
-__ Adc(x18, x0, Operand(0x1234567890ABCDEFUL));
+__ Adc(x28, x0, Operand(0x1234567890ABCDEFUL));
 __ Adc(w19, w0, Operand(0xFFFFFFFF));
 __ Sbc(x20, x0, Operand(0x1234567890ABCDEFUL));
 __ Sbc(w21, w0, Operand(0xFFFFFFFF));
@@ -7598,7 +7591,7 @@ TEST(adc_sbc_wide_imm) {
 CHECK_EQUAL_64(0xFFFFFFFF, x11);
 CHECK_EQUAL_64(0xFFFF, x12);
-CHECK_EQUAL_64(0x1234567890ABCDEFUL + 1, x18);
+CHECK_EQUAL_64(0x1234567890ABCDEFUL + 1, x28);
 CHECK_EQUAL_64(0, x19);
 CHECK_EQUAL_64(0xEDCBA9876F543211UL, x20);
 CHECK_EQUAL_64(1, x21);
@@ -7758,7 +7751,7 @@ TEST(cmp_shift) {
 SETUP();
 START();
-__ Mov(x18, 0xF0000000);
+__ Mov(x28, 0xF0000000);
 __ Mov(x19, 0xF000000010000000UL);
 __ Mov(x20, 0xF0000000F0000000UL);
 __ Mov(x21, 0x7800000078000000UL);
@@ -7778,7 +7771,7 @@ TEST(cmp_shift) {
 __ Cmp(w19, Operand(w23, LSR, 3));
 __ Mrs(x2, NZCV);
-__ Cmp(x18, Operand(x24, LSR, 4));
+__ Cmp(x28, Operand(x24, LSR, 4));
 __ Mrs(x3, NZCV);
 __ Cmp(w20, Operand(w25, ASR, 2));
@@ -7993,7 +7986,7 @@ TEST(csel) {
 __ Cneg(x12, x24, ne);
 __ csel(w15, w24, w25, al);
-__ csel(x18, x24, x25, nv);
+__ csel(x28, x24, x25, nv);
 __ CzeroX(x24, ne);
 __ CzeroX(x25, eq);
@@ -8020,7 +8013,7 @@ TEST(csel) {
 CHECK_EQUAL_64(0x0000000F, x13);
 CHECK_EQUAL_64(0x0000000F0000000FUL, x14);
 CHECK_EQUAL_64(0x0000000F, x15);
-CHECK_EQUAL_64(0x0000000F0000000FUL, x18);
+CHECK_EQUAL_64(0x0000000F0000000FUL, x28);
 CHECK_EQUAL_64(0, x24);
 CHECK_EQUAL_64(0x0000001F0000001FUL, x25);
 CHECK_EQUAL_64(0x0000001F0000001FUL, x26);
@@ -8032,11 +8025,11 @@ TEST(csel_imm) {
 SETUP();
 START();
-__ Mov(x18, 0);
+__ Mov(x28, 0);
 __ Mov(x19, 0x80000000);
 __ Mov(x20, 0x8000000000000000UL);
-__ Cmp(x18, Operand(0));
+__ Cmp(x28, Operand(0));
 __ Csel(w0, w19, -2, ne);
 __ Csel(w1, w19, -1, ne);
 __ Csel(w2, w19, 0, ne);
@@ -8098,7 +8091,7 @@ TEST(lslv) {
 __ Lsl(x16, x0, x1);
 __ Lsl(x17, x0, x2);
-__ Lsl(x18, x0, x3);
+__ Lsl(x28, x0, x3);
 __ Lsl(x19, x0, x4);
 __ Lsl(x20, x0, x5);
 __ Lsl(x21, x0, x6);
@@ -8116,7 +8109,7 @@ TEST(lslv) {
 CHECK_EQUAL_64(value, x0);
 CHECK_EQUAL_64(value << (shift[0] & 63), x16);
 CHECK_EQUAL_64(value << (shift[1] & 63), x17);
-CHECK_EQUAL_64(value << (shift[2] & 63), x18);
+CHECK_EQUAL_64(value << (shift[2] & 63), x28);
 CHECK_EQUAL_64(value << (shift[3] & 63), x19);
 CHECK_EQUAL_64(value << (shift[4] & 63), x20);
 CHECK_EQUAL_64(value << (shift[5] & 63), x21);
@@ -8148,7 +8141,7 @@ TEST(lsrv) {
 __ Lsr(x16, x0, x1);
 __ Lsr(x17, x0, x2);
-__ Lsr(x18, x0, x3);
+__ Lsr(x28, x0, x3);
 __ Lsr(x19, x0, x4);
 __ Lsr(x20, x0, x5);
 __ Lsr(x21, x0, x6);
@@ -8166,7 +8159,7 @@ TEST(lsrv) {
 CHECK_EQUAL_64(value, x0);
 CHECK_EQUAL_64(value >> (shift[0] & 63), x16);
 CHECK_EQUAL_64(value >> (shift[1] & 63), x17);
-CHECK_EQUAL_64(value >> (shift[2] & 63), x18);
+CHECK_EQUAL_64(value >> (shift[2] & 63), x28);
 CHECK_EQUAL_64(value >> (shift[3] & 63), x19);
 CHECK_EQUAL_64(value >> (shift[4] & 63), x20);
 CHECK_EQUAL_64(value >> (shift[5] & 63), x21);
@@ -8200,7 +8193,7 @@ TEST(asrv) {
 __ Asr(x16, x0, x1);
 __ Asr(x17, x0, x2);
-__ Asr(x18, x0, x3);
+__ Asr(x28, x0, x3);
 __ Asr(x19, x0, x4);
 __ Asr(x20, x0, x5);
 __ Asr(x21, x0, x6);
@@ -8218,7 +8211,7 @@ TEST(asrv) {
 CHECK_EQUAL_64(value, x0);
 CHECK_EQUAL_64(value >> (shift[0] & 63), x16);
 CHECK_EQUAL_64(value >> (shift[1] & 63), x17);
-CHECK_EQUAL_64(value >> (shift[2] & 63), x18);
+CHECK_EQUAL_64(value >> (shift[2] & 63), x28);
 CHECK_EQUAL_64(value >> (shift[3] & 63), x19);
 CHECK_EQUAL_64(value >> (shift[4] & 63), x20);
 CHECK_EQUAL_64(value >> (shift[5] & 63), x21);
@@ -8252,7 +8245,7 @@ TEST(rorv) {
 __ Ror(x16, x0, x1);
 __ Ror(x17, x0, x2);
-__ Ror(x18, x0, x3);
+__ Ror(x28, x0, x3);
 __ Ror(x19, x0, x4);
 __ Ror(x20, x0, x5);
 __ Ror(x21, x0, x6);
@@ -8270,7 +8263,7 @@ TEST(rorv) {
 CHECK_EQUAL_64(value, x0);
 CHECK_EQUAL_64(0xF0123456789ABCDEUL, x16);
 CHECK_EQUAL_64(0xEF0123456789ABCDUL, x17);
-CHECK_EQUAL_64(0xDEF0123456789ABCUL, x18);
+CHECK_EQUAL_64(0xDEF0123456789ABCUL, x28);
 CHECK_EQUAL_64(0xCDEF0123456789ABUL, x19);
 CHECK_EQUAL_64(0xABCDEF0123456789UL, x20);
 CHECK_EQUAL_64(0x789ABCDEF0123456UL, x21);
@@ -8338,7 +8331,7 @@ TEST(sbfm) {
 __ sbfm(w17, w2, 24, 15);
 // Aliases.
-__ Asr(x18, x1, 32);
+__ Asr(x3, x1, 32);
 __ Asr(x19, x2, 32);
 __ Sbfiz(x20, x1, 8, 16);
 __ Sbfiz(x21, x2, 8, 16);
@@ -8364,7 +8357,7 @@ TEST(sbfm) {
 CHECK_EQUAL_32(0x54, w16);
 CHECK_EQUAL_32(0x00321000, w17);
-CHECK_EQUAL_64(0x01234567L, x18);
+CHECK_EQUAL_64(0x01234567L, x3);
 CHECK_EQUAL_64(0xFFFFFFFFFEDCBA98L, x19);
 CHECK_EQUAL_64(0xFFFFFFFFFFCDEF00L, x20);
 CHECK_EQUAL_64(0x321000L, x21);
@@ -8403,7 +8396,7 @@ TEST(ubfm) {
 __ Lsl(x15, x1, 63);
 __ Lsl(x16, x1, 0);
 __ Lsr(x17, x1, 32);
-__ Ubfiz(x18, x1, 8, 16);
+__ Ubfiz(x3, x1, 8, 16);
 __ Ubfx(x19, x1, 8, 16);
 __ Uxtb(x20, x1);
 __ Uxth(x21, x1);
@@ -8425,7 +8418,7 @@ TEST(ubfm) {
 CHECK_EQUAL_64(0x8000000000000000L, x15);
 CHECK_EQUAL_64(0x0123456789ABCDEFL, x16);
 CHECK_EQUAL_64(0x01234567L, x17);
-CHECK_EQUAL_64(0xCDEF00L, x18);
+CHECK_EQUAL_64(0xCDEF00L, x3);
 CHECK_EQUAL_64(0xABCDL, x19);
 CHECK_EQUAL_64(0xEFL, x20);
 CHECK_EQUAL_64(0xCDEFL, x21);
@@ -9359,8 +9352,8 @@ TEST(fcmp) {
 __ Fmov(s8, 0.0);
 __ Fmov(s9, 0.5);
-__ Mov(w18, 0x7F800001);  // Single precision NaN.
-__ Fmov(s18, w18);
+__ Mov(w19, 0x7F800001);  // Single precision NaN.
+__ Fmov(s18, w19);
 __ Fcmp(s8, s8);
 __ Mrs(x0, NZCV);
@@ -10190,6 +10183,9 @@ TEST(fcvtas) {
 INIT_V8();
 SETUP();
+int64_t scratch = 0;
+uintptr_t scratch_base = reinterpret_cast<uintptr_t>(&scratch);
+
 START();
 __ Fmov(s0, 1.0);
 __ Fmov(s1, 1.1);
@@ -10207,8 +10203,8 @@ TEST(fcvtas) {
 __ Fmov(d13, kFP64NegativeInfinity);
 __ Fmov(d14, kWMaxInt - 1);
 __ Fmov(d15, kWMinInt + 1);
+__ Fmov(s16, 2.5);
 __ Fmov(s17, 1.1);
-__ Fmov(s18, 2.5);
 __ Fmov(s19, -2.5);
 __ Fmov(s20, kFP32PositiveInfinity);
 __ Fmov(s21, kFP32NegativeInfinity);
@@ -10239,7 +10235,6 @@ TEST(fcvtas) {
 __ Fcvtas(w14, d14);
 __ Fcvtas(w15, d15);
 __ Fcvtas(x17, s17);
-__ Fcvtas(x18, s18);
 __ Fcvtas(x19, s19);
 __ Fcvtas(x20, s20);
 __ Fcvtas(x21, s21);
@@ -10250,6 +10245,12 @@ TEST(fcvtas) {
 __ Fcvtas(x26, d26);
 __ Fcvtas(x27, d27);
 __ Fcvtas(x28, d28);
+
+// Save results to the scratch memory, for those that don't fit in registers.
+__ Mov(x30, scratch_base);
+__ Fcvtas(x29, s16);
+__ Str(x29, MemOperand(x30));
+
 __ Fcvtas(x29, d29);
 __ Fcvtas(x30, d30);
 END();
@@ -10272,8 +10273,8 @@ TEST(fcvtas) {
 CHECK_EQUAL_64(0x80000000, x13);
 CHECK_EQUAL_64(0x7FFFFFFE, x14);
 CHECK_EQUAL_64(0x80000001, x15);
+CHECK_EQUAL_64(3, scratch);
 CHECK_EQUAL_64(1, x17);
-CHECK_EQUAL_64(3, x18);
 CHECK_EQUAL_64(0xFFFFFFFFFFFFFFFDUL, x19);
 CHECK_EQUAL_64(0x7FFFFFFFFFFFFFFFUL, x20);
 CHECK_EQUAL_64(0x8000000000000000UL, x21);
@@ -10339,7 +10340,7 @@ TEST(fcvtau) {
 __ Fcvtau(w15, d15);
 __ Fcvtau(x16, s16);
 __ Fcvtau(x17, s17);
-__ Fcvtau(x18, s18);
+__ Fcvtau(x7, s18);
 __ Fcvtau(x19, s19);
 __ Fcvtau(x20, s20);
 __ Fcvtau(x21, s21);
@@ -10371,7 +10372,7 @@ TEST(fcvtau) {
 CHECK_EQUAL_64(0xFFFFFFFE, x14);
 CHECK_EQUAL_64(1, x16);
 CHECK_EQUAL_64(1, x17);
-CHECK_EQUAL_64(3, x18);
+CHECK_EQUAL_64(3, x7);
 CHECK_EQUAL_64(0, x19);
 CHECK_EQUAL_64(0xFFFFFFFFFFFFFFFFUL, x20);
 CHECK_EQUAL_64(0, x21);
@@ -10389,6 +10390,9 @@ TEST(fcvtms) {
 INIT_V8();
 SETUP();
+int64_t scratch = 0;
+uintptr_t scratch_base = reinterpret_cast<uintptr_t>(&scratch);
+
 START();
 __ Fmov(s0, 1.0);
 __ Fmov(s1, 1.1);
@@ -10406,8 +10410,8 @@ TEST(fcvtms) {
 __ Fmov(d13, kFP64NegativeInfinity);
 __ Fmov(d14, kWMaxInt - 1);
 __ Fmov(d15, kWMinInt + 1);
+__ Fmov(s16, 1.5);
 __ Fmov(s17, 1.1);
-__ Fmov(s18, 1.5);
 __ Fmov(s19, -1.5);
 __ Fmov(s20, kFP32PositiveInfinity);
 __ Fmov(s21, kFP32NegativeInfinity);
@@ -10438,7 +10442,6 @@ TEST(fcvtms) {
 __ Fcvtms(w14, d14);
 __ Fcvtms(w15, d15);
 __ Fcvtms(x17, s17);
-__ Fcvtms(x18, s18);
 __ Fcvtms(x19, s19);
 __ Fcvtms(x20, s20);
 __ Fcvtms(x21, s21);
@@ -10449,6 +10452,12 @@ TEST(fcvtms) {
 __ Fcvtms(x26, d26);
 __ Fcvtms(x27, d27);
 __ Fcvtms(x28, d28);
+
+// Save results to the scratch memory, for those that don't fit in registers.
+__ Mov(x30, scratch_base);
+__ Fcvtms(x29, s16);
+__ Str(x29, MemOperand(x30));
+
 __ Fcvtms(x29, d29);
 __ Fcvtms(x30, d30);
 END();
@@ -10471,8 +10480,8 @@ TEST(fcvtms) {
 CHECK_EQUAL_64(0x80000000, x13);
 CHECK_EQUAL_64(0x7FFFFFFE, x14);
 CHECK_EQUAL_64(0x80000001, x15);
+CHECK_EQUAL_64(1, scratch);
 CHECK_EQUAL_64(1, x17);
-CHECK_EQUAL_64(1, x18);
 CHECK_EQUAL_64(0xFFFFFFFFFFFFFFFEUL, x19);
 CHECK_EQUAL_64(0x7FFFFFFFFFFFFFFFUL, x20);
 CHECK_EQUAL_64(0x8000000000000000UL, x21);
@@ -10491,6 +10500,9 @@ TEST(fcvtmu) {
 INIT_V8();
 SETUP();
+int64_t scratch = 0;
+uintptr_t scratch_base = reinterpret_cast<uintptr_t>(&scratch);
+
 START();
 __ Fmov(s0, 1.0);
 __ Fmov(s1, 1.1);
@@ -10508,8 +10520,8 @@ TEST(fcvtmu) {
 __ Fmov(d13, kFP64NegativeInfinity);
 __ Fmov(d14, kWMaxInt - 1);
 __ Fmov(d15, kWMinInt + 1);
+__ Fmov(s16, 1.5);
 __ Fmov(s17, 1.1);
-__ Fmov(s18, 1.5);
 __ Fmov(s19, -1.5);
 __ Fmov(s20, kFP32PositiveInfinity);
 __ Fmov(s21, kFP32NegativeInfinity);
@@ -10538,8 +10550,8 @@ TEST(fcvtmu) {
 __ Fcvtmu(w12, d12);
 __ Fcvtmu(w13, d13);
 __ Fcvtmu(w14, d14);
+__ Fcvtmu(w15, d15);
 __ Fcvtmu(x17, s17);
-__ Fcvtmu(x18, s18);
 __ Fcvtmu(x19, s19);
 __ Fcvtmu(x20, s20);
 __ Fcvtmu(x21, s21);
@@ -10550,6 +10562,12 @@ TEST(fcvtmu) {
 __ Fcvtmu(x26, d26);
 __ Fcvtmu(x27, d27);
 __ Fcvtmu(x28, d28);
+
+// Save results to the scratch memory, for those that don't fit in registers.
+__ Mov(x30, scratch_base);
+__ Fcvtmu(x29, s16);
+__ Str(x29, MemOperand(x30));
+
 __ Fcvtmu(x29, d29);
 __ Fcvtmu(x30, d30);
 END();
@@ -10571,8 +10589,9 @@ TEST(fcvtmu) {
 CHECK_EQUAL_64(0xFFFFFFFF, x12);
 CHECK_EQUAL_64(0, x13);
 CHECK_EQUAL_64(0x7FFFFFFE, x14);
+CHECK_EQUAL_64(0x0, x15);
+CHECK_EQUAL_64(1, scratch);
 CHECK_EQUAL_64(1, x17);
-CHECK_EQUAL_64(1, x18);
 CHECK_EQUAL_64(0x0UL, x19);
 CHECK_EQUAL_64(0xFFFFFFFFFFFFFFFFUL, x20);
 CHECK_EQUAL_64(0x0UL, x21);
...@@ -10591,6 +10610,9 @@ TEST(fcvtns) { ...@@ -10591,6 +10610,9 @@ TEST(fcvtns) {
INIT_V8(); INIT_V8();
SETUP(); SETUP();
int64_t scratch = 0;
uintptr_t scratch_base = reinterpret_cast<uintptr_t>(&scratch);
START(); START();
__ Fmov(s0, 1.0); __ Fmov(s0, 1.0);
__ Fmov(s1, 1.1); __ Fmov(s1, 1.1);
...@@ -10608,8 +10630,8 @@ TEST(fcvtns) { ...@@ -10608,8 +10630,8 @@ TEST(fcvtns) {
__ Fmov(d13, kFP64NegativeInfinity); __ Fmov(d13, kFP64NegativeInfinity);
__ Fmov(d14, kWMaxInt - 1); __ Fmov(d14, kWMaxInt - 1);
__ Fmov(d15, kWMinInt + 1); __ Fmov(d15, kWMinInt + 1);
__ Fmov(s16, 1.5);
__ Fmov(s17, 1.1); __ Fmov(s17, 1.1);
__ Fmov(s18, 1.5);
__ Fmov(s19, -1.5);
__ Fmov(s20, kFP32PositiveInfinity);
__ Fmov(s21, kFP32NegativeInfinity);
...@@ -10640,7 +10662,6 @@ TEST(fcvtns) {
__ Fcvtns(w14, d14);
__ Fcvtns(w15, d15);
__ Fcvtns(x17, s17);
__ Fcvtns(x18, s18);
__ Fcvtns(x19, s19);
__ Fcvtns(x20, s20);
__ Fcvtns(x21, s21);
...@@ -10651,6 +10672,12 @@ TEST(fcvtns) {
__ Fcvtns(x26, d26);
__ Fcvtns(x27, d27);
// __ Fcvtns(x28, d28);
// Save results to the scratch memory, for those that don't fit in registers.
__ Mov(x30, scratch_base);
__ Fcvtns(x29, s16);
__ Str(x29, MemOperand(x30));
__ Fcvtns(x29, d29);
__ Fcvtns(x30, d30);
END();
...@@ -10673,8 +10700,8 @@ TEST(fcvtns) {
CHECK_EQUAL_64(0x80000000, x13);
CHECK_EQUAL_64(0x7FFFFFFE, x14);
CHECK_EQUAL_64(0x80000001, x15);
CHECK_EQUAL_64(2, scratch);
CHECK_EQUAL_64(1, x17);
CHECK_EQUAL_64(2, x18);
CHECK_EQUAL_64(0xFFFFFFFFFFFFFFFEUL, x19);
CHECK_EQUAL_64(0x7FFFFFFFFFFFFFFFUL, x20);
CHECK_EQUAL_64(0x8000000000000000UL, x21);
...@@ -10701,6 +10728,7 @@ TEST(fcvtnu) {
__ Fmov(s4, kFP32PositiveInfinity);
__ Fmov(s5, kFP32NegativeInfinity);
__ Fmov(s6, 0xFFFFFF00); // Largest float < UINT32_MAX.
__ Fmov(s7, 1.5);
__ Fmov(d8, 1.0);
__ Fmov(d9, 1.1);
__ Fmov(d10, 1.5);
...@@ -10710,7 +10738,6 @@ TEST(fcvtnu) {
__ Fmov(d14, 0xFFFFFFFE);
__ Fmov(s16, 1.0);
__ Fmov(s17, 1.1);
__ Fmov(s18, 1.5);
__ Fmov(s19, -1.5);
__ Fmov(s20, kFP32PositiveInfinity);
__ Fmov(s21, kFP32NegativeInfinity);
...@@ -10730,6 +10757,7 @@ TEST(fcvtnu) {
__ Fcvtnu(w4, s4);
__ Fcvtnu(w5, s5);
__ Fcvtnu(w6, s6);
__ Fcvtnu(x7, s7);
__ Fcvtnu(w8, d8);
__ Fcvtnu(w9, d9);
__ Fcvtnu(w10, d10);
...@@ -10740,7 +10768,6 @@ TEST(fcvtnu) {
__ Fcvtnu(w15, d15);
__ Fcvtnu(x16, s16);
__ Fcvtnu(x17, s17);
__ Fcvtnu(x18, s18);
__ Fcvtnu(x19, s19);
__ Fcvtnu(x20, s20);
__ Fcvtnu(x21, s21);
...@@ -10763,6 +10790,7 @@ TEST(fcvtnu) {
CHECK_EQUAL_64(0xFFFFFFFF, x4);
CHECK_EQUAL_64(0, x5);
CHECK_EQUAL_64(0xFFFFFF00, x6);
CHECK_EQUAL_64(2, x7);
CHECK_EQUAL_64(1, x8);
CHECK_EQUAL_64(1, x9);
CHECK_EQUAL_64(2, x10);
...@@ -10772,7 +10800,6 @@ TEST(fcvtnu) {
CHECK_EQUAL_64(0xFFFFFFFE, x14);
CHECK_EQUAL_64(1, x16);
CHECK_EQUAL_64(1, x17);
CHECK_EQUAL_64(2, x18);
CHECK_EQUAL_64(0, x19);
CHECK_EQUAL_64(0xFFFFFFFFFFFFFFFFUL, x20);
CHECK_EQUAL_64(0, x21);
...@@ -10790,6 +10817,9 @@ TEST(fcvtzs) {
INIT_V8();
SETUP();
int64_t scratch = 0;
uintptr_t scratch_base = reinterpret_cast<uintptr_t>(&scratch);
START();
__ Fmov(s0, 1.0);
__ Fmov(s1, 1.1);
...@@ -10807,8 +10837,8 @@ TEST(fcvtzs) {
__ Fmov(d13, kFP64NegativeInfinity);
__ Fmov(d14, kWMaxInt - 1);
__ Fmov(d15, kWMinInt + 1);
__ Fmov(s16, 1.5);
__ Fmov(s17, 1.1);
__ Fmov(s18, 1.5);
__ Fmov(s19, -1.5);
__ Fmov(s20, kFP32PositiveInfinity);
__ Fmov(s21, kFP32NegativeInfinity);
...@@ -10839,7 +10869,6 @@ TEST(fcvtzs) {
__ Fcvtzs(w14, d14);
__ Fcvtzs(w15, d15);
__ Fcvtzs(x17, s17);
__ Fcvtzs(x18, s18);
__ Fcvtzs(x19, s19);
__ Fcvtzs(x20, s20);
__ Fcvtzs(x21, s21);
...@@ -10850,6 +10879,12 @@ TEST(fcvtzs) {
__ Fcvtzs(x26, d26);
__ Fcvtzs(x27, d27);
__ Fcvtzs(x28, d28);
// Save results to the scratch memory, for those that don't fit in registers.
__ Mov(x30, scratch_base);
__ Fcvtzs(x29, s16);
__ Str(x29, MemOperand(x30));
__ Fcvtzs(x29, d29);
__ Fcvtzs(x30, d30);
END();
...@@ -10872,8 +10907,8 @@ TEST(fcvtzs) {
CHECK_EQUAL_64(0x80000000, x13);
CHECK_EQUAL_64(0x7FFFFFFE, x14);
CHECK_EQUAL_64(0x80000001, x15);
CHECK_EQUAL_64(1, scratch);
CHECK_EQUAL_64(1, x17);
CHECK_EQUAL_64(1, x18);
CHECK_EQUAL_64(0xFFFFFFFFFFFFFFFFUL, x19);
CHECK_EQUAL_64(0x7FFFFFFFFFFFFFFFUL, x20);
CHECK_EQUAL_64(0x8000000000000000UL, x21);
...@@ -10892,6 +10927,9 @@ TEST(fcvtzu) {
INIT_V8();
SETUP();
int64_t scratch = 0;
uintptr_t scratch_base = reinterpret_cast<uintptr_t>(&scratch);
START();
__ Fmov(s0, 1.0);
__ Fmov(s1, 1.1);
...@@ -10909,8 +10947,8 @@ TEST(fcvtzu) {
__ Fmov(d13, kFP64NegativeInfinity);
__ Fmov(d14, kWMaxInt - 1);
__ Fmov(d15, kWMinInt + 1);
__ Fmov(s16, 1.5);
__ Fmov(s17, 1.1);
__ Fmov(s18, 1.5);
__ Fmov(s19, -1.5);
__ Fmov(s20, kFP32PositiveInfinity);
__ Fmov(s21, kFP32NegativeInfinity);
...@@ -10939,8 +10977,8 @@ TEST(fcvtzu) {
__ Fcvtzu(w12, d12);
__ Fcvtzu(w13, d13);
__ Fcvtzu(w14, d14);
__ Fcvtzu(w15, d15);
__ Fcvtzu(x17, s17);
__ Fcvtzu(x18, s18);
__ Fcvtzu(x19, s19);
__ Fcvtzu(x20, s20);
__ Fcvtzu(x21, s21);
...@@ -10951,6 +10989,12 @@ TEST(fcvtzu) {
__ Fcvtzu(x26, d26);
__ Fcvtzu(x27, d27);
__ Fcvtzu(x28, d28);
// Save results to the scratch memory, for those that don't fit in registers.
__ Mov(x30, scratch_base);
__ Fcvtzu(x29, s16);
__ Str(x29, MemOperand(x30));
__ Fcvtzu(x29, d29);
__ Fcvtzu(x30, d30);
END();
...@@ -10972,8 +11016,9 @@ TEST(fcvtzu) {
CHECK_EQUAL_64(0xFFFFFFFF, x12);
CHECK_EQUAL_64(0, x13);
CHECK_EQUAL_64(0x7FFFFFFE, x14);
CHECK_EQUAL_64(0x0, x15);
CHECK_EQUAL_64(1, scratch);
CHECK_EQUAL_64(1, x17);
CHECK_EQUAL_64(1, x18);
CHECK_EQUAL_64(0x0UL, x19);
CHECK_EQUAL_64(0xFFFFFFFFFFFFFFFFUL, x20);
CHECK_EQUAL_64(0x0UL, x21);
...@@ -11439,6 +11484,8 @@ TEST(zero_dest) {
__ Mov(x0, 0);
__ Mov(x1, literal_base);
for (int i = 2; i < x30.code(); i++) {
// Skip x18, the platform register.
if (i == 18) continue;
__ Add(Register::XRegFromCode(i), Register::XRegFromCode(i-1), x1);
}
before.Dump(&masm);
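Only writes to x18 are unsafe here: on Windows on Arm it holds the TEB pointer, so generated code must not clobber it, while reads are harmless (the i == 19 iteration above still uses x18 as a source via XRegFromCode(i - 1)). A sketch of how the guard could be factored out; IsTestWritable is a hypothetical helper, not part of this commit:

// Hypothetical helper: true if a test may freely clobber this register code.
inline bool IsTestWritable(int code) {
  return code != 18;  // x18 is the platform register on Windows on Arm.
}

for (int i = 2; i < x30.code(); i++) {
  if (!IsTestWritable(i)) continue;
  __ Add(Register::XRegFromCode(i), Register::XRegFromCode(i - 1), x1);
}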
...@@ -11503,6 +11550,8 @@ TEST(zero_dest_setflags) {
__ Mov(x0, 0);
__ Mov(x1, literal_base);
for (int i = 2; i < 30; i++) {
// Skip x18, the platform register.
if (i == 18) continue;
__ Add(Register::XRegFromCode(i), Register::XRegFromCode(i-1), x1);
}
before.Dump(&masm);
...@@ -11858,10 +11907,14 @@ static void PushPopSimpleHelper(int reg_count, int reg_size,
// Registers in the TmpList can be used by the macro assembler for debug code
// (for example in 'Pop'), so we can't use them here.
static RegList const allowed = ~(masm.TmpList()->list());
// x18 is reserved for the platform register.
// Disallow x31 / xzr, to ensure this list has an even number of elements, to
// ensure alignment.
RegList allowed = ~(masm.TmpList()->list() | x18.bit() | x31.bit());
if (reg_count == kPushPopMaxRegCount) {
reg_count = CountSetBits(allowed, kNumberOfRegisters);
}
DCHECK_EQ(reg_count % 2, 0);
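For concreteness, RegList here is a raw bitmask with one bit per register code (x18.bit() is the bit at position 18), so exclusion is plain bit clearing. Assuming the default arm64 TmpList of ip0/ip1 (x16/x17), the arithmetic behind the DCHECK works out as:

// 32 register codes, minus x16/x17 (TmpList), x18 (platform register) and
// x31 (the xzr/sp encoding): 32 - 2 - 1 - 1 = 28 usable registers.
RegList allowed = ~(masm.TmpList()->list() | x18.bit() | x31.bit());
int count = CountSetBits(allowed, kNumberOfRegisters);  // 28 under the above
DCHECK_EQ(count % 2, 0);  // even, so registers push in pairs and the stack
                          // pointer keeps its 16-byte alignment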
// Work out which registers to use, based on reg_size.
auto r = CreateRegisterArray<Register, kNumberOfRegisters>();
auto x = CreateRegisterArray<Register, kNumberOfRegisters>();
...@@ -12245,7 +12298,7 @@ TEST(push_pop) {
__ Claim(2);
__ Push(w2, w2, w1, w1);
__ Push(x3, x3);
__ Pop(w18, w19, w20, w21);
__ Pop(w30, w19, w20, w21);
__ Pop(x22, x23);
__ Claim(2);
...@@ -12259,8 +12312,10 @@ TEST(push_pop) {
__ Claim(2);
__ PushXRegList(0);
__ PopXRegList(0);
__ PushXRegList(0xFFFFFFFF);
__ PopXRegList(0xFFFFFFFF);
// Don't push/pop x18 (platform register) or xzr (for alignment)
RegList all_regs = 0xFFFFFFFF & ~(x18.bit() | x31.bit());
__ PushXRegList(all_regs);
__ PopXRegList(all_regs);
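The same exclusion, spelled out for the literal list: 0xFFFFFFFF covers register codes 0 through 31, and clearing bit 18 (platform register) and bit 31 (the xzr/sp encoding) leaves 30 registers, again an even count, so the pushes and pops pair up and sp stays 16-byte aligned. An equivalent way to write the mask, as a sketch:

RegList all_regs = 0xFFFFFFFF & ~((1ULL << 18) | (1ULL << 31));  // same mask as above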
__ Drop(12);
END();
...@@ -12287,7 +12342,7 @@ TEST(push_pop) {
CHECK_EQUAL_32(0x33333333U, w15);
CHECK_EQUAL_32(0x22222222U, w14);
CHECK_EQUAL_32(0x11111111U, w18);
CHECK_EQUAL_32(0x11111111U, w30);
CHECK_EQUAL_32(0x11111111U, w19);
CHECK_EQUAL_32(0x11111111U, w20);
CHECK_EQUAL_32(0x11111111U, w21);
...