summary | refs | log | tree | commit | diff
path: root/test
diff options
context:
space:
mode:
authorJim Grosbach <grosbach@apple.com>2014-04-17 20:47:31 +0000
committerJim Grosbach <grosbach@apple.com>2014-04-17 20:47:31 +0000
commit4af58f145d7dececbd866c7f8e942cbfc5801d90 (patch)
treee3c96ded8d61284ea5beabb736feb6ce79204524 /test
parent34ac90c024cbed8110389decc4144a9385f9bcb9 (diff)
downloadllvm-4af58f145d7dececbd866c7f8e942cbfc5801d90.tar.gz
llvm-4af58f145d7dececbd866c7f8e942cbfc5801d90.tar.bz2
llvm-4af58f145d7dececbd866c7f8e942cbfc5801d90.tar.xz
ARM64: [su]xtw use W regs as inputs, not X regs.
Update the SXT[BHW]/UXTW instruction aliases and the shifted reg addressing mode handling. PR19455 and rdar://16650642 git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@206495 91177308-0d34-0410-b5e6-96231b3b80d8
Diffstat (limited to 'test')
-rw-r--r--  test/CodeGen/ARM64/aapcs.ll                |  4
-rw-r--r--  test/CodeGen/ARM64/addr-type-promotion.ll  |  4
-rw-r--r--  test/CodeGen/ARM64/arith.ll                |  2
-rw-r--r--  test/CodeGen/ARM64/atomic.ll               | 24
-rw-r--r--  test/CodeGen/ARM64/coalesce-ext.ll         |  2
-rw-r--r--  test/CodeGen/ARM64/extend.ll               |  2
-rw-r--r--  test/CodeGen/ARM64/shifted-sext.ll         |  6
-rw-r--r--  test/CodeGen/ARM64/trunc-store.ll          |  4
-rw-r--r--  test/MC/ARM64/aliases.s                    | 26
-rw-r--r--  test/MC/ARM64/memory.s                     |  8
10 files changed, 37 insertions, 45 deletions
diff --git a/test/CodeGen/ARM64/aapcs.ll b/test/CodeGen/ARM64/aapcs.ll
index e4889b7716..3998aaad2d 100644
--- a/test/CodeGen/ARM64/aapcs.ll
+++ b/test/CodeGen/ARM64/aapcs.ll
@@ -57,7 +57,7 @@ define void @test_extension(i1 %bool, i8 %char, i16 %short, i32 %int) {
%ext_char = sext i8 %char to i64
store volatile i64 %ext_char, i64* @var64
-; CHECK: sxtb [[EXT:x[0-9]+]], x1
+; CHECK: sxtb [[EXT:x[0-9]+]], w1
; CHECK: str [[EXT]], [{{x[0-9]+}}, :lo12:var64]
%ext_short = zext i16 %short to i64
@@ -67,7 +67,7 @@ define void @test_extension(i1 %bool, i8 %char, i16 %short, i32 %int) {
%ext_int = zext i32 %int to i64
store volatile i64 %ext_int, i64* @var64
-; CHECK: uxtw [[EXT:x[0-9]+]], x3
+; CHECK: uxtw [[EXT:x[0-9]+]], w3
; CHECK: str [[EXT]], [{{x[0-9]+}}, :lo12:var64]
ret void
diff --git a/test/CodeGen/ARM64/addr-type-promotion.ll b/test/CodeGen/ARM64/addr-type-promotion.ll
index 0677603473..1a3ca8bd5b 100644
--- a/test/CodeGen/ARM64/addr-type-promotion.ll
+++ b/test/CodeGen/ARM64/addr-type-promotion.ll
@@ -11,8 +11,8 @@ define zeroext i8 @fullGtU(i32 %i1, i32 %i2) {
; CHECK: adrp [[PAGE:x[0-9]+]], _block@GOTPAGE
; CHECK: ldr [[ADDR:x[0-9]+]], {{\[}}[[PAGE]], _block@GOTPAGEOFF]
; CHECK-NEXT: ldr [[BLOCKBASE:x[0-9]+]], {{\[}}[[ADDR]]]
-; CHECK-NEXT: ldrb [[BLOCKVAL1:w[0-9]+]], {{\[}}[[BLOCKBASE]], x0, sxtw]
-; CHECK-NEXT: ldrb [[BLOCKVAL2:w[0-9]+]], {{\[}}[[BLOCKBASE]], x1, sxtw]
+; CHECK-NEXT: ldrb [[BLOCKVAL1:w[0-9]+]], {{\[}}[[BLOCKBASE]], w0, sxtw]
+; CHECK-NEXT: ldrb [[BLOCKVAL2:w[0-9]+]], {{\[}}[[BLOCKBASE]], w1, sxtw]
; CHECK-NEXT cmp [[BLOCKVAL1]], [[BLOCKVAL2]]
; CHECK-NEXT b.ne
; Next BB
diff --git a/test/CodeGen/ARM64/arith.ll b/test/CodeGen/ARM64/arith.ll
index b6ff0da3b2..db528f7601 100644
--- a/test/CodeGen/ARM64/arith.ll
+++ b/test/CodeGen/ARM64/arith.ll
@@ -155,7 +155,7 @@ entry:
define i64 @t17(i16 %a, i64 %x) nounwind ssp {
entry:
; CHECK-LABEL: t17:
-; CHECK: sxth [[REG:x[0-9]+]], x0
+; CHECK: sxth [[REG:x[0-9]+]], w0
; CHECK: sub x0, xzr, [[REG]], lsl #32
; CHECK: ret
%tmp16 = sext i16 %a to i64
diff --git a/test/CodeGen/ARM64/atomic.ll b/test/CodeGen/ARM64/atomic.ll
index e8d957a812..dbdc95c428 100644
--- a/test/CodeGen/ARM64/atomic.ll
+++ b/test/CodeGen/ARM64/atomic.ll
@@ -118,8 +118,7 @@ define i8 @atomic_load_relaxed_8(i8* %p, i32 %off32) {
%ptr_regoff = getelementptr i8* %p, i32 %off32
%val_regoff = load atomic i8* %ptr_regoff unordered, align 1
%tot1 = add i8 %val_unsigned, %val_regoff
- ; FIXME: syntax is incorrect: "sxtw" should not be able to go with an x-reg.
-; CHECK: ldrb {{w[0-9]+}}, [x0, x1, sxtw]
+; CHECK: ldrb {{w[0-9]+}}, [x0, w1, sxtw]
%ptr_unscaled = getelementptr i8* %p, i32 -256
%val_unscaled = load atomic i8* %ptr_unscaled monotonic, align 1
@@ -144,8 +143,7 @@ define i16 @atomic_load_relaxed_16(i16* %p, i32 %off32) {
%ptr_regoff = getelementptr i16* %p, i32 %off32
%val_regoff = load atomic i16* %ptr_regoff unordered, align 2
%tot1 = add i16 %val_unsigned, %val_regoff
- ; FIXME: syntax is incorrect: "sxtw" should not be able to go with an x-reg.
-; CHECK: ldrh {{w[0-9]+}}, [x0, x1, sxtw #1]
+; CHECK: ldrh {{w[0-9]+}}, [x0, w1, sxtw #1]
%ptr_unscaled = getelementptr i16* %p, i32 -128
%val_unscaled = load atomic i16* %ptr_unscaled monotonic, align 2
@@ -170,8 +168,7 @@ define i32 @atomic_load_relaxed_32(i32* %p, i32 %off32) {
%ptr_regoff = getelementptr i32* %p, i32 %off32
%val_regoff = load atomic i32* %ptr_regoff unordered, align 4
%tot1 = add i32 %val_unsigned, %val_regoff
- ; FIXME: syntax is incorrect: "sxtw" should not be able to go with an x-reg.
-; CHECK: ldr {{w[0-9]+}}, [x0, x1, sxtw #2]
+; CHECK: ldr {{w[0-9]+}}, [x0, w1, sxtw #2]
%ptr_unscaled = getelementptr i32* %p, i32 -64
%val_unscaled = load atomic i32* %ptr_unscaled monotonic, align 4
@@ -196,8 +193,7 @@ define i64 @atomic_load_relaxed_64(i64* %p, i32 %off32) {
%ptr_regoff = getelementptr i64* %p, i32 %off32
%val_regoff = load atomic i64* %ptr_regoff unordered, align 8
%tot1 = add i64 %val_unsigned, %val_regoff
- ; FIXME: syntax is incorrect: "sxtw" should not be able to go with an x-reg.
-; CHECK: ldr {{x[0-9]+}}, [x0, x1, sxtw #3]
+; CHECK: ldr {{x[0-9]+}}, [x0, w1, sxtw #3]
%ptr_unscaled = getelementptr i64* %p, i32 -32
%val_unscaled = load atomic i64* %ptr_unscaled monotonic, align 8
@@ -229,8 +225,7 @@ define void @atomic_store_relaxed_8(i8* %p, i32 %off32, i8 %val) {
%ptr_regoff = getelementptr i8* %p, i32 %off32
store atomic i8 %val, i8* %ptr_regoff unordered, align 1
- ; FIXME: syntax is incorrect: "sxtw" should not be able to go with an x-reg.
-; CHECK: strb {{w[0-9]+}}, [x0, x1, sxtw]
+; CHECK: strb {{w[0-9]+}}, [x0, w1, sxtw]
%ptr_unscaled = getelementptr i8* %p, i32 -256
store atomic i8 %val, i8* %ptr_unscaled monotonic, align 1
@@ -252,8 +247,7 @@ define void @atomic_store_relaxed_16(i16* %p, i32 %off32, i16 %val) {
%ptr_regoff = getelementptr i16* %p, i32 %off32
store atomic i16 %val, i16* %ptr_regoff unordered, align 2
- ; FIXME: syntax is incorrect: "sxtw" should not be able to go with an x-reg.
-; CHECK: strh {{w[0-9]+}}, [x0, x1, sxtw #1]
+; CHECK: strh {{w[0-9]+}}, [x0, w1, sxtw #1]
%ptr_unscaled = getelementptr i16* %p, i32 -128
store atomic i16 %val, i16* %ptr_unscaled monotonic, align 2
@@ -275,8 +269,7 @@ define void @atomic_store_relaxed_32(i32* %p, i32 %off32, i32 %val) {
%ptr_regoff = getelementptr i32* %p, i32 %off32
store atomic i32 %val, i32* %ptr_regoff unordered, align 4
- ; FIXME: syntax is incorrect: "sxtw" should not be able to go with an x-reg.
-; CHECK: str {{w[0-9]+}}, [x0, x1, sxtw #2]
+; CHECK: str {{w[0-9]+}}, [x0, w1, sxtw #2]
%ptr_unscaled = getelementptr i32* %p, i32 -64
store atomic i32 %val, i32* %ptr_unscaled monotonic, align 4
@@ -298,8 +291,7 @@ define void @atomic_store_relaxed_64(i64* %p, i32 %off32, i64 %val) {
%ptr_regoff = getelementptr i64* %p, i32 %off32
store atomic i64 %val, i64* %ptr_regoff unordered, align 8
- ; FIXME: syntax is incorrect: "sxtw" should not be able to go with an x-reg.
-; CHECK: str {{x[0-9]+}}, [x0, x1, sxtw #3]
+; CHECK: str {{x[0-9]+}}, [x0, w1, sxtw #3]
%ptr_unscaled = getelementptr i64* %p, i32 -32
store atomic i64 %val, i64* %ptr_unscaled monotonic, align 8
diff --git a/test/CodeGen/ARM64/coalesce-ext.ll b/test/CodeGen/ARM64/coalesce-ext.ll
index 9e8d08e055..9420bf3bb5 100644
--- a/test/CodeGen/ARM64/coalesce-ext.ll
+++ b/test/CodeGen/ARM64/coalesce-ext.ll
@@ -7,7 +7,7 @@ define i32 @test1sext(i64 %A, i64 %B, i32* %P, i64 *%P2) nounwind {
%D = trunc i64 %C to i32
%E = shl i64 %C, 32
%F = ashr i64 %E, 32
- ; CHECK: sxtw x[[EXT:[0-9]+]], x[[SUM]]
+ ; CHECK: sxtw x[[EXT:[0-9]+]], w[[SUM]]
store volatile i64 %F, i64 *%P2
; CHECK: str x[[EXT]]
store volatile i32 %D, i32* %P
diff --git a/test/CodeGen/ARM64/extend.ll b/test/CodeGen/ARM64/extend.ll
index 4d20543671..afcaca2c49 100644
--- a/test/CodeGen/ARM64/extend.ll
+++ b/test/CodeGen/ARM64/extend.ll
@@ -5,7 +5,7 @@ define i64 @foo(i32 %i) {
; CHECK: foo
; CHECK: adrp x[[REG:[0-9]+]], _array@GOTPAGE
; CHECK: ldr x[[REG1:[0-9]+]], [x[[REG]], _array@GOTPAGEOFF]
-; CHECK: ldrsw x0, [x[[REG1]], x0, sxtw #2]
+; CHECK: ldrsw x0, [x[[REG1]], w0, sxtw #2]
; CHECK: ret
%idxprom = sext i32 %i to i64
%arrayidx = getelementptr inbounds [0 x i32]* @array, i64 0, i64 %idxprom
diff --git a/test/CodeGen/ARM64/shifted-sext.ll b/test/CodeGen/ARM64/shifted-sext.ll
index e553be5fcf..a039e8ca12 100644
--- a/test/CodeGen/ARM64/shifted-sext.ll
+++ b/test/CodeGen/ARM64/shifted-sext.ll
@@ -133,7 +133,7 @@ define i64 @extendedRightShiftcharToint64By8(i8 signext %a) nounwind readnone ss
entry:
; CHECK-LABEL: extendedRightShiftcharToint64By8:
; CHECK: add w[[REG:[0-9]+]], w0, #1
-; CHECK: sxtb x[[REG]], x[[REG]]
+; CHECK: sxtb x[[REG]], w[[REG]]
; CHECK: asr x0, x[[REG]], #8
%inc = add i8 %a, 1
%conv = sext i8 %inc to i64
@@ -223,7 +223,7 @@ define i64 @extendedRightShiftshortToint64By16(i16 signext %a) nounwind readnone
entry:
; CHECK-LABEL: extendedRightShiftshortToint64By16:
; CHECK: add w[[REG:[0-9]+]], w0, #1
-; CHECK: sxth x[[REG]], x[[REG]]
+; CHECK: sxth x[[REG]], w[[REG]]
; CHECK: asr x0, x[[REG]], #16
%inc = add i16 %a, 1
%conv = sext i16 %inc to i64
@@ -268,7 +268,7 @@ define i64 @extendedRightShiftintToint64By32(i32 %a) nounwind readnone ssp {
entry:
; CHECK-LABEL: extendedRightShiftintToint64By32:
; CHECK: add w[[REG:[0-9]+]], w0, #1
-; CHECK: sxtw x[[REG]], x[[REG]]
+; CHECK: sxtw x[[REG]], w[[REG]]
; CHECK: asr x0, x[[REG]], #32
%inc = add nsw i32 %a, 1
%conv = sext i32 %inc to i64
diff --git a/test/CodeGen/ARM64/trunc-store.ll b/test/CodeGen/ARM64/trunc-store.ll
index e65f5b56fe..cf15247e15 100644
--- a/test/CodeGen/ARM64/trunc-store.ll
+++ b/test/CodeGen/ARM64/trunc-store.ll
@@ -22,7 +22,7 @@ define void @fct32(i32 %arg, i64 %var) {
; w0 is %arg
; CHECK-NEXT: sub w[[OFFSETREGNUM:[0-9]+]], w0, #1
; w1 is %var truncated
-; CHECK-NEXT: str w1, {{\[}}[[GLOBALADDR]], x[[OFFSETREGNUM]], sxtw #2]
+; CHECK-NEXT: str w1, {{\[}}[[GLOBALADDR]], w[[OFFSETREGNUM]], sxtw #2]
; CHECK-NEXT: ret
bb:
%.pre37 = load i32** @zptr32, align 8
@@ -42,7 +42,7 @@ define void @fct16(i32 %arg, i64 %var) {
; w0 is %arg
; CHECK-NEXT: sub w[[OFFSETREGNUM:[0-9]+]], w0, #1
; w1 is %var truncated
-; CHECK-NEXT: strh w1, {{\[}}[[GLOBALADDR]], x[[OFFSETREGNUM]], sxtw #1]
+; CHECK-NEXT: strh w1, {{\[}}[[GLOBALADDR]], w[[OFFSETREGNUM]], sxtw #1]
; CHECK-NEXT: ret
bb:
%.pre37 = load i16** @zptr16, align 8
diff --git a/test/MC/ARM64/aliases.s b/test/MC/ARM64/aliases.s
index f0d1d5d76a..4fe4b93d4f 100644
--- a/test/MC/ARM64/aliases.s
+++ b/test/MC/ARM64/aliases.s
@@ -239,19 +239,19 @@ foo:
; CHECK: uxtb w1, w2
; CHECK: uxth w1, w2
- sxtb x1, x2
- sxth x1, x2
- sxtw x1, x2
- uxtb x1, x2
- uxth x1, x2
- uxtw x1, x2
-
-; CHECK: sxtb x1, x2
-; CHECK: sxth x1, x2
-; CHECK: sxtw x1, x2
-; CHECK: uxtb x1, x2
-; CHECK: uxth x1, x2
-; CHECK: uxtw x1, x2
+ sxtb x1, w2
+ sxth x1, w2
+ sxtw x1, w2
+ uxtb x1, w2
+ uxth x1, w2
+ uxtw x1, w2
+
+; CHECK: sxtb x1, w2
+; CHECK: sxth x1, w2
+; CHECK: sxtw x1, w2
+; CHECK: uxtb x1, w2
+; CHECK: uxth x1, w2
+; CHECK: uxtw x1, w2
;-----------------------------------------------------------------------------
; Negate with carry
diff --git a/test/MC/ARM64/memory.s b/test/MC/ARM64/memory.s
index 47188c6b67..579859660f 100644
--- a/test/MC/ARM64/memory.s
+++ b/test/MC/ARM64/memory.s
@@ -426,14 +426,14 @@ foo:
; CHECK: ldr q1, [x1, x2, lsl #4] ; encoding: [0x21,0x78,0xe2,0x3c]
str d1, [sp, x3]
- str d1, [sp, x3, uxtw #3]
+ str d1, [sp, w3, uxtw #3]
str q1, [sp, x3]
- str q1, [sp, x3, uxtw #4]
+ str q1, [sp, w3, uxtw #4]
; CHECK: str d1, [sp, x3] ; encoding: [0xe1,0x6b,0x23,0xfc]
-; CHECK: str d1, [sp, x3, uxtw #3] ; encoding: [0xe1,0x5b,0x23,0xfc]
+; CHECK: str d1, [sp, w3, uxtw #3] ; encoding: [0xe1,0x5b,0x23,0xfc]
; CHECK: str q1, [sp, x3] ; encoding: [0xe1,0x6b,0xa3,0x3c]
-; CHECK: str q1, [sp, x3, uxtw #4] ; encoding: [0xe1,0x5b,0xa3,0x3c]
+; CHECK: str q1, [sp, w3, uxtw #4] ; encoding: [0xe1,0x5b,0xa3,0x3c]
;-----------------------------------------------------------------------------
; Load literal