Diffstat (limited to 'test/CodeGen/ARM/vector-DAGCombine.ll')
 -rw-r--r--  test/CodeGen/ARM/vector-DAGCombine.ll | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)
diff --git a/test/CodeGen/ARM/vector-DAGCombine.ll b/test/CodeGen/ARM/vector-DAGCombine.ll
index 793934e746..759da2235e 100644
--- a/test/CodeGen/ARM/vector-DAGCombine.ll
+++ b/test/CodeGen/ARM/vector-DAGCombine.ll
@@ -29,7 +29,7 @@ entry:
; Radar 8407927: Make sure that VMOVRRD gets optimized away when the result is
; converted back to be used as a vector type.
-; CHECK: test_vmovrrd_combine
+; CHECK-LABEL: test_vmovrrd_combine:
define <4 x i32> @test_vmovrrd_combine() nounwind {
entry:
br i1 undef, label %bb1, label %bb2
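
Every hunk in this patch is the same mechanical upgrade: a bare "; CHECK: <name>" that happened to match a function name becomes "; CHECK-LABEL: <name>:". FileCheck treats CHECK-LABEL matches as boundaries, splitting the input into blocks and verifying each function's CHECK lines only within its own block, so a failed match can no longer be silently satisfied by similar output from a neighboring function. The trailing colon also anchors the pattern to the emitted label itself rather than an incidental mention of the symbol, such as a call site. A minimal illustration of the scoping (not taken from this test):

; CHECK-LABEL: test_vmovrrd_combine:
; CHECK-NOT: vmov r
; With CHECK-LABEL, the CHECK-NOT above is confined to this function's
; output; with a bare "; CHECK: test_vmovrrd_combine" it would keep
; scanning into the next function's code as well.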
@@ -136,7 +136,7 @@ define i16 @foldBuildVectors() {
; Test that we generate vrev and vext for reverse shuffles of v8i16
; vectors.
-; CHECK: reverse_v8i16
+; CHECK-LABEL: reverse_v8i16:
define void @reverse_v8i16(<8 x i16>* %loadaddr, <8 x i16>* %storeaddr) {
%v0 = load <8 x i16>* %loadaddr
; CHECK: vrev64.16
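
vrev64.16 reverses the 16-bit lanes only within each 64-bit half of a q register, so a full reversal of all eight lanes also needs the two halves swapped, which is the vext the comment refers to (e.g. a vext.16 by #4 rotates the halves past each other). The hunk elides the body of reverse_v8i16; the shuffle under test presumably has the shape of this illustrative snippet:

  %v1 = shufflevector <8 x i16> %v0, <8 x i16> undef,
        <8 x i32> <i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>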
@@ -149,7 +149,7 @@ define void @reverse_v8i16(<8 x i16>* %loadaddr, <8 x i16>* %storeaddr) {
; Test that we generate vrev and vext for reverse shuffles of v16i8
; vectors.
-; CHECK: reverse_v16i8
+; CHECK-LABEL: reverse_v16i8:
define void @reverse_v16i8(<16 x i8>* %loadaddr, <16 x i8>* %storeaddr) {
%v0 = load <16 x i8>* %loadaddr
; CHECK: vrev64.8
@@ -165,7 +165,7 @@ define void @reverse_v16i8(<16 x i8>* %loadaddr, <16 x i8>* %storeaddr) {
; vldr cannot handle unaligned loads.
; Fall back to vld1.32, which can, instead of using general-purpose loads
; followed by a costly sequence of instructions to build the vector register.
-; CHECK: t3
+; CHECK-LABEL: t3:
; CHECK: vld1.32 {[[REG:d[0-9]+]][0]}
; CHECK: vld1.32 {[[REG]][1]}
; CHECK: vmull.u8 q{{[0-9]+}}, [[REG]], [[REG]]
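
vldr faults on addresses that are not word-aligned, while vld1 tolerates unaligned accesses, which is why the fallback here is correct as well as cheaper than assembling the vector through general-purpose loads. A minimal reduction of the pattern (an assumed shape, since t3's body is elided by the hunk):

define <2 x i32> @unaligned_lane_load(i32* %p) {
  ; The align-1 load cannot be selected as vldr; it becomes a
  ; vld1.32 lane load instead of an ldr followed by a vmov.
  %x = load i32* %p, align 1
  %v = insertelement <2 x i32> undef, i32 %x, i32 0
  ret <2 x i32> %v
}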
@@ -188,7 +188,7 @@ declare <8 x i16> @llvm.arm.neon.vmullu.v8i16(<8 x i8>, <8 x i8>)
; Check that (insert_vector_elt (load)) => (vector_load).
; Thus, check that scalar_to_vector does not interfere with that.
define <8 x i16> @t4(i8* nocapture %sp0) {
-; CHECK: t4
+; CHECK-LABEL: t4:
; CHECK: vld1.32 {{{d[0-9]+}}[0]}, [r0]
entry:
%pix_sp0.0.cast = bitcast i8* %sp0 to i32*
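
For t4, the combine in question turns a scalar load feeding an insertelement into a single lane load. Continuing the truncated body above, the pattern presumably looks like this (a hypothetical continuation, with names modeled on the %pix_sp0.0.cast shown):

  %pix_sp0.0.copyload = load i32* %pix_sp0.0.cast, align 1
  %vec = insertelement <2 x i32> undef, i32 %pix_sp0.0.copyload, i32 0

which is what lets the backend select the single vld1.32 lane load the CHECK line expects, rather than an ldr into a core register followed by a vmov into the lane.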
@@ -202,7 +202,7 @@ entry:
; Make sure vector load is used for all three loads.
; Lowering to a build_vector was breaking the single-use property of the load of
; %pix_sp0.0.copyload.
-; CHECK: t5
+; CHECK-LABEL: t5:
; CHECK: vld1.32 {[[REG1:d[0-9]+]][1]}, [r0]
; CHECK: vorr [[REG2:d[0-9]+]], [[REG1]], [[REG1]]
; CHECK: vld1.32 {[[REG1]][0]}, [r1]
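
The expected vorr here is a plain register copy. Judging from the CHECK lines (t5's body is elided by the hunk), the test builds two vectors that share the lane loaded from r0, something of this illustrative shape, where %vec has two users:

  ; %ld0..%ld1 stand for align-1 i32 loads (hypothetical names)
  %vec = insertelement <2 x i32> undef, i32 %ld0, i32 1
  %v1  = insertelement <2 x i32> %vec,  i32 %ld1, i32 0
  %v2  = insertelement <2 x i32> %vec,  i32 %ld2, i32 0

If the insertelement chains were lowered to build_vector nodes, %ld0 would acquire a second use, the single-use requirement of the load-to-lane combine above would fail, and the backend would fall back to scalar loads. Keeping the lane inserts lets it load %ld0 straight into lane 1 once and copy the register with vorr.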