author    Bob Wilson <bob.wilson@apple.com>  2009-10-06 21:16:19 +0000
committer Bob Wilson <bob.wilson@apple.com>  2009-10-06 21:16:19 +0000
commit    ec1d81c3898753a78c934cd4aae50032d9483e53 (patch)
tree      e9abc4eb80bcdba31ffec85e04eb10d320d4bf67 /test
parent    a3e8bf8412481139827b61f83bdd84637af800b8 (diff)
Update NEON struct names to match llvm-gcc changes.
(This is not required for correctness but might help with sanity.)

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@83415 91177308-0d34-0410-b5e6-96231b3b80d8
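For reference, a minimal before/after sketch of the naming convention, taken from the type definitions in the diff below: the old llvm-gcc builtin-style names encode the vector element machine mode plus a struct-arity suffix, while the new names mirror the NEON C container types (element type x lane count x register count).

; old llvm-gcc builtin-style name: v8qi (vector of 8 quarter-ints, i.e. i8) with arity suffix 2
%struct.__builtin_neon_v8qi2 = type { <8 x i8>, <8 x i8> }
; new name: matches the NEON C type int8x8x2_t (i8 elements, 8 lanes, 2 D registers)
%struct.__neon_int8x8x2_t = type { <8 x i8>, <8 x i8> }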
Diffstat (limited to 'test')
-rw-r--r--  test/CodeGen/ARM/vld2.ll     |  40
-rw-r--r--  test/CodeGen/ARM/vld3.ll     |  40
-rw-r--r--  test/CodeGen/ARM/vld4.ll     |  40
-rw-r--r--  test/CodeGen/ARM/vldlane.ll  | 144
-rw-r--r--  test/CodeGen/ARM/vtbl.ll     |  66
-rw-r--r--  test/CodeGen/ARM/vtrn.ll     |  10
-rw-r--r--  test/CodeGen/ARM/vuzp.ll     |  10
-rw-r--r--  test/CodeGen/ARM/vzip.ll     |  10
8 files changed, 165 insertions, 195 deletions
diff --git a/test/CodeGen/ARM/vld2.ll b/test/CodeGen/ARM/vld2.ll
index 36e54bdf47..ec9111c39f 100644
--- a/test/CodeGen/ARM/vld2.ll
+++ b/test/CodeGen/ARM/vld2.ll
@@ -1,16 +1,16 @@
; RUN: llc < %s -march=arm -mattr=+neon | FileCheck %s
-%struct.__builtin_neon_v8qi2 = type { <8 x i8>, <8 x i8> }
-%struct.__builtin_neon_v4hi2 = type { <4 x i16>, <4 x i16> }
-%struct.__builtin_neon_v2si2 = type { <2 x i32>, <2 x i32> }
-%struct.__builtin_neon_v2sf2 = type { <2 x float>, <2 x float> }
+%struct.__neon_int8x8x2_t = type { <8 x i8>, <8 x i8> }
+%struct.__neon_int16x4x2_t = type { <4 x i16>, <4 x i16> }
+%struct.__neon_int32x2x2_t = type { <2 x i32>, <2 x i32> }
+%struct.__neon_float32x2x2_t = type { <2 x float>, <2 x float> }
define <8 x i8> @vld2i8(i8* %A) nounwind {
;CHECK: vld2i8:
;CHECK: vld2.8
- %tmp1 = call %struct.__builtin_neon_v8qi2 @llvm.arm.neon.vld2.v8i8(i8* %A)
- %tmp2 = extractvalue %struct.__builtin_neon_v8qi2 %tmp1, 0
- %tmp3 = extractvalue %struct.__builtin_neon_v8qi2 %tmp1, 1
+ %tmp1 = call %struct.__neon_int8x8x2_t @llvm.arm.neon.vld2.v8i8(i8* %A)
+ %tmp2 = extractvalue %struct.__neon_int8x8x2_t %tmp1, 0
+ %tmp3 = extractvalue %struct.__neon_int8x8x2_t %tmp1, 1
%tmp4 = add <8 x i8> %tmp2, %tmp3
ret <8 x i8> %tmp4
}
@@ -18,9 +18,9 @@ define <8 x i8> @vld2i8(i8* %A) nounwind {
define <4 x i16> @vld2i16(i16* %A) nounwind {
;CHECK: vld2i16:
;CHECK: vld2.16
- %tmp1 = call %struct.__builtin_neon_v4hi2 @llvm.arm.neon.vld2.v4i16(i16* %A)
- %tmp2 = extractvalue %struct.__builtin_neon_v4hi2 %tmp1, 0
- %tmp3 = extractvalue %struct.__builtin_neon_v4hi2 %tmp1, 1
+ %tmp1 = call %struct.__neon_int16x4x2_t @llvm.arm.neon.vld2.v4i16(i16* %A)
+ %tmp2 = extractvalue %struct.__neon_int16x4x2_t %tmp1, 0
+ %tmp3 = extractvalue %struct.__neon_int16x4x2_t %tmp1, 1
%tmp4 = add <4 x i16> %tmp2, %tmp3
ret <4 x i16> %tmp4
}
@@ -28,9 +28,9 @@ define <4 x i16> @vld2i16(i16* %A) nounwind {
define <2 x i32> @vld2i32(i32* %A) nounwind {
;CHECK: vld2i32:
;CHECK: vld2.32
- %tmp1 = call %struct.__builtin_neon_v2si2 @llvm.arm.neon.vld2.v2i32(i32* %A)
- %tmp2 = extractvalue %struct.__builtin_neon_v2si2 %tmp1, 0
- %tmp3 = extractvalue %struct.__builtin_neon_v2si2 %tmp1, 1
+ %tmp1 = call %struct.__neon_int32x2x2_t @llvm.arm.neon.vld2.v2i32(i32* %A)
+ %tmp2 = extractvalue %struct.__neon_int32x2x2_t %tmp1, 0
+ %tmp3 = extractvalue %struct.__neon_int32x2x2_t %tmp1, 1
%tmp4 = add <2 x i32> %tmp2, %tmp3
ret <2 x i32> %tmp4
}
@@ -38,14 +38,14 @@ define <2 x i32> @vld2i32(i32* %A) nounwind {
define <2 x float> @vld2f(float* %A) nounwind {
;CHECK: vld2f:
;CHECK: vld2.32
- %tmp1 = call %struct.__builtin_neon_v2sf2 @llvm.arm.neon.vld2.v2f32(float* %A)
- %tmp2 = extractvalue %struct.__builtin_neon_v2sf2 %tmp1, 0
- %tmp3 = extractvalue %struct.__builtin_neon_v2sf2 %tmp1, 1
+ %tmp1 = call %struct.__neon_float32x2x2_t @llvm.arm.neon.vld2.v2f32(float* %A)
+ %tmp2 = extractvalue %struct.__neon_float32x2x2_t %tmp1, 0
+ %tmp3 = extractvalue %struct.__neon_float32x2x2_t %tmp1, 1
%tmp4 = add <2 x float> %tmp2, %tmp3
ret <2 x float> %tmp4
}
-declare %struct.__builtin_neon_v8qi2 @llvm.arm.neon.vld2.v8i8(i8*) nounwind readonly
-declare %struct.__builtin_neon_v4hi2 @llvm.arm.neon.vld2.v4i16(i8*) nounwind readonly
-declare %struct.__builtin_neon_v2si2 @llvm.arm.neon.vld2.v2i32(i8*) nounwind readonly
-declare %struct.__builtin_neon_v2sf2 @llvm.arm.neon.vld2.v2f32(i8*) nounwind readonly
+declare %struct.__neon_int8x8x2_t @llvm.arm.neon.vld2.v8i8(i8*) nounwind readonly
+declare %struct.__neon_int16x4x2_t @llvm.arm.neon.vld2.v4i16(i8*) nounwind readonly
+declare %struct.__neon_int32x2x2_t @llvm.arm.neon.vld2.v2i32(i8*) nounwind readonly
+declare %struct.__neon_float32x2x2_t @llvm.arm.neon.vld2.v2f32(i8*) nounwind readonly
diff --git a/test/CodeGen/ARM/vld3.ll b/test/CodeGen/ARM/vld3.ll
index aa38bb0ec6..b03d74ab5d 100644
--- a/test/CodeGen/ARM/vld3.ll
+++ b/test/CodeGen/ARM/vld3.ll
@@ -1,16 +1,16 @@
; RUN: llc < %s -march=arm -mattr=+neon | FileCheck %s
-%struct.__builtin_neon_v8qi3 = type { <8 x i8>, <8 x i8>, <8 x i8> }
-%struct.__builtin_neon_v4hi3 = type { <4 x i16>, <4 x i16>, <4 x i16> }
-%struct.__builtin_neon_v2si3 = type { <2 x i32>, <2 x i32>, <2 x i32> }
-%struct.__builtin_neon_v2sf3 = type { <2 x float>, <2 x float>, <2 x float> }
+%struct.__neon_int8x8x3_t = type { <8 x i8>, <8 x i8>, <8 x i8> }
+%struct.__neon_int16x4x3_t = type { <4 x i16>, <4 x i16>, <4 x i16> }
+%struct.__neon_int32x2x3_t = type { <2 x i32>, <2 x i32>, <2 x i32> }
+%struct.__neon_float32x2x3_t = type { <2 x float>, <2 x float>, <2 x float> }
define <8 x i8> @vld3i8(i8* %A) nounwind {
;CHECK: vld3i8:
;CHECK: vld3.8
- %tmp1 = call %struct.__builtin_neon_v8qi3 @llvm.arm.neon.vld3.v8i8(i8* %A)
- %tmp2 = extractvalue %struct.__builtin_neon_v8qi3 %tmp1, 0
- %tmp3 = extractvalue %struct.__builtin_neon_v8qi3 %tmp1, 2
+ %tmp1 = call %struct.__neon_int8x8x3_t @llvm.arm.neon.vld3.v8i8(i8* %A)
+ %tmp2 = extractvalue %struct.__neon_int8x8x3_t %tmp1, 0
+ %tmp3 = extractvalue %struct.__neon_int8x8x3_t %tmp1, 2
%tmp4 = add <8 x i8> %tmp2, %tmp3
ret <8 x i8> %tmp4
}
@@ -18,9 +18,9 @@ define <8 x i8> @vld3i8(i8* %A) nounwind {
define <4 x i16> @vld3i16(i16* %A) nounwind {
;CHECK: vld3i16:
;CHECK: vld3.16
- %tmp1 = call %struct.__builtin_neon_v4hi3 @llvm.arm.neon.vld3.v4i16(i16* %A)
- %tmp2 = extractvalue %struct.__builtin_neon_v4hi3 %tmp1, 0
- %tmp3 = extractvalue %struct.__builtin_neon_v4hi3 %tmp1, 2
+ %tmp1 = call %struct.__neon_int16x4x3_t @llvm.arm.neon.vld3.v4i16(i16* %A)
+ %tmp2 = extractvalue %struct.__neon_int16x4x3_t %tmp1, 0
+ %tmp3 = extractvalue %struct.__neon_int16x4x3_t %tmp1, 2
%tmp4 = add <4 x i16> %tmp2, %tmp3
ret <4 x i16> %tmp4
}
@@ -28,9 +28,9 @@ define <4 x i16> @vld3i16(i16* %A) nounwind {
define <2 x i32> @vld3i32(i32* %A) nounwind {
;CHECK: vld3i32:
;CHECK: vld3.32
- %tmp1 = call %struct.__builtin_neon_v2si3 @llvm.arm.neon.vld3.v2i32(i32* %A)
- %tmp2 = extractvalue %struct.__builtin_neon_v2si3 %tmp1, 0
- %tmp3 = extractvalue %struct.__builtin_neon_v2si3 %tmp1, 2
+ %tmp1 = call %struct.__neon_int32x2x3_t @llvm.arm.neon.vld3.v2i32(i32* %A)
+ %tmp2 = extractvalue %struct.__neon_int32x2x3_t %tmp1, 0
+ %tmp3 = extractvalue %struct.__neon_int32x2x3_t %tmp1, 2
%tmp4 = add <2 x i32> %tmp2, %tmp3
ret <2 x i32> %tmp4
}
@@ -38,14 +38,14 @@ define <2 x i32> @vld3i32(i32* %A) nounwind {
define <2 x float> @vld3f(float* %A) nounwind {
;CHECK: vld3f:
;CHECK: vld3.32
- %tmp1 = call %struct.__builtin_neon_v2sf3 @llvm.arm.neon.vld3.v2f32(float* %A)
- %tmp2 = extractvalue %struct.__builtin_neon_v2sf3 %tmp1, 0
- %tmp3 = extractvalue %struct.__builtin_neon_v2sf3 %tmp1, 2
+ %tmp1 = call %struct.__neon_float32x2x3_t @llvm.arm.neon.vld3.v2f32(float* %A)
+ %tmp2 = extractvalue %struct.__neon_float32x2x3_t %tmp1, 0
+ %tmp3 = extractvalue %struct.__neon_float32x2x3_t %tmp1, 2
%tmp4 = add <2 x float> %tmp2, %tmp3
ret <2 x float> %tmp4
}
-declare %struct.__builtin_neon_v8qi3 @llvm.arm.neon.vld3.v8i8(i8*) nounwind readonly
-declare %struct.__builtin_neon_v4hi3 @llvm.arm.neon.vld3.v4i16(i8*) nounwind readonly
-declare %struct.__builtin_neon_v2si3 @llvm.arm.neon.vld3.v2i32(i8*) nounwind readonly
-declare %struct.__builtin_neon_v2sf3 @llvm.arm.neon.vld3.v2f32(i8*) nounwind readonly
+declare %struct.__neon_int8x8x3_t @llvm.arm.neon.vld3.v8i8(i8*) nounwind readonly
+declare %struct.__neon_int16x4x3_t @llvm.arm.neon.vld3.v4i16(i8*) nounwind readonly
+declare %struct.__neon_int32x2x3_t @llvm.arm.neon.vld3.v2i32(i8*) nounwind readonly
+declare %struct.__neon_float32x2x3_t @llvm.arm.neon.vld3.v2f32(i8*) nounwind readonly
diff --git a/test/CodeGen/ARM/vld4.ll b/test/CodeGen/ARM/vld4.ll
index 4d59a88ab8..6e6bb75c16 100644
--- a/test/CodeGen/ARM/vld4.ll
+++ b/test/CodeGen/ARM/vld4.ll
@@ -1,16 +1,16 @@
; RUN: llc < %s -march=arm -mattr=+neon | FileCheck %s
-%struct.__builtin_neon_v8qi4 = type { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> }
-%struct.__builtin_neon_v4hi4 = type { <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16> }
-%struct.__builtin_neon_v2si4 = type { <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32> }
-%struct.__builtin_neon_v2sf4 = type { <2 x float>, <2 x float>, <2 x float>, <2 x float> }
+%struct.__neon_int8x8x4_t = type { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> }
+%struct.__neon_int16x4x4_t = type { <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16> }
+%struct.__neon_int32x2x4_t = type { <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32> }
+%struct.__neon_float32x2x4_t = type { <2 x float>, <2 x float>, <2 x float>, <2 x float> }
define <8 x i8> @vld4i8(i8* %A) nounwind {
;CHECK: vld4i8:
;CHECK: vld4.8
- %tmp1 = call %struct.__builtin_neon_v8qi4 @llvm.arm.neon.vld4.v8i8(i8* %A)
- %tmp2 = extractvalue %struct.__builtin_neon_v8qi4 %tmp1, 0
- %tmp3 = extractvalue %struct.__builtin_neon_v8qi4 %tmp1, 2
+ %tmp1 = call %struct.__neon_int8x8x4_t @llvm.arm.neon.vld4.v8i8(i8* %A)
+ %tmp2 = extractvalue %struct.__neon_int8x8x4_t %tmp1, 0
+ %tmp3 = extractvalue %struct.__neon_int8x8x4_t %tmp1, 2
%tmp4 = add <8 x i8> %tmp2, %tmp3
ret <8 x i8> %tmp4
}
@@ -18,9 +18,9 @@ define <8 x i8> @vld4i8(i8* %A) nounwind {
define <4 x i16> @vld4i16(i16* %A) nounwind {
;CHECK: vld4i16:
;CHECK: vld4.16
- %tmp1 = call %struct.__builtin_neon_v4hi4 @llvm.arm.neon.vld4.v4i16(i16* %A)
- %tmp2 = extractvalue %struct.__builtin_neon_v4hi4 %tmp1, 0
- %tmp3 = extractvalue %struct.__builtin_neon_v4hi4 %tmp1, 2
+ %tmp1 = call %struct.__neon_int16x4x4_t @llvm.arm.neon.vld4.v4i16(i16* %A)
+ %tmp2 = extractvalue %struct.__neon_int16x4x4_t %tmp1, 0
+ %tmp3 = extractvalue %struct.__neon_int16x4x4_t %tmp1, 2
%tmp4 = add <4 x i16> %tmp2, %tmp3
ret <4 x i16> %tmp4
}
@@ -28,9 +28,9 @@ define <4 x i16> @vld4i16(i16* %A) nounwind {
define <2 x i32> @vld4i32(i32* %A) nounwind {
;CHECK: vld4i32:
;CHECK: vld4.32
- %tmp1 = call %struct.__builtin_neon_v2si4 @llvm.arm.neon.vld4.v2i32(i32* %A)
- %tmp2 = extractvalue %struct.__builtin_neon_v2si4 %tmp1, 0
- %tmp3 = extractvalue %struct.__builtin_neon_v2si4 %tmp1, 2
+ %tmp1 = call %struct.__neon_int32x2x4_t @llvm.arm.neon.vld4.v2i32(i32* %A)
+ %tmp2 = extractvalue %struct.__neon_int32x2x4_t %tmp1, 0
+ %tmp3 = extractvalue %struct.__neon_int32x2x4_t %tmp1, 2
%tmp4 = add <2 x i32> %tmp2, %tmp3
ret <2 x i32> %tmp4
}
@@ -38,14 +38,14 @@ define <2 x i32> @vld4i32(i32* %A) nounwind {
define <2 x float> @vld4f(float* %A) nounwind {
;CHECK: vld4f:
;CHECK: vld4.32
- %tmp1 = call %struct.__builtin_neon_v2sf4 @llvm.arm.neon.vld4.v2f32(float* %A)
- %tmp2 = extractvalue %struct.__builtin_neon_v2sf4 %tmp1, 0
- %tmp3 = extractvalue %struct.__builtin_neon_v2sf4 %tmp1, 2
+ %tmp1 = call %struct.__neon_float32x2x4_t @llvm.arm.neon.vld4.v2f32(float* %A)
+ %tmp2 = extractvalue %struct.__neon_float32x2x4_t %tmp1, 0
+ %tmp3 = extractvalue %struct.__neon_float32x2x4_t %tmp1, 2
%tmp4 = add <2 x float> %tmp2, %tmp3
ret <2 x float> %tmp4
}
-declare %struct.__builtin_neon_v8qi4 @llvm.arm.neon.vld4.v8i8(i8*) nounwind readonly
-declare %struct.__builtin_neon_v4hi4 @llvm.arm.neon.vld4.v4i16(i8*) nounwind readonly
-declare %struct.__builtin_neon_v2si4 @llvm.arm.neon.vld4.v2i32(i8*) nounwind readonly
-declare %struct.__builtin_neon_v2sf4 @llvm.arm.neon.vld4.v2f32(i8*) nounwind readonly
+declare %struct.__neon_int8x8x4_t @llvm.arm.neon.vld4.v8i8(i8*) nounwind readonly
+declare %struct.__neon_int16x4x4_t @llvm.arm.neon.vld4.v4i16(i8*) nounwind readonly
+declare %struct.__neon_int32x2x4_t @llvm.arm.neon.vld4.v2i32(i8*) nounwind readonly
+declare %struct.__neon_float32x2x4_t @llvm.arm.neon.vld4.v2f32(i8*) nounwind readonly
diff --git a/test/CodeGen/ARM/vldlane.ll b/test/CodeGen/ARM/vldlane.ll
index 01334a6be7..bbe948ce3a 100644
--- a/test/CodeGen/ARM/vldlane.ll
+++ b/test/CodeGen/ARM/vldlane.ll
@@ -1,17 +1,17 @@
; RUN: llc < %s -march=arm -mattr=+neon | FileCheck %s
-%struct.__builtin_neon_v8qi2 = type { <8 x i8>, <8 x i8> }
-%struct.__builtin_neon_v4hi2 = type { <4 x i16>, <4 x i16> }
-%struct.__builtin_neon_v2si2 = type { <2 x i32>, <2 x i32> }
-%struct.__builtin_neon_v2sf2 = type { <2 x float>, <2 x float> }
+%struct.__neon_int8x8x2_t = type { <8 x i8>, <8 x i8> }
+%struct.__neon_int16x4x2_t = type { <4 x i16>, <4 x i16> }
+%struct.__neon_int32x2x2_t = type { <2 x i32>, <2 x i32> }
+%struct.__neon_float32x2x2_t = type { <2 x float>, <2 x float> }
define <8 x i8> @vld2lanei8(i8* %A, <8 x i8>* %B) nounwind {
;CHECK: vld2lanei8:
;CHECK: vld2.8
%tmp1 = load <8 x i8>* %B
- %tmp2 = call %struct.__builtin_neon_v8qi2 @llvm.arm.neon.vld2lane.v8i8(i8* %A, <8 x i8> %tmp1, <8 x i8> %tmp1, i32 1)
- %tmp3 = extractvalue %struct.__builtin_neon_v8qi2 %tmp2, 0
- %tmp4 = extractvalue %struct.__builtin_neon_v8qi2 %tmp2, 1
+ %tmp2 = call %struct.__neon_int8x8x2_t @llvm.arm.neon.vld2lane.v8i8(i8* %A, <8 x i8> %tmp1, <8 x i8> %tmp1, i32 1)
+ %tmp3 = extractvalue %struct.__neon_int8x8x2_t %tmp2, 0
+ %tmp4 = extractvalue %struct.__neon_int8x8x2_t %tmp2, 1
%tmp5 = add <8 x i8> %tmp3, %tmp4
ret <8 x i8> %tmp5
}
@@ -20,9 +20,9 @@ define <4 x i16> @vld2lanei16(i16* %A, <4 x i16>* %B) nounwind {
;CHECK: vld2lanei16:
;CHECK: vld2.16
%tmp1 = load <4 x i16>* %B
- %tmp2 = call %struct.__builtin_neon_v4hi2 @llvm.arm.neon.vld2lane.v4i16(i16* %A, <4 x i16> %tmp1, <4 x i16> %tmp1, i32 1)
- %tmp3 = extractvalue %struct.__builtin_neon_v4hi2 %tmp2, 0
- %tmp4 = extractvalue %struct.__builtin_neon_v4hi2 %tmp2, 1
+ %tmp2 = call %struct.__neon_int16x4x2_t @llvm.arm.neon.vld2lane.v4i16(i16* %A, <4 x i16> %tmp1, <4 x i16> %tmp1, i32 1)
+ %tmp3 = extractvalue %struct.__neon_int16x4x2_t %tmp2, 0
+ %tmp4 = extractvalue %struct.__neon_int16x4x2_t %tmp2, 1
%tmp5 = add <4 x i16> %tmp3, %tmp4
ret <4 x i16> %tmp5
}
@@ -31,9 +31,9 @@ define <2 x i32> @vld2lanei32(i32* %A, <2 x i32>* %B) nounwind {
;CHECK: vld2lanei32:
;CHECK: vld2.32
%tmp1 = load <2 x i32>* %B
- %tmp2 = call %struct.__builtin_neon_v2si2 @llvm.arm.neon.vld2lane.v2i32(i32* %A, <2 x i32> %tmp1, <2 x i32> %tmp1, i32 1)
- %tmp3 = extractvalue %struct.__builtin_neon_v2si2 %tmp2, 0
- %tmp4 = extractvalue %struct.__builtin_neon_v2si2 %tmp2, 1
+ %tmp2 = call %struct.__neon_int32x2x2_t @llvm.arm.neon.vld2lane.v2i32(i32* %A, <2 x i32> %tmp1, <2 x i32> %tmp1, i32 1)
+ %tmp3 = extractvalue %struct.__neon_int32x2x2_t %tmp2, 0
+ %tmp4 = extractvalue %struct.__neon_int32x2x2_t %tmp2, 1
%tmp5 = add <2 x i32> %tmp3, %tmp4
ret <2 x i32> %tmp5
}
@@ -42,31 +42,31 @@ define <2 x float> @vld2lanef(float* %A, <2 x float>* %B) nounwind {
;CHECK: vld2lanef:
;CHECK: vld2.32
%tmp1 = load <2 x float>* %B
- %tmp2 = call %struct.__builtin_neon_v2sf2 @llvm.arm.neon.vld2lane.v2f32(float* %A, <2 x float> %tmp1, <2 x float> %tmp1, i32 1)
- %tmp3 = extractvalue %struct.__builtin_neon_v2sf2 %tmp2, 0
- %tmp4 = extractvalue %struct.__builtin_neon_v2sf2 %tmp2, 1
+ %tmp2 = call %struct.__neon_float32x2x2_t @llvm.arm.neon.vld2lane.v2f32(float* %A, <2 x float> %tmp1, <2 x float> %tmp1, i32 1)
+ %tmp3 = extractvalue %struct.__neon_float32x2x2_t %tmp2, 0
+ %tmp4 = extractvalue %struct.__neon_float32x2x2_t %tmp2, 1
%tmp5 = add <2 x float> %tmp3, %tmp4
ret <2 x float> %tmp5
}
-declare %struct.__builtin_neon_v8qi2 @llvm.arm.neon.vld2lane.v8i8(i8*, <8 x i8>, <8 x i8>, i32) nounwind readonly
-declare %struct.__builtin_neon_v4hi2 @llvm.arm.neon.vld2lane.v4i16(i8*, <4 x i16>, <4 x i16>, i32) nounwind readonly
-declare %struct.__builtin_neon_v2si2 @llvm.arm.neon.vld2lane.v2i32(i8*, <2 x i32>, <2 x i32>, i32) nounwind readonly
-declare %struct.__builtin_neon_v2sf2 @llvm.arm.neon.vld2lane.v2f32(i8*, <2 x float>, <2 x float>, i32) nounwind readonly
+declare %struct.__neon_int8x8x2_t @llvm.arm.neon.vld2lane.v8i8(i8*, <8 x i8>, <8 x i8>, i32) nounwind readonly
+declare %struct.__neon_int16x4x2_t @llvm.arm.neon.vld2lane.v4i16(i8*, <4 x i16>, <4 x i16>, i32) nounwind readonly
+declare %struct.__neon_int32x2x2_t @llvm.arm.neon.vld2lane.v2i32(i8*, <2 x i32>, <2 x i32>, i32) nounwind readonly
+declare %struct.__neon_float32x2x2_t @llvm.arm.neon.vld2lane.v2f32(i8*, <2 x float>, <2 x float>, i32) nounwind readonly
-%struct.__builtin_neon_v8qi3 = type { <8 x i8>, <8 x i8>, <8 x i8> }
-%struct.__builtin_neon_v4hi3 = type { <4 x i16>, <4 x i16>, <4 x i16> }
-%struct.__builtin_neon_v2si3 = type { <2 x i32>, <2 x i32>, <2 x i32> }
-%struct.__builtin_neon_v2sf3 = type { <2 x float>, <2 x float>, <2 x float> }
+%struct.__neon_int8x8x3_t = type { <8 x i8>, <8 x i8>, <8 x i8> }
+%struct.__neon_int16x4x3_t = type { <4 x i16>, <4 x i16>, <4 x i16> }
+%struct.__neon_int32x2x3_t = type { <2 x i32>, <2 x i32>, <2 x i32> }
+%struct.__neon_float32x2x3_t = type { <2 x float>, <2 x float>, <2 x float> }
define <8 x i8> @vld3lanei8(i8* %A, <8 x i8>* %B) nounwind {
;CHECK: vld3lanei8:
;CHECK: vld3.8
%tmp1 = load <8 x i8>* %B
- %tmp2 = call %struct.__builtin_neon_v8qi3 @llvm.arm.neon.vld3lane.v8i8(i8* %A, <8 x i8> %tmp1, <8 x i8> %tmp1, <8 x i8> %tmp1, i32 1)
- %tmp3 = extractvalue %struct.__builtin_neon_v8qi3 %tmp2, 0
- %tmp4 = extractvalue %struct.__builtin_neon_v8qi3 %tmp2, 1
- %tmp5 = extractvalue %struct.__builtin_neon_v8qi3 %tmp2, 2
+ %tmp2 = call %struct.__neon_int8x8x3_t @llvm.arm.neon.vld3lane.v8i8(i8* %A, <8 x i8> %tmp1, <8 x i8> %tmp1, <8 x i8> %tmp1, i32 1)
+ %tmp3 = extractvalue %struct.__neon_int8x8x3_t %tmp2, 0
+ %tmp4 = extractvalue %struct.__neon_int8x8x3_t %tmp2, 1
+ %tmp5 = extractvalue %struct.__neon_int8x8x3_t %tmp2, 2
%tmp6 = add <8 x i8> %tmp3, %tmp4
%tmp7 = add <8 x i8> %tmp5, %tmp6
ret <8 x i8> %tmp7
@@ -76,10 +76,10 @@ define <4 x i16> @vld3lanei16(i16* %A, <4 x i16>* %B) nounwind {
;CHECK: vld3lanei16:
;CHECK: vld3.16
%tmp1 = load <4 x i16>* %B
- %tmp2 = call %struct.__builtin_neon_v4hi3 @llvm.arm.neon.vld3lane.v4i16(i16* %A, <4 x i16> %tmp1, <4 x i16> %tmp1, <4 x i16> %tmp1, i32 1)
- %tmp3 = extractvalue %struct.__builtin_neon_v4hi3 %tmp2, 0
- %tmp4 = extractvalue %struct.__builtin_neon_v4hi3 %tmp2, 1
- %tmp5 = extractvalue %struct.__builtin_neon_v4hi3 %tmp2, 2
+ %tmp2 = call %struct.__neon_int16x4x3_t @llvm.arm.neon.vld3lane.v4i16(i16* %A, <4 x i16> %tmp1, <4 x i16> %tmp1, <4 x i16> %tmp1, i32 1)
+ %tmp3 = extractvalue %struct.__neon_int16x4x3_t %tmp2, 0
+ %tmp4 = extractvalue %struct.__neon_int16x4x3_t %tmp2, 1
+ %tmp5 = extractvalue %struct.__neon_int16x4x3_t %tmp2, 2
%tmp6 = add <4 x i16> %tmp3, %tmp4
%tmp7 = add <4 x i16> %tmp5, %tmp6
ret <4 x i16> %tmp7
@@ -89,10 +89,10 @@ define <2 x i32> @vld3lanei32(i32* %A, <2 x i32>* %B) nounwind {
;CHECK: vld3lanei32:
;CHECK: vld3.32
%tmp1 = load <2 x i32>* %B
- %tmp2 = call %struct.__builtin_neon_v2si3 @llvm.arm.neon.vld3lane.v2i32(i32* %A, <2 x i32> %tmp1, <2 x i32> %tmp1, <2 x i32> %tmp1, i32 1)
- %tmp3 = extractvalue %struct.__builtin_neon_v2si3 %tmp2, 0
- %tmp4 = extractvalue %struct.__builtin_neon_v2si3 %tmp2, 1
- %tmp5 = extractvalue %struct.__builtin_neon_v2si3 %tmp2, 2
+ %tmp2 = call %struct.__neon_int32x2x3_t @llvm.arm.neon.vld3lane.v2i32(i32* %A, <2 x i32> %tmp1, <2 x i32> %tmp1, <2 x i32> %tmp1, i32 1)
+ %tmp3 = extractvalue %struct.__neon_int32x2x3_t %tmp2, 0
+ %tmp4 = extractvalue %struct.__neon_int32x2x3_t %tmp2, 1
+ %tmp5 = extractvalue %struct.__neon_int32x2x3_t %tmp2, 2
%tmp6 = add <2 x i32> %tmp3, %tmp4
%tmp7 = add <2 x i32> %tmp5, %tmp6
ret <2 x i32> %tmp7
@@ -102,34 +102,34 @@ define <2 x float> @vld3lanef(float* %A, <2 x float>* %B) nounwind {
;CHECK: vld3lanef:
;CHECK: vld3.32
%tmp1 = load <2 x float>* %B
- %tmp2 = call %struct.__builtin_neon_v2sf3 @llvm.arm.neon.vld3lane.v2f32(float* %A, <2 x float> %tmp1, <2 x float> %tmp1, <2 x float> %tmp1, i32 1)
- %tmp3 = extractvalue %struct.__builtin_neon_v2sf3 %tmp2, 0
- %tmp4 = extractvalue %struct.__builtin_neon_v2sf3 %tmp2, 1
- %tmp5 = extractvalue %struct.__builtin_neon_v2sf3 %tmp2, 2
+ %tmp2 = call %struct.__neon_float32x2x3_t @llvm.arm.neon.vld3lane.v2f32(float* %A, <2 x float> %tmp1, <2 x float> %tmp1, <2 x float> %tmp1, i32 1)
+ %tmp3 = extractvalue %struct.__neon_float32x2x3_t %tmp2, 0
+ %tmp4 = extractvalue %struct.__neon_float32x2x3_t %tmp2, 1
+ %tmp5 = extractvalue %struct.__neon_float32x2x3_t %tmp2, 2
%tmp6 = add <2 x float> %tmp3, %tmp4
%tmp7 = add <2 x float> %tmp5, %tmp6
ret <2 x float> %tmp7
}
-declare %struct.__builtin_neon_v8qi3 @llvm.arm.neon.vld3lane.v8i8(i8*, <8 x i8>, <8 x i8>, <8 x i8>, i32) nounwind readonly
-declare %struct.__builtin_neon_v4hi3 @llvm.arm.neon.vld3lane.v4i16(i8*, <4 x i16>, <4 x i16>, <4 x i16>, i32) nounwind readonly
-declare %struct.__builtin_neon_v2si3 @llvm.arm.neon.vld3lane.v2i32(i8*, <2 x i32>, <2 x i32>, <2 x i32>, i32) nounwind readonly
-declare %struct.__builtin_neon_v2sf3 @llvm.arm.neon.vld3lane.v2f32(i8*, <2 x float>, <2 x float>, <2 x float>, i32) nounwind readonly
+declare %struct.__neon_int8x8x3_t @llvm.arm.neon.vld3lane.v8i8(i8*, <8 x i8>, <8 x i8>, <8 x i8>, i32) nounwind readonly
+declare %struct.__neon_int16x4x3_t @llvm.arm.neon.vld3lane.v4i16(i8*, <4 x i16>, <4 x i16>, <4 x i16>, i32) nounwind readonly
+declare %struct.__neon_int32x2x3_t @llvm.arm.neon.vld3lane.v2i32(i8*, <2 x i32>, <2 x i32>, <2 x i32>, i32) nounwind readonly
+declare %struct.__neon_float32x2x3_t @llvm.arm.neon.vld3lane.v2f32(i8*, <2 x float>, <2 x float>, <2 x float>, i32) nounwind readonly
-%struct.__builtin_neon_v8qi4 = type { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> }
-%struct.__builtin_neon_v4hi4 = type { <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16> }
-%struct.__builtin_neon_v2si4 = type { <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32> }
-%struct.__builtin_neon_v2sf4 = type { <2 x float>, <2 x float>, <2 x float>, <2 x float> }
+%struct.__neon_int8x8x4_t = type { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> }
+%struct.__neon_int16x4x4_t = type { <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16> }
+%struct.__neon_int32x2x4_t = type { <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32> }
+%struct.__neon_float32x2x4_t = type { <2 x float>, <2 x float>, <2 x float>, <2 x float> }
define <8 x i8> @vld4lanei8(i8* %A, <8 x i8>* %B) nounwind {
;CHECK: vld4lanei8:
;CHECK: vld4.8
%tmp1 = load <8 x i8>* %B
- %tmp2 = call %struct.__builtin_neon_v8qi4 @llvm.arm.neon.vld4lane.v8i8(i8* %A, <8 x i8> %tmp1, <8 x i8> %tmp1, <8 x i8> %tmp1, <8 x i8> %tmp1, i32 1)
- %tmp3 = extractvalue %struct.__builtin_neon_v8qi4 %tmp2, 0
- %tmp4 = extractvalue %struct.__builtin_neon_v8qi4 %tmp2, 1
- %tmp5 = extractvalue %struct.__builtin_neon_v8qi4 %tmp2, 2
- %tmp6 = extractvalue %struct.__builtin_neon_v8qi4 %tmp2, 3
+ %tmp2 = call %struct.__neon_int8x8x4_t @llvm.arm.neon.vld4lane.v8i8(i8* %A, <8 x i8> %tmp1, <8 x i8> %tmp1, <8 x i8> %tmp1, <8 x i8> %tmp1, i32 1)
+ %tmp3 = extractvalue %struct.__neon_int8x8x4_t %tmp2, 0
+ %tmp4 = extractvalue %struct.__neon_int8x8x4_t %tmp2, 1
+ %tmp5 = extractvalue %struct.__neon_int8x8x4_t %tmp2, 2
+ %tmp6 = extractvalue %struct.__neon_int8x8x4_t %tmp2, 3
%tmp7 = add <8 x i8> %tmp3, %tmp4
%tmp8 = add <8 x i8> %tmp5, %tmp6
%tmp9 = add <8 x i8> %tmp7, %tmp8
@@ -140,11 +140,11 @@ define <4 x i16> @vld4lanei16(i16* %A, <4 x i16>* %B) nounwind {
;CHECK: vld4lanei16:
;CHECK: vld4.16
%tmp1 = load <4 x i16>* %B
- %tmp2 = call %struct.__builtin_neon_v4hi4 @llvm.arm.neon.vld4lane.v4i16(i16* %A, <4 x i16> %tmp1, <4 x i16> %tmp1, <4 x i16> %tmp1, <4 x i16> %tmp1, i32 1)
- %tmp3 = extractvalue %struct.__builtin_neon_v4hi4 %tmp2, 0
- %tmp4 = extractvalue %struct.__builtin_neon_v4hi4 %tmp2, 1
- %tmp5 = extractvalue %struct.__builtin_neon_v4hi4 %tmp2, 2
- %tmp6 = extractvalue %struct.__builtin_neon_v4hi4 %tmp2, 3
+ %tmp2 = call %struct.__neon_int16x4x4_t @llvm.arm.neon.vld4lane.v4i16(i16* %A, <4 x i16> %tmp1, <4 x i16> %tmp1, <4 x i16> %tmp1, <4 x i16> %tmp1, i32 1)
+ %tmp3 = extractvalue %struct.__neon_int16x4x4_t %tmp2, 0
+ %tmp4 = extractvalue %struct.__neon_int16x4x4_t %tmp2, 1
+ %tmp5 = extractvalue %struct.__neon_int16x4x4_t %tmp2, 2
+ %tmp6 = extractvalue %struct.__neon_int16x4x4_t %tmp2, 3
%tmp7 = add <4 x i16> %tmp3, %tmp4
%tmp8 = add <4 x i16> %tmp5, %tmp6
%tmp9 = add <4 x i16> %tmp7, %tmp8
@@ -155,11 +155,11 @@ define <2 x i32> @vld4lanei32(i32* %A, <2 x i32>* %B) nounwind {
;CHECK: vld4lanei32:
;CHECK: vld4.32
%tmp1 = load <2 x i32>* %B
- %tmp2 = call %struct.__builtin_neon_v2si4 @llvm.arm.neon.vld4lane.v2i32(i32* %A, <2 x i32> %tmp1, <2 x i32> %tmp1, <2 x i32> %tmp1, <2 x i32> %tmp1, i32 1)
- %tmp3 = extractvalue %struct.__builtin_neon_v2si4 %tmp2, 0
- %tmp4 = extractvalue %struct.__builtin_neon_v2si4 %tmp2, 1
- %tmp5 = extractvalue %struct.__builtin_neon_v2si4 %tmp2, 2
- %tmp6 = extractvalue %struct.__builtin_neon_v2si4 %tmp2, 3
+ %tmp2 = call %struct.__neon_int32x2x4_t @llvm.arm.neon.vld4lane.v2i32(i32* %A, <2 x i32> %tmp1, <2 x i32> %tmp1, <2 x i32> %tmp1, <2 x i32> %tmp1, i32 1)
+ %tmp3 = extractvalue %struct.__neon_int32x2x4_t %tmp2, 0
+ %tmp4 = extractvalue %struct.__neon_int32x2x4_t %tmp2, 1
+ %tmp5 = extractvalue %struct.__neon_int32x2x4_t %tmp2, 2
+ %tmp6 = extractvalue %struct.__neon_int32x2x4_t %tmp2, 3
%tmp7 = add <2 x i32> %tmp3, %tmp4
%tmp8 = add <2 x i32> %tmp5, %tmp6
%tmp9 = add <2 x i32> %tmp7, %tmp8
@@ -170,18 +170,18 @@ define <2 x float> @vld4lanef(float* %A, <2 x float>* %B) nounwind {
;CHECK: vld4lanef:
;CHECK: vld4.32
%tmp1 = load <2 x float>* %B
- %tmp2 = call %struct.__builtin_neon_v2sf4 @llvm.arm.neon.vld4lane.v2f32(float* %A, <2 x float> %tmp1, <2 x float> %tmp1, <2 x float> %tmp1, <2 x float> %tmp1, i32 1)
- %tmp3 = extractvalue %struct.__builtin_neon_v2sf4 %tmp2, 0
- %tmp4 = extractvalue %struct.__builtin_neon_v2sf4 %tmp2, 1
- %tmp5 = extractvalue %struct.__builtin_neon_v2sf4 %tmp2, 2
- %tmp6 = extractvalue %struct.__builtin_neon_v2sf4 %tmp2, 3
+ %tmp2 = call %struct.__neon_float32x2x4_t @llvm.arm.neon.vld4lane.v2f32(float* %A, <2 x float> %tmp1, <2 x float> %tmp1, <2 x float> %tmp1, <2 x float> %tmp1, i32 1)
+ %tmp3 = extractvalue %struct.__neon_float32x2x4_t %tmp2, 0
+ %tmp4 = extractvalue %struct.__neon_float32x2x4_t %tmp2, 1
+ %tmp5 = extractvalue %struct.__neon_float32x2x4_t %tmp2, 2
+ %tmp6 = extractvalue %struct.__neon_float32x2x4_t %tmp2, 3
%tmp7 = add <2 x float> %tmp3, %tmp4
%tmp8 = add <2 x float> %tmp5, %tmp6
%tmp9 = add <2 x float> %tmp7, %tmp8
ret <2 x float> %tmp9
}
-declare %struct.__builtin_neon_v8qi4 @llvm.arm.neon.vld4lane.v8i8(i8*, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, i32) nounwind readonly
-declare %struct.__builtin_neon_v4hi4 @llvm.arm.neon.vld4lane.v4i16(i8*, <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16>, i32) nounwind readonly
-declare %struct.__builtin_neon_v2si4 @llvm.arm.neon.vld4lane.v2i32(i8*, <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32>, i32) nounwind readonly
-declare %struct.__builtin_neon_v2sf4 @llvm.arm.neon.vld4lane.v2f32(i8*, <2 x float>, <2 x float>, <2 x float>, <2 x float>, i32) nounwind readonly
+declare %struct.__neon_int8x8x4_t @llvm.arm.neon.vld4lane.v8i8(i8*, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, i32) nounwind readonly
+declare %struct.__neon_int16x4x4_t @llvm.arm.neon.vld4lane.v4i16(i8*, <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16>, i32) nounwind readonly
+declare %struct.__neon_int32x2x4_t @llvm.arm.neon.vld4lane.v2i32(i8*, <2 x i32>, <2 x i32>, <2 x i32>, <2 x i32>, i32) nounwind readonly
+declare %struct.__neon_float32x2x4_t @llvm.arm.neon.vld4lane.v2f32(i8*, <2 x float>, <2 x float>, <2 x float>, <2 x float>, i32) nounwind readonly
diff --git a/test/CodeGen/ARM/vtbl.ll b/test/CodeGen/ARM/vtbl.ll
index 89653b097a..926498739e 100644
--- a/test/CodeGen/ARM/vtbl.ll
+++ b/test/CodeGen/ARM/vtbl.ll
@@ -1,8 +1,8 @@
; RUN: llc < %s -march=arm -mattr=+neon | FileCheck %s
-%struct.__builtin_neon_v8qi2 = type { <8 x i8>, <8 x i8> }
-%struct.__builtin_neon_v8qi3 = type { <8 x i8>, <8 x i8>, <8 x i8> }
-%struct.__builtin_neon_v8qi4 = type { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> }
+%struct.__neon_int8x8x2_t = type { <8 x i8>, <8 x i8> }
+%struct.__neon_int8x8x3_t = type { <8 x i8>, <8 x i8>, <8 x i8> }
+%struct.__neon_int8x8x4_t = type { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> }
define <8 x i8> @vtbl1(<8 x i8>* %A, <8 x i8>* %B) nounwind {
;CHECK: vtbl1:
@@ -13,38 +13,38 @@ define <8 x i8> @vtbl1(<8 x i8>* %A, <8 x i8>* %B) nounwind {
ret <8 x i8> %tmp3
}
-define <8 x i8> @vtbl2(<8 x i8>* %A, %struct.__builtin_neon_v8qi2* %B) nounwind {
+define <8 x i8> @vtbl2(<8 x i8>* %A, %struct.__neon_int8x8x2_t* %B) nounwind {
;CHECK: vtbl2:
;CHECK: vtbl.8
%tmp1 = load <8 x i8>* %A
- %tmp2 = load %struct.__builtin_neon_v8qi2* %B
- %tmp3 = extractvalue %struct.__builtin_neon_v8qi2 %tmp2, 0
- %tmp4 = extractvalue %struct.__builtin_neon_v8qi2 %tmp2, 1
+ %tmp2 = load %struct.__neon_int8x8x2_t* %B
+ %tmp3 = extractvalue %struct.__neon_int8x8x2_t %tmp2, 0
+ %tmp4 = extractvalue %struct.__neon_int8x8x2_t %tmp2, 1
%tmp5 = call <8 x i8> @llvm.arm.neon.vtbl2(<8 x i8> %tmp1, <8 x i8> %tmp3, <8 x i8> %tmp4)
ret <8 x i8> %tmp5
}
-define <8 x i8> @vtbl3(<8 x i8>* %A, %struct.__builtin_neon_v8qi3* %B) nounwind {
+define <8 x i8> @vtbl3(<8 x i8>* %A, %struct.__neon_int8x8x3_t* %B) nounwind {
;CHECK: vtbl3:
;CHECK: vtbl.8
%tmp1 = load <8 x i8>* %A
- %tmp2 = load %struct.__builtin_neon_v8qi3* %B
- %tmp3 = extractvalue %struct.__builtin_neon_v8qi3 %tmp2, 0
- %tmp4 = extractvalue %struct.__builtin_neon_v8qi3 %tmp2, 1
- %tmp5 = extractvalue %struct.__builtin_neon_v8qi3 %tmp2, 2
+ %tmp2 = load %struct.__neon_int8x8x3_t* %B
+ %tmp3 = extractvalue %struct.__neon_int8x8x3_t %tmp2, 0
+ %tmp4 = extractvalue %struct.__neon_int8x8x3_t %tmp2, 1
+ %tmp5 = extractvalue %struct.__neon_int8x8x3_t %tmp2, 2
%tmp6 = call <8 x i8> @llvm.arm.neon.vtbl3(<8 x i8> %tmp1, <8 x i8> %tmp3, <8 x i8> %tmp4, <8 x i8> %tmp5)
ret <8 x i8> %tmp6
}
-define <8 x i8> @vtbl4(<8 x i8>* %A, %struct.__builtin_neon_v8qi4* %B) nounwind {
+define <8 x i8> @vtbl4(<8 x i8>* %A, %struct.__neon_int8x8x4_t* %B) nounwind {
;CHECK: vtbl4:
;CHECK: vtbl.8
%tmp1 = load <8 x i8>* %A
- %tmp2 = load %struct.__builtin_neon_v8qi4* %B
- %tmp3 = extractvalue %struct.__builtin_neon_v8qi4 %tmp2, 0
- %tmp4 = extractvalue %struct.__builtin_neon_v8qi4 %tmp2, 1
- %tmp5 = extractvalue %struct.__builtin_neon_v8qi4 %tmp2, 2
- %tmp6 = extractvalue %struct.__builtin_neon_v8qi4 %tmp2, 3
+ %tmp2 = load %struct.__neon_int8x8x4_t* %B
+ %tmp3 = extractvalue %struct.__neon_int8x8x4_t %tmp2, 0
+ %tmp4 = extractvalue %struct.__neon_int8x8x4_t %tmp2, 1
+ %tmp5 = extractvalue %struct.__neon_int8x8x4_t %tmp2, 2
+ %tmp6 = extractvalue %struct.__neon_int8x8x4_t %tmp2, 3
%tmp7 = call <8 x i8> @llvm.arm.neon.vtbl4(<8 x i8> %tmp1, <8 x i8> %tmp3, <8 x i8> %tmp4, <8 x i8> %tmp5, <8 x i8> %tmp6)
ret <8 x i8> %tmp7
}
@@ -59,40 +59,40 @@ define <8 x i8> @vtbx1(<8 x i8>* %A, <8 x i8>* %B, <8 x i8>* %C) nounwind {
ret <8 x i8> %tmp4
}
-define <8 x i8> @vtbx2(<8 x i8>* %A, %struct.__builtin_neon_v8qi2* %B, <8 x i8>* %C) nounwind {
+define <8 x i8> @vtbx2(<8 x i8>* %A, %struct.__neon_int8x8x2_t* %B, <8 x i8>* %C) nounwind {
;CHECK: vtbx2:
;CHECK: vtbx.8
%tmp1 = load <8 x i8>* %A
- %tmp2 = load %struct.__builtin_neon_v8qi2* %B
- %tmp3 = extractvalue %struct.__builtin_neon_v8qi2 %tmp2, 0
- %tmp4 = extractvalue %struct.__builtin_neon_v8qi2 %tmp2, 1
+ %tmp2 = load %struct.__neon_int8x8x2_t* %B
+ %tmp3 = extractvalue %struct.__neon_int8x8x2_t %tmp2, 0
+ %tmp4 = extractvalue %struct.__neon_int8x8x2_t %tmp2, 1
%tmp5 = load <8 x i8>* %C
%tmp6 = call <8 x i8> @llvm.arm.neon.vtbx2(<8 x i8> %tmp1, <8 x i8> %tmp3, <8 x i8> %tmp4, <8 x i8> %tmp5)
ret <8 x i8> %tmp6
}
-define <8 x i8> @vtbx3(<8 x i8>* %A, %struct.__builtin_neon_v8qi3* %B, <8 x i8>* %C) nounwind {
+define <8 x i8> @vtbx3(<8 x i8>* %A, %struct.__neon_int8x8x3_t* %B, <8 x i8>* %C) nounwind {
;CHECK: vtbx3:
;CHECK: vtbx.8
%tmp1 = load <8 x i8>* %A
- %tmp2 = load %struct.__builtin_neon_v8qi3* %B
- %tmp3 = extractvalue %struct.__builtin_neon_v8qi3 %tmp2, 0
- %tmp4 = extractvalue %struct.__builtin_neon_v8qi3 %tmp2, 1
- %tmp5 = extractvalue %struct.__builtin_neon_v8qi3 %tmp2, 2
+ %tmp2 = load %struct.__neon_int8x8x3_t* %B
+ %tmp3 = extractvalue %struct.__neon_int8x8x3_t %tmp2, 0
+ %tmp4 = extractvalue %struct.__neon_int8x8x3_t %tmp2, 1
+ %tmp5 = extractvalue %struct.__neon_int8x8x3_t %tmp2, 2
%tmp6 = load <8 x i8>* %C
%tmp7 = call <8 x i8> @llvm.arm.neon.vtbx3(<8 x i8> %tmp1, <8 x i8> %tmp3, <8 x i8> %tmp4, <8 x i8> %tmp5, <8 x i8> %tmp6)
ret <8 x i8> %tmp7
}
-define <8 x i8> @vtbx4(<8 x i8>* %A, %struct.__builtin_neon_v8qi4* %B, <8 x i8>* %C) nounwind {
+define <8 x i8> @vtbx4(<8 x i8>* %A, %struct.__neon_int8x8x4_t* %B, <8 x i8>* %C) nounwind {
;CHECK: vtbx4:
;CHECK: vtbx.8
%tmp1 = load <8 x i8>* %A
- %tmp2 = load %struct.__builtin_neon_v8qi4* %B
- %tmp3 = extractvalue %struct.__builtin_neon_v8qi4 %tmp2, 0
- %tmp4 = extractvalue %struct.__builtin_neon_v8qi4 %tmp2, 1
- %tmp5 = extractvalue %struct.__builtin_neon_v8qi4 %tmp2, 2
- %tmp6 = extractvalue %struct.__builtin_neon_v8qi4 %tmp2, 3
+ %tmp2 = load %struct.__neon_int8x8x4_t* %B
+ %tmp3 = extractvalue %struct.__neon_int8x8x4_t %tmp2, 0
+ %tmp4 = extractvalue %struct.__neon_int8x8x4_t %tmp2, 1
+ %tmp5 = extractvalue %struct.__neon_int8x8x4_t %tmp2, 2
+ %tmp6 = extractvalue %struct.__neon_int8x8x4_t %tmp2, 3
%tmp7 = load <8 x i8>* %C
%tmp8 = call <8 x i8> @llvm.arm.neon.vtbx4(<8 x i8> %tmp1, <8 x i8> %tmp3, <8 x i8> %tmp4, <8 x i8> %tmp5, <8 x i8> %tmp6, <8 x i8> %tmp7)
ret <8 x i8> %tmp8
diff --git a/test/CodeGen/ARM/vtrn.ll b/test/CodeGen/ARM/vtrn.ll
index be55daa6a9..5122b0981e 100644
--- a/test/CodeGen/ARM/vtrn.ll
+++ b/test/CodeGen/ARM/vtrn.ll
@@ -1,15 +1,5 @@
; RUN: llc < %s -march=arm -mattr=+neon | FileCheck %s
-%struct.__builtin_neon_v8qi2 = type { <8 x i8>, <8 x i8> }
-%struct.__builtin_neon_v4hi2 = type { <4 x i16>, <4 x i16> }
-%struct.__builtin_neon_v2si2 = type { <2 x i32>, <2 x i32> }
-%struct.__builtin_neon_v2sf2 = type { <2 x float>, <2 x float> }
-
-%struct.__builtin_neon_v16qi2 = type { <16 x i8>, <16 x i8> }
-%struct.__builtin_neon_v8hi2 = type { <8 x i16>, <8 x i16> }
-%struct.__builtin_neon_v4si2 = type { <4 x i32>, <4 x i32> }
-%struct.__builtin_neon_v4sf2 = type { <4 x float>, <4 x float> }
-
define <8 x i8> @vtrni8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
;CHECK: vtrni8:
;CHECK: vtrn.8
diff --git a/test/CodeGen/ARM/vuzp.ll b/test/CodeGen/ARM/vuzp.ll
index 411f59e9ce..e531718d94 100644
--- a/test/CodeGen/ARM/vuzp.ll
+++ b/test/CodeGen/ARM/vuzp.ll
@@ -1,15 +1,5 @@
; RUN: llc < %s -march=arm -mattr=+neon | FileCheck %s
-%struct.__builtin_neon_v8qi2 = type { <8 x i8>, <8 x i8> }
-%struct.__builtin_neon_v4hi2 = type { <4 x i16>, <4 x i16> }
-%struct.__builtin_neon_v2si2 = type { <2 x i32>, <2 x i32> }
-%struct.__builtin_neon_v2sf2 = type { <2 x float>, <2 x float> }
-
-%struct.__builtin_neon_v16qi2 = type { <16 x i8>, <16 x i8> }
-%struct.__builtin_neon_v8hi2 = type { <8 x i16>, <8 x i16> }
-%struct.__builtin_neon_v4si2 = type { <4 x i32>, <4 x i32> }
-%struct.__builtin_neon_v4sf2 = type { <4 x float>, <4 x float> }
-
define <8 x i8> @vuzpi8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
;CHECK: vuzpi8:
;CHECK: vuzp.8
diff --git a/test/CodeGen/ARM/vzip.ll b/test/CodeGen/ARM/vzip.ll
index a1509b9dc5..32f7e0d02c 100644
--- a/test/CodeGen/ARM/vzip.ll
+++ b/test/CodeGen/ARM/vzip.ll
@@ -1,15 +1,5 @@
; RUN: llc < %s -march=arm -mattr=+neon | FileCheck %s
-%struct.__builtin_neon_v8qi2 = type { <8 x i8>, <8 x i8> }
-%struct.__builtin_neon_v4hi2 = type { <4 x i16>, <4 x i16> }
-%struct.__builtin_neon_v2si2 = type { <2 x i32>, <2 x i32> }
-%struct.__builtin_neon_v2sf2 = type { <2 x float>, <2 x float> }
-
-%struct.__builtin_neon_v16qi2 = type { <16 x i8>, <16 x i8> }
-%struct.__builtin_neon_v8hi2 = type { <8 x i16>, <8 x i16> }
-%struct.__builtin_neon_v4si2 = type { <4 x i32>, <4 x i32> }
-%struct.__builtin_neon_v4sf2 = type { <4 x float>, <4 x float> }
-
define <8 x i8> @vzipi8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
;CHECK: vzipi8:
;CHECK: vzip.8