author     Chad Rosier <mcrosier@codeaurora.org>    2013-10-31 19:28:44 +0000
committer  Chad Rosier <mcrosier@codeaurora.org>    2013-10-31 19:28:44 +0000
commit     1d28917dc39f38847f5c69c0a60cd1491430bdad (patch)
tree       691afc4dbf1350521d322b4882b1bc71f4632fef /test
parent     1d6d49fbb104781cc3e9da9dcc3e36b6cbcd38b6 (diff)
[AArch64] Add support for NEON scalar shift immediate instructions.
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@193790 91177308-0d34-0410-b5e6-96231b3b80d8
Diffstat (limited to 'test')
-rw-r--r--  test/CodeGen/AArch64/neon-scalar-shift-imm.ll        527
-rw-r--r--  test/MC/AArch64/neon-diagnostics.s                    24
-rw-r--r--  test/MC/AArch64/neon-scalar-shift-imm.s              186
-rw-r--r--  test/MC/Disassembler/AArch64/neon-instructions.txt   162
4 files changed, 887 insertions, 12 deletions
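
For context, each new CodeGen test below mirrors the ACLE scalar-shift intrinsic of the same name (test_vshrd_n_s64 exercises vshrd_n_s64, and so on). A minimal C sketch of the pattern under test, assuming a Clang and arm_neon.h new enough to carry these intrinsics, might look like:

    #include <arm_neon.h>

    /* Scalar signed shift right by immediate. Per the CHECK lines below,
       this is expected to lower to a single 'sshr dN, dM, #63'. */
    int64_t shift_right_63(int64_t a) {
        return vshrd_n_s64(a, 63);
    }

Clang would expand such a call into the insertelement / llvm.aarch64.neon.* call / extractelement sequence that the .ll file below feeds to llc directly.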
diff --git a/test/CodeGen/AArch64/neon-scalar-shift-imm.ll b/test/CodeGen/AArch64/neon-scalar-shift-imm.ll
new file mode 100644
index 0000000000..b11540f80a
--- /dev/null
+++ b/test/CodeGen/AArch64/neon-scalar-shift-imm.ll
@@ -0,0 +1,527 @@
+; RUN: llc < %s -verify-machineinstrs -mtriple=aarch64-none-linux-gnu -mattr=+neon | FileCheck %s
+
+define i64 @test_vshrd_n_s64(i64 %a) {
+; CHECK: test_vshrd_n_s64
+; CHECK: sshr {{d[0-9]+}}, {{d[0-9]+}}, #63
+entry:
+ %vsshr = insertelement <1 x i64> undef, i64 %a, i32 0
+ %vsshr1 = call <1 x i64> @llvm.aarch64.neon.vshrds.n(<1 x i64> %vsshr, i32 63)
+ %0 = extractelement <1 x i64> %vsshr1, i32 0
+ ret i64 %0
+}
+
+declare <1 x i64> @llvm.aarch64.neon.vshrds.n(<1 x i64>, i32)
+
+define i64 @test_vshrd_n_u64(i64 %a) {
+; CHECK: test_vshrd_n_u64
+; CHECK: ushr {{d[0-9]+}}, {{d[0-9]+}}, #63
+entry:
+ %vushr = insertelement <1 x i64> undef, i64 %a, i32 0
+ %vushr1 = call <1 x i64> @llvm.aarch64.neon.vshrdu.n(<1 x i64> %vushr, i32 63)
+ %0 = extractelement <1 x i64> %vushr1, i32 0
+ ret i64 %0
+}
+
+declare <1 x i64> @llvm.aarch64.neon.vshrdu.n(<1 x i64>, i32)
+
+define i64 @test_vrshrd_n_s64(i64 %a) {
+; CHECK: test_vrshrd_n_s64
+; CHECK: srshr {{d[0-9]+}}, {{d[0-9]+}}, #63
+entry:
+ %vsrshr = insertelement <1 x i64> undef, i64 %a, i32 0
+ %vsrshr1 = call <1 x i64> @llvm.aarch64.neon.vrshrds.n(<1 x i64> %vsrshr, i32 63)
+ %0 = extractelement <1 x i64> %vsrshr1, i32 0
+ ret i64 %0
+}
+
+declare <1 x i64> @llvm.aarch64.neon.vrshrds.n(<1 x i64>, i32)
+
+define i64 @test_vrshrd_n_u64(i64 %a) {
+; CHECK: test_vrshrd_n_u64
+; CHECK: urshr {{d[0-9]+}}, {{d[0-9]+}}, #63
+entry:
+ %vurshr = insertelement <1 x i64> undef, i64 %a, i32 0
+ %vurshr1 = call <1 x i64> @llvm.aarch64.neon.vrshrdu.n(<1 x i64> %vurshr, i32 63)
+ %0 = extractelement <1 x i64> %vurshr1, i32 0
+ ret i64 %0
+}
+
+declare <1 x i64> @llvm.aarch64.neon.vrshrdu.n(<1 x i64>, i32)
+
+define i64 @test_vsrad_n_s64(i64 %a, i64 %b) {
+; CHECK: test_vsrad_n_s64
+; CHECK: ssra {{d[0-9]+}}, {{d[0-9]+}}, #63
+entry:
+ %vssra = insertelement <1 x i64> undef, i64 %a, i32 0
+ %vssra1 = insertelement <1 x i64> undef, i64 %b, i32 0
+ %vssra2 = call <1 x i64> @llvm.aarch64.neon.vsrads.n(<1 x i64> %vssra, <1 x i64> %vssra1, i32 63)
+ %0 = extractelement <1 x i64> %vssra2, i32 0
+ ret i64 %0
+}
+
+declare <1 x i64> @llvm.aarch64.neon.vsrads.n(<1 x i64>, <1 x i64>, i32)
+
+define i64 @test_vsrad_n_u64(i64 %a, i64 %b) {
+; CHECK: test_vsrad_n_u64
+; CHECK: usra {{d[0-9]+}}, {{d[0-9]+}}, #63
+entry:
+ %vusra = insertelement <1 x i64> undef, i64 %a, i32 0
+ %vusra1 = insertelement <1 x i64> undef, i64 %b, i32 0
+ %vusra2 = call <1 x i64> @llvm.aarch64.neon.vsradu.n(<1 x i64> %vusra, <1 x i64> %vusra1, i32 63)
+ %0 = extractelement <1 x i64> %vusra2, i32 0
+ ret i64 %0
+}
+
+declare <1 x i64> @llvm.aarch64.neon.vsradu.n(<1 x i64>, <1 x i64>, i32)
+
+define i64 @test_vrsrad_n_s64(i64 %a, i64 %b) {
+; CHECK: test_vrsrad_n_s64
+; CHECK: srsra {{d[0-9]+}}, {{d[0-9]+}}, #63
+entry:
+ %vsrsra = insertelement <1 x i64> undef, i64 %a, i32 0
+ %vsrsra1 = insertelement <1 x i64> undef, i64 %b, i32 0
+ %vsrsra2 = call <1 x i64> @llvm.aarch64.neon.vrsrads.n(<1 x i64> %vsrsra, <1 x i64> %vsrsra1, i32 63)
+ %0 = extractelement <1 x i64> %vsrsra2, i32 0
+ ret i64 %0
+}
+
+declare <1 x i64> @llvm.aarch64.neon.vrsrads.n(<1 x i64>, <1 x i64>, i32)
+
+define i64 @test_vrsrad_n_u64(i64 %a, i64 %b) {
+; CHECK: test_vrsrad_n_u64
+; CHECK: ursra {{d[0-9]+}}, {{d[0-9]+}}, #63
+entry:
+ %vursra = insertelement <1 x i64> undef, i64 %a, i32 0
+ %vursra1 = insertelement <1 x i64> undef, i64 %b, i32 0
+ %vursra2 = call <1 x i64> @llvm.aarch64.neon.vrsradu.n(<1 x i64> %vursra, <1 x i64> %vursra1, i32 63)
+ %0 = extractelement <1 x i64> %vursra2, i32 0
+ ret i64 %0
+}
+
+declare <1 x i64> @llvm.aarch64.neon.vrsradu.n(<1 x i64>, <1 x i64>, i32)
+
+define i64 @test_vshld_n_s64(i64 %a) {
+; CHECK: test_vshld_n_s64
+; CHECK: shl {{d[0-9]+}}, {{d[0-9]+}}, #63
+entry:
+ %vshl = insertelement <1 x i64> undef, i64 %a, i32 0
+ %vshl1 = call <1 x i64> @llvm.aarch64.neon.vshld.n(<1 x i64> %vshl, i32 63)
+ %0 = extractelement <1 x i64> %vshl1, i32 0
+ ret i64 %0
+}
+
+declare <1 x i64> @llvm.aarch64.neon.vshld.n(<1 x i64>, i32)
+
+define i64 @test_vshld_n_u64(i64 %a) {
+; CHECK: test_vshld_n_u64
+; CHECK: shl {{d[0-9]+}}, {{d[0-9]+}}, #63
+entry:
+ %vshl = insertelement <1 x i64> undef, i64 %a, i32 0
+ %vshl1 = call <1 x i64> @llvm.aarch64.neon.vshld.n(<1 x i64> %vshl, i32 63)
+ %0 = extractelement <1 x i64> %vshl1, i32 0
+ ret i64 %0
+}
+
+define i8 @test_vqshlb_n_s8(i8 %a) {
+; CHECK: test_vqshlb_n_s8
+; CHECK: sqshl {{b[0-9]+}}, {{b[0-9]+}}, #7
+entry:
+ %vsqshl = insertelement <1 x i8> undef, i8 %a, i32 0
+ %vsqshl1 = call <1 x i8> @llvm.aarch64.neon.vqshls.n.v1i8(<1 x i8> %vsqshl, i32 7)
+ %0 = extractelement <1 x i8> %vsqshl1, i32 0
+ ret i8 %0
+}
+
+declare <1 x i8> @llvm.aarch64.neon.vqshls.n.v1i8(<1 x i8>, i32)
+
+define i16 @test_vqshlh_n_s16(i16 %a) {
+; CHECK: test_vqshlh_n_s16
+; CHECK: sqshl {{h[0-9]+}}, {{h[0-9]+}}, #15
+entry:
+ %vsqshl = insertelement <1 x i16> undef, i16 %a, i32 0
+ %vsqshl1 = call <1 x i16> @llvm.aarch64.neon.vqshls.n.v1i16(<1 x i16> %vsqshl, i32 15)
+ %0 = extractelement <1 x i16> %vsqshl1, i32 0
+ ret i16 %0
+}
+
+declare <1 x i16> @llvm.aarch64.neon.vqshls.n.v1i16(<1 x i16>, i32)
+
+define i32 @test_vqshls_n_s32(i32 %a) {
+; CHECK: test_vqshls_n_s32
+; CHECK: sqshl {{s[0-9]+}}, {{s[0-9]+}}, #31
+entry:
+ %vsqshl = insertelement <1 x i32> undef, i32 %a, i32 0
+ %vsqshl1 = call <1 x i32> @llvm.aarch64.neon.vqshls.n.v1i32(<1 x i32> %vsqshl, i32 31)
+ %0 = extractelement <1 x i32> %vsqshl1, i32 0
+ ret i32 %0
+}
+
+declare <1 x i32> @llvm.aarch64.neon.vqshls.n.v1i32(<1 x i32>, i32)
+
+define i64 @test_vqshld_n_s64(i64 %a) {
+; CHECK: test_vqshld_n_s64
+; CHECK: sqshl {{d[0-9]+}}, {{d[0-9]+}}, #63
+entry:
+ %vsqshl = insertelement <1 x i64> undef, i64 %a, i32 0
+ %vsqshl1 = call <1 x i64> @llvm.aarch64.neon.vqshls.n.v1i64(<1 x i64> %vsqshl, i32 63)
+ %0 = extractelement <1 x i64> %vsqshl1, i32 0
+ ret i64 %0
+}
+
+declare <1 x i64> @llvm.aarch64.neon.vqshls.n.v1i64(<1 x i64>, i32)
+
+define i8 @test_vqshlb_n_u8(i8 %a) {
+; CHECK: test_vqshlb_n_u8
+; CHECK: uqshl {{b[0-9]+}}, {{b[0-9]+}}, #7
+entry:
+ %vuqshl = insertelement <1 x i8> undef, i8 %a, i32 0
+ %vuqshl1 = call <1 x i8> @llvm.aarch64.neon.vqshlu.n.v1i8(<1 x i8> %vuqshl, i32 7)
+ %0 = extractelement <1 x i8> %vuqshl1, i32 0
+ ret i8 %0
+}
+
+declare <1 x i8> @llvm.aarch64.neon.vqshlu.n.v1i8(<1 x i8>, i32)
+
+define i16 @test_vqshlh_n_u16(i16 %a) {
+; CHECK: test_vqshlh_n_u16
+; CHECK: uqshl {{h[0-9]+}}, {{h[0-9]+}}, #15
+entry:
+ %vuqshl = insertelement <1 x i16> undef, i16 %a, i32 0
+ %vuqshl1 = call <1 x i16> @llvm.aarch64.neon.vqshlu.n.v1i16(<1 x i16> %vuqshl, i32 15)
+ %0 = extractelement <1 x i16> %vuqshl1, i32 0
+ ret i16 %0
+}
+
+declare <1 x i16> @llvm.aarch64.neon.vqshlu.n.v1i16(<1 x i16>, i32)
+
+define i32 @test_vqshls_n_u32(i32 %a) {
+; CHECK: test_vqshls_n_u32
+; CHECK: uqshl {{s[0-9]+}}, {{s[0-9]+}}, #31
+entry:
+ %vuqshl = insertelement <1 x i32> undef, i32 %a, i32 0
+ %vuqshl1 = call <1 x i32> @llvm.aarch64.neon.vqshlu.n.v1i32(<1 x i32> %vuqshl, i32 31)
+ %0 = extractelement <1 x i32> %vuqshl1, i32 0
+ ret i32 %0
+}
+
+declare <1 x i32> @llvm.aarch64.neon.vqshlu.n.v1i32(<1 x i32>, i32)
+
+define i64 @test_vqshld_n_u64(i64 %a) {
+; CHECK: test_vqshld_n_u64
+; CHECK: uqshl {{d[0-9]+}}, {{d[0-9]+}}, #63
+entry:
+ %vuqshl = insertelement <1 x i64> undef, i64 %a, i32 0
+ %vuqshl1 = call <1 x i64> @llvm.aarch64.neon.vqshlu.n.v1i64(<1 x i64> %vuqshl, i32 63)
+ %0 = extractelement <1 x i64> %vuqshl1, i32 0
+ ret i64 %0
+}
+
+declare <1 x i64> @llvm.aarch64.neon.vqshlu.n.v1i64(<1 x i64>, i32)
+
+define i8 @test_vqshlub_n_s8(i8 %a) {
+; CHECK: test_vqshlub_n_s8
+; CHECK: sqshlu {{b[0-9]+}}, {{b[0-9]+}}, #7
+entry:
+ %vsqshlu = insertelement <1 x i8> undef, i8 %a, i32 0
+ %vsqshlu1 = call <1 x i8> @llvm.aarch64.neon.vqshlus.n.v1i8(<1 x i8> %vsqshlu, i32 7)
+ %0 = extractelement <1 x i8> %vsqshlu1, i32 0
+ ret i8 %0
+}
+
+declare <1 x i8> @llvm.aarch64.neon.vqshlus.n.v1i8(<1 x i8>, i32)
+
+define i16 @test_vqshluh_n_s16(i16 %a) {
+; CHECK: test_vqshluh_n_s16
+; CHECK: sqshlu {{h[0-9]+}}, {{h[0-9]+}}, #15
+entry:
+ %vsqshlu = insertelement <1 x i16> undef, i16 %a, i32 0
+ %vsqshlu1 = call <1 x i16> @llvm.aarch64.neon.vqshlus.n.v1i16(<1 x i16> %vsqshlu, i32 15)
+ %0 = extractelement <1 x i16> %vsqshlu1, i32 0
+ ret i16 %0
+}
+
+declare <1 x i16> @llvm.aarch64.neon.vqshlus.n.v1i16(<1 x i16>, i32)
+
+define i32 @test_vqshlus_n_s32(i32 %a) {
+; CHECK: test_vqshlus_n_s32
+; CHECK: sqshlu {{s[0-9]+}}, {{s[0-9]+}}, #31
+entry:
+ %vsqshlu = insertelement <1 x i32> undef, i32 %a, i32 0
+ %vsqshlu1 = call <1 x i32> @llvm.aarch64.neon.vqshlus.n.v1i32(<1 x i32> %vsqshlu, i32 31)
+ %0 = extractelement <1 x i32> %vsqshlu1, i32 0
+ ret i32 %0
+}
+
+declare <1 x i32> @llvm.aarch64.neon.vqshlus.n.v1i32(<1 x i32>, i32)
+
+define i64 @test_vqshlud_n_s64(i64 %a) {
+; CHECK: test_vqshlud_n_s64
+; CHECK: sqshlu {{d[0-9]+}}, {{d[0-9]+}}, #63
+entry:
+ %vsqshlu = insertelement <1 x i64> undef, i64 %a, i32 0
+ %vsqshlu1 = call <1 x i64> @llvm.aarch64.neon.vqshlus.n.v1i64(<1 x i64> %vsqshlu, i32 63)
+ %0 = extractelement <1 x i64> %vsqshlu1, i32 0
+ ret i64 %0
+}
+
+declare <1 x i64> @llvm.aarch64.neon.vqshlus.n.v1i64(<1 x i64>, i32)
+
+define i64 @test_vsrid_n_s64(i64 %a) {
+; CHECK: test_vsrid_n_s64
+; CHECK: sri {{d[0-9]+}}, {{d[0-9]+}}, #63
+entry:
+ %vsri = insertelement <1 x i64> undef, i64 %a, i32 0
+ %vsri1 = call <1 x i64> @llvm.aarch64.neon.vsrid.n(<1 x i64> %vsri, i32 63)
+ %0 = extractelement <1 x i64> %vsri1, i32 0
+ ret i64 %0
+}
+
+declare <1 x i64> @llvm.aarch64.neon.vsrid.n(<1 x i64>, i32)
+
+define i64 @test_vsrid_n_u64(i64 %a) {
+; CHECK: test_vsrid_n_u64
+; CHECK: sri {{d[0-9]+}}, {{d[0-9]+}}, #63
+entry:
+ %vsri = insertelement <1 x i64> undef, i64 %a, i32 0
+ %vsri1 = call <1 x i64> @llvm.aarch64.neon.vsrid.n(<1 x i64> %vsri, i32 63)
+ %0 = extractelement <1 x i64> %vsri1, i32 0
+ ret i64 %0
+}
+
+define i64 @test_vslid_n_s64(i64 %a) {
+; CHECK: test_vslid_n_s64
+; CHECK: sli {{d[0-9]+}}, {{d[0-9]+}}, #63
+entry:
+ %vsli = insertelement <1 x i64> undef, i64 %a, i32 0
+ %vsli1 = call <1 x i64> @llvm.aarch64.neon.vslid.n(<1 x i64> %vsli, i32 63)
+ %0 = extractelement <1 x i64> %vsli1, i32 0
+ ret i64 %0
+}
+
+declare <1 x i64> @llvm.aarch64.neon.vslid.n(<1 x i64>, i32)
+
+define i64 @test_vslid_n_u64(i64 %a) {
+; CHECK: test_vslid_n_u64
+; CHECK: sli {{d[0-9]+}}, {{d[0-9]+}}, #63
+entry:
+ %vsli = insertelement <1 x i64> undef, i64 %a, i32 0
+ %vsli1 = call <1 x i64> @llvm.aarch64.neon.vslid.n(<1 x i64> %vsli, i32 63)
+ %0 = extractelement <1 x i64> %vsli1, i32 0
+ ret i64 %0
+}
+
+define i8 @test_vqshrnh_n_s16(i16 %a) {
+; CHECK: test_vqshrnh_n_s16
+; CHECK: sqshrn {{b[0-9]+}}, {{h[0-9]+}}, #15
+entry:
+ %vsqshrn = insertelement <1 x i16> undef, i16 %a, i32 0
+ %vsqshrn1 = call <1 x i8> @llvm.aarch64.neon.vsqshrn.v1i8(<1 x i16> %vsqshrn, i32 15)
+ %0 = extractelement <1 x i8> %vsqshrn1, i32 0
+ ret i8 %0
+}
+
+declare <1 x i8> @llvm.aarch64.neon.vsqshrn.v1i8(<1 x i16>, i32)
+
+define i16 @test_vqshrns_n_s32(i32 %a) {
+; CHECK: test_vqshrns_n_s32
+; CHECK: sqshrn {{h[0-9]+}}, {{s[0-9]+}}, #31
+entry:
+ %vsqshrn = insertelement <1 x i32> undef, i32 %a, i32 0
+ %vsqshrn1 = call <1 x i16> @llvm.aarch64.neon.vsqshrn.v1i16(<1 x i32> %vsqshrn, i32 31)
+ %0 = extractelement <1 x i16> %vsqshrn1, i32 0
+ ret i16 %0
+}
+
+declare <1 x i16> @llvm.aarch64.neon.vsqshrn.v1i16(<1 x i32>, i32)
+
+define i32 @test_vqshrnd_n_s64(i64 %a) {
+; CHECK: test_vqshrnd_n_s64
+; CHECK: sqshrn {{s[0-9]+}}, {{d[0-9]+}}, #63
+entry:
+ %vsqshrn = insertelement <1 x i64> undef, i64 %a, i32 0
+ %vsqshrn1 = call <1 x i32> @llvm.aarch64.neon.vsqshrn.v1i32(<1 x i64> %vsqshrn, i32 63)
+ %0 = extractelement <1 x i32> %vsqshrn1, i32 0
+ ret i32 %0
+}
+
+declare <1 x i32> @llvm.aarch64.neon.vsqshrn.v1i32(<1 x i64>, i32)
+
+define i8 @test_vqshrnh_n_u16(i16 %a) {
+; CHECK: test_vqshrnh_n_u16
+; CHECK: uqshrn {{b[0-9]+}}, {{h[0-9]+}}, #15
+entry:
+ %vuqshrn = insertelement <1 x i16> undef, i16 %a, i32 0
+ %vuqshrn1 = call <1 x i8> @llvm.aarch64.neon.vuqshrn.v1i8(<1 x i16> %vuqshrn, i32 15)
+ %0 = extractelement <1 x i8> %vuqshrn1, i32 0
+ ret i8 %0
+}
+
+declare <1 x i8> @llvm.aarch64.neon.vuqshrn.v1i8(<1 x i16>, i32)
+
+define i16 @test_vqshrns_n_u32(i32 %a) {
+; CHECK: test_vqshrns_n_u32
+; CHECK: uqshrn {{h[0-9]+}}, {{s[0-9]+}}, #31
+entry:
+ %vuqshrn = insertelement <1 x i32> undef, i32 %a, i32 0
+ %vuqshrn1 = call <1 x i16> @llvm.aarch64.neon.vuqshrn.v1i16(<1 x i32> %vuqshrn, i32 31)
+ %0 = extractelement <1 x i16> %vuqshrn1, i32 0
+ ret i16 %0
+}
+
+declare <1 x i16> @llvm.aarch64.neon.vuqshrn.v1i16(<1 x i32>, i32)
+
+define i32 @test_vqshrnd_n_u64(i64 %a) {
+; CHECK: test_vqshrnd_n_u64
+; CHECK: uqshrn {{s[0-9]+}}, {{d[0-9]+}}, #63
+entry:
+ %vuqshrn = insertelement <1 x i64> undef, i64 %a, i32 0
+ %vuqshrn1 = call <1 x i32> @llvm.aarch64.neon.vuqshrn.v1i32(<1 x i64> %vuqshrn, i32 63)
+ %0 = extractelement <1 x i32> %vuqshrn1, i32 0
+ ret i32 %0
+}
+
+declare <1 x i32> @llvm.aarch64.neon.vuqshrn.v1i32(<1 x i64>, i32)
+
+define i8 @test_vqrshrnh_n_s16(i16 %a) {
+; CHECK: test_vqrshrnh_n_s16
+; CHECK: sqrshrn {{b[0-9]+}}, {{h[0-9]+}}, #15
+entry:
+ %vsqrshrn = insertelement <1 x i16> undef, i16 %a, i32 0
+ %vsqrshrn1 = call <1 x i8> @llvm.aarch64.neon.vsqrshrn.v1i8(<1 x i16> %vsqrshrn, i32 15)
+ %0 = extractelement <1 x i8> %vsqrshrn1, i32 0
+ ret i8 %0
+}
+
+declare <1 x i8> @llvm.aarch64.neon.vsqrshrn.v1i8(<1 x i16>, i32)
+
+define i16 @test_vqrshrns_n_s32(i32 %a) {
+; CHECK: test_vqrshrns_n_s32
+; CHECK: sqrshrn {{h[0-9]+}}, {{s[0-9]+}}, #31
+entry:
+ %vsqrshrn = insertelement <1 x i32> undef, i32 %a, i32 0
+ %vsqrshrn1 = call <1 x i16> @llvm.aarch64.neon.vsqrshrn.v1i16(<1 x i32> %vsqrshrn, i32 31)
+ %0 = extractelement <1 x i16> %vsqrshrn1, i32 0
+ ret i16 %0
+}
+
+declare <1 x i16> @llvm.aarch64.neon.vsqrshrn.v1i16(<1 x i32>, i32)
+
+define i32 @test_vqrshrnd_n_s64(i64 %a) {
+; CHECK: test_vqrshrnd_n_s64
+; CHECK: sqrshrn {{s[0-9]+}}, {{d[0-9]+}}, #63
+entry:
+ %vsqrshrn = insertelement <1 x i64> undef, i64 %a, i32 0
+ %vsqrshrn1 = call <1 x i32> @llvm.aarch64.neon.vsqrshrn.v1i32(<1 x i64> %vsqrshrn, i32 63)
+ %0 = extractelement <1 x i32> %vsqrshrn1, i32 0
+ ret i32 %0
+}
+
+declare <1 x i32> @llvm.aarch64.neon.vsqrshrn.v1i32(<1 x i64>, i32)
+
+define i8 @test_vqrshrnh_n_u16(i16 %a) {
+; CHECK: test_vqrshrnh_n_u16
+; CHECK: uqrshrn {{b[0-9]+}}, {{h[0-9]+}}, #15
+entry:
+ %vuqrshrn = insertelement <1 x i16> undef, i16 %a, i32 0
+ %vuqrshrn1 = call <1 x i8> @llvm.aarch64.neon.vuqrshrn.v1i8(<1 x i16> %vuqrshrn, i32 15)
+ %0 = extractelement <1 x i8> %vuqrshrn1, i32 0
+ ret i8 %0
+}
+
+declare <1 x i8> @llvm.aarch64.neon.vuqrshrn.v1i8(<1 x i16>, i32)
+
+define i16 @test_vqrshrns_n_u32(i32 %a) {
+; CHECK: test_vqrshrns_n_u32
+; CHECK: uqrshrn {{h[0-9]+}}, {{s[0-9]+}}, #31
+entry:
+ %vuqrshrn = insertelement <1 x i32> undef, i32 %a, i32 0
+ %vuqrshrn1 = call <1 x i16> @llvm.aarch64.neon.vuqrshrn.v1i16(<1 x i32> %vuqrshrn, i32 31)
+ %0 = extractelement <1 x i16> %vuqrshrn1, i32 0
+ ret i16 %0
+}
+
+declare <1 x i16> @llvm.aarch64.neon.vuqrshrn.v1i16(<1 x i32>, i32)
+
+define i32 @test_vqrshrnd_n_u64(i64 %a) {
+; CHECK: test_vqrshrnd_n_u64
+; CHECK: uqrshrn {{s[0-9]+}}, {{d[0-9]+}}, #63
+entry:
+ %vuqrshrn = insertelement <1 x i64> undef, i64 %a, i32 0
+ %vuqrshrn1 = call <1 x i32> @llvm.aarch64.neon.vuqrshrn.v1i32(<1 x i64> %vuqrshrn, i32 63)
+ %0 = extractelement <1 x i32> %vuqrshrn1, i32 0
+ ret i32 %0
+}
+
+declare <1 x i32> @llvm.aarch64.neon.vuqrshrn.v1i32(<1 x i64>, i32)
+
+define i8 @test_vqshrunh_n_s16(i16 %a) {
+; CHECK: test_vqshrunh_n_s16
+; CHECK: sqshrun {{b[0-9]+}}, {{h[0-9]+}}, #15
+entry:
+ %vsqshrun = insertelement <1 x i16> undef, i16 %a, i32 0
+ %vsqshrun1 = call <1 x i8> @llvm.aarch64.neon.vsqshrun.v1i8(<1 x i16> %vsqshrun, i32 15)
+ %0 = extractelement <1 x i8> %vsqshrun1, i32 0
+ ret i8 %0
+}
+
+declare <1 x i8> @llvm.aarch64.neon.vsqshrun.v1i8(<1 x i16>, i32)
+
+define i16 @test_vqshruns_n_s32(i32 %a) {
+; CHECK: test_vqshruns_n_s32
+; CHECK: sqshrun {{h[0-9]+}}, {{s[0-9]+}}, #31
+entry:
+ %vsqshrun = insertelement <1 x i32> undef, i32 %a, i32 0
+ %vsqshrun1 = call <1 x i16> @llvm.aarch64.neon.vsqshrun.v1i16(<1 x i32> %vsqshrun, i32 31)
+ %0 = extractelement <1 x i16> %vsqshrun1, i32 0
+ ret i16 %0
+}
+
+declare <1 x i16> @llvm.aarch64.neon.vsqshrun.v1i16(<1 x i32>, i32)
+
+define i32 @test_vqshrund_n_s64(i64 %a) {
+; CHECK: test_vqshrund_n_s64
+; CHECK: sqshrun {{s[0-9]+}}, {{d[0-9]+}}, #63
+entry:
+ %vsqshrun = insertelement <1 x i64> undef, i64 %a, i32 0
+ %vsqshrun1 = call <1 x i32> @llvm.aarch64.neon.vsqshrun.v1i32(<1 x i64> %vsqshrun, i32 63)
+ %0 = extractelement <1 x i32> %vsqshrun1, i32 0
+ ret i32 %0
+}
+
+declare <1 x i32> @llvm.aarch64.neon.vsqshrun.v1i32(<1 x i64>, i32)
+
+define i8 @test_vqrshrunh_n_s16(i16 %a) {
+; CHECK: test_vqrshrunh_n_s16
+; CHECK: sqrshrun {{b[0-9]+}}, {{h[0-9]+}}, #15
+entry:
+ %vsqrshrun = insertelement <1 x i16> undef, i16 %a, i32 0
+ %vsqrshrun1 = call <1 x i8> @llvm.aarch64.neon.vsqrshrun.v1i8(<1 x i16> %vsqrshrun, i32 15)
+ %0 = extractelement <1 x i8> %vsqrshrun1, i32 0
+ ret i8 %0
+}
+
+declare <1 x i8> @llvm.aarch64.neon.vsqrshrun.v1i8(<1 x i16>, i32)
+
+define i16 @test_vqrshruns_n_s32(i32 %a) {
+; CHECK: test_vqrshruns_n_s32
+; CHECK: sqrshrun {{h[0-9]+}}, {{s[0-9]+}}, #31
+entry:
+ %vsqrshrun = insertelement <1 x i32> undef, i32 %a, i32 0
+ %vsqrshrun1 = call <1 x i16> @llvm.aarch64.neon.vsqrshrun.v1i16(<1 x i32> %vsqrshrun, i32 31)
+ %0 = extractelement <1 x i16> %vsqrshrun1, i32 0
+ ret i16 %0
+}
+
+declare <1 x i16> @llvm.aarch64.neon.vsqrshrun.v1i16(<1 x i32>, i32)
+
+define i32 @test_vqrshrund_n_s64(i64 %a) {
+; CHECK: test_vqrshrund_n_s64
+; CHECK: sqrshrun {{s[0-9]+}}, {{d[0-9]+}}, #63
+entry:
+ %vsqrshrun = insertelement <1 x i64> undef, i64 %a, i32 0
+ %vsqrshrun1 = call <1 x i32> @llvm.aarch64.neon.vsqrshrun.v1i32(<1 x i64> %vsqrshrun, i32 63)
+ %0 = extractelement <1 x i32> %vsqrshrun1, i32 0
+ ret i32 %0
+}
+
+declare <1 x i32> @llvm.aarch64.neon.vsqrshrun.v1i32(<1 x i64>, i32)
diff --git a/test/MC/AArch64/neon-diagnostics.s b/test/MC/AArch64/neon-diagnostics.s
index 28f8e7a816..c378ce40bf 100644
--- a/test/MC/AArch64/neon-diagnostics.s
+++ b/test/MC/AArch64/neon-diagnostics.s
@@ -970,23 +970,23 @@
//----------------------------------------------------------------------
// Mismatched vector types
- sqshl b0, b1, s0
- uqshl h0, h1, b0
- sqshl s0, s1, h0
- uqshl d0, d1, b0
+ sqshl b0, s1, b0
+ uqshl h0, b1, h0
+ sqshl s0, h1, s0
+ uqshl d0, b1, d0
// CHECK-ERROR: error: invalid operand for instruction
-// CHECK-ERROR: sqshl b0, b1, s0
-// CHECK-ERROR: ^
+// CHECK-ERROR: sqshl b0, s1, b0
+// CHECK-ERROR: ^
// CHECK-ERROR: error: invalid operand for instruction
-// CHECK-ERROR: uqshl h0, h1, b0
-// CHECK-ERROR: ^
+// CHECK-ERROR: uqshl h0, b1, h0
+// CHECK-ERROR: ^
// CHECK-ERROR: error: invalid operand for instruction
-// CHECK-ERROR: sqshl s0, s1, h0
-// CHECK-ERROR: ^
+// CHECK-ERROR: sqshl s0, h1, s0
+// CHECK-ERROR: ^
// CHECK-ERROR: error: invalid operand for instruction
-// CHECK-ERROR: uqshl d0, d1, b0
-// CHECK-ERROR: ^
+// CHECK-ERROR: uqshl d0, b1, d0
+// CHECK-ERROR: ^
//----------------------------------------------------------------------
// Scalar Integer Rounding Shift Left (Signed, Unsigned)
diff --git a/test/MC/AArch64/neon-scalar-shift-imm.s b/test/MC/AArch64/neon-scalar-shift-imm.s
new file mode 100644
index 0000000000..96cb815eaf
--- /dev/null
+++ b/test/MC/AArch64/neon-scalar-shift-imm.s
@@ -0,0 +1,186 @@
+// RUN: llvm-mc -triple aarch64-none-linux-gnu -mattr=+neon -show-encoding < %s | FileCheck %s
+
+// Check that the assembler can handle the documented syntax for AArch64
+
+//----------------------------------------------------------------------
+// Scalar Signed Shift Right (Immediate)
+//----------------------------------------------------------------------
+ sshr d15, d16, #12
+
+// CHECK: sshr d15, d16, #12 // encoding: [0x0f,0x06,0x74,0x5f]
+
+//----------------------------------------------------------------------
+// Scalar Unsigned Shift Right (Immediate)
+//----------------------------------------------------------------------
+ ushr d10, d17, #18
+
+// CHECK: ushr d10, d17, #18 // encoding: [0x2a,0x06,0x6e,0x7f]
+
+//----------------------------------------------------------------------
+// Scalar Signed Rounding Shift Right (Immediate)
+//----------------------------------------------------------------------
+ srshr d19, d18, #7
+
+// CHECK: srshr d19, d18, #7 // encoding: [0x53,0x26,0x79,0x5f]
+
+//----------------------------------------------------------------------
+// Scalar Unsigned Rounding Shift Right (Immediate)
+//----------------------------------------------------------------------
+ urshr d20, d23, #31
+
+// CHECK: urshr d20, d23, #31 // encoding: [0xf4,0x26,0x61,0x7f]
+
+//----------------------------------------------------------------------
+// Scalar Signed Shift Right and Accumulate (Immediate)
+//----------------------------------------------------------------------
+ ssra d18, d12, #21
+
+// CHECK: ssra d18, d12, #21 // encoding: [0x92,0x15,0x6b,0x5f]
+
+//----------------------------------------------------------------------
+// Scalar Unsigned Shift Right and Accumulate (Immediate)
+//----------------------------------------------------------------------
+ usra d20, d13, #61
+
+// CHECK: usra d20, d13, #61 // encoding: [0xb4,0x15,0x43,0x7f]
+
+//----------------------------------------------------------------------
+// Scalar Signed Rounding Shift Right and Accumulate (Immediate)
+//----------------------------------------------------------------------
+ srsra d15, d11, #19
+
+// CHECK: srsra d15, d11, #19 // encoding: [0x6f,0x35,0x6d,0x5f]
+
+//----------------------------------------------------------------------
+// Scalar Unsigned Rounding Shift Right and Accumulate (Immediate)
+//----------------------------------------------------------------------
+ ursra d18, d10, #13
+
+// CHECK: ursra d18, d10, #13 // encoding: [0x52,0x35,0x73,0x7f]
+
+//----------------------------------------------------------------------
+// Scalar Shift Left (Immediate)
+//----------------------------------------------------------------------
+ shl d7, d10, #12
+
+// CHECK: shl d7, d10, #12 // encoding: [0x47,0x55,0x4c,0x5f]
+
+//----------------------------------------------------------------------
+// Signed Saturating Shift Left (Immediate)
+//----------------------------------------------------------------------
+ sqshl b11, b19, #7
+ sqshl h13, h18, #11
+ sqshl s14, s17, #22
+ sqshl d15, d16, #51
+
+// CHECK: sqshl b11, b19, #7 // encoding: [0x6b,0x76,0x0f,0x5f]
+// CHECK: sqshl h13, h18, #11 // encoding: [0x4d,0x76,0x1b,0x5f]
+// CHECK: sqshl s14, s17, #22 // encoding: [0x2e,0x76,0x36,0x5f]
+// CHECK: sqshl d15, d16, #51 // encoding: [0x0f,0x76,0x73,0x5f]
+
+//----------------------------------------------------------------------
+// Unsigned Saturating Shift Left (Immediate)
+//----------------------------------------------------------------------
+ uqshl b18, b15, #6
+ uqshl h11, h18, #7
+ uqshl s14, s19, #18
+ uqshl d15, d12, #19
+
+// CHECK: uqshl b18, b15, #6 // encoding: [0xf2,0x75,0x0e,0x7f]
+// CHECK: uqshl h11, h18, #7 // encoding: [0x4b,0x76,0x17,0x7f]
+// CHECK: uqshl s14, s19, #18 // encoding: [0x6e,0x76,0x32,0x7f]
+// CHECK: uqshl d15, d12, #19 // encoding: [0x8f,0x75,0x53,0x7f]
+
+//----------------------------------------------------------------------
+// Signed Saturating Shift Left Unsigned (Immediate)
+//----------------------------------------------------------------------
+ sqshlu b15, b18, #6
+ sqshlu h19, h17, #6
+ sqshlu s16, s14, #25
+ sqshlu d11, d13, #32
+
+// CHECK: sqshlu b15, b18, #6 // encoding: [0x4f,0x66,0x0e,0x7f]
+// CHECK: sqshlu h19, h17, #6 // encoding: [0x33,0x66,0x16,0x7f]
+// CHECK: sqshlu s16, s14, #25 // encoding: [0xd0,0x65,0x39,0x7f]
+// CHECK: sqshlu d11, d13, #32 // encoding: [0xab,0x65,0x60,0x7f]
+
+//----------------------------------------------------------------------
+// Shift Right And Insert (Immediate)
+//----------------------------------------------------------------------
+ sri d10, d12, #14
+
+// CHECK: sri d10, d12, #14 // encoding: [0x8a,0x45,0x72,0x7f]
+
+//----------------------------------------------------------------------
+// Shift Left And Insert (Immediate)
+//----------------------------------------------------------------------
+ sli d10, d14, #12
+
+// CHECK: sli d10, d14, #12 // encoding: [0xca,0x55,0x4c,0x7f]
+
+//----------------------------------------------------------------------
+// Signed Saturating Shift Right Narrow (Immediate)
+//----------------------------------------------------------------------
+ sqshrn b10, h15, #5
+ sqshrn h17, s10, #4
+ sqshrn s18, d10, #31
+
+// CHECK: sqshrn b10, h15, #5 // encoding: [0xea,0x95,0x0b,0x5f]
+// CHECK: sqshrn h17, s10, #4 // encoding: [0x51,0x95,0x1c,0x5f]
+// CHECK: sqshrn s18, d10, #31 // encoding: [0x52,0x95,0x21,0x5f]
+
+//----------------------------------------------------------------------
+// Unsigned Saturating Shift Right Narrow (Immediate)
+//----------------------------------------------------------------------
+ uqshrn b12, h10, #7
+ uqshrn h10, s14, #5
+ uqshrn s10, d12, #13
+
+// CHECK: uqshrn b12, h10, #7 // encoding: [0x4c,0x95,0x09,0x7f]
+// CHECK: uqshrn h10, s14, #5 // encoding: [0xca,0x95,0x1b,0x7f]
+// CHECK: uqshrn s10, d12, #13 // encoding: [0x8a,0x95,0x33,0x7f]
+
+//----------------------------------------------------------------------
+// Signed Saturating Rounded Shift Right Narrow (Immediate)
+//----------------------------------------------------------------------
+ sqrshrn b10, h13, #2
+ sqrshrn h15, s10, #6
+ sqrshrn s15, d12, #9
+
+// CHECK: sqrshrn b10, h13, #2 // encoding: [0xaa,0x9d,0x0e,0x5f]
+// CHECK: sqrshrn h15, s10, #6 // encoding: [0x4f,0x9d,0x1a,0x5f]
+// CHECK: sqrshrn s15, d12, #9 // encoding: [0x8f,0x9d,0x37,0x5f]
+
+//----------------------------------------------------------------------
+// Unsigned Saturating Rounded Shift Right Narrow (Immediate)
+//----------------------------------------------------------------------
+ uqrshrn b10, h12, #5
+ uqrshrn h12, s10, #14
+ uqrshrn s10, d10, #25
+
+// CHECK: uqrshrn b10, h12, #5 // encoding: [0x8a,0x9d,0x0b,0x7f]
+// CHECK: uqrshrn h12, s10, #14 // encoding: [0x4c,0x9d,0x12,0x7f]
+// CHECK: uqrshrn s10, d10, #25 // encoding: [0x4a,0x9d,0x27,0x7f]
+
+//----------------------------------------------------------------------
+// Signed Saturating Shift Right Unsigned Narrow (Immediate)
+//----------------------------------------------------------------------
+ sqshrun b15, h10, #7
+ sqshrun h20, s14, #3
+ sqshrun s10, d15, #15
+
+// CHECK: sqshrun b15, h10, #7 // encoding: [0x4f,0x85,0x09,0x7f]
+// CHECK: sqshrun h20, s14, #3 // encoding: [0xd4,0x85,0x1d,0x7f]
+// CHECK: sqshrun s10, d15, #15 // encoding: [0xea,0x85,0x31,0x7f]
+
+//----------------------------------------------------------------------
+// Signed Saturating Rounded Shift Right Unsigned Narrow (Immediate)
+//----------------------------------------------------------------------
+
+ sqrshrun b17, h10, #6
+ sqrshrun h10, s13, #15
+ sqrshrun s22, d16, #31
+
+// CHECK: sqrshrun b17, h10, #6 // encoding: [0x51,0x8d,0x0a,0x7f]
+// CHECK: sqrshrun h10, s13, #15 // encoding: [0xaa,0x8d,0x11,0x7f]
+// CHECK: sqrshrun s22, d16, #31 // encoding: [0x16,0x8e,0x21,0x7f]
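
The encodings asserted above can be spot-checked by hand with the same invocation the RUN line uses; for example, this hypothetical one-off:

    echo 'sshr d15, d16, #12' | llvm-mc -triple aarch64-none-linux-gnu -mattr=+neon -show-encoding

should print the instruction alongside 'encoding: [0x0f,0x06,0x74,0x5f]', matching the first CHECK in this file.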
diff --git a/test/MC/Disassembler/AArch64/neon-instructions.txt b/test/MC/Disassembler/AArch64/neon-instructions.txt
index 2627b14403..c63b65a280 100644
--- a/test/MC/Disassembler/AArch64/neon-instructions.txt
+++ b/test/MC/Disassembler/AArch64/neon-instructions.txt
@@ -1793,3 +1793,165 @@
0x52,0x4a,0x21,0x7e
0x34,0x4a,0x61,0x7e
0xd3,0x49,0xa1,0x7e
+
+#----------------------------------------------------------------------
+# Scalar Signed Shift Right (Immediate)
+#----------------------------------------------------------------------
+# CHECK: sshr d15, d16, #12
+0x0f,0x06,0x74,0x5f
+
+#----------------------------------------------------------------------
+# Scalar Unsigned Shift Right (Immediate)
+#----------------------------------------------------------------------
+# CHECK: ushr d10, d17, #18
+0x2a,0x06,0x6e,0x7f
+
+#----------------------------------------------------------------------
+# Scalar Signed Rounding Shift Right (Immediate)
+#----------------------------------------------------------------------
+# CHECK: srshr d19, d18, #7
+0x53,0x26,0x79,0x5f
+
+#----------------------------------------------------------------------
+# Scalar Unsigned Rounding Shift Right (Immediate)
+#----------------------------------------------------------------------
+# CHECK: urshr d20, d23, #31
+0xf4,0x26,0x61,0x7f
+
+#----------------------------------------------------------------------
+# Scalar Signed Shift Right and Accumulate (Immediate)
+#----------------------------------------------------------------------
+# CHECK: ssra d18, d12, #21
+0x92,0x15,0x6b,0x5f
+
+#----------------------------------------------------------------------
+# Scalar Unsigned Shift Right and Accumulate (Immediate)
+#----------------------------------------------------------------------
+# CHECK: usra d20, d13, #61
+0xb4,0x15,0x43,0x7f
+
+#----------------------------------------------------------------------
+# Scalar Signed Rounding Shift Right and Accumulate (Immediate)
+#----------------------------------------------------------------------
+# CHECK: srsra d15, d11, #19
+0x6f,0x35,0x6d,0x5f
+
+#----------------------------------------------------------------------
+# Scalar Unsigned Rounding Shift Right and Accumulate (Immediate)
+#----------------------------------------------------------------------
+# CHECK: ursra d18, d10, #13
+0x52,0x35,0x73,0x7f
+
+#----------------------------------------------------------------------
+# Scalar Shift Left (Immediate)
+#----------------------------------------------------------------------
+# CHECK: shl d7, d10, #12
+0x47,0x55,0x4c,0x5f
+
+#----------------------------------------------------------------------
+# Signed Saturating Shift Left (Immediate)
+#----------------------------------------------------------------------
+# CHECK: sqshl b11, b19, #7
+# CHECK: sqshl h13, h18, #11
+# CHECK: sqshl s14, s17, #22
+# CHECK: sqshl d15, d16, #51
+0x6b,0x76,0x0f,0x5f
+0x4d,0x76,0x1b,0x5f
+0x2e,0x76,0x36,0x5f
+0x0f,0x76,0x73,0x5f
+
+#----------------------------------------------------------------------
+# Unsigned Saturating Shift Left (Immediate)
+#----------------------------------------------------------------------
+# CHECK: uqshl b18, b15, #6
+# CHECK: uqshl h11, h18, #7
+# CHECK: uqshl s14, s19, #18
+# CHECK: uqshl d15, d12, #19
+0xf2,0x75,0x0e,0x7f
+0x4b,0x76,0x17,0x7f
+0x6e,0x76,0x32,0x7f
+0x8f,0x75,0x53,0x7f
+
+#----------------------------------------------------------------------
+# Signed Saturating Shift Left Unsigned (Immediate)
+#----------------------------------------------------------------------
+# CHECK: sqshlu b15, b18, #6
+# CHECK: sqshlu h19, h17, #6
+# CHECK: sqshlu s16, s14, #25
+# CHECK: sqshlu d11, d13, #32
+0x4f,0x66,0x0e,0x7f
+0x33,0x66,0x16,0x7f
+0xd0,0x65,0x39,0x7f
+0xab,0x65,0x60,0x7f
+
+#----------------------------------------------------------------------
+# Shift Right And Insert (Immediate)
+#----------------------------------------------------------------------
+# CHECK: sri d10, d12, #14
+0x8a,0x45,0x72,0x7f
+
+#----------------------------------------------------------------------
+# Shift Left And Insert (Immediate)
+#----------------------------------------------------------------------
+# CHECK: sli d10, d14, #12
+0xca,0x55,0x4c,0x7f
+
+#----------------------------------------------------------------------
+# Signed Saturating Shift Right Narrow (Immediate)
+#----------------------------------------------------------------------
+# CHECK: sqshrn b10, h15, #5
+# CHECK: sqshrn h17, s10, #4
+# CHECK: sqshrn s18, d10, #31
+0xea,0x95,0x0b,0x5f
+0x51,0x95,0x1c,0x5f
+0x52,0x95,0x21,0x5f
+
+#----------------------------------------------------------------------
+# Unsigned Saturating Shift Right Narrow (Immediate)
+#----------------------------------------------------------------------
+# CHECK: uqshrn b12, h10, #7
+# CHECK: uqshrn h10, s14, #5
+# CHECK: uqshrn s10, d12, #13
+0x4c,0x95,0x09,0x7f
+0xca,0x95,0x1b,0x7f
+0x8a,0x95,0x33,0x7f
+
+#----------------------------------------------------------------------
+# Signed Saturating Rounded Shift Right Narrow (Immediate)
+#----------------------------------------------------------------------
+# CHECK: sqrshrn b10, h13, #2
+# CHECK: sqrshrn h15, s10, #6
+# CHECK: sqrshrn s15, d12, #9
+0xaa,0x9d,0x0e,0x5f
+0x4f,0x9d,0x1a,0x5f
+0x8f,0x9d,0x37,0x5f
+
+#----------------------------------------------------------------------
+# Unsigned Saturating Rounded Shift Right Narrow (Immediate)
+#----------------------------------------------------------------------
+# CHECK: uqrshrn b10, h12, #5
+# CHECK: uqrshrn h12, s10, #14
+# CHECK: uqrshrn s10, d10, #25
+0x8a,0x9d,0x0b,0x7f
+0x4c,0x9d,0x12,0x7f
+0x4a,0x9d,0x27,0x7f
+
+#----------------------------------------------------------------------
+# Signed Saturating Shift Right Unsigned Narrow (Immediate)
+#----------------------------------------------------------------------
+# CHECK: sqshrun b15, h10, #7
+# CHECK: sqshrun h20, s14, #3
+# CHECK: sqshrun s10, d15, #15
+0x4f,0x85,0x09,0x7f
+0xd4,0x85,0x1d,0x7f
+0xea,0x85,0x31,0x7f
+
+#----------------------------------------------------------------------
+# Signed Saturating Rounded Shift Right Unsigned Narrow (Immediate)
+#----------------------------------------------------------------------
+# CHECK: sqrshrun b17, h10, #6
+# CHECK: sqrshrun h10, s13, #15
+# CHECK: sqrshrun s22, d16, #31
+0x51,0x8d,0x0a,0x7f
+0xaa,0x8d,0x11,0x7f
+0x16,0x8e,0x21,0x7f
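
The RUN line for neon-instructions.txt lies outside this hunk; for MC disassembler tests of this shape a typical invocation (an assumption, since the actual line is not part of this diff) is:

    # RUN: llvm-mc -triple aarch64-none-linux-gnu -mattr=+neon -disassemble < %s | FileCheck %s

i.e. each byte group such as 0x0f,0x06,0x74,0x5f is decoded and the output is matched against the preceding CHECK lines.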