From 7f76cb6666194d7269bbd6ee0966eacc709dd10a Mon Sep 17 00:00:00 2001
From: Craig Topper
Date: Thu, 26 Jul 2012 07:48:28 +0000
Subject: Make l/q suffixes on AVX forms of scalar convert instructions
 consistent with their non-AVX forms.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@160775 91177308-0d34-0410-b5e6-96231b3b80d8
---
 test/MC/Disassembler/X86/simple-tests.txt |  8 +++----
 test/MC/Disassembler/X86/x86-32.txt       |  8 +++----
 test/MC/X86/x86-32-avx.s                  | 16 ++++++-------
 test/MC/X86/x86_64-avx-encoding.s         | 40 +++++++++++++++----------------
 4 files changed, 36 insertions(+), 36 deletions(-)

(limited to 'test/MC')

diff --git a/test/MC/Disassembler/X86/simple-tests.txt b/test/MC/Disassembler/X86/simple-tests.txt
index 15e046feaf..672d239243 100644
--- a/test/MC/Disassembler/X86/simple-tests.txt
+++ b/test/MC/Disassembler/X86/simple-tests.txt
@@ -123,10 +123,10 @@
 # CHECK: vcvtss2sil %xmm0, %eax
 0xc5 0xfa 0x2d 0xc0
 
-# CHECK: vcvtsd2si %xmm0, %eax
+# CHECK: vcvtsd2sil %xmm0, %eax
 0xc5 0xfb 0x2d 0xc0
 
-# CHECK: vcvtsd2si %xmm0, %rax
+# CHECK: vcvtsd2siq %xmm0, %rax
 0xc4 0xe1 0xfb 0x2d 0xc0
 
 # CHECK: vmaskmovpd %xmm0, %xmm1, (%rax)
@@ -437,10 +437,10 @@
 # CHECK: vroundsd $0, %xmm0, %xmm0, %xmm0
 0xc4 0xe3 0x7d 0x0b 0xc0 0x00
 
-# CHECK: vcvtsd2si %xmm0, %eax
+# CHECK: vcvtsd2sil %xmm0, %eax
 0xc4 0xe1 0x7f 0x2d 0xc0
 
-# CHECK: vcvtsd2si %xmm0, %rax
+# CHECK: vcvtsd2siq %xmm0, %rax
 0xc4 0xe1 0xff 0x2d 0xc0
 
 # CHECK: vucomisd %xmm1, %xmm0
diff --git a/test/MC/Disassembler/X86/x86-32.txt b/test/MC/Disassembler/X86/x86-32.txt
index 3ec55f9689..899657b0d4 100644
--- a/test/MC/Disassembler/X86/x86-32.txt
+++ b/test/MC/Disassembler/X86/x86-32.txt
@@ -159,10 +159,10 @@
 # CHECK: vcvtss2sil %xmm0, %eax
 0xc5 0xfa 0x2d 0xc0
 
-# CHECK: vcvtsd2si %xmm0, %eax
+# CHECK: vcvtsd2sil %xmm0, %eax
 0xc5 0xfb 0x2d 0xc0
 
-# CHECK: vcvtsd2si %xmm0, %eax
+# CHECK: vcvtsd2sil %xmm0, %eax
 0xc4 0xe1 0x7b 0x2d 0xc0
 
 # CHECK: vmaskmovpd %xmm0, %xmm1, (%eax)
@@ -460,10 +460,10 @@
 # CHECK: vroundsd $0, %xmm0, %xmm0, %xmm0
 0xc4 0xe3 0x7d 0x0b 0xc0 0x00
 
-# CHECK: vcvtsd2si %xmm0, %eax
+# CHECK: vcvtsd2sil %xmm0, %eax
 0xc4 0xe1 0x7f 0x2d 0xc0
 
-# CHECK: vcvtsd2si %xmm0, %eax
+# CHECK: vcvtsd2sil %xmm0, %eax
 0xc4 0xe1 0xff 0x2d 0xc0
 
 # CHECK: vucomisd %xmm1, %xmm0
diff --git a/test/MC/X86/x86-32-avx.s b/test/MC/X86/x86-32-avx.s
index 9a7a50687e..586f3fe73c 100644
--- a/test/MC/X86/x86-32-avx.s
+++ b/test/MC/X86/x86-32-avx.s
@@ -3103,21 +3103,21 @@
 // CHECK: encoding: [0xc5,0xf8,0x77]
           vzeroupper
 
-// CHECK: vcvtsd2si %xmm4, %ecx
+// CHECK: vcvtsd2sil %xmm4, %ecx
 // CHECK: encoding: [0xc5,0xfb,0x2d,0xcc]
-          vcvtsd2si %xmm4, %ecx
+          vcvtsd2sil %xmm4, %ecx
 
-// CHECK: vcvtsd2si (%ecx), %ecx
+// CHECK: vcvtsd2sil (%ecx), %ecx
 // CHECK: encoding: [0xc5,0xfb,0x2d,0x09]
-          vcvtsd2si (%ecx), %ecx
+          vcvtsd2sil (%ecx), %ecx
 
-// CHECK: vcvtsi2sdl (%ebp), %xmm0, %xmm7
+// CHECK: vcvtsi2sd (%ebp), %xmm0, %xmm7
 // CHECK: encoding: [0xc5,0xfb,0x2a,0x7d,0x00]
-          vcvtsi2sdl (%ebp), %xmm0, %xmm7
+          vcvtsi2sd (%ebp), %xmm0, %xmm7
 
-// CHECK: vcvtsi2sdl (%esp), %xmm0, %xmm7
+// CHECK: vcvtsi2sd (%esp), %xmm0, %xmm7
 // CHECK: encoding: [0xc5,0xfb,0x2a,0x3c,0x24]
-          vcvtsi2sdl (%esp), %xmm0, %xmm7
+          vcvtsi2sd (%esp), %xmm0, %xmm7
 
 // CHECK: vlddqu (%eax), %ymm2
 // CHECK: encoding: [0xc5,0xff,0xf0,0x10]
diff --git a/test/MC/X86/x86_64-avx-encoding.s b/test/MC/X86/x86_64-avx-encoding.s
index 930e33b3c6..46ff9ead39 100644
--- a/test/MC/X86/x86_64-avx-encoding.s
+++ b/test/MC/X86/x86_64-avx-encoding.s
@@ -3860,29 +3860,29 @@ vdivpd -4(%rcx,%rbx,8), %xmm10, %xmm11
 // CHECK: encoding: [0xc4,0x63,0x2d,0x06,0x18,0x07]
           vperm2f128 $7, (%rax), %ymm10, %ymm11
 
-// CHECK: vcvtsd2si %xmm8, %r8d
+// CHECK: vcvtsd2sil %xmm8, %r8d
 // CHECK: encoding: [0xc4,0x41,0x7b,0x2d,0xc0]
-          vcvtsd2si %xmm8, %r8d
+          vcvtsd2sil %xmm8, %r8d
 
-// CHECK: vcvtsd2si (%rcx), %ecx
+// CHECK: vcvtsd2sil (%rcx), %ecx
 // CHECK: encoding: [0xc5,0xfb,0x2d,0x09]
-          vcvtsd2si (%rcx), %ecx
+          vcvtsd2sil (%rcx), %ecx
 
-// CHECK: vcvtss2si %xmm4, %rcx
+// CHECK: vcvtss2siq %xmm4, %rcx
 // CHECK: encoding: [0xc4,0xe1,0xfa,0x2d,0xcc]
-          vcvtss2si %xmm4, %rcx
+          vcvtss2siq %xmm4, %rcx
 
-// CHECK: vcvtss2si (%rcx), %r8
+// CHECK: vcvtss2siq (%rcx), %r8
 // CHECK: encoding: [0xc4,0x61,0xfa,0x2d,0x01]
-          vcvtss2si (%rcx), %r8
+          vcvtss2siq (%rcx), %r8
 
-// CHECK: vcvtsi2sdl %r8d, %xmm8, %xmm15
+// CHECK: vcvtsi2sd %r8d, %xmm8, %xmm15
 // CHECK: encoding: [0xc4,0x41,0x3b,0x2a,0xf8]
-          vcvtsi2sdl %r8d, %xmm8, %xmm15
+          vcvtsi2sd %r8d, %xmm8, %xmm15
 
-// CHECK: vcvtsi2sdl (%rbp), %xmm8, %xmm15
+// CHECK: vcvtsi2sd (%rbp), %xmm8, %xmm15
 // CHECK: encoding: [0xc5,0x3b,0x2a,0x7d,0x00]
-          vcvtsi2sdl (%rbp), %xmm8, %xmm15
+          vcvtsi2sd (%rbp), %xmm8, %xmm15
 
 // CHECK: vcvtsi2sdq %rcx, %xmm4, %xmm6
 // CHECK: encoding: [0xc4,0xe1,0xdb,0x2a,0xf1]
@@ -3900,21 +3900,21 @@ vdivpd -4(%rcx,%rbx,8), %xmm10, %xmm11
 // CHECK: encoding: [0xc4,0xe1,0xda,0x2a,0x31]
           vcvtsi2ssq (%rcx), %xmm4, %xmm6
 
-// CHECK: vcvttsd2si %xmm4, %rcx
+// CHECK: vcvttsd2siq %xmm4, %rcx
 // CHECK: encoding: [0xc4,0xe1,0xfb,0x2c,0xcc]
-          vcvttsd2si %xmm4, %rcx
+          vcvttsd2siq %xmm4, %rcx
 
-// CHECK: vcvttsd2si (%rcx), %rcx
+// CHECK: vcvttsd2siq (%rcx), %rcx
 // CHECK: encoding: [0xc4,0xe1,0xfb,0x2c,0x09]
-          vcvttsd2si (%rcx), %rcx
+          vcvttsd2siq (%rcx), %rcx
 
-// CHECK: vcvttss2si %xmm4, %rcx
+// CHECK: vcvttss2siq %xmm4, %rcx
 // CHECK: encoding: [0xc4,0xe1,0xfa,0x2c,0xcc]
-          vcvttss2si %xmm4, %rcx
+          vcvttss2siq %xmm4, %rcx
 
-// CHECK: vcvttss2si (%rcx), %rcx
+// CHECK: vcvttss2siq (%rcx), %rcx
 // CHECK: encoding: [0xc4,0xe1,0xfa,0x2c,0x09]
-          vcvttss2si (%rcx), %rcx
+          vcvttss2siq (%rcx), %rcx
 
 // CHECK: vlddqu (%rax), %ymm12
 // CHECK: encoding: [0xc5,0x7f,0xf0,0x20]
-- 
cgit v1.2.3
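
Note (not part of the commit): the suffix convention the patch enforces can be exercised with a small llvm-mc test written in the same style as the files above. This is a minimal sketch; the RUN line is assumed to mirror the one used by the AVX encoding tests (an AVX-capable -mcpu or -mattr flag may be needed), and every instruction/encoding pair is copied verbatim from the hunks above. In AT&T syntax the l/q suffix tracks the width of the general-purpose register operand, matching the suffixed non-AVX spellings the commit subject refers to.

// RUN: llvm-mc -triple x86_64-unknown-unknown -mcpu=corei7-avx --show-encoding %s | FileCheck %s

// 32-bit GPR destination takes the 'l' suffix (VEX.W=0 encoding).
// CHECK: vcvtsd2sil %xmm8, %r8d
// CHECK: encoding: [0xc4,0x41,0x7b,0x2d,0xc0]
          vcvtsd2sil %xmm8, %r8d

// 64-bit GPR destination takes the 'q' suffix (VEX.W=1 encoding).
// CHECK: vcvttsd2siq %xmm4, %rcx
// CHECK: encoding: [0xc4,0xe1,0xfb,0x2c,0xcc]
          vcvttsd2siq %xmm4, %rcx

// In the GPR-to-XMM direction a register source already fixes the size,
// so the 'l' suffix is dropped and only the 64-bit form keeps its 'q'.
// CHECK: vcvtsi2sd %r8d, %xmm8, %xmm15
// CHECK: encoding: [0xc4,0x41,0x3b,0x2a,0xf8]
          vcvtsi2sd %r8d, %xmm8, %xmm15

// CHECK: vcvtsi2sdq %rcx, %xmm4, %xmm6
// CHECK: encoding: [0xc4,0xe1,0xdb,0x2a,0xf1]
          vcvtsi2sdq %rcx, %xmm4, %xmm6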