diff options
author | Craig Topper <craig.topper@gmail.com> | 2013-10-07 05:42:48 +0000 |
---|---|---|
committer | Craig Topper <craig.topper@gmail.com> | 2013-10-07 05:42:48 +0000 |
commit | 8fdba75d5b865246455c335adf439def1c3daaeb (patch) | |
tree | 10367cba7f90d1814f80a8513b9bc04724166e27 /test/MC | |
parent | 36a9b31b981553350f5cc4adad9917656c20e96e (diff) | |
download | llvm-8fdba75d5b865246455c335adf439def1c3daaeb.tar.gz llvm-8fdba75d5b865246455c335adf439def1c3daaeb.tar.bz2 llvm-8fdba75d5b865246455c335adf439def1c3daaeb.tar.xz |
Teach X86 asm parser that VMOVAPSrr and other VEX-encoded register to register moves should be switched from using the MRMSrcReg form to the MRMDestReg form if the source register is a 64-bit extended register and the destination register is not.
This allows the instruction to be encoded using the 2-byte VEX form instead of the 3-byte VEX form. The GNU assembler has similar behavior and instruction selection already does this.
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@192088 91177308-0d34-0410-b5e6-96231b3b80d8
Diffstat (limited to 'test/MC')
-rw-r--r-- | test/MC/X86/x86_64-avx-encoding.s | 56 |
1 file changed, 56 insertions, 0 deletions
diff --git a/test/MC/X86/x86_64-avx-encoding.s b/test/MC/X86/x86_64-avx-encoding.s index 6da9e21fef..1a4e784451 100644 --- a/test/MC/X86/x86_64-avx-encoding.s +++ b/test/MC/X86/x86_64-avx-encoding.s @@ -4185,3 +4185,59 @@ _foo2: // CHECK: vpgatherqd %xmm8, (%r15,%ymm9,2), %xmm10 // CHECK: encoding: [0xc4,0x02,0x3d,0x91,0x14,0x4f] vpgatherqd %xmm8, (%r15,%ymm9,2), %xmm10 + +// CHECK: vmovaps %xmm0, %xmm8 +// CHECK: encoding: [0xc5,0x78,0x28,0xc0] + vmovaps %xmm0, %xmm8 + +// CHECK: vmovaps %xmm8, %xmm0 +// CHECK: encoding: [0xc5,0x78,0x29,0xc0] + vmovaps %xmm8, %xmm0 + +// CHECK: vmovaps %ymm0, %ymm8 +// CHECK: encoding: [0xc5,0x7c,0x28,0xc0] + vmovaps %ymm0, %ymm8 + +// CHECK: vmovaps %ymm8, %ymm0 +// CHECK: encoding: [0xc5,0x7c,0x29,0xc0] + vmovaps %ymm8, %ymm0 + +// CHECK: vmovups %xmm0, %xmm8 +// CHECK: encoding: [0xc5,0x78,0x10,0xc0] + vmovups %xmm0, %xmm8 + +// CHECK: vmovups %xmm8, %xmm0 +// CHECK: encoding: [0xc5,0x78,0x11,0xc0] + vmovups %xmm8, %xmm0 + +// CHECK: vmovups %ymm0, %ymm8 +// CHECK: encoding: [0xc5,0x7c,0x10,0xc0] + vmovups %ymm0, %ymm8 + +// CHECK: vmovups %ymm8, %ymm0 +// CHECK: encoding: [0xc5,0x7c,0x11,0xc0] + vmovups %ymm8, %ymm0 + +// CHECK: vmovss %xmm0, %xmm0, %xmm8 +// CHECK: encoding: [0xc5,0x7a,0x10,0xc0] + vmovss %xmm0, %xmm0, %xmm8 + +// CHECK: vmovss %xmm0, %xmm8, %xmm0 +// CHECK: encoding: [0xc5,0xba,0x10,0xc0] + vmovss %xmm0, %xmm8, %xmm0 + +// CHECK: vmovss %xmm8, %xmm0, %xmm0 +// CHECK: encoding: [0xc5,0x7a,0x11,0xc0] + vmovss %xmm8, %xmm0, %xmm0 + +// CHECK: vmovsd %xmm0, %xmm0, %xmm8 +// CHECK: encoding: [0xc5,0x7b,0x10,0xc0] + vmovsd %xmm0, %xmm0, %xmm8 + +// CHECK: vmovsd %xmm0, %xmm8, %xmm0 +// CHECK: encoding: [0xc5,0xbb,0x10,0xc0] + vmovsd %xmm0, %xmm8, %xmm0 + +// CHECK: vmovsd %xmm8, %xmm0, %xmm0 +// CHECK: encoding: [0xc5,0x7b,0x11,0xc0] + vmovsd %xmm8, %xmm0, %xmm0 |