From 1b1fdc059968238af92a84bfa85f7b08093e8c32 Mon Sep 17 00:00:00 2001
From: Chad Rosier
Date: Tue, 20 Mar 2012 21:43:40 +0000
Subject: [avx] Add patterns for combining vextractf128 + vmovaps/vmovups/vmovdqu
 to vextractf128 with 128-bit mem dest.

Combines
  vextractf128 $0, %ymm0, %xmm0
  vmovaps %xmm0, (%rdi)
to
  vextractf128 $0, %ymm0, (%rdi)

rdar://11082570

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@153139 91177308-0d34-0410-b5e6-96231b3b80d8
---
 lib/Target/X86/X86InstrSSE.td        | 17 +++++++
 test/CodeGen/X86/avx-vextractf128.ll | 89 ++++++++++++++++++++++++++++++++++++
 2 files changed, 106 insertions(+)

diff --git a/lib/Target/X86/X86InstrSSE.td b/lib/Target/X86/X86InstrSSE.td
index 02205bb2c7..df426279ea 100644
--- a/lib/Target/X86/X86InstrSSE.td
+++ b/lib/Target/X86/X86InstrSSE.td
@@ -7309,6 +7309,23 @@ def VEXTRACTF128mr : AVXAIi8<0x19, MRMDestMem, (outs),
           []>, VEX;
 }
 
+// Extract and store.
+let Predicates = [HasAVX] in {
+  def : Pat<(alignedstore (int_x86_avx_vextractf128_ps_256 VR256:$src1, imm:$src2), addr:$dst),
+            (VEXTRACTF128mr addr:$dst, VR256:$src1, imm:$src2)>;
+  def : Pat<(alignedstore (int_x86_avx_vextractf128_pd_256 VR256:$src1, imm:$src2), addr:$dst),
+            (VEXTRACTF128mr addr:$dst, VR256:$src1, imm:$src2)>;
+  def : Pat<(alignedstore (int_x86_avx_vextractf128_si_256 VR256:$src1, imm:$src2), addr:$dst),
+            (VEXTRACTF128mr addr:$dst, VR256:$src1, imm:$src2)>;
+
+  def : Pat<(int_x86_sse_storeu_ps addr:$dst, (int_x86_avx_vextractf128_ps_256 VR256:$src1, imm:$src2)),
+            (VEXTRACTF128mr addr:$dst, VR256:$src1, imm:$src2)>;
+  def : Pat<(int_x86_sse2_storeu_pd addr:$dst, (int_x86_avx_vextractf128_pd_256 VR256:$src1, imm:$src2)),
+            (VEXTRACTF128mr addr:$dst, VR256:$src1, imm:$src2)>;
+  def : Pat<(int_x86_sse2_storeu_dq addr:$dst, (bc_v16i8 (int_x86_avx_vextractf128_si_256 VR256:$src1, imm:$src2))),
+            (VEXTRACTF128mr addr:$dst, VR256:$src1, imm:$src2)>;
+}
+
 // AVX1 patterns
 let Predicates = [HasAVX] in {
   def : Pat<(int_x86_avx_vextractf128_pd_256 VR256:$src1, imm:$src2),
diff --git a/test/CodeGen/X86/avx-vextractf128.ll b/test/CodeGen/X86/avx-vextractf128.ll
index 6d3fbfd011..fe0f6caed3 100644
--- a/test/CodeGen/X86/avx-vextractf128.ll
+++ b/test/CodeGen/X86/avx-vextractf128.ll
@@ -1,5 +1,6 @@
 ; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=corei7-avx -mattr=+avx | FileCheck %s
 
+; CHECK: @A
 ; CHECK-NOT: vunpck
 ; CHECK: vextractf128 $1
 define <8 x float> @A(<8 x float> %a) nounwind uwtable readnone ssp {
@@ -8,6 +9,7 @@ entry:
   ret <8 x float> %shuffle
 }
 
+; CHECK: @B
 ; CHECK-NOT: vunpck
 ; CHECK: vextractf128 $1
 define <4 x double> @B(<4 x double> %a) nounwind uwtable readnone ssp {
@@ -15,3 +17,90 @@ entry:
   %shuffle = shufflevector <4 x double> %a, <4 x double> undef, <4 x i32> <i32 2, i32 3, i32 4, i32 4>
   ret <4 x double> %shuffle
 }
+
+; CHECK: @t0
+; CHECK-NOT: vextractf128 $0, %ymm0, %xmm0
+; CHECK-NOT: vmovaps %xmm0, (%rdi)
+; CHECK: vextractf128 $0, %ymm0, (%rdi)
+define void @t0(float* nocapture %addr, <8 x float> %a) nounwind uwtable ssp {
+entry:
+  %0 = tail call <4 x float> @llvm.x86.avx.vextractf128.ps.256(<8 x float> %a, i8 0)
+  %1 = bitcast float* %addr to <4 x float>*
+  store <4 x float> %0, <4 x float>* %1, align 16
+  ret void
+}
+
+declare <4 x float> @llvm.x86.avx.vextractf128.ps.256(<8 x float>, i8) nounwind readnone
+
+; CHECK: @t1
+; CHECK-NOT: vextractf128 $0, %ymm0, %xmm0
+; CHECK-NOT: vmovups %xmm0, (%rdi)
+; CHECK: vextractf128 $0, %ymm0, (%rdi)
+define void @t1(float* %addr, <8 x float> %a) nounwind uwtable ssp {
+entry:
+  %0 = tail call <4 x float> @llvm.x86.avx.vextractf128.ps.256(<8 x float> %a, i8 0)
+  %1 = bitcast float* %addr to i8*
+  tail call void @llvm.x86.sse.storeu.ps(i8* %1, <4 x float> %0)
+  ret void
+}
+
+declare void @llvm.x86.sse.storeu.ps(i8*, <4 x float>) nounwind
+
+; CHECK: @t2
+; CHECK-NOT: vextractf128 $0, %ymm0, %xmm0
+; CHECK-NOT: vmovaps %xmm0, (%rdi)
+; CHECK: vextractf128 $0, %ymm0, (%rdi)
+define void @t2(double* nocapture %addr, <4 x double> %a) nounwind uwtable ssp {
+entry:
+  %0 = tail call <2 x double> @llvm.x86.avx.vextractf128.pd.256(<4 x double> %a, i8 0)
+  %1 = bitcast double* %addr to <2 x double>*
+  store <2 x double> %0, <2 x double>* %1, align 16
+  ret void
+}
+
+declare <2 x double> @llvm.x86.avx.vextractf128.pd.256(<4 x double>, i8) nounwind readnone
+
+; CHECK: @t3
+; CHECK-NOT: vextractf128 $0, %ymm0, %xmm0
+; CHECK-NOT: vmovups %xmm0, (%rdi)
+; CHECK: vextractf128 $0, %ymm0, (%rdi)
+define void @t3(double* %addr, <4 x double> %a) nounwind uwtable ssp {
+entry:
+  %0 = tail call <2 x double> @llvm.x86.avx.vextractf128.pd.256(<4 x double> %a, i8 0)
+  %1 = bitcast double* %addr to i8*
+  tail call void @llvm.x86.sse2.storeu.pd(i8* %1, <2 x double> %0)
+  ret void
+}
+
+declare void @llvm.x86.sse2.storeu.pd(i8*, <2 x double>) nounwind
+
+; CHECK: @t4
+; CHECK-NOT: vextractf128 $0, %ymm0, %xmm0
+; CHECK-NOT: vmovaps %xmm0, (%rdi)
+; CHECK: vextractf128 $0, %ymm0, (%rdi)
+define void @t4(<2 x i64>* nocapture %addr, <4 x i64> %a) nounwind uwtable ssp {
+entry:
+  %0 = bitcast <4 x i64> %a to <8 x i32>
+  %1 = tail call <4 x i32> @llvm.x86.avx.vextractf128.si.256(<8 x i32> %0, i8 0)
+  %2 = bitcast <4 x i32> %1 to <2 x i64>
+  store <2 x i64> %2, <2 x i64>* %addr, align 16
+  ret void
+}
+
+declare <4 x i32> @llvm.x86.avx.vextractf128.si.256(<8 x i32>, i8) nounwind readnone
+
+; CHECK: @t5
+; CHECK-NOT: vextractf128 $0, %ymm0, %xmm0
+; CHECK-NOT: vmovdqu %xmm0, (%rdi)
+; CHECK: vextractf128 $0, %ymm0, (%rdi)
+define void @t5(<2 x i64>* %addr, <4 x i64> %a) nounwind uwtable ssp {
+entry:
+  %0 = bitcast <4 x i64> %a to <8 x i32>
+  %1 = tail call <4 x i32> @llvm.x86.avx.vextractf128.si.256(<8 x i32> %0, i8 0)
+  %2 = bitcast <2 x i64>* %addr to i8*
+  %3 = bitcast <4 x i32> %1 to <16 x i8>
+  tail call void @llvm.x86.sse2.storeu.dq(i8* %2, <16 x i8> %3)
+  ret void
+}
+
+declare void @llvm.x86.sse2.storeu.dq(i8*, <16 x i8>) nounwind
-- 
cgit v1.2.3
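
For reference, a minimal C sketch of the source-level pattern these combines
target, assuming the standard AVX intrinsics from <immintrin.h> (compile with
-mavx); the helper name and parameters are illustrative only, not taken from
the patch:

    #include <immintrin.h>

    /* An extract + unaligned-store pair of this shape is the kind of
       sequence that, with the patterns above, should compile to a single
           vextractf128 $0, %ymm0, (%rdi)
       rather than a vextractf128 into an xmm register followed by vmovups. */
    void store_low_half(float *addr, __m256 v) {
      __m128 lo = _mm256_extractf128_ps(v, 0); /* extract lower 128 bits */
      _mm_storeu_ps(addr, lo);                 /* unaligned 128-bit store */
    }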