From a9a0e06cbd9d8e891c3d97a901fefe2654d4610b Mon Sep 17 00:00:00 2001
From: Simon Pilgrim
Date: Wed, 24 Feb 2016 15:14:21 +0000
Subject: [PATCH] [X86][SSE41] Combine vector blends with zero

Part 2 of 2
This patch adds support for combining target shuffles into blends-with-zero.

Differential Revision: http://reviews.llvm.org/D17483

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@261745 91177308-0d34-0410-b5e6-96231b3b80d8
---
 lib/Target/X86/X86ISelLowering.cpp         | 58 +++++++++++++++++++
 test/CodeGen/X86/insertelement-zero.ll     | 21 +++----
 test/CodeGen/X86/insertps-combine.ll       |  6 +-
 .../X86/merge-consecutive-loads-256.ll     |  4 +-
 test/CodeGen/X86/vec_insert-7.ll           |  5 +-
 test/CodeGen/X86/vector-shuffle-128-v2.ll  |  4 +-
 test/CodeGen/X86/vector-zext.ll            |  8 +--
 7 files changed, 80 insertions(+), 26 deletions(-)

diff --git a/lib/Target/X86/X86ISelLowering.cpp b/lib/Target/X86/X86ISelLowering.cpp
index c7b4fa5ec6a7..3ffd6666f557 100644
--- a/lib/Target/X86/X86ISelLowering.cpp
+++ b/lib/Target/X86/X86ISelLowering.cpp
@@ -4332,6 +4332,17 @@ static bool isSequentialOrUndefInRange(ArrayRef<int> Mask,
   return true;
 }
 
+/// Return true if every element in Mask, beginning
+/// from position Pos and ending in Pos+Size, falls within the specified
+/// sequential range [Low, Low+Size), or is undef or is zero.
+static bool isSequentialOrUndefOrZeroInRange(ArrayRef<int> Mask, unsigned Pos,
+                                             unsigned Size, int Low) {
+  for (unsigned i = Pos, e = Pos + Size; i != e; ++i, ++Low)
+    if (!isUndefOrZero(Mask[i]) && Mask[i] != Low)
+      return false;
+  return true;
+}
+
 /// Return true if the specified EXTRACT_SUBVECTOR operand specifies a vector
 /// extract that is suitable for instruction that extract 128 or 256 bit vectors
 static bool isVEXTRACTIndex(SDNode *N, unsigned vecWidth) {
@@ -23666,6 +23677,53 @@ static bool combineX86ShuffleChain(SDValue Input, SDValue Root,
     return true;
   }
 
+  // Attempt to blend with zero.
+  if (VT.getVectorNumElements() <= 8 &&
+      ((Subtarget.hasSSE41() && VT.is128BitVector()) ||
+       (Subtarget.hasAVX() && VT.is256BitVector()))) {
+    // Convert VT to a type compatible with X86ISD::BLENDI.
+    // TODO - add 16i16 support (requires lane duplication).
+    MVT ShuffleVT = VT;
+    if (Subtarget.hasAVX2()) {
+      if (VT == MVT::v4i64)
+        ShuffleVT = MVT::v8i32;
+      else if (VT == MVT::v2i64)
+        ShuffleVT = MVT::v4i32;
+    } else {
+      if (VT == MVT::v2i64 || VT == MVT::v4i32)
+        ShuffleVT = MVT::v8i16;
+      else if (VT == MVT::v4i64)
+        ShuffleVT = MVT::v4f64;
+      else if (VT == MVT::v8i32)
+        ShuffleVT = MVT::v8f32;
+    }
+
+    if (isSequentialOrUndefOrZeroInRange(Mask, /*Pos*/ 0, /*Size*/ Mask.size(),
+                                         /*Low*/ 0) &&
+        Mask.size() <= ShuffleVT.getVectorNumElements()) {
+      unsigned BlendMask = 0;
+      unsigned ShuffleSize = ShuffleVT.getVectorNumElements();
+      unsigned MaskRatio = ShuffleSize / Mask.size();
+
+      for (unsigned i = 0; i != ShuffleSize; ++i)
+        if (Mask[i / MaskRatio] < 0)
+          BlendMask |= 1u << i;
+
+      if (Root.getOpcode() != X86ISD::BLENDI ||
+          Root->getConstantOperandVal(2) != BlendMask) {
+        SDValue Zero = getZeroVector(ShuffleVT, Subtarget, DAG, DL);
+        Res = DAG.getBitcast(ShuffleVT, Input);
+        DCI.AddToWorklist(Res.getNode());
+        Res = DAG.getNode(X86ISD::BLENDI, DL, ShuffleVT, Res, Zero,
+                          DAG.getConstant(BlendMask, DL, MVT::i8));
+        DCI.AddToWorklist(Res.getNode());
+        DCI.CombineTo(Root.getNode(), DAG.getBitcast(RootVT, Res),
+                      /*AddTo*/ true);
+        return true;
+      }
+    }
+  }
+
   // Don't try to re-form single instruction chains under any circumstances now
   // that we've done encoding canonicalization for them.
   if (Depth < 2)
diff --git a/test/CodeGen/X86/insertelement-zero.ll b/test/CodeGen/X86/insertelement-zero.ll
index b574e791de05..65c1c0957adf 100644
--- a/test/CodeGen/X86/insertelement-zero.ll
+++ b/test/CodeGen/X86/insertelement-zero.ll
@@ -75,8 +75,7 @@ define <4 x double> @insert_v4f64_0zz3(<4 x double> %a) {
 ; AVX-LABEL: insert_v4f64_0zz3:
 ; AVX: # BB#0:
 ; AVX-NEXT: vxorpd %ymm1, %ymm1, %ymm1
-; AVX-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2,3]
-; AVX-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2],ymm0[3]
+; AVX-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0],ymm1[1,2],ymm0[3]
 ; AVX-NEXT: retq
   %1 = insertelement <4 x double> %a, double 0.0, i32 1
   %2 = insertelement <4 x double> %1, double 0.0, i32 2
@@ -235,8 +234,7 @@ define <8 x float> @insert_v8f32_z12345z7(<8 x float> %a) {
 ; AVX-LABEL: insert_v8f32_z12345z7:
 ; AVX: # BB#0:
 ; AVX-NEXT: vxorps %ymm1, %ymm1, %ymm1
-; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0],ymm0[1,2,3,4,5,6,7]
-; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6],ymm0[7]
+; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0],ymm0[1,2,3,4,5],ymm1[6],ymm0[7]
 ; AVX-NEXT: retq
   %1 = insertelement <8 x float> %a, float 0.0, i32 0
   %2 = insertelement <8 x float> %1, float 0.0, i32 6
@@ -330,15 +328,13 @@ define <8 x i32> @insert_v8i32_z12345z7(<8 x i32> %a) {
 ; AVX1-LABEL: insert_v8i32_z12345z7:
 ; AVX1: # BB#0:
 ; AVX1-NEXT: vxorps %ymm1, %ymm1, %ymm1
-; AVX1-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0],ymm0[1,2,3,4,5,6,7]
-; AVX1-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6],ymm0[7]
+; AVX1-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0],ymm0[1,2,3,4,5],ymm1[6],ymm0[7]
 ; AVX1-NEXT: retq
 ;
 ; AVX2-LABEL: insert_v8i32_z12345z7:
 ; AVX2: # BB#0:
 ; AVX2-NEXT: vpxor %ymm1, %ymm1, %ymm1
-; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0],ymm0[1,2,3,4,5,6,7]
-; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6],ymm0[7]
+; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0],ymm0[1,2,3,4,5],ymm1[6],ymm0[7]
 ; AVX2-NEXT: retq
   %1 = insertelement <8 x i32> %a, i32 0, i32 0
   %2 = insertelement <8 x i32> %1, i32 0, i32 6
@@ -370,15 +366,13 @@ define <8 x i16> @insert_v8i16_z12345z7(<8 x i16> %a) {
 ; SSE41-LABEL: insert_v8i16_z12345z7:
 ; SSE41: # BB#0:
 ; SSE41-NEXT: pxor %xmm1, %xmm1
-; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3,4,5,6,7]
-; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5],xmm1[6],xmm0[7]
+; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3,4,5],xmm1[6],xmm0[7]
 ; SSE41-NEXT: retq
 ;
 ; AVX-LABEL: insert_v8i16_z12345z7:
 ; AVX: # BB#0:
 ; AVX-NEXT: vpxor %xmm1, %xmm1, %xmm1
-; AVX-NEXT: vpblendw {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3,4,5,6,7]
-; AVX-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5],xmm1[6],xmm0[7]
+; AVX-NEXT: vpblendw {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3,4,5],xmm1[6],xmm0[7]
 ; AVX-NEXT: retq
   %1 = insertelement <8 x i16> %a, i16 0, i32 0
   %2 = insertelement <8 x i16> %1, i16 0, i32 6
@@ -413,8 +407,7 @@ define <16 x i16> @insert_v16i16_z12345z789ABZDEz(<16 x i16> %a) {
 ; SSE41-LABEL: insert_v16i16_z12345z789ABZDEz:
 ; SSE41: # BB#0:
 ; SSE41-NEXT: pxor %xmm2, %xmm2
-; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm2[0],xmm0[1,2,3,4,5,6,7]
-; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5],xmm2[6],xmm0[7]
+; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm2[0],xmm0[1,2,3,4,5],xmm2[6],xmm0[7]
 ; SSE41-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,5,6],xmm2[7]
 ; SSE41-NEXT: retq
 ;
diff --git a/test/CodeGen/X86/insertps-combine.ll b/test/CodeGen/X86/insertps-combine.ll
index 51d987bfdbda..3645c0fcf754 100644
--- a/test/CodeGen/X86/insertps-combine.ll
+++ b/test/CodeGen/X86/insertps-combine.ll
@@ -117,12 +117,14 @@ define <4 x float> @insertps_undef_input0(<4 x float> %a0, <4 x float> %a1) {
 define <4 x float> @insertps_undef_input1(<4 x float> %a0, <4 x float> %a1) {
 ; SSE-LABEL: insertps_undef_input1:
 ; SSE: # BB#0:
-; SSE-NEXT: insertps {{.*#+}} xmm0 = zero,zero,zero,xmm0[3]
+; SSE-NEXT: xorps %xmm1, %xmm1
+; SSE-NEXT: blendps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[3]
 ; SSE-NEXT: retq
 ;
 ; AVX-LABEL: insertps_undef_input1:
 ; AVX: # BB#0:
-; AVX-NEXT: vinsertps {{.*#+}} xmm0 = zero,zero,zero,xmm0[3]
+; AVX-NEXT: vxorps %xmm1, %xmm1, %xmm1
+; AVX-NEXT: vblendps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[3]
 ; AVX-NEXT: retq
   %res0 = fadd <4 x float> %a1,
   %res1 = call <4 x float> @llvm.x86.sse41.insertps(<4 x float> %a0, <4 x float> %res0, i8 21)
diff --git a/test/CodeGen/X86/merge-consecutive-loads-256.ll b/test/CodeGen/X86/merge-consecutive-loads-256.ll
index f627241f3fd5..aa55e86b33f6 100644
--- a/test/CodeGen/X86/merge-consecutive-loads-256.ll
+++ b/test/CodeGen/X86/merge-consecutive-loads-256.ll
@@ -278,8 +278,8 @@ define <8 x float> @merge_8f32_2f32_23z5(<2 x float>* %ptr) nounwind uwtable noi
 ; X32-AVX-LABEL: merge_8f32_2f32_23z5:
 ; X32-AVX: # BB#0:
 ; X32-AVX-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-AVX-NEXT: vxorpd %ymm0, %ymm0, %ymm0
-; X32-AVX-NEXT: vblendpd {{.*#+}} ymm0 = mem[0,1],ymm0[2],mem[3]
+; X32-AVX-NEXT: vxorps %ymm0, %ymm0, %ymm0
+; X32-AVX-NEXT: vblendps {{.*#+}} ymm0 = mem[0,1,2,3],ymm0[4,5],mem[6,7]
 ; X32-AVX-NEXT: retl
   %ptr0 = getelementptr inbounds <2 x float>, <2 x float>* %ptr, i64 2
   %ptr1 = getelementptr inbounds <2 x float>, <2 x float>* %ptr, i64 3
diff --git a/test/CodeGen/X86/vec_insert-7.ll b/test/CodeGen/X86/vec_insert-7.ll
index 961e3f25434e..27187183d43a 100644
--- a/test/CodeGen/X86/vec_insert-7.ll
+++ b/test/CodeGen/X86/vec_insert-7.ll
@@ -12,8 +12,9 @@ define x86_mmx @mmx_movzl(x86_mmx %x) nounwind {
 ; CHECK-NEXT: pmovzxdq {{.*#+}} xmm0 = mem[0],zero,mem[1],zero
 ; CHECK-NEXT: movl $32, %eax
 ; CHECK-NEXT: pinsrd $0, %eax, %xmm0
-; CHECK-NEXT: pshufb {{.*#+}} xmm0 = xmm0[0,1,2,3],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; CHECK-NEXT: movq %xmm0, (%esp)
+; CHECK-NEXT: pxor %xmm1, %xmm1
+; CHECK-NEXT: pblendw {{.*#+}} xmm1 = xmm0[0,1],xmm1[2,3,4,5,6,7]
+; CHECK-NEXT: movq %xmm1, (%esp)
 ; CHECK-NEXT: movq (%esp), %mm0
 ; CHECK-NEXT: addl $20, %esp
 ; CHECK-NEXT: retl
diff --git a/test/CodeGen/X86/vector-shuffle-128-v2.ll b/test/CodeGen/X86/vector-shuffle-128-v2.ll
index e5b5c82e9789..5d970e11e5bc 100644
--- a/test/CodeGen/X86/vector-shuffle-128-v2.ll
+++ b/test/CodeGen/X86/vector-shuffle-128-v2.ll
@@ -932,7 +932,7 @@ define <2 x i64> @shuffle_v2i64_bitcast_z123(<2 x i64> %x) {
 ; SSE41-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
 ; SSE41-NEXT: blendps {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3]
 ; SSE41-NEXT: xorps %xmm1, %xmm1
-; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3,4,5,6,7]
+; SSE41-NEXT: blendps {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3]
 ; SSE41-NEXT: retq
 ;
 ; AVX1-LABEL: shuffle_v2i64_bitcast_z123:
@@ -940,7 +940,7 @@ define <2 x i64> @shuffle_v2i64_bitcast_z123(<2 x i64> %x) {
 ; AVX1-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
 ; AVX1-NEXT: vblendps {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3]
 ; AVX1-NEXT: vxorps %xmm1, %xmm1, %xmm1
-; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3,4,5,6,7]
+; AVX1-NEXT: vblendps {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3]
 ; AVX1-NEXT: retq
 ;
 ; AVX2-LABEL: shuffle_v2i64_bitcast_z123:
diff --git a/test/CodeGen/X86/vector-zext.ll b/test/CodeGen/X86/vector-zext.ll
index d07f1ea62dfe..a3c567de652a 100644
--- a/test/CodeGen/X86/vector-zext.ll
+++ b/test/CodeGen/X86/vector-zext.ll
@@ -1162,8 +1162,8 @@ define <4 x i64> @shuf_zext_4i32_to_4i64(<4 x i32> %A) nounwind uwtable readnone
 ; AVX1-LABEL: shuf_zext_4i32_to_4i64:
 ; AVX1: # BB#0: # %entry
 ; AVX1-NEXT: vinsertps {{.*#+}} xmm1 = xmm0[0],zero,xmm0[1],zero
-; AVX1-NEXT: vxorpd %xmm2, %xmm2, %xmm2
-; AVX1-NEXT: vblendpd {{.*#+}} xmm0 = xmm2[0],xmm0[1]
+; AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm2[0,1,2,3],xmm0[4,5,6,7]
 ; AVX1-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[2,0,3,0]
 ; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
 ; AVX1-NEXT: retq
@@ -1592,8 +1592,8 @@ define <4 x i64> @shuf_zext_4i32_to_4i64_offset1(<4 x i32> %A) nounwind uwtable
 ; AVX1-LABEL: shuf_zext_4i32_to_4i64_offset1:
 ; AVX1: # BB#0: # %entry
 ; AVX1-NEXT: vinsertps {{.*#+}} xmm1 = xmm0[3],zero,zero,zero
-; AVX1-NEXT: vxorps %xmm2, %xmm2, %xmm2
-; AVX1-NEXT: vblendps {{.*#+}} xmm0 = xmm2[0,1],xmm0[2],xmm2[3]
+; AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm2[0,1,2,3],xmm0[4,5],xmm2[6,7]
 ; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
 ; AVX1-NEXT: retq
 ;
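
The heart of the combine is the BlendMask computation added to combineX86ShuffleChain: every shuffle-mask element that is undef or known zero selects the corresponding lane of a zero vector, and each bit is replicated MaskRatio times when the blend is performed at a narrower element width than the original mask (e.g. a v2i64 mask expressed as a v8i16 pblendw on plain SSE4.1, as in the vec_insert-7.ll change above). The standalone C++ sketch below illustrates that immediate calculation outside of SelectionDAG. It is not part of the patch; the helper name computeBlendWithZeroMask is hypothetical, and the negative sentinels (-1 for undef, -2 for zero) merely stand in for X86's SM_SentinelUndef/SM_SentinelZero convention, where any negative entry means "take this lane from zero".

// Illustrative sketch (not part of the patch): derive a blend-with-zero
// immediate from a shuffle mask whose negative entries mark undef/zero lanes.
#include <cassert>
#include <cstdio>
#include <vector>

// Build the BLENDI-style immediate: bit i selects the zero vector for lane i
// of the blend type. When the blend type has more (narrower) lanes than the
// shuffle mask, each mask entry covers MaskRatio consecutive blend lanes.
static unsigned computeBlendWithZeroMask(const std::vector<int> &Mask,
                                         unsigned ShuffleSize) {
  assert(ShuffleSize % Mask.size() == 0 && "blend type must evenly split mask");
  unsigned MaskRatio = ShuffleSize / Mask.size();
  unsigned BlendMask = 0;
  for (unsigned i = 0; i != ShuffleSize; ++i)
    if (Mask[i / MaskRatio] < 0) // undef or zero -> take this lane from zero
      BlendMask |= 1u << i;
  return BlendMask;
}

int main() {
  // v4f32 mask {zero, 1, 2, zero} blended at v4f32 granularity -> 0x9 (0b1001).
  std::printf("0x%x\n", computeBlendWithZeroMask({-2, 1, 2, -2}, 4));
  // v2i64 mask {0, zero} widened to a v8i16 (pblendw) blend -> 0xf0 (0b11110000).
  std::printf("0x%x\n", computeBlendWithZeroMask({0, -2}, 8));
  return 0;
}

The widening in the second example is why the non-AVX2 path in the patch remaps v2i64/v4i32 to v8i16: SSE4.1 has no dword/qword integer blend, so the zeroing blend is expressed at word granularity instead, with the immediate expanded accordingly.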