author     Hans Wennborg <hans@hanshq.net>  2019-09-09 09:48:38 +0000
committer  Hans Wennborg <hans@hanshq.net>  2019-09-09 09:48:38 +0000
commit     5cbaa56ac5ff406d65037d5fa43ad44e0191f9b0 (patch)
tree       284de8a6a618065a02c45fcdbcaf1fe2425a6247
parent     b508b4ba06795704af6f05d4159fdf656e0185df (diff)
download   llvm-5cbaa56ac5ff406d65037d5fa43ad44e0191f9b0.tar.gz
Merging r370592:
------------------------------------------------------------------------
r370592 | rksimon | 2019-08-31 18:21:31 +0200 (Sat, 31 Aug 2019) | 3 lines

[X86] EltsFromConsecutiveLoads - Don't confuse elt count with vector element count (PR43170)

EltsFromConsecutiveLoads was assuming that the number of input elts was the
same as the number of elements in the output vector type when creating a
zeroing shuffle, causing an assert when subvectors were being combined
instead of just scalars.
------------------------------------------------------------------------

llvm-svn: 371382
-rw-r--r--  llvm/lib/Target/X86/X86ISelLowering.cpp        | 27
-rw-r--r--  llvm/test/CodeGen/X86/vector-shuffle-avx512.ll | 38
2 files changed, 54 insertions(+), 11 deletions(-)
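For readers skimming the diff below: the "elements" handed to EltsFromConsecutiveLoads may themselves be subvectors, so the zeroing shuffle mask has to be sized by the output type's lane count (NumMaskElts) and each input element expanded to Scale = NumMaskElts / NumElems consecutive lanes, rather than sized by NumElems as before. The following is a minimal standalone sketch of that expansion, not LLVM code: buildClearMask and its std::vector parameters are hypothetical stand-ins for the bitmask and SmallVector types the real function uses.

// Illustrative sketch only (not LLVM code) of the scaled clear-mask
// construction added in the diff below. buildClearMask is a hypothetical
// helper; std::vector<bool> stands in for the real function's bitmasks.
#include <cassert>
#include <cstdio>
#include <vector>

// Build a two-operand shuffle mask over NumMaskElts output lanes from
// NumElems input elements, each covering Scale consecutive lanes.
// Operand 0 holds the loaded data, operand 1 is an all-zero vector, so
// zeroed elements pick their lanes from index NumMaskElts upward; undef
// elements stay at the -1 "don't care" sentinel.
std::vector<int> buildClearMask(const std::vector<bool> &ZeroMask,
                                const std::vector<bool> &UndefMask,
                                unsigned NumMaskElts) {
  unsigned NumElems = ZeroMask.size();
  assert(NumMaskElts % NumElems == 0 && "lanes must scale evenly");
  unsigned Scale = NumMaskElts / NumElems;

  std::vector<int> ClearMask(NumMaskElts, -1);
  for (unsigned i = 0; i < NumElems; ++i) {
    if (UndefMask[i])
      continue;
    int Offset = ZeroMask[i] ? (int)NumMaskElts : 0;
    for (unsigned j = 0; j != Scale; ++j)
      ClearMask[i * Scale + j] = (int)(i * Scale + j) + Offset;
  }
  return ClearMask;
}

int main() {
  // PR43170-shaped case: two <8 x float> input elements forming a
  // <16 x float> result; element 0 is loaded, element 1 is zero.
  std::vector<int> Mask =
      buildClearMask(/*ZeroMask=*/{false, true}, /*UndefMask=*/{false, false},
                     /*NumMaskElts=*/16);
  for (int M : Mask)
    std::printf("%d ", M); // 0..7 from the load, 24..31 from the zero vector
  std::printf("\n");
  return 0;
}

In the PR43170 test added below, the combine effectively sees two <8 x float> halves of a <16 x float>: the loaded half keeps lanes 0..7 of operand 0, the zero half maps lanes 8..15 onto the zero operand (indices 24..31), and undef inputs stay at -1. The old code would have built a 2-entry mask for a 16-lane type, which is what tripped the assert described in the commit message.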
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index e67ad2332159..0c5b8a79dd62 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -7650,17 +7650,22 @@ static SDValue EltsFromConsecutiveLoads(EVT VT, ArrayRef<SDValue> Elts,
     // IsConsecutiveLoadWithZeros - we need to create a shuffle of the loaded
     // vector and a zero vector to clear out the zero elements.
     if (!isAfterLegalize && VT.isVector()) {
-      SmallVector<int, 4> ClearMask(NumElems, -1);
-      for (unsigned i = 0; i < NumElems; ++i) {
-        if (ZeroMask[i])
-          ClearMask[i] = i + NumElems;
-        else if (LoadMask[i])
-          ClearMask[i] = i;
-      }
-      SDValue V = CreateLoad(VT, LDBase);
-      SDValue Z = VT.isInteger() ? DAG.getConstant(0, DL, VT)
-                                 : DAG.getConstantFP(0.0, DL, VT);
-      return DAG.getVectorShuffle(VT, DL, V, Z, ClearMask);
+      unsigned NumMaskElts = VT.getVectorNumElements();
+      if ((NumMaskElts % NumElems) == 0) {
+        unsigned Scale = NumMaskElts / NumElems;
+        SmallVector<int, 4> ClearMask(NumMaskElts, -1);
+        for (unsigned i = 0; i < NumElems; ++i) {
+          if (UndefMask[i])
+            continue;
+          int Offset = ZeroMask[i] ? NumMaskElts : 0;
+          for (unsigned j = 0; j != Scale; ++j)
+            ClearMask[(i * Scale) + j] = (i * Scale) + j + Offset;
+        }
+        SDValue V = CreateLoad(VT, LDBase);
+        SDValue Z = VT.isInteger() ? DAG.getConstant(0, DL, VT)
+                                   : DAG.getConstantFP(0.0, DL, VT);
+        return DAG.getVectorShuffle(VT, DL, V, Z, ClearMask);
+      }
     }
   }
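Note the (NumMaskElts % NumElems) == 0 guard in the new code: when the output lane count is not a whole multiple of the number of consecutive-load elements, this zeroing-shuffle path is simply skipped instead of building a wrongly sized mask.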
diff --git a/llvm/test/CodeGen/X86/vector-shuffle-avx512.ll b/llvm/test/CodeGen/X86/vector-shuffle-avx512.ll
index 2092b3bf4530..65472aaeea7e 100644
--- a/llvm/test/CodeGen/X86/vector-shuffle-avx512.ll
+++ b/llvm/test/CodeGen/X86/vector-shuffle-avx512.ll
@@ -936,3 +936,41 @@ define <16 x float> @test_masked_permps_v16f32(<16 x float>* %vp, <16 x float> %
   %res = select <16 x i1> <i1 1, i1 1, i1 1, i1 0, i1 1, i1 1, i1 0, i1 0, i1 1, i1 1, i1 1, i1 0, i1 1, i1 0, i1 1, i1 0>, <16 x float> %shuf, <16 x float> %vec2
   ret <16 x float> %res
 }
+
+%union1 = type { <16 x float> }
+@src1 = external dso_local local_unnamed_addr global %union1, align 64
+
+define void @PR43170(<16 x float>* %a0) {
+; SKX64-LABEL: PR43170:
+; SKX64: # %bb.0: # %entry
+; SKX64-NEXT: vmovaps {{.*}}(%rip), %ymm0
+; SKX64-NEXT: vmovaps %zmm0, (%rdi)
+; SKX64-NEXT: vzeroupper
+; SKX64-NEXT: retq
+;
+; KNL64-LABEL: PR43170:
+; KNL64: # %bb.0: # %entry
+; KNL64-NEXT: vmovaps {{.*}}(%rip), %ymm0
+; KNL64-NEXT: vmovaps %zmm0, (%rdi)
+; KNL64-NEXT: retq
+;
+; SKX32-LABEL: PR43170:
+; SKX32: # %bb.0: # %entry
+; SKX32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; SKX32-NEXT: vmovaps src1, %ymm0
+; SKX32-NEXT: vmovaps %zmm0, (%eax)
+; SKX32-NEXT: vzeroupper
+; SKX32-NEXT: retl
+;
+; KNL32-LABEL: PR43170:
+; KNL32: # %bb.0: # %entry
+; KNL32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; KNL32-NEXT: vmovaps src1, %ymm0
+; KNL32-NEXT: vmovaps %zmm0, (%eax)
+; KNL32-NEXT: retl
+entry:
+  %0 = load <8 x float>, <8 x float>* bitcast (%union1* @src1 to <8 x float>*), align 64
+  %1 = shufflevector <8 x float> %0, <8 x float> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+  store <16 x float> %1, <16 x float>* %a0, align 64
+  ret void
+}
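The CHECK lines above capture the expected lowering once the combine succeeds: a 256-bit vmovaps load of @src1 into %ymm0 (writing a YMM register with a VEX/EVEX-encoded instruction zeroes the upper half of the corresponding ZMM register, which supplies the zeroinitializer lanes) followed by a full 512-bit vmovaps store to the destination; per the commit message, this input pattern previously hit an assert instead of compiling.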