; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512vnni -mattr=+avx512vl | FileCheck %s
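
; These tests cover lowering of extend + multiply + vector.reduce.add dot-product
; patterns to the AVX512VNNI vpdpbusd instruction when operand signedness permits.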
declare i32 @llvm.vector.reduce.add.v16i32(<16 x i32>)
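
; zext(load <16 x i8>) * sext(<16 x i8>), reduced with vector.reduce.add and
; accumulated into %c; lowers to a single vpdpbusd plus a horizontal reduction.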
define i32 @mul_i8i8(ptr %a, <16 x i8> %b, i32 %c) {
; CHECK-LABEL: mul_i8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmovdqa (%rdi), %xmm1
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpdpbusd %xmm0, %xmm1, %xmm2
; CHECK-NEXT: vpshufd {{.*#+}} xmm0 = xmm2[2,3,2,3]
; CHECK-NEXT: vpaddd %xmm0, %xmm2, %xmm0
; CHECK-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
; CHECK-NEXT: vpaddd %xmm1, %xmm0, %xmm0
; CHECK-NEXT: vmovd %xmm0, %eax
; CHECK-NEXT: addl %esi, %eax
; CHECK-NEXT: retq
entry:
%0 = load <16 x i8>, ptr %a, align 16
%1 = zext <16 x i8> %0 to <16 x i32>
%2 = sext <16 x i8> %b to <16 x i32>
%3 = mul nsw <16 x i32> %1, %2
%4 = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> %3)
%op.extra = add nsw i32 %4, %c
ret i32 %op.extra
}
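
; zext i4 * sext i8: the i4 operand is masked to its low nibble (vpand) and the
; pattern still maps onto vpdpbusd.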
define i32 @mul_i4i8(<16 x i4> %a, <16 x i8> %b, i32 %c) {
; CHECK-LABEL: mul_i4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpdpbusd %xmm1, %xmm0, %xmm2
; CHECK-NEXT: vpshufd {{.*#+}} xmm0 = xmm2[2,3,2,3]
; CHECK-NEXT: vpaddd %xmm0, %xmm2, %xmm0
; CHECK-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
; CHECK-NEXT: vpaddd %xmm1, %xmm0, %xmm0
; CHECK-NEXT: vmovd %xmm0, %eax
; CHECK-NEXT: addl %edi, %eax
; CHECK-NEXT: retq
entry:
%0 = zext <16 x i4> %a to <16 x i32>
%1 = sext <16 x i8> %b to <16 x i32>
%2 = mul nsw <16 x i32> %0, %1
%3 = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> %2)
%op.extra = add nsw i32 %3, %c
ret i32 %op.extra
}
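
; zext i4 * sext i4: the signed i4 operand is sign-extended to an i8 in-register
; before feeding vpdpbusd.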
define i32 @mul_i4i4(<16 x i4> %a, <16 x i4> %b, i32 %c) {
; CHECK-LABEL: mul_i4i4:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vpsllw $4, %xmm1, %xmm1
; CHECK-NEXT: vpsrlw $4, %xmm1, %xmm1
; CHECK-NEXT: vmovdqa {{.*#+}} xmm2 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
; CHECK-NEXT: vpternlogq $108, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm1
; CHECK-NEXT: vpsubb %xmm2, %xmm1, %xmm1
; CHECK-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpdpbusd %xmm1, %xmm0, %xmm2
; CHECK-NEXT: vpshufd {{.*#+}} xmm0 = xmm2[2,3,2,3]
; CHECK-NEXT: vpaddd %xmm0, %xmm2, %xmm0
; CHECK-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
; CHECK-NEXT: vpaddd %xmm1, %xmm0, %xmm0
; CHECK-NEXT: vmovd %xmm0, %eax
; CHECK-NEXT: addl %edi, %eax
; CHECK-NEXT: retq
entry:
%0 = zext <16 x i4> %a to <16 x i32>
%1 = sext <16 x i4> %b to <16 x i32>
%2 = mul nsw <16 x i32> %0, %1
%3 = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> %2)
%op.extra = add nsw i32 %3, %c
ret i32 %op.extra
}
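
; sext i4 * sext i4: both operands are signed, so the unsigned x signed vpdpbusd
; form does not apply and the reduction falls back to vpmaddwd.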
define i32 @mul_sext_i4i4(<16 x i4> %a, <16 x i4> %b, i32 %c) {
; CHECK-LABEL: mul_sext_i4i4:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
; CHECK-NEXT: vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
; CHECK-NEXT: vpsllw $12, %ymm1, %ymm1
; CHECK-NEXT: vpsraw $12, %ymm1, %ymm1
; CHECK-NEXT: vpsllw $12, %ymm0, %ymm0
; CHECK-NEXT: vpsraw $12, %ymm0, %ymm0
; CHECK-NEXT: vpmaddwd %ymm1, %ymm0, %ymm0
; CHECK-NEXT: vextracti128 $1, %ymm0, %xmm1
; CHECK-NEXT: vpaddd %ymm1, %ymm0, %ymm0
; CHECK-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
; CHECK-NEXT: vpaddd %xmm1, %xmm0, %xmm0
; CHECK-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
; CHECK-NEXT: vpaddd %xmm1, %xmm0, %xmm0
; CHECK-NEXT: vmovd %xmm0, %eax
; CHECK-NEXT: addl %edi, %eax
; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
entry:
%0 = sext <16 x i4> %a to <16 x i32>
%1 = sext <16 x i4> %b to <16 x i32>
%2 = mul nsw <16 x i32> %0, %1
%3 = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> %2)
%op.extra = add nsw i32 %3, %c
ret i32 %op.extra
}
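
; zext i4 * zext i4: after masking to the low nibble both operands fit in the
; unsigned byte range, so vpdpbusd can still be used.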
define i32 @mul_zext_i4i4(<16 x i4> %a, <16 x i4> %b, i32 %c) {
; CHECK-LABEL: mul_zext_i4i4:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmovdqa {{.*#+}} xmm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; CHECK-NEXT: vpand %xmm2, %xmm1, %xmm1
; CHECK-NEXT: vpand %xmm2, %xmm0, %xmm0
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpdpbusd %xmm1, %xmm0, %xmm2
; CHECK-NEXT: vpshufd {{.*#+}} xmm0 = xmm2[2,3,2,3]
; CHECK-NEXT: vpaddd %xmm0, %xmm2, %xmm0
; CHECK-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
; CHECK-NEXT: vpaddd %xmm1, %xmm0, %xmm0
; CHECK-NEXT: vmovd %xmm0, %eax
; CHECK-NEXT: addl %edi, %eax
; CHECK-NEXT: retq
entry:
%0 = zext <16 x i4> %a to <16 x i32>
%1 = zext <16 x i4> %b to <16 x i32>
%2 = mul nsw <16 x i32> %0, %1
%3 = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> %2)
%op.extra = add nsw i32 %3, %c
ret i32 %op.extra
}