1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
|
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt -passes=instcombine -S < %s | FileCheck %s
; Volatile scalable <vscale x 4 x i32> store/reload through an alloca of
; fixed-size [16 x i32]. The autogenerated CHECK lines show instcombine
; leaves the alloca type, the volatile accesses, and all alignments
; unchanged (mixed fixed/scalable access sizes are not folded).
define void @fixed_array16i32_to_scalable4i32(ptr %out) {
; CHECK-LABEL: @fixed_array16i32_to_scalable4i32(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP:%.*]] = alloca [16 x i32], align 16
; CHECK-NEXT: store volatile <vscale x 4 x i32> zeroinitializer, ptr [[TMP]], align 16
; CHECK-NEXT: [[RELOAD:%.*]] = load volatile <vscale x 4 x i32>, ptr [[TMP]], align 16
; CHECK-NEXT: store <vscale x 4 x i32> [[RELOAD]], ptr [[OUT:%.*]], align 16
; CHECK-NEXT: ret void
;
entry:
%tmp = alloca [16 x i32], align 16
store volatile <vscale x 4 x i32> zeroinitializer, ptr %tmp, align 16
%reload = load volatile <vscale x 4 x i32>, ptr %tmp, align 16
store <vscale x 4 x i32> %reload, ptr %out, align 16
ret void
}
; Fixed <16 x i32> store/reload through a scalable <vscale x 4 x i32>
; alloca. Per the autogenerated CHECK lines, instcombine keeps the
; fixed-vector accesses but raises the alloca and access alignment from
; 16 to 64 (NOTE(review): presumably driven by the 64-byte fixed-vector
; accesses — confirm against the pass output if regenerating).
define void @scalable4i32_to_fixed16i32(ptr %out) {
; CHECK-LABEL: @scalable4i32_to_fixed16i32(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP:%.*]] = alloca <vscale x 4 x i32>, align 64
; CHECK-NEXT: store <16 x i32> zeroinitializer, ptr [[TMP]], align 64
; CHECK-NEXT: [[RELOAD:%.*]] = load volatile <16 x i32>, ptr [[TMP]], align 64
; CHECK-NEXT: store <16 x i32> [[RELOAD]], ptr [[OUT:%.*]], align 16
; CHECK-NEXT: ret void
;
entry:
%tmp = alloca <vscale x 4 x i32>, align 16
store <16 x i32> zeroinitializer, ptr %tmp, align 16
%reload = load volatile <16 x i32>, ptr %tmp, align 16
store <16 x i32> %reload, ptr %out, align 16
ret void
}
; Volatile scalable <vscale x 4 x i32> store/reload through a fixed
; <16 x i32> alloca. The autogenerated CHECK lines show instcombine
; leaves everything unchanged, including the align-16 alloca.
define void @fixed16i32_to_scalable4i32(ptr %out) {
; CHECK-LABEL: @fixed16i32_to_scalable4i32(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP:%.*]] = alloca <16 x i32>, align 16
; CHECK-NEXT: store volatile <vscale x 4 x i32> zeroinitializer, ptr [[TMP]], align 16
; CHECK-NEXT: [[RELOAD:%.*]] = load volatile <vscale x 4 x i32>, ptr [[TMP]], align 16
; CHECK-NEXT: store <vscale x 4 x i32> [[RELOAD]], ptr [[OUT:%.*]], align 16
; CHECK-NEXT: ret void
;
entry:
%tmp = alloca <16 x i32>, align 16
store volatile <vscale x 4 x i32> zeroinitializer, ptr %tmp, align 16
%reload = load volatile <vscale x 4 x i32>, ptr %tmp, align 16
store <vscale x 4 x i32> %reload, ptr %out, align 16
ret void
}
; Volatile fixed <16 x i32> store/reload through a scalable
; <vscale x 16 x i32> alloca. Per the autogenerated CHECK lines,
; instcombine keeps the fixed-vector accesses but raises the alloca
; and access alignment from 16 to 64.
define void @scalable16i32_to_fixed16i32(ptr %out) {
; CHECK-LABEL: @scalable16i32_to_fixed16i32(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP:%.*]] = alloca <vscale x 16 x i32>, align 64
; CHECK-NEXT: store volatile <16 x i32> zeroinitializer, ptr [[TMP]], align 64
; CHECK-NEXT: [[RELOAD:%.*]] = load volatile <16 x i32>, ptr [[TMP]], align 64
; CHECK-NEXT: store <16 x i32> [[RELOAD]], ptr [[OUT:%.*]], align 16
; CHECK-NEXT: ret void
;
entry:
%tmp = alloca <vscale x 16 x i32>, align 16
store volatile <16 x i32> zeroinitializer, ptr %tmp, align 16
%reload = load volatile <16 x i32>, ptr %tmp, align 16
store <16 x i32> %reload, ptr %out, align 16
ret void
}
; Volatile scalable <vscale x 16 x i32> store/reload through a larger
; scalable <vscale x 32 x i32> alloca (both scalable, different element
; counts). Per the autogenerated CHECK lines, instcombine keeps the
; accesses but raises the alloca and access alignment from 16 to 64.
define void @scalable32i32_to_scalable16i32(ptr %out) {
; CHECK-LABEL: @scalable32i32_to_scalable16i32(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP:%.*]] = alloca <vscale x 32 x i32>, align 64
; CHECK-NEXT: store volatile <vscale x 16 x i32> zeroinitializer, ptr [[TMP]], align 64
; CHECK-NEXT: [[RELOAD:%.*]] = load volatile <vscale x 16 x i32>, ptr [[TMP]], align 64
; CHECK-NEXT: store <vscale x 16 x i32> [[RELOAD]], ptr [[OUT:%.*]], align 16
; CHECK-NEXT: ret void
;
entry:
%tmp = alloca <vscale x 32 x i32>, align 16
store volatile <vscale x 16 x i32> zeroinitializer, ptr %tmp, align 16
%reload = load volatile <vscale x 16 x i32>, ptr %tmp, align 16
store <vscale x 16 x i32> %reload, ptr %out, align 16
ret void
}
; Volatile scalable <vscale x 16 x i32> store/reload through a scalable
; <vscale x 32 x i16> alloca (different element type, same minimum byte
; size). Per the autogenerated CHECK lines, instcombine keeps the i32
; accesses and the i16 alloca type, raising alignment from 16 to 64.
define void @scalable32i16_to_scalable16i32(ptr %out) {
; CHECK-LABEL: @scalable32i16_to_scalable16i32(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP:%.*]] = alloca <vscale x 32 x i16>, align 64
; CHECK-NEXT: store volatile <vscale x 16 x i32> zeroinitializer, ptr [[TMP]], align 64
; CHECK-NEXT: [[RELOAD:%.*]] = load volatile <vscale x 16 x i32>, ptr [[TMP]], align 64
; CHECK-NEXT: store <vscale x 16 x i32> [[RELOAD]], ptr [[OUT:%.*]], align 16
; CHECK-NEXT: ret void
;
entry:
%tmp = alloca <vscale x 32 x i16>, align 16
store volatile <vscale x 16 x i32> zeroinitializer, ptr %tmp, align 16
%reload = load volatile <vscale x 16 x i32>, ptr %tmp, align 16
store <vscale x 16 x i32> %reload, ptr %out, align 16
ret void
}
; Same <vscale x 32 x i16> alloca accessed as BOTH <vscale x 16 x i32>
; and <vscale x 32 x i16> (multiple use types). Per the autogenerated
; CHECK lines, instcombine preserves both typed reloads and the alloca
; type, raising the alloca/access alignment from 16 to 64.
define void @scalable32i16_to_scalable16i32_multiuse(ptr %out, ptr %out2) {
; CHECK-LABEL: @scalable32i16_to_scalable16i32_multiuse(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP:%.*]] = alloca <vscale x 32 x i16>, align 64
; CHECK-NEXT: store volatile <vscale x 16 x i32> zeroinitializer, ptr [[TMP]], align 64
; CHECK-NEXT: [[RELOAD:%.*]] = load volatile <vscale x 16 x i32>, ptr [[TMP]], align 64
; CHECK-NEXT: store <vscale x 16 x i32> [[RELOAD]], ptr [[OUT:%.*]], align 16
; CHECK-NEXT: [[RELOAD2:%.*]] = load volatile <vscale x 32 x i16>, ptr [[TMP]], align 64
; CHECK-NEXT: store <vscale x 32 x i16> [[RELOAD2]], ptr [[OUT2:%.*]], align 16
; CHECK-NEXT: ret void
;
entry:
%tmp = alloca <vscale x 32 x i16>, align 16
store volatile <vscale x 16 x i32> zeroinitializer, ptr %tmp, align 16
%reload = load volatile <vscale x 16 x i32>, ptr %tmp, align 16
store <vscale x 16 x i32> %reload, ptr %out, align 16
%reload2 = load volatile <vscale x 32 x i16>, ptr %tmp, align 16
store <vscale x 32 x i16> %reload2, ptr %out2, align 16
ret void
}
|