# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
# RUN: llc -mtriple=x86_64 -machine-sink-load-instrs-threshold=2 -run-pass=machine-sink %s -o - | FileCheck %s
# RUN: llc -mtriple=x86_64 -machine-sink-load-instrs-threshold=2 -run-pass=mir-debugify,machine-sink,mir-strip-debug %s -o - | FileCheck %s
# Verify that the machine-sink pass is debug invariant with respect to the
# given input. Since the pass examines MemOperands, the IR section is
# required for the original bug to trigger.
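#
# How the invariance check works: the second RUN line first runs mir-debugify,
# which attaches synthetic debug info to every instruction, then runs
# machine-sink on the debugified function, and finally removes the debug info
# again with mir-strip-debug. Both RUN lines feed the same CHECK prefix, so
# FileCheck fails if the presence of debug instructions changes the pass's
# output.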
--- |
  @e = global i32 0, align 1
  @d = global i32 0, align 1
  @f = global i32 0, align 1
  @g = global i32 0, align 1

  define i32 @l() {
  entry:
    br label %for.body

  for.body:                                         ; preds = %h.exit, %entry
    %cmp = phi i1 [ true, %entry ], [ false, %h.exit ]
    %0 = load i32, ptr @d, align 1
    %tobool61.not.i = icmp eq i32 %0, 0
    %e.promoted44.i = load i32, ptr @e, align 1
    br i1 %tobool61.not.i, label %h.exit, label %for.cond13.preheader.preheader.i

  for.cond13.preheader.preheader.i:                 ; preds = %for.body
    %1 = load i32, ptr @f, align 1
    store i32 %1, ptr @g, align 1
    br label %h.exit

  h.exit:                                           ; preds = %for.cond13.preheader.preheader.i, %for.body
    %.us-phi50.i = or i32 %e.promoted44.i, 4
    store i32 %.us-phi50.i, ptr @e, align 1
    br i1 %cmp, label %for.body, label %for.end

  for.end:                                          ; preds = %h.exit
    ret i32 undef
  }
...
---
name: l
alignment: 16
tracksRegLiveness: true
registers:
  - { id: 0, class: gr8 }
  - { id: 1, class: gr32 }
  - { id: 2, class: gr8 }
  - { id: 3, class: gr64 }
  - { id: 4, class: gr64 }
  - { id: 5, class: gr64 }
  - { id: 6, class: gr32 }
  - { id: 7, class: gr64 }
  - { id: 8, class: gr8 }
  - { id: 9, class: gr32 }
  - { id: 10, class: gr64 }
  - { id: 11, class: gr32 }
  - { id: 12, class: gr32 }
frameInfo:
  maxAlignment: 1
machineFunctionInfo: {}
body: |
  ; CHECK-LABEL: name: l
  ; CHECK: bb.0.entry:
  ; CHECK-NEXT: successors: %bb.1(0x80000000)
  ; CHECK-NEXT: {{ $}}
  ; CHECK-NEXT: [[MOV8ri:%[0-9]+]]:gr8 = MOV8ri 1
  ; CHECK-NEXT: [[MOV64rm:%[0-9]+]]:gr64 = MOV64rm $rip, 1, $noreg, target-flags(x86-gotpcrel) @d, $noreg :: (load (s64) from got)
  ; CHECK-NEXT: [[MOV64rm1:%[0-9]+]]:gr64 = MOV64rm $rip, 1, $noreg, target-flags(x86-gotpcrel) @e, $noreg :: (load (s64) from got)
  ; CHECK-NEXT: [[MOV64rm2:%[0-9]+]]:gr64 = MOV64rm $rip, 1, $noreg, target-flags(x86-gotpcrel) @f, $noreg :: (load (s64) from got)
  ; CHECK-NEXT: [[MOV64rm3:%[0-9]+]]:gr64 = MOV64rm $rip, 1, $noreg, target-flags(x86-gotpcrel) @g, $noreg :: (load (s64) from got)
  ; CHECK-NEXT: {{ $}}
  ; CHECK-NEXT: bb.1.for.body:
  ; CHECK-NEXT: successors: %bb.3(0x30000000), %bb.2(0x50000000)
  ; CHECK-NEXT: {{ $}}
  ; CHECK-NEXT: [[PHI:%[0-9]+]]:gr8 = PHI [[MOV8ri]], %bb.0, %8, %bb.3
  ; CHECK-NEXT: CMP32mi8 [[MOV64rm]], 1, $noreg, 0, $noreg, 0, implicit-def $eflags :: (dereferenceable load (s32) from @d, align 1)
  ; CHECK-NEXT: JCC_1 %bb.3, 4, implicit $eflags
  ; CHECK-NEXT: JMP_1 %bb.2
  ; CHECK-NEXT: {{ $}}
  ; CHECK-NEXT: bb.2.for.cond13.preheader.preheader.i:
  ; CHECK-NEXT: successors: %bb.3(0x80000000)
  ; CHECK-NEXT: {{ $}}
  ; CHECK-NEXT: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm [[MOV64rm2]], 1, $noreg, 0, $noreg :: (dereferenceable load (s32) from @f, align 1)
  ; CHECK-NEXT: MOV32mr [[MOV64rm3]], 1, $noreg, 0, $noreg, killed [[MOV32rm]] :: (store (s32) into @g, align 1)
  ; CHECK-NEXT: {{ $}}
  ; CHECK-NEXT: bb.3.h.exit:
  ; CHECK-NEXT: successors: %bb.1(0x7c000000), %bb.4(0x04000000)
  ; CHECK-NEXT: {{ $}}
  ; CHECK-NEXT: [[MOV32rm1:%[0-9]+]]:gr32 = MOV32rm [[MOV64rm1]], 1, $noreg, 0, $noreg :: (dereferenceable load (s32) from @e, align 1)
  ; CHECK-NEXT: [[OR32ri8_:%[0-9]+]]:gr32 = OR32ri8 [[MOV32rm1]], 4, implicit-def dead $eflags
  ; CHECK-NEXT: MOV32mr [[MOV64rm1]], 1, $noreg, 0, $noreg, killed [[OR32ri8_]] :: (store (s32) into @e, align 1)
  ; CHECK-NEXT: [[MOV32r0_:%[0-9]+]]:gr32 = MOV32r0 implicit-def dead $eflags
  ; CHECK-NEXT: [[COPY:%[0-9]+]]:gr8 = COPY [[MOV32r0_]].sub_8bit
  ; CHECK-NEXT: TEST8ri [[PHI]], 1, implicit-def $eflags
  ; CHECK-NEXT: JCC_1 %bb.1, 5, implicit $eflags
  ; CHECK-NEXT: JMP_1 %bb.4
  ; CHECK-NEXT: {{ $}}
  ; CHECK-NEXT: bb.4.for.end:
  ; CHECK-NEXT: [[DEF:%[0-9]+]]:gr32 = IMPLICIT_DEF
  ; CHECK-NEXT: $eax = COPY [[DEF]]
  ; CHECK-NEXT: RET 0, $eax
  bb.0.entry:
    %2:gr8 = MOV8ri 1
    %3:gr64 = MOV64rm $rip, 1, $noreg, target-flags(x86-gotpcrel) @d, $noreg :: (load (s64) from got)
    %4:gr64 = MOV64rm $rip, 1, $noreg, target-flags(x86-gotpcrel) @e, $noreg :: (load (s64) from got)
    %5:gr64 = MOV64rm $rip, 1, $noreg, target-flags(x86-gotpcrel) @f, $noreg :: (load (s64) from got)
    %7:gr64 = MOV64rm $rip, 1, $noreg, target-flags(x86-gotpcrel) @g, $noreg :: (load (s64) from got)

  bb.1.for.body:
    successors: %bb.3(0x30000000), %bb.2(0x50000000)

    %0:gr8 = PHI %2, %bb.0, %8, %bb.3
    CMP32mi8 %3, 1, $noreg, 0, $noreg, 0, implicit-def $eflags :: (dereferenceable load (s32) from @d, align 1)
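    ; This load of @e sits in for.body but is only used in h.exit;
    ; machine-sink should sink it into bb.3 (the MOV32rm from @e in the
    ; CHECK lines above), with or without debug instructions present.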
    %1:gr32 = MOV32rm %4, 1, $noreg, 0, $noreg :: (dereferenceable load (s32) from @e, align 1)
    JCC_1 %bb.3, 4, implicit $eflags
    JMP_1 %bb.2

  bb.2.for.cond13.preheader.preheader.i:
    %6:gr32 = MOV32rm %5, 1, $noreg, 0, $noreg :: (dereferenceable load (s32) from @f, align 1)
    MOV32mr %7, 1, $noreg, 0, $noreg, killed %6 :: (store (s32) into @g, align 1)

  bb.3.h.exit:
    successors: %bb.1(0x7c000000), %bb.4(0x04000000)

    %9:gr32 = OR32ri8 %1, 4, implicit-def dead $eflags
    MOV32mr %4, 1, $noreg, 0, $noreg, killed %9 :: (store (s32) into @e, align 1)
    %11:gr32 = MOV32r0 implicit-def dead $eflags
    %8:gr8 = COPY %11.sub_8bit
    TEST8ri %0, 1, implicit-def $eflags
    JCC_1 %bb.1, 5, implicit $eflags
    JMP_1 %bb.4

  bb.4.for.end:
    %12:gr32 = IMPLICIT_DEF
    $eax = COPY %12
    RET 0, $eax
...