; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=thumbv8.1m.main-none-eabi -mattr=+mve.fp -verify-machineinstrs -o - %s | FileCheck %s

declare <8 x half> @llvm.arm.mve.vcmlaq.v8f16(i32, <8 x half>, <8 x half>, <8 x half>)
declare <4 x float> @llvm.arm.mve.vcmlaq.v4f32(i32, <4 x float>, <4 x float>, <4 x float>)
declare <8 x half> @llvm.arm.mve.vcmulq.v8f16(i32, <8 x half>, <8 x half>)
declare <4 x float> @llvm.arm.mve.vcmulq.v4f32(i32, <4 x float>, <4 x float>)


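; Check that a fast fadd of %a with a vcmla intrinsic that has a zero
; accumulator is folded into a single vcmla using %a as the accumulator.
; This is exercised for both fadd operand orders, both element sizes and all
; four rotations; without fast-math flags (reassoc_nonfast_f32x4) the add
; must remain a separate vadd.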
define arm_aapcs_vfpcc <4 x float> @reassoc_f32x4(<4 x float> %a, <4 x float> %b, <4 x float> %c) {
; CHECK-LABEL: reassoc_f32x4:
; CHECK:       @ %bb.0: @ %entry
; CHECK-NEXT:    vcmla.f32 q0, q1, q2, #0
; CHECK-NEXT:    bx lr
entry:
  %d = tail call <4 x float> @llvm.arm.mve.vcmlaq.v4f32(i32 0, <4 x float> zeroinitializer, <4 x float> %b, <4 x float> %c)
  %res = fadd fast <4 x float> %d, %a
  ret <4 x float> %res
}

define arm_aapcs_vfpcc <4 x float> @reassoc_c_f32x4(<4 x float> %a, <4 x float> %b, <4 x float> %c) {
; CHECK-LABEL: reassoc_c_f32x4:
; CHECK:       @ %bb.0: @ %entry
; CHECK-NEXT:    vcmla.f32 q0, q1, q2, #90
; CHECK-NEXT:    bx lr
entry:
  %d = tail call <4 x float> @llvm.arm.mve.vcmlaq.v4f32(i32 1, <4 x float> zeroinitializer, <4 x float> %b, <4 x float> %c)
  %res = fadd fast <4 x float> %a, %d
  ret <4 x float> %res
}

define arm_aapcs_vfpcc <8 x half> @reassoc_f16x4(<8 x half> %a, <8 x half> %b, <8 x half> %c) {
; CHECK-LABEL: reassoc_f16x4:
; CHECK:       @ %bb.0: @ %entry
; CHECK-NEXT:    vcmla.f16 q0, q1, q2, #180
; CHECK-NEXT:    bx lr
entry:
  %d = tail call <8 x half> @llvm.arm.mve.vcmlaq.v8f16(i32 2, <8 x half> zeroinitializer, <8 x half> %b, <8 x half> %c)
  %res = fadd fast <8 x half> %d, %a
  ret <8 x half> %res
}

define arm_aapcs_vfpcc <8 x half> @reassoc_c_f16x4(<8 x half> %a, <8 x half> %b, <8 x half> %c) {
; CHECK-LABEL: reassoc_c_f16x4:
; CHECK:       @ %bb.0: @ %entry
; CHECK-NEXT:    vcmla.f16 q0, q1, q2, #270
; CHECK-NEXT:    bx lr
entry:
  %d = tail call <8 x half> @llvm.arm.mve.vcmlaq.v8f16(i32 3, <8 x half> zeroinitializer, <8 x half> %b, <8 x half> %c)
  %res = fadd fast <8 x half> %a, %d
  ret <8 x half> %res
}

define arm_aapcs_vfpcc <4 x float> @reassoc_nonfast_f32x4(<4 x float> %a, <4 x float> %b, <4 x float> %c) {
; CHECK-LABEL: reassoc_nonfast_f32x4:
; CHECK:       @ %bb.0: @ %entry
; CHECK-NEXT:    vmov.i32 q3, #0x0
; CHECK-NEXT:    vcmla.f32 q3, q1, q2, #0
; CHECK-NEXT:    vadd.f32 q0, q3, q0
; CHECK-NEXT:    bx lr
entry:
  %d = tail call <4 x float> @llvm.arm.mve.vcmlaq.v4f32(i32 0, <4 x float> zeroinitializer, <4 x float> %b, <4 x float> %c)
  %res = fadd <4 x float> %d, %a
  ret <4 x float> %res
}



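; Check that a vcmul intrinsic followed by a fast fadd of %a folds into a
; single vcmla with %a as the accumulator, again for both fadd operand
; orders, both element sizes and all four rotations; without fast-math flags
; (muladd_nonfast_f32x4) the vcmul and vadd stay separate.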
define arm_aapcs_vfpcc <4 x float> @muladd_f32x4(<4 x float> %a, <4 x float> %b, <4 x float> %c) {
; CHECK-LABEL: muladd_f32x4:
; CHECK:       @ %bb.0: @ %entry
; CHECK-NEXT:    vcmla.f32 q0, q1, q2, #0
; CHECK-NEXT:    bx lr
entry:
  %d = tail call <4 x float> @llvm.arm.mve.vcmulq.v4f32(i32 0, <4 x float> %b, <4 x float> %c)
  %res = fadd fast <4 x float> %d, %a
  ret <4 x float> %res
}

define arm_aapcs_vfpcc <4 x float> @muladd_c_f32x4(<4 x float> %a, <4 x float> %b, <4 x float> %c) {
; CHECK-LABEL: muladd_c_f32x4:
; CHECK:       @ %bb.0: @ %entry
; CHECK-NEXT:    vcmla.f32 q0, q1, q2, #90
; CHECK-NEXT:    bx lr
entry:
  %d = tail call <4 x float> @llvm.arm.mve.vcmulq.v4f32(i32 1, <4 x float> %b, <4 x float> %c)
  %res = fadd fast <4 x float> %a, %d
  ret <4 x float> %res
}

define arm_aapcs_vfpcc <8 x half> @muladd_f16x4(<8 x half> %a, <8 x half> %b, <8 x half> %c) {
; CHECK-LABEL: muladd_f16x4:
; CHECK:       @ %bb.0: @ %entry
; CHECK-NEXT:    vcmla.f16 q0, q1, q2, #180
; CHECK-NEXT:    bx lr
entry:
  %d = tail call <8 x half> @llvm.arm.mve.vcmulq.v8f16(i32 2, <8 x half> %b, <8 x half> %c)
  %res = fadd fast <8 x half> %d, %a
  ret <8 x half> %res
}

define arm_aapcs_vfpcc <8 x half> @muladd_c_f16x4(<8 x half> %a, <8 x half> %b, <8 x half> %c) {
; CHECK-LABEL: muladd_c_f16x4:
; CHECK:       @ %bb.0: @ %entry
; CHECK-NEXT:    vcmla.f16 q0, q1, q2, #270
; CHECK-NEXT:    bx lr
entry:
  %d = tail call <8 x half> @llvm.arm.mve.vcmulq.v8f16(i32 3, <8 x half> %b, <8 x half> %c)
  %res = fadd fast <8 x half> %a, %d
  ret <8 x half> %res
}

define arm_aapcs_vfpcc <4 x float> @muladd_nonfast_f32x4(<4 x float> %a, <4 x float> %b, <4 x float> %c) {
; CHECK-LABEL: muladd_nonfast_f32x4:
; CHECK:       @ %bb.0: @ %entry
; CHECK-NEXT:    vcmul.f32 q3, q1, q2, #0
; CHECK-NEXT:    vadd.f32 q0, q3, q0
; CHECK-NEXT:    bx lr
entry:
  %d = tail call <4 x float> @llvm.arm.mve.vcmulq.v4f32(i32 0, <4 x float> %b, <4 x float> %c)
  %res = fadd <4 x float> %d, %a
  ret <4 x float> %res
}