path: root/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-133.c
/* { dg-do compile } */
/* { dg-options "-march=rv32gcv -mabi=ilp32d -O3 -fno-schedule-insns -fno-schedule-insns2" } */
/* { dg-final { check-function-bodies "**" "" } } */
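/* Check, via check-function-bodies, that the vmslt.vx compare intrinsics
   below are emitted with the expected vsetivli/vsetvli configuration under
   the mask policies they exercise (unmasked, _mu and _m).  */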
#include "riscv_vector.h"

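/* f1: an unmasked vmslt.vx produces m3, which is then reused as both the
   mask and the maskedoff operand of the _mu (mask-undisturbed) compare, so
   a single vsetivli with the mu mask policy is expected to cover both
   compares.  */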
/*
** f1:
**	vsetivli\tzero,4,e32,m1,t[au],mu
**	vle32\.v\tv[0-9]+,0\([a-x0-9]+\)
**	vle32\.v\tv[0-9]+,0\([a-x0-9]+\)
**	vmslt\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
**	vmslt\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,v0.t
**	vsm\.v\tv[0-9]+,0\([a-x0-9]+\)
**	ret
*/
void f1 (void * in, void * in2, void *out, int32_t x)
{
    vint32m1_t v = __riscv_vle32_v_i32m1 (in, 4);
    vint32m1_t v2 = __riscv_vle32_v_i32m1 (in2, 4);
    vbool32_t m3 = __riscv_vmslt_vx_i32m1_b32 (v, x, 4);
    vbool32_t m4 = __riscv_vmslt_vx_i32m1_b32_mu (m3, m3, v2, x, 4);
    __riscv_vsm_v_b32 (out, m4, 4);
}

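/* f2: the mask is loaded from memory with vlm.v, the second vle32 and the
   second compare are masked, and the compare uses the _mu form with that
   mask while keeping m3 as the maskedoff operand.  */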
/*
** f2:
**	vsetvli\t[a-x0-9]+,zero,e8,mf4,ta,ma
**	vlm.v\tv[0-9]+,0\([a-x0-9]+\)
**	vsetivli\tzero,4,e32,m1,t[au],mu
**	vle32.v\tv[0-9]+,0\([a-x0-9]+\)
**	vle32.v\tv[0-9]+,0\([a-x0-9]+\),v0.t
**	vmslt\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
**	vmslt\.vx\tv[1-9][0-9]?,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t
**	vsm.v\tv[0-9]+,0\([a-x0-9]+\)
**	ret
*/
void f2 (void * in, void *out, int32_t x)
{
    vbool32_t mask = *(vbool32_t*)in;
    asm volatile ("":::"memory");
    vint32m1_t v = __riscv_vle32_v_i32m1 (in, 4);
    vint32m1_t v2 = __riscv_vle32_v_i32m1_m (mask, in, 4);
    vbool32_t m3 = __riscv_vmslt_vx_i32m1_b32 (v, x, 4);
    vbool32_t m4 = __riscv_vmslt_vx_i32m1_b32_mu (mask, m3, v2, x, 4);
    __riscv_vsm_v_b32 (out, m4, 4);
}

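/* f3: as in f2, the mask comes from memory and the second load is masked,
   but the second compare uses the _m form (mask m3, no maskedoff operand),
   so the vsetivli is allowed to use an agnostic mask policy (m[au] in the
   pattern above).  */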
/*
** f3:
**	vsetvli\t[a-x0-9]+,zero,e8,mf4,ta,ma
**	vlm.v\tv[0-9]+,0\([a-x0-9]+\)
**	vsetivli\tzero,4,e32,m1,t[au],m[au]
**	vle32\.v\tv[0-9]+,0\([a-x0-9]+\)
**	vle32\.v\tv[0-9]+,0\([a-x0-9]+\),v0.t
**	vmslt\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+
**	vmslt\.vx\tv[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t
**	vsm.v\tv[0-9]+,0\([a-x0-9]+\)
**	ret
*/
void f3 (void * in, void *out, int32_t x)
{
    vbool32_t mask = *(vbool32_t*)in;
    asm volatile ("":::"memory");
    vint32m1_t v = __riscv_vle32_v_i32m1 (in, 4);
    vint32m1_t v2 = __riscv_vle32_v_i32m1_m (mask, in, 4);
    vbool32_t m3 = __riscv_vmslt_vx_i32m1_b32 (v, x, 4);
    vbool32_t m4 = __riscv_vmslt_vx_i32m1_b32_m (m3, v2, x, 4);
    __riscv_vsm_v_b32 (out, m4, 4);
}