gcc/testsuite/gcc.target/aarch64/sve_store_scalar_offset_1.c
/* { dg-do assemble } */
/* { dg-options "-O3 -march=armv8-a+sve -msve-vector-bits=256 --save-temps" } */

#include <stdint.h>
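
/* Fixed-length vector types matching -msve-vector-bits=256: each is
   32 bytes, i.e. four 64-bit, eight 32-bit, sixteen 16-bit or
   thirty-two 8-bit lanes.  */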

typedef int64_t vnx2di __attribute__((vector_size(32)));
typedef int32_t vnx4si __attribute__((vector_size(32)));
typedef int16_t vnx8hi __attribute__((vector_size(32)));
typedef int8_t vnx16qi __attribute__((vector_size(32)));
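
/* Each function below uses an empty asm with an "=w" output, which
   pins the stored value in an SVE vector register; writing that value
   back to a[i] is what makes GCC emit the SVE store whose addressing
   mode the scans at the end of the file check.  */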

void sve_store_64_z_lsl (uint64_t *a, unsigned long i)
{
  asm volatile ("" : "=w" (*(vnx2di *) &a[i]));
}

void sve_store_64_s_lsl (int64_t *a, signed long i)
{
  asm volatile ("" : "=w" (*(vnx2di *) &a[i]));
}

void sve_store_32_z_lsl (uint32_t *a, unsigned long i)
{
  asm volatile ("" : "=w" (*(vnx4si *) &a[i]));
}

void sve_store_32_s_lsl (int32_t *a, signed long i)
{
  asm volatile ("" : "=w" (*(vnx4si *) &a[i]));
}

void sve_store_16_z_lsl (uint16_t *a, unsigned long i)
{
  asm volatile ("" : "=w" (*(vnx8hi *) &a[i]));
}

void sve_store_16_s_lsl (int16_t *a, signed long i)
{
  asm volatile ("" : "=w" (*(vnx8hi *) &a[i]));
}

/* ??? The (pointer, index) argument order used above leads to a
   redundant move for byte stores, so these two functions swap the
   arguments.  */
void sve_store_8_z (unsigned long i, uint8_t *a)
{
  asm volatile ("" : "=w" (*(vnx16qi *) &a[i]));
}

void sve_store_8_s (signed long i, int8_t *a)
{
  asm volatile ("" : "=w" (*(vnx16qi *) &a[i]));
}
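
/* Each store should use the scalar-plus-scalar addressing mode, with
   the index register shifted left by log2 of the element size (no
   shift for the byte stores).  */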

/* { dg-final { scan-assembler-times {\tst1d\tz0\.d, p[0-7], \[x0, x1, lsl 3\]\n} 2 } } */
/* { dg-final { scan-assembler-times {\tst1w\tz0\.s, p[0-7], \[x0, x1, lsl 2\]\n} 2 } } */
/* { dg-final { scan-assembler-times {\tst1h\tz0\.h, p[0-7], \[x0, x1, lsl 1\]\n} 2 } } */
/* { dg-final { scan-assembler-times {\tst1b\tz0\.b, p[0-7], \[x1, x0\]\n} 2 } } */
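
/* A minimal sketch (hypothetical, not part of the test, and kept
   compiled out so the scan counts above stay valid): an ordinary
   vector store through a scalar index can use the same
   scalar-plus-scalar addressing mode without the inline-asm trick.  */
#if 0
void plain_store_64 (int64_t *a, unsigned long i, vnx2di v)
{
  *(vnx2di *) &a[i] = v;  /* might compile to st1d z0.d, p7, [x0, x1, lsl 3] */
}
#endif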