author    Alan Lawrence <alan.lawrence@arm.com>  2014-12-09 20:23:36 +0000
committer Alan Lawrence <alalaw01@gcc.gnu.org>  2014-12-09 20:23:36 +0000
commit    ed225d0c7b74eb6fa125cf791b7d50e2000883bc (patch)
tree      e0a9bed07673fdb9c44244b194dc83235216209a /gcc/config/aarch64/arm_neon.h
parent    fdead6a4b4a8e3efe04c9546f2619e3426d2fb51 (diff)
[AArch64]Remove be_checked_get_lane, check bounds with __builtin_aarch64_im_lane_boundsi.
gcc/:

	PR target/63870
	* config/aarch64/aarch64-simd-builtins.def (be_checked_get_lane):
	Delete.
	* config/aarch64/aarch64-simd.md (aarch64_be_checked_get_lane<mode>):
	Delete.
	* config/aarch64/arm_neon.h (__aarch64_vget_lane_any): Use GCC
	vector extensions, __aarch64_lane, __builtin_aarch64_im_lane_boundsi.
	(__aarch64_vget_lane_f32, __aarch64_vget_lane_f64,
	__aarch64_vget_lane_p8, __aarch64_vget_lane_p16,
	__aarch64_vget_lane_s8, __aarch64_vget_lane_s16,
	__aarch64_vget_lane_s32, __aarch64_vget_lane_s64,
	__aarch64_vget_lane_u8, __aarch64_vget_lane_u16,
	__aarch64_vget_lane_u32, __aarch64_vget_lane_u64,
	__aarch64_vgetq_lane_f32, __aarch64_vgetq_lane_f64,
	__aarch64_vgetq_lane_p8, __aarch64_vgetq_lane_p16,
	__aarch64_vgetq_lane_s8, __aarch64_vgetq_lane_s16,
	__aarch64_vgetq_lane_s32, __aarch64_vgetq_lane_s64,
	__aarch64_vgetq_lane_u8, __aarch64_vgetq_lane_u16,
	__aarch64_vgetq_lane_u32, __aarch64_vgetq_lane_u64): Delete.
	(__aarch64_vdup_lane_any): Use __aarch64_vget_lane_any, remove
	'q2' argument.
	(__aarch64_vdup_lane_f32, __aarch64_vdup_lane_f64,
	__aarch64_vdup_lane_p8, __aarch64_vdup_lane_p16,
	__aarch64_vdup_lane_s8, __aarch64_vdup_lane_s16,
	__aarch64_vdup_lane_s32, __aarch64_vdup_lane_s64,
	__aarch64_vdup_lane_u8, __aarch64_vdup_lane_u16,
	__aarch64_vdup_lane_u32, __aarch64_vdup_lane_u64,
	__aarch64_vdup_laneq_f32, __aarch64_vdup_laneq_f64,
	__aarch64_vdup_laneq_p8, __aarch64_vdup_laneq_p16,
	__aarch64_vdup_laneq_s8, __aarch64_vdup_laneq_s16,
	__aarch64_vdup_laneq_s32, __aarch64_vdup_laneq_s64,
	__aarch64_vdup_laneq_u8, __aarch64_vdup_laneq_u16,
	__aarch64_vdup_laneq_u32, __aarch64_vdup_laneq_u64): Remove
	argument to __aarch64_vdup_lane_any.
	(vget_lane_f32, vget_lane_f64, vget_lane_p8, vget_lane_p16,
	vget_lane_s8, vget_lane_s16, vget_lane_s32, vget_lane_s64,
	vget_lane_u8, vget_lane_u16, vget_lane_u32, vget_lane_u64,
	vgetq_lane_f32, vgetq_lane_f64, vgetq_lane_p8, vgetq_lane_p16,
	vgetq_lane_s8, vgetq_lane_s16, vgetq_lane_s32, vgetq_lane_s64,
	vgetq_lane_u8, vgetq_lane_u16, vgetq_lane_u32, vgetq_lane_u64,
	vdupb_lane_p8, vdupb_lane_s8, vdupb_lane_u8, vduph_lane_p16,
	vduph_lane_s16, vduph_lane_u16, vdups_lane_f32, vdups_lane_s32,
	vdups_lane_u32, vdupb_laneq_p8, vdupb_laneq_s8, vdupb_laneq_u8,
	vduph_laneq_p16, vduph_laneq_s16, vduph_laneq_u16,
	vdups_laneq_f32, vdups_laneq_s32, vdups_laneq_u32,
	vdupd_laneq_f64, vdupd_laneq_s64, vdupd_laneq_u64,
	vfmas_lane_f32, vfma_laneq_f64, vfmad_laneq_f64, vfmas_laneq_f32,
	vfmss_lane_f32, vfms_laneq_f64, vfmsd_laneq_f64, vfmss_laneq_f32,
	vmla_lane_f32, vmla_lane_s16, vmla_lane_s32, vmla_lane_u16,
	vmla_lane_u32, vmla_laneq_f32, vmla_laneq_s16, vmla_laneq_s32,
	vmla_laneq_u16, vmla_laneq_u32, vmlaq_lane_f32, vmlaq_lane_s16,
	vmlaq_lane_s32, vmlaq_lane_u16, vmlaq_lane_u32, vmlaq_laneq_f32,
	vmlaq_laneq_s16, vmlaq_laneq_s32, vmlaq_laneq_u16,
	vmlaq_laneq_u32, vmls_lane_f32, vmls_lane_s16, vmls_lane_s32,
	vmls_lane_u16, vmls_lane_u32, vmls_laneq_f32, vmls_laneq_s16,
	vmls_laneq_s32, vmls_laneq_u16, vmls_laneq_u32, vmlsq_lane_f32,
	vmlsq_lane_s16, vmlsq_lane_s32, vmlsq_lane_u16, vmlsq_lane_u32,
	vmlsq_laneq_f32, vmlsq_laneq_s16, vmlsq_laneq_s32,
	vmlsq_laneq_u16, vmlsq_laneq_u32, vmul_lane_f32, vmul_lane_s16,
	vmul_lane_s32, vmul_lane_u16, vmul_lane_u32, vmuld_lane_f64,
	vmuld_laneq_f64, vmuls_lane_f32, vmuls_laneq_f32, vmul_laneq_f32,
	vmul_laneq_f64, vmul_laneq_s16, vmul_laneq_s32, vmul_laneq_u16,
	vmul_laneq_u32, vmulq_lane_f32, vmulq_lane_s16, vmulq_lane_s32,
	vmulq_lane_u16, vmulq_lane_u32, vmulq_laneq_f32, vmulq_laneq_f64,
	vmulq_laneq_s16, vmulq_laneq_s32, vmulq_laneq_u16,
	vmulq_laneq_u32): Use __aarch64_vget_lane_any.

gcc/testsuite/:

	* gcc.target/aarch64/simd/vget_lane_f32_indices_1.c: New test.
	* gcc.target/aarch64/simd/vget_lane_f64_indices_1.c: Likewise.
	* gcc.target/aarch64/simd/vget_lane_p16_indices_1.c: Likewise.
	* gcc.target/aarch64/simd/vget_lane_p8_indices_1.c: Likewise.
	* gcc.target/aarch64/simd/vget_lane_s16_indices_1.c: Likewise.
	* gcc.target/aarch64/simd/vget_lane_s32_indices_1.c: Likewise.
	* gcc.target/aarch64/simd/vget_lane_s64_indices_1.c: Likewise.
	* gcc.target/aarch64/simd/vget_lane_s8_indices_1.c: Likewise.
	* gcc.target/aarch64/simd/vget_lane_u16_indices_1.c: Likewise.
	* gcc.target/aarch64/simd/vget_lane_u32_indices_1.c: Likewise.
	* gcc.target/aarch64/simd/vget_lane_u64_indices_1.c: Likewise.
	* gcc.target/aarch64/simd/vget_lane_u8_indices_1.c: Likewise.
	* gcc.target/aarch64/simd/vgetq_lane_f32_indices_1.c: Likewise.
	* gcc.target/aarch64/simd/vgetq_lane_f64_indices_1.c: Likewise.
	* gcc.target/aarch64/simd/vgetq_lane_p16_indices_1.c: Likewise.
	* gcc.target/aarch64/simd/vgetq_lane_p8_indices_1.c: Likewise.
	* gcc.target/aarch64/simd/vgetq_lane_s16_indices_1.c: Likewise.
	* gcc.target/aarch64/simd/vgetq_lane_s32_indices_1.c: Likewise.
	* gcc.target/aarch64/simd/vgetq_lane_s64_indices_1.c: Likewise.
	* gcc.target/aarch64/simd/vgetq_lane_s8_indices_1.c: Likewise.
	* gcc.target/aarch64/simd/vgetq_lane_u16_indices_1.c: Likewise.
	* gcc.target/aarch64/simd/vgetq_lane_u32_indices_1.c: Likewise.
	* gcc.target/aarch64/simd/vgetq_lane_u64_indices_1.c: Likewise.
	* gcc.target/aarch64/simd/vgetq_lane_u8_indices_1.c: Likewise.

From-SVN: r218536
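In effect, lane bounds checking moves from the removed be_checked_get_lane
builtin into __AARCH64_LANE_CHECK / __builtin_aarch64_im_lane_boundsi, so a
constant out-of-range lane index is rejected when the intrinsic is expanded.
A minimal sketch of what the new *_indices_1.c tests exercise (hypothetical
user code, not from the patch; the exact diagnostic wording is approximate):

  #include <arm_neon.h>

  int32_t
  get_lane_ok (int32x2_t v)
  {
    return vget_lane_s32 (v, 1);	/* lanes 0..1 are valid */
  }

  int32_t
  get_lane_bad (int32x2_t v)
  {
    /* With this patch, GCC reports something like
       "lane 2 out of range 0 - 1" at compile time.  */
    return vget_lane_s32 (v, 2);
  }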
Diffstat (limited to 'gcc/config/aarch64/arm_neon.h')
-rw-r--r--  gcc/config/aarch64/arm_neon.h  443
1 file changed, 177 insertions, 266 deletions
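For reference, the replacement accessor pairs a compile-time bounds check
with a plain GCC vector subscript, flipping the index on big-endian so that
architectural lane numbering is preserved. A sketch of the supporting macros
as they stand in arm_neon.h around this revision (the __AARCH64EB__ branch is
an assumption inferred from the little-endian definition visible in the
context lines below):

  #define __AARCH64_NUM_LANES(__v) (sizeof (__v) / sizeof (__v[0]))
  #define __AARCH64_LANE_CHECK(__vec, __idx)	\
    __builtin_aarch64_im_lane_boundsi (sizeof (__vec), sizeof (__vec[0]), __idx)

  #ifdef __AARCH64EB__
  /* Big-endian: register lane 0 holds the highest-numbered element.  */
  #define __aarch64_lane(__vec, __idx) (__AARCH64_NUM_LANES (__vec) - 1 - __idx)
  #else
  #define __aarch64_lane(__vec, __idx) __idx
  #endif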
diff --git a/gcc/config/aarch64/arm_neon.h b/gcc/config/aarch64/arm_neon.h
index d4d4ee973ef..319cd8c1a0a 100644
--- a/gcc/config/aarch64/arm_neon.h
+++ b/gcc/config/aarch64/arm_neon.h
@@ -426,183 +426,112 @@ typedef struct poly16x8x4_t
poly16x8_t val[4];
} poly16x8x4_t;
-/* vget_lane internal macros. */
-
-#define __aarch64_vget_lane_any(__size, __cast_ret, __cast_a, __a, __b) \
- (__cast_ret \
- __builtin_aarch64_be_checked_get_lane##__size (__cast_a __a, __b))
-
-#define __aarch64_vget_lane_f32(__a, __b) \
- __aarch64_vget_lane_any (v2sf, , , __a, __b)
-#define __aarch64_vget_lane_f64(__a, __b) __extension__ \
- ({ \
- __AARCH64_LANE_CHECK (__a, __b); \
- __a[0]; \
- })
-
-#define __aarch64_vget_lane_p8(__a, __b) \
- __aarch64_vget_lane_any (v8qi, (poly8_t), (int8x8_t), __a, __b)
-#define __aarch64_vget_lane_p16(__a, __b) \
- __aarch64_vget_lane_any (v4hi, (poly16_t), (int16x4_t), __a, __b)
-
-#define __aarch64_vget_lane_s8(__a, __b) \
- __aarch64_vget_lane_any (v8qi, , ,__a, __b)
-#define __aarch64_vget_lane_s16(__a, __b) \
- __aarch64_vget_lane_any (v4hi, , ,__a, __b)
-#define __aarch64_vget_lane_s32(__a, __b) \
- __aarch64_vget_lane_any (v2si, , ,__a, __b)
-#define __aarch64_vget_lane_s64(__a, __b) __extension__ \
- ({ \
- __AARCH64_LANE_CHECK (__a, __b); \
- __a[0]; \
- })
-
-#define __aarch64_vget_lane_u8(__a, __b) \
- __aarch64_vget_lane_any (v8qi, (uint8_t), (int8x8_t), __a, __b)
-#define __aarch64_vget_lane_u16(__a, __b) \
- __aarch64_vget_lane_any (v4hi, (uint16_t), (int16x4_t), __a, __b)
-#define __aarch64_vget_lane_u32(__a, __b) \
- __aarch64_vget_lane_any (v2si, (uint32_t), (int32x2_t), __a, __b)
-#define __aarch64_vget_lane_u64(__a, __b) __extension__ \
- ({ \
- __AARCH64_LANE_CHECK (__a, __b); \
- __a[0]; \
- })
-
-#define __aarch64_vgetq_lane_f32(__a, __b) \
- __aarch64_vget_lane_any (v4sf, , , __a, __b)
-#define __aarch64_vgetq_lane_f64(__a, __b) \
- __aarch64_vget_lane_any (v2df, , , __a, __b)
-
-#define __aarch64_vgetq_lane_p8(__a, __b) \
- __aarch64_vget_lane_any (v16qi, (poly8_t), (int8x16_t), __a, __b)
-#define __aarch64_vgetq_lane_p16(__a, __b) \
- __aarch64_vget_lane_any (v8hi, (poly16_t), (int16x8_t), __a, __b)
-
-#define __aarch64_vgetq_lane_s8(__a, __b) \
- __aarch64_vget_lane_any (v16qi, , ,__a, __b)
-#define __aarch64_vgetq_lane_s16(__a, __b) \
- __aarch64_vget_lane_any (v8hi, , ,__a, __b)
-#define __aarch64_vgetq_lane_s32(__a, __b) \
- __aarch64_vget_lane_any (v4si, , ,__a, __b)
-#define __aarch64_vgetq_lane_s64(__a, __b) \
- __aarch64_vget_lane_any (v2di, , ,__a, __b)
-
-#define __aarch64_vgetq_lane_u8(__a, __b) \
- __aarch64_vget_lane_any (v16qi, (uint8_t), (int8x16_t), __a, __b)
-#define __aarch64_vgetq_lane_u16(__a, __b) \
- __aarch64_vget_lane_any (v8hi, (uint16_t), (int16x8_t), __a, __b)
-#define __aarch64_vgetq_lane_u32(__a, __b) \
- __aarch64_vget_lane_any (v4si, (uint32_t), (int32x4_t), __a, __b)
-#define __aarch64_vgetq_lane_u64(__a, __b) \
- __aarch64_vget_lane_any (v2di, (uint64_t), (int64x2_t), __a, __b)
-
/* __aarch64_vdup_lane internal macros. */
-#define __aarch64_vdup_lane_any(__size, __q1, __q2, __a, __b) \
- vdup##__q1##_n_##__size (__aarch64_vget##__q2##_lane_##__size (__a, __b))
+#define __aarch64_vdup_lane_any(__size, __q, __a, __b) \
+ vdup##__q##_n_##__size (__aarch64_vget_lane_any (__a, __b))
#define __aarch64_vdup_lane_f32(__a, __b) \
- __aarch64_vdup_lane_any (f32, , , __a, __b)
+ __aarch64_vdup_lane_any (f32, , __a, __b)
#define __aarch64_vdup_lane_f64(__a, __b) \
- __aarch64_vdup_lane_any (f64, , , __a, __b)
+ __aarch64_vdup_lane_any (f64, , __a, __b)
#define __aarch64_vdup_lane_p8(__a, __b) \
- __aarch64_vdup_lane_any (p8, , , __a, __b)
+ __aarch64_vdup_lane_any (p8, , __a, __b)
#define __aarch64_vdup_lane_p16(__a, __b) \
- __aarch64_vdup_lane_any (p16, , , __a, __b)
+ __aarch64_vdup_lane_any (p16, , __a, __b)
#define __aarch64_vdup_lane_s8(__a, __b) \
- __aarch64_vdup_lane_any (s8, , , __a, __b)
+ __aarch64_vdup_lane_any (s8, , __a, __b)
#define __aarch64_vdup_lane_s16(__a, __b) \
- __aarch64_vdup_lane_any (s16, , , __a, __b)
+ __aarch64_vdup_lane_any (s16, , __a, __b)
#define __aarch64_vdup_lane_s32(__a, __b) \
- __aarch64_vdup_lane_any (s32, , , __a, __b)
+ __aarch64_vdup_lane_any (s32, , __a, __b)
#define __aarch64_vdup_lane_s64(__a, __b) \
- __aarch64_vdup_lane_any (s64, , , __a, __b)
+ __aarch64_vdup_lane_any (s64, , __a, __b)
#define __aarch64_vdup_lane_u8(__a, __b) \
- __aarch64_vdup_lane_any (u8, , , __a, __b)
+ __aarch64_vdup_lane_any (u8, , __a, __b)
#define __aarch64_vdup_lane_u16(__a, __b) \
- __aarch64_vdup_lane_any (u16, , , __a, __b)
+ __aarch64_vdup_lane_any (u16, , __a, __b)
#define __aarch64_vdup_lane_u32(__a, __b) \
- __aarch64_vdup_lane_any (u32, , , __a, __b)
+ __aarch64_vdup_lane_any (u32, , __a, __b)
#define __aarch64_vdup_lane_u64(__a, __b) \
- __aarch64_vdup_lane_any (u64, , , __a, __b)
+ __aarch64_vdup_lane_any (u64, , __a, __b)
/* __aarch64_vdup_laneq internal macros. */
#define __aarch64_vdup_laneq_f32(__a, __b) \
- __aarch64_vdup_lane_any (f32, , q, __a, __b)
+ __aarch64_vdup_lane_any (f32, , __a, __b)
#define __aarch64_vdup_laneq_f64(__a, __b) \
- __aarch64_vdup_lane_any (f64, , q, __a, __b)
+ __aarch64_vdup_lane_any (f64, , __a, __b)
#define __aarch64_vdup_laneq_p8(__a, __b) \
- __aarch64_vdup_lane_any (p8, , q, __a, __b)
+ __aarch64_vdup_lane_any (p8, , __a, __b)
#define __aarch64_vdup_laneq_p16(__a, __b) \
- __aarch64_vdup_lane_any (p16, , q, __a, __b)
+ __aarch64_vdup_lane_any (p16, , __a, __b)
#define __aarch64_vdup_laneq_s8(__a, __b) \
- __aarch64_vdup_lane_any (s8, , q, __a, __b)
+ __aarch64_vdup_lane_any (s8, , __a, __b)
#define __aarch64_vdup_laneq_s16(__a, __b) \
- __aarch64_vdup_lane_any (s16, , q, __a, __b)
+ __aarch64_vdup_lane_any (s16, , __a, __b)
#define __aarch64_vdup_laneq_s32(__a, __b) \
- __aarch64_vdup_lane_any (s32, , q, __a, __b)
+ __aarch64_vdup_lane_any (s32, , __a, __b)
#define __aarch64_vdup_laneq_s64(__a, __b) \
- __aarch64_vdup_lane_any (s64, , q, __a, __b)
+ __aarch64_vdup_lane_any (s64, , __a, __b)
#define __aarch64_vdup_laneq_u8(__a, __b) \
- __aarch64_vdup_lane_any (u8, , q, __a, __b)
+ __aarch64_vdup_lane_any (u8, , __a, __b)
#define __aarch64_vdup_laneq_u16(__a, __b) \
- __aarch64_vdup_lane_any (u16, , q, __a, __b)
+ __aarch64_vdup_lane_any (u16, , __a, __b)
#define __aarch64_vdup_laneq_u32(__a, __b) \
- __aarch64_vdup_lane_any (u32, , q, __a, __b)
+ __aarch64_vdup_lane_any (u32, , __a, __b)
#define __aarch64_vdup_laneq_u64(__a, __b) \
- __aarch64_vdup_lane_any (u64, , q, __a, __b)
+ __aarch64_vdup_lane_any (u64, , __a, __b)
/* __aarch64_vdupq_lane internal macros. */
#define __aarch64_vdupq_lane_f32(__a, __b) \
- __aarch64_vdup_lane_any (f32, q, , __a, __b)
+ __aarch64_vdup_lane_any (f32, q, __a, __b)
#define __aarch64_vdupq_lane_f64(__a, __b) \
- __aarch64_vdup_lane_any (f64, q, , __a, __b)
+ __aarch64_vdup_lane_any (f64, q, __a, __b)
#define __aarch64_vdupq_lane_p8(__a, __b) \
- __aarch64_vdup_lane_any (p8, q, , __a, __b)
+ __aarch64_vdup_lane_any (p8, q, __a, __b)
#define __aarch64_vdupq_lane_p16(__a, __b) \
- __aarch64_vdup_lane_any (p16, q, , __a, __b)
+ __aarch64_vdup_lane_any (p16, q, __a, __b)
#define __aarch64_vdupq_lane_s8(__a, __b) \
- __aarch64_vdup_lane_any (s8, q, , __a, __b)
+ __aarch64_vdup_lane_any (s8, q, __a, __b)
#define __aarch64_vdupq_lane_s16(__a, __b) \
- __aarch64_vdup_lane_any (s16, q, , __a, __b)
+ __aarch64_vdup_lane_any (s16, q, __a, __b)
#define __aarch64_vdupq_lane_s32(__a, __b) \
- __aarch64_vdup_lane_any (s32, q, , __a, __b)
+ __aarch64_vdup_lane_any (s32, q, __a, __b)
#define __aarch64_vdupq_lane_s64(__a, __b) \
- __aarch64_vdup_lane_any (s64, q, , __a, __b)
+ __aarch64_vdup_lane_any (s64, q, __a, __b)
#define __aarch64_vdupq_lane_u8(__a, __b) \
- __aarch64_vdup_lane_any (u8, q, , __a, __b)
+ __aarch64_vdup_lane_any (u8, q, __a, __b)
#define __aarch64_vdupq_lane_u16(__a, __b) \
- __aarch64_vdup_lane_any (u16, q, , __a, __b)
+ __aarch64_vdup_lane_any (u16, q, __a, __b)
#define __aarch64_vdupq_lane_u32(__a, __b) \
- __aarch64_vdup_lane_any (u32, q, , __a, __b)
+ __aarch64_vdup_lane_any (u32, q, __a, __b)
#define __aarch64_vdupq_lane_u64(__a, __b) \
- __aarch64_vdup_lane_any (u64, q, , __a, __b)
+ __aarch64_vdup_lane_any (u64, q, __a, __b)
/* __aarch64_vdupq_laneq internal macros. */
#define __aarch64_vdupq_laneq_f32(__a, __b) \
- __aarch64_vdup_lane_any (f32, q, q, __a, __b)
+ __aarch64_vdup_lane_any (f32, q, __a, __b)
#define __aarch64_vdupq_laneq_f64(__a, __b) \
- __aarch64_vdup_lane_any (f64, q, q, __a, __b)
+ __aarch64_vdup_lane_any (f64, q, __a, __b)
#define __aarch64_vdupq_laneq_p8(__a, __b) \
- __aarch64_vdup_lane_any (p8, q, q, __a, __b)
+ __aarch64_vdup_lane_any (p8, q, __a, __b)
#define __aarch64_vdupq_laneq_p16(__a, __b) \
- __aarch64_vdup_lane_any (p16, q, q, __a, __b)
+ __aarch64_vdup_lane_any (p16, q, __a, __b)
#define __aarch64_vdupq_laneq_s8(__a, __b) \
- __aarch64_vdup_lane_any (s8, q, q, __a, __b)
+ __aarch64_vdup_lane_any (s8, q, __a, __b)
#define __aarch64_vdupq_laneq_s16(__a, __b) \
- __aarch64_vdup_lane_any (s16, q, q, __a, __b)
+ __aarch64_vdup_lane_any (s16, q, __a, __b)
#define __aarch64_vdupq_laneq_s32(__a, __b) \
- __aarch64_vdup_lane_any (s32, q, q, __a, __b)
+ __aarch64_vdup_lane_any (s32, q, __a, __b)
#define __aarch64_vdupq_laneq_s64(__a, __b) \
- __aarch64_vdup_lane_any (s64, q, q, __a, __b)
+ __aarch64_vdup_lane_any (s64, q, __a, __b)
#define __aarch64_vdupq_laneq_u8(__a, __b) \
- __aarch64_vdup_lane_any (u8, q, q, __a, __b)
+ __aarch64_vdup_lane_any (u8, q, __a, __b)
#define __aarch64_vdupq_laneq_u16(__a, __b) \
- __aarch64_vdup_lane_any (u16, q, q, __a, __b)
+ __aarch64_vdup_lane_any (u16, q, __a, __b)
#define __aarch64_vdupq_laneq_u32(__a, __b) \
- __aarch64_vdup_lane_any (u32, q, q, __a, __b)
+ __aarch64_vdup_lane_any (u32, q, __a, __b)
#define __aarch64_vdupq_laneq_u64(__a, __b) \
- __aarch64_vdup_lane_any (u64, q, q, __a, __b)
+ __aarch64_vdup_lane_any (u64, q, __a, __b)
/* Internal macro for lane indices. */
@@ -618,8 +547,15 @@ typedef struct poly16x8x4_t
#define __aarch64_lane(__vec, __idx) __idx
#endif
-/* vset_lane and vld1_lane internal macro. */
+/* vget_lane internal macro. */
+#define __aarch64_vget_lane_any(__vec, __index) \
+ __extension__ \
+ ({ \
+ __AARCH64_LANE_CHECK (__vec, __index); \
+ __vec[__aarch64_lane (__vec, __index)]; \
+ })
+/* vset_lane and vld1_lane internal macro. */
#define __aarch64_vset_lane_any(__elem, __vec, __index) \
__extension__ \
({ \
@@ -2754,73 +2690,73 @@ vcreate_p16 (uint64_t __a)
__extension__ static __inline float32_t __attribute__ ((__always_inline__))
vget_lane_f32 (float32x2_t __a, const int __b)
{
- return __aarch64_vget_lane_f32 (__a, __b);
+ return __aarch64_vget_lane_any (__a, __b);
}
__extension__ static __inline float64_t __attribute__ ((__always_inline__))
vget_lane_f64 (float64x1_t __a, const int __b)
{
- return __aarch64_vget_lane_f64 (__a, __b);
+ return __aarch64_vget_lane_any (__a, __b);
}
__extension__ static __inline poly8_t __attribute__ ((__always_inline__))
vget_lane_p8 (poly8x8_t __a, const int __b)
{
- return __aarch64_vget_lane_p8 (__a, __b);
+ return __aarch64_vget_lane_any (__a, __b);
}
__extension__ static __inline poly16_t __attribute__ ((__always_inline__))
vget_lane_p16 (poly16x4_t __a, const int __b)
{
- return __aarch64_vget_lane_p16 (__a, __b);
+ return __aarch64_vget_lane_any (__a, __b);
}
__extension__ static __inline int8_t __attribute__ ((__always_inline__))
vget_lane_s8 (int8x8_t __a, const int __b)
{
- return __aarch64_vget_lane_s8 (__a, __b);
+ return __aarch64_vget_lane_any (__a, __b);
}
__extension__ static __inline int16_t __attribute__ ((__always_inline__))
vget_lane_s16 (int16x4_t __a, const int __b)
{
- return __aarch64_vget_lane_s16 (__a, __b);
+ return __aarch64_vget_lane_any (__a, __b);
}
__extension__ static __inline int32_t __attribute__ ((__always_inline__))
vget_lane_s32 (int32x2_t __a, const int __b)
{
- return __aarch64_vget_lane_s32 (__a, __b);
+ return __aarch64_vget_lane_any (__a, __b);
}
__extension__ static __inline int64_t __attribute__ ((__always_inline__))
vget_lane_s64 (int64x1_t __a, const int __b)
{
- return __aarch64_vget_lane_s64 (__a, __b);
+ return __aarch64_vget_lane_any (__a, __b);
}
__extension__ static __inline uint8_t __attribute__ ((__always_inline__))
vget_lane_u8 (uint8x8_t __a, const int __b)
{
- return __aarch64_vget_lane_u8 (__a, __b);
+ return __aarch64_vget_lane_any (__a, __b);
}
__extension__ static __inline uint16_t __attribute__ ((__always_inline__))
vget_lane_u16 (uint16x4_t __a, const int __b)
{
- return __aarch64_vget_lane_u16 (__a, __b);
+ return __aarch64_vget_lane_any (__a, __b);
}
__extension__ static __inline uint32_t __attribute__ ((__always_inline__))
vget_lane_u32 (uint32x2_t __a, const int __b)
{
- return __aarch64_vget_lane_u32 (__a, __b);
+ return __aarch64_vget_lane_any (__a, __b);
}
__extension__ static __inline uint64_t __attribute__ ((__always_inline__))
vget_lane_u64 (uint64x1_t __a, const int __b)
{
- return __aarch64_vget_lane_u64 (__a, __b);
+ return __aarch64_vget_lane_any (__a, __b);
}
/* vgetq_lane */
@@ -2828,73 +2764,73 @@ vget_lane_u64 (uint64x1_t __a, const int __b)
__extension__ static __inline float32_t __attribute__ ((__always_inline__))
vgetq_lane_f32 (float32x4_t __a, const int __b)
{
- return __aarch64_vgetq_lane_f32 (__a, __b);
+ return __aarch64_vget_lane_any (__a, __b);
}
__extension__ static __inline float64_t __attribute__ ((__always_inline__))
vgetq_lane_f64 (float64x2_t __a, const int __b)
{
- return __aarch64_vgetq_lane_f64 (__a, __b);
+ return __aarch64_vget_lane_any (__a, __b);
}
__extension__ static __inline poly8_t __attribute__ ((__always_inline__))
vgetq_lane_p8 (poly8x16_t __a, const int __b)
{
- return __aarch64_vgetq_lane_p8 (__a, __b);
+ return __aarch64_vget_lane_any (__a, __b);
}
__extension__ static __inline poly16_t __attribute__ ((__always_inline__))
vgetq_lane_p16 (poly16x8_t __a, const int __b)
{
- return __aarch64_vgetq_lane_p16 (__a, __b);
+ return __aarch64_vget_lane_any (__a, __b);
}
__extension__ static __inline int8_t __attribute__ ((__always_inline__))
vgetq_lane_s8 (int8x16_t __a, const int __b)
{
- return __aarch64_vgetq_lane_s8 (__a, __b);
+ return __aarch64_vget_lane_any (__a, __b);
}
__extension__ static __inline int16_t __attribute__ ((__always_inline__))
vgetq_lane_s16 (int16x8_t __a, const int __b)
{
- return __aarch64_vgetq_lane_s16 (__a, __b);
+ return __aarch64_vget_lane_any (__a, __b);
}
__extension__ static __inline int32_t __attribute__ ((__always_inline__))
vgetq_lane_s32 (int32x4_t __a, const int __b)
{
- return __aarch64_vgetq_lane_s32 (__a, __b);
+ return __aarch64_vget_lane_any (__a, __b);
}
__extension__ static __inline int64_t __attribute__ ((__always_inline__))
vgetq_lane_s64 (int64x2_t __a, const int __b)
{
- return __aarch64_vgetq_lane_s64 (__a, __b);
+ return __aarch64_vget_lane_any (__a, __b);
}
__extension__ static __inline uint8_t __attribute__ ((__always_inline__))
vgetq_lane_u8 (uint8x16_t __a, const int __b)
{
- return __aarch64_vgetq_lane_u8 (__a, __b);
+ return __aarch64_vget_lane_any (__a, __b);
}
__extension__ static __inline uint16_t __attribute__ ((__always_inline__))
vgetq_lane_u16 (uint16x8_t __a, const int __b)
{
- return __aarch64_vgetq_lane_u16 (__a, __b);
+ return __aarch64_vget_lane_any (__a, __b);
}
__extension__ static __inline uint32_t __attribute__ ((__always_inline__))
vgetq_lane_u32 (uint32x4_t __a, const int __b)
{
- return __aarch64_vgetq_lane_u32 (__a, __b);
+ return __aarch64_vget_lane_any (__a, __b);
}
__extension__ static __inline uint64_t __attribute__ ((__always_inline__))
vgetq_lane_u64 (uint64x2_t __a, const int __b)
{
- return __aarch64_vgetq_lane_u64 (__a, __b);
+ return __aarch64_vget_lane_any (__a, __b);
}
/* vreinterpret */
@@ -14708,57 +14644,57 @@ vdupq_laneq_u64 (uint64x2_t __a, const int __b)
__extension__ static __inline poly8_t __attribute__ ((__always_inline__))
vdupb_lane_p8 (poly8x8_t __a, const int __b)
{
- return __aarch64_vget_lane_p8 (__a, __b);
+ return __aarch64_vget_lane_any (__a, __b);
}
__extension__ static __inline int8_t __attribute__ ((__always_inline__))
vdupb_lane_s8 (int8x8_t __a, const int __b)
{
- return __aarch64_vget_lane_s8 (__a, __b);
+ return __aarch64_vget_lane_any (__a, __b);
}
__extension__ static __inline uint8_t __attribute__ ((__always_inline__))
vdupb_lane_u8 (uint8x8_t __a, const int __b)
{
- return __aarch64_vget_lane_u8 (__a, __b);
+ return __aarch64_vget_lane_any (__a, __b);
}
/* vduph_lane */
__extension__ static __inline poly16_t __attribute__ ((__always_inline__))
vduph_lane_p16 (poly16x4_t __a, const int __b)
{
- return __aarch64_vget_lane_p16 (__a, __b);
+ return __aarch64_vget_lane_any (__a, __b);
}
__extension__ static __inline int16_t __attribute__ ((__always_inline__))
vduph_lane_s16 (int16x4_t __a, const int __b)
{
- return __aarch64_vget_lane_s16 (__a, __b);
+ return __aarch64_vget_lane_any (__a, __b);
}
__extension__ static __inline uint16_t __attribute__ ((__always_inline__))
vduph_lane_u16 (uint16x4_t __a, const int __b)
{
- return __aarch64_vget_lane_u16 (__a, __b);
+ return __aarch64_vget_lane_any (__a, __b);
}
/* vdups_lane */
__extension__ static __inline float32_t __attribute__ ((__always_inline__))
vdups_lane_f32 (float32x2_t __a, const int __b)
{
- return __aarch64_vget_lane_f32 (__a, __b);
+ return __aarch64_vget_lane_any (__a, __b);
}
__extension__ static __inline int32_t __attribute__ ((__always_inline__))
vdups_lane_s32 (int32x2_t __a, const int __b)
{
- return __aarch64_vget_lane_s32 (__a, __b);
+ return __aarch64_vget_lane_any (__a, __b);
}
__extension__ static __inline uint32_t __attribute__ ((__always_inline__))
vdups_lane_u32 (uint32x2_t __a, const int __b)
{
- return __aarch64_vget_lane_u32 (__a, __b);
+ return __aarch64_vget_lane_any (__a, __b);
}
/* vdupd_lane */
@@ -14787,76 +14723,76 @@ vdupd_lane_u64 (uint64x1_t __a, const int __b)
__extension__ static __inline poly8_t __attribute__ ((__always_inline__))
vdupb_laneq_p8 (poly8x16_t __a, const int __b)
{
- return __aarch64_vgetq_lane_p8 (__a, __b);
+ return __aarch64_vget_lane_any (__a, __b);
}
__extension__ static __inline int8_t __attribute__ ((__always_inline__))
vdupb_laneq_s8 (int8x16_t __a, const int __attribute__ ((unused)) __b)
{
- return __aarch64_vgetq_lane_s8 (__a, __b);
+ return __aarch64_vget_lane_any (__a, __b);
}
__extension__ static __inline uint8_t __attribute__ ((__always_inline__))
vdupb_laneq_u8 (uint8x16_t __a, const int __b)
{
- return __aarch64_vgetq_lane_u8 (__a, __b);
+ return __aarch64_vget_lane_any (__a, __b);
}
/* vduph_laneq */
__extension__ static __inline poly16_t __attribute__ ((__always_inline__))
vduph_laneq_p16 (poly16x8_t __a, const int __b)
{
- return __aarch64_vgetq_lane_p16 (__a, __b);
+ return __aarch64_vget_lane_any (__a, __b);
}
__extension__ static __inline int16_t __attribute__ ((__always_inline__))
vduph_laneq_s16 (int16x8_t __a, const int __b)
{
- return __aarch64_vgetq_lane_s16 (__a, __b);
+ return __aarch64_vget_lane_any (__a, __b);
}
__extension__ static __inline uint16_t __attribute__ ((__always_inline__))
vduph_laneq_u16 (uint16x8_t __a, const int __b)
{
- return __aarch64_vgetq_lane_u16 (__a, __b);
+ return __aarch64_vget_lane_any (__a, __b);
}
/* vdups_laneq */
__extension__ static __inline float32_t __attribute__ ((__always_inline__))
vdups_laneq_f32 (float32x4_t __a, const int __b)
{
- return __aarch64_vgetq_lane_f32 (__a, __b);
+ return __aarch64_vget_lane_any (__a, __b);
}
__extension__ static __inline int32_t __attribute__ ((__always_inline__))
vdups_laneq_s32 (int32x4_t __a, const int __b)
{
- return __aarch64_vgetq_lane_s32 (__a, __b);
+ return __aarch64_vget_lane_any (__a, __b);
}
__extension__ static __inline uint32_t __attribute__ ((__always_inline__))
vdups_laneq_u32 (uint32x4_t __a, const int __b)
{
- return __aarch64_vgetq_lane_u32 (__a, __b);
+ return __aarch64_vget_lane_any (__a, __b);
}
/* vdupd_laneq */
__extension__ static __inline float64_t __attribute__ ((__always_inline__))
vdupd_laneq_f64 (float64x2_t __a, const int __b)
{
- return __aarch64_vgetq_lane_f64 (__a, __b);
+ return __aarch64_vget_lane_any (__a, __b);
}
__extension__ static __inline int64_t __attribute__ ((__always_inline__))
vdupd_laneq_s64 (int64x2_t __a, const int __b)
{
- return __aarch64_vgetq_lane_s64 (__a, __b);
+ return __aarch64_vget_lane_any (__a, __b);
}
__extension__ static __inline uint64_t __attribute__ ((__always_inline__))
vdupd_laneq_u64 (uint64x2_t __a, const int __b)
{
- return __aarch64_vgetq_lane_u64 (__a, __b);
+ return __aarch64_vget_lane_any (__a, __b);
}
/* vext */
@@ -15218,7 +15154,7 @@ __extension__ static __inline float32_t __attribute__ ((__always_inline__))
vfmas_lane_f32 (float32_t __a, float32_t __b,
float32x2_t __c, const int __lane)
{
- return __builtin_fmaf (__b, __aarch64_vget_lane_f32 (__c, __lane), __a);
+ return __builtin_fmaf (__b, __aarch64_vget_lane_any (__c, __lane), __a);
}
/* vfma_laneq */
@@ -15236,7 +15172,7 @@ __extension__ static __inline float64x1_t __attribute__ ((__always_inline__))
vfma_laneq_f64 (float64x1_t __a, float64x1_t __b,
float64x2_t __c, const int __lane)
{
- float64_t __c0 = __aarch64_vgetq_lane_f64 (__c, __lane);
+ float64_t __c0 = __aarch64_vget_lane_any (__c, __lane);
return (float64x1_t) {__builtin_fma (__b[0], __c0, __a[0])};
}
@@ -15244,14 +15180,14 @@ __extension__ static __inline float64_t __attribute__ ((__always_inline__))
vfmad_laneq_f64 (float64_t __a, float64_t __b,
float64x2_t __c, const int __lane)
{
- return __builtin_fma (__b, __aarch64_vgetq_lane_f64 (__c, __lane), __a);
+ return __builtin_fma (__b, __aarch64_vget_lane_any (__c, __lane), __a);
}
__extension__ static __inline float32_t __attribute__ ((__always_inline__))
vfmas_laneq_f32 (float32_t __a, float32_t __b,
float32x4_t __c, const int __lane)
{
- return __builtin_fmaf (__b, __aarch64_vgetq_lane_f32 (__c, __lane), __a);
+ return __builtin_fmaf (__b, __aarch64_vget_lane_any (__c, __lane), __a);
}
/* vfmaq_lane */
@@ -15348,7 +15284,7 @@ __extension__ static __inline float32_t __attribute__ ((__always_inline__))
vfmss_lane_f32 (float32_t __a, float32_t __b,
float32x2_t __c, const int __lane)
{
- return __builtin_fmaf (-__b, __aarch64_vget_lane_f32 (__c, __lane), __a);
+ return __builtin_fmaf (-__b, __aarch64_vget_lane_any (__c, __lane), __a);
}
/* vfms_laneq */
@@ -15366,7 +15302,7 @@ __extension__ static __inline float64x1_t __attribute__ ((__always_inline__))
vfms_laneq_f64 (float64x1_t __a, float64x1_t __b,
float64x2_t __c, const int __lane)
{
- float64_t __c0 = __aarch64_vgetq_lane_f64 (__c, __lane);
+ float64_t __c0 = __aarch64_vget_lane_any (__c, __lane);
return (float64x1_t) {__builtin_fma (-__b[0], __c0, __a[0])};
}
@@ -15374,14 +15310,14 @@ __extension__ static __inline float64_t __attribute__ ((__always_inline__))
vfmsd_laneq_f64 (float64_t __a, float64_t __b,
float64x2_t __c, const int __lane)
{
- return __builtin_fma (-__b, __aarch64_vgetq_lane_f64 (__c, __lane), __a);
+ return __builtin_fma (-__b, __aarch64_vget_lane_any (__c, __lane), __a);
}
__extension__ static __inline float32_t __attribute__ ((__always_inline__))
vfmss_laneq_f32 (float32_t __a, float32_t __b,
float32x4_t __c, const int __lane)
{
- return __builtin_fmaf (-__b, __aarch64_vgetq_lane_f32 (__c, __lane), __a);
+ return __builtin_fmaf (-__b, __aarch64_vget_lane_any (__c, __lane), __a);
}
/* vfmsq_lane */
@@ -18382,35 +18318,35 @@ __extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
vmla_lane_f32 (float32x2_t __a, float32x2_t __b,
float32x2_t __c, const int __lane)
{
- return (__a + (__b * __aarch64_vget_lane_f32 (__c, __lane)));
+ return (__a + (__b * __aarch64_vget_lane_any (__c, __lane)));
}
__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
vmla_lane_s16 (int16x4_t __a, int16x4_t __b,
int16x4_t __c, const int __lane)
{
- return (__a + (__b * __aarch64_vget_lane_s16 (__c, __lane)));
+ return (__a + (__b * __aarch64_vget_lane_any (__c, __lane)));
}
__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
vmla_lane_s32 (int32x2_t __a, int32x2_t __b,
int32x2_t __c, const int __lane)
{
- return (__a + (__b * __aarch64_vget_lane_s32 (__c, __lane)));
+ return (__a + (__b * __aarch64_vget_lane_any (__c, __lane)));
}
__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
vmla_lane_u16 (uint16x4_t __a, uint16x4_t __b,
uint16x4_t __c, const int __lane)
{
- return (__a + (__b * __aarch64_vget_lane_u16 (__c, __lane)));
+ return (__a + (__b * __aarch64_vget_lane_any (__c, __lane)));
}
__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
vmla_lane_u32 (uint32x2_t __a, uint32x2_t __b,
uint32x2_t __c, const int __lane)
{
- return (__a + (__b * __aarch64_vget_lane_u32 (__c, __lane)));
+ return (__a + (__b * __aarch64_vget_lane_any (__c, __lane)));
}
/* vmla_laneq */
@@ -18419,35 +18355,35 @@ __extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
vmla_laneq_f32 (float32x2_t __a, float32x2_t __b,
float32x4_t __c, const int __lane)
{
- return (__a + (__b * __aarch64_vgetq_lane_f32 (__c, __lane)));
+ return (__a + (__b * __aarch64_vget_lane_any (__c, __lane)));
}
__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
vmla_laneq_s16 (int16x4_t __a, int16x4_t __b,
int16x8_t __c, const int __lane)
{
- return (__a + (__b * __aarch64_vgetq_lane_s16 (__c, __lane)));
+ return (__a + (__b * __aarch64_vget_lane_any (__c, __lane)));
}
__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
vmla_laneq_s32 (int32x2_t __a, int32x2_t __b,
int32x4_t __c, const int __lane)
{
- return (__a + (__b * __aarch64_vgetq_lane_s32 (__c, __lane)));
+ return (__a + (__b * __aarch64_vget_lane_any (__c, __lane)));
}
__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
vmla_laneq_u16 (uint16x4_t __a, uint16x4_t __b,
uint16x8_t __c, const int __lane)
{
- return (__a + (__b * __aarch64_vgetq_lane_u16 (__c, __lane)));
+ return (__a + (__b * __aarch64_vget_lane_any (__c, __lane)));
}
__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
vmla_laneq_u32 (uint32x2_t __a, uint32x2_t __b,
uint32x4_t __c, const int __lane)
{
- return (__a + (__b * __aarch64_vgetq_lane_u32 (__c, __lane)));
+ return (__a + (__b * __aarch64_vget_lane_any (__c, __lane)));
}
/* vmlaq_lane */
@@ -18456,35 +18392,35 @@ __extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
vmlaq_lane_f32 (float32x4_t __a, float32x4_t __b,
float32x2_t __c, const int __lane)
{
- return (__a + (__b * __aarch64_vget_lane_f32 (__c, __lane)));
+ return (__a + (__b * __aarch64_vget_lane_any (__c, __lane)));
}
__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
vmlaq_lane_s16 (int16x8_t __a, int16x8_t __b,
int16x4_t __c, const int __lane)
{
- return (__a + (__b * __aarch64_vget_lane_s16 (__c, __lane)));
+ return (__a + (__b * __aarch64_vget_lane_any (__c, __lane)));
}
__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
vmlaq_lane_s32 (int32x4_t __a, int32x4_t __b,
int32x2_t __c, const int __lane)
{
- return (__a + (__b * __aarch64_vget_lane_s32 (__c, __lane)));
+ return (__a + (__b * __aarch64_vget_lane_any (__c, __lane)));
}
__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
vmlaq_lane_u16 (uint16x8_t __a, uint16x8_t __b,
uint16x4_t __c, const int __lane)
{
- return (__a + (__b * __aarch64_vget_lane_u16 (__c, __lane)));
+ return (__a + (__b * __aarch64_vget_lane_any (__c, __lane)));
}
__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
vmlaq_lane_u32 (uint32x4_t __a, uint32x4_t __b,
uint32x2_t __c, const int __lane)
{
- return (__a + (__b * __aarch64_vget_lane_u32 (__c, __lane)));
+ return (__a + (__b * __aarch64_vget_lane_any (__c, __lane)));
}
/* vmlaq_laneq */
@@ -18493,35 +18429,35 @@ __extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
vmlaq_laneq_f32 (float32x4_t __a, float32x4_t __b,
float32x4_t __c, const int __lane)
{
- return (__a + (__b * __aarch64_vgetq_lane_f32 (__c, __lane)));
+ return (__a + (__b * __aarch64_vget_lane_any (__c, __lane)));
}
__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
vmlaq_laneq_s16 (int16x8_t __a, int16x8_t __b,
int16x8_t __c, const int __lane)
{
- return (__a + (__b * __aarch64_vgetq_lane_s16 (__c, __lane)));
+ return (__a + (__b * __aarch64_vget_lane_any (__c, __lane)));
}
__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
vmlaq_laneq_s32 (int32x4_t __a, int32x4_t __b,
int32x4_t __c, const int __lane)
{
- return (__a + (__b * __aarch64_vgetq_lane_s32 (__c, __lane)));
+ return (__a + (__b * __aarch64_vget_lane_any (__c, __lane)));
}
__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
vmlaq_laneq_u16 (uint16x8_t __a, uint16x8_t __b,
uint16x8_t __c, const int __lane)
{
- return (__a + (__b * __aarch64_vgetq_lane_u16 (__c, __lane)));
+ return (__a + (__b * __aarch64_vget_lane_any (__c, __lane)));
}
__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
vmlaq_laneq_u32 (uint32x4_t __a, uint32x4_t __b,
uint32x4_t __c, const int __lane)
{
- return (__a + (__b * __aarch64_vgetq_lane_u32 (__c, __lane)));
+ return (__a + (__b * __aarch64_vget_lane_any (__c, __lane)));
}
/* vmls */
@@ -18556,35 +18492,35 @@ __extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
vmls_lane_f32 (float32x2_t __a, float32x2_t __b,
float32x2_t __c, const int __lane)
{
- return (__a - (__b * __aarch64_vget_lane_f32 (__c, __lane)));
+ return (__a - (__b * __aarch64_vget_lane_any (__c, __lane)));
}
__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
vmls_lane_s16 (int16x4_t __a, int16x4_t __b,
int16x4_t __c, const int __lane)
{
- return (__a - (__b * __aarch64_vget_lane_s16 (__c, __lane)));
+ return (__a - (__b * __aarch64_vget_lane_any (__c, __lane)));
}
__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
vmls_lane_s32 (int32x2_t __a, int32x2_t __b,
int32x2_t __c, const int __lane)
{
- return (__a - (__b * __aarch64_vget_lane_s32 (__c, __lane)));
+ return (__a - (__b * __aarch64_vget_lane_any (__c, __lane)));
}
__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
vmls_lane_u16 (uint16x4_t __a, uint16x4_t __b,
uint16x4_t __c, const int __lane)
{
- return (__a - (__b * __aarch64_vget_lane_u16 (__c, __lane)));
+ return (__a - (__b * __aarch64_vget_lane_any (__c, __lane)));
}
__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
vmls_lane_u32 (uint32x2_t __a, uint32x2_t __b,
uint32x2_t __c, const int __lane)
{
- return (__a - (__b * __aarch64_vget_lane_u32 (__c, __lane)));
+ return (__a - (__b * __aarch64_vget_lane_any (__c, __lane)));
}
/* vmls_laneq */
@@ -18593,35 +18529,35 @@ __extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
vmls_laneq_f32 (float32x2_t __a, float32x2_t __b,
float32x4_t __c, const int __lane)
{
- return (__a - (__b * __aarch64_vgetq_lane_f32 (__c, __lane)));
+ return (__a - (__b * __aarch64_vget_lane_any (__c, __lane)));
}
__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
vmls_laneq_s16 (int16x4_t __a, int16x4_t __b,
int16x8_t __c, const int __lane)
{
- return (__a - (__b * __aarch64_vgetq_lane_s16 (__c, __lane)));
+ return (__a - (__b * __aarch64_vget_lane_any (__c, __lane)));
}
__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
vmls_laneq_s32 (int32x2_t __a, int32x2_t __b,
int32x4_t __c, const int __lane)
{
- return (__a - (__b * __aarch64_vgetq_lane_s32 (__c, __lane)));
+ return (__a - (__b * __aarch64_vget_lane_any (__c, __lane)));
}
__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
vmls_laneq_u16 (uint16x4_t __a, uint16x4_t __b,
uint16x8_t __c, const int __lane)
{
- return (__a - (__b * __aarch64_vgetq_lane_u16 (__c, __lane)));
+ return (__a - (__b * __aarch64_vget_lane_any (__c, __lane)));
}
__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
vmls_laneq_u32 (uint32x2_t __a, uint32x2_t __b,
uint32x4_t __c, const int __lane)
{
- return (__a - (__b * __aarch64_vgetq_lane_u32 (__c, __lane)));
+ return (__a - (__b * __aarch64_vget_lane_any (__c, __lane)));
}
/* vmlsq_lane */
@@ -18630,35 +18566,35 @@ __extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
vmlsq_lane_f32 (float32x4_t __a, float32x4_t __b,
float32x2_t __c, const int __lane)
{
- return (__a - (__b * __aarch64_vget_lane_f32 (__c, __lane)));
+ return (__a - (__b * __aarch64_vget_lane_any (__c, __lane)));
}
__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
vmlsq_lane_s16 (int16x8_t __a, int16x8_t __b,
int16x4_t __c, const int __lane)
{
- return (__a - (__b * __aarch64_vget_lane_s16 (__c, __lane)));
+ return (__a - (__b * __aarch64_vget_lane_any (__c, __lane)));
}
__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
vmlsq_lane_s32 (int32x4_t __a, int32x4_t __b,
int32x2_t __c, const int __lane)
{
- return (__a - (__b * __aarch64_vget_lane_s32 (__c, __lane)));
+ return (__a - (__b * __aarch64_vget_lane_any (__c, __lane)));
}
__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
vmlsq_lane_u16 (uint16x8_t __a, uint16x8_t __b,
uint16x4_t __c, const int __lane)
{
- return (__a - (__b * __aarch64_vget_lane_u16 (__c, __lane)));
+ return (__a - (__b * __aarch64_vget_lane_any (__c, __lane)));
}
__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
vmlsq_lane_u32 (uint32x4_t __a, uint32x4_t __b,
uint32x2_t __c, const int __lane)
{
- return (__a - (__b * __aarch64_vget_lane_u32 (__c, __lane)));
+ return (__a - (__b * __aarch64_vget_lane_any (__c, __lane)));
}
/* vmlsq_laneq */
@@ -18667,34 +18603,34 @@ __extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
vmlsq_laneq_f32 (float32x4_t __a, float32x4_t __b,
float32x4_t __c, const int __lane)
{
- return (__a - (__b * __aarch64_vgetq_lane_f32 (__c, __lane)));
+ return (__a - (__b * __aarch64_vget_lane_any (__c, __lane)));
}
__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
vmlsq_laneq_s16 (int16x8_t __a, int16x8_t __b,
int16x8_t __c, const int __lane)
{
- return (__a - (__b * __aarch64_vgetq_lane_s16 (__c, __lane)));
+ return (__a - (__b * __aarch64_vget_lane_any (__c, __lane)));
}
__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
vmlsq_laneq_s32 (int32x4_t __a, int32x4_t __b,
int32x4_t __c, const int __lane)
{
- return (__a - (__b * __aarch64_vgetq_lane_s32 (__c, __lane)));
+ return (__a - (__b * __aarch64_vget_lane_any (__c, __lane)));
}
__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
vmlsq_laneq_u16 (uint16x8_t __a, uint16x8_t __b,
uint16x8_t __c, const int __lane)
{
- return (__a - (__b * __aarch64_vgetq_lane_u16 (__c, __lane)));
+ return (__a - (__b * __aarch64_vget_lane_any (__c, __lane)));
}
__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
vmlsq_laneq_u32 (uint32x4_t __a, uint32x4_t __b,
uint32x4_t __c, const int __lane)
{
- return (__a - (__b * __aarch64_vgetq_lane_u32 (__c, __lane)));
+ return (__a - (__b * __aarch64_vget_lane_any (__c, __lane)));
}
/* vmov_n_ */
@@ -18848,7 +18784,7 @@ vmovq_n_u64 (uint64_t __a)
__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
vmul_lane_f32 (float32x2_t __a, float32x2_t __b, const int __lane)
{
- return __a * __aarch64_vget_lane_f32 (__b, __lane);
+ return __a * __aarch64_vget_lane_any (__b, __lane);
}
__extension__ static __inline float64x1_t __attribute__ ((__always_inline__))
@@ -18860,25 +18796,25 @@ vmul_lane_f64 (float64x1_t __a, float64x1_t __b, const int __lane)
__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
vmul_lane_s16 (int16x4_t __a, int16x4_t __b, const int __lane)
{
- return __a * __aarch64_vget_lane_s16 (__b, __lane);
+ return __a * __aarch64_vget_lane_any (__b, __lane);
}
__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
vmul_lane_s32 (int32x2_t __a, int32x2_t __b, const int __lane)
{
- return __a * __aarch64_vget_lane_s32 (__b, __lane);
+ return __a * __aarch64_vget_lane_any (__b, __lane);
}
__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
vmul_lane_u16 (uint16x4_t __a, uint16x4_t __b, const int __lane)
{
- return __a * __aarch64_vget_lane_u16 (__b, __lane);
+ return __a * __aarch64_vget_lane_any (__b, __lane);
}
__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
vmul_lane_u32 (uint32x2_t __a, uint32x2_t __b, const int __lane)
{
- return __a * __aarch64_vget_lane_u32 (__b, __lane);
+ return __a * __aarch64_vget_lane_any (__b, __lane);
}
/* vmuld_lane */
@@ -18886,13 +18822,13 @@ vmul_lane_u32 (uint32x2_t __a, uint32x2_t __b, const int __lane)
__extension__ static __inline float64_t __attribute__ ((__always_inline__))
vmuld_lane_f64 (float64_t __a, float64x1_t __b, const int __lane)
{
- return __a * __aarch64_vget_lane_f64 (__b, __lane);
+ return __a * __aarch64_vget_lane_any (__b, __lane);
}
__extension__ static __inline float64_t __attribute__ ((__always_inline__))
vmuld_laneq_f64 (float64_t __a, float64x2_t __b, const int __lane)
{
- return __a * __aarch64_vgetq_lane_f64 (__b, __lane);
+ return __a * __aarch64_vget_lane_any (__b, __lane);
}
/* vmuls_lane */
@@ -18900,13 +18836,13 @@ vmuld_laneq_f64 (float64_t __a, float64x2_t __b, const int __lane)
__extension__ static __inline float32_t __attribute__ ((__always_inline__))
vmuls_lane_f32 (float32_t __a, float32x2_t __b, const int __lane)
{
- return __a * __aarch64_vget_lane_f32 (__b, __lane);
+ return __a * __aarch64_vget_lane_any (__b, __lane);
}
__extension__ static __inline float32_t __attribute__ ((__always_inline__))
vmuls_laneq_f32 (float32_t __a, float32x4_t __b, const int __lane)
{
- return __a * __aarch64_vgetq_lane_f32 (__b, __lane);
+ return __a * __aarch64_vget_lane_any (__b, __lane);
}
/* vmul_laneq */
@@ -18914,37 +18850,37 @@ vmuls_laneq_f32 (float32_t __a, float32x4_t __b, const int __lane)
__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
vmul_laneq_f32 (float32x2_t __a, float32x4_t __b, const int __lane)
{
- return __a * __aarch64_vgetq_lane_f32 (__b, __lane);
+ return __a * __aarch64_vget_lane_any (__b, __lane);
}
__extension__ static __inline float64x1_t __attribute__ ((__always_inline__))
vmul_laneq_f64 (float64x1_t __a, float64x2_t __b, const int __lane)
{
- return __a * __aarch64_vgetq_lane_f64 (__b, __lane);
+ return __a * __aarch64_vget_lane_any (__b, __lane);
}
__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
vmul_laneq_s16 (int16x4_t __a, int16x8_t __b, const int __lane)
{
- return __a * __aarch64_vgetq_lane_s16 (__b, __lane);
+ return __a * __aarch64_vget_lane_any (__b, __lane);
}
__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
vmul_laneq_s32 (int32x2_t __a, int32x4_t __b, const int __lane)
{
- return __a * __aarch64_vgetq_lane_s32 (__b, __lane);
+ return __a * __aarch64_vget_lane_any (__b, __lane);
}
__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
vmul_laneq_u16 (uint16x4_t __a, uint16x8_t __b, const int __lane)
{
- return __a * __aarch64_vgetq_lane_u16 (__b, __lane);
+ return __a * __aarch64_vget_lane_any (__b, __lane);
}
__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
vmul_laneq_u32 (uint32x2_t __a, uint32x4_t __b, const int __lane)
{
- return __a * __aarch64_vgetq_lane_u32 (__b, __lane);
+ return __a * __aarch64_vget_lane_any (__b, __lane);
}
/* vmul_n */
@@ -18960,7 +18896,7 @@ vmul_n_f64 (float64x1_t __a, float64_t __b)
__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
vmulq_lane_f32 (float32x4_t __a, float32x2_t __b, const int __lane)
{
- return __a * __aarch64_vget_lane_f32 (__b, __lane);
+ return __a * __aarch64_vget_lane_any (__b, __lane);
}
__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
@@ -18973,25 +18909,25 @@ vmulq_lane_f64 (float64x2_t __a, float64x1_t __b, const int __lane)
__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
vmulq_lane_s16 (int16x8_t __a, int16x4_t __b, const int __lane)
{
- return __a * __aarch64_vget_lane_s16 (__b, __lane);
+ return __a * __aarch64_vget_lane_any (__b, __lane);
}
__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
vmulq_lane_s32 (int32x4_t __a, int32x2_t __b, const int __lane)
{
- return __a * __aarch64_vget_lane_s32 (__b, __lane);
+ return __a * __aarch64_vget_lane_any (__b, __lane);
}
__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
vmulq_lane_u16 (uint16x8_t __a, uint16x4_t __b, const int __lane)
{
- return __a * __aarch64_vget_lane_u16 (__b, __lane);
+ return __a * __aarch64_vget_lane_any (__b, __lane);
}
__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
vmulq_lane_u32 (uint32x4_t __a, uint32x2_t __b, const int __lane)
{
- return __a * __aarch64_vget_lane_u32 (__b, __lane);
+ return __a * __aarch64_vget_lane_any (__b, __lane);
}
/* vmulq_laneq */
@@ -18999,37 +18935,37 @@ vmulq_lane_u32 (uint32x4_t __a, uint32x2_t __b, const int __lane)
__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
vmulq_laneq_f32 (float32x4_t __a, float32x4_t __b, const int __lane)
{
- return __a * __aarch64_vgetq_lane_f32 (__b, __lane);
+ return __a * __aarch64_vget_lane_any (__b, __lane);
}
__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
vmulq_laneq_f64 (float64x2_t __a, float64x2_t __b, const int __lane)
{
- return __a * __aarch64_vgetq_lane_f64 (__b, __lane);
+ return __a * __aarch64_vget_lane_any (__b, __lane);
}
__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
vmulq_laneq_s16 (int16x8_t __a, int16x8_t __b, const int __lane)
{
- return __a * __aarch64_vgetq_lane_s16 (__b, __lane);
+ return __a * __aarch64_vget_lane_any (__b, __lane);
}
__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
vmulq_laneq_s32 (int32x4_t __a, int32x4_t __b, const int __lane)
{
- return __a * __aarch64_vgetq_lane_s32 (__b, __lane);
+ return __a * __aarch64_vget_lane_any (__b, __lane);
}
__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
vmulq_laneq_u16 (uint16x8_t __a, uint16x8_t __b, const int __lane)
{
- return __a * __aarch64_vgetq_lane_u16 (__b, __lane);
+ return __a * __aarch64_vget_lane_any (__b, __lane);
}
__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
vmulq_laneq_u32 (uint32x4_t __a, uint32x4_t __b, const int __lane)
{
- return __a * __aarch64_vgetq_lane_u32 (__b, __lane);
+ return __a * __aarch64_vget_lane_any (__b, __lane);
}
/* vneg */
@@ -25221,31 +25157,6 @@ __INTERLEAVE_LIST (zip)
/* End of optimal implementations in approved order. */
#undef __aarch64_vget_lane_any
-#undef __aarch64_vget_lane_f32
-#undef __aarch64_vget_lane_f64
-#undef __aarch64_vget_lane_p8
-#undef __aarch64_vget_lane_p16
-#undef __aarch64_vget_lane_s8
-#undef __aarch64_vget_lane_s16
-#undef __aarch64_vget_lane_s32
-#undef __aarch64_vget_lane_s64
-#undef __aarch64_vget_lane_u8
-#undef __aarch64_vget_lane_u16
-#undef __aarch64_vget_lane_u32
-#undef __aarch64_vget_lane_u64
-
-#undef __aarch64_vgetq_lane_f32
-#undef __aarch64_vgetq_lane_f64
-#undef __aarch64_vgetq_lane_p8
-#undef __aarch64_vgetq_lane_p16
-#undef __aarch64_vgetq_lane_s8
-#undef __aarch64_vgetq_lane_s16
-#undef __aarch64_vgetq_lane_s32
-#undef __aarch64_vgetq_lane_s64
-#undef __aarch64_vgetq_lane_u8
-#undef __aarch64_vgetq_lane_u16
-#undef __aarch64_vgetq_lane_u32
-#undef __aarch64_vgetq_lane_u64
#undef __aarch64_vdup_lane_any
#undef __aarch64_vdup_lane_f32