author    Craig Topper <craig.topper@intel.com>    2018-06-07 02:46:02 +0000
committer Craig Topper <craig.topper@intel.com>    2018-06-07 02:46:02 +0000
commit    57ebd133fb0101cd93bb97bd61275a9fc6a5c642 (patch)
tree      d494e8a5590329d98809e29ed51a75787bde8dbf /lib/Headers/avx512fintrin.h
parent    f9e757c085f864c96f76f4807e94f6bfb6146f49 (diff)
download  clang-57ebd133fb0101cd93bb97bd61275a9fc6a5c642.tar.gz
[X86] Add back _mask, _maskz, and _mask3 builtins for some 512-bit fmadd/fmsub/fmaddsub/fmsubadd builtins.
Summary:
We recently switched to using selects in the intrinsic header files for the FMA instructions. But the 512-bit versions support flavors with a rounding mode, which must be an Integer Constant Expression; this has forced those intrinsics to be implemented as macros. As it stands now, the mask and mask3 intrinsics evaluate one of their macro arguments twice. If that argument is itself another intrinsic macro, we can end up over-expanding macros; or, if it's something we could CSE later, it shows up multiple times when it shouldn't (see the first sketch below).

I tried adding __extension__ around the macro, making it a statement expression, and declaring a local variable. But whatever name you choose for the local variable can never be used as the name of an input to the macro in user code; if that happens, you end up with the same name on the LHS and RHS of an assignment after expansion. We might be safe if we put __ in front of the variable names, since those names are reserved and user code shouldn't use them, but I wasn't sure I wanted to make that claim.

The other option, which I've chosen here, is to add back _mask, _maskz, and _mask3 flavors of the builtins, which we expand in CGBuiltin.cpp to replicate the arguments as needed and to insert any fneg needed on the third operand to make a subtract. The _maskz flavor isn't truly necessary if we have an unmasked version, or if we use the masked version with a -1 mask and wrap a select around it (see the second sketch below); but I've chosen to make things more uniform.

I separated out the scalar builtin handling to avoid too many things going on in EmitX86FMAExpr. It was different enough, due to the extract and insert, that the minor duplication of the CreateCall was probably worth it.

Reviewers: tkrupa, RKSimon, spatel, GBuella

Reviewed By: tkrupa

Subscribers: cfe-commits

Differential Revision: https://reviews.llvm.org/D47724

git-svn-id: https://llvm.org/svn/llvm-project/cfe/trunk@334159 91177308-0d34-0410-b5e6-96231b3b80d8
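The two macro pitfalls above are easy to reproduce with toy macros. The following is a minimal sketch, not the real intrinsics: TOY_FMA, BAD_MASK_FMA, and STMT_MASK_FMA are made-up stand-ins, but BAD_MASK_FMA has the same shape as the old select-based _mm512_mask_fmadd_round_pd (its A argument is pasted twice), and STMT_MASK_FMA shows the name-capture hazard of the statement-expression workaround.

#include <stdio.h>

#define TOY_FMA(A, B, C) ((A) * (B) + (C))

/* A appears twice: once as an FMA operand, once as the passthrough.
   A nested intrinsic macro passed as A therefore expands twice. */
#define BAD_MASK_FMA(A, U, B, C) ((U) ? TOY_FMA((A), (B), (C)) : (A))

/* The statement-expression workaround expands A only once, but the
   local variable name can collide with user code. */
#define STMT_MASK_FMA(A, U, B, C) __extension__ ({ \
    double __a = (A);                              \
    (U) ? TOY_FMA(__a, (B), (C)) : __a;            \
  })

int main(void) {
  double x = 2.0;
  /* The inner BAD_MASK_FMA appears twice in the preprocessed output. */
  printf("%f\n", BAD_MASK_FMA(BAD_MASK_FMA(x, 1, 3.0, 4.0), 1, 5.0, 6.0));

  double __a = 2.0; /* reserved name, but nothing stops user code */
  /* Expands to "double __a = (__a);" -- the local shadows the caller's
     variable and is self-initialized, so an indeterminate value is read. */
  printf("%f\n", STMT_MASK_FMA(__a, 1, 3.0, 4.0));
  return 0;
}

Running the preprocessor (clang -E) on the first call shows the duplicated expansion; the __a collision is exactly the "use __ prefixes and hope" option the message declines to rely on.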
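For the _maskz equivalence mentioned in the message, here is a hedged sketch built only from builtins that appear in this patch: _mm512_maskz_fmadd_round_pd_EMU is a hypothetical name, combining the _mask flavor with an all-ones mask and a zeroing select. The patch adds a dedicated _maskz builtin instead, keeping the four flavors uniform.

/* Hypothetical emulation; assumes an AVX-512F target and the types and
   builtins defined inside Clang's avx512fintrin.h. */
#define _mm512_maskz_fmadd_round_pd_EMU(U, A, B, C, R) \
  (__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
      (__v8df)__builtin_ia32_vfmaddpd512_mask((__v8df)(__m512d)(A), \
                                              (__v8df)(__m512d)(B), \
                                              (__v8df)(__m512d)(C), \
                                              (__mmask8)-1, (int)(R)), \
      (__v8df)_mm512_setzero_pd())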
Diffstat (limited to 'lib/Headers/avx512fintrin.h')
-rw-r--r--   lib/Headers/avx512fintrin.h   956
1 file changed, 432 insertions, 524 deletions
diff --git a/lib/Headers/avx512fintrin.h b/lib/Headers/avx512fintrin.h
index 2b3633e377..47a0c9d433 100644
--- a/lib/Headers/avx512fintrin.h
+++ b/lib/Headers/avx512fintrin.h
@@ -2578,910 +2578,818 @@ _mm512_maskz_div_ps(__mmask16 __U, __m512 __A, __m512 __B) {
(__mmask8)-1, (int)(R))
#define _mm512_fmadd_round_pd(A, B, C, R) \
- (__m512d)__builtin_ia32_vfmaddpd512((__v8df)(__m512d)(A), \
- (__v8df)(__m512d)(B), \
- (__v8df)(__m512d)(C), (int)(R))
+ (__m512d)__builtin_ia32_vfmaddpd512_mask((__v8df)(__m512d)(A), \
+ (__v8df)(__m512d)(B), \
+ (__v8df)(__m512d)(C), \
+ (__mmask8)-1, (int)(R))
#define _mm512_mask_fmadd_round_pd(A, U, B, C, R) \
- (__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
- __builtin_ia32_vfmaddpd512((__v8df)(__m512d)(A), \
- (__v8df)(__m512d)(B), \
- (__v8df)(__m512d)(C), \
- (int)(R)), \
- (__v8df)(__m512d)(A))
+ (__m512d)__builtin_ia32_vfmaddpd512_mask((__v8df)(__m512d)(A), \
+ (__v8df)(__m512d)(B), \
+ (__v8df)(__m512d)(C), \
+ (__mmask8)(U), (int)(R))
#define _mm512_mask3_fmadd_round_pd(A, B, C, U, R) \
- (__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
- __builtin_ia32_vfmaddpd512((__v8df)(__m512d)(A), \
- (__v8df)(__m512d)(B), \
- (__v8df)(__m512d)(C), \
- (int)(R)), \
- (__v8df)(__m512d)(C))
+ (__m512d)__builtin_ia32_vfmaddpd512_mask3((__v8df)(__m512d)(A), \
+ (__v8df)(__m512d)(B), \
+ (__v8df)(__m512d)(C), \
+ (__mmask8)(U), (int)(R))
#define _mm512_maskz_fmadd_round_pd(U, A, B, C, R) \
- (__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
- __builtin_ia32_vfmaddpd512((__v8df)(__m512d)(A), \
- (__v8df)(__m512d)(B), \
- (__v8df)(__m512d)(C), \
- (int)(R)), \
- (__v8df)_mm512_setzero_pd())
+ (__m512d)__builtin_ia32_vfmaddpd512_maskz((__v8df)(__m512d)(A), \
+ (__v8df)(__m512d)(B), \
+ (__v8df)(__m512d)(C), \
+ (__mmask8)(U), (int)(R))
#define _mm512_fmsub_round_pd(A, B, C, R) \
- (__m512d)__builtin_ia32_vfmaddpd512((__v8df)(__m512d)(A), \
- (__v8df)(__m512d)(B), \
- -(__v8df)(__m512d)(C), \
- (int)(R))
+ (__m512d)__builtin_ia32_vfmaddpd512_mask((__v8df)(__m512d)(A), \
+ (__v8df)(__m512d)(B), \
+ -(__v8df)(__m512d)(C), \
+ (__mmask8)-1, (int)(R))
#define _mm512_mask_fmsub_round_pd(A, U, B, C, R) \
- (__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
- __builtin_ia32_vfmaddpd512((__v8df)(__m512d)(A), \
- (__v8df)(__m512d)(B), \
- -(__v8df)(__m512d)(C), \
- (int)(R)), \
- (__v8df)(__m512d)(A))
+ (__m512d)__builtin_ia32_vfmaddpd512_mask((__v8df)(__m512d)(A), \
+ (__v8df)(__m512d)(B), \
+ -(__v8df)(__m512d)(C), \
+ (__mmask8)(U), (int)(R))
#define _mm512_maskz_fmsub_round_pd(U, A, B, C, R) \
- (__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
- __builtin_ia32_vfmaddpd512((__v8df)(__m512d)(A), \
- (__v8df)(__m512d)(B), \
- -(__v8df)(__m512d)(C), \
- (int)(R)), \
- (__v8df)_mm512_setzero_pd())
+ (__m512d)__builtin_ia32_vfmaddpd512_maskz((__v8df)(__m512d)(A), \
+ (__v8df)(__m512d)(B), \
+ -(__v8df)(__m512d)(C), \
+ (__mmask8)(U), (int)(R))
#define _mm512_fnmadd_round_pd(A, B, C, R) \
- (__m512d)__builtin_ia32_vfmaddpd512(-(__v8df)(__m512d)(A), \
- (__v8df)(__m512d)(B), \
- (__v8df)(__m512d)(C), (int)(R))
+ (__m512d)__builtin_ia32_vfmaddpd512_mask(-(__v8df)(__m512d)(A), \
+ (__v8df)(__m512d)(B), \
+ (__v8df)(__m512d)(C), \
+ (__mmask8)-1, (int)(R))
#define _mm512_mask3_fnmadd_round_pd(A, B, C, U, R) \
- (__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
- __builtin_ia32_vfmaddpd512(-(__v8df)(__m512d)(A), \
- (__v8df)(__m512d)(B), \
- (__v8df)(__m512d)(C), \
- (int)(R)), \
- (__v8df)(__m512d)(C))
+ (__m512d)__builtin_ia32_vfmaddpd512_mask3(-(__v8df)(__m512d)(A), \
+ (__v8df)(__m512d)(B), \
+ (__v8df)(__m512d)(C), \
+ (__mmask8)(U), (int)(R))
#define _mm512_maskz_fnmadd_round_pd(U, A, B, C, R) \
- (__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
- __builtin_ia32_vfmaddpd512(-(__v8df)(__m512d)(A), \
- (__v8df)(__m512d)(B), \
- (__v8df)(__m512d)(C), \
- (int)(R)), \
- (__v8df)_mm512_setzero_pd())
+ (__m512d)__builtin_ia32_vfmaddpd512_maskz(-(__v8df)(__m512d)(A), \
+ (__v8df)(__m512d)(B), \
+ (__v8df)(__m512d)(C), \
+ (__mmask8)(U), (int)(R))
#define _mm512_fnmsub_round_pd(A, B, C, R) \
- (__m512d)__builtin_ia32_vfmaddpd512(-(__v8df)(__m512d)(A), \
- (__v8df)(__m512d)(B), \
- -(__v8df)(__m512d)(C), \
- (int)(R))
+ (__m512d)__builtin_ia32_vfmaddpd512_mask(-(__v8df)(__m512d)(A), \
+ (__v8df)(__m512d)(B), \
+ -(__v8df)(__m512d)(C), \
+ (__mmask8)-1, (int)(R))
#define _mm512_maskz_fnmsub_round_pd(U, A, B, C, R) \
- (__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
- __builtin_ia32_vfmaddpd512(-(__v8df)(__m512d)(A), \
- (__v8df)(__m512d)(B), \
- -(__v8df)(__m512d)(C), \
- (int)(R)), \
- (__v8df)_mm512_setzero_pd())
+ (__m512d)__builtin_ia32_vfmaddpd512_maskz(-(__v8df)(__m512d)(A), \
+ (__v8df)(__m512d)(B), \
+ -(__v8df)(__m512d)(C), \
+ (__mmask8)(U), (int)(R))
static __inline__ __m512d __DEFAULT_FN_ATTRS
_mm512_fmadd_pd(__m512d __A, __m512d __B, __m512d __C)
{
- return (__m512d) __builtin_ia32_vfmaddpd512 ((__v8df) __A,
- (__v8df) __B,
- (__v8df) __C,
- _MM_FROUND_CUR_DIRECTION);
+ return (__m512d) __builtin_ia32_vfmaddpd512_mask ((__v8df) __A,
+ (__v8df) __B,
+ (__v8df) __C,
+ (__mmask8) -1,
+ _MM_FROUND_CUR_DIRECTION);
}
static __inline__ __m512d __DEFAULT_FN_ATTRS
_mm512_mask_fmadd_pd(__m512d __A, __mmask8 __U, __m512d __B, __m512d __C)
{
- return (__m512d) __builtin_ia32_selectpd_512((__mmask8) __U,
- __builtin_ia32_vfmaddpd512 ((__v8df) __A,
- (__v8df) __B,
- (__v8df) __C,
- _MM_FROUND_CUR_DIRECTION),
- (__v8df) __A);
+ return (__m512d) __builtin_ia32_vfmaddpd512_mask ((__v8df) __A,
+ (__v8df) __B,
+ (__v8df) __C,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
}
static __inline__ __m512d __DEFAULT_FN_ATTRS
_mm512_mask3_fmadd_pd(__m512d __A, __m512d __B, __m512d __C, __mmask8 __U)
{
- return (__m512d) __builtin_ia32_selectpd_512((__mmask8) __U,
- __builtin_ia32_vfmaddpd512 ((__v8df) __A,
- (__v8df) __B,
- (__v8df) __C,
- _MM_FROUND_CUR_DIRECTION),
- (__v8df) __C);
+ return (__m512d) __builtin_ia32_vfmaddpd512_mask3 ((__v8df) __A,
+ (__v8df) __B,
+ (__v8df) __C,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
}
static __inline__ __m512d __DEFAULT_FN_ATTRS
_mm512_maskz_fmadd_pd(__mmask8 __U, __m512d __A, __m512d __B, __m512d __C)
{
- return (__m512d) __builtin_ia32_selectpd_512((__mmask8) __U,
- __builtin_ia32_vfmaddpd512 ((__v8df) __A,
- (__v8df) __B,
- (__v8df) __C,
- _MM_FROUND_CUR_DIRECTION),
- (__v8df) _mm512_setzero_pd());
+ return (__m512d) __builtin_ia32_vfmaddpd512_maskz ((__v8df) __A,
+ (__v8df) __B,
+ (__v8df) __C,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
}
static __inline__ __m512d __DEFAULT_FN_ATTRS
_mm512_fmsub_pd(__m512d __A, __m512d __B, __m512d __C)
{
- return (__m512d) __builtin_ia32_vfmaddpd512 ((__v8df) __A,
- (__v8df) __B,
- -(__v8df) __C,
- _MM_FROUND_CUR_DIRECTION);
+ return (__m512d) __builtin_ia32_vfmaddpd512_mask ((__v8df) __A,
+ (__v8df) __B,
+ -(__v8df) __C,
+ (__mmask8) -1,
+ _MM_FROUND_CUR_DIRECTION);
}
static __inline__ __m512d __DEFAULT_FN_ATTRS
_mm512_mask_fmsub_pd(__m512d __A, __mmask8 __U, __m512d __B, __m512d __C)
{
- return (__m512d) __builtin_ia32_selectpd_512((__mmask8) __U,
- __builtin_ia32_vfmaddpd512 ((__v8df) __A,
- (__v8df) __B,
- -(__v8df) __C,
- _MM_FROUND_CUR_DIRECTION),
- (__v8df) __A);
+ return (__m512d) __builtin_ia32_vfmaddpd512_mask ((__v8df) __A,
+ (__v8df) __B,
+ -(__v8df) __C,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
}
static __inline__ __m512d __DEFAULT_FN_ATTRS
_mm512_maskz_fmsub_pd(__mmask8 __U, __m512d __A, __m512d __B, __m512d __C)
{
- return (__m512d) __builtin_ia32_selectpd_512((__mmask8) __U,
- __builtin_ia32_vfmaddpd512 ((__v8df) __A,
- (__v8df) __B,
- -(__v8df) __C,
- _MM_FROUND_CUR_DIRECTION),
- (__v8df) _mm512_setzero_pd());
+ return (__m512d) __builtin_ia32_vfmaddpd512_maskz ((__v8df) __A,
+ (__v8df) __B,
+ -(__v8df) __C,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
}
static __inline__ __m512d __DEFAULT_FN_ATTRS
_mm512_fnmadd_pd(__m512d __A, __m512d __B, __m512d __C)
{
- return (__m512d) __builtin_ia32_vfmaddpd512 (-(__v8df) __A,
- (__v8df) __B,
- (__v8df) __C,
- _MM_FROUND_CUR_DIRECTION);
+ return (__m512d) __builtin_ia32_vfmaddpd512_mask ((__v8df) __A,
+ -(__v8df) __B,
+ (__v8df) __C,
+ (__mmask8) -1,
+ _MM_FROUND_CUR_DIRECTION);
}
static __inline__ __m512d __DEFAULT_FN_ATTRS
_mm512_mask3_fnmadd_pd(__m512d __A, __m512d __B, __m512d __C, __mmask8 __U)
{
- return (__m512d) __builtin_ia32_selectpd_512((__mmask8) __U,
- __builtin_ia32_vfmaddpd512 (-(__v8df) __A,
- (__v8df) __B,
- (__v8df) __C,
- _MM_FROUND_CUR_DIRECTION),
- (__v8df) __C);
+ return (__m512d) __builtin_ia32_vfmaddpd512_mask3 (-(__v8df) __A,
+ (__v8df) __B,
+ (__v8df) __C,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
}
static __inline__ __m512d __DEFAULT_FN_ATTRS
_mm512_maskz_fnmadd_pd(__mmask8 __U, __m512d __A, __m512d __B, __m512d __C)
{
- return (__m512d) __builtin_ia32_selectpd_512((__mmask8) __U,
- __builtin_ia32_vfmaddpd512 (-(__v8df) __A,
- (__v8df) __B,
- (__v8df) __C,
- _MM_FROUND_CUR_DIRECTION),
- (__v8df) _mm512_setzero_pd());
+ return (__m512d) __builtin_ia32_vfmaddpd512_maskz (-(__v8df) __A,
+ (__v8df) __B,
+ (__v8df) __C,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
}
static __inline__ __m512d __DEFAULT_FN_ATTRS
_mm512_fnmsub_pd(__m512d __A, __m512d __B, __m512d __C)
{
- return (__m512d) __builtin_ia32_vfmaddpd512 (-(__v8df) __A,
- (__v8df) __B,
- -(__v8df) __C,
- _MM_FROUND_CUR_DIRECTION);
+ return (__m512d) __builtin_ia32_vfmaddpd512_mask ((__v8df) __A,
+ -(__v8df) __B,
+ -(__v8df) __C,
+ (__mmask8) -1,
+ _MM_FROUND_CUR_DIRECTION);
}
static __inline__ __m512d __DEFAULT_FN_ATTRS
_mm512_maskz_fnmsub_pd(__mmask8 __U, __m512d __A, __m512d __B, __m512d __C)
{
- return (__m512d) __builtin_ia32_selectpd_512((__mmask8) __U,
- __builtin_ia32_vfmaddpd512 (-(__v8df) __A,
- (__v8df) __B,
- -(__v8df) __C,
- _MM_FROUND_CUR_DIRECTION),
- (__v8df) _mm512_setzero_pd());
+ return (__m512d) __builtin_ia32_vfmaddpd512_maskz (-(__v8df) __A,
+ (__v8df) __B,
+ -(__v8df) __C,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
}
#define _mm512_fmadd_round_ps(A, B, C, R) \
- (__m512)__builtin_ia32_vfmaddps512((__v16sf)(__m512)(A), \
- (__v16sf)(__m512)(B), \
- (__v16sf)(__m512)(C), (int)(R))
+ (__m512)__builtin_ia32_vfmaddps512_mask((__v16sf)(__m512)(A), \
+ (__v16sf)(__m512)(B), \
+ (__v16sf)(__m512)(C), \
+ (__mmask16)-1, (int)(R))
#define _mm512_mask_fmadd_round_ps(A, U, B, C, R) \
- (__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
- __builtin_ia32_vfmaddps512((__v16sf)(__m512)(A), \
- (__v16sf)(__m512)(B), \
- (__v16sf)(__m512)(C), \
- (int)(R)), \
- (__v16sf)(__m512)(A))
+ (__m512)__builtin_ia32_vfmaddps512_mask((__v16sf)(__m512)(A), \
+ (__v16sf)(__m512)(B), \
+ (__v16sf)(__m512)(C), \
+ (__mmask16)(U), (int)(R))
#define _mm512_mask3_fmadd_round_ps(A, B, C, U, R) \
- (__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
- __builtin_ia32_vfmaddps512((__v16sf)(__m512)(A), \
- (__v16sf)(__m512)(B), \
- (__v16sf)(__m512)(C), \
- (int)(R)), \
- (__v16sf)(__m512)(C))
+ (__m512)__builtin_ia32_vfmaddps512_mask3((__v16sf)(__m512)(A), \
+ (__v16sf)(__m512)(B), \
+ (__v16sf)(__m512)(C), \
+ (__mmask16)(U), (int)(R))
#define _mm512_maskz_fmadd_round_ps(U, A, B, C, R) \
- (__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
- __builtin_ia32_vfmaddps512((__v16sf)(__m512)(A), \
- (__v16sf)(__m512)(B), \
- (__v16sf)(__m512)(C), \
- (int)(R)), \
- (__v16sf)_mm512_setzero_ps())
+ (__m512)__builtin_ia32_vfmaddps512_maskz((__v16sf)(__m512)(A), \
+ (__v16sf)(__m512)(B), \
+ (__v16sf)(__m512)(C), \
+ (__mmask16)(U), (int)(R))
#define _mm512_fmsub_round_ps(A, B, C, R) \
- (__m512)__builtin_ia32_vfmaddps512((__v16sf)(__m512)(A), \
- (__v16sf)(__m512)(B), \
- -(__v16sf)(__m512)(C), \
- (int)(R))
+ (__m512)__builtin_ia32_vfmaddps512_mask((__v16sf)(__m512)(A), \
+ (__v16sf)(__m512)(B), \
+ -(__v16sf)(__m512)(C), \
+ (__mmask16)-1, (int)(R))
#define _mm512_mask_fmsub_round_ps(A, U, B, C, R) \
- (__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
- __builtin_ia32_vfmaddps512((__v16sf)(__m512)(A), \
- (__v16sf)(__m512)(B), \
- -(__v16sf)(__m512)(C), \
- (int)(R)), \
- (__v16sf)(__m512)(A))
+ (__m512)__builtin_ia32_vfmaddps512_mask((__v16sf)(__m512)(A), \
+ (__v16sf)(__m512)(B), \
+ -(__v16sf)(__m512)(C), \
+ (__mmask16)(U), (int)(R))
#define _mm512_maskz_fmsub_round_ps(U, A, B, C, R) \
- (__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
- __builtin_ia32_vfmaddps512((__v16sf)(__m512)(A), \
- (__v16sf)(__m512)(B), \
- -(__v16sf)(__m512)(C), \
- (int)(R)), \
- (__v16sf)_mm512_setzero_ps())
+ (__m512)__builtin_ia32_vfmaddps512_maskz((__v16sf)(__m512)(A), \
+ (__v16sf)(__m512)(B), \
+ -(__v16sf)(__m512)(C), \
+ (__mmask16)(U), (int)(R))
#define _mm512_fnmadd_round_ps(A, B, C, R) \
- (__m512)__builtin_ia32_vfmaddps512(-(__v16sf)(__m512)(A), \
- (__v16sf)(__m512)(B), \
- (__v16sf)(__m512)(C), (int)(R))
+ (__m512)__builtin_ia32_vfmaddps512_mask((__v16sf)(__m512)(A), \
+ -(__v16sf)(__m512)(B), \
+ (__v16sf)(__m512)(C), \
+ (__mmask16)-1, (int)(R))
#define _mm512_mask3_fnmadd_round_ps(A, B, C, U, R) \
- (__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
- __builtin_ia32_vfmaddps512(-(__v16sf)(__m512)(A), \
- (__v16sf)(__m512)(B), \
- (__v16sf)(__m512)(C), \
- (int)(R)), \
- (__v16sf)(__m512)(C))
+ (__m512)__builtin_ia32_vfmaddps512_mask3(-(__v16sf)(__m512)(A), \
+ (__v16sf)(__m512)(B), \
+ (__v16sf)(__m512)(C), \
+ (__mmask16)(U), (int)(R))
#define _mm512_maskz_fnmadd_round_ps(U, A, B, C, R) \
- (__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
- __builtin_ia32_vfmaddps512(-(__v16sf)(__m512)(A), \
- (__v16sf)(__m512)(B), \
- (__v16sf)(__m512)(C), \
- (int)(R)), \
- (__v16sf)_mm512_setzero_ps())
+ (__m512)__builtin_ia32_vfmaddps512_maskz(-(__v16sf)(__m512)(A), \
+ (__v16sf)(__m512)(B), \
+ (__v16sf)(__m512)(C), \
+ (__mmask16)(U), (int)(R))
#define _mm512_fnmsub_round_ps(A, B, C, R) \
- (__m512)__builtin_ia32_vfmaddps512(-(__v16sf)(__m512)(A), \
- (__v16sf)(__m512)(B), \
- -(__v16sf)(__m512)(C), \
- (int)(R))
+ (__m512)__builtin_ia32_vfmaddps512_mask((__v16sf)(__m512)(A), \
+ -(__v16sf)(__m512)(B), \
+ -(__v16sf)(__m512)(C), \
+ (__mmask16)-1, (int)(R))
#define _mm512_maskz_fnmsub_round_ps(U, A, B, C, R) \
- (__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
- __builtin_ia32_vfmaddps512(-(__v16sf)(__m512)(A), \
- (__v16sf)(__m512)(B), \
- -(__v16sf)(__m512)(C), \
- (int)(R)), \
- (__v16sf)_mm512_setzero_ps())
+ (__m512)__builtin_ia32_vfmaddps512_maskz(-(__v16sf)(__m512)(A), \
+ (__v16sf)(__m512)(B), \
+ -(__v16sf)(__m512)(C), \
+ (__mmask16)(U), (int)(R))
static __inline__ __m512 __DEFAULT_FN_ATTRS
_mm512_fmadd_ps(__m512 __A, __m512 __B, __m512 __C)
{
- return (__m512) __builtin_ia32_vfmaddps512 ((__v16sf) __A,
- (__v16sf) __B,
- (__v16sf) __C,
- _MM_FROUND_CUR_DIRECTION);
+ return (__m512) __builtin_ia32_vfmaddps512_mask ((__v16sf) __A,
+ (__v16sf) __B,
+ (__v16sf) __C,
+ (__mmask16) -1,
+ _MM_FROUND_CUR_DIRECTION);
}
static __inline__ __m512 __DEFAULT_FN_ATTRS
_mm512_mask_fmadd_ps(__m512 __A, __mmask16 __U, __m512 __B, __m512 __C)
{
- return (__m512) __builtin_ia32_selectps_512((__mmask16) __U,
- __builtin_ia32_vfmaddps512 ((__v16sf) __A,
- (__v16sf) __B,
- (__v16sf) __C,
- _MM_FROUND_CUR_DIRECTION),
- (__v16sf) __A);
+ return (__m512) __builtin_ia32_vfmaddps512_mask ((__v16sf) __A,
+ (__v16sf) __B,
+ (__v16sf) __C,
+ (__mmask16) __U,
+ _MM_FROUND_CUR_DIRECTION);
}
static __inline__ __m512 __DEFAULT_FN_ATTRS
_mm512_mask3_fmadd_ps(__m512 __A, __m512 __B, __m512 __C, __mmask16 __U)
{
- return (__m512) __builtin_ia32_selectps_512((__mmask16) __U,
- __builtin_ia32_vfmaddps512 ((__v16sf) __A,
- (__v16sf) __B,
- (__v16sf) __C,
- _MM_FROUND_CUR_DIRECTION),
- (__v16sf) __C);
+ return (__m512) __builtin_ia32_vfmaddps512_mask3 ((__v16sf) __A,
+ (__v16sf) __B,
+ (__v16sf) __C,
+ (__mmask16) __U,
+ _MM_FROUND_CUR_DIRECTION);
}
static __inline__ __m512 __DEFAULT_FN_ATTRS
_mm512_maskz_fmadd_ps(__mmask16 __U, __m512 __A, __m512 __B, __m512 __C)
{
- return (__m512) __builtin_ia32_selectps_512((__mmask16) __U,
- __builtin_ia32_vfmaddps512 ((__v16sf) __A,
- (__v16sf) __B,
- (__v16sf) __C,
- _MM_FROUND_CUR_DIRECTION),
- (__v16sf) _mm512_setzero_ps());
+ return (__m512) __builtin_ia32_vfmaddps512_maskz ((__v16sf) __A,
+ (__v16sf) __B,
+ (__v16sf) __C,
+ (__mmask16) __U,
+ _MM_FROUND_CUR_DIRECTION);
}
static __inline__ __m512 __DEFAULT_FN_ATTRS
_mm512_fmsub_ps(__m512 __A, __m512 __B, __m512 __C)
{
- return (__m512) __builtin_ia32_vfmaddps512 ((__v16sf) __A,
- (__v16sf) __B,
- -(__v16sf) __C,
- _MM_FROUND_CUR_DIRECTION);
+ return (__m512) __builtin_ia32_vfmaddps512_mask ((__v16sf) __A,
+ (__v16sf) __B,
+ -(__v16sf) __C,
+ (__mmask16) -1,
+ _MM_FROUND_CUR_DIRECTION);
}
static __inline__ __m512 __DEFAULT_FN_ATTRS
_mm512_mask_fmsub_ps(__m512 __A, __mmask16 __U, __m512 __B, __m512 __C)
{
- return (__m512) __builtin_ia32_selectps_512((__mmask16) __U,
- __builtin_ia32_vfmaddps512 ((__v16sf) __A,
- (__v16sf) __B,
- -(__v16sf) __C,
- _MM_FROUND_CUR_DIRECTION),
- (__v16sf) __A);
+ return (__m512) __builtin_ia32_vfmaddps512_mask ((__v16sf) __A,
+ (__v16sf) __B,
+ -(__v16sf) __C,
+ (__mmask16) __U,
+ _MM_FROUND_CUR_DIRECTION);
}
static __inline__ __m512 __DEFAULT_FN_ATTRS
_mm512_maskz_fmsub_ps(__mmask16 __U, __m512 __A, __m512 __B, __m512 __C)
{
- return (__m512) __builtin_ia32_selectps_512((__mmask16) __U,
- __builtin_ia32_vfmaddps512 ((__v16sf) __A,
- (__v16sf) __B,
- -(__v16sf) __C,
- _MM_FROUND_CUR_DIRECTION),
- (__v16sf) _mm512_setzero_ps());
+ return (__m512) __builtin_ia32_vfmaddps512_maskz ((__v16sf) __A,
+ (__v16sf) __B,
+ -(__v16sf) __C,
+ (__mmask16) __U,
+ _MM_FROUND_CUR_DIRECTION);
}
static __inline__ __m512 __DEFAULT_FN_ATTRS
_mm512_fnmadd_ps(__m512 __A, __m512 __B, __m512 __C)
{
- return (__m512) __builtin_ia32_vfmaddps512 (-(__v16sf) __A,
- (__v16sf) __B,
- (__v16sf) __C,
- _MM_FROUND_CUR_DIRECTION);
+ return (__m512) __builtin_ia32_vfmaddps512_mask ((__v16sf) __A,
+ -(__v16sf) __B,
+ (__v16sf) __C,
+ (__mmask16) -1,
+ _MM_FROUND_CUR_DIRECTION);
}
static __inline__ __m512 __DEFAULT_FN_ATTRS
_mm512_mask3_fnmadd_ps(__m512 __A, __m512 __B, __m512 __C, __mmask16 __U)
{
- return (__m512) __builtin_ia32_selectps_512((__mmask16) __U,
- __builtin_ia32_vfmaddps512 (-(__v16sf) __A,
- (__v16sf) __B,
- (__v16sf) __C,
- _MM_FROUND_CUR_DIRECTION),
- (__v16sf) __C);
+ return (__m512) __builtin_ia32_vfmaddps512_mask3 (-(__v16sf) __A,
+ (__v16sf) __B,
+ (__v16sf) __C,
+ (__mmask16) __U,
+ _MM_FROUND_CUR_DIRECTION);
}
static __inline__ __m512 __DEFAULT_FN_ATTRS
_mm512_maskz_fnmadd_ps(__mmask16 __U, __m512 __A, __m512 __B, __m512 __C)
{
- return (__m512) __builtin_ia32_selectps_512((__mmask16) __U,
- __builtin_ia32_vfmaddps512 (-(__v16sf) __A,
- (__v16sf) __B,
- (__v16sf) __C,
- _MM_FROUND_CUR_DIRECTION),
- (__v16sf) _mm512_setzero_ps());
+ return (__m512) __builtin_ia32_vfmaddps512_maskz (-(__v16sf) __A,
+ (__v16sf) __B,
+ (__v16sf) __C,
+ (__mmask16) __U,
+ _MM_FROUND_CUR_DIRECTION);
}
static __inline__ __m512 __DEFAULT_FN_ATTRS
_mm512_fnmsub_ps(__m512 __A, __m512 __B, __m512 __C)
{
- return (__m512) __builtin_ia32_vfmaddps512 (-(__v16sf) __A,
- (__v16sf) __B,
- -(__v16sf) __C,
- _MM_FROUND_CUR_DIRECTION);
+ return (__m512) __builtin_ia32_vfmaddps512_mask ((__v16sf) __A,
+ -(__v16sf) __B,
+ -(__v16sf) __C,
+ (__mmask16) -1,
+ _MM_FROUND_CUR_DIRECTION);
}
static __inline__ __m512 __DEFAULT_FN_ATTRS
_mm512_maskz_fnmsub_ps(__mmask16 __U, __m512 __A, __m512 __B, __m512 __C)
{
- return (__m512) __builtin_ia32_selectps_512((__mmask16) __U,
- __builtin_ia32_vfmaddps512 (-(__v16sf) __A,
- (__v16sf) __B,
- -(__v16sf) __C,
- _MM_FROUND_CUR_DIRECTION),
- (__v16sf) _mm512_setzero_ps());
+ return (__m512) __builtin_ia32_vfmaddps512_maskz (-(__v16sf) __A,
+ (__v16sf) __B,
+ -(__v16sf) __C,
+ (__mmask16) __U,
+ _MM_FROUND_CUR_DIRECTION);
}
#define _mm512_fmaddsub_round_pd(A, B, C, R) \
- (__m512d)__builtin_ia32_vfmaddsubpd512((__v8df)(__m512d)(A), \
- (__v8df)(__m512d)(B), \
- (__v8df)(__m512d)(C), \
- (int)(R))
+ (__m512d)__builtin_ia32_vfmaddsubpd512_mask((__v8df)(__m512d)(A), \
+ (__v8df)(__m512d)(B), \
+ (__v8df)(__m512d)(C), \
+ (__mmask8)-1, (int)(R))
#define _mm512_mask_fmaddsub_round_pd(A, U, B, C, R) \
- (__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
- __builtin_ia32_vfmaddsubpd512((__v8df)(__m512d)(A), \
- (__v8df)(__m512d)(B), \
- (__v8df)(__m512d)(C), \
- (int)(R)), \
- (__v8df)(__m512d)(A))
+ (__m512d)__builtin_ia32_vfmaddsubpd512_mask((__v8df)(__m512d)(A), \
+ (__v8df)(__m512d)(B), \
+ (__v8df)(__m512d)(C), \
+ (__mmask8)(U), (int)(R))
#define _mm512_mask3_fmaddsub_round_pd(A, B, C, U, R) \
- (__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
- __builtin_ia32_vfmaddsubpd512((__v8df)(__m512d)(A), \
- (__v8df)(__m512d)(B), \
- (__v8df)(__m512d)(C), \
- (int)(R)), \
- (__v8df)(__m512d)(C))
+ (__m512d)__builtin_ia32_vfmaddsubpd512_mask3((__v8df)(__m512d)(A), \
+ (__v8df)(__m512d)(B), \
+ (__v8df)(__m512d)(C), \
+ (__mmask8)(U), (int)(R))
#define _mm512_maskz_fmaddsub_round_pd(U, A, B, C, R) \
- (__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
- __builtin_ia32_vfmaddsubpd512((__v8df)(__m512d)(A), \
- (__v8df)(__m512d)(B), \
- (__v8df)(__m512d)(C), \
- (int)(R)), \
- (__v8df)_mm512_setzero_pd())
+ (__m512d)__builtin_ia32_vfmaddsubpd512_maskz((__v8df)(__m512d)(A), \
+ (__v8df)(__m512d)(B), \
+ (__v8df)(__m512d)(C), \
+ (__mmask8)(U), (int)(R))
#define _mm512_fmsubadd_round_pd(A, B, C, R) \
- (__m512d)__builtin_ia32_vfmaddsubpd512((__v8df)(__m512d)(A), \
- (__v8df)(__m512d)(B), \
- -(__v8df)(__m512d)(C), \
- (int)(R))
+ (__m512d)__builtin_ia32_vfmaddsubpd512_mask((__v8df)(__m512d)(A), \
+ (__v8df)(__m512d)(B), \
+ -(__v8df)(__m512d)(C), \
+ (__mmask8)-1, (int)(R))
#define _mm512_mask_fmsubadd_round_pd(A, U, B, C, R) \
- (__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
- __builtin_ia32_vfmaddsubpd512((__v8df)(__m512d)(A), \
- (__v8df)(__m512d)(B), \
- -(__v8df)(__m512d)(C), \
- (int)(R)), \
- (__v8df)(__m512d)(A))
+ (__m512d)__builtin_ia32_vfmaddsubpd512_mask((__v8df)(__m512d)(A), \
+ (__v8df)(__m512d)(B), \
+ -(__v8df)(__m512d)(C), \
+ (__mmask8)(U), (int)(R))
#define _mm512_maskz_fmsubadd_round_pd(U, A, B, C, R) \
- (__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
- __builtin_ia32_vfmaddsubpd512((__v8df)(__m512d)(A), \
- (__v8df)(__m512d)(B), \
- -(__v8df)(__m512d)(C), \
- (int)(R)), \
- (__v8df)_mm512_setzero_pd())
+ (__m512d)__builtin_ia32_vfmaddsubpd512_maskz((__v8df)(__m512d)(A), \
+ (__v8df)(__m512d)(B), \
+ -(__v8df)(__m512d)(C), \
+ (__mmask8)(U), (int)(R))
static __inline__ __m512d __DEFAULT_FN_ATTRS
_mm512_fmaddsub_pd(__m512d __A, __m512d __B, __m512d __C)
{
- return (__m512d) __builtin_ia32_vfmaddsubpd512 ((__v8df) __A,
- (__v8df) __B,
- (__v8df) __C,
- _MM_FROUND_CUR_DIRECTION);
+ return (__m512d) __builtin_ia32_vfmaddsubpd512_mask ((__v8df) __A,
+ (__v8df) __B,
+ (__v8df) __C,
+ (__mmask8) -1,
+ _MM_FROUND_CUR_DIRECTION);
}
static __inline__ __m512d __DEFAULT_FN_ATTRS
_mm512_mask_fmaddsub_pd(__m512d __A, __mmask8 __U, __m512d __B, __m512d __C)
{
- return (__m512d) __builtin_ia32_selectpd_512((__mmask8) __U,
- __builtin_ia32_vfmaddsubpd512 ((__v8df) __A,
- (__v8df) __B,
- (__v8df) __C,
- _MM_FROUND_CUR_DIRECTION),
- (__v8df) __A);
+ return (__m512d) __builtin_ia32_vfmaddsubpd512_mask ((__v8df) __A,
+ (__v8df) __B,
+ (__v8df) __C,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
}
static __inline__ __m512d __DEFAULT_FN_ATTRS
_mm512_mask3_fmaddsub_pd(__m512d __A, __m512d __B, __m512d __C, __mmask8 __U)
{
- return (__m512d) __builtin_ia32_selectpd_512((__mmask8) __U,
- __builtin_ia32_vfmaddsubpd512 ((__v8df) __A,
- (__v8df) __B,
- (__v8df) __C,
- _MM_FROUND_CUR_DIRECTION),
- (__v8df) __C);
+ return (__m512d) __builtin_ia32_vfmaddsubpd512_mask3 ((__v8df) __A,
+ (__v8df) __B,
+ (__v8df) __C,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
}
static __inline__ __m512d __DEFAULT_FN_ATTRS
_mm512_maskz_fmaddsub_pd(__mmask8 __U, __m512d __A, __m512d __B, __m512d __C)
{
- return (__m512d) __builtin_ia32_selectpd_512((__mmask8) __U,
- __builtin_ia32_vfmaddsubpd512 ((__v8df) __A,
- (__v8df) __B,
- (__v8df) __C,
- _MM_FROUND_CUR_DIRECTION),
- (__v8df) _mm512_setzero_pd());
+ return (__m512d) __builtin_ia32_vfmaddsubpd512_maskz ((__v8df) __A,
+ (__v8df) __B,
+ (__v8df) __C,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
}
static __inline__ __m512d __DEFAULT_FN_ATTRS
_mm512_fmsubadd_pd(__m512d __A, __m512d __B, __m512d __C)
{
- return (__m512d) __builtin_ia32_vfmaddsubpd512 ((__v8df) __A,
- (__v8df) __B,
- -(__v8df) __C,
- _MM_FROUND_CUR_DIRECTION);
+ return (__m512d) __builtin_ia32_vfmaddsubpd512_mask ((__v8df) __A,
+ (__v8df) __B,
+ -(__v8df) __C,
+ (__mmask8) -1,
+ _MM_FROUND_CUR_DIRECTION);
}
static __inline__ __m512d __DEFAULT_FN_ATTRS
_mm512_mask_fmsubadd_pd(__m512d __A, __mmask8 __U, __m512d __B, __m512d __C)
{
- return (__m512d) __builtin_ia32_selectpd_512((__mmask8) __U,
- __builtin_ia32_vfmaddsubpd512 ((__v8df) __A,
- (__v8df) __B,
- -(__v8df) __C,
- _MM_FROUND_CUR_DIRECTION),
- (__v8df) __A);
+ return (__m512d) __builtin_ia32_vfmaddsubpd512_mask ((__v8df) __A,
+ (__v8df) __B,
+ -(__v8df) __C,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
}
static __inline__ __m512d __DEFAULT_FN_ATTRS
_mm512_maskz_fmsubadd_pd(__mmask8 __U, __m512d __A, __m512d __B, __m512d __C)
{
- return (__m512d) __builtin_ia32_selectpd_512((__mmask8) __U,
- __builtin_ia32_vfmaddsubpd512 ((__v8df) __A,
- (__v8df) __B,
- -(__v8df) __C,
- _MM_FROUND_CUR_DIRECTION),
- (__v8df) _mm512_setzero_pd());
+ return (__m512d) __builtin_ia32_vfmaddsubpd512_maskz ((__v8df) __A,
+ (__v8df) __B,
+ -(__v8df) __C,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
}
#define _mm512_fmaddsub_round_ps(A, B, C, R) \
- (__m512)__builtin_ia32_vfmaddsubps512((__v16sf)(__m512)(A), \
- (__v16sf)(__m512)(B), \
- (__v16sf)(__m512)(C), \
- (int)(R))
+ (__m512)__builtin_ia32_vfmaddsubps512_mask((__v16sf)(__m512)(A), \
+ (__v16sf)(__m512)(B), \
+ (__v16sf)(__m512)(C), \
+ (__mmask16)-1, (int)(R))
#define _mm512_mask_fmaddsub_round_ps(A, U, B, C, R) \
- (__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
- __builtin_ia32_vfmaddsubps512((__v16sf)(__m512)(A), \
- (__v16sf)(__m512)(B), \
- (__v16sf)(__m512)(C), \
- (int)(R)), \
- (__v16sf)(__m512)(A))
+ (__m512)__builtin_ia32_vfmaddsubps512_mask((__v16sf)(__m512)(A), \
+ (__v16sf)(__m512)(B), \
+ (__v16sf)(__m512)(C), \
+ (__mmask16)(U), (int)(R))
#define _mm512_mask3_fmaddsub_round_ps(A, B, C, U, R) \
- (__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
- __builtin_ia32_vfmaddsubps512((__v16sf)(__m512)(A), \
- (__v16sf)(__m512)(B), \
- (__v16sf)(__m512)(C), \
- (int)(R)), \
- (__v16sf)(__m512)(C))
+ (__m512)__builtin_ia32_vfmaddsubps512_mask3((__v16sf)(__m512)(A), \
+ (__v16sf)(__m512)(B), \
+ (__v16sf)(__m512)(C), \
+ (__mmask16)(U), (int)(R))
#define _mm512_maskz_fmaddsub_round_ps(U, A, B, C, R) \
- (__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
- __builtin_ia32_vfmaddsubps512((__v16sf)(__m512)(A), \
- (__v16sf)(__m512)(B), \
- (__v16sf)(__m512)(C), \
- (int)(R)), \
- (__v16sf)_mm512_setzero_ps())
+ (__m512)__builtin_ia32_vfmaddsubps512_maskz((__v16sf)(__m512)(A), \
+ (__v16sf)(__m512)(B), \
+ (__v16sf)(__m512)(C), \
+ (__mmask16)(U), (int)(R))
#define _mm512_fmsubadd_round_ps(A, B, C, R) \
- (__m512)__builtin_ia32_vfmaddsubps512((__v16sf)(__m512)(A), \
- (__v16sf)(__m512)(B), \
- -(__v16sf)(__m512)(C), \
- (int)(R))
+ (__m512)__builtin_ia32_vfmaddsubps512_mask((__v16sf)(__m512)(A), \
+ (__v16sf)(__m512)(B), \
+ -(__v16sf)(__m512)(C), \
+ (__mmask16)-1, (int)(R))
#define _mm512_mask_fmsubadd_round_ps(A, U, B, C, R) \
- (__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
- __builtin_ia32_vfmaddsubps512((__v16sf)(__m512)(A), \
- (__v16sf)(__m512)(B), \
- -(__v16sf)(__m512)(C), \
- (int)(R)), \
- (__v16sf)(__m512)(A))
+ (__m512)__builtin_ia32_vfmaddsubps512_mask((__v16sf)(__m512)(A), \
+ (__v16sf)(__m512)(B), \
+ -(__v16sf)(__m512)(C), \
+ (__mmask16)(U), (int)(R))
#define _mm512_maskz_fmsubadd_round_ps(U, A, B, C, R) \
- (__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
- __builtin_ia32_vfmaddsubps512((__v16sf)(__m512)(A), \
- (__v16sf)(__m512)(B), \
- -(__v16sf)(__m512)(C), \
- (int)(R)), \
- (__v16sf)_mm512_setzero_ps())
+ (__m512)__builtin_ia32_vfmaddsubps512_maskz((__v16sf)(__m512)(A), \
+ (__v16sf)(__m512)(B), \
+ -(__v16sf)(__m512)(C), \
+ (__mmask16)(U), (int)(R))
static __inline__ __m512 __DEFAULT_FN_ATTRS
_mm512_fmaddsub_ps(__m512 __A, __m512 __B, __m512 __C)
{
- return (__m512) __builtin_ia32_vfmaddsubps512 ((__v16sf) __A,
- (__v16sf) __B,
- (__v16sf) __C,
- _MM_FROUND_CUR_DIRECTION);
+ return (__m512) __builtin_ia32_vfmaddsubps512_mask ((__v16sf) __A,
+ (__v16sf) __B,
+ (__v16sf) __C,
+ (__mmask16) -1,
+ _MM_FROUND_CUR_DIRECTION);
}
static __inline__ __m512 __DEFAULT_FN_ATTRS
_mm512_mask_fmaddsub_ps(__m512 __A, __mmask16 __U, __m512 __B, __m512 __C)
{
- return (__m512) __builtin_ia32_selectps_512((__mmask16) __U,
- __builtin_ia32_vfmaddsubps512 ((__v16sf) __A,
- (__v16sf) __B,
- (__v16sf) __C,
- _MM_FROUND_CUR_DIRECTION),
- (__v16sf) __A);
+ return (__m512) __builtin_ia32_vfmaddsubps512_mask ((__v16sf) __A,
+ (__v16sf) __B,
+ (__v16sf) __C,
+ (__mmask16) __U,
+ _MM_FROUND_CUR_DIRECTION);
}
static __inline__ __m512 __DEFAULT_FN_ATTRS
_mm512_mask3_fmaddsub_ps(__m512 __A, __m512 __B, __m512 __C, __mmask16 __U)
{
- return (__m512) __builtin_ia32_selectps_512((__mmask16) __U,
- __builtin_ia32_vfmaddsubps512 ((__v16sf) __A,
- (__v16sf) __B,
- (__v16sf) __C,
- _MM_FROUND_CUR_DIRECTION),
- (__v16sf) __C);
+ return (__m512) __builtin_ia32_vfmaddsubps512_mask3 ((__v16sf) __A,
+ (__v16sf) __B,
+ (__v16sf) __C,
+ (__mmask16) __U,
+ _MM_FROUND_CUR_DIRECTION);
}
static __inline__ __m512 __DEFAULT_FN_ATTRS
_mm512_maskz_fmaddsub_ps(__mmask16 __U, __m512 __A, __m512 __B, __m512 __C)
{
- return (__m512) __builtin_ia32_selectps_512((__mmask16) __U,
- __builtin_ia32_vfmaddsubps512 ((__v16sf) __A,
- (__v16sf) __B,
- (__v16sf) __C,
- _MM_FROUND_CUR_DIRECTION),
- (__v16sf) _mm512_setzero_ps());
+ return (__m512) __builtin_ia32_vfmaddsubps512_maskz ((__v16sf) __A,
+ (__v16sf) __B,
+ (__v16sf) __C,
+ (__mmask16) __U,
+ _MM_FROUND_CUR_DIRECTION);
}
static __inline__ __m512 __DEFAULT_FN_ATTRS
_mm512_fmsubadd_ps(__m512 __A, __m512 __B, __m512 __C)
{
- return (__m512) __builtin_ia32_vfmaddsubps512 ((__v16sf) __A,
- (__v16sf) __B,
- -(__v16sf) __C,
- _MM_FROUND_CUR_DIRECTION);
+ return (__m512) __builtin_ia32_vfmaddsubps512_mask ((__v16sf) __A,
+ (__v16sf) __B,
+ -(__v16sf) __C,
+ (__mmask16) -1,
+ _MM_FROUND_CUR_DIRECTION);
}
static __inline__ __m512 __DEFAULT_FN_ATTRS
_mm512_mask_fmsubadd_ps(__m512 __A, __mmask16 __U, __m512 __B, __m512 __C)
{
- return (__m512) __builtin_ia32_selectps_512((__mmask16) __U,
- __builtin_ia32_vfmaddsubps512 ((__v16sf) __A,
- (__v16sf) __B,
- -(__v16sf) __C,
- _MM_FROUND_CUR_DIRECTION),
- (__v16sf) __A);
+ return (__m512) __builtin_ia32_vfmaddsubps512_mask ((__v16sf) __A,
+ (__v16sf) __B,
+ -(__v16sf) __C,
+ (__mmask16) __U,
+ _MM_FROUND_CUR_DIRECTION);
}
static __inline__ __m512 __DEFAULT_FN_ATTRS
_mm512_maskz_fmsubadd_ps(__mmask16 __U, __m512 __A, __m512 __B, __m512 __C)
{
- return (__m512) __builtin_ia32_selectps_512((__mmask16) __U,
- __builtin_ia32_vfmaddsubps512 ((__v16sf) __A,
- (__v16sf) __B,
- -(__v16sf) __C,
- _MM_FROUND_CUR_DIRECTION),
- (__v16sf) _mm512_setzero_ps());
+ return (__m512) __builtin_ia32_vfmaddsubps512_maskz ((__v16sf) __A,
+ (__v16sf) __B,
+ -(__v16sf) __C,
+ (__mmask16) __U,
+ _MM_FROUND_CUR_DIRECTION);
}
#define _mm512_mask3_fmsub_round_pd(A, B, C, U, R) \
- (__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
- (__m512d)__builtin_ia32_vfmaddpd512((__v8df)(__m512d)(A), \
- (__v8df)(__m512d)(B), \
- -(__v8df)(__m512d)(C), \
- (int)(R)), \
- (__v8df)(__m512d)(C))
+ (__m512d)__builtin_ia32_vfmsubpd512_mask3((__v8df)(__m512d)(A), \
+ (__v8df)(__m512d)(B), \
+ (__v8df)(__m512d)(C), \
+ (__mmask8)(U), (int)(R))
static __inline__ __m512d __DEFAULT_FN_ATTRS
_mm512_mask3_fmsub_pd(__m512d __A, __m512d __B, __m512d __C, __mmask8 __U)
{
- return (__m512d) __builtin_ia32_selectpd_512((__mmask8) __U,
- (__m512d)__builtin_ia32_vfmaddpd512 ((__v8df) __A,
- (__v8df) __B,
- -(__v8df) __C,
- _MM_FROUND_CUR_DIRECTION),
- (__v8df) __C);
+ return (__m512d)__builtin_ia32_vfmsubpd512_mask3 ((__v8df) __A,
+ (__v8df) __B,
+ (__v8df) __C,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
}
#define _mm512_mask3_fmsub_round_ps(A, B, C, U, R) \
- (__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
- (__m512)__builtin_ia32_vfmaddps512((__v16sf)(__m512)(A), \
- (__v16sf)(__m512)(B), \
- -(__v16sf)(__m512)(C), \
- (int)(R)), \
- (__v16sf)(__m512)(C))
+ (__m512)__builtin_ia32_vfmsubps512_mask3((__v16sf)(__m512)(A), \
+ (__v16sf)(__m512)(B), \
+ (__v16sf)(__m512)(C), \
+ (__mmask16)(U), (int)(R))
static __inline__ __m512 __DEFAULT_FN_ATTRS
_mm512_mask3_fmsub_ps(__m512 __A, __m512 __B, __m512 __C, __mmask16 __U)
{
- return (__m512) __builtin_ia32_selectps_512((__mmask16) __U,
- (__m512)__builtin_ia32_vfmaddps512 ((__v16sf) __A,
- (__v16sf) __B,
- -(__v16sf) __C,
- _MM_FROUND_CUR_DIRECTION),
- (__v16sf) __C);
+ return (__m512)__builtin_ia32_vfmsubps512_mask3 ((__v16sf) __A,
+ (__v16sf) __B,
+ (__v16sf) __C,
+ (__mmask16) __U,
+ _MM_FROUND_CUR_DIRECTION);
}
#define _mm512_mask3_fmsubadd_round_pd(A, B, C, U, R) \
- (__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
- (__m512d)__builtin_ia32_vfmaddsubpd512((__v8df)(__m512d)(A), \
- (__v8df)(__m512d)(B), \
- -(__v8df)(__m512d)(C), \
- (int)(R)), \
- (__v8df)(__m512d)(C))
+ (__m512d)__builtin_ia32_vfmsubaddpd512_mask3((__v8df)(__m512d)(A), \
+ (__v8df)(__m512d)(B), \
+ (__v8df)(__m512d)(C), \
+ (__mmask8)(U), (int)(R))
static __inline__ __m512d __DEFAULT_FN_ATTRS
_mm512_mask3_fmsubadd_pd(__m512d __A, __m512d __B, __m512d __C, __mmask8 __U)
{
- return (__m512d) __builtin_ia32_selectpd_512((__mmask8) __U,
- (__m512d)__builtin_ia32_vfmaddsubpd512 ((__v8df) __A,
- (__v8df) __B,
- -(__v8df) __C,
- _MM_FROUND_CUR_DIRECTION),
- (__v8df) __C);
+ return (__m512d)__builtin_ia32_vfmsubaddpd512_mask3 ((__v8df) __A,
+ (__v8df) __B,
+ (__v8df) __C,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
}
#define _mm512_mask3_fmsubadd_round_ps(A, B, C, U, R) \
- (__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
- (__m512)__builtin_ia32_vfmaddsubps512((__v16sf)(__m512)(A), \
- (__v16sf)(__m512)(B), \
- -(__v16sf)(__m512)(C), \
- (int)(R)), \
- (__v16sf)(__m512)(C))
+ (__m512)__builtin_ia32_vfmsubaddps512_mask3((__v16sf)(__m512)(A), \
+ (__v16sf)(__m512)(B), \
+ (__v16sf)(__m512)(C), \
+ (__mmask16)(U), (int)(R))
static __inline__ __m512 __DEFAULT_FN_ATTRS
_mm512_mask3_fmsubadd_ps(__m512 __A, __m512 __B, __m512 __C, __mmask16 __U)
{
- return (__m512) __builtin_ia32_selectps_512((__mmask16) __U,
- (__m512)__builtin_ia32_vfmaddsubps512 ((__v16sf) __A,
- (__v16sf) __B,
- -(__v16sf) __C,
- _MM_FROUND_CUR_DIRECTION),
- (__v16sf) __C);
+ return (__m512)__builtin_ia32_vfmsubaddps512_mask3 ((__v16sf) __A,
+ (__v16sf) __B,
+ (__v16sf) __C,
+ (__mmask16) __U,
+ _MM_FROUND_CUR_DIRECTION);
}
#define _mm512_mask_fnmadd_round_pd(A, U, B, C, R) \
- (__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
- __builtin_ia32_vfmaddpd512(-(__v8df)(__m512d)(A), \
- (__v8df)(__m512d)(B), \
- (__v8df)(__m512d)(C), \
- (int)(R)), \
- (__v8df)(__m512d)(A))
+ (__m512d)__builtin_ia32_vfmaddpd512_mask((__v8df)(__m512d)(A), \
+ -(__v8df)(__m512d)(B), \
+ (__v8df)(__m512d)(C), \
+ (__mmask8)(U), (int)(R))
static __inline__ __m512d __DEFAULT_FN_ATTRS
_mm512_mask_fnmadd_pd(__m512d __A, __mmask8 __U, __m512d __B, __m512d __C)
{
- return (__m512d) __builtin_ia32_selectpd_512((__mmask8) __U,
- __builtin_ia32_vfmaddpd512 (-(__v8df) __A,
- (__v8df) __B,
- (__v8df) __C,
- _MM_FROUND_CUR_DIRECTION),
- (__v8df) __A);
+ return (__m512d) __builtin_ia32_vfmaddpd512_mask ((__v8df) __A,
+ -(__v8df) __B,
+ (__v8df) __C,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
}
#define _mm512_mask_fnmadd_round_ps(A, U, B, C, R) \
- (__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
- __builtin_ia32_vfmaddps512(-(__v16sf)(__m512)(A), \
- (__v16sf)(__m512)(B), \
- (__v16sf)(__m512)(C), \
- (int)(R)), \
- (__v16sf)(__m512)(A))
+ (__m512)__builtin_ia32_vfmaddps512_mask((__v16sf)(__m512)(A), \
+ -(__v16sf)(__m512)(B), \
+ (__v16sf)(__m512)(C), \
+ (__mmask16)(U), (int)(R))
static __inline__ __m512 __DEFAULT_FN_ATTRS
_mm512_mask_fnmadd_ps(__m512 __A, __mmask16 __U, __m512 __B, __m512 __C)
{
- return (__m512) __builtin_ia32_selectps_512((__mmask16) __U,
- __builtin_ia32_vfmaddps512 (-(__v16sf) __A,
- (__v16sf) __B,
- (__v16sf) __C,
- _MM_FROUND_CUR_DIRECTION),
- (__v16sf) __A);
+ return (__m512) __builtin_ia32_vfmaddps512_mask ((__v16sf) __A,
+ -(__v16sf) __B,
+ (__v16sf) __C,
+ (__mmask16) __U,
+ _MM_FROUND_CUR_DIRECTION);
}
#define _mm512_mask_fnmsub_round_pd(A, U, B, C, R) \
- (__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
- (__m512d)__builtin_ia32_vfmaddpd512((__v8df)(__m512d)(A), \
- -(__v8df)(__m512d)(B), \
- -(__v8df)(__m512d)(C), \
- (int)(R)), \
- (__v8df)(__m512d)(A))
+ (__m512d)__builtin_ia32_vfmaddpd512_mask((__v8df)(__m512d)(A), \
+ -(__v8df)(__m512d)(B), \
+ -(__v8df)(__m512d)(C), \
+ (__mmask8)(U), (int)(R))
#define _mm512_mask3_fnmsub_round_pd(A, B, C, U, R) \
- (__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
- (__m512d)__builtin_ia32_vfmaddpd512((__v8df)(__m512d)(A), \
- -(__v8df)(__m512d)(B), \
- -(__v8df)(__m512d)(C), \
- (int)(R)), \
- (__v8df)(__m512d)(C))
+ (__m512d)__builtin_ia32_vfmsubpd512_mask3(-(__v8df)(__m512d)(A), \
+ (__v8df)(__m512d)(B), \
+ (__v8df)(__m512d)(C), \
+ (__mmask8)(U), (int)(R))
static __inline__ __m512d __DEFAULT_FN_ATTRS
_mm512_mask_fnmsub_pd(__m512d __A, __mmask8 __U, __m512d __B, __m512d __C)
{
- return (__m512d) __builtin_ia32_selectpd_512((__mmask16) __U,
- (__m512d) __builtin_ia32_vfmaddpd512 ((__v8df) __A,
- -(__v8df) __B,
- -(__v8df) __C,
- _MM_FROUND_CUR_DIRECTION),
- (__v8df) __A);
+ return (__m512d) __builtin_ia32_vfmaddpd512_mask ((__v8df) __A,
+ -(__v8df) __B,
+ -(__v8df) __C,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
}
static __inline__ __m512d __DEFAULT_FN_ATTRS
_mm512_mask3_fnmsub_pd(__m512d __A, __m512d __B, __m512d __C, __mmask8 __U)
{
- return (__m512d) __builtin_ia32_selectpd_512((__mmask16) __U,
- (__m512d) __builtin_ia32_vfmaddpd512 ((__v8df) __A,
- -(__v8df) __B,
- -(__v8df) __C,
- _MM_FROUND_CUR_DIRECTION),
- (__v8df) __C);
+ return (__m512d) __builtin_ia32_vfmsubpd512_mask3 (-(__v8df) __A,
+ (__v8df) __B,
+ (__v8df) __C,
+ (__mmask8) __U,
+ _MM_FROUND_CUR_DIRECTION);
}
#define _mm512_mask_fnmsub_round_ps(A, U, B, C, R) \
- (__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
- (__m512)__builtin_ia32_vfmaddps512 ((__v16sf)(__m512)(A), \
- -(__v16sf)(__m512)(B), \
- -(__v16sf)(__m512)(C), \
- (int)(R)), \
- (__v16sf)(__m512)(A))
+ (__m512)__builtin_ia32_vfmaddps512_mask((__v16sf)(__m512)(A), \
+ -(__v16sf)(__m512)(B), \
+ -(__v16sf)(__m512)(C), \
+ (__mmask16)(U), (int)(R))
#define _mm512_mask3_fnmsub_round_ps(A, B, C, U, R) \
- (__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
- (__m512)__builtin_ia32_vfmaddps512 ((__v16sf)(__m512)(A), \
- -(__v16sf)(__m512)(B), \
- -(__v16sf)(__m512)(C), \
- (int)(R)), \
- (__v16sf)(__m512)(C))
+ (__m512)__builtin_ia32_vfmsubps512_mask3(-(__v16sf)(__m512)(A), \
+ (__v16sf)(__m512)(B), \
+ (__v16sf)(__m512)(C), \
+ (__mmask16)(U), (int)(R))
static __inline__ __m512 __DEFAULT_FN_ATTRS
_mm512_mask_fnmsub_ps(__m512 __A, __mmask16 __U, __m512 __B, __m512 __C)
{
- return (__m512) __builtin_ia32_selectps_512((__mmask16) __U,
- (__m512) __builtin_ia32_vfmaddps512 ((__v16sf) __A,
- -(__v16sf) __B,
- -(__v16sf) __C,
- _MM_FROUND_CUR_DIRECTION),
- (__v16sf) __A);
+ return (__m512) __builtin_ia32_vfmaddps512_mask ((__v16sf) __A,
+ -(__v16sf) __B,
+ -(__v16sf) __C,
+ (__mmask16) __U,
+ _MM_FROUND_CUR_DIRECTION);
}
static __inline__ __m512 __DEFAULT_FN_ATTRS
_mm512_mask3_fnmsub_ps(__m512 __A, __m512 __B, __m512 __C, __mmask16 __U)
{
- return (__m512) __builtin_ia32_selectps_512((__mmask16) __U,
- (__m512) __builtin_ia32_vfmaddps512 ((__v16sf) __A,
- -(__v16sf) __B,
- -(__v16sf) __C,
- _MM_FROUND_CUR_DIRECTION),
- (__v16sf) __C);
+ return (__m512) __builtin_ia32_vfmsubps512_mask3 (-(__v16sf) __A,
+ (__v16sf) __B,
+ (__v16sf) __C,
+ (__mmask16) __U,
+ _MM_FROUND_CUR_DIRECTION);
}