author    meissner <meissner@138bc75d-0d04-0410-961f-82ee72b054a4>  2007-09-13 02:17:51 +0000
committer meissner <meissner@138bc75d-0d04-0410-961f-82ee72b054a4>  2007-09-13 02:17:51 +0000
commit    448e99f5e858735f9de2ab5cf614dd2b171cee5d (patch)
tree      2550bf2be428ffb45e9bcb30a6c3186b44ebdc0d /gcc/config/i386/smmintrin.h
parent    3e66c14e753471249291f2403f35ebcbc436d353 (diff)
download  gcc-448e99f5e858735f9de2ab5cf614dd2b171cee5d.tar.gz
Add AMD SSE5 support; Add iterator over function arguments; Add stdarg_p, prototype_p, function_args_count functions
git-svn-id: svn+ssh://gcc.gnu.org/svn/gcc/trunk@128455 138bc75d-0d04-0410-961f-82ee72b054a4
Diffstat (limited to 'gcc/config/i386/smmintrin.h')
-rw-r--r--  gcc/config/i386/smmintrin.h  117
1 file changed, 1 insertion(+), 116 deletions(-)
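
The hunks below move the SSE4.1 rounding macros, the PTEST intrinsics, and the packed/scalar rounding intrinsics out of smmintrin.h into the new shared <mmintrin-common.h>, which smmintrin.h now includes, so the same definitions can also be used by the SSE5 header added in this commit. As a minimal sketch (not part of the patch, with a hypothetical helper name), code that includes <smmintrin.h> keeps compiling unchanged, since the moved definitions are still pulled in:

/* Hypothetical user code, compiled with -msse4.1.  The rounding
   macros and _mm_round_pd are now defined in <mmintrin-common.h>,
   which <smmintrin.h> includes after this change.  */
#include <smmintrin.h>

static __m128d
floor_each (__m128d __x)
{
  /* Round both doubles toward negative infinity.  */
  return _mm_round_pd (__x, _MM_FROUND_FLOOR);
}
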
diff --git a/gcc/config/i386/smmintrin.h b/gcc/config/i386/smmintrin.h
index 01943cd2bb7..fc20d1cf990 100644
--- a/gcc/config/i386/smmintrin.h
+++ b/gcc/config/i386/smmintrin.h
@@ -37,32 +37,10 @@
/* We need definitions from the SSSE3, SSE3, SSE2 and SSE header
files. */
#include <tmmintrin.h>
+#include <mmintrin-common.h>
/* SSE4.1 */
-/* Rounding mode macros. */
-#define _MM_FROUND_TO_NEAREST_INT 0x00
-#define _MM_FROUND_TO_NEG_INF 0x01
-#define _MM_FROUND_TO_POS_INF 0x02
-#define _MM_FROUND_TO_ZERO 0x03
-#define _MM_FROUND_CUR_DIRECTION 0x04
-
-#define _MM_FROUND_RAISE_EXC 0x00
-#define _MM_FROUND_NO_EXC 0x08
-
-#define _MM_FROUND_NINT \
- (_MM_FROUND_TO_NEAREST_INT | _MM_FROUND_RAISE_EXC)
-#define _MM_FROUND_FLOOR \
- (_MM_FROUND_TO_NEG_INF | _MM_FROUND_RAISE_EXC)
-#define _MM_FROUND_CEIL \
- (_MM_FROUND_TO_POS_INF | _MM_FROUND_RAISE_EXC)
-#define _MM_FROUND_TRUNC \
- (_MM_FROUND_TO_ZERO | _MM_FROUND_RAISE_EXC)
-#define _MM_FROUND_RINT \
- (_MM_FROUND_CUR_DIRECTION | _MM_FROUND_RAISE_EXC)
-#define _MM_FROUND_NEARBYINT \
- (_MM_FROUND_CUR_DIRECTION | _MM_FROUND_NO_EXC)
-
/* Integer blend instructions - select data from 2 sources using
constant/variable mask. */
@@ -236,38 +214,6 @@ _mm_mul_epi32 (__m128i __X, __m128i __Y)
return (__m128i) __builtin_ia32_pmuldq128 ((__v4si)__X, (__v4si)__Y);
}
-/* Packed integer 128-bit bitwise comparison. Return 1 if
- (__V & __M) == 0. */
-static __inline int __attribute__((__always_inline__))
-_mm_testz_si128 (__m128i __M, __m128i __V)
-{
- return __builtin_ia32_ptestz128 ((__v2di)__M, (__v2di)__V);
-}
-
-/* Packed integer 128-bit bitwise comparison. Return 1 if
- (__V & ~__M) == 0. */
-static __inline int __attribute__((__always_inline__))
-_mm_testc_si128 (__m128i __M, __m128i __V)
-{
- return __builtin_ia32_ptestc128 ((__v2di)__M, (__v2di)__V);
-}
-
-/* Packed integer 128-bit bitwise comparison. Return 1 if
- (__V & __M) != 0 && (__V & ~__M) != 0. */
-static __inline int __attribute__((__always_inline__))
-_mm_testnzc_si128 (__m128i __M, __m128i __V)
-{
- return __builtin_ia32_ptestnzc128 ((__v2di)__M, (__v2di)__V);
-}
-
-/* Macros for packed integer 128-bit comparison intrinsics. */
-#define _mm_test_all_zeros(M, V) _mm_testz_si128 ((M), (V))
-
-#define _mm_test_all_ones(V) \
- _mm_testc_si128 ((V), _mm_cmpeq_epi32 ((V), (V)))
-
-#define _mm_test_mix_ones_zeros(M, V) _mm_testnzc_si128 ((M), (V))
-
/* Insert single precision float into packed single precision array
element selected by index N. The bits [7-6] of N define S
index, the bits [5-4] define D index, and bits [3-0] define
@@ -405,67 +351,6 @@ _mm_minpos_epu16 (__m128i __X)
return (__m128i) __builtin_ia32_phminposuw128 ((__v8hi)__X);
}
-/* Packed/scalar double precision floating point rounding. */
-
-#ifdef __OPTIMIZE__
-static __inline __m128d __attribute__((__always_inline__))
-_mm_round_pd (__m128d __V, const int __M)
-{
- return (__m128d) __builtin_ia32_roundpd ((__v2df)__V, __M);
-}
-
-static __inline __m128d __attribute__((__always_inline__))
-_mm_round_sd(__m128d __D, __m128d __V, const int __M)
-{
- return (__m128d) __builtin_ia32_roundsd ((__v2df)__D,
- (__v2df)__V,
- __M);
-}
-#else
-#define _mm_round_pd(V, M) \
- ((__m128d) __builtin_ia32_roundpd ((__v2df)(V), (M)))
-
-#define _mm_round_sd(D, V, M) \
- ((__m128d) __builtin_ia32_roundsd ((__v2df)(D), (__v2df)(V), (M)))
-#endif
-
-/* Packed/scalar single precision floating point rounding. */
-
-#ifdef __OPTIMIZE__
-static __inline __m128 __attribute__((__always_inline__))
-_mm_round_ps (__m128 __V, const int __M)
-{
- return (__m128) __builtin_ia32_roundps ((__v4sf)__V, __M);
-}
-
-static __inline __m128 __attribute__((__always_inline__))
-_mm_round_ss (__m128 __D, __m128 __V, const int __M)
-{
- return (__m128) __builtin_ia32_roundss ((__v4sf)__D,
- (__v4sf)__V,
- __M);
-}
-#else
-#define _mm_round_ps(V, M) \
- ((__m128) __builtin_ia32_roundps ((__v4sf)(V), (M)))
-
-#define _mm_round_ss(D, V, M) \
- ((__m128) __builtin_ia32_roundss ((__v4sf)(D), (__v4sf)(V), (M)))
-#endif
-
-/* Macros for ceil/floor intrinsics. */
-#define _mm_ceil_pd(V) _mm_round_pd ((V), _MM_FROUND_CEIL)
-#define _mm_ceil_sd(D, V) _mm_round_sd ((D), (V), _MM_FROUND_CEIL)
-
-#define _mm_floor_pd(V) _mm_round_pd((V), _MM_FROUND_FLOOR)
-#define _mm_floor_sd(D, V) _mm_round_sd ((D), (V), _MM_FROUND_FLOOR)
-
-#define _mm_ceil_ps(V) _mm_round_ps ((V), _MM_FROUND_CEIL)
-#define _mm_ceil_ss(D, V) _mm_round_ss ((D), (V), _MM_FROUND_CEIL)
-
-#define _mm_floor_ps(V) _mm_round_ps ((V), _MM_FROUND_FLOOR)
-#define _mm_floor_ss(D, V) _mm_round_ss ((D), (V), _MM_FROUND_FLOOR)
-
/* Packed integer sign-extension. */
static __inline __m128i __attribute__((__always_inline__))