author:    uros <uros@138bc75d-0d04-0410-961f-82ee72b054a4>  2008-03-13 13:33:47 +0000
committer: uros <uros@138bc75d-0d04-0410-961f-82ee72b054a4>  2008-03-13 13:33:47 +0000
commit:    517b02860af00c3a34bdf6c1a499683a916496b4
tree:      0ca19fa288a9af69b989a25372e7675c5943fbe1 /gcc/config/i386/mmintrin.h
parent:    f7c302cfb235598759996067ac71da235e62e4b5
download:  gcc-517b02860af00c3a34bdf6c1a499683a916496b4.tar.gz
PR target/34000
PR target/35553
* config/i386/xmmintrin.h: Change all static inline functions to
extern inline and add __gnu_inline__ attribute.
* config/i386/bmmintrin.h: Ditto.
* config/i386/smmintrin.h: Ditto.
* config/i386/tmmintrin.h: Ditto.
* config/i386/mmintrin-common.h: Ditto.
* config/i386/ammintrin.h: Ditto.
* config/i386/emmintrin.h: Ditto.
* config/i386/pmmintrin.h: Ditto.
* config/i386/mmintrin.h: Ditto.
* config/i386/mm3dnow.h: Ditto.
testsuite/ChangeLog:
PR target/34000
PR target/35553
* g++.dg/other/i386-3.C: New test.
* gcc.target/i386/sse-13.c: Redefine extern instead of static.
* gcc.target/i386/sse-14.c: Ditto.
* gcc.target/i386/mmx-1.c: Ditto.
* gcc.target/i386/mmx-2.c: Ditto.
* gcc.target/i386/3dnow-1.c: Ditto.
* gcc.target/i386/3dnow-2.c: Ditto.
* gcc.target/i386/3dnowA-1.c: Ditto.
* gcc.target/i386/3dnowA-2.c: Ditto.
git-svn-id: svn+ssh://gcc.gnu.org/svn/gcc/trunk@133169 138bc75d-0d04-0410-961f-82ee72b054a4
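Background on the mechanical change: under the GNU89 rules, "extern inline" means the definition is used only for inlining and no out-of-line symbol is ever emitted, whereas under -std=c99 and later (and in C++) "extern inline" requests an external definition, so every translation unit including the header would emit the intrinsics as real symbols and clash at link time. The __gnu_inline__ attribute pins the GNU89 behavior regardless of language mode, which is what lets these headers switch from "static __inline" to "extern inline" safely. A minimal sketch of the new pattern (my_paddw is a hypothetical wrapper for illustration, not part of the patch):

#include <mmintrin.h>

/* Hypothetical wrapper, not from the patch, written in the new style.
   __gnu_inline__ forces GNU89 inline semantics: no out-of-line
   definition of my_paddw is ever emitted, so the header can be
   included from any number of translation units, in any language
   mode, without duplicate-symbol link errors.  */
extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
my_paddw (__m64 __a, __m64 __b)
{
  return _mm_add_pi16 (__a, __b);
}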
Diffstat (limited to 'gcc/config/i386/mmintrin.h')
-rw-r--r--  gcc/config/i386/mmintrin.h | 258
1 file changed, 129 insertions, 129 deletions
diff --git a/gcc/config/i386/mmintrin.h b/gcc/config/i386/mmintrin.h
index 94800ad688e..1c09be30e1a 100644
--- a/gcc/config/i386/mmintrin.h
+++ b/gcc/config/i386/mmintrin.h
@@ -45,26 +45,26 @@ typedef char __v8qi __attribute__ ((__vector_size__ (8)));
 typedef long long __v1di __attribute__ ((__vector_size__ (8)));

 /* Empty the multimedia state.  */
-static __inline void __attribute__((__always_inline__, __artificial__))
+extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 _mm_empty (void)
 {
   __builtin_ia32_emms ();
 }

-static __inline void __attribute__((__always_inline__, __artificial__))
+extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 _m_empty (void)
 {
   _mm_empty ();
 }

 /* Convert I to a __m64 object.  The integer is zero-extended to 64-bits.  */
-static __inline __m64 __attribute__((__always_inline__, __artificial__))
+extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 _mm_cvtsi32_si64 (int __i)
 {
   return (__m64) __builtin_ia32_vec_init_v2si (__i, 0);
 }

-static __inline __m64 __attribute__((__always_inline__, __artificial__))
+extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 _m_from_int (int __i)
 {
   return _mm_cvtsi32_si64 (__i);
@@ -74,26 +74,26 @@ _m_from_int (int __i)
 /* Convert I to a __m64 object.  */

 /* Intel intrinsic.  */
-static __inline __m64 __attribute__((__always_inline__, __artificial__))
+extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 _m_from_int64 (long long __i)
 {
   return (__m64) __i;
 }

-static __inline __m64 __attribute__((__always_inline__, __artificial__))
+extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 _mm_cvtsi64_m64 (long long __i)
 {
   return (__m64) __i;
 }

 /* Microsoft intrinsic.  */
-static __inline __m64 __attribute__((__always_inline__, __artificial__))
+extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 _mm_cvtsi64x_si64 (long long __i)
 {
   return (__m64) __i;
 }

-static __inline __m64 __attribute__((__always_inline__, __artificial__))
+extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 _mm_set_pi64x (long long __i)
 {
   return (__m64) __i;
@@ -101,13 +101,13 @@ _mm_set_pi64x (long long __i)
 #endif

 /* Convert the lower 32 bits of the __m64 object into an integer.  */
-static __inline int __attribute__((__always_inline__, __artificial__))
+extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 _mm_cvtsi64_si32 (__m64 __i)
 {
   return __builtin_ia32_vec_ext_v2si ((__v2si)__i, 0);
 }

-static __inline int __attribute__((__always_inline__, __artificial__))
+extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 _m_to_int (__m64 __i)
 {
   return _mm_cvtsi64_si32 (__i);
@@ -117,20 +117,20 @@ _m_to_int (__m64 __i)
 /* Convert the __m64 object to a 64bit integer.  */

 /* Intel intrinsic.  */
-static __inline long long __attribute__((__always_inline__, __artificial__))
+extern __inline long long __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 _m_to_int64 (__m64 __i)
 {
   return (long long)__i;
 }

-static __inline long long __attribute__((__always_inline__, __artificial__))
+extern __inline long long __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 _mm_cvtm64_si64 (__m64 __i)
 {
   return (long long)__i;
 }

 /* Microsoft intrinsic.  */
-static __inline long long __attribute__((__always_inline__, __artificial__))
+extern __inline long long __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 _mm_cvtsi64_si64x (__m64 __i)
 {
   return (long long)__i;
@@ -140,13 +140,13 @@ _mm_cvtsi64_si64x (__m64 __i)
 /* Pack the four 16-bit values from M1 into the lower four 8-bit values of
    the result, and the four 16-bit values from M2 into the upper four 8-bit
    values of the result, all with signed saturation.  */
-static __inline __m64 __attribute__((__always_inline__, __artificial__))
+extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 _mm_packs_pi16 (__m64 __m1, __m64 __m2)
 {
   return (__m64) __builtin_ia32_packsswb ((__v4hi)__m1, (__v4hi)__m2);
 }

-static __inline __m64 __attribute__((__always_inline__, __artificial__))
+extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 _m_packsswb (__m64 __m1, __m64 __m2)
 {
   return _mm_packs_pi16 (__m1, __m2);
@@ -155,13 +155,13 @@ _m_packsswb (__m64 __m1, __m64 __m2)
 /* Pack the two 32-bit values from M1 in to the lower two 16-bit values of
    the result, and the two 32-bit values from M2 into the upper two 16-bit
    values of the result, all with signed saturation.  */
-static __inline __m64 __attribute__((__always_inline__, __artificial__))
+extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 _mm_packs_pi32 (__m64 __m1, __m64 __m2)
 {
   return (__m64) __builtin_ia32_packssdw ((__v2si)__m1, (__v2si)__m2);
 }

-static __inline __m64 __attribute__((__always_inline__, __artificial__))
+extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 _m_packssdw (__m64 __m1, __m64 __m2)
 {
   return _mm_packs_pi32 (__m1, __m2);
@@ -170,13 +170,13 @@ _m_packssdw (__m64 __m1, __m64 __m2)
 /* Pack the four 16-bit values from M1 into the lower four 8-bit values of
    the result, and the four 16-bit values from M2 into the upper four 8-bit
    values of the result, all with unsigned saturation.  */
-static __inline __m64 __attribute__((__always_inline__, __artificial__))
+extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 _mm_packs_pu16 (__m64 __m1, __m64 __m2)
 {
   return (__m64) __builtin_ia32_packuswb ((__v4hi)__m1, (__v4hi)__m2);
 }

-static __inline __m64 __attribute__((__always_inline__, __artificial__))
+extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 _m_packuswb (__m64 __m1, __m64 __m2)
 {
   return _mm_packs_pu16 (__m1, __m2);
@@ -184,13 +184,13 @@ _m_packuswb (__m64 __m1, __m64 __m2)

 /* Interleave the four 8-bit values from the high half of M1 with the four
    8-bit values from the high half of M2.  */
-static __inline __m64 __attribute__((__always_inline__, __artificial__))
+extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 _mm_unpackhi_pi8 (__m64 __m1, __m64 __m2)
 {
   return (__m64) __builtin_ia32_punpckhbw ((__v8qi)__m1, (__v8qi)__m2);
 }

-static __inline __m64 __attribute__((__always_inline__, __artificial__))
+extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 _m_punpckhbw (__m64 __m1, __m64 __m2)
 {
   return _mm_unpackhi_pi8 (__m1, __m2);
@@ -198,13 +198,13 @@ _m_punpckhbw (__m64 __m1, __m64 __m2)

 /* Interleave the two 16-bit values from the high half of M1 with the two
    16-bit values from the high half of M2.  */
-static __inline __m64 __attribute__((__always_inline__, __artificial__))
+extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 _mm_unpackhi_pi16 (__m64 __m1, __m64 __m2)
 {
   return (__m64) __builtin_ia32_punpckhwd ((__v4hi)__m1, (__v4hi)__m2);
 }

-static __inline __m64 __attribute__((__always_inline__, __artificial__))
+extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 _m_punpckhwd (__m64 __m1, __m64 __m2)
 {
   return _mm_unpackhi_pi16 (__m1, __m2);
@@ -212,13 +212,13 @@ _m_punpckhwd (__m64 __m1, __m64 __m2)

 /* Interleave the 32-bit value from the high half of M1 with the 32-bit
    value from the high half of M2.  */
-static __inline __m64 __attribute__((__always_inline__, __artificial__))
+extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 _mm_unpackhi_pi32 (__m64 __m1, __m64 __m2)
 {
   return (__m64) __builtin_ia32_punpckhdq ((__v2si)__m1, (__v2si)__m2);
 }

-static __inline __m64 __attribute__((__always_inline__, __artificial__))
+extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 _m_punpckhdq (__m64 __m1, __m64 __m2)
 {
   return _mm_unpackhi_pi32 (__m1, __m2);
@@ -226,13 +226,13 @@ _m_punpckhdq (__m64 __m1, __m64 __m2)

 /* Interleave the four 8-bit values from the low half of M1 with the four
    8-bit values from the low half of M2.  */
-static __inline __m64 __attribute__((__always_inline__, __artificial__))
+extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 _mm_unpacklo_pi8 (__m64 __m1, __m64 __m2)
 {
   return (__m64) __builtin_ia32_punpcklbw ((__v8qi)__m1, (__v8qi)__m2);
 }

-static __inline __m64 __attribute__((__always_inline__, __artificial__))
+extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 _m_punpcklbw (__m64 __m1, __m64 __m2)
 {
   return _mm_unpacklo_pi8 (__m1, __m2);
@@ -240,13 +240,13 @@ _m_punpcklbw (__m64 __m1, __m64 __m2)

 /* Interleave the two 16-bit values from the low half of M1 with the two
    16-bit values from the low half of M2.  */
-static __inline __m64 __attribute__((__always_inline__, __artificial__))
+extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 _mm_unpacklo_pi16 (__m64 __m1, __m64 __m2)
 {
   return (__m64) __builtin_ia32_punpcklwd ((__v4hi)__m1, (__v4hi)__m2);
 }

-static __inline __m64 __attribute__((__always_inline__, __artificial__))
+extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 _m_punpcklwd (__m64 __m1, __m64 __m2)
 {
   return _mm_unpacklo_pi16 (__m1, __m2);
@@ -254,52 +254,52 @@ _m_punpcklwd (__m64 __m1, __m64 __m2)

 /* Interleave the 32-bit value from the low half of M1 with the 32-bit
    value from the low half of M2.  */
-static __inline __m64 __attribute__((__always_inline__, __artificial__))
+extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 _mm_unpacklo_pi32 (__m64 __m1, __m64 __m2)
 {
   return (__m64) __builtin_ia32_punpckldq ((__v2si)__m1, (__v2si)__m2);
 }

-static __inline __m64 __attribute__((__always_inline__, __artificial__))
+extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 _m_punpckldq (__m64 __m1, __m64 __m2)
 {
   return _mm_unpacklo_pi32 (__m1, __m2);
 }

 /* Add the 8-bit values in M1 to the 8-bit values in M2.  */
-static __inline __m64 __attribute__((__always_inline__, __artificial__))
+extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 _mm_add_pi8 (__m64 __m1, __m64 __m2)
 {
   return (__m64) __builtin_ia32_paddb ((__v8qi)__m1, (__v8qi)__m2);
 }

-static __inline __m64 __attribute__((__always_inline__, __artificial__))
+extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 _m_paddb (__m64 __m1, __m64 __m2)
 {
   return _mm_add_pi8 (__m1, __m2);
 }

 /* Add the 16-bit values in M1 to the 16-bit values in M2.  */
-static __inline __m64 __attribute__((__always_inline__, __artificial__))
+extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 _mm_add_pi16 (__m64 __m1, __m64 __m2)
 {
   return (__m64) __builtin_ia32_paddw ((__v4hi)__m1, (__v4hi)__m2);
 }

-static __inline __m64 __attribute__((__always_inline__, __artificial__))
+extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 _m_paddw (__m64 __m1, __m64 __m2)
 {
   return _mm_add_pi16 (__m1, __m2);
 }

 /* Add the 32-bit values in M1 to the 32-bit values in M2.  */
-static __inline __m64 __attribute__((__always_inline__, __artificial__))
+extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 _mm_add_pi32 (__m64 __m1, __m64 __m2)
 {
   return (__m64) __builtin_ia32_paddd ((__v2si)__m1, (__v2si)__m2);
 }

-static __inline __m64 __attribute__((__always_inline__, __artificial__))
+extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 _m_paddd (__m64 __m1, __m64 __m2)
 {
   return _mm_add_pi32 (__m1, __m2);
@@ -307,7 +307,7 @@ _m_paddd (__m64 __m1, __m64 __m2)

 /* Add the 64-bit values in M1 to the 64-bit values in M2.  */
 #ifdef __SSE2__
-static __inline __m64 __attribute__((__always_inline__, __artificial__))
+extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 _mm_add_si64 (__m64 __m1, __m64 __m2)
 {
   return (__m64) __builtin_ia32_paddq ((__v1di)__m1, (__v1di)__m2);
@@ -316,13 +316,13 @@ _mm_add_si64 (__m64 __m1, __m64 __m2)

 /* Add the 8-bit values in M1 to the 8-bit values in M2 using signed
    saturated arithmetic.  */
-static __inline __m64 __attribute__((__always_inline__, __artificial__))
+extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 _mm_adds_pi8 (__m64 __m1, __m64 __m2)
 {
   return (__m64) __builtin_ia32_paddsb ((__v8qi)__m1, (__v8qi)__m2);
 }

-static __inline __m64 __attribute__((__always_inline__, __artificial__))
+extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 _m_paddsb (__m64 __m1, __m64 __m2)
 {
   return _mm_adds_pi8 (__m1, __m2);
@@ -330,13 +330,13 @@ _m_paddsb (__m64 __m1, __m64 __m2)

 /* Add the 16-bit values in M1 to the 16-bit values in M2 using signed
    saturated arithmetic.  */
-static __inline __m64 __attribute__((__always_inline__, __artificial__))
+extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 _mm_adds_pi16 (__m64 __m1, __m64 __m2)
 {
   return (__m64) __builtin_ia32_paddsw ((__v4hi)__m1, (__v4hi)__m2);
 }

-static __inline __m64 __attribute__((__always_inline__, __artificial__))
+extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 _m_paddsw (__m64 __m1, __m64 __m2)
 {
   return _mm_adds_pi16 (__m1, __m2);
@@ -344,13 +344,13 @@ _m_paddsw (__m64 __m1, __m64 __m2)

 /* Add the 8-bit values in M1 to the 8-bit values in M2 using unsigned
    saturated arithmetic.  */
-static __inline __m64 __attribute__((__always_inline__, __artificial__))
+extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 _mm_adds_pu8 (__m64 __m1, __m64 __m2)
 {
   return (__m64) __builtin_ia32_paddusb ((__v8qi)__m1, (__v8qi)__m2);
 }

-static __inline __m64 __attribute__((__always_inline__, __artificial__))
+extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 _m_paddusb (__m64 __m1, __m64 __m2)
 {
   return _mm_adds_pu8 (__m1, __m2);
@@ -358,52 +358,52 @@ _m_paddusb (__m64 __m1, __m64 __m2)

 /* Add the 16-bit values in M1 to the 16-bit values in M2 using unsigned
    saturated arithmetic.  */
-static __inline __m64 __attribute__((__always_inline__, __artificial__))
+extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 _mm_adds_pu16 (__m64 __m1, __m64 __m2)
 {
   return (__m64) __builtin_ia32_paddusw ((__v4hi)__m1, (__v4hi)__m2);
 }

-static __inline __m64 __attribute__((__always_inline__, __artificial__))
+extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 _m_paddusw (__m64 __m1, __m64 __m2)
 {
   return _mm_adds_pu16 (__m1, __m2);
 }

 /* Subtract the 8-bit values in M2 from the 8-bit values in M1.  */
-static __inline __m64 __attribute__((__always_inline__, __artificial__))
+extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 _mm_sub_pi8 (__m64 __m1, __m64 __m2)
 {
   return (__m64) __builtin_ia32_psubb ((__v8qi)__m1, (__v8qi)__m2);
 }

-static __inline __m64 __attribute__((__always_inline__, __artificial__))
+extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 _m_psubb (__m64 __m1, __m64 __m2)
 {
   return _mm_sub_pi8 (__m1, __m2);
 }

 /* Subtract the 16-bit values in M2 from the 16-bit values in M1.  */
-static __inline __m64 __attribute__((__always_inline__, __artificial__))
+extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 _mm_sub_pi16 (__m64 __m1, __m64 __m2)
 {
   return (__m64) __builtin_ia32_psubw ((__v4hi)__m1, (__v4hi)__m2);
 }

-static __inline __m64 __attribute__((__always_inline__, __artificial__))
+extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 _m_psubw (__m64 __m1, __m64 __m2)
 {
   return _mm_sub_pi16 (__m1, __m2);
 }

 /* Subtract the 32-bit values in M2 from the 32-bit values in M1.  */
-static __inline __m64 __attribute__((__always_inline__, __artificial__))
+extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 _mm_sub_pi32 (__m64 __m1, __m64 __m2)
 {
   return (__m64) __builtin_ia32_psubd ((__v2si)__m1, (__v2si)__m2);
 }

-static __inline __m64 __attribute__((__always_inline__, __artificial__))
+extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 _m_psubd (__m64 __m1, __m64 __m2)
 {
   return _mm_sub_pi32 (__m1, __m2);
@@ -411,7 +411,7 @@ _m_psubd (__m64 __m1, __m64 __m2)

 /* Add the 64-bit values in M1 to the 64-bit values in M2.  */
 #ifdef __SSE2__
-static __inline __m64 __attribute__((__always_inline__, __artificial__))
+extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 _mm_sub_si64 (__m64 __m1, __m64 __m2)
 {
   return (__m64) __builtin_ia32_psubq ((__v1di)__m1, (__v1di)__m2);
@@ -420,13 +420,13 @@ _mm_sub_si64 (__m64 __m1, __m64 __m2)

 /* Subtract the 8-bit values in M2 from the 8-bit values in M1 using signed
    saturating arithmetic.  */
-static __inline __m64 __attribute__((__always_inline__, __artificial__))
+extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 _mm_subs_pi8 (__m64 __m1, __m64 __m2)
 {
   return (__m64) __builtin_ia32_psubsb ((__v8qi)__m1, (__v8qi)__m2);
 }

-static __inline __m64 __attribute__((__always_inline__, __artificial__))
+extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 _m_psubsb (__m64 __m1, __m64 __m2)
 {
   return _mm_subs_pi8 (__m1, __m2);
@@ -434,13 +434,13 @@ _m_psubsb (__m64 __m1, __m64 __m2)

 /* Subtract the 16-bit values in M2 from the 16-bit values in M1 using
    signed saturating arithmetic.  */
-static __inline __m64 __attribute__((__always_inline__, __artificial__))
+extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 _mm_subs_pi16 (__m64 __m1, __m64 __m2)
 {
   return (__m64) __builtin_ia32_psubsw ((__v4hi)__m1, (__v4hi)__m2);
 }

-static __inline __m64 __attribute__((__always_inline__, __artificial__))
+extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 _m_psubsw (__m64 __m1, __m64 __m2)
 {
   return _mm_subs_pi16 (__m1, __m2);
@@ -448,13 +448,13 @@ _m_psubsw (__m64 __m1, __m64 __m2)

 /* Subtract the 8-bit values in M2 from the 8-bit values in M1 using
    unsigned saturating arithmetic.  */
-static __inline __m64 __attribute__((__always_inline__, __artificial__))
+extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 _mm_subs_pu8 (__m64 __m1, __m64 __m2)
 {
   return (__m64) __builtin_ia32_psubusb ((__v8qi)__m1, (__v8qi)__m2);
 }

-static __inline __m64 __attribute__((__always_inline__, __artificial__))
+extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 _m_psubusb (__m64 __m1, __m64 __m2)
 {
   return _mm_subs_pu8 (__m1, __m2);
@@ -462,13 +462,13 @@ _m_psubusb (__m64 __m1, __m64 __m2)

 /* Subtract the 16-bit values in M2 from the 16-bit values in M1 using
    unsigned saturating arithmetic.  */
-static __inline __m64 __attribute__((__always_inline__, __artificial__))
+extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 _mm_subs_pu16 (__m64 __m1, __m64 __m2)
 {
   return (__m64) __builtin_ia32_psubusw ((__v4hi)__m1, (__v4hi)__m2);
 }

-static __inline __m64 __attribute__((__always_inline__, __artificial__))
+extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 _m_psubusw (__m64 __m1, __m64 __m2)
 {
   return _mm_subs_pu16 (__m1, __m2);
@@ -477,13 +477,13 @@ _m_psubusw (__m64 __m1, __m64 __m2)
 /* Multiply four 16-bit values in M1 by four 16-bit values in M2 producing
    four 32-bit intermediate results, which are then summed by pairs to
    produce two 32-bit results.  */
-static __inline __m64 __attribute__((__always_inline__, __artificial__))
+extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 _mm_madd_pi16 (__m64 __m1, __m64 __m2)
 {
   return (__m64) __builtin_ia32_pmaddwd ((__v4hi)__m1, (__v4hi)__m2);
 }

-static __inline __m64 __attribute__((__always_inline__, __artificial__))
+extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 _m_pmaddwd (__m64 __m1, __m64 __m2)
 {
   return _mm_madd_pi16 (__m1, __m2);
@@ -491,13 +491,13 @@ _m_pmaddwd (__m64 __m1, __m64 __m2)

 /* Multiply four signed 16-bit values in M1 by four signed 16-bit values in
    M2 and produce the high 16 bits of the 32-bit results.  */
-static __inline __m64 __attribute__((__always_inline__, __artificial__))
+extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 _mm_mulhi_pi16 (__m64 __m1, __m64 __m2)
 {
   return (__m64) __builtin_ia32_pmulhw ((__v4hi)__m1, (__v4hi)__m2);
 }

-static __inline __m64 __attribute__((__always_inline__, __artificial__))
+extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 _m_pmulhw (__m64 __m1, __m64 __m2)
 {
   return _mm_mulhi_pi16 (__m1, __m2);
@@ -505,226 +505,226 @@ _m_pmulhw (__m64 __m1, __m64 __m2)

 /* Multiply four 16-bit values in M1 by four 16-bit values in M2 and produce
    the low 16 bits of the results.  */
-static __inline __m64 __attribute__((__always_inline__, __artificial__))
+extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 _mm_mullo_pi16 (__m64 __m1, __m64 __m2)
 {
   return (__m64) __builtin_ia32_pmullw ((__v4hi)__m1, (__v4hi)__m2);
 }

-static __inline __m64 __attribute__((__always_inline__, __artificial__))
+extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 _m_pmullw (__m64 __m1, __m64 __m2)
 {
   return _mm_mullo_pi16 (__m1, __m2);
 }

 /* Shift four 16-bit values in M left by COUNT.  */
-static __inline __m64 __attribute__((__always_inline__, __artificial__))
+extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 _mm_sll_pi16 (__m64 __m, __m64 __count)
 {
   return (__m64) __builtin_ia32_psllw ((__v4hi)__m, (__v4hi)__count);
 }

-static __inline __m64 __attribute__((__always_inline__, __artificial__))
+extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 _m_psllw (__m64 __m, __m64 __count)
 {
   return _mm_sll_pi16 (__m, __count);
 }

-static __inline __m64 __attribute__((__always_inline__, __artificial__))
+extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 _mm_slli_pi16 (__m64 __m, int __count)
 {
   return (__m64) __builtin_ia32_psllwi ((__v4hi)__m, __count);
 }

-static __inline __m64 __attribute__((__always_inline__, __artificial__))
+extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 _m_psllwi (__m64 __m, int __count)
 {
   return _mm_slli_pi16 (__m, __count);
 }

 /* Shift two 32-bit values in M left by COUNT.  */
-static __inline __m64 __attribute__((__always_inline__, __artificial__))
+extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 _mm_sll_pi32 (__m64 __m, __m64 __count)
 {
   return (__m64) __builtin_ia32_pslld ((__v2si)__m, (__v2si)__count);
 }

-static __inline __m64 __attribute__((__always_inline__, __artificial__))
+extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 _m_pslld (__m64 __m, __m64 __count)
 {
   return _mm_sll_pi32 (__m, __count);
 }

-static __inline __m64 __attribute__((__always_inline__, __artificial__))
+extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 _mm_slli_pi32 (__m64 __m, int __count)
 {
   return (__m64) __builtin_ia32_pslldi ((__v2si)__m, __count);
 }

-static __inline __m64 __attribute__((__always_inline__, __artificial__))
+extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 _m_pslldi (__m64 __m, int __count)
 {
   return _mm_slli_pi32 (__m, __count);
 }

 /* Shift the 64-bit value in M left by COUNT.  */
-static __inline __m64 __attribute__((__always_inline__, __artificial__))
+extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 _mm_sll_si64 (__m64 __m, __m64 __count)
 {
   return (__m64) __builtin_ia32_psllq ((__v1di)__m, (__v1di)__count);
 }

-static __inline __m64 __attribute__((__always_inline__, __artificial__))
+extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 _m_psllq (__m64 __m, __m64 __count)
 {
   return _mm_sll_si64 (__m, __count);
 }

-static __inline __m64 __attribute__((__always_inline__, __artificial__))
+extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 _mm_slli_si64 (__m64 __m, int __count)
 {
   return (__m64) __builtin_ia32_psllqi ((__v1di)__m, __count);
 }

-static __inline __m64 __attribute__((__always_inline__, __artificial__))
+extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 _m_psllqi (__m64 __m, int __count)
 {
   return _mm_slli_si64 (__m, __count);
 }

 /* Shift four 16-bit values in M right by COUNT; shift in the sign bit.  */
-static __inline __m64 __attribute__((__always_inline__, __artificial__))
+extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 _mm_sra_pi16 (__m64 __m, __m64 __count)
 {
   return (__m64) __builtin_ia32_psraw ((__v4hi)__m, (__v4hi)__count);
 }

-static __inline __m64 __attribute__((__always_inline__, __artificial__))
+extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 _m_psraw (__m64 __m, __m64 __count)
 {
   return _mm_sra_pi16 (__m, __count);
 }

-static __inline __m64 __attribute__((__always_inline__, __artificial__))
+extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 _mm_srai_pi16 (__m64 __m, int __count)
 {
   return (__m64) __builtin_ia32_psrawi ((__v4hi)__m, __count);
 }

-static __inline __m64 __attribute__((__always_inline__, __artificial__))
+extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 _m_psrawi (__m64 __m, int __count)
 {
   return _mm_srai_pi16 (__m, __count);
 }

 /* Shift two 32-bit values in M right by COUNT; shift in the sign bit.  */
-static __inline __m64 __attribute__((__always_inline__, __artificial__))
+extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 _mm_sra_pi32 (__m64 __m, __m64 __count)
 {
   return (__m64) __builtin_ia32_psrad ((__v2si)__m, (__v2si)__count);
 }

-static __inline __m64 __attribute__((__always_inline__, __artificial__))
+extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 _m_psrad (__m64 __m, __m64 __count)
 {
   return _mm_sra_pi32 (__m, __count);
 }

-static __inline __m64 __attribute__((__always_inline__, __artificial__))
+extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 _mm_srai_pi32 (__m64 __m, int __count)
 {
   return (__m64) __builtin_ia32_psradi ((__v2si)__m, __count);
 }

-static __inline __m64 __attribute__((__always_inline__, __artificial__))
+extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 _m_psradi (__m64 __m, int __count)
 {
   return _mm_srai_pi32 (__m, __count);
 }

 /* Shift four 16-bit values in M right by COUNT; shift in zeros.  */
-static __inline __m64 __attribute__((__always_inline__, __artificial__))
+extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 _mm_srl_pi16 (__m64 __m, __m64 __count)
 {
   return (__m64) __builtin_ia32_psrlw ((__v4hi)__m, (__v4hi)__count);
 }

-static __inline __m64 __attribute__((__always_inline__, __artificial__))
+extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 _m_psrlw (__m64 __m, __m64 __count)
 {
   return _mm_srl_pi16 (__m, __count);
 }

-static __inline __m64 __attribute__((__always_inline__, __artificial__))
+extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 _mm_srli_pi16 (__m64 __m, int __count)
 {
   return (__m64) __builtin_ia32_psrlwi ((__v4hi)__m, __count);
 }

-static __inline __m64 __attribute__((__always_inline__, __artificial__))
+extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 _m_psrlwi (__m64 __m, int __count)
 {
   return _mm_srli_pi16 (__m, __count);
 }

 /* Shift two 32-bit values in M right by COUNT; shift in zeros.  */
-static __inline __m64 __attribute__((__always_inline__, __artificial__))
+extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 _mm_srl_pi32 (__m64 __m, __m64 __count)
 {
   return (__m64) __builtin_ia32_psrld ((__v2si)__m, (__v2si)__count);
 }

-static __inline __m64 __attribute__((__always_inline__, __artificial__))
+extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 _m_psrld (__m64 __m, __m64 __count)
 {
   return _mm_srl_pi32 (__m, __count);
 }

-static __inline __m64 __attribute__((__always_inline__, __artificial__))
+extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 _mm_srli_pi32 (__m64 __m, int __count)
 {
   return (__m64) __builtin_ia32_psrldi ((__v2si)__m, __count);
 }

-static __inline __m64 __attribute__((__always_inline__, __artificial__))
+extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 _m_psrldi (__m64 __m, int __count)
 {
   return _mm_srli_pi32 (__m, __count);
 }

 /* Shift the 64-bit value in M left by COUNT; shift in zeros.  */
-static __inline __m64 __attribute__((__always_inline__, __artificial__))
+extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 _mm_srl_si64 (__m64 __m, __m64 __count)
 {
   return (__m64) __builtin_ia32_psrlq ((__v1di)__m, (__v1di)__count);
 }

-static __inline __m64 __attribute__((__always_inline__, __artificial__))
+extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 _m_psrlq (__m64 __m, __m64 __count)
 {
   return _mm_srl_si64 (__m, __count);
 }

-static __inline __m64 __attribute__((__always_inline__, __artificial__))
+extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 _mm_srli_si64 (__m64 __m, int __count)
 {
   return (__m64) __builtin_ia32_psrlqi ((__v1di)__m, __count);
 }

-static __inline __m64 __attribute__((__always_inline__, __artificial__))
+extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 _m_psrlqi (__m64 __m, int __count)
 {
   return _mm_srli_si64 (__m, __count);
 }

 /* Bit-wise AND the 64-bit values in M1 and M2.  */
-static __inline __m64 __attribute__((__always_inline__, __artificial__))
+extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 _mm_and_si64 (__m64 __m1, __m64 __m2)
 {
   return __builtin_ia32_pand (__m1, __m2);
 }

-static __inline __m64 __attribute__((__always_inline__, __artificial__))
+extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 _m_pand (__m64 __m1, __m64 __m2)
 {
   return _mm_and_si64 (__m1, __m2);
@@ -732,39 +732,39 @@ _m_pand (__m64 __m1, __m64 __m2)

 /* Bit-wise complement the 64-bit value in M1 and bit-wise AND it with the
    64-bit value in M2.  */
-static __inline __m64 __attribute__((__always_inline__, __artificial__))
+extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 _mm_andnot_si64 (__m64 __m1, __m64 __m2)
 {
   return __builtin_ia32_pandn (__m1, __m2);
 }

-static __inline __m64 __attribute__((__always_inline__, __artificial__))
+extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 _m_pandn (__m64 __m1, __m64 __m2)
 {
   return _mm_andnot_si64 (__m1, __m2);
 }

 /* Bit-wise inclusive OR the 64-bit values in M1 and M2.  */
-static __inline __m64 __attribute__((__always_inline__, __artificial__))
+extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 _mm_or_si64 (__m64 __m1, __m64 __m2)
 {
   return __builtin_ia32_por (__m1, __m2);
 }

-static __inline __m64 __attribute__((__always_inline__, __artificial__))
+extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 _m_por (__m64 __m1, __m64 __m2)
 {
   return _mm_or_si64 (__m1, __m2);
 }

 /* Bit-wise exclusive OR the 64-bit values in M1 and M2.  */
-static __inline __m64 __attribute__((__always_inline__, __artificial__))
+extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 _mm_xor_si64 (__m64 __m1, __m64 __m2)
 {
   return __builtin_ia32_pxor (__m1, __m2);
 }

-static __inline __m64 __attribute__((__always_inline__, __artificial__))
+extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 _m_pxor (__m64 __m1, __m64 __m2)
 {
   return _mm_xor_si64 (__m1, __m2);
@@ -772,25 +772,25 @@ _m_pxor (__m64 __m1, __m64 __m2)

 /* Compare eight 8-bit values.  The result of the comparison is 0xFF if the
    test is true and zero if false.  */
-static __inline __m64 __attribute__((__always_inline__, __artificial__))
+extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 _mm_cmpeq_pi8 (__m64 __m1, __m64 __m2)
 {
   return (__m64) __builtin_ia32_pcmpeqb ((__v8qi)__m1, (__v8qi)__m2);
 }

-static __inline __m64 __attribute__((__always_inline__, __artificial__))
+extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 _m_pcmpeqb (__m64 __m1, __m64 __m2)
 {
   return _mm_cmpeq_pi8 (__m1, __m2);
 }

-static __inline __m64 __attribute__((__always_inline__, __artificial__))
+extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 _mm_cmpgt_pi8 (__m64 __m1, __m64 __m2)
 {
   return (__m64) __builtin_ia32_pcmpgtb ((__v8qi)__m1, (__v8qi)__m2);
 }

-static __inline __m64 __attribute__((__always_inline__, __artificial__))
+extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 _m_pcmpgtb (__m64 __m1, __m64 __m2)
 {
   return _mm_cmpgt_pi8 (__m1, __m2);
@@ -798,25 +798,25 @@ _m_pcmpgtb (__m64 __m1, __m64 __m2)

 /* Compare four 16-bit values.  The result of the comparison is 0xFFFF if
    the test is true and zero if false.  */
-static __inline __m64 __attribute__((__always_inline__, __artificial__))
+extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 _mm_cmpeq_pi16 (__m64 __m1, __m64 __m2)
 {
   return (__m64) __builtin_ia32_pcmpeqw ((__v4hi)__m1, (__v4hi)__m2);
 }

-static __inline __m64 __attribute__((__always_inline__, __artificial__))
+extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 _m_pcmpeqw (__m64 __m1, __m64 __m2)
 {
   return _mm_cmpeq_pi16 (__m1, __m2);
 }

-static __inline __m64 __attribute__((__always_inline__, __artificial__))
+extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 _mm_cmpgt_pi16 (__m64 __m1, __m64 __m2)
 {
   return (__m64) __builtin_ia32_pcmpgtw ((__v4hi)__m1, (__v4hi)__m2);
 }

-static __inline __m64 __attribute__((__always_inline__, __artificial__))
+extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 _m_pcmpgtw (__m64 __m1, __m64 __m2)
 {
   return _mm_cmpgt_pi16 (__m1, __m2);
@@ -824,53 +824,53 @@ _m_pcmpgtw (__m64 __m1, __m64 __m2)

 /* Compare two 32-bit values.  The result of the comparison is 0xFFFFFFFF
    if the test is true and zero if false.  */
-static __inline __m64 __attribute__((__always_inline__, __artificial__))
+extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 _mm_cmpeq_pi32 (__m64 __m1, __m64 __m2)
 {
   return (__m64) __builtin_ia32_pcmpeqd ((__v2si)__m1, (__v2si)__m2);
 }

-static __inline __m64 __attribute__((__always_inline__, __artificial__))
+extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 _m_pcmpeqd (__m64 __m1, __m64 __m2)
 {
   return _mm_cmpeq_pi32 (__m1, __m2);
 }

-static __inline __m64 __attribute__((__always_inline__, __artificial__))
+extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 _mm_cmpgt_pi32 (__m64 __m1, __m64 __m2)
 {
   return (__m64) __builtin_ia32_pcmpgtd ((__v2si)__m1, (__v2si)__m2);
 }

-static __inline __m64 __attribute__((__always_inline__, __artificial__))
+extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 _m_pcmpgtd (__m64 __m1, __m64 __m2)
 {
   return _mm_cmpgt_pi32 (__m1, __m2);
 }

 /* Creates a 64-bit zero.  */
-static __inline __m64 __attribute__((__always_inline__, __artificial__))
+extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 _mm_setzero_si64 (void)
 {
   return (__m64)0LL;
 }

 /* Creates a vector of two 32-bit values; I0 is least significant.  */
-static __inline __m64 __attribute__((__always_inline__, __artificial__))
+extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 _mm_set_pi32 (int __i1, int __i0)
 {
   return (__m64) __builtin_ia32_vec_init_v2si (__i0, __i1);
 }

 /* Creates a vector of four 16-bit values; W0 is least significant.  */
-static __inline __m64 __attribute__((__always_inline__, __artificial__))
+extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 _mm_set_pi16 (short __w3, short __w2, short __w1, short __w0)
 {
   return (__m64) __builtin_ia32_vec_init_v4hi (__w0, __w1, __w2, __w3);
 }

 /* Creates a vector of eight 8-bit values; B0 is least significant.  */
-static __inline __m64 __attribute__((__always_inline__, __artificial__))
+extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 _mm_set_pi8 (char __b7, char __b6, char __b5, char __b4,
              char __b3, char __b2, char __b1, char __b0)
 {
@@ -879,19 +879,19 @@ _mm_set_pi8 (char __b7, char __b6, char __b5, char __b4,
 }

 /* Similar, but with the arguments in reverse order.  */
-static __inline __m64 __attribute__((__always_inline__, __artificial__))
+extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 _mm_setr_pi32 (int __i0, int __i1)
 {
   return _mm_set_pi32 (__i1, __i0);
 }

-static __inline __m64 __attribute__((__always_inline__, __artificial__))
+extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 _mm_setr_pi16 (short __w0, short __w1, short __w2, short __w3)
 {
   return _mm_set_pi16 (__w3, __w2, __w1, __w0);
 }

-static __inline __m64 __attribute__((__always_inline__, __artificial__))
+extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 _mm_setr_pi8 (char __b0, char __b1, char __b2, char __b3,
               char __b4, char __b5, char __b6, char __b7)
 {
@@ -899,21 +899,21 @@ _mm_setr_pi8 (char __b0, char __b1, char __b2, char __b3,
 }

 /* Creates a vector of two 32-bit values, both elements containing I.  */
-static __inline __m64 __attribute__((__always_inline__, __artificial__))
+extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 _mm_set1_pi32 (int __i)
 {
   return _mm_set_pi32 (__i, __i);
 }

 /* Creates a vector of four 16-bit values, all elements containing W.  */
-static __inline __m64 __attribute__((__always_inline__, __artificial__))
+extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 _mm_set1_pi16 (short __w)
 {
   return _mm_set_pi16 (__w, __w, __w, __w);
 }

 /* Creates a vector of eight 8-bit values, all elements containing B.  */
-static __inline __m64 __attribute__((__always_inline__, __artificial__))
+extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
 _mm_set1_pi8 (char __b)
 {
   return _mm_set_pi8 (__b, __b, __b, __b, __b, __b, __b, __b);
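For reference, a minimal consumer of the converted header might look like the following (the file name demo.c is illustrative; build with gcc -mmmx demo.c). Because the intrinsics are now extern inline with __gnu_inline__, several translation units can include mmintrin.h under -std=c99 and still link cleanly:

/* demo.c -- exercises a few of the converted intrinsics.  */
#include <stdio.h>
#include <mmintrin.h>

int
main (void)
{
  __m64 a = _mm_set_pi16 (1, 2, 3, 4);      /* element 0 (lowest) is 4 */
  __m64 b = _mm_set_pi16 (10, 20, 30, 40);
  __m64 sum = _mm_add_pi16 (a, b);          /* elements, low to high: 44, 33, 22, 11 */
  int lo = _mm_cvtsi64_si32 (sum);          /* low 32 bits: 0x0021002c */
  _mm_empty ();                             /* clear MMX state before using the FPU */
  printf ("%08x\n", lo);
  return 0;
}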