author    Jeffrey Walton <noloader@gmail.com>    2018-11-14 20:16:38 -0500
committer Jeffrey Walton <noloader@gmail.com>    2018-11-14 20:16:38 -0500
commit    96d3fa208ed7d31c04f6428fa06b13581fc0367e (patch)
tree      b9f219fd3350f614067ea9d03c95dc4e3f65fe64
parent    7bd02896a0a87bf40da07698e1edcd2929a77d95 (diff)
download  cryptopp-git-96d3fa208ed7d31c04f6428fa06b13581fc0367e.tar.gz
Fix compile when using XLC with LLVM front-end without -qxlcompatmacros
-rw-r--r--  gcm_simd.cpp   8
-rw-r--r--  ppc_simd.h    42
-rw-r--r--  sha_simd.cpp  24

3 files changed, 37 insertions(+), 37 deletions(-)
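Background for the change: IBM XL C/C++ 13.1 and later ship an optional LLVM (Clang) front end, xlclang/xlclang++. That front end defines __clang__ but defines the legacy __xlc__/__xlC__ macros only when -qxlcompatmacros is passed. Guards keyed off CRYPTOPP_XLC_VERSION (which appears to be derived from the legacy macros) therefore fell through to the GCC __builtin_crypto_* path, which the Clang front end does not provide, and the build failed. The patch tests the raw macros directly and adds __clang__ so both front ends take the IBM-style intrinsic path. A minimal probe translation unit (a sketch, not part of the patch) illustrates which identification macros each front end exposes:

    // Sketch: probe which identification macros a given front end defines.
    // Compile with xlC, xlclang++, clang++, or g++ and compare the output.
    #include <cstdio>

    int main()
    {
    #if defined(__xlc__) || defined(__xlC__)
        std::printf("legacy XL front-end macros present\n");
    #endif
    #if defined(__clang__)
        std::printf("__clang__ defined (xlclang or plain Clang)\n");
    #endif
    #if defined(__GNUC__) && !defined(__clang__)
        std::printf("GCC: __builtin_crypto_* builtins expected\n");
    #endif
        return 0;
    }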
diff --git a/gcm_simd.cpp b/gcm_simd.cpp
index 92647cfb..a967eec0 100644
--- a/gcm_simd.cpp
+++ b/gcm_simd.cpp
@@ -201,7 +201,7 @@ inline uint64x2_p VMULL2LE(const uint64x2_p& val)
// _mm_clmulepi64_si128(a, b, 0x00)
inline uint64x2_p VMULL_00LE(const uint64x2_p& a, const uint64x2_p& b)
{
-#if defined(__xlc__) || defined(__xlC__)
+#if defined(__xlc__) || defined(__xlC__) || defined(__clang__)
return VMULL2LE(__vpmsumd (VectorGetHigh(a), VectorGetHigh(b)));
#else
return VMULL2LE(__builtin_crypto_vpmsumd (VectorGetHigh(a), VectorGetHigh(b)));
@@ -214,7 +214,7 @@ inline uint64x2_p VMULL_01LE(const uint64x2_p& a, const uint64x2_p& b)
// Small speedup. VectorGetHigh(b) ensures the high dword of 'b' is 0.
// The 0 used in the vmull yields 0 for the high product, so the high
// dword of 'a' is "don't care".
-#if defined(__xlc__) || defined(__xlC__)
+#if defined(__xlc__) || defined(__xlC__) || defined(__clang__)
return VMULL2LE(__vpmsumd (a, VectorGetHigh(b)));
#else
return VMULL2LE(__builtin_crypto_vpmsumd (a, VectorGetHigh(b)));
@@ -227,7 +227,7 @@ inline uint64x2_p VMULL_10LE(const uint64x2_p& a, const uint64x2_p& b)
// Small speedup. VectorGetHigh(a) ensures the high dword of 'a' is 0.
// The 0 used in the vmull yields 0 for the high product, so the high
// dword of 'b' is "don't care".
-#if defined(__xlc__) || defined(__xlC__)
+#if defined(__xlc__) || defined(__xlC__) || defined(__clang__)
return VMULL2LE(__vpmsumd (VectorGetHigh(a), b));
#else
return VMULL2LE(__builtin_crypto_vpmsumd (VectorGetHigh(a), b));
@@ -240,7 +240,7 @@ inline uint64x2_p VMULL_11LE(const uint64x2_p& a, const uint64x2_p& b)
// Small speedup. VectorGetLow(a) ensures the high dword of 'a' is 0.
// The 0 used in the vmull yields 0 for the high product, so the high
// dword of 'b' is "don't care".
-#if defined(__xlc__) || defined(__xlC__)
+#if defined(__xlc__) || defined(__xlC__) || defined(__clang__)
return VMULL2LE(__vpmsumd (VectorGetLow(a), b));
#else
return VMULL2LE(__builtin_crypto_vpmsumd (VectorGetLow(a), b));
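The four VMULL_* helpers above wrap POWER8's vpmsumd carryless multiply for GCM. The guard change routes both XL front ends and plain Clang to the IBM-style __vpmsumd spelling and leaves __builtin_crypto_vpmsumd to GCC. A condensed sketch of the shared pattern (the wrapper name VecPolyMul is illustrative, not from the patch):

    #include <altivec.h>
    typedef __vector unsigned long long uint64x2_p;

    // Only the spelling of the intrinsic differs between families;
    // both compile to the vpmsumd instruction.
    inline uint64x2_p VecPolyMul(const uint64x2_p a, const uint64x2_p b)
    {
    #if defined(__xlc__) || defined(__xlC__) || defined(__clang__)
        return __vpmsumd(a, b);                 // IBM-style intrinsic
    #else
        return __builtin_crypto_vpmsumd(a, b);  // GCC-style builtin
    #endif
    }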
diff --git a/ppc_simd.h b/ppc_simd.h
index 1efd2030..eaf0268e 100644
--- a/ppc_simd.h
+++ b/ppc_simd.h
@@ -408,7 +408,7 @@ inline bool VectorNotEqual(const T1& vec1, const T2& vec2)
/// \since Crypto++ 6.0
inline uint32x4_p VectorLoadBE(const byte src[16])
{
-#if defined(CRYPTOPP_XLC_VERSION)
+#if defined(__xlc__) || defined(__xlC__) || defined(__clang__)
return (uint32x4_p)vec_xl_be(0, (byte*)src);
#else
# if (CRYPTOPP_BIG_ENDIAN)
@@ -429,7 +429,7 @@ inline uint32x4_p VectorLoadBE(const byte src[16])
/// \since Crypto++ 6.0
inline uint32x4_p VectorLoadBE(int off, const byte src[16])
{
-#if defined(CRYPTOPP_XLC_VERSION)
+#if defined(__xlc__) || defined(__xlC__) || defined(__clang__)
return (uint32x4_p)vec_xl_be(off, (byte*)src);
#else
# if (CRYPTOPP_BIG_ENDIAN)
@@ -448,7 +448,7 @@ inline uint32x4_p VectorLoadBE(int off, const byte src[16])
/// \since Crypto++ 6.0
inline uint32x4_p VectorLoad(const byte src[16])
{
-#if defined(CRYPTOPP_XLC_VERSION)
+#if defined(__xlc__) || defined(__xlC__) || defined(__clang__)
return (uint32x4_p)vec_xl(0, (byte*)src);
#else
return (uint32x4_p)vec_vsx_ld(0, (byte*)src);
@@ -464,7 +464,7 @@ inline uint32x4_p VectorLoad(const byte src[16])
/// \since Crypto++ 6.0
inline uint32x4_p VectorLoad(int off, const byte src[16])
{
-#if defined(CRYPTOPP_XLC_VERSION)
+#if defined(__xlc__) || defined(__xlC__) || defined(__clang__)
return (uint32x4_p)vec_xl(off, (byte*)src);
#else
return (uint32x4_p)vec_vsx_ld(off, (byte*)src);
@@ -506,7 +506,7 @@ inline uint32x4_p VectorLoad(int off, const word32 src[4])
template <class T>
inline void VectorStoreBE(const T& src, byte dest[16])
{
-#if defined(CRYPTOPP_XLC_VERSION)
+#if defined(__xlc__) || defined(__xlC__) || defined(__clang__)
vec_xst_be((uint8x16_p)src, 0, (byte*)dest);
#else
# if (CRYPTOPP_BIG_ENDIAN)
@@ -530,7 +530,7 @@ inline void VectorStoreBE(const T& src, byte dest[16])
template <class T>
inline void VectorStoreBE(const T& src, int off, byte dest[16])
{
-#if defined(CRYPTOPP_XLC_VERSION)
+#if defined(__xlc__) || defined(__xlC__) || defined(__clang__)
vec_xst_be((uint8x16_p)src, off, (byte*)dest);
#else
# if (CRYPTOPP_BIG_ENDIAN)
@@ -551,7 +551,7 @@ inline void VectorStoreBE(const T& src, int off, byte dest[16])
template<class T>
inline void VectorStore(const T& src, byte dest[16])
{
-#if defined(CRYPTOPP_XLC_VERSION)
+#if defined(__xlc__) || defined(__xlC__) || defined(__clang__)
vec_xst((uint8x16_p)src, 0, (byte*)dest);
#else
vec_vsx_st((uint8x16_p)src, 0, (byte*)dest);
@@ -568,7 +568,7 @@ inline void VectorStore(const T& src, byte dest[16])
template<class T>
inline void VectorStore(byte dest[16], const T& src)
{
-#if defined(CRYPTOPP_XLC_VERSION)
+#if defined(__xlc__) || defined(__xlC__) || defined(__clang__)
vec_xst((uint8x16_p)src, 0, (byte*)dest);
#else
vec_vsx_st((uint8x16_p)src, 0, (byte*)dest);
@@ -586,7 +586,7 @@ inline void VectorStore(byte dest[16], const T& src)
template<class T>
inline void VectorStore(const T& src, int off, byte dest[16])
{
-#if defined(CRYPTOPP_XLC_VERSION)
+#if defined(__xlc__) || defined(__xlC__) || defined(__clang__)
vec_xst((uint8x16_p)src, off, (byte*)dest);
#else
vec_vsx_st((uint8x16_p)src, off, (byte*)dest);
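The load/store hunks above make the same substitution for unaligned VSX memory access: the XL and Clang front ends spell it vec_xl/vec_xst, while GCC uses vec_vsx_ld/vec_vsx_st. A self-contained sketch of the pattern (LoadBlock and StoreBlock are illustrative names):

    #include <altivec.h>
    typedef __vector unsigned char uint8x16_p;
    typedef __vector unsigned int  uint32x4_p;

    // Unaligned 16-byte load, dispatched on front-end family.
    inline uint32x4_p LoadBlock(const unsigned char src[16])
    {
    #if defined(__xlc__) || defined(__xlC__) || defined(__clang__)
        return (uint32x4_p)vec_xl(0, (unsigned char*)src);      // XL/Clang
    #else
        return (uint32x4_p)vec_vsx_ld(0, (unsigned char*)src);  // GCC
    #endif
    }

    // Unaligned 16-byte store, same dispatch.
    inline void StoreBlock(const uint32x4_p val, unsigned char dest[16])
    {
    #if defined(__xlc__) || defined(__xlC__) || defined(__clang__)
        vec_xst((uint8x16_p)val, 0, (unsigned char*)dest);
    #else
        vec_vsx_st((uint8x16_p)val, 0, (unsigned char*)dest);
    #endif
    }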
@@ -736,9 +736,9 @@ inline void VectorStoreBE(const T& src, byte dest[16])
template <class T1, class T2>
inline T1 VectorEncrypt(const T1& state, const T2& key)
{
-#if defined(CRYPTOPP_XLC_VERSION)
+#if defined(__xlc__) || defined(__xlC__) || defined(__clang__)
return (T1)__vcipher((uint8x16_p)state, (uint8x16_p)key);
-#elif defined(CRYPTOPP_GCC_VERSION)
+#elif defined(__GNUC__)
return (T1)__builtin_crypto_vcipher((uint64x2_p)state, (uint64x2_p)key);
#else
CRYPTOPP_ASSERT(0);
@@ -756,9 +756,9 @@ inline T1 VectorEncrypt(const T1& state, const T2& key)
template <class T1, class T2>
inline T1 VectorEncryptLast(const T1& state, const T2& key)
{
-#if defined(CRYPTOPP_XLC_VERSION)
+#if defined(__xlc__) || defined(__xlC__) || defined(__clang__)
return (T1)__vcipherlast((uint8x16_p)state, (uint8x16_p)key);
-#elif defined(CRYPTOPP_GCC_VERSION)
+#elif defined(__GNUC__)
return (T1)__builtin_crypto_vcipherlast((uint64x2_p)state, (uint64x2_p)key);
#else
CRYPTOPP_ASSERT(0);
@@ -776,9 +776,9 @@ inline T1 VectorEncryptLast(const T1& state, const T2& key)
template <class T1, class T2>
inline T1 VectorDecrypt(const T1& state, const T2& key)
{
-#if defined(CRYPTOPP_XLC_VERSION)
+#if defined(__xlc__) || defined(__xlC__) || defined(__clang__)
return (T1)__vncipher((uint8x16_p)state, (uint8x16_p)key);
-#elif defined(CRYPTOPP_GCC_VERSION)
+#elif defined(__GNUC__)
return (T1)__builtin_crypto_vncipher((uint64x2_p)state, (uint64x2_p)key);
#else
CRYPTOPP_ASSERT(0);
@@ -796,9 +796,9 @@ inline T1 VectorDecrypt(const T1& state, const T2& key)
template <class T1, class T2>
inline T1 VectorDecryptLast(const T1& state, const T2& key)
{
-#if defined(CRYPTOPP_XLC_VERSION)
+#if defined(__xlc__) || defined(__xlC__) || defined(__clang__)
return (T1)__vncipherlast((uint8x16_p)state, (uint8x16_p)key);
-#elif defined(CRYPTOPP_GCC_VERSION)
+#elif defined(__GNUC__)
return (T1)__builtin_crypto_vncipherlast((uint64x2_p)state, (uint64x2_p)key);
#else
CRYPTOPP_ASSERT(0);
@@ -820,9 +820,9 @@ inline T1 VectorDecryptLast(const T1& state, const T2& key)
template <int func, int subfunc, class T>
inline T VectorSHA256(const T& vec)
{
-#if defined(CRYPTOPP_XLC_VERSION)
+#if defined(__xlc__) || defined(__xlC__) || defined(__clang__)
return (T)__vshasigmaw((uint32x4_p)vec, func, subfunc);
-#elif defined(CRYPTOPP_GCC_VERSION)
+#elif defined(__GNUC__)
return (T)__builtin_crypto_vshasigmaw((uint32x4_p)vec, func, subfunc);
#else
CRYPTOPP_ASSERT(0);
@@ -840,9 +840,9 @@ inline T VectorSHA256(const T& vec)
template <int func, int subfunc, class T>
inline T VectorSHA512(const T& vec)
{
-#if defined(CRYPTOPP_XLC_VERSION)
+#if defined(__xlc__) || defined(__xlC__) || defined(__clang__)
return (T)__vshasigmad((uint64x2_p)vec, func, subfunc);
-#elif defined(CRYPTOPP_GCC_VERSION)
+#elif defined(__GNUC__)
return (T)__builtin_crypto_vshasigmad((uint64x2_p)vec, func, subfunc);
#else
CRYPTOPP_ASSERT(0);
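For the POWER8 crypto builtins the patch also loosens the fall-through from CRYPTOPP_GCC_VERSION to a plain __GNUC__ test, since GCC is the only remaining family that provides the __builtin_crypto_* spellings. A condensed sketch of one AES round under those guards (AesRound is an illustrative name; the real functions above are templates):

    #include <altivec.h>
    typedef __vector unsigned char      uint8x16_p;
    typedef __vector unsigned long long uint64x2_p;

    // One AES encryption round via the POWER8 vcipher instruction.
    inline uint8x16_p AesRound(const uint8x16_p state, const uint8x16_p key)
    {
    #if defined(__xlc__) || defined(__xlC__) || defined(__clang__)
        return __vcipher(state, key);                     // IBM spelling
    #elif defined(__GNUC__)
        return (uint8x16_p)__builtin_crypto_vcipher(
            (uint64x2_p)state, (uint64x2_p)key);          // GCC spelling
    #else
    #   error "no POWER8 AES intrinsic available"         // sketch-only guard
    #endif
    }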
diff --git a/sha_simd.cpp b/sha_simd.cpp
index bb903c9d..8a40b7ea 100644
--- a/sha_simd.cpp
+++ b/sha_simd.cpp
@@ -1093,7 +1093,7 @@ typedef __vector unsigned long long uint64x2_p8;
template <class T> static inline
uint32x4_p8 VectorLoad32x4u(const T* data, int offset)
{
-#if defined(CRYPTOPP_XLC_VERSION)
+#if defined(__xlc__) || defined(__xlC__) || defined(__clang__)
return (uint32x4_p8)vec_xl(offset, (uint8_t*)data);
#else
return (uint32x4_p8)vec_vsx_ld(offset, data);
@@ -1104,7 +1104,7 @@ uint32x4_p8 VectorLoad32x4u(const T* data, int offset)
template <class T> static inline
void VectorStore32x4u(const uint32x4_p8 val, T* data, int offset)
{
-#if defined(CRYPTOPP_XLC_VERSION)
+#if defined(__xlc__) || defined(__xlC__) || defined(__clang__)
vec_xst((uint8x16_p8)val, offset, (uint8_t*)data);
#else
vec_vsx_st((uint8x16_p8)val, offset, (uint8_t*)data);
@@ -1142,7 +1142,7 @@ uint32x4_p8 VectorMaj(const uint32x4_p8 x, const uint32x4_p8 y, const uint32x4_p
static inline
uint32x4_p8 Vector_sigma0(const uint32x4_p8 val)
{
-#if defined(CRYPTOPP_XLC_VERSION)
+#if defined(__xlc__) || defined(__xlC__) || defined(__clang__)
return __vshasigmaw(val, 0, 0);
#else
return __builtin_crypto_vshasigmaw(val, 0, 0);
@@ -1152,7 +1152,7 @@ uint32x4_p8 Vector_sigma0(const uint32x4_p8 val)
static inline
uint32x4_p8 Vector_sigma1(const uint32x4_p8 val)
{
-#if defined(CRYPTOPP_XLC_VERSION)
+#if defined(__xlc__) || defined(__xlC__) || defined(__clang__)
return __vshasigmaw(val, 0, 0xf);
#else
return __builtin_crypto_vshasigmaw(val, 0, 0xf);
@@ -1162,7 +1162,7 @@ uint32x4_p8 Vector_sigma1(const uint32x4_p8 val)
static inline
uint32x4_p8 VectorSigma0(const uint32x4_p8 val)
{
-#if defined(CRYPTOPP_XLC_VERSION)
+#if defined(__xlc__) || defined(__xlC__) || defined(__clang__)
return __vshasigmaw(val, 1, 0);
#else
return __builtin_crypto_vshasigmaw(val, 1, 0);
@@ -1172,7 +1172,7 @@ uint32x4_p8 VectorSigma0(const uint32x4_p8 val)
static inline
uint32x4_p8 VectorSigma1(const uint32x4_p8 val)
{
-#if defined(CRYPTOPP_XLC_VERSION)
+#if defined(__xlc__) || defined(__xlC__) || defined(__clang__)
return __vshasigmaw(val, 1, 0xf);
#else
return __builtin_crypto_vshasigmaw(val, 1, 0xf);
@@ -1385,7 +1385,7 @@ uint64x2_p8 VectorPermute64x2(const uint64x2_p8 val, const uint8x16_p8 mask)
template <class T> static inline
uint64x2_p8 VectorLoad64x2u(const T* data, int offset)
{
-#if defined(CRYPTOPP_XLC_VERSION)
+#if defined(__xlc__) || defined(__xlC__) || defined(__clang__)
return (uint64x2_p8)vec_xl(offset, (uint8_t*)data);
#else
return (uint64x2_p8)vec_vsx_ld(offset, (const uint8_t*)data);
@@ -1396,7 +1396,7 @@ uint64x2_p8 VectorLoad64x2u(const T* data, int offset)
template <class T> static inline
void VectorStore64x2u(const uint64x2_p8 val, T* data, int offset)
{
-#if defined(CRYPTOPP_XLC_VERSION)
+#if defined(__xlc__) || defined(__xlC__) || defined(__clang__)
vec_xst((uint8x16_p8)val, offset, (uint8_t*)data);
#else
vec_vsx_st((uint8x16_p8)val, offset, (uint8_t*)data);
@@ -1433,7 +1433,7 @@ uint64x2_p8 VectorMaj(const uint64x2_p8 x, const uint64x2_p8 y, const uint64x2_p
static inline
uint64x2_p8 Vector_sigma0(const uint64x2_p8 val)
{
-#if defined(CRYPTOPP_XLC_VERSION)
+#if defined(__xlc__) || defined(__xlC__) || defined(__clang__)
return __vshasigmad(val, 0, 0);
#else
return __builtin_crypto_vshasigmad(val, 0, 0);
@@ -1443,7 +1443,7 @@ uint64x2_p8 Vector_sigma0(const uint64x2_p8 val)
static inline
uint64x2_p8 Vector_sigma1(const uint64x2_p8 val)
{
-#if defined(CRYPTOPP_XLC_VERSION)
+#if defined(__xlc__) || defined(__xlC__) || defined(__clang__)
return __vshasigmad(val, 0, 0xf);
#else
return __builtin_crypto_vshasigmad(val, 0, 0xf);
@@ -1453,7 +1453,7 @@ uint64x2_p8 Vector_sigma1(const uint64x2_p8 val)
static inline
uint64x2_p8 VectorSigma0(const uint64x2_p8 val)
{
-#if defined(CRYPTOPP_XLC_VERSION)
+#if defined(__xlc__) || defined(__xlC__) || defined(__clang__)
return __vshasigmad(val, 1, 0);
#else
return __builtin_crypto_vshasigmad(val, 1, 0);
@@ -1463,7 +1463,7 @@ uint64x2_p8 VectorSigma0(const uint64x2_p8 val)
static inline
uint64x2_p8 VectorSigma1(const uint64x2_p8 val)
{
-#if defined(CRYPTOPP_XLC_VERSION)
+#if defined(__xlc__) || defined(__xlC__) || defined(__clang__)
return __vshasigmad(val, 1, 0xf);
#else
return __builtin_crypto_vshasigmad(val, 1, 0xf);
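sha_simd.cpp applies the identical guard to the SHA sigma helpers; the 32-bit vshasigmaw and 64-bit vshasigmad variants differ only in element width. A minimal sketch of the SHA-256 big-Sigma0 helper as patched (the name Sigma0 is illustrative):

    #include <altivec.h>
    typedef __vector unsigned int uint32x4_p8;

    // func=1 selects the "big" Sigma, subfunc=0 selects Sigma0, matching
    // VectorSigma0 above; the same pattern covers all eight helpers.
    static inline uint32x4_p8 Sigma0(const uint32x4_p8 val)
    {
    #if defined(__xlc__) || defined(__xlC__) || defined(__clang__)
        return __vshasigmaw(val, 1, 0);                 // IBM-style intrinsic
    #else
        return __builtin_crypto_vshasigmaw(val, 1, 0);  // GCC builtin
    #endif
    }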