Diffstat (limited to 'speck128_simd.cpp')
-rw-r--r--  speck128_simd.cpp | 52
1 file changed, 30 insertions(+), 22 deletions(-)
diff --git a/speck128_simd.cpp b/speck128_simd.cpp
index 75e2c48f..14efeede 100644
--- a/speck128_simd.cpp
+++ b/speck128_simd.cpp
@@ -1,4 +1,4 @@
-// speck-simd.cpp - written and placed in the public domain by Jeffrey Walton
+// speck128_simd.cpp - written and placed in the public domain by Jeffrey Walton
//
// This source file uses intrinsics and built-ins to gain access to
// SSSE3, ARM NEON and ARMv8a, and Power7 Altivec instructions. A separate
@@ -12,6 +12,14 @@
#include "misc.h"
#include "adv_simd.h"
+#ifndef CRYPTOPP_INLINE
+# if defined(CRYPTOPP_DEBUG)
+# define CRYPTOPP_INLINE static
+# else
+# define CRYPTOPP_INLINE inline
+# endif
+#endif
+
// Uncomment for benchmarking C++ against SSE or NEON.
// Do so in both speck.cpp and speck-simd.cpp.
// #undef CRYPTOPP_SSSE3_AVAILABLE
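// A short sketch of how the new CRYPTOPP_INLINE macro resolves. The rationale
// here is an assumption: debug builds use `static` so each translation unit
// keeps a named, debuggable copy of these helpers, while release builds get
// ordinary `inline`. `Twice` is a hypothetical helper for illustration:

    #include <cstdint>
    #if defined(CRYPTOPP_DEBUG)
    # define CRYPTOPP_INLINE static   // internal linkage; a symbol per TU
    #else
    # define CRYPTOPP_INLINE inline   // ordinary inlining in release builds
    #endif
    CRYPTOPP_INLINE uint64_t Twice(uint64_t x) { return x + x; }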
@@ -42,7 +50,7 @@
# include <arm_acle.h>
#endif
-#if defined(CRYPTOPP_POWER7_AVAILABLE)
+#if defined(CRYPTOPP_POWER8_AVAILABLE)
# include "ppc_simd.h"
#endif
@@ -60,7 +68,7 @@ using CryptoPP::word64;
#if (CRYPTOPP_ARM_NEON_AVAILABLE)
template <class T>
-inline T UnpackHigh64(const T& a, const T& b)
+CRYPTOPP_INLINE T UnpackHigh64(const T& a, const T& b)
{
const uint64x1_t x(vget_high_u64((uint64x2_t)a));
const uint64x1_t y(vget_high_u64((uint64x2_t)b));
@@ -68,7 +76,7 @@ inline T UnpackHigh64(const T& a, const T& b)
}
template <class T>
-inline T UnpackLow64(const T& a, const T& b)
+CRYPTOPP_INLINE T UnpackLow64(const T& a, const T& b)
{
const uint64x1_t x(vget_low_u64((uint64x2_t)a));
const uint64x1_t y(vget_low_u64((uint64x2_t)b));
@@ -76,7 +84,7 @@ inline T UnpackLow64(const T& a, const T& b)
}
template <unsigned int R>
-inline uint64x2_t RotateLeft64(const uint64x2_t& val)
+CRYPTOPP_INLINE uint64x2_t RotateLeft64(const uint64x2_t& val)
{
const uint64x2_t a(vshlq_n_u64(val, R));
const uint64x2_t b(vshrq_n_u64(val, 64 - R));
@@ -84,7 +92,7 @@ inline uint64x2_t RotateLeft64(const uint64x2_t& val)
}
template <unsigned int R>
-inline uint64x2_t RotateRight64(const uint64x2_t& val)
+CRYPTOPP_INLINE uint64x2_t RotateRight64(const uint64x2_t& val)
{
const uint64x2_t a(vshlq_n_u64(val, 64 - R));
const uint64x2_t b(vshrq_n_u64(val, R));
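// A scalar reference for the vector rotates above: each 64-bit lane gets two
// shifts and an OR. Names are illustrative, not library functions:

    #include <cstdint>
    template <unsigned int R>
    uint64_t RotL64_Ref(uint64_t v)
    {
        static_assert(R > 0 && R < 64, "rotate count");
        return (v << R) | (v >> (64 - R));
    }
    template <unsigned int R>
    uint64_t RotR64_Ref(uint64_t v)
    {
        static_assert(R > 0 && R < 64, "rotate count");
        return (v >> R) | (v << (64 - R));
    }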
@@ -94,7 +102,7 @@ inline uint64x2_t RotateRight64(const uint64x2_t& val)
#if defined(__aarch32__) || defined(__aarch64__)
// Faster than two Shifts and an Or. Thanks to Louis Wingers and Bryan Weeks.
template <>
-inline uint64x2_t RotateLeft64<8>(const uint64x2_t& val)
+CRYPTOPP_INLINE uint64x2_t RotateLeft64<8>(const uint64x2_t& val)
{
#if (CRYPTOPP_BIG_ENDIAN)
const uint8_t maskb[16] = { 14,13,12,11, 10,9,8,15, 6,5,4,3, 2,1,0,7 };
@@ -110,7 +118,7 @@ inline uint64x2_t RotateLeft64<8>(const uint64x2_t& val)
// Faster than two Shifts and an Or. Thanks to Louis Wingers and Bryan Weeks.
template <>
-inline uint64x2_t RotateRight64<8>(const uint64x2_t& val)
+CRYPTOPP_INLINE uint64x2_t RotateRight64<8>(const uint64x2_t& val)
{
#if (CRYPTOPP_BIG_ENDIAN)
const uint8_t maskb[16] = { 8,15,14,13, 12,11,10,9, 0,7,6,5, 4,3,2,1 };
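// Why a byte shuffle can replace the rotate: rotating a 64-bit lane left by
// 8 bits is exactly moving each byte up one position (mod 8), so one table
// lookup (vtbl above) replaces two shifts and an OR. A scalar check,
// illustrative only, assuming a little-endian byte view:

    #include <cstdint>
    #include <cstring>
    uint64_t RotL8_ViaBytes(uint64_t v)
    {
        uint8_t in[8], out[8];
        std::memcpy(in, &v, 8);
        for (int i = 0; i < 8; ++i)
            out[(i + 1) % 8] = in[i];   // byte i moves to byte i+1 (mod 8)
        std::memcpy(&v, out, 8);
        return v;                       // equals (v << 8) | (v >> 56)
    }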
@@ -125,7 +133,7 @@ inline uint64x2_t RotateRight64<8>(const uint64x2_t& val)
}
#endif
-inline void SPECK128_Enc_Block(uint64x2_t &block0, uint64x2_t &block1,
+CRYPTOPP_INLINE void SPECK128_Enc_Block(uint64x2_t &block0, uint64x2_t &block1,
const word64 *subkeys, unsigned int rounds)
{
// [A1 A2][B1 B2] ... => [A1 B1][A2 B2] ...
@@ -148,7 +156,7 @@ inline void SPECK128_Enc_Block(uint64x2_t &block0, uint64x2_t &block1,
block1 = UnpackHigh64(y1, x1);
}
-inline void SPECK128_Enc_6_Blocks(uint64x2_t &block0, uint64x2_t &block1,
+CRYPTOPP_INLINE void SPECK128_Enc_6_Blocks(uint64x2_t &block0, uint64x2_t &block1,
uint64x2_t &block2, uint64x2_t &block3, uint64x2_t &block4, uint64x2_t &block5,
const word64 *subkeys, unsigned int rounds)
{
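// For reference, the round the encryption bodies above vectorize. SPECK-128
// rotates by alpha = 8 and beta = 3 on 64-bit words; a scalar sketch with
// illustrative helper names:

    #include <cstdint>
    static uint64_t Rol64(uint64_t v, unsigned r) { return (v << r) | (v >> (64 - r)); }
    static uint64_t Ror64(uint64_t v, unsigned r) { return (v >> r) | (v << (64 - r)); }
    static void Speck128EncRound(uint64_t& x, uint64_t& y, uint64_t k)
    {
        x = (Ror64(x, 8) + y) ^ k;   // x' = ((x >>> 8) + y) ^ k
        y = Rol64(y, 3) ^ x;         // y' = (y <<< 3) ^ x'
    }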
@@ -190,7 +198,7 @@ inline void SPECK128_Enc_6_Blocks(uint64x2_t &block0, uint64x2_t &block1,
block5 = UnpackHigh64(y3, x3);
}
-inline void SPECK128_Dec_Block(uint64x2_t &block0, uint64x2_t &block1,
+CRYPTOPP_INLINE void SPECK128_Dec_Block(uint64x2_t &block0, uint64x2_t &block1,
const word64 *subkeys, unsigned int rounds)
{
// [A1 A2][B1 B2] ... => [A1 B1][A2 B2] ...
@@ -213,7 +221,7 @@ inline void SPECK128_Dec_Block(uint64x2_t &block0, uint64x2_t &block1,
block1 = UnpackHigh64(y1, x1);
}
-inline void SPECK128_Dec_6_Blocks(uint64x2_t &block0, uint64x2_t &block1,
+CRYPTOPP_INLINE void SPECK128_Dec_6_Blocks(uint64x2_t &block0, uint64x2_t &block1,
uint64x2_t &block2, uint64x2_t &block3, uint64x2_t &block4, uint64x2_t &block5,
const word64 *subkeys, unsigned int rounds)
{
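// The matching inverse round used by the decryption bodies above; it undoes
// the encryption round step by step (same illustrative helpers as before):

    #include <cstdint>
    static uint64_t Rol64(uint64_t v, unsigned r) { return (v << r) | (v >> (64 - r)); }
    static uint64_t Ror64(uint64_t v, unsigned r) { return (v >> r) | (v << (64 - r)); }
    static void Speck128DecRound(uint64_t& x, uint64_t& y, uint64_t k)
    {
        y = Ror64(y ^ x, 3);          // undo y' = (y <<< 3) ^ x'
        x = Rol64((x ^ k) - y, 8);    // undo x' = ((x >>> 8) + y) ^ k
    }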
@@ -278,7 +286,7 @@ inline void SPECK128_Dec_6_Blocks(uint64x2_t &block0, uint64x2_t &block1,
#endif
template <unsigned int R>
-inline __m128i RotateLeft64(const __m128i& val)
+CRYPTOPP_INLINE __m128i RotateLeft64(const __m128i& val)
{
#if defined(CRYPTOPP_AVX512_ROTATE)
return _mm_rol_epi64(val, R);
@@ -291,7 +299,7 @@ inline __m128i RotateLeft64(const __m128i& val)
}
template <unsigned int R>
-inline __m128i RotateRight64(const __m128i& val)
+CRYPTOPP_INLINE __m128i RotateRight64(const __m128i& val)
{
#if defined(CRYPTOPP_AVX512_ROTATE)
return _mm_ror_epi64(val, R);
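// A sketch of the branch these templates presumably fall back to without
// AVX-512: two shifts and an OR per 64-bit lane, as in the NEON path
// (assumes SSE2; the function name is illustrative):

    #include <emmintrin.h>
    template <unsigned int R>
    __m128i RotL64_SSE2(const __m128i& v)
    {
        return _mm_or_si128(_mm_slli_epi64(v, R), _mm_srli_epi64(v, 64 - R));
    }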
@@ -305,7 +313,7 @@ inline __m128i RotateRight64(const __m128i& val)
// Faster than two Shifts and an Or. Thanks to Louis Wingers and Bryan Weeks.
template <>
-inline __m128i RotateLeft64<8>(const __m128i& val)
+CRYPTOPP_INLINE __m128i RotateLeft64<8>(const __m128i& val)
{
#if defined(__XOP__)
return _mm_roti_epi64(val, 8);
@@ -317,7 +325,7 @@ inline __m128i RotateLeft64<8>(const __m128i& val)
// Faster than two Shifts and an Or. Thanks to Louis Wingers and Bryan Weeks.
template <>
-inline __m128i RotateRight64<8>(const __m128i& val)
+CRYPTOPP_INLINE __m128i RotateRight64<8>(const __m128i& val)
{
#if defined(__XOP__)
return _mm_roti_epi64(val, 64-8);
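// Without XOP, the <8> specializations can still avoid the shifts by using a
// single SSSE3 byte shuffle; a sketch of the left-rotate case (the mask
// mirrors the little-endian NEON mask shown earlier):

    #include <tmmintrin.h>
    __m128i RotL64By8_SSSE3(const __m128i& v)
    {
        const __m128i mask = _mm_set_epi8(14,13,12,11, 10,9,8,15, 6,5,4,3, 2,1,0,7);
        return _mm_shuffle_epi8(v, mask);
    }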
@@ -327,7 +335,7 @@ inline __m128i RotateRight64<8>(const __m128i& val)
#endif
}
-inline void SPECK128_Enc_Block(__m128i &block0, __m128i &block1,
+CRYPTOPP_INLINE void SPECK128_Enc_Block(__m128i &block0, __m128i &block1,
const word64 *subkeys, unsigned int rounds)
{
// [A1 A2][B1 B2] ... => [A1 B1][A2 B2] ...
@@ -351,7 +359,7 @@ inline void SPECK128_Enc_Block(__m128i &block0, __m128i &block1,
block1 = _mm_unpackhi_epi64(y1, x1);
}
-inline void SPECK128_Enc_6_Blocks(__m128i &block0, __m128i &block1,
+CRYPTOPP_INLINE void SPECK128_Enc_6_Blocks(__m128i &block0, __m128i &block1,
__m128i &block2, __m128i &block3, __m128i &block4, __m128i &block5,
const word64 *subkeys, unsigned int rounds)
{
@@ -394,7 +402,7 @@ inline void SPECK128_Enc_6_Blocks(__m128i &block0, __m128i &block1,
block5 = _mm_unpackhi_epi64(y3, x3);
}
-inline void SPECK128_Dec_Block(__m128i &block0, __m128i &block1,
+CRYPTOPP_INLINE void SPECK128_Dec_Block(__m128i &block0, __m128i &block1,
const word64 *subkeys, unsigned int rounds)
{
// [A1 A2][B1 B2] ... => [A1 B1][A2 B2] ...
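// What that comment describes: the unpack intrinsics regroup two 128-bit
// blocks so each vector holds the same SPECK word from both blocks, letting
// one SIMD round advance two blocks at once. A sketch (SSE2):

    #include <emmintrin.h>
    static void DeinterleaveBlocks(__m128i a, __m128i b, __m128i& x, __m128i& y)
    {
        x = _mm_unpacklo_epi64(a, b);   // [A1 B1]: low 64-bit halves
        y = _mm_unpackhi_epi64(a, b);   // [A2 B2]: high 64-bit halves
    }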
@@ -418,7 +426,7 @@ inline void SPECK128_Dec_Block(__m128i &block0, __m128i &block1,
block1 = _mm_unpackhi_epi64(y1, x1);
}
-inline void SPECK128_Dec_6_Blocks(__m128i &block0, __m128i &block1,
+CRYPTOPP_INLINE void SPECK128_Dec_6_Blocks(__m128i &block0, __m128i &block1,
__m128i &block2, __m128i &block3, __m128i &block4, __m128i &block5,
const word64 *subkeys, unsigned int rounds)
{
@@ -477,7 +485,7 @@ using CryptoPP::VectorXor;
// Rotate left by bit count
template<unsigned int C>
-inline uint64x2_p RotateLeft64(const uint64x2_p val)
+CRYPTOPP_INLINE uint64x2_p RotateLeft64(const uint64x2_p val)
{
const uint64x2_p m = {C, C};
return vec_rl(val, m);
@@ -485,7 +493,7 @@ inline uint64x2_p RotateLeft64(const uint64x2_p val)
// Rotate right by bit count
template<unsigned int C>
-inline uint64x2_p RotateRight64(const uint64x2_p val)
+CRYPTOPP_INLINE uint64x2_p RotateRight64(const uint64x2_p val)
{
const uint64x2_p m = {64-C, 64-C};
return vec_rl(val, m);
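// vec_rl only rotates left, which is why RotateRight64 above rotates left by
// 64-C; 64-bit vector lanes also require POWER8, hence the guard change at
// the top of the diff. A scalar check of the rotate identity:

    #include <cstdint>
    static uint64_t Rol64(uint64_t v, unsigned r) { return (v << r) | (v >> (64 - r)); }
    static uint64_t Ror64(uint64_t v, unsigned r) { return (v >> r) | (v << (64 - r)); }
    static bool RotateIdentityHolds(uint64_t v, unsigned c)   // 0 < c < 64
    {
        return Ror64(v, c) == Rol64(v, 64 - c);   // always true
    }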