author      Jeffrey Walton <noloader@gmail.com>    2018-08-15 10:02:00 -0400
committer   Jeffrey Walton <noloader@gmail.com>    2018-08-15 10:02:00 -0400
commit      f28fb0a1760b6d7117468339e4299ef67cf29cf5 (patch)
tree        591a14933abda3d9d11c55da6f64d3ccb9f9621f /speck128-simd.cpp
parent      7a43a040488bd302bfbc29dfa655452ccfb3d5a3 (diff)
download    cryptopp-git-f28fb0a1760b6d7117468339e4299ef67cf29cf5.tar.gz
Add SIMON-128 and SPECK-128 source files
Diffstat (limited to 'speck128-simd.cpp')
-rw-r--r--   speck128-simd.cpp   738
1 file changed, 738 insertions, 0 deletions
diff --git a/speck128-simd.cpp b/speck128-simd.cpp
new file mode 100644
index 00000000..fe42038d
--- /dev/null
+++ b/speck128-simd.cpp
@@ -0,0 +1,738 @@
+// speck128-simd.cpp - written and placed in the public domain by Jeffrey Walton
+//
+// This source file uses intrinsics and built-ins to gain access to
+// SSSE3, ARM NEON and ARMv8a, and Power8 Altivec instructions. A separate
+// source file is needed because additional CXXFLAGS are required to enable
+// the appropriate instruction sets in some build configurations.
+
+#include "pch.h"
+#include "config.h"
+
+#include "speck.h"
+#include "misc.h"
+#include "adv-simd.h"
+
+// Uncomment for benchmarking C++ against SSE or NEON.
+// Do so in both speck.cpp and speck128-simd.cpp.
+// #undef CRYPTOPP_SSSE3_AVAILABLE
+// #undef CRYPTOPP_SSE41_AVAILABLE
+// #undef CRYPTOPP_ARM_NEON_AVAILABLE
+
+#if (CRYPTOPP_SSSE3_AVAILABLE)
+# include <pmmintrin.h>
+# include <tmmintrin.h>
+#endif
+
+#if (CRYPTOPP_SSE41_AVAILABLE)
+# include <smmintrin.h>
+#endif
+
+#if defined(__AVX512F__) && defined(__AVX512VL__)
+# define CRYPTOPP_AVX512_ROTATE 1
+# include <immintrin.h>
+#endif
+
+#if (CRYPTOPP_ARM_NEON_AVAILABLE)
+# include <arm_neon.h>
+#endif
+
+// Can't use CRYPTOPP_ARM_XXX_AVAILABLE because too many
+// compilers don't follow ACLE conventions for the include.
+#if defined(CRYPTOPP_ARM_ACLE_AVAILABLE)
+# include <stdint.h>
+# include <arm_acle.h>
+#endif
+
+#if defined(CRYPTOPP_POWER8_AVAILABLE)
+# include "ppc-simd.h"
+#endif
+
+// Squash MS LNK4221 and libtool warnings
+extern const char SPECK128_SIMD_FNAME[] = __FILE__;
+
+ANONYMOUS_NAMESPACE_BEGIN
+
+using CryptoPP::byte;
+using CryptoPP::word32;
+using CryptoPP::word64;
+
+// *************************** ARM NEON ************************** //
+
+#if (CRYPTOPP_ARM_NEON_AVAILABLE)
+
+template <class T>
+inline T UnpackHigh64(const T& a, const T& b)
+{
+ const uint64x1_t x(vget_high_u64((uint64x2_t)a));
+ const uint64x1_t y(vget_high_u64((uint64x2_t)b));
+ return (T)vcombine_u64(x, y);
+}
+
+template <class T>
+inline T UnpackLow64(const T& a, const T& b)
+{
+ const uint64x1_t x(vget_low_u64((uint64x2_t)a));
+ const uint64x1_t y(vget_low_u64((uint64x2_t)b));
+ return (T)vcombine_u64(x, y);
+}
+
+template <unsigned int R>
+inline uint64x2_t RotateLeft64(const uint64x2_t& val)
+{
+ const uint64x2_t a(vshlq_n_u64(val, R));
+ const uint64x2_t b(vshrq_n_u64(val, 64 - R));
+ return vorrq_u64(a, b);
+}
+
+template <unsigned int R>
+inline uint64x2_t RotateRight64(const uint64x2_t& val)
+{
+ const uint64x2_t a(vshlq_n_u64(val, 64 - R));
+ const uint64x2_t b(vshrq_n_u64(val, R));
+ return vorrq_u64(a, b);
+}
+
+#if defined(__aarch32__) || defined(__aarch64__)
+// Faster than two Shifts and an Or. Thanks to Louis Wingers and Bryan Weeks.
+template <>
+inline uint64x2_t RotateLeft64<8>(const uint64x2_t& val)
+{
+#if defined(CRYPTOPP_BIG_ENDIAN)
+ const uint8_t maskb[16] = { 14,13,12,11, 10,9,8,15, 6,5,4,3, 2,1,0,7 };
+ const uint8x16_t mask = vld1q_u8(maskb);
+#else
+ const uint8_t maskb[16] = { 7,0,1,2, 3,4,5,6, 15,8,9,10, 11,12,13,14 };
+ const uint8x16_t mask = vld1q_u8(maskb);
+#endif
+
+ return vreinterpretq_u64_u8(
+ vqtbl1q_u8(vreinterpretq_u8_u64(val), mask));
+}
+
+// Faster than two Shifts and an Or. Thanks to Louis Wingers and Bryan Weeks.
+template <>
+inline uint64x2_t RotateRight64<8>(const uint64x2_t& val)
+{
+#if defined(CRYPTOPP_BIG_ENDIAN)
+ const uint8_t maskb[16] = { 8,15,14,13, 12,11,10,9, 0,7,6,5, 4,3,2,1 };
+ const uint8x16_t mask = vld1q_u8(maskb);
+#else
+ const uint8_t maskb[16] = { 1,2,3,4, 5,6,7,0, 9,10,11,12, 13,14,15,8 };
+ const uint8x16_t mask = vld1q_u8(maskb);
+#endif
+
+ return vreinterpretq_u64_u8(
+ vqtbl1q_u8(vreinterpretq_u8_u64(val), mask));
+}
+#endif
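+
+// A self-check sketch for the table-based rotates above (our illustration;
+// the guard macro CRYPTOPP_SPECK128_VERIFY_ROTATE is hypothetical and never
+// defined by the library). Each 64-bit lane of the vqtbl1q_u8 shuffle should
+// equal a conventional rotate-left by 8.
+#if defined(CRYPTOPP_SPECK128_VERIFY_ROTATE) && (defined(__aarch32__) || defined(__aarch64__))
+inline bool VerifyRotateLeft8(word64 w)
+{
+    const word64 expect = (w << 8) | (w >> 56);  // plain 64-bit rotate-left by 8
+    const uint64x2_t r = RotateLeft64<8>(vdupq_n_u64(w));
+    return vgetq_lane_u64(r, 0) == expect && vgetq_lane_u64(r, 1) == expect;
+}
+#endif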
+
+inline void SPECK128_Enc_Block(uint64x2_t &block0, uint64x2_t &block1,
+ const word64 *subkeys, unsigned int rounds)
+{
+ // [A1 A2][B1 B2] ... => [A1 B1][A2 B2] ...
+ uint64x2_t x1 = UnpackHigh64(block0, block1);
+ uint64x2_t y1 = UnpackLow64(block0, block1);
+
+ for (int i=0; i < static_cast<int>(rounds); ++i)
+ {
+ const uint64x2_t rk = vld1q_dup_u64(subkeys+i);
+
+ x1 = RotateRight64<8>(x1);
+ x1 = vaddq_u64(x1, y1);
+ x1 = veorq_u64(x1, rk);
+ y1 = RotateLeft64<3>(y1);
+ y1 = veorq_u64(y1, x1);
+ }
+
+ // [A1 B1][A2 B2] ... => [A1 A2][B1 B2] ...
+ block0 = UnpackLow64(y1, x1);
+ block1 = UnpackHigh64(y1, x1);
+}
+
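+// For reference, a scalar sketch of the round applied above (our
+// illustration; nothing in the library calls it). With round key k,
+// SPECK-128 encryption computes x = (ROTR64(x,8) + y) ^ k, then
+// y = ROTL64(y,3) ^ x -- the same five operations the vector loop
+// performs on two blocks per 128-bit register.
+inline void SPECK128_Scalar_EncRound(word64 &x, word64 &y, word64 k)
+{
+    x = (((x >> 8) | (x << 56)) + y) ^ k;  // ROTR64(x,8), add y, xor key
+    y = ((y << 3) | (y >> 61)) ^ x;        // ROTL64(y,3), xor x
+}
+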
+inline void SPECK128_Enc_6_Blocks(uint64x2_t &block0, uint64x2_t &block1,
+ uint64x2_t &block2, uint64x2_t &block3, uint64x2_t &block4, uint64x2_t &block5,
+ const word64 *subkeys, unsigned int rounds)
+{
+ // [A1 A2][B1 B2] ... => [A1 B1][A2 B2] ...
+ uint64x2_t x1 = UnpackHigh64(block0, block1);
+ uint64x2_t y1 = UnpackLow64(block0, block1);
+ uint64x2_t x2 = UnpackHigh64(block2, block3);
+ uint64x2_t y2 = UnpackLow64(block2, block3);
+ uint64x2_t x3 = UnpackHigh64(block4, block5);
+ uint64x2_t y3 = UnpackLow64(block4, block5);
+
+ for (int i=0; i < static_cast<int>(rounds); ++i)
+ {
+ const uint64x2_t rk = vld1q_dup_u64(subkeys+i);
+
+ x1 = RotateRight64<8>(x1);
+ x2 = RotateRight64<8>(x2);
+ x3 = RotateRight64<8>(x3);
+ x1 = vaddq_u64(x1, y1);
+ x2 = vaddq_u64(x2, y2);
+ x3 = vaddq_u64(x3, y3);
+ x1 = veorq_u64(x1, rk);
+ x2 = veorq_u64(x2, rk);
+ x3 = veorq_u64(x3, rk);
+ y1 = RotateLeft64<3>(y1);
+ y2 = RotateLeft64<3>(y2);
+ y3 = RotateLeft64<3>(y3);
+ y1 = veorq_u64(y1, x1);
+ y2 = veorq_u64(y2, x2);
+ y3 = veorq_u64(y3, x3);
+ }
+
+ // [A1 B1][A2 B2] ... => [A1 A2][B1 B2] ...
+ block0 = UnpackLow64(y1, x1);
+ block1 = UnpackHigh64(y1, x1);
+ block2 = UnpackLow64(y2, x2);
+ block3 = UnpackHigh64(y2, x2);
+ block4 = UnpackLow64(y3, x3);
+ block5 = UnpackHigh64(y3, x3);
+}
+
+inline void SPECK128_Dec_Block(uint64x2_t &block0, uint64x2_t &block1,
+ const word64 *subkeys, unsigned int rounds)
+{
+ // [A1 A2][B1 B2] ... => [A1 B1][A2 B2] ...
+ uint64x2_t x1 = UnpackHigh64(block0, block1);
+ uint64x2_t y1 = UnpackLow64(block0, block1);
+
+ for (int i = static_cast<int>(rounds-1); i >= 0; --i)
+ {
+ const uint64x2_t rk = vld1q_dup_u64(subkeys+i);
+
+ y1 = veorq_u64(y1, x1);
+ y1 = RotateRight64<3>(y1);
+ x1 = veorq_u64(x1, rk);
+ x1 = vsubq_u64(x1, y1);
+ x1 = RotateLeft64<8>(x1);
+ }
+
+ // [A1 B1][A2 B2] ... => [A1 A2][B1 B2] ...
+ block0 = UnpackLow64(y1, x1);
+ block1 = UnpackHigh64(y1, x1);
+}
+
+inline void SPECK128_Dec_6_Blocks(uint64x2_t &block0, uint64x2_t &block1,
+ uint64x2_t &block2, uint64x2_t &block3, uint64x2_t &block4, uint64x2_t &block5,
+ const word64 *subkeys, unsigned int rounds)
+{
+ // [A1 A2][B1 B2] ... => [A1 B1][A2 B2] ...
+ uint64x2_t x1 = UnpackHigh64(block0, block1);
+ uint64x2_t y1 = UnpackLow64(block0, block1);
+ uint64x2_t x2 = UnpackHigh64(block2, block3);
+ uint64x2_t y2 = UnpackLow64(block2, block3);
+ uint64x2_t x3 = UnpackHigh64(block4, block5);
+ uint64x2_t y3 = UnpackLow64(block4, block5);
+
+ for (int i = static_cast<int>(rounds-1); i >= 0; --i)
+ {
+ const uint64x2_t rk = vld1q_dup_u64(subkeys+i);
+
+ y1 = veorq_u64(y1, x1);
+ y2 = veorq_u64(y2, x2);
+ y3 = veorq_u64(y3, x3);
+ y1 = RotateRight64<3>(y1);
+ y2 = RotateRight64<3>(y2);
+ y3 = RotateRight64<3>(y3);
+ x1 = veorq_u64(x1, rk);
+ x2 = veorq_u64(x2, rk);
+ x3 = veorq_u64(x3, rk);
+ x1 = vsubq_u64(x1, y1);
+ x2 = vsubq_u64(x2, y2);
+ x3 = vsubq_u64(x3, y3);
+ x1 = RotateLeft64<8>(x1);
+ x2 = RotateLeft64<8>(x2);
+ x3 = RotateLeft64<8>(x3);
+ }
+
+ // [A1 B1][A2 B2] ... => [A1 A2][B1 B2] ...
+ block0 = UnpackLow64(y1, x1);
+ block1 = UnpackHigh64(y1, x1);
+ block2 = UnpackLow64(y2, x2);
+ block3 = UnpackHigh64(y2, x2);
+ block4 = UnpackLow64(y3, x3);
+ block5 = UnpackHigh64(y3, x3);
+}
+
+#endif // CRYPTOPP_ARM_NEON_AVAILABLE
+
+// ***************************** IA-32 ***************************** //
+
+#if defined(CRYPTOPP_SSSE3_AVAILABLE)
+
+// Clang __m128i casts, http://bugs.llvm.org/show_bug.cgi?id=20670
+#ifndef M128_CAST
+# define M128_CAST(x) ((__m128i *)(void *)(x))
+#endif
+#ifndef CONST_M128_CAST
+# define CONST_M128_CAST(x) ((const __m128i *)(const void *)(x))
+#endif
+
+// GCC double casts, https://www.spinics.net/lists/gcchelp/msg47735.html
+#ifndef DOUBLE_CAST
+# define DOUBLE_CAST(x) ((double *)(void *)(x))
+#endif
+#ifndef CONST_DOUBLE_CAST
+# define CONST_DOUBLE_CAST(x) ((const double *)(const void *)(x))
+#endif
+
+template <unsigned int R>
+inline __m128i RotateLeft64(const __m128i& val)
+{
+#if defined(CRYPTOPP_AVX512_ROTATE)
+ return _mm_rol_epi64(val, R);
+#else
+ return _mm_or_si128(
+ _mm_slli_epi64(val, R), _mm_srli_epi64(val, 64-R));
+#endif
+}
+
+template <unsigned int R>
+inline __m128i RotateRight64(const __m128i& val)
+{
+#if defined(CRYPTOPP_AVX512_ROTATE)
+ return _mm_ror_epi64(val, R);
+#else
+ return _mm_or_si128(
+ _mm_slli_epi64(val, 64-R), _mm_srli_epi64(val, R));
+#endif
+}
+
+// Faster than two Shifts and an Or. Thanks to Louis Wingers and Bryan Weeks.
+template <>
+inline __m128i RotateLeft64<8>(const __m128i& val)
+{
+ const __m128i mask = _mm_set_epi8(14,13,12,11, 10,9,8,15, 6,5,4,3, 2,1,0,7);
+ return _mm_shuffle_epi8(val, mask);
+}
+
+// Faster than two Shifts and an Or. Thanks to Louis Wingers and Bryan Weeks.
+template <>
+inline __m128i RotateRight64<8>(const __m128i& val)
+{
+ const __m128i mask = _mm_set_epi8(8,15,14,13, 12,11,10,9, 0,7,6,5, 4,3,2,1);
+ return _mm_shuffle_epi8(val, mask);
+}
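+
+// Sanity sketch for the shuffle masks (our illustration; the guard macro
+// CRYPTOPP_SPECK128_VERIFY_ROTATE is hypothetical). _mm_set_epi8 lists bytes
+// from most- to least-significant, so the RotateLeft64<8> mask maps
+// dst[i] = src[(i+7)%8] within each 64-bit lane -- a left rotate by 8 bits.
+#if defined(CRYPTOPP_SPECK128_VERIFY_ROTATE)
+inline bool VerifyRotateLeft8(word64 w)
+{
+    const word64 expect = (w << 8) | (w >> 56);  // plain 64-bit rotate-left by 8
+    const __m128i r = RotateLeft64<8>(_mm_set1_epi64x(static_cast<long long>(w)));
+    word64 lane0;
+    _mm_storel_epi64(M128_CAST(&lane0), r);     // extract the low 64-bit lane
+    return lane0 == expect;
+}
+#endif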
+
+inline void SPECK128_Enc_Block(__m128i &block0, __m128i &block1,
+ const word64 *subkeys, unsigned int rounds)
+{
+ // [A1 A2][B1 B2] ... => [A1 B1][A2 B2] ...
+ __m128i x1 = _mm_unpackhi_epi64(block0, block1);
+ __m128i y1 = _mm_unpacklo_epi64(block0, block1);
+
+ for (int i=0; i < static_cast<int>(rounds); ++i)
+ {
+ const __m128i rk = _mm_castpd_si128(
+ _mm_loaddup_pd(CONST_DOUBLE_CAST(subkeys+i)));
+
+ x1 = RotateRight64<8>(x1);
+ x1 = _mm_add_epi64(x1, y1);
+ x1 = _mm_xor_si128(x1, rk);
+ y1 = RotateLeft64<3>(y1);
+ y1 = _mm_xor_si128(y1, x1);
+ }
+
+ // [A1 B1][A2 B2] ... => [A1 A2][B1 B2] ...
+ block0 = _mm_unpacklo_epi64(y1, x1);
+ block1 = _mm_unpackhi_epi64(y1, x1);
+}
+
+inline void SPECK128_Enc_6_Blocks(__m128i &block0, __m128i &block1,
+ __m128i &block2, __m128i &block3, __m128i &block4, __m128i &block5,
+ const word64 *subkeys, unsigned int rounds)
+{
+ // [A1 A2][B1 B2] ... => [A1 B1][A2 B2] ...
+ __m128i x1 = _mm_unpackhi_epi64(block0, block1);
+ __m128i y1 = _mm_unpacklo_epi64(block0, block1);
+ __m128i x2 = _mm_unpackhi_epi64(block2, block3);
+ __m128i y2 = _mm_unpacklo_epi64(block2, block3);
+ __m128i x3 = _mm_unpackhi_epi64(block4, block5);
+ __m128i y3 = _mm_unpacklo_epi64(block4, block5);
+
+ for (int i=0; i < static_cast<int>(rounds); ++i)
+ {
+ const __m128i rk = _mm_castpd_si128(
+ _mm_loaddup_pd(CONST_DOUBLE_CAST(subkeys+i)));
+
+ x1 = RotateRight64<8>(x1);
+ x2 = RotateRight64<8>(x2);
+ x3 = RotateRight64<8>(x3);
+ x1 = _mm_add_epi64(x1, y1);
+ x2 = _mm_add_epi64(x2, y2);
+ x3 = _mm_add_epi64(x3, y3);
+ x1 = _mm_xor_si128(x1, rk);
+ x2 = _mm_xor_si128(x2, rk);
+ x3 = _mm_xor_si128(x3, rk);
+ y1 = RotateLeft64<3>(y1);
+ y2 = RotateLeft64<3>(y2);
+ y3 = RotateLeft64<3>(y3);
+ y1 = _mm_xor_si128(y1, x1);
+ y2 = _mm_xor_si128(y2, x2);
+ y3 = _mm_xor_si128(y3, x3);
+ }
+
+ // [A1 B1][A2 B2] ... => [A1 A2][B1 B2] ...
+ block0 = _mm_unpacklo_epi64(y1, x1);
+ block1 = _mm_unpackhi_epi64(y1, x1);
+ block2 = _mm_unpacklo_epi64(y2, x2);
+ block3 = _mm_unpackhi_epi64(y2, x2);
+ block4 = _mm_unpacklo_epi64(y3, x3);
+ block5 = _mm_unpackhi_epi64(y3, x3);
+}
+
+inline void SPECK128_Dec_Block(__m128i &block0, __m128i &block1,
+ const word64 *subkeys, unsigned int rounds)
+{
+ // [A1 A2][B1 B2] ... => [A1 B1][A2 B2] ...
+ __m128i x1 = _mm_unpackhi_epi64(block0, block1);
+ __m128i y1 = _mm_unpacklo_epi64(block0, block1);
+
+ for (int i = static_cast<int>(rounds-1); i >= 0; --i)
+ {
+ const __m128i rk = _mm_castpd_si128(
+ _mm_loaddup_pd(CONST_DOUBLE_CAST(subkeys+i)));
+
+ y1 = _mm_xor_si128(y1, x1);
+ y1 = RotateRight64<3>(y1);
+ x1 = _mm_xor_si128(x1, rk);
+ x1 = _mm_sub_epi64(x1, y1);
+ x1 = RotateLeft64<8>(x1);
+ }
+
+ // [A1 B1][A2 B2] ... => [A1 A2][B1 B2] ...
+ block0 = _mm_unpacklo_epi64(y1, x1);
+ block1 = _mm_unpackhi_epi64(y1, x1);
+}
+
+inline void SPECK128_Dec_6_Blocks(__m128i &block0, __m128i &block1,
+ __m128i &block2, __m128i &block3, __m128i &block4, __m128i &block5,
+ const word64 *subkeys, unsigned int rounds)
+{
+ // [A1 A2][B1 B2] ... => [A1 B1][A2 B2] ...
+ __m128i x1 = _mm_unpackhi_epi64(block0, block1);
+ __m128i y1 = _mm_unpacklo_epi64(block0, block1);
+ __m128i x2 = _mm_unpackhi_epi64(block2, block3);
+ __m128i y2 = _mm_unpacklo_epi64(block2, block3);
+ __m128i x3 = _mm_unpackhi_epi64(block4, block5);
+ __m128i y3 = _mm_unpacklo_epi64(block4, block5);
+
+ for (int i = static_cast<int>(rounds-1); i >= 0; --i)
+ {
+ const __m128i rk = _mm_castpd_si128(
+ _mm_loaddup_pd(CONST_DOUBLE_CAST(subkeys+i)));
+
+ y1 = _mm_xor_si128(y1, x1);
+ y2 = _mm_xor_si128(y2, x2);
+ y3 = _mm_xor_si128(y3, x3);
+ y1 = RotateRight64<3>(y1);
+ y2 = RotateRight64<3>(y2);
+ y3 = RotateRight64<3>(y3);
+ x1 = _mm_xor_si128(x1, rk);
+ x2 = _mm_xor_si128(x2, rk);
+ x3 = _mm_xor_si128(x3, rk);
+ x1 = _mm_sub_epi64(x1, y1);
+ x2 = _mm_sub_epi64(x2, y2);
+ x3 = _mm_sub_epi64(x3, y3);
+ x1 = RotateLeft64<8>(x1);
+ x2 = RotateLeft64<8>(x2);
+ x3 = RotateLeft64<8>(x3);
+ }
+
+ // [A1 B1][A2 B2] ... => [A1 A2][B1 B2] ...
+ block0 = _mm_unpacklo_epi64(y1, x1);
+ block1 = _mm_unpackhi_epi64(y1, x1);
+ block2 = _mm_unpacklo_epi64(y2, x2);
+ block3 = _mm_unpackhi_epi64(y2, x2);
+ block4 = _mm_unpacklo_epi64(y3, x3);
+ block5 = _mm_unpackhi_epi64(y3, x3);
+}
+
+#endif // CRYPTOPP_SSSE3_AVAILABLE
+
+// ***************************** Power8 ***************************** //
+
+#if defined(CRYPTOPP_POWER8_AVAILABLE)
+
+using CryptoPP::uint8x16_p;
+using CryptoPP::uint32x4_p;
+using CryptoPP::uint64x2_p;
+
+using CryptoPP::VectorAdd;
+using CryptoPP::VectorSub;
+using CryptoPP::VectorXor;
+
+// Rotate left by bit count
+template<unsigned int C>
+inline uint64x2_p RotateLeft64(const uint64x2_p val)
+{
+ const uint64x2_p m = {C, C};
+ return vec_rl(val, m);
+}
+
+// Rotate right by bit count
+template<unsigned int C>
+inline uint64x2_p RotateRight64(const uint64x2_p val)
+{
+ const uint64x2_p m = {64-C, 64-C};
+ return vec_rl(val, m);
+}
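+
+// Altivec's vec_rl only rotates left, so RotateRight64 above relies on the
+// identity ROTR64(x,C) == ROTL64(x,64-C). A scalar sketch of the identity
+// (our illustration; nothing in the library calls it, and it assumes
+// 0 < c < 64 so neither shift is undefined):
+inline word64 RotateRight64Scalar(word64 x, unsigned int c)
+{
+    // both a right rotate by c and a left rotate by 64-c produce this pattern
+    return (x >> c) | (x << (64 - c));
+}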
+
+void SPECK128_Enc_Block(uint32x4_p &block, const word64 *subkeys, unsigned int rounds)
+{
+#if defined(CRYPTOPP_BIG_ENDIAN)
+ const uint8x16_p m1 = {31,30,29,28,27,26,25,24, 15,14,13,12,11,10,9,8};
+ const uint8x16_p m2 = {23,22,21,20,19,18,17,16, 7,6,5,4,3,2,1,0};
+#else
+ const uint8x16_p m1 = {7,6,5,4,3,2,1,0, 23,22,21,20,19,18,17,16};
+ const uint8x16_p m2 = {15,14,13,12,11,10,9,8, 31,30,29,28,27,26,25,24};
+#endif
+
+ // [A1 A2][B1 B2] ... => [A1 B1][A2 B2] ...
+ uint64x2_p x1 = (uint64x2_p)vec_perm(block, block, m1);
+ uint64x2_p y1 = (uint64x2_p)vec_perm(block, block, m2);
+
+ for (int i=0; i < static_cast<int>(rounds); ++i)
+ {
+ const uint64x2_p rk = vec_splats((unsigned long long)subkeys[i]);
+
+ x1 = RotateRight64<8>(x1);
+ x1 = VectorAdd(x1, y1);
+ x1 = VectorXor(x1, rk);
+
+ y1 = RotateLeft64<3>(y1);
+ y1 = VectorXor(y1, x1);
+ }
+
+#if defined(CRYPTOPP_BIG_ENDIAN)
+ const uint8x16_p m3 = {31,30,29,28,27,26,25,24, 15,14,13,12,11,10,9,8};
+ //const uint8x16_p m4 = {23,22,21,20,19,18,17,16, 7,6,5,4,3,2,1,0};
+#else
+ const uint8x16_p m3 = {7,6,5,4,3,2,1,0, 23,22,21,20,19,18,17,16};
+ //const uint8x16_p m4 = {15,14,13,12,11,10,9,8, 31,30,29,28,27,26,25,24};
+#endif
+
+ // [A1 B1][A2 B2] ... => [A1 A2][B1 B2] ...
+ block = (uint32x4_p)vec_perm(x1, y1, m3);
+}
+
+void SPECK128_Dec_Block(uint32x4_p &block, const word64 *subkeys, unsigned int rounds)
+{
+#if defined(CRYPTOPP_BIG_ENDIAN)
+ const uint8x16_p m1 = {31,30,29,28,27,26,25,24, 15,14,13,12,11,10,9,8};
+ const uint8x16_p m2 = {23,22,21,20,19,18,17,16, 7,6,5,4,3,2,1,0};
+#else
+ const uint8x16_p m1 = {7,6,5,4,3,2,1,0, 23,22,21,20,19,18,17,16};
+ const uint8x16_p m2 = {15,14,13,12,11,10,9,8, 31,30,29,28,27,26,25,24};
+#endif
+
+ // [A1 A2][B1 B2] ... => [A1 B1][A2 B2] ...
+ uint64x2_p x1 = (uint64x2_p)vec_perm(block, block, m1);
+ uint64x2_p y1 = (uint64x2_p)vec_perm(block, block, m2);
+
+ for (int i = static_cast<int>(rounds-1); i >= 0; --i)
+ {
+ const uint64x2_p rk = vec_splats((unsigned long long)subkeys[i]);
+
+ y1 = VectorXor(y1, x1);
+ y1 = RotateRight64<3>(y1);
+ x1 = VectorXor(x1, rk);
+ x1 = VectorSub(x1, y1);
+ x1 = RotateLeft64<8>(x1);
+ }
+
+#if defined(CRYPTOPP_BIG_ENDIAN)
+ const uint8x16_p m3 = {31,30,29,28,27,26,25,24, 15,14,13,12,11,10,9,8};
+ //const uint8x16_p m4 = {23,22,21,20,19,18,17,16, 7,6,5,4,3,2,1,0};
+#else
+ const uint8x16_p m3 = {7,6,5,4,3,2,1,0, 23,22,21,20,19,18,17,16};
+ //const uint8x16_p m4 = {15,14,13,12,11,10,9,8, 31,30,29,28,27,26,25,24};
+#endif
+
+ // [A1 B1][A2 B2] ... => [A1 A2][B1 B2] ...
+ block = (uint32x4_p)vec_perm(x1, y1, m3);
+}
+
+void SPECK128_Enc_6_Blocks(uint32x4_p &block0, uint32x4_p &block1,
+ uint32x4_p &block2, uint32x4_p &block3, uint32x4_p &block4,
+ uint32x4_p &block5, const word64 *subkeys, unsigned int rounds)
+{
+#if defined(CRYPTOPP_BIG_ENDIAN)
+ const uint8x16_p m1 = {31,30,29,28,27,26,25,24, 15,14,13,12,11,10,9,8};
+ const uint8x16_p m2 = {23,22,21,20,19,18,17,16, 7,6,5,4,3,2,1,0};
+#else
+ const uint8x16_p m1 = {7,6,5,4,3,2,1,0, 23,22,21,20,19,18,17,16};
+ const uint8x16_p m2 = {15,14,13,12,11,10,9,8, 31,30,29,28,27,26,25,24};
+#endif
+
+ // [A1 A2][B1 B2] ... => [A1 B1][A2 B2] ...
+ uint64x2_p x1 = (uint64x2_p)vec_perm(block0, block1, m1);
+ uint64x2_p y1 = (uint64x2_p)vec_perm(block0, block1, m2);
+ uint64x2_p x2 = (uint64x2_p)vec_perm(block2, block3, m1);
+ uint64x2_p y2 = (uint64x2_p)vec_perm(block2, block3, m2);
+ uint64x2_p x3 = (uint64x2_p)vec_perm(block4, block5, m1);
+ uint64x2_p y3 = (uint64x2_p)vec_perm(block4, block5, m2);
+
+ for (int i=0; i < static_cast<int>(rounds); ++i)
+ {
+ const uint64x2_p rk = vec_splats((unsigned long long)subkeys[i]);
+
+ x1 = RotateRight64<8>(x1);
+ x2 = RotateRight64<8>(x2);
+ x3 = RotateRight64<8>(x3);
+ x1 = VectorAdd(x1, y1);
+ x2 = VectorAdd(x2, y2);
+ x3 = VectorAdd(x3, y3);
+ x1 = VectorXor(x1, rk);
+ x2 = VectorXor(x2, rk);
+ x3 = VectorXor(x3, rk);
+
+ y1 = RotateLeft64<3>(y1);
+ y2 = RotateLeft64<3>(y2);
+ y3 = RotateLeft64<3>(y3);
+ y1 = VectorXor(y1, x1);
+ y2 = VectorXor(y2, x2);
+ y3 = VectorXor(y3, x3);
+ }
+
+#if defined(CRYPTOPP_BIG_ENDIAN)
+ const uint8x16_p m3 = {31,30,29,28,27,26,25,24, 15,14,13,12,11,10,9,8};
+ const uint8x16_p m4 = {23,22,21,20,19,18,17,16, 7,6,5,4,3,2,1,0};
+#else
+ const uint8x16_p m3 = {7,6,5,4,3,2,1,0, 23,22,21,20,19,18,17,16};
+ const uint8x16_p m4 = {15,14,13,12,11,10,9,8, 31,30,29,28,27,26,25,24};
+#endif
+
+ // [A1 B1][A2 B2] ... => [A1 A2][B1 B2] ...
+ block0 = (uint32x4_p)vec_perm(x1, y1, m3);
+ block1 = (uint32x4_p)vec_perm(x1, y1, m4);
+ block2 = (uint32x4_p)vec_perm(x2, y2, m3);
+ block3 = (uint32x4_p)vec_perm(x2, y2, m4);
+ block4 = (uint32x4_p)vec_perm(x3, y3, m3);
+ block5 = (uint32x4_p)vec_perm(x3, y3, m4);
+}
+
+void SPECK128_Dec_6_Blocks(uint32x4_p &block0, uint32x4_p &block1,
+ uint32x4_p &block2, uint32x4_p &block3, uint32x4_p &block4,
+ uint32x4_p &block5, const word64 *subkeys, unsigned int rounds)
+{
+#if defined(CRYPTOPP_BIG_ENDIAN)
+ const uint8x16_p m1 = {31,30,29,28,27,26,25,24, 15,14,13,12,11,10,9,8};
+ const uint8x16_p m2 = {23,22,21,20,19,18,17,16, 7,6,5,4,3,2,1,0};
+#else
+ const uint8x16_p m1 = {7,6,5,4,3,2,1,0, 23,22,21,20,19,18,17,16};
+ const uint8x16_p m2 = {15,14,13,12,11,10,9,8, 31,30,29,28,27,26,25,24};
+#endif
+
+ // [A1 A2][B1 B2] ... => [A1 B1][A2 B2] ...
+ uint64x2_p x1 = (uint64x2_p)vec_perm(block0, block1, m1);
+ uint64x2_p y1 = (uint64x2_p)vec_perm(block0, block1, m2);
+ uint64x2_p x2 = (uint64x2_p)vec_perm(block2, block3, m1);
+ uint64x2_p y2 = (uint64x2_p)vec_perm(block2, block3, m2);
+ uint64x2_p x3 = (uint64x2_p)vec_perm(block4, block5, m1);
+ uint64x2_p y3 = (uint64x2_p)vec_perm(block4, block5, m2);
+
+ for (int i = static_cast<int>(rounds-1); i >= 0; --i)
+ {
+ const uint64x2_p rk = vec_splats((unsigned long long)subkeys[i]);
+
+ y1 = VectorXor(y1, x1);
+ y2 = VectorXor(y2, x2);
+ y3 = VectorXor(y3, x3);
+ y1 = RotateRight64<3>(y1);
+ y2 = RotateRight64<3>(y2);
+ y3 = RotateRight64<3>(y3);
+
+ x1 = VectorXor(x1, rk);
+ x2 = VectorXor(x2, rk);
+ x3 = VectorXor(x3, rk);
+ x1 = VectorSub(x1, y1);
+ x2 = VectorSub(x2, y2);
+ x3 = VectorSub(x3, y3);
+ x1 = RotateLeft64<8>(x1);
+ x2 = RotateLeft64<8>(x2);
+ x3 = RotateLeft64<8>(x3);
+ }
+
+#if defined(CRYPTOPP_BIG_ENDIAN)
+ const uint8x16_p m3 = {31,30,29,28,27,26,25,24, 15,14,13,12,11,10,9,8};
+ const uint8x16_p m4 = {23,22,21,20,19,18,17,16, 7,6,5,4,3,2,1,0};
+#else
+ const uint8x16_p m3 = {7,6,5,4,3,2,1,0, 23,22,21,20,19,18,17,16};
+ const uint8x16_p m4 = {15,14,13,12,11,10,9,8, 31,30,29,28,27,26,25,24};
+#endif
+
+ // [A1 B1][A2 B2] ... => [A1 A2][B1 B2] ...
+ block0 = (uint32x4_p)vec_perm(x1, y1, m3);
+ block1 = (uint32x4_p)vec_perm(x1, y1, m4);
+ block2 = (uint32x4_p)vec_perm(x2, y2, m3);
+ block3 = (uint32x4_p)vec_perm(x2, y2, m4);
+ block4 = (uint32x4_p)vec_perm(x3, y3, m3);
+ block5 = (uint32x4_p)vec_perm(x3, y3, m4);
+}
+
+#endif // CRYPTOPP_POWER8_AVAILABLE
+
+ANONYMOUS_NAMESPACE_END
+
+///////////////////////////////////////////////////////////////////////
+
+NAMESPACE_BEGIN(CryptoPP)
+
+// *************************** ARM NEON **************************** //
+
+#if (CRYPTOPP_ARM_NEON_AVAILABLE)
+size_t SPECK128_Enc_AdvancedProcessBlocks_NEON(const word64* subKeys, size_t rounds,
+ const byte *inBlocks, const byte *xorBlocks, byte *outBlocks, size_t length, word32 flags)
+{
+ return AdvancedProcessBlocks128_6x2_NEON(SPECK128_Enc_Block, SPECK128_Enc_6_Blocks,
+ subKeys, rounds, inBlocks, xorBlocks, outBlocks, length, flags);
+}
+
+size_t SPECK128_Dec_AdvancedProcessBlocks_NEON(const word64* subKeys, size_t rounds,
+ const byte *inBlocks, const byte *xorBlocks, byte *outBlocks, size_t length, word32 flags)
+{
+ return AdvancedProcessBlocks128_6x2_NEON(SPECK128_Dec_Block, SPECK128_Dec_6_Blocks,
+ subKeys, rounds, inBlocks, xorBlocks, outBlocks, length, flags);
+}
+#endif // CRYPTOPP_ARM_NEON_AVAILABLE
+
+// ***************************** IA-32 ***************************** //
+
+#if defined(CRYPTOPP_SSSE3_AVAILABLE)
+size_t SPECK128_Enc_AdvancedProcessBlocks_SSSE3(const word64* subKeys, size_t rounds,
+ const byte *inBlocks, const byte *xorBlocks, byte *outBlocks, size_t length, word32 flags)
+{
+ return AdvancedProcessBlocks128_6x2_SSE(SPECK128_Enc_Block, SPECK128_Enc_6_Blocks,
+ subKeys, rounds, inBlocks, xorBlocks, outBlocks, length, flags);
+}
+
+size_t SPECK128_Dec_AdvancedProcessBlocks_SSSE3(const word64* subKeys, size_t rounds,
+ const byte *inBlocks, const byte *xorBlocks, byte *outBlocks, size_t length, word32 flags)
+{
+ return AdvancedProcessBlocks128_6x2_SSE(SPECK128_Dec_Block, SPECK128_Dec_6_Blocks,
+ subKeys, rounds, inBlocks, xorBlocks, outBlocks, length, flags);
+}
+#endif // CRYPTOPP_SSSE3_AVAILABLE
+
+// ***************************** Power8 ***************************** //
+
+#if defined(CRYPTOPP_POWER8_AVAILABLE)
+size_t SPECK128_Enc_AdvancedProcessBlocks_POWER8(const word64* subKeys, size_t rounds,
+ const byte *inBlocks, const byte *xorBlocks, byte *outBlocks, size_t length, word32 flags)
+{
+ return AdvancedProcessBlocks128_6x1_ALTIVEC(SPECK128_Enc_Block, SPECK128_Enc_6_Blocks,
+ subKeys, rounds, inBlocks, xorBlocks, outBlocks, length, flags);
+}
+
+size_t SPECK128_Dec_AdvancedProcessBlocks_POWER8(const word64* subKeys, size_t rounds,
+ const byte *inBlocks, const byte *xorBlocks, byte *outBlocks, size_t length, word32 flags)
+{
+ return AdvancedProcessBlocks128_6x1_ALTIVEC(SPECK128_Dec_Block, SPECK128_Dec_6_Blocks,
+ subKeys, rounds, inBlocks, xorBlocks, outBlocks, length, flags);
+}
+#endif // CRYPTOPP_POWER8_AVAILABLE
+
+NAMESPACE_END