path: root/chacha_avx.cpp
author     Jeffrey Walton <noloader@gmail.com>  2019-06-09 01:49:44 -0400
committer  Jeffrey Walton <noloader@gmail.com>  2019-06-09 01:49:44 -0400
commit     8fab1c3677198f941ae83fcf322edc420603d325 (patch)
tree       dff39d958e97521dad33c0b7467961101693f636 /chacha_avx.cpp
parent     3ce1823fd190a8518c99882d22ca86e22a642650 (diff)
download   cryptopp-git-8fab1c3677198f941ae83fcf322edc420603d325.tar.gz
Revert changes for lgtm findings
This broke SunCC to the point of no repair. SunCC emits AVX2 instructions for the plain C++ and SSE2 code paths. Man, this compiler sucks...
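For context, the revert restores the MAYBE_CONST workaround that appears in the diff below: on the SunCC releases that miscompile the const-qualified casts, const is dropped so the compiler stops emitting the offending instructions. A minimal sketch of the pattern, with the macro name and version bounds taken from the diff; the wrapper function is hypothetical and only shows how the macro is consumed:

    #include <emmintrin.h>   // __m128i, _mm_loadu_si128

    // Sun Studio 12.4 is fine; 12.5 and 12.6 (__SUNPRO_CC 0x5140-0x5150)
    // choke on the const-qualified casts, so const is dropped there only.
    #if (__SUNPRO_CC >= 0x5140) && (__SUNPRO_CC <= 0x5150)
    #  define MAYBE_CONST
    #else
    #  define MAYBE_CONST const
    #endif

    // Hypothetical helper: the cast expands to 'const __m128i*' everywhere
    // except the affected SunCC releases, where it becomes plain '__m128i*'.
    __m128i first_block(const unsigned int* state)
    {
        MAYBE_CONST __m128i* state_mm = (MAYBE_CONST __m128i*)(state);
        return _mm_loadu_si128(state_mm + 0);
    }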
Diffstat (limited to 'chacha_avx.cpp')
-rw-r--r--   chacha_avx.cpp   92
1 file changed, 51 insertions(+), 41 deletions(-)
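The calls removed in this diff (load_m128i, load_m256i, store_m256i) come from sse_simd.h, which the commit also stops including; that header's contents are not shown here. They are presumably thin, index-templated wrappers over the unaligned load/store intrinsics, roughly along the lines of the hypothetical sketch below (the names match the diff, the bodies are assumed). The revert simply swaps these wrappers back out for explicit pointer casts plus direct intrinsic calls so that MAYBE_CONST can strip const where SunCC misbehaves.

    #include <emmintrin.h>
    #include <immintrin.h>

    // Hypothetical reconstruction: load the N-th unaligned 128-bit block.
    template <unsigned int N, class T>
    inline __m128i load_m128i(const T* ptr)
    {
        return _mm_loadu_si128(reinterpret_cast<const __m128i*>(ptr) + N);
    }

    // Hypothetical reconstruction: load the N-th unaligned 256-bit block.
    template <unsigned int N, class T>
    inline __m256i load_m256i(const T* ptr)
    {
        return _mm256_loadu_si256(reinterpret_cast<const __m256i*>(ptr) + N);
    }

    // Hypothetical reconstruction: store to the N-th unaligned 256-bit block.
    template <unsigned int N, class T>
    inline void store_m256i(T* ptr, __m256i val)
    {
        _mm256_storeu_si256(reinterpret_cast<__m256i*>(ptr) + N, val);
    }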
diff --git a/chacha_avx.cpp b/chacha_avx.cpp
index cdf50266..20693488 100644
--- a/chacha_avx.cpp
+++ b/chacha_avx.cpp
@@ -24,12 +24,18 @@
# include <xmmintrin.h>
# include <emmintrin.h>
# include <immintrin.h>
-# include "sse_simd.h"
#endif
// Squash MS LNK4221 and libtool warnings
extern const char CHACHA_AVX_FNAME[] = __FILE__;
+// Sun Studio 12.4 OK, 12.5 and 12.6 compile error.
+#if (__SUNPRO_CC >= 0x5140) && (__SUNPRO_CC <= 0x5150)
+# define MAYBE_CONST
+#else
+# define MAYBE_CONST const
+#endif
+
// VS2017 and global optimization bug. TODO, figure out when
// we can re-enable full optimizations for VS2017. Also see
// https://github.com/weidai11/cryptopp/issues/649 and
@@ -85,10 +91,14 @@ NAMESPACE_BEGIN(CryptoPP)
void ChaCha_OperateKeystream_AVX2(const word32 *state, const byte* input, byte *output, unsigned int rounds)
{
- const __m256i state0 = _mm256_broadcastsi128_si256(load_m128i<0>(state));
- const __m256i state1 = _mm256_broadcastsi128_si256(load_m128i<1>(state));
- const __m256i state2 = _mm256_broadcastsi128_si256(load_m128i<2>(state));
- const __m256i state3 = _mm256_broadcastsi128_si256(load_m128i<3>(state));
+ MAYBE_CONST __m128i* state_mm = (MAYBE_CONST __m128i*)(state);
+ MAYBE_CONST __m256i* input_mm = (MAYBE_CONST __m256i*)(input);
+ __m256i* output_mm = reinterpret_cast<__m256i*>(output);
+
+ const __m256i state0 = _mm256_broadcastsi128_si256(_mm_loadu_si128(state_mm + 0));
+ const __m256i state1 = _mm256_broadcastsi128_si256(_mm_loadu_si128(state_mm + 1));
+ const __m256i state2 = _mm256_broadcastsi128_si256(_mm_loadu_si128(state_mm + 2));
+ const __m256i state3 = _mm256_broadcastsi128_si256(_mm_loadu_si128(state_mm + 3));
const __m256i CTR0 = _mm256_set_epi32(0, 0, 0, 0, 0, 0, 0, 4);
const __m256i CTR1 = _mm256_set_epi32(0, 0, 0, 1, 0, 0, 0, 5);
@@ -294,80 +304,80 @@ void ChaCha_OperateKeystream_AVX2(const word32 *state, const byte* input, byte *
X3_3 = _mm256_add_epi32(X3_3, state3);
X3_3 = _mm256_add_epi64(X3_3, CTR3);
- if (input)
+ if (input_mm)
{
- store_m256i<0>(output, _mm256_xor_si256(load_m256i<0>(input),
+ _mm256_storeu_si256(output_mm + 0, _mm256_xor_si256(_mm256_loadu_si256(input_mm + 0),
_mm256_permute2x128_si256(X0_0, X0_1, 1 + (3 << 4))));
- store_m256i<1>(output, _mm256_xor_si256(load_m256i<1>(input),
+ _mm256_storeu_si256(output_mm + 1, _mm256_xor_si256(_mm256_loadu_si256(input_mm + 1),
_mm256_permute2x128_si256(X0_2, X0_3, 1 + (3 << 4))));
- store_m256i<2>(output, _mm256_xor_si256(load_m256i<2>(input),
+ _mm256_storeu_si256(output_mm + 2, _mm256_xor_si256(_mm256_loadu_si256(input_mm + 2),
_mm256_permute2x128_si256(X1_0, X1_1, 1 + (3 << 4))));
- store_m256i<3>(output, _mm256_xor_si256(load_m256i<3>(input),
+ _mm256_storeu_si256(output_mm + 3, _mm256_xor_si256(_mm256_loadu_si256(input_mm + 3),
_mm256_permute2x128_si256(X1_2, X1_3, 1 + (3 << 4))));
}
else
{
- store_m256i<0>(output, _mm256_permute2x128_si256(X0_0, X0_1, 1 + (3 << 4)));
- store_m256i<1>(output, _mm256_permute2x128_si256(X0_2, X0_3, 1 + (3 << 4)));
- store_m256i<2>(output, _mm256_permute2x128_si256(X1_0, X1_1, 1 + (3 << 4)));
- store_m256i<3>(output, _mm256_permute2x128_si256(X1_2, X1_3, 1 + (3 << 4)));
+ _mm256_storeu_si256(output_mm + 0, _mm256_permute2x128_si256(X0_0, X0_1, 1 + (3 << 4)));
+ _mm256_storeu_si256(output_mm + 1, _mm256_permute2x128_si256(X0_2, X0_3, 1 + (3 << 4)));
+ _mm256_storeu_si256(output_mm + 2, _mm256_permute2x128_si256(X1_0, X1_1, 1 + (3 << 4)));
+ _mm256_storeu_si256(output_mm + 3, _mm256_permute2x128_si256(X1_2, X1_3, 1 + (3 << 4)));
}
- if (input)
+ if (input_mm)
{
- store_m256i<4>(output, _mm256_xor_si256(load_m256i<4>(input),
+ _mm256_storeu_si256(output_mm + 4, _mm256_xor_si256(_mm256_loadu_si256(input_mm + 4),
_mm256_permute2x128_si256(X2_0, X2_1, 1 + (3 << 4))));
- store_m256i<5>(output, _mm256_xor_si256(load_m256i<5>(input),
+ _mm256_storeu_si256(output_mm + 5, _mm256_xor_si256(_mm256_loadu_si256(input_mm + 5),
_mm256_permute2x128_si256(X2_2, X2_3, 1 + (3 << 4))));
- store_m256i<6>(output, _mm256_xor_si256(load_m256i<6>(input),
+ _mm256_storeu_si256(output_mm + 6, _mm256_xor_si256(_mm256_loadu_si256(input_mm + 6),
_mm256_permute2x128_si256(X3_0, X3_1, 1 + (3 << 4))));
- store_m256i<7>(output, _mm256_xor_si256(load_m256i<7>(input),
+ _mm256_storeu_si256(output_mm + 7, _mm256_xor_si256(_mm256_loadu_si256(input_mm + 7),
_mm256_permute2x128_si256(X3_2, X3_3, 1 + (3 << 4))));
}
else
{
- store_m256i<4>(output, _mm256_permute2x128_si256(X2_0, X2_1, 1 + (3 << 4)));
- store_m256i<5>(output, _mm256_permute2x128_si256(X2_2, X2_3, 1 + (3 << 4)));
- store_m256i<6>(output, _mm256_permute2x128_si256(X3_0, X3_1, 1 + (3 << 4)));
- store_m256i<7>(output, _mm256_permute2x128_si256(X3_2, X3_3, 1 + (3 << 4)));
+ _mm256_storeu_si256(output_mm + 4, _mm256_permute2x128_si256(X2_0, X2_1, 1 + (3 << 4)));
+ _mm256_storeu_si256(output_mm + 5, _mm256_permute2x128_si256(X2_2, X2_3, 1 + (3 << 4)));
+ _mm256_storeu_si256(output_mm + 6, _mm256_permute2x128_si256(X3_0, X3_1, 1 + (3 << 4)));
+ _mm256_storeu_si256(output_mm + 7, _mm256_permute2x128_si256(X3_2, X3_3, 1 + (3 << 4)));
}
- if (input)
+ if (input_mm)
{
- store_m256i<8>(output, _mm256_xor_si256(load_m256i<8>(input),
+ _mm256_storeu_si256(output_mm + 8, _mm256_xor_si256(_mm256_loadu_si256(input_mm + 8),
_mm256_permute2x128_si256(X0_0, X0_1, 0 + (2 << 4))));
- store_m256i<9>(output, _mm256_xor_si256(load_m256i<9>(input),
+ _mm256_storeu_si256(output_mm + 9, _mm256_xor_si256(_mm256_loadu_si256(input_mm + 9),
_mm256_permute2x128_si256(X0_2, X0_3, 0 + (2 << 4))));
- store_m256i<10>(output, _mm256_xor_si256(load_m256i<10>(input),
+ _mm256_storeu_si256(output_mm + 10, _mm256_xor_si256(_mm256_loadu_si256(input_mm + 10),
_mm256_permute2x128_si256(X1_0, X1_1, 0 + (2 << 4))));
- store_m256i<11>(output, _mm256_xor_si256(load_m256i<11>(input),
+ _mm256_storeu_si256(output_mm + 11, _mm256_xor_si256(_mm256_loadu_si256(input_mm + 11),
_mm256_permute2x128_si256(X1_2, X1_3, 0 + (2 << 4))));
}
else
{
- store_m256i<8>(output, _mm256_permute2x128_si256(X0_0, X0_1, 0 + (2 << 4)));
- store_m256i<9>(output, _mm256_permute2x128_si256(X0_2, X0_3, 0 + (2 << 4)));
- store_m256i<10>(output, _mm256_permute2x128_si256(X1_0, X1_1, 0 + (2 << 4)));
- store_m256i<11>(output, _mm256_permute2x128_si256(X1_2, X1_3, 0 + (2 << 4)));
+ _mm256_storeu_si256(output_mm + 8, _mm256_permute2x128_si256(X0_0, X0_1, 0 + (2 << 4)));
+ _mm256_storeu_si256(output_mm + 9, _mm256_permute2x128_si256(X0_2, X0_3, 0 + (2 << 4)));
+ _mm256_storeu_si256(output_mm + 10, _mm256_permute2x128_si256(X1_0, X1_1, 0 + (2 << 4)));
+ _mm256_storeu_si256(output_mm + 11, _mm256_permute2x128_si256(X1_2, X1_3, 0 + (2 << 4)));
}
- if (input)
+ if (input_mm)
{
- store_m256i<12>(output, _mm256_xor_si256(load_m256i<12>(input),
+ _mm256_storeu_si256(output_mm + 12, _mm256_xor_si256(_mm256_loadu_si256(input_mm + 12),
_mm256_permute2x128_si256(X2_0, X2_1, 0 + (2 << 4))));
- store_m256i<13>(output, _mm256_xor_si256(load_m256i<13>(input),
+ _mm256_storeu_si256(output_mm + 13, _mm256_xor_si256(_mm256_loadu_si256(input_mm + 13),
_mm256_permute2x128_si256(X2_2, X2_3, 0 + (2 << 4))));
- store_m256i<14>(output, _mm256_xor_si256(load_m256i<14>(input),
+ _mm256_storeu_si256(output_mm + 14, _mm256_xor_si256(_mm256_loadu_si256(input_mm + 14),
_mm256_permute2x128_si256(X3_0, X3_1, 0 + (2 << 4))));
- store_m256i<15>(output, _mm256_xor_si256(load_m256i<15>(input),
+ _mm256_storeu_si256(output_mm + 15, _mm256_xor_si256(_mm256_loadu_si256(input_mm + 15),
_mm256_permute2x128_si256(X3_2, X3_3, 0 + (2 << 4))));
}
else
{
- store_m256i<12>(output, _mm256_permute2x128_si256(X2_0, X2_1, 0 + (2 << 4)));
- store_m256i<13>(output, _mm256_permute2x128_si256(X2_2, X2_3, 0 + (2 << 4)));
- store_m256i<14>(output, _mm256_permute2x128_si256(X3_0, X3_1, 0 + (2 << 4)));
- store_m256i<15>(output, _mm256_permute2x128_si256(X3_2, X3_3, 0 + (2 << 4)));
+ _mm256_storeu_si256(output_mm + 12, _mm256_permute2x128_si256(X2_0, X2_1, 0 + (2 << 4)));
+ _mm256_storeu_si256(output_mm + 13, _mm256_permute2x128_si256(X2_2, X2_3, 0 + (2 << 4)));
+ _mm256_storeu_si256(output_mm + 14, _mm256_permute2x128_si256(X3_0, X3_1, 0 + (2 << 4)));
+ _mm256_storeu_si256(output_mm + 15, _mm256_permute2x128_si256(X3_2, X3_3, 0 + (2 << 4)));
}
// https://software.intel.com/en-us/articles/avoiding-avx-sse-transition-penalties
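The excerpt ends on the comment citing Intel's article about AVX-SSE transition penalties. The conventional mitigation, and presumably what the remainder of chacha_avx.cpp (not shown above) does before returning to SSE-only callers, is to clear the upper YMM lanes; a minimal, assumed sketch:

    #include <immintrin.h>

    // Assumed pattern, per the cited article: after AVX2 code runs, zero the
    // upper 128 bits of the YMM registers so that subsequent legacy-SSE code
    // does not pay the VEX/non-VEX state transition penalty.
    void end_of_avx2_section()   // hypothetical placement
    {
        _mm256_zeroupper();
    }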