From 8fab1c3677198f941ae83fcf322edc420603d325 Mon Sep 17 00:00:00 2001
From: Jeffrey Walton <noloader@gmail.com>
Date: Sun, 9 Jun 2019 01:49:44 -0400
Subject: Revert changes for lgtm findings

This broke SunCC to the point of no repair. SunCC is using AVX2
instructions for C++ and SSE2. Man this compiler sucks...
---
 chacha_simd.cpp | 85 +++++++++++++++++++++++++++++----------------------------
 1 file changed, 44 insertions(+), 41 deletions(-)

(limited to 'chacha_simd.cpp')

diff --git a/chacha_simd.cpp b/chacha_simd.cpp
index e225579d..9fd6b0f1 100644
--- a/chacha_simd.cpp
+++ b/chacha_simd.cpp
@@ -38,7 +38,6 @@
 #if (CRYPTOPP_SSE2_INTRIN_AVAILABLE)
 # include <xmmintrin.h>
 # include <emmintrin.h>
-# include "sse_simd.h"
 #endif
 
 #if defined(__SSSE3__)
@@ -566,10 +565,14 @@ void ChaCha_OperateKeystream_NEON(const word32 *state, const byte* input, byte *
 
 void ChaCha_OperateKeystream_SSE2(const word32 *state, const byte* input, byte *output, unsigned int rounds)
 {
-    const __m128i state0 = load_m128i<0>(state);
-    const __m128i state1 = load_m128i<1>(state);
-    const __m128i state2 = load_m128i<2>(state);
-    const __m128i state3 = load_m128i<3>(state);
+    const __m128i* state_mm = reinterpret_cast<const __m128i*>(state);
+    const __m128i* input_mm = reinterpret_cast<const __m128i*>(input);
+    __m128i* output_mm = reinterpret_cast<__m128i*>(output);
+
+    const __m128i state0 = _mm_load_si128(state_mm + 0);
+    const __m128i state1 = _mm_load_si128(state_mm + 1);
+    const __m128i state2 = _mm_load_si128(state_mm + 2);
+    const __m128i state3 = _mm_load_si128(state_mm + 3);
 
     __m128i r0_0 = state0;
     __m128i r0_1 = state1;
@@ -769,57 +772,57 @@ void ChaCha_OperateKeystream_SSE2(const word32 *state, const byte* input, byte *
     r3_3 = _mm_add_epi32(r3_3, state3);
     r3_3 = _mm_add_epi64(r3_3, _mm_set_epi32(0, 0, 0, 3));
 
-    if (input)
+    if (input_mm)
     {
-        r0_0 = _mm_xor_si128(load_m128i<0>(input), r0_0);
-        r0_1 = _mm_xor_si128(load_m128i<1>(input), r0_1);
-        r0_2 = _mm_xor_si128(load_m128i<2>(input), r0_2);
-        r0_3 = _mm_xor_si128(load_m128i<3>(input), r0_3);
+        r0_0 = _mm_xor_si128(_mm_loadu_si128(input_mm + 0), r0_0);
+        r0_1 = _mm_xor_si128(_mm_loadu_si128(input_mm + 1), r0_1);
+        r0_2 = _mm_xor_si128(_mm_loadu_si128(input_mm + 2), r0_2);
+        r0_3 = _mm_xor_si128(_mm_loadu_si128(input_mm + 3), r0_3);
     }
 
-    store_m128i<0>(output, r0_0);
-    store_m128i<1>(output, r0_1);
-    store_m128i<2>(output, r0_2);
-    store_m128i<3>(output, r0_3);
+    _mm_storeu_si128(output_mm + 0, r0_0);
+    _mm_storeu_si128(output_mm + 1, r0_1);
+    _mm_storeu_si128(output_mm + 2, r0_2);
+    _mm_storeu_si128(output_mm + 3, r0_3);
 
-    if (input)
+    if (input_mm)
     {
-        r1_0 = _mm_xor_si128(load_m128i<4>(input), r1_0);
-        r1_1 = _mm_xor_si128(load_m128i<5>(input), r1_1);
-        r1_2 = _mm_xor_si128(load_m128i<6>(input), r1_2);
-        r1_3 = _mm_xor_si128(load_m128i<7>(input), r1_3);
+        r1_0 = _mm_xor_si128(_mm_loadu_si128(input_mm + 4), r1_0);
+        r1_1 = _mm_xor_si128(_mm_loadu_si128(input_mm + 5), r1_1);
+        r1_2 = _mm_xor_si128(_mm_loadu_si128(input_mm + 6), r1_2);
+        r1_3 = _mm_xor_si128(_mm_loadu_si128(input_mm + 7), r1_3);
     }
 
-    store_m128i<4>(output, r1_0);
-    store_m128i<5>(output, r1_1);
-    store_m128i<6>(output, r1_2);
-    store_m128i<7>(output, r1_3);
+    _mm_storeu_si128(output_mm + 4, r1_0);
+    _mm_storeu_si128(output_mm + 5, r1_1);
+    _mm_storeu_si128(output_mm + 6, r1_2);
+    _mm_storeu_si128(output_mm + 7, r1_3);
 
-    if (input)
+    if (input_mm)
     {
-        r2_0 = _mm_xor_si128(load_m128i< 8>(input), r2_0);
-        r2_1 = _mm_xor_si128(load_m128i< 9>(input), r2_1);
-        r2_2 = _mm_xor_si128(load_m128i<10>(input), r2_2);
-        r2_3 = _mm_xor_si128(load_m128i<11>(input), r2_3);
+        r2_0 = _mm_xor_si128(_mm_loadu_si128(input_mm + 8), r2_0);
+        r2_1 = _mm_xor_si128(_mm_loadu_si128(input_mm + 9), r2_1);
+        r2_2 = _mm_xor_si128(_mm_loadu_si128(input_mm + 10), r2_2);
+        r2_3 = _mm_xor_si128(_mm_loadu_si128(input_mm + 11), r2_3);
     }
 
-    store_m128i< 8>(output, r2_0);
-    store_m128i< 9>(output, r2_1);
-    store_m128i<10>(output, r2_2);
-    store_m128i<11>(output, r2_3);
+    _mm_storeu_si128(output_mm + 8, r2_0);
+    _mm_storeu_si128(output_mm + 9, r2_1);
+    _mm_storeu_si128(output_mm + 10, r2_2);
+    _mm_storeu_si128(output_mm + 11, r2_3);
 
-    if (input)
+    if (input_mm)
     {
-        r3_0 = _mm_xor_si128(load_m128i<12>(input), r3_0);
-        r3_1 = _mm_xor_si128(load_m128i<13>(input), r3_1);
-        r3_2 = _mm_xor_si128(load_m128i<14>(input), r3_2);
-        r3_3 = _mm_xor_si128(load_m128i<15>(input), r3_3);
+        r3_0 = _mm_xor_si128(_mm_loadu_si128(input_mm + 12), r3_0);
+        r3_1 = _mm_xor_si128(_mm_loadu_si128(input_mm + 13), r3_1);
+        r3_2 = _mm_xor_si128(_mm_loadu_si128(input_mm + 14), r3_2);
+        r3_3 = _mm_xor_si128(_mm_loadu_si128(input_mm + 15), r3_3);
     }
 
-    store_m128i<12>(output, r3_0);
-    store_m128i<13>(output, r3_1);
-    store_m128i<14>(output, r3_2);
-    store_m128i<15>(output, r3_3);
+    _mm_storeu_si128(output_mm + 12, r3_0);
+    _mm_storeu_si128(output_mm + 13, r3_1);
+    _mm_storeu_si128(output_mm + 14, r3_2);
+    _mm_storeu_si128(output_mm + 15, r3_3);
 }
 
 #endif  // CRYPTOPP_SSE2_INTRIN_AVAILABLE
--
cgit v1.2.1