author    | Nikos Mavrogiannopoulos <nmav@gnutls.org> | 2013-12-14 19:56:58 +0100
committer | Nikos Mavrogiannopoulos <nmav@gnutls.org> | 2013-12-14 19:56:58 +0100
commit    | 098857c141fed8c70600a1c457dd1efd211dc634 (patch)
tree      | 5dfc7359894424e11e45278c285a9105224c7912 /lib/accelerated
parent    | 2e7ffc58b45d876950f7456318adf0348b01ea50 (diff)
download  | gnutls-098857c141fed8c70600a1c457dd1efd211dc634.tar.gz
reorganized source files.
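The one-line message undersells the change: the CPU detection and registration logic previously spread across aes-x86.c and aes-padlock.c moves into a new x86-common.c, and the CBC cipher implementations split out into aes-cbc-x86-aesni.c and aes-cbc-x86-ssse3.c. For orientation, the resulting call structure, condensed from the x86-common.c hunk below (an excerpt of the commit's own code, not an addition):

```c
/* Condensed from the x86-common.c hunk in this commit: a single
 * entry point now covers both vendor paths, so accelerated.c no
 * longer calls register_padlock_crypto() itself (its hunk below
 * drops that call). */
void register_x86_crypto(void)
{
	register_x86_intel_crypto();   /* Intel/AMD: SSSE3, AES-NI, PCLMUL */
	register_x86_padlock_crypto(); /* VIA: Padlock ACE/PHE */
}
```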
Diffstat (limited to 'lib/accelerated')
-rw-r--r-- | lib/accelerated/accelerated.c | 1
-rw-r--r-- | lib/accelerated/x86/Makefile.am | 4
-rw-r--r-- | lib/accelerated/x86/aes-cbc-x86-aesni.c | 132
-rw-r--r-- | lib/accelerated/x86/aes-cbc-x86-ssse3.c | 133
-rw-r--r-- | lib/accelerated/x86/aes-gcm-padlock.c | 2
-rw-r--r-- | lib/accelerated/x86/aes-gcm-x86-aesni.c | 2
-rw-r--r-- | lib/accelerated/x86/aes-gcm-x86-pclmul.c | 2
-rw-r--r-- | lib/accelerated/x86/aes-gcm-x86-ssse3.c | 2
-rw-r--r-- | lib/accelerated/x86/aes-padlock.c | 231
-rw-r--r-- | lib/accelerated/x86/aes-padlock.h | 7
-rw-r--r-- | lib/accelerated/x86/aes-x86.c | 413
-rw-r--r-- | lib/accelerated/x86/aes-x86.h | 9
-rw-r--r-- | lib/accelerated/x86/hmac-padlock.c | 4
-rw-r--r-- | lib/accelerated/x86/hmac-x86-ssse3.c | 2
-rw-r--r-- | lib/accelerated/x86/sha-padlock.c | 4
-rw-r--r-- | lib/accelerated/x86/sha-padlock.h | 4
-rw-r--r-- | lib/accelerated/x86/sha-x86-ssse3.c | 2
-rw-r--r-- | lib/accelerated/x86/sha-x86.h | 4
-rw-r--r-- | lib/accelerated/x86/x86-common.c | 505
19 files changed, 798 insertions(+), 665 deletions(-)
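Both registration paths in the diff below begin with a CPUID vendor-string gate: check_intel_or_amd() and check_via() in the x86-common.c hunk compare the 12-byte vendor string that CPUID leaf 0 returns split across EBX, EDX and ECX (e.g. "Genu" | "ineI" | "ntel"). A standalone sketch of the same probe, using GCC/Clang's <cpuid.h> as an assumption in place of the internal gnutls_cpuid() so it builds outside the GnuTLS tree (x86 targets only):

```c
#include <stdio.h>
#include <string.h>
#include <cpuid.h>

int main(void)
{
	unsigned int a, b, c, d;
	char vendor[13];

	if (!__get_cpuid(0, &a, &b, &c, &d))
		return 1;

	/* Reassemble in EBX, EDX, ECX order -- the same order the
	 * memcmp() triplets in check_intel_or_amd()/check_via() imply. */
	memcpy(vendor + 0, &b, 4);
	memcpy(vendor + 4, &d, 4);
	memcpy(vendor + 8, &c, 4);
	vendor[12] = '\0';

	/* e.g. GenuineIntel, AuthenticAMD, CentaurHauls */
	printf("CPU vendor: %s\n", vendor);
	return 0;
}
```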
diff --git a/lib/accelerated/accelerated.c b/lib/accelerated/accelerated.c index 86983746a3..7ad1cb5b28 100644 --- a/lib/accelerated/accelerated.c +++ b/lib/accelerated/accelerated.c @@ -32,7 +32,6 @@ void _gnutls_register_accel_crypto(void) #if defined(ASM_X86) if (gnutls_have_cpuid() != 0) { register_x86_crypto(); - register_padlock_crypto(); } #endif diff --git a/lib/accelerated/x86/Makefile.am b/lib/accelerated/x86/Makefile.am index 5e8f825782..e38551cd30 100644 --- a/lib/accelerated/x86/Makefile.am +++ b/lib/accelerated/x86/Makefile.am @@ -35,9 +35,9 @@ EXTRA_DIST = README license.txt files.mk noinst_LTLIBRARIES = libx86.la -libx86_la_SOURCES = sha-padlock.c hmac-padlock.c aes-x86.c aes-padlock.c aes-gcm-padlock.c \ +libx86_la_SOURCES = sha-padlock.c hmac-padlock.c x86-common.c aes-padlock.c aes-gcm-padlock.c \ aes-padlock.h aes-x86.h x86.h sha-padlock.h sha-x86-ssse3.c sha-x86.h hmac-x86-ssse3.c \ - aes-gcm-x86-ssse3.c aes-gcm-x86-aesni.c + aes-gcm-x86-ssse3.c aes-gcm-x86-aesni.c aes-cbc-x86-ssse3.c aes-cbc-x86-aesni.c include files.mk diff --git a/lib/accelerated/x86/aes-cbc-x86-aesni.c b/lib/accelerated/x86/aes-cbc-x86-aesni.c new file mode 100644 index 0000000000..6d4526fe17 --- /dev/null +++ b/lib/accelerated/x86/aes-cbc-x86-aesni.c @@ -0,0 +1,132 @@ +/* + * Copyright (C) 2011-2012 Free Software Foundation, Inc. + * + * Author: Nikos Mavrogiannopoulos + * + * This file is part of GnuTLS. + * + * The GnuTLS is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public License + * as published by the Free Software Foundation; either version 2.1 of + * the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/> + * + */ + +/* + * The following code is an implementation of the AES-128-CBC cipher + * using intel's AES instruction set. 
+ */ + +#include <gnutls_errors.h> +#include <gnutls_int.h> +#include <gnutls/crypto.h> +#include <gnutls_errors.h> +#include <aes-x86.h> +#include <sha-x86.h> +#include <x86.h> + +struct aes_ctx { + AES_KEY expanded_key; + uint8_t iv[16]; + int enc; +}; + +unsigned int _gnutls_x86_cpuid_s[4]; + +static int +aes_cipher_init(gnutls_cipher_algorithm_t algorithm, void **_ctx, int enc) +{ + /* we use key size to distinguish */ + if (algorithm != GNUTLS_CIPHER_AES_128_CBC + && algorithm != GNUTLS_CIPHER_AES_192_CBC + && algorithm != GNUTLS_CIPHER_AES_256_CBC) + return GNUTLS_E_INVALID_REQUEST; + + *_ctx = gnutls_calloc(1, sizeof(struct aes_ctx)); + if (*_ctx == NULL) { + gnutls_assert(); + return GNUTLS_E_MEMORY_ERROR; + } + + ((struct aes_ctx *) (*_ctx))->enc = enc; + + return 0; +} + +static int +aes_cipher_setkey(void *_ctx, const void *userkey, size_t keysize) +{ + struct aes_ctx *ctx = _ctx; + int ret; + + if (ctx->enc) + ret = + aesni_set_encrypt_key(userkey, keysize * 8, + ALIGN16(&ctx->expanded_key)); + else + ret = + aesni_set_decrypt_key(userkey, keysize * 8, + ALIGN16(&ctx->expanded_key)); + + if (ret != 0) + return gnutls_assert_val(GNUTLS_E_ENCRYPTION_FAILED); + + return 0; +} + +static int aes_setiv(void *_ctx, const void *iv, size_t iv_size) +{ + struct aes_ctx *ctx = _ctx; + + memcpy(ctx->iv, iv, 16); + return 0; +} + +static int +aes_encrypt(void *_ctx, const void *src, size_t src_size, + void *dst, size_t dst_size) +{ + struct aes_ctx *ctx = _ctx; + + aesni_cbc_encrypt(src, dst, src_size, ALIGN16(&ctx->expanded_key), + ctx->iv, 1); + return 0; +} + +static int +aes_decrypt(void *_ctx, const void *src, size_t src_size, + void *dst, size_t dst_size) +{ + struct aes_ctx *ctx = _ctx; + + aesni_cbc_encrypt(src, dst, src_size, ALIGN16(&ctx->expanded_key), + ctx->iv, 0); + + return 0; +} + +static void aes_deinit(void *_ctx) +{ + struct aes_ctx *ctx = _ctx; + + zeroize_temp_key(ctx, sizeof(*ctx)); + gnutls_free(ctx); +} + +const gnutls_crypto_cipher_st _gnutls_aesni_x86 = { + .init = aes_cipher_init, + .setkey = aes_cipher_setkey, + .setiv = aes_setiv, + .encrypt = aes_encrypt, + .decrypt = aes_decrypt, + .deinit = aes_deinit, +}; + diff --git a/lib/accelerated/x86/aes-cbc-x86-ssse3.c b/lib/accelerated/x86/aes-cbc-x86-ssse3.c new file mode 100644 index 0000000000..ff24578a76 --- /dev/null +++ b/lib/accelerated/x86/aes-cbc-x86-ssse3.c @@ -0,0 +1,133 @@ +/* + * Copyright (C) 2011-2012 Free Software Foundation, Inc. + * + * Author: Nikos Mavrogiannopoulos + * + * This file is part of GnuTLS. + * + * The GnuTLS is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public License + * as published by the Free Software Foundation; either version 2.1 of + * the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/> + * + */ + +/* + * The following code is an implementation of the AES-128-CBC cipher + * using intel's AES instruction set. 
+ */ + +#include <gnutls_errors.h> +#include <gnutls_int.h> +#include <gnutls/crypto.h> +#include <gnutls_errors.h> +#include <aes-x86.h> +#include <sha-x86.h> +#include <x86.h> + +struct aes_ctx { + AES_KEY expanded_key; + uint8_t iv[16]; + int enc; +}; + +unsigned int _gnutls_x86_cpuid_s[4]; + +static int +aes_cipher_init(gnutls_cipher_algorithm_t algorithm, void **_ctx, int enc) +{ + /* we use key size to distinguish */ + if (algorithm != GNUTLS_CIPHER_AES_128_CBC + && algorithm != GNUTLS_CIPHER_AES_192_CBC + && algorithm != GNUTLS_CIPHER_AES_256_CBC) + return GNUTLS_E_INVALID_REQUEST; + + *_ctx = gnutls_calloc(1, sizeof(struct aes_ctx)); + if (*_ctx == NULL) { + gnutls_assert(); + return GNUTLS_E_MEMORY_ERROR; + } + + ((struct aes_ctx *) (*_ctx))->enc = enc; + + return 0; +} + +static int +aes_ssse3_cipher_setkey(void *_ctx, const void *userkey, size_t keysize) +{ + struct aes_ctx *ctx = _ctx; + int ret; + + if (ctx->enc) + ret = + vpaes_set_encrypt_key(userkey, keysize * 8, + ALIGN16(&ctx->expanded_key)); + else + ret = + vpaes_set_decrypt_key(userkey, keysize * 8, + ALIGN16(&ctx->expanded_key)); + + if (ret != 0) + return gnutls_assert_val(GNUTLS_E_ENCRYPTION_FAILED); + + return 0; +} + +static int +aes_ssse3_encrypt(void *_ctx, const void *src, size_t src_size, + void *dst, size_t dst_size) +{ + struct aes_ctx *ctx = _ctx; + + vpaes_cbc_encrypt(src, dst, src_size, ALIGN16(&ctx->expanded_key), + ctx->iv, 1); + return 0; +} + +static int +aes_ssse3_decrypt(void *_ctx, const void *src, size_t src_size, + void *dst, size_t dst_size) +{ + struct aes_ctx *ctx = _ctx; + + vpaes_cbc_encrypt(src, dst, src_size, ALIGN16(&ctx->expanded_key), + ctx->iv, 0); + + return 0; +} + +static int aes_setiv(void *_ctx, const void *iv, size_t iv_size) +{ + struct aes_ctx *ctx = _ctx; + + memcpy(ctx->iv, iv, 16); + return 0; +} + +static void aes_deinit(void *_ctx) +{ + struct aes_ctx *ctx = _ctx; + + zeroize_temp_key(ctx, sizeof(*ctx)); + gnutls_free(ctx); +} + + +const gnutls_crypto_cipher_st _gnutls_aes_ssse3 = { + .init = aes_cipher_init, + .setkey = aes_ssse3_cipher_setkey, + .setiv = aes_setiv, + .encrypt = aes_ssse3_encrypt, + .decrypt = aes_ssse3_decrypt, + .deinit = aes_deinit, +}; + diff --git a/lib/accelerated/x86/aes-gcm-padlock.c b/lib/accelerated/x86/aes-gcm-padlock.c index 1c58656192..4b49521fd9 100644 --- a/lib/accelerated/x86/aes-gcm-padlock.c +++ b/lib/accelerated/x86/aes-gcm-padlock.c @@ -153,7 +153,7 @@ static void aes_gcm_tag(void *_ctx, void *tag, size_t tagsize) GCM_DIGEST(ctx, padlock_aes_encrypt, tagsize, tag); } -const gnutls_crypto_cipher_st aes_gcm_padlock_struct = { +const gnutls_crypto_cipher_st _gnutls_aes_gcm_padlock = { .init = aes_gcm_cipher_init, .setkey = aes_gcm_cipher_setkey, .setiv = aes_gcm_setiv, diff --git a/lib/accelerated/x86/aes-gcm-x86-aesni.c b/lib/accelerated/x86/aes-gcm-x86-aesni.c index 2651cd3fe4..55bc5b0fab 100644 --- a/lib/accelerated/x86/aes-gcm-x86-aesni.c +++ b/lib/accelerated/x86/aes-gcm-x86-aesni.c @@ -147,7 +147,7 @@ static void aes_gcm_deinit(void *_ctx) gnutls_free(ctx); } -const gnutls_crypto_cipher_st aes_gcm_x86_aesni = { +const gnutls_crypto_cipher_st _gnutls_aes_gcm_x86_aesni = { .init = aes_gcm_cipher_init, .setkey = aes_gcm_cipher_setkey, .setiv = aes_gcm_setiv, diff --git a/lib/accelerated/x86/aes-gcm-x86-pclmul.c b/lib/accelerated/x86/aes-gcm-x86-pclmul.c index d1249c6a00..c5f23b5abe 100644 --- a/lib/accelerated/x86/aes-gcm-x86-pclmul.c +++ b/lib/accelerated/x86/aes-gcm-x86-pclmul.c @@ -258,7 +258,7 @@ static void aes_gcm_tag(void *_ctx, 
void *tag, size_t tagsize) memcpy(tag, ctx->gcm.Xi.c, MIN(GCM_BLOCK_SIZE, tagsize)); } -const gnutls_crypto_cipher_st aes_gcm_pclmul = { +const gnutls_crypto_cipher_st _gnutls_aes_gcm_pclmul = { .init = aes_gcm_cipher_init, .setkey = aes_gcm_cipher_setkey, .setiv = aes_gcm_setiv, diff --git a/lib/accelerated/x86/aes-gcm-x86-ssse3.c b/lib/accelerated/x86/aes-gcm-x86-ssse3.c index 845bb3db60..68332387cc 100644 --- a/lib/accelerated/x86/aes-gcm-x86-ssse3.c +++ b/lib/accelerated/x86/aes-gcm-x86-ssse3.c @@ -146,7 +146,7 @@ static void aes_gcm_deinit(void *_ctx) gnutls_free(ctx); } -const gnutls_crypto_cipher_st aes_gcm_x86_ssse3 = { +const gnutls_crypto_cipher_st _gnutls_aes_gcm_x86_ssse3 = { .init = aes_gcm_cipher_init, .setkey = aes_gcm_cipher_setkey, .setiv = aes_gcm_setiv, diff --git a/lib/accelerated/x86/aes-padlock.c b/lib/accelerated/x86/aes-padlock.c index e9d7f526be..adc7e00394 100644 --- a/lib/accelerated/x86/aes-padlock.c +++ b/lib/accelerated/x86/aes-padlock.c @@ -32,8 +32,8 @@ #include <aes-x86.h> #include <x86.h> #ifdef HAVE_LIBNETTLE -#include <nettle/aes.h> /* for key generation in 192 and 256 bits */ -#include <sha-padlock.h> +# include <nettle/aes.h> /* for key generation in 192 and 256 bits */ +# include <sha-padlock.h> #endif #include <aes-padlock.h> @@ -157,7 +157,7 @@ static void aes_deinit(void *_ctx) gnutls_free(ctx); } -static const gnutls_crypto_cipher_st aes_padlock_struct = { +const gnutls_crypto_cipher_st _gnutls_aes_padlock = { .init = aes_cipher_init, .setkey = padlock_aes_cipher_setkey, .setiv = aes_setiv, @@ -166,228 +166,3 @@ static const gnutls_crypto_cipher_st aes_padlock_struct = { .deinit = aes_deinit, }; -static int check_padlock(void) -{ - unsigned int edx = padlock_capability(); - - return ((edx & (0x3 << 6)) == (0x3 << 6)); -} - -static int check_phe(void) -{ - unsigned int edx = padlock_capability(); - - return ((edx & (0x3 << 10)) == (0x3 << 10)); -} - -/* We are actually checking for SHA512 */ -static int check_phe_sha512(void) -{ - unsigned int edx = padlock_capability(); - - return ((edx & (0x3 << 25)) == (0x3 << 25)); -} - -static int check_phe_partial(void) -{ - const char *text = "test and test"; - uint32_t iv[5] = { 0x67452301UL, 0xEFCDAB89UL, - 0x98BADCFEUL, 0x10325476UL, 0xC3D2E1F0UL - }; - - padlock_sha1_blocks(iv, text, sizeof(text) - 1); - padlock_sha1_blocks(iv, text, sizeof(text) - 1); - - if (iv[0] == 0x9096E2D8UL && iv[1] == 0xA33074EEUL && - iv[2] == 0xCDBEE447UL && iv[3] == 0xEC7979D2UL && - iv[4] == 0x9D3FF5CFUL) - return 1; - else - return 0; -} - -static unsigned check_via(void) -{ - unsigned int a, b, c, d; - gnutls_cpuid(0, &a, &b, &c, &d); - - if ((memcmp(&b, "Cent", 4) == 0 && - memcmp(&d, "aurH", 4) == 0 && memcmp(&c, "auls", 4) == 0)) { - return 1; - } - - return 0; -} - -void register_padlock_crypto(void) -{ - int ret, phe; - - if (check_via() == 0) - return; - if (check_padlock()) { - _gnutls_debug_log - ("Padlock AES accelerator was detected\n"); - ret = - gnutls_crypto_single_cipher_register - (GNUTLS_CIPHER_AES_128_CBC, 80, &aes_padlock_struct); - if (ret < 0) { - gnutls_assert(); - } - - /* register GCM ciphers */ - ret = - gnutls_crypto_single_cipher_register - (GNUTLS_CIPHER_AES_128_GCM, 80, - &aes_gcm_padlock_struct); - if (ret < 0) { - gnutls_assert(); - } -#ifdef HAVE_LIBNETTLE - ret = - gnutls_crypto_single_cipher_register - (GNUTLS_CIPHER_AES_192_CBC, 80, &aes_padlock_struct); - if (ret < 0) { - gnutls_assert(); - } - - ret = - gnutls_crypto_single_cipher_register - (GNUTLS_CIPHER_AES_256_CBC, 80, 
&aes_padlock_struct); - if (ret < 0) { - gnutls_assert(); - } - - ret = - gnutls_crypto_single_cipher_register - (GNUTLS_CIPHER_AES_256_GCM, 80, - &aes_gcm_padlock_struct); - if (ret < 0) { - gnutls_assert(); - } -#endif - } -#ifdef HAVE_LIBNETTLE - phe = check_phe(); - - if (phe && check_phe_partial()) { - _gnutls_debug_log - ("Padlock SHA1 and SHA256 (partial) accelerator was detected\n"); - if (check_phe_sha512()) { - _gnutls_debug_log - ("Padlock SHA512 (partial) accelerator was detected\n"); - ret = - gnutls_crypto_single_digest_register - (GNUTLS_DIG_SHA384, 80, - &sha_padlock_nano_struct); - if (ret < 0) { - gnutls_assert(); - } - - ret = - gnutls_crypto_single_digest_register - (GNUTLS_DIG_SHA512, 80, - &sha_padlock_nano_struct); - if (ret < 0) { - gnutls_assert(); - } - - ret = - gnutls_crypto_single_mac_register - (GNUTLS_MAC_SHA384, 80, - &hmac_sha_padlock_nano_struct); - if (ret < 0) { - gnutls_assert(); - } - - ret = - gnutls_crypto_single_mac_register - (GNUTLS_MAC_SHA512, 80, - &hmac_sha_padlock_nano_struct); - if (ret < 0) { - gnutls_assert(); - } - } - - ret = - gnutls_crypto_single_digest_register(GNUTLS_DIG_SHA1, - 80, - &sha_padlock_nano_struct); - if (ret < 0) { - gnutls_assert(); - } - - ret = - gnutls_crypto_single_digest_register(GNUTLS_DIG_SHA224, - 80, - &sha_padlock_nano_struct); - if (ret < 0) { - gnutls_assert(); - } - - ret = - gnutls_crypto_single_digest_register(GNUTLS_DIG_SHA256, - 80, - &sha_padlock_nano_struct); - if (ret < 0) { - gnutls_assert(); - } - - ret = - gnutls_crypto_single_mac_register(GNUTLS_MAC_SHA1, - 80, - &hmac_sha_padlock_nano_struct); - if (ret < 0) { - gnutls_assert(); - } - - /* we don't register MAC_SHA224 because it is not used by TLS */ - - ret = - gnutls_crypto_single_mac_register(GNUTLS_MAC_SHA256, - 80, - &hmac_sha_padlock_nano_struct); - if (ret < 0) { - gnutls_assert(); - } - } else if (phe) { - /* Original padlock PHE. Does not support incremental operations. 
- */ - _gnutls_debug_log - ("Padlock SHA1 and SHA256 accelerator was detected\n"); - ret = - gnutls_crypto_single_digest_register(GNUTLS_DIG_SHA1, - 80, - &sha_padlock_struct); - if (ret < 0) { - gnutls_assert(); - } - - ret = - gnutls_crypto_single_digest_register(GNUTLS_DIG_SHA256, - 80, - &sha_padlock_struct); - if (ret < 0) { - gnutls_assert(); - } - - ret = - gnutls_crypto_single_mac_register(GNUTLS_MAC_SHA1, - 80, - &hmac_sha_padlock_struct); - if (ret < 0) { - gnutls_assert(); - } - - ret = - gnutls_crypto_single_mac_register(GNUTLS_MAC_SHA256, - 80, - &hmac_sha_padlock_struct); - if (ret < 0) { - gnutls_assert(); - } - } -#endif - - return; -} diff --git a/lib/accelerated/x86/aes-padlock.h b/lib/accelerated/x86/aes-padlock.h index 851b40b6f9..43a2f029a2 100644 --- a/lib/accelerated/x86/aes-padlock.h +++ b/lib/accelerated/x86/aes-padlock.h @@ -27,9 +27,10 @@ struct padlock_ctx { int enc; }; -extern const gnutls_crypto_cipher_st aes_gcm_padlock_struct; -extern const gnutls_crypto_mac_st hmac_sha_padlock_struct; -extern const gnutls_crypto_digest_st sha_padlock_struct; +extern const gnutls_crypto_cipher_st _gnutls_aes_padlock; +extern const gnutls_crypto_cipher_st _gnutls_aes_gcm_padlock; +extern const gnutls_crypto_mac_st _gnutls_hmac_sha_padlock; +extern const gnutls_crypto_digest_st _gnutls_sha_padlock; int padlock_aes_cipher_setkey(void *_ctx, const void *userkey, size_t keysize); diff --git a/lib/accelerated/x86/aes-x86.c b/lib/accelerated/x86/aes-x86.c deleted file mode 100644 index dcac4f083d..0000000000 --- a/lib/accelerated/x86/aes-x86.c +++ /dev/null @@ -1,413 +0,0 @@ -/* - * Copyright (C) 2011-2012 Free Software Foundation, Inc. - * - * Author: Nikos Mavrogiannopoulos - * - * This file is part of GnuTLS. - * - * The GnuTLS is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public License - * as published by the Free Software Foundation; either version 2.1 of - * the License, or (at your option) any later version. - * - * This library is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public License - * along with this program. If not, see <http://www.gnu.org/licenses/> - * - */ - -/* - * The following code is an implementation of the AES-128-CBC cipher - * using intel's AES instruction set. 
- */ - -#include <gnutls_errors.h> -#include <gnutls_int.h> -#include <gnutls/crypto.h> -#include <gnutls_errors.h> -#include <aes-x86.h> -#include <sha-x86.h> -#include <x86.h> - -struct aes_ctx { - AES_KEY expanded_key; - uint8_t iv[16]; - int enc; -}; - -unsigned int _gnutls_x86_cpuid_s[4]; - -static int -aes_cipher_init(gnutls_cipher_algorithm_t algorithm, void **_ctx, int enc) -{ - /* we use key size to distinguish */ - if (algorithm != GNUTLS_CIPHER_AES_128_CBC - && algorithm != GNUTLS_CIPHER_AES_192_CBC - && algorithm != GNUTLS_CIPHER_AES_256_CBC) - return GNUTLS_E_INVALID_REQUEST; - - *_ctx = gnutls_calloc(1, sizeof(struct aes_ctx)); - if (*_ctx == NULL) { - gnutls_assert(); - return GNUTLS_E_MEMORY_ERROR; - } - - ((struct aes_ctx *) (*_ctx))->enc = enc; - - return 0; -} - -static int -aes_cipher_setkey(void *_ctx, const void *userkey, size_t keysize) -{ - struct aes_ctx *ctx = _ctx; - int ret; - - if (ctx->enc) - ret = - aesni_set_encrypt_key(userkey, keysize * 8, - ALIGN16(&ctx->expanded_key)); - else - ret = - aesni_set_decrypt_key(userkey, keysize * 8, - ALIGN16(&ctx->expanded_key)); - - if (ret != 0) - return gnutls_assert_val(GNUTLS_E_ENCRYPTION_FAILED); - - return 0; -} - -static int aes_setiv(void *_ctx, const void *iv, size_t iv_size) -{ - struct aes_ctx *ctx = _ctx; - - memcpy(ctx->iv, iv, 16); - return 0; -} - -static int -aes_encrypt(void *_ctx, const void *src, size_t src_size, - void *dst, size_t dst_size) -{ - struct aes_ctx *ctx = _ctx; - - aesni_cbc_encrypt(src, dst, src_size, ALIGN16(&ctx->expanded_key), - ctx->iv, 1); - return 0; -} - -static int -aes_decrypt(void *_ctx, const void *src, size_t src_size, - void *dst, size_t dst_size) -{ - struct aes_ctx *ctx = _ctx; - - aesni_cbc_encrypt(src, dst, src_size, ALIGN16(&ctx->expanded_key), - ctx->iv, 0); - - return 0; -} - -static void aes_deinit(void *_ctx) -{ - struct aes_ctx *ctx = _ctx; - - zeroize_temp_key(ctx, sizeof(*ctx)); - gnutls_free(ctx); -} - -static const gnutls_crypto_cipher_st aesni_x86 = { - .init = aes_cipher_init, - .setkey = aes_cipher_setkey, - .setiv = aes_setiv, - .encrypt = aes_encrypt, - .decrypt = aes_decrypt, - .deinit = aes_deinit, -}; - -static int -aes_ssse3_cipher_setkey(void *_ctx, const void *userkey, size_t keysize) -{ - struct aes_ctx *ctx = _ctx; - int ret; - - if (ctx->enc) - ret = - vpaes_set_encrypt_key(userkey, keysize * 8, - ALIGN16(&ctx->expanded_key)); - else - ret = - vpaes_set_decrypt_key(userkey, keysize * 8, - ALIGN16(&ctx->expanded_key)); - - if (ret != 0) - return gnutls_assert_val(GNUTLS_E_ENCRYPTION_FAILED); - - return 0; -} - -static int -aes_ssse3_encrypt(void *_ctx, const void *src, size_t src_size, - void *dst, size_t dst_size) -{ - struct aes_ctx *ctx = _ctx; - - vpaes_cbc_encrypt(src, dst, src_size, ALIGN16(&ctx->expanded_key), - ctx->iv, 1); - return 0; -} - -static int -aes_ssse3_decrypt(void *_ctx, const void *src, size_t src_size, - void *dst, size_t dst_size) -{ - struct aes_ctx *ctx = _ctx; - - vpaes_cbc_encrypt(src, dst, src_size, ALIGN16(&ctx->expanded_key), - ctx->iv, 0); - - return 0; -} - -static const gnutls_crypto_cipher_st aes_ssse3 = { - .init = aes_cipher_init, - .setkey = aes_ssse3_cipher_setkey, - .setiv = aes_setiv, - .encrypt = aes_ssse3_encrypt, - .decrypt = aes_ssse3_decrypt, - .deinit = aes_deinit, -}; - -static unsigned check_optimized_aes(void) -{ - return (_gnutls_x86_cpuid_s[2] & 0x2000000); -} - -static unsigned check_ssse3(void) -{ - return (_gnutls_x86_cpuid_s[2] & 0x0000200); -} - -#ifdef ASM_X86_64 -static unsigned 
check_pclmul(void) -{ - return (_gnutls_x86_cpuid_s[2] & 0x2); -} -#endif - -static unsigned check_intel_or_amd(void) -{ - unsigned int a, b, c, d; - gnutls_cpuid(0, &a, &b, &c, &d); - - if ((memcmp(&b, "Genu", 4) == 0 && - memcmp(&d, "ineI", 4) == 0 && - memcmp(&c, "ntel", 4) == 0) || - (memcmp(&b, "Auth", 4) == 0 && - memcmp(&d, "enti", 4) == 0 && memcmp(&c, "cAMD", 4) == 0)) { - return 1; - } - - return 0; -} - -void register_x86_crypto(void) -{ - int ret; - - if (check_intel_or_amd() == 0) - return; - - gnutls_cpuid(1, &_gnutls_x86_cpuid_s[0], &_gnutls_x86_cpuid_s[1], - &_gnutls_x86_cpuid_s[2], &_gnutls_x86_cpuid_s[3]); - - if (check_ssse3()) { - _gnutls_debug_log("Intel SSSE3 was detected\n"); - - ret = - gnutls_crypto_single_cipher_register - (GNUTLS_CIPHER_AES_128_GCM, 90, - &aes_gcm_x86_ssse3); - if (ret < 0) { - gnutls_assert(); - } - - ret = - gnutls_crypto_single_cipher_register - (GNUTLS_CIPHER_AES_256_GCM, 90, - &aes_gcm_x86_ssse3); - if (ret < 0) { - gnutls_assert(); - } - - ret = - gnutls_crypto_single_cipher_register - (GNUTLS_CIPHER_AES_128_CBC, 90, &aes_ssse3); - if (ret < 0) { - gnutls_assert(); - } - - ret = - gnutls_crypto_single_cipher_register - (GNUTLS_CIPHER_AES_192_CBC, 90, &aes_ssse3); - if (ret < 0) { - gnutls_assert(); - } - - ret = - gnutls_crypto_single_cipher_register - (GNUTLS_CIPHER_AES_256_CBC, 90, &aes_ssse3); - if (ret < 0) { - gnutls_assert(); - } - - ret = - gnutls_crypto_single_digest_register(GNUTLS_DIG_SHA1, - 80, - &sha_x86_ssse3); - if (ret < 0) { - gnutls_assert(); - } - - ret = - gnutls_crypto_single_digest_register(GNUTLS_DIG_SHA224, - 80, - &sha_x86_ssse3); - if (ret < 0) { - gnutls_assert(); - } - - ret = - gnutls_crypto_single_digest_register(GNUTLS_DIG_SHA256, - 80, - &sha_x86_ssse3); - if (ret < 0) { - gnutls_assert(); - } - - - ret = - gnutls_crypto_single_mac_register(GNUTLS_DIG_SHA1, - 80, - &hmac_sha_x86_ssse3); - if (ret < 0) - gnutls_assert(); - - ret = - gnutls_crypto_single_mac_register(GNUTLS_DIG_SHA224, - 80, - &hmac_sha_x86_ssse3); - if (ret < 0) - gnutls_assert(); - - ret = - gnutls_crypto_single_mac_register(GNUTLS_DIG_SHA256, - 80, - &hmac_sha_x86_ssse3); - if (ret < 0) - gnutls_assert(); - -#ifdef ENABLE_SHA512 - ret = - gnutls_crypto_single_digest_register(GNUTLS_DIG_SHA384, - 80, - &sha_x86_ssse3); - if (ret < 0) - gnutls_assert(); - - ret = - gnutls_crypto_single_digest_register(GNUTLS_DIG_SHA512, - 80, - &sha_x86_ssse3); - if (ret < 0) - gnutls_assert(); - ret = - gnutls_crypto_single_mac_register(GNUTLS_DIG_SHA384, - 80, - &hmac_sha_x86_ssse3); - if (ret < 0) - gnutls_assert(); - - ret = - gnutls_crypto_single_mac_register(GNUTLS_DIG_SHA512, - 80, - &hmac_sha_x86_ssse3); - if (ret < 0) - gnutls_assert(); -#endif - } - - if (check_optimized_aes()) { - _gnutls_debug_log("Intel AES accelerator was detected\n"); - ret = - gnutls_crypto_single_cipher_register - (GNUTLS_CIPHER_AES_128_CBC, 80, &aesni_x86); - if (ret < 0) { - gnutls_assert(); - } - - ret = - gnutls_crypto_single_cipher_register - (GNUTLS_CIPHER_AES_192_CBC, 80, &aesni_x86); - if (ret < 0) { - gnutls_assert(); - } - - ret = - gnutls_crypto_single_cipher_register - (GNUTLS_CIPHER_AES_256_CBC, 80, &aesni_x86); - if (ret < 0) { - gnutls_assert(); - } -#ifdef ASM_X86_64 - if (check_pclmul()) { - /* register GCM ciphers */ - _gnutls_debug_log - ("Intel GCM accelerator was detected\n"); - ret = - gnutls_crypto_single_cipher_register - (GNUTLS_CIPHER_AES_128_GCM, 80, - &aes_gcm_pclmul); - if (ret < 0) { - gnutls_assert(); - } - - ret = - 
gnutls_crypto_single_cipher_register - (GNUTLS_CIPHER_AES_256_GCM, 80, - &aes_gcm_pclmul); - if (ret < 0) { - gnutls_assert(); - } - } else { - ret = - gnutls_crypto_single_cipher_register - (GNUTLS_CIPHER_AES_128_GCM, 80, - &aes_gcm_x86_aesni); - if (ret < 0) { - gnutls_assert(); - } - - ret = - gnutls_crypto_single_cipher_register - (GNUTLS_CIPHER_AES_256_GCM, 80, - &aes_gcm_x86_aesni); - if (ret < 0) { - gnutls_assert(); - } - - - } -#endif - } - - /* convert _gnutls_x86_cpuid_s the way openssl asm expects it */ - _gnutls_x86_cpuid_s[1] = _gnutls_x86_cpuid_s[2]; - - return; -} diff --git a/lib/accelerated/x86/aes-x86.h b/lib/accelerated/x86/aes-x86.h index 3a63d818c3..c0e56e31a8 100644 --- a/lib/accelerated/x86/aes-x86.h +++ b/lib/accelerated/x86/aes-x86.h @@ -4,7 +4,6 @@ #include <gnutls_int.h> void register_x86_crypto(void); -void register_padlock_crypto(void); #define ALIGN16(x) \ ((void *)(((ptrdiff_t)(x)+(ptrdiff_t)0x0f)&~((ptrdiff_t)0x0f))) @@ -43,8 +42,10 @@ void vpaes_cbc_encrypt(const unsigned char *in, unsigned char *out, void vpaes_encrypt(const unsigned char *in, unsigned char *out, const AES_KEY *key); void vpaes_decrypt(const unsigned char *in, unsigned char *out, const AES_KEY *key); -extern const gnutls_crypto_cipher_st aes_gcm_pclmul; -extern const gnutls_crypto_cipher_st aes_gcm_x86_ssse3; -extern const gnutls_crypto_cipher_st aes_gcm_x86_aesni; +extern const gnutls_crypto_cipher_st _gnutls_aes_gcm_pclmul; +extern const gnutls_crypto_cipher_st _gnutls_aes_gcm_x86_ssse3; +extern const gnutls_crypto_cipher_st _gnutls_aes_gcm_x86_aesni; +extern const gnutls_crypto_cipher_st _gnutls_aes_ssse3; +extern const gnutls_crypto_cipher_st _gnutls_aesni_x86; #endif diff --git a/lib/accelerated/x86/hmac-padlock.c b/lib/accelerated/x86/hmac-padlock.c index 265203f228..e516c385d9 100644 --- a/lib/accelerated/x86/hmac-padlock.c +++ b/lib/accelerated/x86/hmac-padlock.c @@ -327,7 +327,7 @@ wrap_padlock_hmac_fast(gnutls_mac_algorithm_t algo, return 0; } -const gnutls_crypto_mac_st hmac_sha_padlock_struct = { +const gnutls_crypto_mac_st _gnutls_hmac_sha_padlock = { .init = NULL, .setkey = NULL, .setnonce = NULL, @@ -337,7 +337,7 @@ const gnutls_crypto_mac_st hmac_sha_padlock_struct = { .fast = wrap_padlock_hmac_fast }; -const gnutls_crypto_mac_st hmac_sha_padlock_nano_struct = { +const gnutls_crypto_mac_st _gnutls_hmac_sha_padlock_nano = { .init = wrap_padlock_hmac_init, .setkey = wrap_padlock_hmac_setkey, .setnonce = NULL, diff --git a/lib/accelerated/x86/hmac-x86-ssse3.c b/lib/accelerated/x86/hmac-x86-ssse3.c index 3058ad67f2..0d5004d30c 100644 --- a/lib/accelerated/x86/hmac-x86-ssse3.c +++ b/lib/accelerated/x86/hmac-x86-ssse3.c @@ -287,7 +287,7 @@ static int wrap_x86_hmac_fast(gnutls_mac_algorithm_t algo, return 0; } -const gnutls_crypto_mac_st hmac_sha_x86_ssse3 = { +const gnutls_crypto_mac_st _gnutls_hmac_sha_x86_ssse3 = { .init = wrap_x86_hmac_init, .setkey = wrap_x86_hmac_setkey, .setnonce = NULL, diff --git a/lib/accelerated/x86/sha-padlock.c b/lib/accelerated/x86/sha-padlock.c index f568ab1039..3875cfca43 100644 --- a/lib/accelerated/x86/sha-padlock.c +++ b/lib/accelerated/x86/sha-padlock.c @@ -358,7 +358,7 @@ const struct nettle_hash padlock_sha256 = NN_HASH(sha256, padlock_sha256_update, const struct nettle_hash padlock_sha384 = NN_HASH(sha384, padlock_sha512_update, padlock_sha512_digest, SHA384); const struct nettle_hash padlock_sha512 = NN_HASH(sha512, padlock_sha512_update, padlock_sha512_digest, SHA512); -const gnutls_crypto_digest_st sha_padlock_struct = { +const 
gnutls_crypto_digest_st _gnutls_sha_padlock = { .init = NULL, .hash = NULL, .output = NULL, @@ -366,7 +366,7 @@ const gnutls_crypto_digest_st sha_padlock_struct = { .fast = wrap_padlock_hash_fast }; -const gnutls_crypto_digest_st sha_padlock_nano_struct = { +const gnutls_crypto_digest_st _gnutls_sha_padlock_nano = { .init = wrap_padlock_hash_init, .hash = wrap_padlock_hash_update, .output = wrap_padlock_hash_output, diff --git a/lib/accelerated/x86/sha-padlock.h b/lib/accelerated/x86/sha-padlock.h index 30d3ccec28..5cbe1d13b0 100644 --- a/lib/accelerated/x86/sha-padlock.h +++ b/lib/accelerated/x86/sha-padlock.h @@ -30,7 +30,7 @@ extern const struct nettle_hash padlock_sha256; extern const struct nettle_hash padlock_sha384; extern const struct nettle_hash padlock_sha512; -extern const gnutls_crypto_mac_st hmac_sha_padlock_nano_struct; -extern const gnutls_crypto_digest_st sha_padlock_nano_struct; +extern const gnutls_crypto_mac_st _gnutls_hmac_sha_padlock_nano; +extern const gnutls_crypto_digest_st _gnutls_sha_padlock_nano; #endif diff --git a/lib/accelerated/x86/sha-x86-ssse3.c b/lib/accelerated/x86/sha-x86-ssse3.c index 29dcd2dbd2..25b347b585 100644 --- a/lib/accelerated/x86/sha-x86-ssse3.c +++ b/lib/accelerated/x86/sha-x86-ssse3.c @@ -356,7 +356,7 @@ const struct nettle_hash x86_sha512 = NN_HASH(sha512, x86_sha512_update, sha512_digest, SHA512); #endif -const gnutls_crypto_digest_st sha_x86_ssse3 = { +const gnutls_crypto_digest_st _gnutls_sha_x86_ssse3 = { .init = wrap_x86_hash_init, .hash = wrap_x86_hash_update, .output = wrap_x86_hash_output, diff --git a/lib/accelerated/x86/sha-x86.h b/lib/accelerated/x86/sha-x86.h index 55994dce9e..3304212538 100644 --- a/lib/accelerated/x86/sha-x86.h +++ b/lib/accelerated/x86/sha-x86.h @@ -16,7 +16,7 @@ void x86_sha1_update(struct sha1_ctx *ctx, size_t length, const uint8_t * data); void x86_sha256_update(struct sha256_ctx *ctx, size_t length, const uint8_t * data); void x86_sha512_update(struct sha512_ctx *ctx, size_t length, const uint8_t * data); -extern const gnutls_crypto_digest_st sha_x86_ssse3; -extern const gnutls_crypto_mac_st hmac_sha_x86_ssse3; +extern const gnutls_crypto_digest_st _gnutls_sha_x86_ssse3; +extern const gnutls_crypto_mac_st _gnutls_hmac_sha_x86_ssse3; #endif diff --git a/lib/accelerated/x86/x86-common.c b/lib/accelerated/x86/x86-common.c new file mode 100644 index 0000000000..4d556c8acf --- /dev/null +++ b/lib/accelerated/x86/x86-common.c @@ -0,0 +1,505 @@ +/* + * Copyright (C) 2011-2012 Free Software Foundation, Inc. + * + * Author: Nikos Mavrogiannopoulos + * + * This file is part of GnuTLS. + * + * The GnuTLS is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public License + * as published by the Free Software Foundation; either version 2.1 of + * the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/> + * + */ + +/* + * The following code is an implementation of the AES-128-CBC cipher + * using intel's AES instruction set. 
+ */ + +#include <gnutls_errors.h> +#include <gnutls_int.h> +#include <gnutls/crypto.h> +#include <gnutls_errors.h> +#include <aes-x86.h> +#include <sha-x86.h> +#include <x86.h> +#ifdef HAVE_LIBNETTLE +# include <nettle/aes.h> /* for key generation in 192 and 256 bits */ +# include <sha-padlock.h> +#endif +#include <aes-padlock.h> + +unsigned int _gnutls_x86_cpuid_s[4]; + +static unsigned check_optimized_aes(void) +{ + return (_gnutls_x86_cpuid_s[2] & 0x2000000); +} + +static unsigned check_ssse3(void) +{ + return (_gnutls_x86_cpuid_s[2] & 0x0000200); +} + +#ifdef ASM_X86_64 +static unsigned check_pclmul(void) +{ + return (_gnutls_x86_cpuid_s[2] & 0x2); +} +#endif + +static int check_padlock(void) +{ + unsigned int edx = padlock_capability(); + + return ((edx & (0x3 << 6)) == (0x3 << 6)); +} + +static int check_phe(void) +{ + unsigned int edx = padlock_capability(); + + return ((edx & (0x3 << 10)) == (0x3 << 10)); +} + +/* We are actually checking for SHA512 */ +static int check_phe_sha512(void) +{ + unsigned int edx = padlock_capability(); + + return ((edx & (0x3 << 25)) == (0x3 << 25)); +} + +static int check_phe_partial(void) +{ + const char *text = "test and test"; + uint32_t iv[5] = { 0x67452301UL, 0xEFCDAB89UL, + 0x98BADCFEUL, 0x10325476UL, 0xC3D2E1F0UL + }; + + padlock_sha1_blocks(iv, text, sizeof(text) - 1); + padlock_sha1_blocks(iv, text, sizeof(text) - 1); + + if (iv[0] == 0x9096E2D8UL && iv[1] == 0xA33074EEUL && + iv[2] == 0xCDBEE447UL && iv[3] == 0xEC7979D2UL && + iv[4] == 0x9D3FF5CFUL) + return 1; + else + return 0; +} + +static unsigned check_via(void) +{ + unsigned int a, b, c, d; + gnutls_cpuid(0, &a, &b, &c, &d); + + if ((memcmp(&b, "Cent", 4) == 0 && + memcmp(&d, "aurH", 4) == 0 && memcmp(&c, "auls", 4) == 0)) { + return 1; + } + + return 0; +} + +static unsigned check_intel_or_amd(void) +{ + unsigned int a, b, c, d; + gnutls_cpuid(0, &a, &b, &c, &d); + + if ((memcmp(&b, "Genu", 4) == 0 && + memcmp(&d, "ineI", 4) == 0 && + memcmp(&c, "ntel", 4) == 0) || + (memcmp(&b, "Auth", 4) == 0 && + memcmp(&d, "enti", 4) == 0 && memcmp(&c, "cAMD", 4) == 0)) { + return 1; + } + + return 0; +} + +static +void register_x86_intel_crypto(void) +{ + int ret; + + if (check_intel_or_amd() == 0) + return; + + gnutls_cpuid(1, &_gnutls_x86_cpuid_s[0], &_gnutls_x86_cpuid_s[1], + &_gnutls_x86_cpuid_s[2], &_gnutls_x86_cpuid_s[3]); + + if (check_ssse3()) { + _gnutls_debug_log("Intel SSSE3 was detected\n"); + + ret = + gnutls_crypto_single_cipher_register + (GNUTLS_CIPHER_AES_128_GCM, 90, + &_gnutls_aes_gcm_x86_ssse3); + if (ret < 0) { + gnutls_assert(); + } + + ret = + gnutls_crypto_single_cipher_register + (GNUTLS_CIPHER_AES_256_GCM, 90, + &_gnutls_aes_gcm_x86_ssse3); + if (ret < 0) { + gnutls_assert(); + } + + ret = + gnutls_crypto_single_cipher_register + (GNUTLS_CIPHER_AES_128_CBC, 90, &_gnutls_aes_ssse3); + if (ret < 0) { + gnutls_assert(); + } + + ret = + gnutls_crypto_single_cipher_register + (GNUTLS_CIPHER_AES_192_CBC, 90, &_gnutls_aes_ssse3); + if (ret < 0) { + gnutls_assert(); + } + + ret = + gnutls_crypto_single_cipher_register + (GNUTLS_CIPHER_AES_256_CBC, 90, &_gnutls_aes_ssse3); + if (ret < 0) { + gnutls_assert(); + } + + ret = + gnutls_crypto_single_digest_register(GNUTLS_DIG_SHA1, + 80, + &_gnutls_sha_x86_ssse3); + if (ret < 0) { + gnutls_assert(); + } + + ret = + gnutls_crypto_single_digest_register(GNUTLS_DIG_SHA224, + 80, + &_gnutls_sha_x86_ssse3); + if (ret < 0) { + gnutls_assert(); + } + + ret = + gnutls_crypto_single_digest_register(GNUTLS_DIG_SHA256, + 80, + 
&_gnutls_sha_x86_ssse3); + if (ret < 0) { + gnutls_assert(); + } + + + ret = + gnutls_crypto_single_mac_register(GNUTLS_DIG_SHA1, + 80, + &_gnutls_hmac_sha_x86_ssse3); + if (ret < 0) + gnutls_assert(); + + ret = + gnutls_crypto_single_mac_register(GNUTLS_DIG_SHA224, + 80, + &_gnutls_hmac_sha_x86_ssse3); + if (ret < 0) + gnutls_assert(); + + ret = + gnutls_crypto_single_mac_register(GNUTLS_DIG_SHA256, + 80, + &_gnutls_hmac_sha_x86_ssse3); + if (ret < 0) + gnutls_assert(); + +#ifdef ENABLE_SHA512 + ret = + gnutls_crypto_single_digest_register(GNUTLS_DIG_SHA384, + 80, + &_gnutls_sha_x86_ssse3); + if (ret < 0) + gnutls_assert(); + + ret = + gnutls_crypto_single_digest_register(GNUTLS_DIG_SHA512, + 80, + &_gnutls_sha_x86_ssse3); + if (ret < 0) + gnutls_assert(); + ret = + gnutls_crypto_single_mac_register(GNUTLS_DIG_SHA384, + 80, + &_gnutls_hmac_sha_x86_ssse3); + if (ret < 0) + gnutls_assert(); + + ret = + gnutls_crypto_single_mac_register(GNUTLS_DIG_SHA512, + 80, + &_gnutls_hmac_sha_x86_ssse3); + if (ret < 0) + gnutls_assert(); +#endif + } + + if (check_optimized_aes()) { + _gnutls_debug_log("Intel AES accelerator was detected\n"); + ret = + gnutls_crypto_single_cipher_register + (GNUTLS_CIPHER_AES_128_CBC, 80, &_gnutls_aesni_x86); + if (ret < 0) { + gnutls_assert(); + } + + ret = + gnutls_crypto_single_cipher_register + (GNUTLS_CIPHER_AES_192_CBC, 80, &_gnutls_aesni_x86); + if (ret < 0) { + gnutls_assert(); + } + + ret = + gnutls_crypto_single_cipher_register + (GNUTLS_CIPHER_AES_256_CBC, 80, &_gnutls_aesni_x86); + if (ret < 0) { + gnutls_assert(); + } +#ifdef ASM_X86_64 + if (check_pclmul()) { + /* register GCM ciphers */ + _gnutls_debug_log + ("Intel GCM accelerator was detected\n"); + ret = + gnutls_crypto_single_cipher_register + (GNUTLS_CIPHER_AES_128_GCM, 80, + &_gnutls_aes_gcm_pclmul); + if (ret < 0) { + gnutls_assert(); + } + + ret = + gnutls_crypto_single_cipher_register + (GNUTLS_CIPHER_AES_256_GCM, 80, + &_gnutls_aes_gcm_pclmul); + if (ret < 0) { + gnutls_assert(); + } + } else { + ret = + gnutls_crypto_single_cipher_register + (GNUTLS_CIPHER_AES_128_GCM, 80, + &_gnutls_aes_gcm_x86_aesni); + if (ret < 0) { + gnutls_assert(); + } + + ret = + gnutls_crypto_single_cipher_register + (GNUTLS_CIPHER_AES_256_GCM, 80, + &_gnutls_aes_gcm_x86_aesni); + if (ret < 0) { + gnutls_assert(); + } + + + } +#endif + } + + /* convert _gnutls_x86_cpuid_s the way openssl asm expects it */ + _gnutls_x86_cpuid_s[1] = _gnutls_x86_cpuid_s[2]; + + return; +} + +static +void register_x86_padlock_crypto(void) +{ + int ret, phe; + + if (check_via() == 0) + return; + if (check_padlock()) { + _gnutls_debug_log + ("Padlock AES accelerator was detected\n"); + ret = + gnutls_crypto_single_cipher_register + (GNUTLS_CIPHER_AES_128_CBC, 80, &_gnutls_aes_padlock); + if (ret < 0) { + gnutls_assert(); + } + + /* register GCM ciphers */ + ret = + gnutls_crypto_single_cipher_register + (GNUTLS_CIPHER_AES_128_GCM, 80, + &_gnutls_aes_gcm_padlock); + if (ret < 0) { + gnutls_assert(); + } +#ifdef HAVE_LIBNETTLE + ret = + gnutls_crypto_single_cipher_register + (GNUTLS_CIPHER_AES_192_CBC, 80, &_gnutls_aes_padlock); + if (ret < 0) { + gnutls_assert(); + } + + ret = + gnutls_crypto_single_cipher_register + (GNUTLS_CIPHER_AES_256_CBC, 80, &_gnutls_aes_padlock); + if (ret < 0) { + gnutls_assert(); + } + + ret = + gnutls_crypto_single_cipher_register + (GNUTLS_CIPHER_AES_256_GCM, 80, + &_gnutls_aes_gcm_padlock); + if (ret < 0) { + gnutls_assert(); + } +#endif + } +#ifdef HAVE_LIBNETTLE + phe = check_phe(); + + if (phe && 
check_phe_partial()) { + _gnutls_debug_log + ("Padlock SHA1 and SHA256 (partial) accelerator was detected\n"); + if (check_phe_sha512()) { + _gnutls_debug_log + ("Padlock SHA512 (partial) accelerator was detected\n"); + ret = + gnutls_crypto_single_digest_register + (GNUTLS_DIG_SHA384, 80, + &_gnutls_sha_padlock_nano); + if (ret < 0) { + gnutls_assert(); + } + + ret = + gnutls_crypto_single_digest_register + (GNUTLS_DIG_SHA512, 80, + &_gnutls_sha_padlock_nano); + if (ret < 0) { + gnutls_assert(); + } + + ret = + gnutls_crypto_single_mac_register + (GNUTLS_MAC_SHA384, 80, + &_gnutls_hmac_sha_padlock_nano); + if (ret < 0) { + gnutls_assert(); + } + + ret = + gnutls_crypto_single_mac_register + (GNUTLS_MAC_SHA512, 80, + &_gnutls_hmac_sha_padlock_nano); + if (ret < 0) { + gnutls_assert(); + } + } + + ret = + gnutls_crypto_single_digest_register(GNUTLS_DIG_SHA1, + 80, + &_gnutls_sha_padlock_nano); + if (ret < 0) { + gnutls_assert(); + } + + ret = + gnutls_crypto_single_digest_register(GNUTLS_DIG_SHA224, + 80, + &_gnutls_sha_padlock_nano); + if (ret < 0) { + gnutls_assert(); + } + + ret = + gnutls_crypto_single_digest_register(GNUTLS_DIG_SHA256, + 80, + &_gnutls_sha_padlock_nano); + if (ret < 0) { + gnutls_assert(); + } + + ret = + gnutls_crypto_single_mac_register(GNUTLS_MAC_SHA1, + 80, + &_gnutls_hmac_sha_padlock_nano); + if (ret < 0) { + gnutls_assert(); + } + + /* we don't register MAC_SHA224 because it is not used by TLS */ + + ret = + gnutls_crypto_single_mac_register(GNUTLS_MAC_SHA256, + 80, + &_gnutls_hmac_sha_padlock_nano); + if (ret < 0) { + gnutls_assert(); + } + } else if (phe) { + /* Original padlock PHE. Does not support incremental operations. + */ + _gnutls_debug_log + ("Padlock SHA1 and SHA256 accelerator was detected\n"); + ret = + gnutls_crypto_single_digest_register(GNUTLS_DIG_SHA1, + 80, + &_gnutls_sha_padlock); + if (ret < 0) { + gnutls_assert(); + } + + ret = + gnutls_crypto_single_digest_register(GNUTLS_DIG_SHA256, + 80, + &_gnutls_sha_padlock); + if (ret < 0) { + gnutls_assert(); + } + + ret = + gnutls_crypto_single_mac_register(GNUTLS_MAC_SHA1, + 80, + &_gnutls_hmac_sha_padlock); + if (ret < 0) { + gnutls_assert(); + } + + ret = + gnutls_crypto_single_mac_register(GNUTLS_MAC_SHA256, + 80, + &_gnutls_hmac_sha_padlock); + if (ret < 0) { + gnutls_assert(); + } + } +#endif + + return; +} + +void register_x86_crypto(void) +{ + register_x86_intel_crypto(); + register_x86_padlock_crypto(); +} + |
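A footnote on the magic numbers above: the masks tested by check_optimized_aes(), check_ssse3() and check_pclmul() against _gnutls_x86_cpuid_s[2] are CPUID leaf-1 ECX feature bits: bit 25 (0x2000000) is AES-NI, bit 9 (0x200) is SSSE3, and bit 1 (0x2) is PCLMULQDQ. A standalone probe of the same bits, again via <cpuid.h> as an assumption in place of the internal gnutls_cpuid():

```c
#include <stdio.h>
#include <cpuid.h>

int main(void)
{
	unsigned int a, b, c, d;

	if (!__get_cpuid(1, &a, &b, &c, &d))
		return 1;

	/* Same ECX masks as in x86-common.c above. */
	printf("AES-NI:    %s\n", (c & 0x2000000) ? "yes" : "no");
	printf("SSSE3:     %s\n", (c & 0x0000200) ? "yes" : "no");
	printf("PCLMULQDQ: %s\n", (c & 0x0000002) ? "yes" : "no");
	return 0;
}
```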