author | Nikos Mavrogiannopoulos <nmav@redhat.com> | 2014-08-29 12:00:33 +0200
committer | Nikos Mavrogiannopoulos <nmav@redhat.com> | 2014-08-29 12:00:35 +0200
commit | ec70c214d2f714f03c42926df7935f4dda41194c (patch)
tree | ac9fb350b4ca52a51a3d4699aec3473695f63b2a /lib/accelerated
parent | 5d8a9cb2f6f795165eabbee20a1ee015695e22ac (diff)
download | gnutls-ec70c214d2f714f03c42926df7935f4dda41194c.tar.gz
added configuration option --disable-padlock
This allows keeping hardware acceleration enabled on x86, but without
support for VIA Padlock.
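Only the lib/accelerated half of the change is shown on this page; the configure.ac hunk that actually introduces the option falls outside the diffstat below. For orientation, here is a minimal sketch of what that side would have to provide — the exact macro wording is an assumption, but --disable-padlock must end up defining both the Automake conditional ENABLE_PADLOCK used in Makefile.am and the C preprocessor symbol ENABLE_PADLOCK tested in x86-common.c:

```m4
dnl Sketch only -- not part of the diff shown on this page; wording assumed.
dnl --disable-padlock must set both the Automake conditional and the
dnl C macro named ENABLE_PADLOCK.
AC_ARG_ENABLE([padlock],
              [AS_HELP_STRING([--disable-padlock],
                              [disable VIA Padlock acceleration on x86])],
              [enable_padlock=$enableval], [enable_padlock=yes])
AM_CONDITIONAL([ENABLE_PADLOCK], [test "$enable_padlock" = "yes"])
AS_IF([test "$enable_padlock" = "yes"],
      [AC_DEFINE([ENABLE_PADLOCK], [1], [Enable VIA Padlock acceleration])])
```

With something like this in place, a plain ./configure keeps the previous behaviour, while ./configure --disable-padlock builds libx86.la with only the AES-NI/SSSE3 paths and compiles out register_x86_padlock_crypto entirely.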
Diffstat (limited to 'lib/accelerated')
-rw-r--r-- | lib/accelerated/x86/Makefile.am | 26
-rw-r--r-- | lib/accelerated/x86/x86-common.c | 399
2 files changed, 226 insertions, 199 deletions
diff --git a/lib/accelerated/x86/Makefile.am b/lib/accelerated/x86/Makefile.am
index e6bb215711..15733dd3af 100644
--- a/lib/accelerated/x86/Makefile.am
+++ b/lib/accelerated/x86/Makefile.am
@@ -35,10 +35,14 @@ EXTRA_DIST = README license.txt files.mk

 noinst_LTLIBRARIES = libx86.la

-libx86_la_SOURCES = sha-padlock.c hmac-padlock.c x86-common.c aes-padlock.c aes-gcm-padlock.c \
-	aes-padlock.h aes-x86.h x86-common.h sha-padlock.h sha-x86-ssse3.c sha-x86.h hmac-x86-ssse3.c \
+libx86_la_SOURCES = x86-common.c aes-x86.h x86-common.h sha-x86-ssse3.c sha-x86.h hmac-x86-ssse3.c \
 	aes-gcm-x86-ssse3.c aes-gcm-x86-aesni.c aes-cbc-x86-ssse3.c aes-cbc-x86-aesni.c

+if ENABLE_PADLOCK
+libx86_la_SOURCES += sha-padlock.c hmac-padlock.c aes-padlock.c aes-gcm-padlock.c \
+	aes-padlock.h sha-padlock.h
+endif
+
 include files.mk

 if ASM_X86_64
@@ -47,14 +51,23 @@ libx86_la_SOURCES += aes-gcm-x86-pclmul.c

 if WINDOWS
 libx86_la_SOURCES += $(X86_64_FILES_COFF)
+if ENABLE_PADLOCK
+libx86_la_SOURCES += $(X86_64_PADLOCK_FILES_COFF)
+endif
 endif

 if MACOSX
 libx86_la_SOURCES += $(X86_64_FILES_MACOSX)
+if ENABLE_PADLOCK
+libx86_la_SOURCES += $(X86_64_PADLOCK_FILES_MACOSX)
+endif
 endif

 if ELF
 libx86_la_SOURCES += $(X86_64_FILES_ELF)
+if ENABLE_PADLOCK
+libx86_la_SOURCES += $(X86_64_PADLOCK_FILES_ELF)
+endif
 endif

 else #ASM_X86_64
@@ -62,14 +75,23 @@ AM_CFLAGS += -DASM_X86_32 -DASM_X86

 if WINDOWS
 libx86_la_SOURCES += $(X86_FILES_COFF)
+if ENABLE_PADLOCK
+libx86_la_SOURCES += $(X86_PADLOCK_FILES_COFF)
+endif
 endif

 if MACOSX
 libx86_la_SOURCES += $(X86_FILES_MACOSX)
+if ENABLE_PADLOCK
+libx86_la_SOURCES += $(X86_PADLOCK_FILES_MACOSX)
+endif
 endif

 if ELF
 libx86_la_SOURCES += $(X86_FILES_ELF)
+if ENABLE_PADLOCK
+libx86_la_SOURCES += $(X86_PADLOCK_FILES_ELF)
+endif
 endif

 endif #ASM_X86_64
diff --git a/lib/accelerated/x86/x86-common.c b/lib/accelerated/x86/x86-common.c
index 0ba20f04cc..cc67b08eb8 100644
--- a/lib/accelerated/x86/x86-common.c
+++ b/lib/accelerated/x86/x86-common.c
@@ -89,6 +89,24 @@ static void capabilities_to_intel_cpuid(unsigned capabilities)
         }
 }

+static unsigned check_optimized_aes(void)
+{
+        return (_gnutls_x86_cpuid_s[1] & bit_AES);
+}
+
+static unsigned check_ssse3(void)
+{
+        return (_gnutls_x86_cpuid_s[1] & bit_SSSE3);
+}
+
+#ifdef ASM_X86_64
+static unsigned check_pclmul(void)
+{
+        return (_gnutls_x86_cpuid_s[1] & bit_PCLMUL);
+}
+#endif
+
+#ifdef ENABLE_PADLOCK
 static unsigned capabilities_to_via_edx(unsigned capabilities)
 {
         memset(_gnutls_x86_cpuid_s, 0, sizeof(_gnutls_x86_cpuid_s));
@@ -107,23 +125,6 @@ static unsigned capabilities_to_via_edx(unsigned capabilities)
         return _gnutls_x86_cpuid_s[2];
 }

-static unsigned check_optimized_aes(void)
-{
-        return (_gnutls_x86_cpuid_s[1] & bit_AES);
-}
-
-static unsigned check_ssse3(void)
-{
-        return (_gnutls_x86_cpuid_s[1] & bit_SSSE3);
-}
-
-#ifdef ASM_X86_64
-static unsigned check_pclmul(void)
-{
-        return (_gnutls_x86_cpuid_s[1] & bit_PCLMUL);
-}
-#endif
-
 static int check_padlock(unsigned edx)
 {
         return ((edx & via_bit_PADLOCK) == via_bit_PADLOCK);
@@ -171,6 +172,188 @@ static unsigned check_via(void)
         return 0;
 }

+static
+void register_x86_padlock_crypto(unsigned capabilities)
+{
+        int ret, phe;
+        unsigned edx;
+
+        if (check_via() == 0)
+                return;
+
+        if (capabilities == 0)
+                edx = padlock_capability();
+        else
+                edx = capabilities_to_via_edx(capabilities);
+
+        if (check_padlock(edx)) {
+                _gnutls_debug_log
+                    ("Padlock AES accelerator was detected\n");
+                ret =
+                    gnutls_crypto_single_cipher_register
+                    (GNUTLS_CIPHER_AES_128_CBC, 80, &_gnutls_aes_padlock);
+                if (ret < 0) {
+                        gnutls_assert();
+                }
+
+                /* register GCM ciphers */
+                ret =
+                    gnutls_crypto_single_cipher_register
+                    (GNUTLS_CIPHER_AES_128_GCM, 80,
+                     &_gnutls_aes_gcm_padlock);
+                if (ret < 0) {
+                        gnutls_assert();
+                }
+#ifdef HAVE_LIBNETTLE
+                ret =
+                    gnutls_crypto_single_cipher_register
+                    (GNUTLS_CIPHER_AES_192_CBC, 80, &_gnutls_aes_padlock);
+                if (ret < 0) {
+                        gnutls_assert();
+                }
+
+                ret =
+                    gnutls_crypto_single_cipher_register
+                    (GNUTLS_CIPHER_AES_256_CBC, 80, &_gnutls_aes_padlock);
+                if (ret < 0) {
+                        gnutls_assert();
+                }
+
+                ret =
+                    gnutls_crypto_single_cipher_register
+                    (GNUTLS_CIPHER_AES_256_GCM, 80,
+                     &_gnutls_aes_gcm_padlock);
+                if (ret < 0) {
+                        gnutls_assert();
+                }
+#endif
+        }
+#ifdef HAVE_LIBNETTLE
+        phe = check_phe(edx);
+
+        if (phe && check_phe_partial()) {
+                _gnutls_debug_log
+                    ("Padlock SHA1 and SHA256 (partial) accelerator was detected\n");
+                if (check_phe_sha512(edx)) {
+                        _gnutls_debug_log
+                            ("Padlock SHA512 (partial) accelerator was detected\n");
+                        ret =
+                            gnutls_crypto_single_digest_register
+                            (GNUTLS_DIG_SHA384, 80,
+                             &_gnutls_sha_padlock_nano);
+                        if (ret < 0) {
+                                gnutls_assert();
+                        }
+
+                        ret =
+                            gnutls_crypto_single_digest_register
+                            (GNUTLS_DIG_SHA512, 80,
+                             &_gnutls_sha_padlock_nano);
+                        if (ret < 0) {
+                                gnutls_assert();
+                        }
+
+                        ret =
+                            gnutls_crypto_single_mac_register
+                            (GNUTLS_MAC_SHA384, 80,
+                             &_gnutls_hmac_sha_padlock_nano);
+                        if (ret < 0) {
+                                gnutls_assert();
+                        }
+
+                        ret =
+                            gnutls_crypto_single_mac_register
+                            (GNUTLS_MAC_SHA512, 80,
+                             &_gnutls_hmac_sha_padlock_nano);
+                        if (ret < 0) {
+                                gnutls_assert();
+                        }
+                }
+
+                ret =
+                    gnutls_crypto_single_digest_register(GNUTLS_DIG_SHA1,
+                                                         80,
+                                                         &_gnutls_sha_padlock_nano);
+                if (ret < 0) {
+                        gnutls_assert();
+                }
+
+                ret =
+                    gnutls_crypto_single_digest_register(GNUTLS_DIG_SHA224,
+                                                         80,
+                                                         &_gnutls_sha_padlock_nano);
+                if (ret < 0) {
+                        gnutls_assert();
+                }
+
+                ret =
+                    gnutls_crypto_single_digest_register(GNUTLS_DIG_SHA256,
+                                                         80,
+                                                         &_gnutls_sha_padlock_nano);
+                if (ret < 0) {
+                        gnutls_assert();
+                }
+
+                ret =
+                    gnutls_crypto_single_mac_register(GNUTLS_MAC_SHA1,
+                                                      80,
+                                                      &_gnutls_hmac_sha_padlock_nano);
+                if (ret < 0) {
+                        gnutls_assert();
+                }
+
+                /* we don't register MAC_SHA224 because it is not used by TLS */
+
+                ret =
+                    gnutls_crypto_single_mac_register(GNUTLS_MAC_SHA256,
+                                                      80,
+                                                      &_gnutls_hmac_sha_padlock_nano);
+                if (ret < 0) {
+                        gnutls_assert();
+                }
+        } else if (phe) {
+                /* Original padlock PHE. Does not support incremental operations.
+                 */
+                _gnutls_debug_log
+                    ("Padlock SHA1 and SHA256 accelerator was detected\n");
+                ret =
+                    gnutls_crypto_single_digest_register(GNUTLS_DIG_SHA1,
+                                                         80,
+                                                         &_gnutls_sha_padlock);
+                if (ret < 0) {
+                        gnutls_assert();
+                }
+
+                ret =
+                    gnutls_crypto_single_digest_register(GNUTLS_DIG_SHA256,
+                                                         80,
+                                                         &_gnutls_sha_padlock);
+                if (ret < 0) {
+                        gnutls_assert();
+                }
+
+                ret =
+                    gnutls_crypto_single_mac_register(GNUTLS_MAC_SHA1,
+                                                      80,
+                                                      &_gnutls_hmac_sha_padlock);
+                if (ret < 0) {
+                        gnutls_assert();
+                }
+
+                ret =
+                    gnutls_crypto_single_mac_register(GNUTLS_MAC_SHA256,
+                                                      80,
+                                                      &_gnutls_hmac_sha_padlock);
+                if (ret < 0) {
+                        gnutls_assert();
+                }
+        }
+#endif
+
+        return;
+}
+#endif
+
 static unsigned check_intel_or_amd(void)
 {
         unsigned int a, b, c, d;
@@ -386,186 +569,6 @@ void register_x86_intel_crypto(unsigned capabilities)
         return;
 }

-static
-void register_x86_padlock_crypto(unsigned capabilities)
-{
-        int ret, phe;
-        unsigned edx;
-
-        if (check_via() == 0)
-                return;
-
-        if (capabilities == 0)
-                edx = padlock_capability();
-        else
-                edx = capabilities_to_via_edx(capabilities);
-
-        if (check_padlock(edx)) {
-                _gnutls_debug_log
-                    ("Padlock AES accelerator was detected\n");
-                ret =
-                    gnutls_crypto_single_cipher_register
-                    (GNUTLS_CIPHER_AES_128_CBC, 80, &_gnutls_aes_padlock);
-                if (ret < 0) {
-                        gnutls_assert();
-                }
-
-                /* register GCM ciphers */
-                ret =
-                    gnutls_crypto_single_cipher_register
-                    (GNUTLS_CIPHER_AES_128_GCM, 80,
-                     &_gnutls_aes_gcm_padlock);
-                if (ret < 0) {
-                        gnutls_assert();
-                }
-#ifdef HAVE_LIBNETTLE
-                ret =
-                    gnutls_crypto_single_cipher_register
-                    (GNUTLS_CIPHER_AES_192_CBC, 80, &_gnutls_aes_padlock);
-                if (ret < 0) {
-                        gnutls_assert();
-                }
-
-                ret =
-                    gnutls_crypto_single_cipher_register
-                    (GNUTLS_CIPHER_AES_256_CBC, 80, &_gnutls_aes_padlock);
-                if (ret < 0) {
-                        gnutls_assert();
-                }
-
-                ret =
-                    gnutls_crypto_single_cipher_register
-                    (GNUTLS_CIPHER_AES_256_GCM, 80,
-                     &_gnutls_aes_gcm_padlock);
-                if (ret < 0) {
-                        gnutls_assert();
-                }
-#endif
-        }
-#ifdef HAVE_LIBNETTLE
-        phe = check_phe(edx);
-
-        if (phe && check_phe_partial()) {
-                _gnutls_debug_log
-                    ("Padlock SHA1 and SHA256 (partial) accelerator was detected\n");
-                if (check_phe_sha512(edx)) {
-                        _gnutls_debug_log
-                            ("Padlock SHA512 (partial) accelerator was detected\n");
-                        ret =
-                            gnutls_crypto_single_digest_register
-                            (GNUTLS_DIG_SHA384, 80,
-                             &_gnutls_sha_padlock_nano);
-                        if (ret < 0) {
-                                gnutls_assert();
-                        }
-
-                        ret =
-                            gnutls_crypto_single_digest_register
-                            (GNUTLS_DIG_SHA512, 80,
-                             &_gnutls_sha_padlock_nano);
-                        if (ret < 0) {
-                                gnutls_assert();
-                        }
-
-                        ret =
-                            gnutls_crypto_single_mac_register
-                            (GNUTLS_MAC_SHA384, 80,
-                             &_gnutls_hmac_sha_padlock_nano);
-                        if (ret < 0) {
-                                gnutls_assert();
-                        }
-
-                        ret =
-                            gnutls_crypto_single_mac_register
-                            (GNUTLS_MAC_SHA512, 80,
-                             &_gnutls_hmac_sha_padlock_nano);
-                        if (ret < 0) {
-                                gnutls_assert();
-                        }
-                }
-
-                ret =
-                    gnutls_crypto_single_digest_register(GNUTLS_DIG_SHA1,
-                                                         80,
-                                                         &_gnutls_sha_padlock_nano);
-                if (ret < 0) {
-                        gnutls_assert();
-                }
-
-                ret =
-                    gnutls_crypto_single_digest_register(GNUTLS_DIG_SHA224,
-                                                         80,
-                                                         &_gnutls_sha_padlock_nano);
-                if (ret < 0) {
-                        gnutls_assert();
-                }
-
-                ret =
-                    gnutls_crypto_single_digest_register(GNUTLS_DIG_SHA256,
-                                                         80,
-                                                         &_gnutls_sha_padlock_nano);
-                if (ret < 0) {
-                        gnutls_assert();
-                }
-
-                ret =
-                    gnutls_crypto_single_mac_register(GNUTLS_MAC_SHA1,
-                                                      80,
-                                                      &_gnutls_hmac_sha_padlock_nano);
-                if (ret < 0) {
-                        gnutls_assert();
-                }
-
-                /* we don't register MAC_SHA224 because it is not used by TLS */
-
-                ret =
-                    gnutls_crypto_single_mac_register(GNUTLS_MAC_SHA256,
-                                                      80,
-                                                      &_gnutls_hmac_sha_padlock_nano);
-                if (ret < 0) {
-                        gnutls_assert();
-                }
-        } else if (phe) {
-                /* Original padlock PHE. Does not support incremental operations.
-                 */
-                _gnutls_debug_log
-                    ("Padlock SHA1 and SHA256 accelerator was detected\n");
-                ret =
-                    gnutls_crypto_single_digest_register(GNUTLS_DIG_SHA1,
-                                                         80,
-                                                         &_gnutls_sha_padlock);
-                if (ret < 0) {
-                        gnutls_assert();
-                }
-
-                ret =
-                    gnutls_crypto_single_digest_register(GNUTLS_DIG_SHA256,
-                                                         80,
-                                                         &_gnutls_sha_padlock);
-                if (ret < 0) {
-                        gnutls_assert();
-                }
-
-                ret =
-                    gnutls_crypto_single_mac_register(GNUTLS_MAC_SHA1,
-                                                      80,
-                                                      &_gnutls_hmac_sha_padlock);
-                if (ret < 0) {
-                        gnutls_assert();
-                }
-
-                ret =
-                    gnutls_crypto_single_mac_register(GNUTLS_MAC_SHA256,
-                                                      80,
-                                                      &_gnutls_hmac_sha_padlock);
-                if (ret < 0) {
-                        gnutls_assert();
-                }
-        }
-#endif
-
-        return;
-}

 void register_x86_crypto(void)
 {
@@ -577,6 +580,8 @@ void register_x86_crypto(void)
         }

         register_x86_intel_crypto(capabilities);
+#ifdef ENABLE_PADLOCK
         register_x86_padlock_crypto(capabilities);
+#endif
 }