| author    | Sergei Golubchik <serg@mariadb.org>                          | 2015-03-25 19:35:22 +0100 |
| committer | Sergei Golubchik <serg@mariadb.org>                          | 2015-04-08 10:58:46 +0200 |
| commit    | 91f7363e4baff9debe43cf039fe4525c43aee4cc (patch)             | |
| tree      | 4ab3f9cb7cea72501552cfd1bd094dc9bbff612c /mysys_ssl          | |
| parent    | f444d13a3bd861ec86530e22cd861f25e2b350df (diff)              | |
| download  | mariadb-git-91f7363e4baff9debe43cf039fe4525c43aee4cc.tar.gz  | |
yassl padding
Diffstat (limited to 'mysys_ssl')
-rw-r--r-- | mysys_ssl/my_crypt.cc | 44 |
1 file changed, 33 insertions, 11 deletions
diff --git a/mysys_ssl/my_crypt.cc b/mysys_ssl/my_crypt.cc
index 3e8ec854f59..60072a5bbaf 100644
--- a/mysys_ssl/my_crypt.cc
+++ b/mysys_ssl/my_crypt.cc
@@ -18,9 +18,6 @@
 #include <my_global.h>
 #include <my_crypt.h>
 
-// TODO
-// 2. padding
-
 #ifdef HAVE_YASSL
 #include "aes.hpp"
 
@@ -75,13 +72,12 @@ static int do_crypt(CipherMode cipher, Dir dir,
                     const KeyByte *key, uint8 key_length,
                     const KeyByte *iv, uint8 iv_length, int no_padding)
 {
-  int tail= no_padding ? source_length % MY_AES_BLOCK_SIZE : 0;
-  DBUG_ASSERT(source_length - tail >= MY_AES_BLOCK_SIZE);
+  int tail= source_length % MY_AES_BLOCK_SIZE;
 
 #ifdef HAVE_YASSL
   TaoCrypt::AES ctx(dir, cipher);
 
-  if (key_length != 16 && key_length != 24 && key_length != 32)
+  if (unlikely(key_length != 16 && key_length != 24 && key_length != 32))
     return AES_BAD_KEYSIZE;
 
   ctx.SetKey(key, key_length);
@@ -93,12 +89,33 @@ static int do_crypt(CipherMode cipher, Dir dir,
   DBUG_ASSERT(TaoCrypt::AES::BLOCK_SIZE == MY_AES_BLOCK_SIZE);
 
   ctx.Process(dest, source, source_length - tail);
-  *dest_length= source_length;
+  *dest_length= source_length - tail;
+
+  /* unlike OpenSSL, YaSSL doesn't support PKCS#7 padding */
+  if (!no_padding)
+  {
+    if (dir == CRYPT_ENCRYPT)
+    {
+      uchar buf[MY_AES_BLOCK_SIZE];
+      memcpy(buf, source + source_length - tail, tail);
+      memset(buf + tail, MY_AES_BLOCK_SIZE - tail, MY_AES_BLOCK_SIZE - tail);
+      ctx.Process(dest + *dest_length, buf, MY_AES_BLOCK_SIZE);
+      *dest_length+= MY_AES_BLOCK_SIZE;
+    }
+    else
+    {
+      int n= dest[source_length - 1];
+      if (tail || n == 0 || n > MY_AES_BLOCK_SIZE)
+        return AES_OPENSSL_ERROR;
+      *dest_length-= n;
+    }
+  }
+
 #else // HAVE_OPENSSL
   int fin;
   struct MyCTX ctx;
 
-  if (!cipher)
+  if (unlikely(!cipher))
     return AES_BAD_KEYSIZE;
 
   if (!EVP_CipherInit_ex(&ctx, cipher, NULL, key, iv, dir))
@@ -110,7 +127,9 @@ static int do_crypt(CipherMode cipher, Dir dir,
   DBUG_ASSERT(EVP_CIPHER_CTX_iv_length(&ctx) == iv_length);
   DBUG_ASSERT(EVP_CIPHER_CTX_block_size(&ctx) == MY_AES_BLOCK_SIZE || !no_padding);
 
-  if (!EVP_CipherUpdate(&ctx, dest, (int*)dest_length, source, source_length - tail))
+  /* use built-in OpenSSL padding, if possible */
+  if (!EVP_CipherUpdate(&ctx, dest, (int*)dest_length,
+                        source, source_length - (no_padding ? tail : 0)))
     return AES_OPENSSL_ERROR;
   if (!EVP_CipherFinal_ex(&ctx, dest + *dest_length, &fin))
     return AES_OPENSSL_ERROR;
@@ -118,14 +137,17 @@ static int do_crypt(CipherMode cipher, Dir dir,
 #endif
 
-  if (tail)
+  if (no_padding && tail)
   {
     /*
-      Not much we can do here, block ciphers cannot encrypt data that aren't
+      Not much we can do, block ciphers cannot encrypt data that aren't
       a multiple of the block length. At least not without padding.
       What we do here, we XOR the tail with the previous encrypted block.
     */
+    if (unlikely(source_length < MY_AES_BLOCK_SIZE))
+      return AES_OPENSSL_ERROR;
+
     const uchar *s= source + source_length - tail;
     const uchar *e= source + source_length;
     uchar *d= dest + source_length - tail;
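For context: the yaSSL branch has to emulate PKCS#7 padding by hand, because TaoCrypt's `Process()` only transforms whole blocks. On encryption the partial tail is copied into a scratch block and the remaining bytes are filled with the pad value `MY_AES_BLOCK_SIZE - tail`; on decryption the last decrypted byte gives the number of pad bytes to strip. Below is a minimal standalone sketch of that padding rule only, not the MariaDB code; the names `pkcs7_pad`, `pkcs7_unpad`, and `BLOCK_SIZE` are illustrative.

```cpp
// Minimal sketch of PKCS#7 padding as emulated for yaSSL in this patch.
// The pad value equals the number of padding bytes; a whole extra block
// is appended when the input is already block-aligned.
#include <cstdio>
#include <vector>

static const size_t BLOCK_SIZE = 16;   // AES block size, like MY_AES_BLOCK_SIZE

// Append PKCS#7 padding so the length becomes a multiple of BLOCK_SIZE.
static void pkcs7_pad(std::vector<unsigned char> &buf)
{
  size_t pad = BLOCK_SIZE - buf.size() % BLOCK_SIZE;  // always 1..BLOCK_SIZE
  buf.insert(buf.end(), pad, static_cast<unsigned char>(pad));
}

// Validate and strip PKCS#7 padding; returns false on malformed padding,
// much like the patch returning an error when n == 0 or n > block size.
static bool pkcs7_unpad(std::vector<unsigned char> &buf)
{
  if (buf.empty() || buf.size() % BLOCK_SIZE)
    return false;
  unsigned char n = buf.back();
  if (n == 0 || n > BLOCK_SIZE)
    return false;
  buf.resize(buf.size() - n);
  return true;
}

int main()
{
  std::vector<unsigned char> data = {'y', 'a', 's', 's', 'l'};
  pkcs7_pad(data);                      // 5 bytes -> 16 bytes, pad byte 0x0b
  printf("padded length: %zu, pad byte: %02x\n", data.size(), (unsigned) data.back());
  if (pkcs7_unpad(data))                // back to the original 5 bytes
    printf("unpadded length: %zu\n", data.size());
  return 0;
}
```

The OpenSSL branch gets the same behaviour for free: when padding is requested, `EVP_CipherUpdate()` is handed the full `source_length` and `EVP_CipherFinal_ex()` emits or strips the final padded block, which is what the "use built-in OpenSSL padding" hunk switches to.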