author     Sergei Golubchik <serg@mariadb.org>  2015-09-04 10:32:52 +0200
committer  Sergei Golubchik <serg@mariadb.org>  2015-09-04 10:33:50 +0200
commit     66b9a9409c73e298d6ceb668783a7cdd5ee85a69 (patch)
tree       be04b2c42d1b858756c5a8ba5355abd961589ec8 /mysys_ssl
parent     d94a982adbc21d74c0202f1ef64119baeb27c597 (diff)
download   mariadb-git-66b9a9409c73e298d6ceb668783a7cdd5ee85a69.tar.gz
New encryption API. Piece-wise encryption.
Instead of encrypt(src, dst, key, iv), which encrypts all data in one go, we now have encrypt_init(key, iv), encrypt_update(src, dst), and encrypt_finish(dst). This also causes collateral changes in the internal my_crypt.cc encryption functions and in the encryption service. Wrappers are provided for the old all-at-once encryption functionality, but binlog events are often written piecewise, so they will need the new API.
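As an illustration, a caller that receives an event in two pieces could drive the new interface roughly like this (a sketch against the signatures introduced below, not code from this commit; MY_AES_CBC, the 32-byte key, the 16-byte IV and the simplified error handling are assumptions of the sketch):

  /* Hedged sketch: encrypt data arriving in two pieces with the new API.
     Error paths are simplified -- a real caller would still have to finish
     or otherwise dispose of the context. */
  static int encrypt_two_pieces(const uchar *hdr, uint hdr_len,
                                const uchar *body, uint body_len,
                                uchar *dst, uint *dst_len,
                                const uchar *key, const uchar *iv)
  {
    void *ctx= alloca(MY_AES_CTX_SIZE);   /* same trick my_aes_crypt() uses */
    uint dlen, total= 0;
    int res;

    if ((res= my_aes_crypt_init(ctx, MY_AES_CBC, ENCRYPTION_FLAG_ENCRYPT,
                                key, 32, iv, 16)))
      return res;
    if ((res= my_aes_crypt_update(ctx, hdr, hdr_len, dst, &dlen)))
      return res;
    total+= dlen;
    if ((res= my_aes_crypt_update(ctx, body, body_len, dst + total, &dlen)))
      return res;
    total+= dlen;
    if ((res= my_aes_crypt_finish(ctx, dst + total, &dlen)))  /* pads + flushes */
      return res;
    *dst_len= total + dlen;
    return MY_AES_OK;
  }

The one-shot my_aes_crypt() wrapper added at the end of my_crypt.cc performs exactly this init/update/finish sequence internally for callers that still have the whole buffer in hand; note that dst must leave room for up to MY_AES_BLOCK_SIZE extra bytes of padding.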
Diffstat (limited to 'mysys_ssl')
-rw-r--r--  mysys_ssl/my_crypt.cc  449
-rw-r--r--  mysys_ssl/yassl.cc     194
2 files changed, 368 insertions(+), 275 deletions(-)
diff --git a/mysys_ssl/my_crypt.cc b/mysys_ssl/my_crypt.cc
index dc3c4f63bdb..1e76b34a1fb 100644
--- a/mysys_ssl/my_crypt.cc
+++ b/mysys_ssl/my_crypt.cc
@@ -16,199 +16,115 @@
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */
#include <my_global.h>
+#include <string.h>
#include <my_crypt.h>
#ifdef HAVE_YASSL
-#include "aes.hpp"
-
-typedef TaoCrypt::CipherDir Dir;
-static const Dir CRYPT_ENCRYPT = TaoCrypt::ENCRYPTION;
-static const Dir CRYPT_DECRYPT = TaoCrypt::DECRYPTION;
-
-typedef TaoCrypt::Mode CipherMode;
-static inline CipherMode aes_ecb(uint) { return TaoCrypt::ECB; }
-static inline CipherMode aes_cbc(uint) { return TaoCrypt::CBC; }
-
-typedef TaoCrypt::byte KeyByte;
-
+#include "yassl.cc"
#else
+
#include <openssl/evp.h>
#include <openssl/aes.h>
#include <openssl/err.h>
-typedef int Dir;
-static const Dir CRYPT_ENCRYPT = 1;
-static const Dir CRYPT_DECRYPT = 0;
-
-typedef const EVP_CIPHER *CipherMode;
-
-#define make_aes_dispatcher(mode) \
- static inline CipherMode aes_ ## mode(uint key_length) \
- { \
- switch (key_length) { \
- case 16: return EVP_aes_128_ ## mode(); \
- case 24: return EVP_aes_192_ ## mode(); \
- case 32: return EVP_aes_256_ ## mode(); \
- default: return 0; \
- } \
- }
-
-make_aes_dispatcher(ecb)
-make_aes_dispatcher(cbc)
-
-typedef uchar KeyByte;
-
-struct MyCTX : EVP_CIPHER_CTX {
- MyCTX() { EVP_CIPHER_CTX_init(this); }
- ~MyCTX() { EVP_CIPHER_CTX_cleanup(this); ERR_remove_state(0); }
-};
#endif
-static int block_crypt(CipherMode cipher, Dir dir,
- const uchar* source, uint source_length,
- uchar* dest, uint* dest_length,
- const KeyByte *key, uint key_length,
- const KeyByte *iv, uint iv_length, int no_padding)
+class MyCTX
{
- int tail= source_length % MY_AES_BLOCK_SIZE;
+public:
+ EVP_CIPHER_CTX ctx;
+ MyCTX() { EVP_CIPHER_CTX_init(&ctx); }
+ virtual ~MyCTX() { EVP_CIPHER_CTX_cleanup(&ctx); ERR_remove_state(0); }
- if (likely(source_length >= MY_AES_BLOCK_SIZE || !no_padding))
+ virtual int init(const EVP_CIPHER *cipher, int encrypt, const uchar *key,
+ uint klen, const uchar *iv, uint ivlen)
{
-#ifdef HAVE_YASSL
- TaoCrypt::AES ctx(dir, cipher);
-
- if (unlikely(key_length != 16 && key_length != 24 && key_length != 32))
- return MY_AES_BAD_KEYSIZE;
-
- ctx.SetKey(key, key_length);
- if (iv)
- {
- ctx.SetIV(iv);
- DBUG_ASSERT(TaoCrypt::AES::BLOCK_SIZE <= iv_length);
- }
- DBUG_ASSERT(TaoCrypt::AES::BLOCK_SIZE == MY_AES_BLOCK_SIZE);
-
- ctx.Process(dest, source, source_length - tail);
- *dest_length= source_length - tail;
-
- /* unlike OpenSSL, YaSSL doesn't support PKCS#7 padding */
- if (!no_padding)
- {
- if (dir == CRYPT_ENCRYPT)
- {
- uchar buf[MY_AES_BLOCK_SIZE];
- memcpy(buf, source + source_length - tail, tail);
- memset(buf + tail, MY_AES_BLOCK_SIZE - tail, MY_AES_BLOCK_SIZE - tail);
- ctx.Process(dest + *dest_length, buf, MY_AES_BLOCK_SIZE);
- *dest_length+= MY_AES_BLOCK_SIZE;
- }
- else
- {
- int n= source_length ? dest[source_length - 1] : 0;
- if (tail || n == 0 || n > MY_AES_BLOCK_SIZE)
- return MY_AES_BAD_DATA;
- *dest_length-= n;
- }
- }
-
-#else // HAVE_OPENSSL
- int fin;
- struct MyCTX ctx;
-
if (unlikely(!cipher))
return MY_AES_BAD_KEYSIZE;
- if (!EVP_CipherInit_ex(&ctx, cipher, NULL, key, iv, dir))
+ if (!EVP_CipherInit_ex(&ctx, cipher, NULL, key, iv, encrypt))
return MY_AES_OPENSSL_ERROR;
- EVP_CIPHER_CTX_set_padding(&ctx, !no_padding);
+ DBUG_ASSERT(EVP_CIPHER_CTX_key_length(&ctx) == (int)klen);
+ DBUG_ASSERT(EVP_CIPHER_CTX_iv_length(&ctx) <= (int)ivlen);
- DBUG_ASSERT(EVP_CIPHER_CTX_key_length(&ctx) == (int)key_length);
- DBUG_ASSERT(EVP_CIPHER_CTX_iv_length(&ctx) <= (int)iv_length);
- DBUG_ASSERT(EVP_CIPHER_CTX_block_size(&ctx) == MY_AES_BLOCK_SIZE);
-
- /* use built-in OpenSSL padding, if possible */
- if (!EVP_CipherUpdate(&ctx, dest, (int*)dest_length,
- source, source_length - (no_padding ? tail : 0)))
+ return MY_AES_OK;
+ }
+ virtual int update(const uchar *src, uint slen, uchar *dst, uint *dlen)
+ {
+ if (!EVP_CipherUpdate(&ctx, dst, (int*)dlen, src, slen))
return MY_AES_OPENSSL_ERROR;
- if (!EVP_CipherFinal_ex(&ctx, dest + *dest_length, &fin))
+ return MY_AES_OK;
+ }
+ virtual int finish(uchar *dst, uint *dlen)
+ {
+ if (!EVP_CipherFinal_ex(&ctx, dst, (int*)dlen))
return MY_AES_BAD_DATA;
- *dest_length += fin;
+ return MY_AES_OK;
+ }
+};
-#endif
+class MyCTX_nopad : public MyCTX
+{
+public:
+ const uchar *key;
+ int klen;
+
+ MyCTX_nopad() : MyCTX() { }
+ ~MyCTX_nopad() { }
+
+ int init(const EVP_CIPHER *cipher, int encrypt, const uchar *key, uint klen,
+ const uchar *iv, uint ivlen)
+ {
+ compile_time_assert(MY_AES_CTX_SIZE >= sizeof(MyCTX_nopad));
+ this->key= key;
+ this->klen= klen;
+ int res= MyCTX::init(cipher, encrypt, key, klen, iv, ivlen);
+ memcpy(ctx.oiv, iv, ivlen); // in ECB mode OpenSSL doesn't do that itself
+ EVP_CIPHER_CTX_set_padding(&ctx, 0);
+ return res;
}
- if (no_padding)
+ int finish(uchar *dst, uint *dlen)
{
- if (tail)
+ if (ctx.buf_len)
{
/*
Not much we can do, block ciphers cannot encrypt data that aren't
a multiple of the block length. At least not without padding.
Let's do something CTR-like for the last partial block.
*/
-
uchar mask[MY_AES_BLOCK_SIZE];
uint mlen;
- DBUG_ASSERT(iv_length >= sizeof(mask));
- my_aes_encrypt_ecb(iv, sizeof(mask), mask, &mlen,
- key, key_length, 0, 0, 1);
+ my_aes_crypt(MY_AES_ECB, ENCRYPTION_FLAG_ENCRYPT | ENCRYPTION_FLAG_NOPAD,
+ ctx.oiv, sizeof(mask), mask, &mlen, key, klen, 0, 0);
DBUG_ASSERT(mlen == sizeof(mask));
- const uchar *s= source + source_length - tail;
- const uchar *e= source + source_length;
- uchar *d= dest + source_length - tail;
- const uchar *m= mask;
- while (s < e)
- *d++ = *s++ ^ *m++;
+ for (int i=0; i < ctx.buf_len; i++)
+ dst[i]= ctx.buf[i] ^ mask[i];
}
- *dest_length= source_length;
+ *dlen= ctx.buf_len;
+ return MY_AES_OK;
}
+};
- return MY_AES_OK;
-}
-
-C_MODE_START
+#define make_aes_dispatcher(mode) \
+ static inline const EVP_CIPHER *aes_ ## mode(uint klen) \
+ { \
+ switch (klen) { \
+ case 16: return EVP_aes_128_ ## mode(); \
+ case 24: return EVP_aes_192_ ## mode(); \
+ case 32: return EVP_aes_256_ ## mode(); \
+ default: return 0; \
+ } \
+ }
+make_aes_dispatcher(ecb)
+make_aes_dispatcher(cbc)
#ifdef HAVE_EncryptAes128Ctr
make_aes_dispatcher(ctr)
-
-/*
- special simplified implementation for CTR, because it's a stream cipher
- (doesn't need padding, always encrypts the specified number of bytes), and
- because encrypting and decrypting code is exactly the same (courtesy of XOR)
-*/
-int my_aes_encrypt_ctr(const uchar* source, uint source_length,
- uchar* dest, uint* dest_length,
- const uchar* key, uint key_length,
- const uchar* iv, uint iv_length)
-{
- CipherMode cipher= aes_ctr(key_length);
- struct MyCTX ctx;
- int fin __attribute__((unused));
-
- if (unlikely(!cipher))
- return MY_AES_BAD_KEYSIZE;
-
- if (!EVP_CipherInit_ex(&ctx, cipher, NULL, key, iv, CRYPT_ENCRYPT))
- return MY_AES_OPENSSL_ERROR;
-
- DBUG_ASSERT(EVP_CIPHER_CTX_key_length(&ctx) == (int)key_length);
- DBUG_ASSERT(EVP_CIPHER_CTX_iv_length(&ctx) <= (int)iv_length);
- DBUG_ASSERT(EVP_CIPHER_CTX_block_size(&ctx) == 1);
-
- if (!EVP_CipherUpdate(&ctx, dest, (int*)dest_length, source, source_length))
- return MY_AES_OPENSSL_ERROR;
-
- DBUG_ASSERT(EVP_CipherFinal_ex(&ctx, dest + *dest_length, &fin));
- DBUG_ASSERT(fin == 0);
-
- return MY_AES_OK;
-}
-
#endif /* HAVE_EncryptAes128Ctr */
-
#ifdef HAVE_EncryptAes128Gcm
make_aes_dispatcher(gcm)
@@ -218,145 +134,146 @@ make_aes_dispatcher(gcm)
- IV tail (over 12 bytes) goes to AAD
- the tag is appended to the ciphertext
*/
-int do_gcm(const uchar* source, uint source_length,
- uchar* dest, uint* dest_length,
- const uchar* key, uint key_length,
- const uchar* iv, uint iv_length, Dir dir)
-{
- CipherMode cipher= aes_gcm(key_length);
- struct MyCTX ctx;
- int fin;
- uint real_iv_length;
-
- if (unlikely(!cipher))
- return MY_AES_BAD_KEYSIZE;
-
- if (!EVP_CipherInit_ex(&ctx, cipher, NULL, key, iv, dir))
- return MY_AES_OPENSSL_ERROR;
- real_iv_length= EVP_CIPHER_CTX_iv_length(&ctx);
-
- DBUG_ASSERT(EVP_CIPHER_CTX_key_length(&ctx) == (int)key_length);
- DBUG_ASSERT(real_iv_length <= iv_length);
- DBUG_ASSERT(EVP_CIPHER_CTX_block_size(&ctx) == 1);
-
- if (dir == CRYPT_DECRYPT)
+class MyCTX_gcm : public MyCTX
+{
+public:
+ const uchar *aad;
+ int aadlen;
+ MyCTX_gcm() : MyCTX() { }
+ ~MyCTX_gcm() { }
+
+ int init(const EVP_CIPHER *cipher, int encrypt, const uchar *key, uint klen,
+ const uchar *iv, uint ivlen)
{
- source_length-= MY_AES_BLOCK_SIZE;
- if(!EVP_CIPHER_CTX_ctrl(&ctx, EVP_CTRL_GCM_SET_TAG, MY_AES_BLOCK_SIZE,
- (void*)(source + source_length)))
- return MY_AES_OPENSSL_ERROR;
+ compile_time_assert(MY_AES_CTX_SIZE >= sizeof(MyCTX_gcm));
+ int res= MyCTX::init(cipher, encrypt, key, klen, iv, ivlen);
+ int real_ivlen= EVP_CIPHER_CTX_iv_length(&ctx);
+ aad= iv + real_ivlen;
+ aadlen= ivlen - real_ivlen;
+ return res;
}
- if (real_iv_length < iv_length)
+ int update(const uchar *src, uint slen, uchar *dst, uint *dlen)
{
- if (!EVP_CipherUpdate(&ctx, NULL, &fin,
- iv + real_iv_length, iv_length - real_iv_length))
+ /*
+ note that this GCM class cannot do streaming decryption, because
+ it needs the tag (which is located at the end of encrypted data)
+ before decrypting the data. it can encrypt data piecewise, like, first
+ half, then the second half, but it must decrypt all at once
+ */
+ if (!ctx.encrypt)
+ {
+ slen-= MY_AES_BLOCK_SIZE;
+ if(!EVP_CIPHER_CTX_ctrl(&ctx, EVP_CTRL_GCM_SET_TAG, MY_AES_BLOCK_SIZE,
+ (void*)(src + slen)))
+ return MY_AES_OPENSSL_ERROR;
+ }
+ int unused;
+ if (aadlen && !EVP_CipherUpdate(&ctx, NULL, &unused, aad, aadlen))
return MY_AES_OPENSSL_ERROR;
+ aadlen= 0;
+ return MyCTX::update(src, slen, dst, dlen);
}
- if (!EVP_CipherUpdate(&ctx, dest, (int*)dest_length, source, source_length))
- return MY_AES_OPENSSL_ERROR;
-
- if (!EVP_CipherFinal_ex(&ctx, dest + *dest_length, &fin))
- return MY_AES_BAD_DATA;
- DBUG_ASSERT(fin == 0);
-
- if (dir == CRYPT_ENCRYPT)
+ int finish(uchar *dst, uint *dlen)
{
- if(!EVP_CIPHER_CTX_ctrl(&ctx, EVP_CTRL_GCM_GET_TAG, MY_AES_BLOCK_SIZE,
- dest + *dest_length))
- return MY_AES_OPENSSL_ERROR;
- *dest_length+= MY_AES_BLOCK_SIZE;
- }
-
- return MY_AES_OK;
-}
+ int fin;
+ if (!EVP_CipherFinal_ex(&ctx, dst, &fin))
+ return MY_AES_BAD_DATA;
+ DBUG_ASSERT(fin == 0);
-int my_aes_encrypt_gcm(const uchar* source, uint source_length,
- uchar* dest, uint* dest_length,
- const uchar* key, uint key_length,
- const uchar* iv, uint iv_length)
-{
- return do_gcm(source, source_length, dest, dest_length,
- key, key_length, iv, iv_length, CRYPT_ENCRYPT);
-}
+ if (ctx.encrypt)
+ {
+ if(!EVP_CIPHER_CTX_ctrl(&ctx, EVP_CTRL_GCM_GET_TAG, MY_AES_BLOCK_SIZE, dst))
+ return MY_AES_OPENSSL_ERROR;
+ *dlen= MY_AES_BLOCK_SIZE;
+ }
+ else
+ *dlen= 0;
+ return MY_AES_OK;
+ }
+};
-int my_aes_decrypt_gcm(const uchar* source, uint source_length,
- uchar* dest, uint* dest_length,
- const uchar* key, uint key_length,
- const uchar* iv, uint iv_length)
-{
- return do_gcm(source, source_length, dest, dest_length,
- key, key_length, iv, iv_length, CRYPT_DECRYPT);
-}
+#endif
+const EVP_CIPHER *(*ciphers[])(uint)= {
+ aes_ecb, aes_cbc
+#ifdef HAVE_EncryptAes128Ctr
+ , aes_ctr
+#ifdef HAVE_EncryptAes128Gcm
+ , aes_gcm
+#endif
#endif
+};
+
+extern "C" {
-int my_aes_encrypt_ecb(const uchar* source, uint source_length,
- uchar* dest, uint* dest_length,
- const uchar* key, uint key_length,
- const uchar* iv, uint iv_length,
- int no_padding)
+int my_aes_crypt_init(void *ctx, enum my_aes_mode mode, int flags,
+ const unsigned char* key, unsigned int klen,
+ const unsigned char* iv, unsigned int ivlen)
{
- return block_crypt(aes_ecb(key_length), CRYPT_ENCRYPT, source, source_length,
- dest, dest_length, key, key_length, iv, iv_length, no_padding);
+#ifdef HAVE_EncryptAes128Ctr
+#ifdef HAVE_EncryptAes128Gcm
+ if (mode == MY_AES_GCM)
+ if (flags & ENCRYPTION_FLAG_NOPAD)
+ return MY_AES_OPENSSL_ERROR;
+ else
+ new (ctx) MyCTX_gcm();
+ else
+#endif
+ if (mode == MY_AES_CTR)
+ new (ctx) MyCTX();
+ else
+#endif
+ if (flags & ENCRYPTION_FLAG_NOPAD)
+ new (ctx) MyCTX_nopad();
+ else
+ new (ctx) MyCTX();
+ return ((MyCTX*)ctx)->init(ciphers[mode](klen), flags & 1,
+ key, klen, iv, ivlen);
}
-int my_aes_decrypt_ecb(const uchar* source, uint source_length,
- uchar* dest, uint* dest_length,
- const uchar* key, uint key_length,
- const uchar* iv, uint iv_length,
- int no_padding)
+int my_aes_crypt_update(void *ctx, const uchar *src, uint slen,
+ uchar *dst, uint *dlen)
{
- return block_crypt(aes_ecb(key_length), CRYPT_DECRYPT, source, source_length,
- dest, dest_length, key, key_length, iv, iv_length, no_padding);
+ return ((MyCTX*)ctx)->update(src, slen, dst, dlen);
}
-int my_aes_encrypt_cbc(const uchar* source, uint source_length,
- uchar* dest, uint* dest_length,
- const uchar* key, uint key_length,
- const uchar* iv, uint iv_length,
- int no_padding)
+int my_aes_crypt_finish(void *ctx, uchar *dst, uint *dlen)
{
- return block_crypt(aes_cbc(key_length), CRYPT_ENCRYPT, source, source_length,
- dest, dest_length, key, key_length, iv, iv_length, no_padding);
+ int res= ((MyCTX*)ctx)->finish(dst, dlen);
+ ((MyCTX*)ctx)->~MyCTX();
+ return res;
}
-int my_aes_decrypt_cbc(const uchar* source, uint source_length,
- uchar* dest, uint* dest_length,
- const uchar* key, uint key_length,
- const uchar* iv, uint iv_length,
- int no_padding)
+int my_aes_crypt(enum my_aes_mode mode, int flags,
+ const uchar *src, uint slen, uchar *dst, uint *dlen,
+ const uchar *key, uint klen, const uchar *iv, uint ivlen)
{
- return block_crypt(aes_cbc(key_length), CRYPT_DECRYPT, source, source_length,
- dest, dest_length, key, key_length, iv, iv_length, no_padding);
+ void *ctx= alloca(MY_AES_CTX_SIZE);
+ int res1, res2;
+ uint d1, d2;
+ if ((res1= my_aes_crypt_init(ctx, mode, flags, key, klen, iv, ivlen)))
+ return res1;
+ res1= my_aes_crypt_update(ctx, src, slen, dst, &d1);
+ res2= my_aes_crypt_finish(ctx, dst + d1, &d2);
+ *dlen= d1 + d2;
+ return res1 ? res1 : res2;
}
-C_MODE_END
-
-#if defined(HAVE_YASSL)
-
+#ifdef HAVE_YASSL
#include <random.hpp>
-
-C_MODE_START
-
int my_random_bytes(uchar* buf, int num)
{
TaoCrypt::RandomNumberGenerator rand;
rand.GenerateBlock((TaoCrypt::byte*) buf, num);
return MY_AES_OK;
}
-
-C_MODE_END
-
-#else /* OpenSSL */
-
+#else
#include <openssl/rand.h>
-C_MODE_START
-
-int my_random_bytes(uchar* buf, int num)
+int my_random_bytes(uchar *buf, int num)
{
/*
Unfortunately RAND_bytes manual page does not provide any guarantees
@@ -364,30 +281,12 @@ int my_random_bytes(uchar* buf, int num)
instead of whatever random engine is currently set in OpenSSL. That way
we are guaranteed to have a non-blocking random.
*/
- RAND_METHOD* rand = RAND_SSLeay();
+ RAND_METHOD *rand = RAND_SSLeay();
if (rand == NULL || rand->bytes(buf, num) != 1)
return MY_AES_OPENSSL_ERROR;
return MY_AES_OK;
}
+#endif
-C_MODE_END
-#endif /* HAVE_YASSL */
-
-/**
- Get size of buffer which will be large enough for encrypted data
-
- The buffer should be sufficiently large to fit encrypted data
- independently from the encryption algorithm and mode. With padding up to
- MY_AES_BLOCK_SIZE bytes can be added. With GCM, exactly MY_AES_BLOCK_SIZE
- bytes are added.
-
- The actual length of the encrypted data is returned from the encryption
- function (e.g. from my_aes_encrypt_cbc).
-
- @return required buffer size
-*/
-
-uint my_aes_get_size(uint source_length)
-{
- return source_length + MY_AES_BLOCK_SIZE;
}
+
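For the GCM convention documented in the hunk above (the tag is appended to the ciphertext, IV bytes beyond the 12-byte nonce are fed in as AAD), a hedged usage sketch; the key size, IV layout and ENCRYPTION_FLAG_DECRYPT are assumptions of the sketch, not something this patch defines:

  uchar key[32], iv[16];                     /* 12-byte nonce + 4 bytes of AAD */
  uchar src[100], pt[100];
  uchar ct[100 + MY_AES_BLOCK_SIZE];         /* ciphertext + appended 16-byte tag */
  uint ct_len, pt_len;

  my_aes_crypt(MY_AES_GCM, ENCRYPTION_FLAG_ENCRYPT,
               src, sizeof(src), ct, &ct_len, key, sizeof(key), iv, sizeof(iv));
  /* ct_len == sizeof(src) + MY_AES_BLOCK_SIZE */
  my_aes_crypt(MY_AES_GCM, ENCRYPTION_FLAG_DECRYPT,
               ct, ct_len, pt, &pt_len, key, sizeof(key), iv, sizeof(iv));
  /* pt_len == sizeof(src); decryption must see the whole ciphertext at once,
     since MyCTX_gcm needs the trailing tag before it can verify anything */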
diff --git a/mysys_ssl/yassl.cc b/mysys_ssl/yassl.cc
new file mode 100644
index 00000000000..9717870fe26
--- /dev/null
+++ b/mysys_ssl/yassl.cc
@@ -0,0 +1,194 @@
+/*
+ Copyright (c) 2015 MariaDB Corporation
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; version 2 of the License.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */
+
+/*
+ The very minimal subset of OpenSSL's EVP* functions.
+ Just enough for my_crypt.cc to work.
+
+ On the other hand, where it has to implement OpenSSL functionality,
+ it tries to be compatible (e.g. same flags and struct member names).
+*/
+
+#include <openssl/ssl.h>
+#include "aes.hpp"
+
+using yaSSL::yaERR_remove_state;
+
+#define EVP_CIPH_ECB_MODE 0x1
+#define EVP_CIPH_CBC_MODE 0x2
+#define EVP_CIPH_NO_PADDING 0x100
+
+/*
+ note that TaoCrypt::AES object is not explicitly put into EVP_CIPHER_CTX.
+ That's because we need to control when TaoCrypt::AES constructor and
+ destructor are called.
+*/
+typedef struct
+{
+ ulong flags;
+ int encrypt;
+ int key_len;
+ int buf_len;
+ int final_used;
+ uchar tao_buf[sizeof(TaoCrypt::AES)]; // TaoCrypt::AES object
+ uchar oiv[TaoCrypt::AES::BLOCK_SIZE]; // original IV
+ uchar buf[TaoCrypt::AES::BLOCK_SIZE]; // last partial input block
+ uchar final[TaoCrypt::AES::BLOCK_SIZE]; // last decrypted (output) block
+} EVP_CIPHER_CTX;
+
+typedef struct {
+ TaoCrypt::Mode mode;
+ TaoCrypt::word32 key_len;
+} EVP_CIPHER;
+
+#define gen_cipher(mode, MODE, len) \
+ static const EVP_CIPHER *EVP_aes_ ## len ## _ ## mode() \
+ { static const EVP_CIPHER c={TaoCrypt::MODE, len/8}; return &c; }
+
+gen_cipher(ecb,ECB,128)
+gen_cipher(ecb,ECB,192)
+gen_cipher(ecb,ECB,256)
+gen_cipher(cbc,CBC,128)
+gen_cipher(cbc,CBC,192)
+gen_cipher(cbc,CBC,256)
+
+static inline TaoCrypt::AES *TAO(EVP_CIPHER_CTX *ctx)
+{
+ return (TaoCrypt::AES *)(ctx->tao_buf);
+}
+
+static void EVP_CIPHER_CTX_init(EVP_CIPHER_CTX *ctx)
+{
+ ctx->final_used= ctx->buf_len= ctx->flags= 0;
+}
+
+static int EVP_CIPHER_CTX_cleanup(EVP_CIPHER_CTX *ctx)
+{
+ TAO(ctx)->~AES();
+ return 1;
+}
+
+static int EVP_CIPHER_CTX_set_padding(EVP_CIPHER_CTX *ctx, int pad)
+{
+ if (pad)
+ ctx->flags&= ~EVP_CIPH_NO_PADDING;
+ else
+ ctx->flags|= EVP_CIPH_NO_PADDING;
+ return 1;
+}
+
+static int EVP_CipherInit_ex(EVP_CIPHER_CTX *ctx, const EVP_CIPHER *cipher,
+ void *, const uchar *key, const uchar *iv, int enc)
+{
+ new (ctx->tao_buf) TaoCrypt::AES(enc ? TaoCrypt::ENCRYPTION
+ : TaoCrypt::DECRYPTION, cipher->mode);
+ TAO(ctx)->SetKey(key, cipher->key_len);
+ if (iv)
+ {
+ TAO(ctx)->SetIV(iv);
+ memcpy(ctx->oiv, iv, TaoCrypt::AES::BLOCK_SIZE);
+ }
+ ctx->encrypt= enc;
+ ctx->key_len= cipher->key_len;
+ ctx->flags|= cipher->mode == TaoCrypt::CBC ? EVP_CIPH_CBC_MODE : EVP_CIPH_ECB_MODE;
+ return 1;
+}
+
+static int EVP_CIPHER_CTX_key_length(const EVP_CIPHER_CTX *ctx)
+{
+ return ctx->key_len;
+}
+
+static int EVP_CIPHER_CTX_iv_length(const EVP_CIPHER_CTX *ctx)
+{
+ return ctx->flags & EVP_CIPH_ECB_MODE ? 0 : TaoCrypt::AES::BLOCK_SIZE;
+}
+
+static void do_whole_blocks(EVP_CIPHER_CTX *ctx, uchar *out, int *outl,
+ const uchar *in, int inl)
+{
+ DBUG_ASSERT(inl);
+ DBUG_ASSERT(inl % TaoCrypt::AES::BLOCK_SIZE == 0);
+ if (ctx->encrypt || (ctx->flags & EVP_CIPH_NO_PADDING))
+ {
+ TAO(ctx)->Process(out, in, inl);
+ *outl+= inl;
+ return;
+ }
+ /* 'final' is only needed when decrypting with padding */
+ if (ctx->final_used)
+ {
+ memcpy(out, ctx->final, TaoCrypt::AES::BLOCK_SIZE);
+ *outl+= TaoCrypt::AES::BLOCK_SIZE;
+ out+= TaoCrypt::AES::BLOCK_SIZE;
+ }
+ inl-= TaoCrypt::AES::BLOCK_SIZE;
+ TAO(ctx)->Process(out, in, inl);
+ *outl+= inl;
+ TAO(ctx)->Process(ctx->final, in + inl, TaoCrypt::AES::BLOCK_SIZE);
+ ctx->final_used= 1;
+}
+
+static int EVP_CipherUpdate(EVP_CIPHER_CTX *ctx, uchar *out, int *outl,
+ const uchar *in, int inl)
+{
+ *outl= 0;
+ if (ctx->buf_len)
+ {
+ int prefixl= TaoCrypt::AES::BLOCK_SIZE - ctx->buf_len;
+ if (prefixl > inl)
+ {
+ memcpy(ctx->buf + ctx->buf_len, in, inl);
+ ctx->buf_len+= inl;
+ return 1;
+ }
+ memcpy(ctx->buf + ctx->buf_len, in, prefixl);
+ do_whole_blocks(ctx, out, outl, ctx->buf, TaoCrypt::AES::BLOCK_SIZE);
+ in+= prefixl;
+ inl-= prefixl;
+ out+= *outl;
+ }
+ ctx->buf_len= inl % TaoCrypt::AES::BLOCK_SIZE;
+ inl-= ctx->buf_len;
+ memcpy(ctx->buf, in + inl, ctx->buf_len);
+ if (inl)
+ do_whole_blocks(ctx, out, outl, in, inl);
+ return 1;
+}
+
+static int EVP_CipherFinal_ex(EVP_CIPHER_CTX *ctx, uchar *out, int *outl)
+{
+ if (ctx->flags & EVP_CIPH_NO_PADDING)
+ return ctx->buf_len == 0;
+
+ // PKCS#7 padding
+ *outl= 0;
+ if (ctx->encrypt)
+ {
+ int v= TaoCrypt::AES::BLOCK_SIZE - ctx->buf_len;
+ memset(ctx->buf + ctx->buf_len, v, v);
+ do_whole_blocks(ctx, out, outl, ctx->buf, TaoCrypt::AES::BLOCK_SIZE);
+ return 1;
+ }
+ int n= ctx->final[TaoCrypt::AES::BLOCK_SIZE - 1];
+ if (ctx->buf_len || !ctx->final_used ||
+ n < 1 || n > TaoCrypt::AES::BLOCK_SIZE)
+ return 0;
+ *outl= TaoCrypt::AES::BLOCK_SIZE - n;
+ memcpy(out, ctx->final, *outl);
+ return 1;
+}
+
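To make the shim's buffering contract concrete, a hedged sketch of how these EVP-style functions end up being driven (the buffer sizes and data are illustrative; the calls and the encrypt flag are the ones defined above):

  uchar key[16], iv[16];
  uchar part1[30], part2[18];                 /* 48 bytes of input, split in two */
  uchar out[48 + TaoCrypt::AES::BLOCK_SIZE];  /* room for the PKCS#7 pad block */
  int n1, n2;
  EVP_CIPHER_CTX c;

  EVP_CIPHER_CTX_init(&c);
  EVP_CipherInit_ex(&c, EVP_aes_128_cbc(), NULL, key, iv, 1 /* encrypt */);
  EVP_CipherUpdate(&c, out, &n1, part1, sizeof(part1));      /* 16 out, 14 buffered */
  EVP_CipherUpdate(&c, out + n1, &n2, part2, sizeof(part2)); /* 32 more out */
  n1+= n2;
  EVP_CipherFinal_ex(&c, out + n1, &n2);      /* one full block of PKCS#7 padding */
  EVP_CIPHER_CTX_cleanup(&c);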