author     Christian Grothoff <christian@grothoff.org>   2013-04-04 16:12:16 +0200
committer  Werner Koch <wk@gnupg.org>                    2013-04-04 16:50:45 +0200
commit     855b1a8f81b5a3b5b31d0c3c303675425f58a5af (patch)
tree       392592213aa025cc184166dae23bfca4494cdd13
parent     02e8344d3803b80b74bc9b56d718304a6588bc14 (diff)
download   libgcrypt-855b1a8f81b5a3b5b31d0c3c303675425f58a5af.tar.gz
Add the SCRYPT KDF function
* scrypt.c, scrypt.h: New files.
* memxor.c, memxor.h: New files.
* cipher/Makefile.am: Add new files.
* cipher/kdf.c (gcry_kdf_derive): Support GCRY_KDF_SCRYPT.
* src/gcrypt.h.in (GCRY_KDF_SCRYPT): New.
--
Signed-off-by: Christian Grothoff <christian@grothoff.org>

I added the ChangeLog entry and the missing signed-off line.

Signed-off-by: Werner Koch <wk@gnupg.org>
-rw-r--r--   cipher/Makefile.am |   2
-rw-r--r--   cipher/kdf.c       |   4
-rw-r--r--   cipher/memxor.c    | 326
-rw-r--r--   cipher/memxor.h    |  21
-rw-r--r--   cipher/scrypt.c    | 284
-rw-r--r--   cipher/scrypt.h    |  66
-rw-r--r--   doc/gcrypt.texi    |   6
-rw-r--r--   src/gcrypt.h.in    |   3
8 files changed, 710 insertions, 2 deletions
diff --git a/cipher/Makefile.am b/cipher/Makefile.am
index fcb9be52..5b016f05 100644
--- a/cipher/Makefile.am
+++ b/cipher/Makefile.am
@@ -39,7 +39,7 @@ libcipher_la_LIBADD = $(GCRYPT_MODULES)
libcipher_la_SOURCES = \
cipher.c cipher-internal.h \
cipher-cbc.c cipher-cfb.c cipher-ofb.c cipher-ctr.c cipher-aeswrap.c \
-pubkey.c md.c kdf.c \
+pubkey.c md.c kdf.c scrypt.c memxor.c \
hmac-tests.c \
bithelp.h \
bufhelp.h \
diff --git a/cipher/kdf.c b/cipher/kdf.c
index 46e8550d..4ea0fb29 100644
--- a/cipher/kdf.c
+++ b/cipher/kdf.c
@@ -26,6 +26,7 @@
#include "g10lib.h"
#include "cipher.h"
#include "ath.h"
+#include "scrypt.h"
/* Transform a passphrase into a suitable key of length KEYSIZE and
@@ -267,6 +268,9 @@ gcry_kdf_derive (const void *passphrase, size_t passphraselen,
ec = pkdf2 (passphrase, passphraselen, subalgo,
salt, saltlen, iterations, keysize, keybuffer);
break;
+ case GCRY_KDF_SCRYPT:
+ ec = scrypt (passphrase, passphraselen, subalgo,
+ salt, saltlen, iterations, keysize, keybuffer);
+ break;
default:
ec = GPG_ERR_UNKNOWN_ALGORITHM;
diff --git a/cipher/memxor.c b/cipher/memxor.c
new file mode 100644
index 00000000..74307f0c
--- /dev/null
+++ b/cipher/memxor.c
@@ -0,0 +1,326 @@
+/* memxor.c
+ *
+ *
+ * This file is part of Libgcrypt.
+ * Adapted from Nettle, the low-level cryptographic library, for
+ * Libgcrypt by Christian Grothoff; original license:
+ */
+
+/*
+ * Copyright (C) 1991, 1993, 1995 Free Software Foundation, Inc.
+ * Copyright (C) 2010 Niels Möller
+ *
+ * The nettle library is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as published by
+ * the Free Software Foundation; either version 2.1 of the License, or (at your
+ * option) any later version.
+ *
+ * The nettle library is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+ * License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with the nettle library; see the file COPYING.LIB. If not, write to
+ * the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
+ * MA 02111-1301, USA.
+ */
+
+/* Implementation inspired by memcmp in glibc, contributed to the FSF
+ by Torbjorn Granlund.
+ */
+
+#include <config.h>
+#include <limits.h>
+#include <stdint.h>
+#include "memxor.h"
+
+typedef unsigned long int word_t;
+
+/* FIXME: need a configure test for this hack; meanwhile derive the
+ word size from ULONG_MAX so that 32-bit platforms get a correct value. */
+#define SIZEOF_LONG ((ULONG_MAX > 0xffffffffUL) ? 8 : 4)
+
+#if SIZEOF_LONG & (SIZEOF_LONG - 1)
+#error Word size must be a power of two
+#endif
+
+#define ALIGN_OFFSET(p) ((uintptr_t) (p) % sizeof(word_t))
+
+#ifndef WORDS_BIGENDIAN
+#define MERGE(w0, sh_1, w1, sh_2) (((w0) >> (sh_1)) | ((w1) << (sh_2)))
+#else
+#define MERGE(w0, sh_1, w1, sh_2) (((w0) << (sh_1)) | ((w1) >> (sh_2)))
+#endif
+
+#define WORD_T_THRESH 16
+
+/* XOR word-aligned areas. n is the number of words, not bytes. */
+static void
+memxor_common_alignment (word_t *dst, const word_t *src, size_t n)
+{
+ /* FIXME: Require n > 0? */
+ /* FIXME: Unroll four times, like memcmp? Probably not worth the
+ effort. */
+
+ if (n & 1)
+ {
+ *dst++ ^= *src++;
+ n--;
+ }
+ for (; n >= 2; dst += 2, src += 2, n -= 2)
+ {
+ dst[0] ^= src[0];
+ dst[1] ^= src[1];
+ }
+}
+
+/* XOR *un-aligned* src-area onto aligned dst area. n is number of
+ words, not bytes. Assumes we can read complete words at the start
+ and end of the src operand. */
+static void
+memxor_different_alignment (word_t *dst, const uint8_t *src, size_t n)
+{
+ size_t i;
+ int shl, shr;
+ const word_t *src_word;
+ unsigned offset = ALIGN_OFFSET (src);
+ word_t s0, s1;
+
+ shl = CHAR_BIT * offset;
+ shr = CHAR_BIT * (sizeof(word_t) - offset);
+
+ src_word = (const word_t *) ((uintptr_t) src & -SIZEOF_LONG);
+
+ /* FIXME: Unroll four times, like memcmp? */
+ i = n & 1;
+ s0 = src_word[i];
+ if (i)
+ {
+ s1 = src_word[0];
+ dst[0] ^= MERGE (s1, shl, s0, shr);
+ }
+
+ for (; i < n; i += 2)
+ {
+ s1 = src_word[i+1];
+ dst[i] ^= MERGE(s0, shl, s1, shr);
+ s0 = src_word[i+2];
+ dst[i+1] ^= MERGE(s1, shl, s0, shr);
+ }
+}
+
+/* Performance, Intel SU1400 (x86_64): 0.25 cycles/byte aligned, 0.45
+ cycles/byte unaligned. */
+
+/* XOR LEN bytes starting at SRCADDR onto DESTADDR. Result undefined
+ if the source overlaps with the destination. Return DESTADDR. */
+uint8_t *
+memxor(uint8_t *dst, const uint8_t *src, size_t n)
+{
+ uint8_t *orig_dst = dst;
+
+ if (n >= WORD_T_THRESH)
+ {
+ /* There are at least some bytes to compare. No need to test
+ for N == 0 in this alignment loop. */
+ while (ALIGN_OFFSET (dst))
+ {
+ *dst++ ^= *src++;
+ n--;
+ }
+ if (ALIGN_OFFSET (src))
+ memxor_different_alignment ((word_t *) dst, src, n / sizeof(word_t));
+ else
+ memxor_common_alignment ((word_t *) dst, (const word_t *) src, n / sizeof(word_t));
+
+ dst += n & -SIZEOF_LONG;
+ src += n & -SIZEOF_LONG;
+ n = n & (SIZEOF_LONG - 1);
+ }
+ for (; n > 0; n--)
+ *dst++ ^= *src++;
+
+ return orig_dst;
+}
+
+
+/* XOR word-aligned areas. n is the number of words, not bytes. */
+static void
+memxor3_common_alignment (word_t *dst,
+ const word_t *a, const word_t *b, size_t n)
+{
+ /* FIXME: Require n > 0? */
+ while (n-- > 0)
+ dst[n] = a[n] ^ b[n];
+}
+
+static void
+memxor3_different_alignment_b (word_t *dst,
+ const word_t *a, const uint8_t *b, unsigned offset, size_t n)
+{
+ int shl, shr;
+ const word_t *b_word;
+
+ word_t s0, s1;
+
+ shl = CHAR_BIT * offset;
+ shr = CHAR_BIT * (sizeof(word_t) - offset);
+
+ b_word = (const word_t *) ((uintptr_t) b & -SIZEOF_LONG);
+
+ if (n & 1)
+ {
+ n--;
+ s1 = b_word[n];
+ s0 = b_word[n+1];
+ dst[n] = a[n] ^ MERGE (s1, shl, s0, shr);
+ }
+ else
+ s1 = b_word[n];
+
+ while (n > 0)
+ {
+ n -= 2;
+ s0 = b_word[n+1];
+ dst[n+1] = a[n+1] ^ MERGE(s0, shl, s1, shr);
+ s1 = b_word[n];
+ dst[n] = a[n] ^ MERGE(s1, shl, s0, shr);
+ }
+}
+
+static void
+memxor3_different_alignment_ab (word_t *dst,
+ const uint8_t *a, const uint8_t *b,
+ unsigned offset, size_t n)
+{
+ int shl, shr;
+ const word_t *a_word;
+ const word_t *b_word;
+
+ word_t s0, s1;
+
+ shl = CHAR_BIT * offset;
+ shr = CHAR_BIT * (sizeof(word_t) - offset);
+
+ a_word = (const word_t *) ((uintptr_t) a & -SIZEOF_LONG);
+ b_word = (const word_t *) ((uintptr_t) b & -SIZEOF_LONG);
+
+ if (n & 1)
+ {
+ n--;
+ s1 = a_word[n] ^ b_word[n];
+ s0 = a_word[n+1] ^ b_word[n+1];
+ dst[n] = MERGE (s1, shl, s0, shr);
+ }
+ else
+ s1 = a_word[n] ^ b_word[n];
+
+ while (n > 0)
+ {
+ n -= 2;
+ s0 = a_word[n+1] ^ b_word[n+1];
+ dst[n+1] = MERGE(s0, shl, s1, shr);
+ s1 = a_word[n] ^ b_word[n];
+ dst[n] = MERGE(s1, shl, s0, shr);
+ }
+}
+
+static void
+memxor3_different_alignment_all (word_t *dst,
+ const uint8_t *a, const uint8_t *b,
+ unsigned a_offset, unsigned b_offset,
+ size_t n)
+{
+ int al, ar, bl, br;
+ const word_t *a_word;
+ const word_t *b_word;
+
+ word_t a0, a1, b0, b1;
+
+ al = CHAR_BIT * a_offset;
+ ar = CHAR_BIT * (sizeof(word_t) - a_offset);
+ bl = CHAR_BIT * b_offset;
+ br = CHAR_BIT * (sizeof(word_t) - b_offset);
+
+ a_word = (const word_t *) ((uintptr_t) a & -SIZEOF_LONG);
+ b_word = (const word_t *) ((uintptr_t) b & -SIZEOF_LONG);
+
+ if (n & 1)
+ {
+ n--;
+ a1 = a_word[n]; a0 = a_word[n+1];
+ b1 = b_word[n]; b0 = b_word[n+1];
+
+ dst[n] = MERGE (a1, al, a0, ar) ^ MERGE (b1, bl, b0, br);
+ }
+ else
+ {
+ a1 = a_word[n];
+ b1 = b_word[n];
+ }
+
+ while (n > 0)
+ {
+ n -= 2;
+ a0 = a_word[n+1]; b0 = b_word[n+1];
+ dst[n+1] = MERGE(a0, al, a1, ar) ^ MERGE(b0, bl, b1, br);
+ a1 = a_word[n]; b1 = b_word[n];
+ dst[n] = MERGE(a1, al, a0, ar) ^ MERGE(b1, bl, b0, br);
+ }
+}
+
+/* Current implementation processes data in descending order, to
+ support overlapping operation with one of the sources overlapping
+ the start of the destination area. This feature is used only
+ internally by cbc decrypt, and it is not advertised or documented
+ to nettle users. */
+uint8_t *
+memxor3(uint8_t *dst, const uint8_t *a, const uint8_t *b, size_t n)
+{
+ if (n >= WORD_T_THRESH)
+ {
+ unsigned i;
+ unsigned a_offset;
+ unsigned b_offset;
+ size_t nwords;
+
+ for (i = ALIGN_OFFSET(dst + n); i > 0; i--)
+ {
+ n--;
+ dst[n] = a[n] ^ b[n];
+ }
+
+ a_offset = ALIGN_OFFSET(a + n);
+ b_offset = ALIGN_OFFSET(b + n);
+
+ nwords = n / sizeof (word_t);
+ n %= sizeof (word_t);
+
+ if (a_offset == b_offset)
+ {
+ if (!a_offset)
+ memxor3_common_alignment((word_t *) (dst + n),
+ (const word_t *) (a + n),
+ (const word_t *) (b + n), nwords);
+ else
+ memxor3_different_alignment_ab((word_t *) (dst + n),
+ a + n, b + n, a_offset,
+ nwords);
+ }
+ else if (!a_offset)
+ memxor3_different_alignment_b((word_t *) (dst + n),
+ (const word_t *) (a + n), b + n,
+ b_offset, nwords);
+ else if (!b_offset)
+ memxor3_different_alignment_b((word_t *) (dst + n),
+ (const word_t *) (b + n), a + n,
+ a_offset, nwords);
+ else
+ memxor3_different_alignment_all((word_t *) (dst + n), a + n, b + n,
+ a_offset, b_offset, nwords);
+ }
+ while (n-- > 0)
+ dst[n] = a[n] ^ b[n];
+
+ return dst;
+}
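
For reference, a minimal sketch of how these two entry points are used (illustrative only, not part of the patch): memxor() XORs SRC into DST in place and returns DST, while memxor3() writes A ^ B into DST, walking high-to-low so that one source may overlap the start of the destination.

  #include <assert.h>
  #include <stdint.h>
  #include <string.h>
  #include "memxor.h"

  int
  main (void)
  {
    uint8_t a[8]   = { 0xff, 0x00, 0xff, 0x00, 0x0f, 0xf0, 0xaa, 0x55 };
    uint8_t b[8]   = { 0x0f, 0x0f, 0xf0, 0xf0, 0xff, 0x00, 0x55, 0xaa };
    uint8_t dst[8];

    /* dst = a ^ b; the three-operand form leaves both inputs intact.  */
    memxor3 (dst, a, b, sizeof dst);

    /* a ^= b, in place; returns its first argument like memcpy.  */
    memxor (a, b, sizeof a);

    /* Both calls compute the same bytes.  */
    assert (memcmp (dst, a, sizeof a) == 0);
    return 0;
  }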
diff --git a/cipher/memxor.h b/cipher/memxor.h
new file mode 100644
index 00000000..f308155f
--- /dev/null
+++ b/cipher/memxor.h
@@ -0,0 +1,21 @@
+/* memxor.h
+ *
+ */
+
+#ifndef MEMXOR_H_INCLUDED
+#define MEMXOR_H_INCLUDED
+
+#include <stdlib.h>
+#include <stdint.h> /* for uint8_t in the prototypes below */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+uint8_t *memxor(uint8_t *dst, const uint8_t *src, size_t n);
+uint8_t *memxor3(uint8_t *dst, const uint8_t *a, const uint8_t *b, size_t n);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* MEMXOR_H_INCLUDED */
diff --git a/cipher/scrypt.c b/cipher/scrypt.c
new file mode 100644
index 00000000..45ab4b31
--- /dev/null
+++ b/cipher/scrypt.c
@@ -0,0 +1,284 @@
+/* scrypt.c - Scrypt password-based key derivation function.
+ *
+ * This file is part of Libgcrypt.
+ */
+
+/* Adapted from Nettle, the low-level cryptographic library, for
+ * Libgcrypt by Christian Grothoff; original license:
+ *
+ * Copyright (C) 2012 Simon Josefsson
+ *
+ * The nettle library is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as published by
+ * the Free Software Foundation; either version 2.1 of the License, or (at your
+ * option) any later version.
+ *
+ * The nettle library is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+ * License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with the nettle library; see the file COPYING.LIB. If not, write to
+ * the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
+ * MA 02111-1301, USA.
+ */
+
+#include <config.h>
+#include <assert.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include "g10lib.h"
+#include "scrypt.h"
+#include "memxor.h"
+
+
+
+#define _SALSA20_INPUT_LENGTH 16
+
+#define ROTL32(n,x) (((x)<<(n)) | ((x)>>(32-(n))))
+
+
+/* Reads a 64-bit integer, in network, big-endian, byte order */
+#define READ_UINT64(p) \
+( (((uint64_t) (p)[0]) << 56) \
+ | (((uint64_t) (p)[1]) << 48) \
+ | (((uint64_t) (p)[2]) << 40) \
+ | (((uint64_t) (p)[3]) << 32) \
+ | (((uint64_t) (p)[4]) << 24) \
+ | (((uint64_t) (p)[5]) << 16) \
+ | (((uint64_t) (p)[6]) << 8) \
+ | ((uint64_t) (p)[7]))
+
+
+
+/* And the same in little-endian byte order */
+#define LE_READ_UINT64(p) \
+( (((uint64_t) (p)[7]) << 56) \
+ | (((uint64_t) (p)[6]) << 48) \
+ | (((uint64_t) (p)[5]) << 40) \
+ | (((uint64_t) (p)[4]) << 32) \
+ | (((uint64_t) (p)[3]) << 24) \
+ | (((uint64_t) (p)[2]) << 16) \
+ | (((uint64_t) (p)[1]) << 8) \
+ | ((uint64_t) (p)[0]))
+
+
+
+#ifdef WORDS_BIGENDIAN
+#define LE_SWAP32(v) \
+ ((ROTL32(8, v) & 0x00FF00FFUL) | \
+ (ROTL32(24, v) & 0xFF00FF00UL))
+#else
+#define LE_SWAP32(v) (v)
+#endif
+
+#define QROUND(x0, x1, x2, x3) do { \
+ x1 ^= ROTL32(7, x0 + x3); \
+ x2 ^= ROTL32(9, x1 + x0); \
+ x3 ^= ROTL32(13, x2 + x1); \
+ x0 ^= ROTL32(18, x3 + x2); \
+ } while(0)
+
+
+static void
+_salsa20_core(uint32_t *dst, const uint32_t *src, unsigned rounds)
+{
+ uint32_t x[_SALSA20_INPUT_LENGTH];
+ unsigned i;
+
+ assert ( (rounds & 1) == 0);
+
+ memcpy (x, src, sizeof(x));
+ for (i = 0; i < rounds;i += 2)
+ {
+ QROUND(x[0], x[4], x[8], x[12]);
+ QROUND(x[5], x[9], x[13], x[1]);
+ QROUND(x[10], x[14], x[2], x[6]);
+ QROUND(x[15], x[3], x[7], x[11]);
+
+ QROUND(x[0], x[1], x[2], x[3]);
+ QROUND(x[5], x[6], x[7], x[4]);
+ QROUND(x[10], x[11], x[8], x[9]);
+ QROUND(x[15], x[12], x[13], x[14]);
+ }
+
+ for (i = 0; i < _SALSA20_INPUT_LENGTH; i++)
+ {
+ uint32_t t = x[i] + src[i];
+ dst[i] = LE_SWAP32 (t);
+ }
+}
+
+
+static void
+_scryptBlockMix (uint32_t r, uint8_t *B, uint8_t *tmp2)
+{
+ uint64_t i;
+ uint8_t *X = tmp2;
+ uint8_t *Y = tmp2 + 64;
+
+#if 0
+ for (i = 0; i < 2 * r; i++)
+ {
+ size_t j;
+ printf ("B[%d]: ", i);
+ for (j = 0; j < 64; j++)
+ {
+ if (j % 4 == 0)
+ printf (" ");
+ printf ("%02x", B[i * 64 + j]);
+ }
+ printf ("\n");
+ }
+#endif
+
+ /* X = B[2 * r - 1] */
+ memcpy (X, &B[(2 * r - 1) * 64], 64);
+
+ /* for i = 0 to 2 * r - 1 do */
+ for (i = 0; i <= 2 * r - 1; i++)
+ {
+ /* T = X xor B[i] */
+ memxor(X, &B[i * 64], 64);
+
+ /* X = Salsa (T) */
+ /* The casts are safe: X is the start of the malloc'd tmp2 buffer. */
+ _salsa20_core ((uint32_t *) X, (const uint32_t *) X, 8);
+
+ /* Y[i] = X */
+ memcpy (&Y[i * 64], X, 64);
+ }
+
+ for (i = 0; i < r; i++)
+ {
+ memcpy (&B[i * 64], &Y[2 * i * 64], 64);
+ memcpy (&B[(r + i) * 64], &Y[(2 * i + 1) * 64], 64);
+ }
+
+#if 0
+ for (i = 0; i < 2 * r; i++)
+ {
+ size_t j;
+ printf ("B'[%d]: ", i);
+ for (j = 0; j < 64; j++)
+ {
+ if (j % 4 == 0)
+ printf (" ");
+ printf ("%02x", B[i * 64 + j]);
+ }
+ printf ("\n");
+ }
+#endif
+}
+
+static void
+_scryptROMix (uint32_t r, uint8_t *B, uint64_t N,
+ uint8_t *tmp1, uint8_t *tmp2)
+{
+ uint8_t *X = B, *T = B;
+ uint64_t i;
+
+#if 0
+ printf ("B: ");
+ for (i = 0; i < 128 * r; i++)
+ {
+ size_t j;
+ if (i % 4 == 0)
+ printf (" ");
+ printf ("%02x", B[i]);
+ }
+ printf ("\n");
+#endif
+
+ /* for i = 0 to N - 1 do */
+ for (i = 0; i <= N - 1; i++)
+ {
+ /* V[i] = X */
+ memcpy (&tmp1[i * 128 * r], X, 128 * r);
+
+ /* X = ScryptBlockMix (X) */
+ _scryptBlockMix (r, X, tmp2);
+ }
+
+ /* for i = 0 to N - 1 do */
+ for (i = 0; i <= N - 1; i++)
+ {
+ uint64_t j;
+
+ /* j = Integerify (X) mod N */
+ j = LE_READ_UINT64 (&X[128 * r - 64]) % N;
+
+ /* T = X xor V[j] */
+ memxor (T, &tmp1[j * 128 * r], 128 * r);
+
+ /* X = scryptBlockMix (T) */
+ _scryptBlockMix (r, T, tmp2);
+ }
+
+#if 0
+ printf ("B': ");
+ for (i = 0; i < 128 * r; i++)
+ {
+ size_t j;
+ if (i % 4 == 0)
+ printf (" ");
+ printf ("%02x", B[i]);
+ }
+ printf ("\n");
+#endif
+}
+
+/* Derive DKLEN bytes into DK from PASSWD and SALT using the scrypt
+ KDF. SUBALGO carries the CPU/memory cost parameter N, ITERATIONS
+ the parallelization parameter p; the block size r is fixed at 8. */
+gcry_err_code_t
+scrypt (const uint8_t * passwd, size_t passwdlen,
+ int subalgo,
+ const uint8_t * salt, size_t saltlen,
+ unsigned long iterations,
+ size_t dkLen, uint8_t * DK)
+{
+ /* XXX sanity-check parameters */
+ uint64_t N = subalgo; /* CPU/memory cost parameter */
+ uint32_t r = 8; /* block size, should be sane enough */
+ uint32_t p = iterations; /* parallelization parameter */
+
+ uint32_t i;
+ uint8_t *B;
+ uint8_t *tmp1;
+ uint8_t *tmp2;
+ gpg_err_code_t ec;
+
+
+ B = malloc (p * 128 * r);
+ if (B == NULL)
+ return GPG_ERR_ENOMEM;
+
+ tmp1 = malloc (N * 128 * r);
+ if (tmp1 == NULL)
+ {
+ free (B);
+ return GPG_ERR_ENOMEM;
+ }
+
+ tmp2 = malloc (64 + 128 * r);
+ if (tmp2 == NULL)
+ {
+ free (B);
+ free (tmp1);
+ return GPG_ERR_ENOMEM;
+ }
+
+ /* B[0] || ... || B[p-1] = PBKDF2-HMAC-SHA256 (P, S, 1, p * 128 * r) */
+ ec = pkdf2 (passwd, passwdlen, GCRY_MD_SHA256, salt, saltlen,
+ 1 /* iterations */, p * 128 * r, B);
+
+ for (i = 0; !ec && i < p; i++)
+ _scryptROMix (r, &B[i * 128 * r], N, tmp1, tmp2);
+
+ /* DK = PBKDF2-HMAC-SHA256 (P, B[0] || ... || B[p-1], 1, dkLen);
+ a single call over the whole of B suffices, so no loop is needed. */
+ if (!ec)
+ ec = pkdf2 (passwd, passwdlen, GCRY_MD_SHA256, B, p * 128 * r,
+ 1 /* iterations */, dkLen, DK);
+
+ free (tmp2);
+ free (tmp1);
+ free (B);
+
+ return ec;
+}
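
The three allocations above mirror the data structures of the scrypt definition: B holds p blocks of 128 * r bytes, tmp1 plays the role of the V array (N blocks of 128 * r bytes), and tmp2 holds the 64-byte X plus the 128 * r bytes of Y used by BlockMix. A quick sizing sketch (the parameter values here are arbitrary examples, not defaults of this patch) shows that the cost parameter N dominates memory use:

  #include <stdint.h>
  #include <stdio.h>

  int
  main (void)
  {
    /* Example parameters only: N arrives via SUBALGO, p via ITERATIONS;
       r is fixed at 8 by this implementation.  */
    uint64_t N = 16384;
    uint32_t r = 8, p = 1;

    uint64_t b_bytes    = (uint64_t) p * 128 * r;   /* B              */
    uint64_t tmp1_bytes = N * 128 * r;              /* V              */
    uint64_t tmp2_bytes = 64 + 128 * (uint64_t) r;  /* X || Y scratch */

    printf ("B:    %llu bytes\n", (unsigned long long) b_bytes);
    printf ("tmp1: %llu bytes (%llu MiB)\n",
            (unsigned long long) tmp1_bytes,
            (unsigned long long) (tmp1_bytes >> 20));
    printf ("tmp2: %llu bytes\n", (unsigned long long) tmp2_bytes);
    return 0;
  }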
diff --git a/cipher/scrypt.h b/cipher/scrypt.h
new file mode 100644
index 00000000..e0c8df92
--- /dev/null
+++ b/cipher/scrypt.h
@@ -0,0 +1,66 @@
+/* scrypt.h - Scrypt password-based key derivation function.
+ *
+ * This file is part of Libgcrypt.
+ */
+
+/* Adapted from Nettle, the low-level cryptographic library, for
+ * Libgcrypt by Christian Grothoff; original license:
+ *
+ * Copyright (C) 2012 Simon Josefsson
+ *
+ * The nettle library is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as published by
+ * the Free Software Foundation; either version 2.1 of the License, or (at your
+ * option) any later version.
+ *
+ * The nettle library is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+ * License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with the nettle library; see the file COPYING.LIB. If not, write to
+ * the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
+ * MA 02111-1301, USA.
+ */
+
+#ifndef SCRYPT_H_INCLUDED
+#define SCRYPT_H_INCLUDED
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+#include <stdint.h>
+#include "g10lib.h"
+#include "cipher.h"
+
+
+/* Transform a passphrase into a suitable key of length KEYSIZE and
+ store this key in the caller provided buffer KEYBUFFER. The caller
+ must provide PRFALGO which indicates the pseudorandom function to
+ use: This shall be the algorithm id of a hash algorithm; it is
+ used in HMAC mode. SALT is a salt of length SALTLEN and ITERATIONS
+ gives the number of iterations; implemented in 'kdf.c', used by
+ scrypt.c */
+gpg_err_code_t
+pkdf2 (const void *passphrase, size_t passphraselen,
+ int hashalgo,
+ const void *salt, size_t saltlen,
+ unsigned long iterations,
+ size_t keysize, void *keybuffer);
+
+
+gcry_err_code_t
+scrypt (const uint8_t * passwd, size_t passwdlen,
+ int subalgo,
+ const uint8_t * salt, size_t saltlen,
+ unsigned long iterations,
+ size_t dkLen, uint8_t * DK);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* SCRYPT_H_INCLUDED */
diff --git a/doc/gcrypt.texi b/doc/gcrypt.texi
index bef91f7b..fe040f02 100644
--- a/doc/gcrypt.texi
+++ b/doc/gcrypt.texi
@@ -3172,6 +3172,12 @@ iteration count.
@item GCRY_KDF_PBKDF2
The PKCS#5 Passphrase Based Key Derivation Function number 2.
+@item GCRY_KDF_SCRYPT
+The SCRYPT Key Derivation Function. The subalgorithm is used to specify
+the CPU/memory cost parameter N, and the number of iterations
+is used for the parallelization parameter p. The block size is fixed
+at 8 in the current implementation.
+
@end table
@end deftypefun
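
Put together, a caller selects N through the subalgo argument and p through the iteration count. A minimal, illustrative call (the parameter values, key size, passphrase, and salt below are arbitrary examples, not recommendations from this patch):

  #include <gcrypt.h>
  #include <stdio.h>
  #include <string.h>

  int
  main (void)
  {
    unsigned char key[32];
    const char *passphrase = "example passphrase";
    const char *salt = "examplesalt";
    gcry_error_t err;

    /* Real code would first initialize libgcrypt (gcry_check_version etc.).  */

    /* subalgo = N (CPU/memory cost), iterations = p (parallelization);
       r is fixed at 8 by the current implementation.  */
    err = gcry_kdf_derive (passphrase, strlen (passphrase),
                           GCRY_KDF_SCRYPT, 16384 /* N */,
                           salt, strlen (salt),
                           1 /* p */, sizeof key, key);
    if (err)
      fprintf (stderr, "scrypt failed: %s\n", gcry_strerror (err));
    return err ? 1 : 0;
  }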
diff --git a/src/gcrypt.h.in b/src/gcrypt.h.in
index 83437991..dff0e0b1 100644
--- a/src/gcrypt.h.in
+++ b/src/gcrypt.h.in
@@ -1193,7 +1193,8 @@ enum gcry_kdf_algos
GCRY_KDF_SALTED_S2K = 17,
GCRY_KDF_ITERSALTED_S2K = 19,
GCRY_KDF_PBKDF1 = 33,
- GCRY_KDF_PBKDF2 = 34
+ GCRY_KDF_PBKDF2 = 34,
+ GCRY_KDF_SCRYPT = 35
};
/* Derive a key from a passphrase. */