author     Matt Caswell <matt@openssl.org>    2015-01-05 11:30:03 +0000
committer  Matt Caswell <matt@openssl.org>    2015-01-22 09:39:01 +0000
commit     cda8845ded7c0739c9142283ed4c449130b1b546 (patch)
tree       0df96afebb471333d619466a9559a2bae9bc7eae /crypto
parent     47050853f13b07f91b5b4a058dcb188621296f21 (diff)
download   openssl-new-cda8845ded7c0739c9142283ed4c449130b1b546.tar.gz
Re-align some comments after running the reformat script. (tag: OpenSSL_1_0_1-post-reformat)

This should be a one-off operation (subsequent invocations of the script should not move them). This commit is for the 1.0.1 changes.

Reviewed-by: Tim Hudson <tjh@openssl.org>
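For background: OpenSSL marks comment blocks that its automated reformat script must leave alone with a leading "/*-", and this commit only adjusts the indentation of such blocks so they sit at the same level as the code they describe. Below is a minimal, hypothetical C sketch of that convention; the function name and comment text are illustrative only and do not appear anywhere in this diff.

#include <stdio.h>

/* Hypothetical example, not taken from the OpenSSL sources in this diff. */
static int check_rv(int rv)
{
    /*-
     * Return value meanings:
     *  <=0: error
     *    1: success
     *
     * The leading '-' asks the reformat script to preserve this layout;
     * re-alignment, as in this commit, only changes the block's indentation.
     */
    if (rv <= 0) {
        fprintf(stderr, "check_rv: error\n");
        return 0;
    }
    return 1;
}

int main(void)
{
    return check_rv(1) ? 0 : 1;
}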
Diffstat (limited to 'crypto')
-rw-r--r--  crypto/asn1/a_sign.c      |  14
-rw-r--r--  crypto/bio/b_sock.c       |  14
-rw-r--r--  crypto/bio/bf_null.c      |  10
-rw-r--r--  crypto/bio/bio.h          |  18
-rw-r--r--  crypto/bio/bss_acpt.c     |  12
-rw-r--r--  crypto/bn/bn_add.c        |  24
-rw-r--r--  crypto/bn/bn_exp.c        |  60
-rw-r--r--  crypto/bn/bn_gcd.c        | 212
-rw-r--r--  crypto/bn/bn_kron.c       |  14
-rw-r--r--  crypto/bn/bn_mul.c        |  90
-rw-r--r--  crypto/bn/bn_print.c      |  12
-rw-r--r--  crypto/bn/bn_sqr.c        |  22
-rw-r--r--  crypto/bn/bn_sqrt.c       | 122
-rw-r--r--  crypto/cast/casttest.c    |   6
-rw-r--r--  crypto/conf/conf_def.c    |  20
-rw-r--r--  crypto/des/des.c          |  16
-rw-r--r--  crypto/des/destest.c      |  14
-rw-r--r--  crypto/des/enc_read.c     |  12
-rw-r--r--  crypto/des/ofb64ede.c     |   4
-rw-r--r--  crypto/dsa/dsa_ameth.c    |  10
-rw-r--r--  crypto/dso/dso_vms.c      |  34
-rw-r--r--  crypto/ec/ec.h            |  16
-rw-r--r--  crypto/ec/ec2_smpl.c      |  12
-rw-r--r--  crypto/ec/ec_lcl.h        |  16
-rw-r--r--  crypto/ec/ec_mult.c       |  14
-rw-r--r--  crypto/ec/ecp_nistp224.c  |  26
-rw-r--r--  crypto/ec/ecp_nistp256.c  |  77
-rw-r--r--  crypto/ec/ecp_nistp521.c  | 111
-rw-r--r--  crypto/ec/ecp_oct.c       |  10
-rw-r--r--  crypto/ec/ecp_smpl.c      |  60
-rw-r--r--  crypto/idea/ideatest.c    |   6
-rw-r--r--  crypto/lhash/lhash.c      |   6
-rw-r--r--  crypto/o_time.c           |  48
-rw-r--r--  crypto/pem/pem.h          |  16
-rw-r--r--  crypto/rand/randfile.c    |   8
-rw-r--r--  crypto/rc2/rc2test.c      |   6
-rw-r--r--  crypto/rc4/rc4_enc.c      | 116
-rw-r--r--  crypto/rsa/rsa_pss.c      |  24
-rw-r--r--  crypto/threads/mttest.c   |  44
-rw-r--r--  crypto/whrlpool/wp_dgst.c |  24
-rw-r--r--  crypto/x509/by_dir.c      |   4
-rw-r--r--  crypto/x509/x509_lu.c     |   4
-rw-r--r--  crypto/x509/x509_r2x.c    |   4
-rw-r--r--  crypto/x509/x509_vfy.c    |  18
-rw-r--r--  crypto/x509/x509name.c    |  20
45 files changed, 717 insertions, 713 deletions
diff --git a/crypto/asn1/a_sign.c b/crypto/asn1/a_sign.c
index 2a8c3a336c..51c6a0c34d 100644
--- a/crypto/asn1/a_sign.c
+++ b/crypto/asn1/a_sign.c
@@ -252,13 +252,13 @@ int ASN1_item_sign_ctx(const ASN1_ITEM *it,
rv = pkey->ameth->item_sign(ctx, it, asn, algor1, algor2, signature);
if (rv == 1)
outl = signature->length;
- /*-
- * Return value meanings:
- * <=0: error.
- * 1: method does everything.
- * 2: carry on as normal.
- * 3: ASN1 method sets algorithm identifiers: just sign.
- */
+ /*-
+ * Return value meanings:
+ * <=0: error.
+ * 1: method does everything.
+ * 2: carry on as normal.
+ * 3: ASN1 method sets algorithm identifiers: just sign.
+ */
if (rv <= 0)
ASN1err(ASN1_F_ASN1_ITEM_SIGN_CTX, ERR_R_EVP_LIB);
if (rv <= 1)
diff --git a/crypto/bio/b_sock.c b/crypto/bio/b_sock.c
index 7469f07f25..bda882c40b 100644
--- a/crypto/bio/b_sock.c
+++ b/crypto/bio/b_sock.c
@@ -530,13 +530,13 @@ int BIO_socket_ioctl(int fd, long type, void *arg)
i = ioctlsocket(fd, type, (char *)arg);
# else
# if defined(OPENSSL_SYS_VMS)
- /*-
- * 2011-02-18 SMS.
- * VMS ioctl() can't tolerate a 64-bit "void *arg", but we
- * observe that all the consumers pass in an "unsigned long *",
- * so we arrange a local copy with a short pointer, and use
- * that, instead.
- */
+ /*-
+ * 2011-02-18 SMS.
+ * VMS ioctl() can't tolerate a 64-bit "void *arg", but we
+ * observe that all the consumers pass in an "unsigned long *",
+ * so we arrange a local copy with a short pointer, and use
+ * that, instead.
+ */
# if __INITIAL_POINTER_SIZE == 64
# define ARG arg_32p
# pragma pointer_size save
diff --git a/crypto/bio/bf_null.c b/crypto/bio/bf_null.c
index d9d0dc6bf4..e0c79e8291 100644
--- a/crypto/bio/bf_null.c
+++ b/crypto/bio/bf_null.c
@@ -103,11 +103,11 @@ static int nullf_free(BIO *a)
{
if (a == NULL)
return (0);
- /*-
- a->ptr=NULL;
- a->init=0;
- a->flags=0;
- */
+ /*-
+ a->ptr=NULL;
+ a->init=0;
+ a->flags=0;
+ */
return (1);
}
diff --git a/crypto/bio/bio.h b/crypto/bio/bio.h
index 2298f301a6..81f13c725c 100644
--- a/crypto/bio/bio.h
+++ b/crypto/bio/bio.h
@@ -343,15 +343,15 @@ struct bio_st {
DECLARE_STACK_OF(BIO)
typedef struct bio_f_buffer_ctx_struct {
- /*-
- * Buffers are setup like this:
- *
- * <---------------------- size ----------------------->
- * +---------------------------------------------------+
- * | consumed | remaining | free space |
- * +---------------------------------------------------+
- * <-- off --><------- len ------->
- */
+ /*-
+ * Buffers are setup like this:
+ *
+ * <---------------------- size ----------------------->
+ * +---------------------------------------------------+
+ * | consumed | remaining | free space |
+ * +---------------------------------------------------+
+ * <-- off --><------- len ------->
+ */
/*- BIO *bio; *//*
* this is now in the BIO struct
*/
diff --git a/crypto/bio/bss_acpt.c b/crypto/bio/bss_acpt.c
index 1a1e4d99ac..d08292c3e9 100644
--- a/crypto/bio/bss_acpt.c
+++ b/crypto/bio/bss_acpt.c
@@ -421,12 +421,12 @@ static long acpt_ctrl(BIO *b, int cmd, long num, void *ptr)
ret = (long)data->bind_mode;
break;
case BIO_CTRL_DUP:
-/*- dbio=(BIO *)ptr;
- if (data->param_port) EAY EAY
- BIO_set_port(dbio,data->param_port);
- if (data->param_hostname)
- BIO_set_hostname(dbio,data->param_hostname);
- BIO_set_nbio(dbio,data->nbio); */
+/*- dbio=(BIO *)ptr;
+ if (data->param_port) EAY EAY
+ BIO_set_port(dbio,data->param_port);
+ if (data->param_hostname)
+ BIO_set_hostname(dbio,data->param_hostname);
+ BIO_set_nbio(dbio,data->nbio); */
break;
default:
diff --git a/crypto/bn/bn_add.c b/crypto/bn/bn_add.c
index c48665a038..2f3d110449 100644
--- a/crypto/bn/bn_add.c
+++ b/crypto/bn/bn_add.c
@@ -69,12 +69,12 @@ int BN_add(BIGNUM *r, const BIGNUM *a, const BIGNUM *b)
bn_check_top(a);
bn_check_top(b);
- /*-
- * a + b a+b
- * a + -b a-b
- * -a + b b-a
- * -a + -b -(a+b)
- */
+ /*-
+ * a + b a+b
+ * a + -b a-b
+ * -a + b b-a
+ * -a + -b -(a+b)
+ */
if (a_neg ^ b->neg) {
/* only one is negative */
if (a_neg) {
@@ -265,12 +265,12 @@ int BN_sub(BIGNUM *r, const BIGNUM *a, const BIGNUM *b)
bn_check_top(a);
bn_check_top(b);
- /*-
- * a - b a-b
- * a - -b a+b
- * -a - b -(a+b)
- * -a - -b b-a
- */
+ /*-
+ * a - b a-b
+ * a - -b a+b
+ * -a - b -(a+b)
+ * -a - -b b-a
+ */
if (a->neg) {
if (b->neg) {
tmp = a;
diff --git a/crypto/bn/bn_exp.c b/crypto/bn/bn_exp.c
index 1ded193738..fca401450a 100644
--- a/crypto/bn/bn_exp.c
+++ b/crypto/bn/bn_exp.c
@@ -186,36 +186,36 @@ int BN_mod_exp(BIGNUM *r, const BIGNUM *a, const BIGNUM *p, const BIGNUM *m,
bn_check_top(p);
bn_check_top(m);
- /*-
- * For even modulus m = 2^k*m_odd, it might make sense to compute
- * a^p mod m_odd and a^p mod 2^k separately (with Montgomery
- * exponentiation for the odd part), using appropriate exponent
- * reductions, and combine the results using the CRT.
- *
- * For now, we use Montgomery only if the modulus is odd; otherwise,
- * exponentiation using the reciprocal-based quick remaindering
- * algorithm is used.
- *
- * (Timing obtained with expspeed.c [computations a^p mod m
- * where a, p, m are of the same length: 256, 512, 1024, 2048,
- * 4096, 8192 bits], compared to the running time of the
- * standard algorithm:
- *
- * BN_mod_exp_mont 33 .. 40 % [AMD K6-2, Linux, debug configuration]
- * 55 .. 77 % [UltraSparc processor, but
- * debug-solaris-sparcv8-gcc conf.]
- *
- * BN_mod_exp_recp 50 .. 70 % [AMD K6-2, Linux, debug configuration]
- * 62 .. 118 % [UltraSparc, debug-solaris-sparcv8-gcc]
- *
- * On the Sparc, BN_mod_exp_recp was faster than BN_mod_exp_mont
- * at 2048 and more bits, but at 512 and 1024 bits, it was
- * slower even than the standard algorithm!
- *
- * "Real" timings [linux-elf, solaris-sparcv9-gcc configurations]
- * should be obtained when the new Montgomery reduction code
- * has been integrated into OpenSSL.)
- */
+ /*-
+ * For even modulus m = 2^k*m_odd, it might make sense to compute
+ * a^p mod m_odd and a^p mod 2^k separately (with Montgomery
+ * exponentiation for the odd part), using appropriate exponent
+ * reductions, and combine the results using the CRT.
+ *
+ * For now, we use Montgomery only if the modulus is odd; otherwise,
+ * exponentiation using the reciprocal-based quick remaindering
+ * algorithm is used.
+ *
+ * (Timing obtained with expspeed.c [computations a^p mod m
+ * where a, p, m are of the same length: 256, 512, 1024, 2048,
+ * 4096, 8192 bits], compared to the running time of the
+ * standard algorithm:
+ *
+ * BN_mod_exp_mont 33 .. 40 % [AMD K6-2, Linux, debug configuration]
+ * 55 .. 77 % [UltraSparc processor, but
+ * debug-solaris-sparcv8-gcc conf.]
+ *
+ * BN_mod_exp_recp 50 .. 70 % [AMD K6-2, Linux, debug configuration]
+ * 62 .. 118 % [UltraSparc, debug-solaris-sparcv8-gcc]
+ *
+ * On the Sparc, BN_mod_exp_recp was faster than BN_mod_exp_mont
+ * at 2048 and more bits, but at 512 and 1024 bits, it was
+ * slower even than the standard algorithm!
+ *
+ * "Real" timings [linux-elf, solaris-sparcv9-gcc configurations]
+ * should be obtained when the new Montgomery reduction code
+ * has been integrated into OpenSSL.)
+ */
#define MONT_MUL_MOD
#define MONT_EXP_WORD
diff --git a/crypto/bn/bn_gcd.c b/crypto/bn/bn_gcd.c
index 02fb7c462d..97c55ab720 100644
--- a/crypto/bn/bn_gcd.c
+++ b/crypto/bn/bn_gcd.c
@@ -268,13 +268,13 @@ BIGNUM *BN_mod_inverse(BIGNUM *in,
goto err;
}
sign = -1;
- /*-
- * From B = a mod |n|, A = |n| it follows that
- *
- * 0 <= B < A,
- * -sign*X*a == B (mod |n|),
- * sign*Y*a == A (mod |n|).
- */
+ /*-
+ * From B = a mod |n|, A = |n| it follows that
+ *
+ * 0 <= B < A,
+ * -sign*X*a == B (mod |n|),
+ * sign*Y*a == A (mod |n|).
+ */
if (BN_is_odd(n) && (BN_num_bits(n) <= (BN_BITS <= 32 ? 450 : 2048))) {
/*
@@ -286,12 +286,12 @@ BIGNUM *BN_mod_inverse(BIGNUM *in,
int shift;
while (!BN_is_zero(B)) {
- /*-
- * 0 < B < |n|,
- * 0 < A <= |n|,
- * (1) -sign*X*a == B (mod |n|),
- * (2) sign*Y*a == A (mod |n|)
- */
+ /*-
+ * 0 < B < |n|,
+ * 0 < A <= |n|,
+ * (1) -sign*X*a == B (mod |n|),
+ * (2) sign*Y*a == A (mod |n|)
+ */
/*
* Now divide B by the maximum possible power of two in the
@@ -337,18 +337,18 @@ BIGNUM *BN_mod_inverse(BIGNUM *in,
goto err;
}
- /*-
- * We still have (1) and (2).
- * Both A and B are odd.
- * The following computations ensure that
- *
- * 0 <= B < |n|,
- * 0 < A < |n|,
- * (1) -sign*X*a == B (mod |n|),
- * (2) sign*Y*a == A (mod |n|),
- *
- * and that either A or B is even in the next iteration.
- */
+ /*-
+ * We still have (1) and (2).
+ * Both A and B are odd.
+ * The following computations ensure that
+ *
+ * 0 <= B < |n|,
+ * 0 < A < |n|,
+ * (1) -sign*X*a == B (mod |n|),
+ * (2) sign*Y*a == A (mod |n|),
+ *
+ * and that either A or B is even in the next iteration.
+ */
if (BN_ucmp(B, A) >= 0) {
/* -sign*(X + Y)*a == B - A (mod |n|) */
if (!BN_uadd(X, X, Y))
@@ -377,11 +377,11 @@ BIGNUM *BN_mod_inverse(BIGNUM *in,
while (!BN_is_zero(B)) {
BIGNUM *tmp;
- /*-
- * 0 < B < A,
- * (*) -sign*X*a == B (mod |n|),
- * sign*Y*a == A (mod |n|)
- */
+ /*-
+ * 0 < B < A,
+ * (*) -sign*X*a == B (mod |n|),
+ * sign*Y*a == A (mod |n|)
+ */
/* (D, M) := (A/B, A%B) ... */
if (BN_num_bits(A) == BN_num_bits(B)) {
@@ -428,12 +428,12 @@ BIGNUM *BN_mod_inverse(BIGNUM *in,
goto err;
}
- /*-
- * Now
- * A = D*B + M;
- * thus we have
- * (**) sign*Y*a == D*B + M (mod |n|).
- */
+ /*-
+ * Now
+ * A = D*B + M;
+ * thus we have
+ * (**) sign*Y*a == D*B + M (mod |n|).
+ */
tmp = A; /* keep the BIGNUM object, the value does not
* matter */
@@ -443,25 +443,25 @@ BIGNUM *BN_mod_inverse(BIGNUM *in,
B = M;
/* ... so we have 0 <= B < A again */
- /*-
- * Since the former M is now B and the former B is now A,
- * (**) translates into
- * sign*Y*a == D*A + B (mod |n|),
- * i.e.
- * sign*Y*a - D*A == B (mod |n|).
- * Similarly, (*) translates into
- * -sign*X*a == A (mod |n|).
- *
- * Thus,
- * sign*Y*a + D*sign*X*a == B (mod |n|),
- * i.e.
- * sign*(Y + D*X)*a == B (mod |n|).
- *
- * So if we set (X, Y, sign) := (Y + D*X, X, -sign), we arrive back at
- * -sign*X*a == B (mod |n|),
- * sign*Y*a == A (mod |n|).
- * Note that X and Y stay non-negative all the time.
- */
+ /*-
+ * Since the former M is now B and the former B is now A,
+ * (**) translates into
+ * sign*Y*a == D*A + B (mod |n|),
+ * i.e.
+ * sign*Y*a - D*A == B (mod |n|).
+ * Similarly, (*) translates into
+ * -sign*X*a == A (mod |n|).
+ *
+ * Thus,
+ * sign*Y*a + D*sign*X*a == B (mod |n|),
+ * i.e.
+ * sign*(Y + D*X)*a == B (mod |n|).
+ *
+ * So if we set (X, Y, sign) := (Y + D*X, X, -sign), we arrive back at
+ * -sign*X*a == B (mod |n|),
+ * sign*Y*a == A (mod |n|).
+ * Note that X and Y stay non-negative all the time.
+ */
/*
* most of the time D is very small, so we can optimize tmp :=
@@ -498,13 +498,13 @@ BIGNUM *BN_mod_inverse(BIGNUM *in,
}
}
- /*-
- * The while loop (Euclid's algorithm) ends when
- * A == gcd(a,n);
- * we have
- * sign*Y*a == A (mod |n|),
- * where Y is non-negative.
- */
+ /*-
+ * The while loop (Euclid's algorithm) ends when
+ * A == gcd(a,n);
+ * we have
+ * sign*Y*a == A (mod |n|),
+ * where Y is non-negative.
+ */
if (sign < 0) {
if (!BN_sub(Y, n, Y))
@@ -588,22 +588,22 @@ static BIGNUM *BN_mod_inverse_no_branch(BIGNUM *in,
goto err;
}
sign = -1;
- /*-
- * From B = a mod |n|, A = |n| it follows that
- *
- * 0 <= B < A,
- * -sign*X*a == B (mod |n|),
- * sign*Y*a == A (mod |n|).
- */
+ /*-
+ * From B = a mod |n|, A = |n| it follows that
+ *
+ * 0 <= B < A,
+ * -sign*X*a == B (mod |n|),
+ * sign*Y*a == A (mod |n|).
+ */
while (!BN_is_zero(B)) {
BIGNUM *tmp;
- /*-
- * 0 < B < A,
- * (*) -sign*X*a == B (mod |n|),
- * sign*Y*a == A (mod |n|)
- */
+ /*-
+ * 0 < B < A,
+ * (*) -sign*X*a == B (mod |n|),
+ * sign*Y*a == A (mod |n|)
+ */
/*
* Turn BN_FLG_CONSTTIME flag on, so that when BN_div is invoked,
@@ -616,12 +616,12 @@ static BIGNUM *BN_mod_inverse_no_branch(BIGNUM *in,
if (!BN_div(D, M, pA, B, ctx))
goto err;
- /*-
- * Now
- * A = D*B + M;
- * thus we have
- * (**) sign*Y*a == D*B + M (mod |n|).
- */
+ /*-
+ * Now
+ * A = D*B + M;
+ * thus we have
+ * (**) sign*Y*a == D*B + M (mod |n|).
+ */
tmp = A; /* keep the BIGNUM object, the value does not
* matter */
@@ -631,25 +631,25 @@ static BIGNUM *BN_mod_inverse_no_branch(BIGNUM *in,
B = M;
/* ... so we have 0 <= B < A again */
- /*-
- * Since the former M is now B and the former B is now A,
- * (**) translates into
- * sign*Y*a == D*A + B (mod |n|),
- * i.e.
- * sign*Y*a - D*A == B (mod |n|).
- * Similarly, (*) translates into
- * -sign*X*a == A (mod |n|).
- *
- * Thus,
- * sign*Y*a + D*sign*X*a == B (mod |n|),
- * i.e.
- * sign*(Y + D*X)*a == B (mod |n|).
- *
- * So if we set (X, Y, sign) := (Y + D*X, X, -sign), we arrive back at
- * -sign*X*a == B (mod |n|),
- * sign*Y*a == A (mod |n|).
- * Note that X and Y stay non-negative all the time.
- */
+ /*-
+ * Since the former M is now B and the former B is now A,
+ * (**) translates into
+ * sign*Y*a == D*A + B (mod |n|),
+ * i.e.
+ * sign*Y*a - D*A == B (mod |n|).
+ * Similarly, (*) translates into
+ * -sign*X*a == A (mod |n|).
+ *
+ * Thus,
+ * sign*Y*a + D*sign*X*a == B (mod |n|),
+ * i.e.
+ * sign*(Y + D*X)*a == B (mod |n|).
+ *
+ * So if we set (X, Y, sign) := (Y + D*X, X, -sign), we arrive back at
+ * -sign*X*a == B (mod |n|),
+ * sign*Y*a == A (mod |n|).
+ * Note that X and Y stay non-negative all the time.
+ */
if (!BN_mul(tmp, D, X, ctx))
goto err;
@@ -663,13 +663,13 @@ static BIGNUM *BN_mod_inverse_no_branch(BIGNUM *in,
sign = -sign;
}
- /*-
- * The while loop (Euclid's algorithm) ends when
- * A == gcd(a,n);
- * we have
- * sign*Y*a == A (mod |n|),
- * where Y is non-negative.
- */
+ /*-
+ * The while loop (Euclid's algorithm) ends when
+ * A == gcd(a,n);
+ * we have
+ * sign*Y*a == A (mod |n|),
+ * where Y is non-negative.
+ */
if (sign < 0) {
if (!BN_sub(Y, n, Y))
diff --git a/crypto/bn/bn_kron.c b/crypto/bn/bn_kron.c
index 71808321d5..88d731ac75 100644
--- a/crypto/bn/bn_kron.c
+++ b/crypto/bn/bn_kron.c
@@ -66,13 +66,13 @@ int BN_kronecker(const BIGNUM *a, const BIGNUM *b, BN_CTX *ctx)
int ret = -2; /* avoid 'uninitialized' warning */
int err = 0;
BIGNUM *A, *B, *tmp;
- /*-
- * In 'tab', only odd-indexed entries are relevant:
- * For any odd BIGNUM n,
- * tab[BN_lsw(n) & 7]
- * is $(-1)^{(n^2-1)/8}$ (using TeX notation).
- * Note that the sign of n does not matter.
- */
+ /*-
+ * In 'tab', only odd-indexed entries are relevant:
+ * For any odd BIGNUM n,
+ * tab[BN_lsw(n) & 7]
+ * is $(-1)^{(n^2-1)/8}$ (using TeX notation).
+ * Note that the sign of n does not matter.
+ */
static const int tab[8] = { 0, 1, 0, -1, 0, -1, 0, 1 };
bn_check_top(a);
diff --git a/crypto/bn/bn_mul.c b/crypto/bn/bn_mul.c
index 3b751d3edc..b174850b6b 100644
--- a/crypto/bn/bn_mul.c
+++ b/crypto/bn/bn_mul.c
@@ -527,11 +527,11 @@ void bn_mul_recursive(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b, int n2,
bn_mul_recursive(&(r[n2]), &(a[n]), &(b[n]), n, dna, dnb, p);
}
- /*-
- * t[32] holds (a[0]-a[1])*(b[1]-b[0]), c1 is the sign
- * r[10] holds (a[0]*b[0])
- * r[32] holds (b[1]*b[1])
- */
+ /*-
+ * t[32] holds (a[0]-a[1])*(b[1]-b[0]), c1 is the sign
+ * r[10] holds (a[0]*b[0])
+ * r[32] holds (b[1]*b[1])
+ */
c1 = (int)(bn_add_words(t, r, &(r[n2]), n2));
@@ -542,12 +542,12 @@ void bn_mul_recursive(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b, int n2,
c1 += (int)(bn_add_words(&(t[n2]), &(t[n2]), t, n2));
}
- /*-
- * t[32] holds (a[0]-a[1])*(b[1]-b[0])+(a[0]*b[0])+(a[1]*b[1])
- * r[10] holds (a[0]*b[0])
- * r[32] holds (b[1]*b[1])
- * c1 holds the carry bits
- */
+ /*-
+ * t[32] holds (a[0]-a[1])*(b[1]-b[0])+(a[0]*b[0])+(a[1]*b[1])
+ * r[10] holds (a[0]*b[0])
+ * r[32] holds (b[1]*b[1])
+ * c1 holds the carry bits
+ */
c1 += (int)(bn_add_words(&(r[n]), &(r[n]), &(t[n2]), n2));
if (c1) {
p = &(r[n + n2]);
@@ -689,11 +689,11 @@ void bn_mul_part_recursive(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b, int n,
}
}
- /*-
- * t[32] holds (a[0]-a[1])*(b[1]-b[0]), c1 is the sign
- * r[10] holds (a[0]*b[0])
- * r[32] holds (b[1]*b[1])
- */
+ /*-
+ * t[32] holds (a[0]-a[1])*(b[1]-b[0]), c1 is the sign
+ * r[10] holds (a[0]*b[0])
+ * r[32] holds (b[1]*b[1])
+ */
c1 = (int)(bn_add_words(t, r, &(r[n2]), n2));
@@ -704,12 +704,12 @@ void bn_mul_part_recursive(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b, int n,
c1 += (int)(bn_add_words(&(t[n2]), &(t[n2]), t, n2));
}
- /*-
- * t[32] holds (a[0]-a[1])*(b[1]-b[0])+(a[0]*b[0])+(a[1]*b[1])
- * r[10] holds (a[0]*b[0])
- * r[32] holds (b[1]*b[1])
- * c1 holds the carry bits
- */
+ /*-
+ * t[32] holds (a[0]-a[1])*(b[1]-b[0])+(a[0]*b[0])+(a[1]*b[1])
+ * r[10] holds (a[0]*b[0])
+ * r[32] holds (b[1]*b[1])
+ * c1 holds the carry bits
+ */
c1 += (int)(bn_add_words(&(r[n]), &(r[n]), &(t[n2]), n2));
if (c1) {
p = &(r[n + n2]);
@@ -828,13 +828,13 @@ void bn_mul_high(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b, BN_ULONG *l, int n2,
bn_mul_recursive(r, &(a[n]), &(b[n]), n, 0, 0, &(t[n2]));
}
- /*-
- * s0 == low(al*bl)
- * s1 == low(ah*bh)+low((al-ah)*(bh-bl))+low(al*bl)+high(al*bl)
- * We know s0 and s1 so the only unknown is high(al*bl)
- * high(al*bl) == s1 - low(ah*bh+s0+(al-ah)*(bh-bl))
- * high(al*bl) == s1 - (r[0]+l[0]+t[0])
- */
+ /*-
+ * s0 == low(al*bl)
+ * s1 == low(ah*bh)+low((al-ah)*(bh-bl))+low(al*bl)+high(al*bl)
+ * We know s0 and s1 so the only unknown is high(al*bl)
+ * high(al*bl) == s1 - low(ah*bh+s0+(al-ah)*(bh-bl))
+ * high(al*bl) == s1 - (r[0]+l[0]+t[0])
+ */
if (l != NULL) {
lp = &(t[n2 + n]);
c1 = (int)(bn_add_words(lp, &(r[0]), &(l[0]), n));
@@ -859,22 +859,22 @@ void bn_mul_high(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b, BN_ULONG *l, int n2,
lp[i] = ((~mp[i]) + 1) & BN_MASK2;
}
- /*-
- * s[0] = low(al*bl)
- * t[3] = high(al*bl)
- * t[10] = (a[0]-a[1])*(b[1]-b[0]) neg is the sign
- * r[10] = (a[1]*b[1])
- */
- /*-
- * R[10] = al*bl
- * R[21] = al*bl + ah*bh + (a[0]-a[1])*(b[1]-b[0])
- * R[32] = ah*bh
- */
- /*-
- * R[1]=t[3]+l[0]+r[0](+-)t[0] (have carry/borrow)
- * R[2]=r[0]+t[3]+r[1](+-)t[1] (have carry/borrow)
- * R[3]=r[1]+(carry/borrow)
- */
+ /*-
+ * s[0] = low(al*bl)
+ * t[3] = high(al*bl)
+ * t[10] = (a[0]-a[1])*(b[1]-b[0]) neg is the sign
+ * r[10] = (a[1]*b[1])
+ */
+ /*-
+ * R[10] = al*bl
+ * R[21] = al*bl + ah*bh + (a[0]-a[1])*(b[1]-b[0])
+ * R[32] = ah*bh
+ */
+ /*-
+ * R[1]=t[3]+l[0]+r[0](+-)t[0] (have carry/borrow)
+ * R[2]=r[0]+t[3]+r[1](+-)t[1] (have carry/borrow)
+ * R[3]=r[1]+(carry/borrow)
+ */
if (l != NULL) {
lp = &(t[n2]);
c1 = (int)(bn_add_words(lp, &(t[n2 + n]), &(l[0]), n));
diff --git a/crypto/bn/bn_print.c b/crypto/bn/bn_print.c
index 71a347e259..4dcaae32bf 100644
--- a/crypto/bn/bn_print.c
+++ b/crypto/bn/bn_print.c
@@ -106,12 +106,12 @@ char *BN_bn2dec(const BIGNUM *a)
BIGNUM *t = NULL;
BN_ULONG *bn_data = NULL, *lp;
- /*-
- * get an upper bound for the length of the decimal integer
- * num <= (BN_num_bits(a) + 1) * log(2)
- * <= 3 * BN_num_bits(a) * 0.1001 + log(2) + 1 (rounding error)
- * <= BN_num_bits(a)/10 + BN_num_bits/1000 + 1 + 1
- */
+ /*-
+ * get an upper bound for the length of the decimal integer
+ * num <= (BN_num_bits(a) + 1) * log(2)
+ * <= 3 * BN_num_bits(a) * 0.1001 + log(2) + 1 (rounding error)
+ * <= BN_num_bits(a)/10 + BN_num_bits/1000 + 1 + 1
+ */
i = BN_num_bits(a) * 3;
num = (i / 10 + i / 1000 + 1) + 1;
bn_data =
diff --git a/crypto/bn/bn_sqr.c b/crypto/bn/bn_sqr.c
index 51daae4481..3ca69879ee 100644
--- a/crypto/bn/bn_sqr.c
+++ b/crypto/bn/bn_sqr.c
@@ -249,23 +249,23 @@ void bn_sqr_recursive(BN_ULONG *r, const BN_ULONG *a, int n2, BN_ULONG *t)
bn_sqr_recursive(r, a, n, p);
bn_sqr_recursive(&(r[n2]), &(a[n]), n, p);
- /*-
- * t[32] holds (a[0]-a[1])*(a[1]-a[0]), it is negative or zero
- * r[10] holds (a[0]*b[0])
- * r[32] holds (b[1]*b[1])
- */
+ /*-
+ * t[32] holds (a[0]-a[1])*(a[1]-a[0]), it is negative or zero
+ * r[10] holds (a[0]*b[0])
+ * r[32] holds (b[1]*b[1])
+ */
c1 = (int)(bn_add_words(t, r, &(r[n2]), n2));
/* t[32] is negative */
c1 -= (int)(bn_sub_words(&(t[n2]), t, &(t[n2]), n2));
- /*-
- * t[32] holds (a[0]-a[1])*(a[1]-a[0])+(a[0]*a[0])+(a[1]*a[1])
- * r[10] holds (a[0]*a[0])
- * r[32] holds (a[1]*a[1])
- * c1 holds the carry bits
- */
+ /*-
+ * t[32] holds (a[0]-a[1])*(a[1]-a[0])+(a[0]*a[0])+(a[1]*a[1])
+ * r[10] holds (a[0]*a[0])
+ * r[32] holds (a[1]*a[1])
+ * c1 holds the carry bits
+ */
c1 += (int)(bn_add_words(&(r[n]), &(r[n]), &(t[n2]), n2));
if (c1) {
p = &(r[n + n2]);
diff --git a/crypto/bn/bn_sqrt.c b/crypto/bn/bn_sqrt.c
index 772c8080bb..232af99a21 100644
--- a/crypto/bn/bn_sqrt.c
+++ b/crypto/bn/bn_sqrt.c
@@ -132,14 +132,14 @@ BIGNUM *BN_mod_sqrt(BIGNUM *in, const BIGNUM *a, const BIGNUM *p, BN_CTX *ctx)
/* we'll set q later (if needed) */
if (e == 1) {
- /*-
- * The easy case: (|p|-1)/2 is odd, so 2 has an inverse
- * modulo (|p|-1)/2, and square roots can be computed
- * directly by modular exponentiation.
- * We have
- * 2 * (|p|+1)/4 == 1 (mod (|p|-1)/2),
- * so we can use exponent (|p|+1)/4, i.e. (|p|-3)/4 + 1.
- */
+ /*-
+ * The easy case: (|p|-1)/2 is odd, so 2 has an inverse
+ * modulo (|p|-1)/2, and square roots can be computed
+ * directly by modular exponentiation.
+ * We have
+ * 2 * (|p|+1)/4 == 1 (mod (|p|-1)/2),
+ * so we can use exponent (|p|+1)/4, i.e. (|p|-3)/4 + 1.
+ */
if (!BN_rshift(q, p, 2))
goto end;
q->neg = 0;
@@ -152,32 +152,32 @@ BIGNUM *BN_mod_sqrt(BIGNUM *in, const BIGNUM *a, const BIGNUM *p, BN_CTX *ctx)
}
if (e == 2) {
- /*-
- * |p| == 5 (mod 8)
- *
- * In this case 2 is always a non-square since
- * Legendre(2,p) = (-1)^((p^2-1)/8) for any odd prime.
- * So if a really is a square, then 2*a is a non-square.
- * Thus for
- * b := (2*a)^((|p|-5)/8),
- * i := (2*a)*b^2
- * we have
- * i^2 = (2*a)^((1 + (|p|-5)/4)*2)
- * = (2*a)^((p-1)/2)
- * = -1;
- * so if we set
- * x := a*b*(i-1),
- * then
- * x^2 = a^2 * b^2 * (i^2 - 2*i + 1)
- * = a^2 * b^2 * (-2*i)
- * = a*(-i)*(2*a*b^2)
- * = a*(-i)*i
- * = a.
- *
- * (This is due to A.O.L. Atkin,
- * <URL: http://listserv.nodak.edu/scripts/wa.exe?A2=ind9211&L=nmbrthry&O=T&P=562>,
- * November 1992.)
- */
+ /*-
+ * |p| == 5 (mod 8)
+ *
+ * In this case 2 is always a non-square since
+ * Legendre(2,p) = (-1)^((p^2-1)/8) for any odd prime.
+ * So if a really is a square, then 2*a is a non-square.
+ * Thus for
+ * b := (2*a)^((|p|-5)/8),
+ * i := (2*a)*b^2
+ * we have
+ * i^2 = (2*a)^((1 + (|p|-5)/4)*2)
+ * = (2*a)^((p-1)/2)
+ * = -1;
+ * so if we set
+ * x := a*b*(i-1),
+ * then
+ * x^2 = a^2 * b^2 * (i^2 - 2*i + 1)
+ * = a^2 * b^2 * (-2*i)
+ * = a*(-i)*(2*a*b^2)
+ * = a*(-i)*i
+ * = a.
+ *
+ * (This is due to A.O.L. Atkin,
+ * <URL: http://listserv.nodak.edu/scripts/wa.exe?A2=ind9211&L=nmbrthry&O=T&P=562>,
+ * November 1992.)
+ */
/* t := 2*a */
if (!BN_mod_lshift1_quick(t, A, p))
@@ -277,24 +277,24 @@ BIGNUM *BN_mod_sqrt(BIGNUM *in, const BIGNUM *a, const BIGNUM *p, BN_CTX *ctx)
goto end;
}
- /*-
- * Now we know that (if p is indeed prime) there is an integer
- * k, 0 <= k < 2^e, such that
- *
- * a^q * y^k == 1 (mod p).
- *
- * As a^q is a square and y is not, k must be even.
- * q+1 is even, too, so there is an element
- *
- * X := a^((q+1)/2) * y^(k/2),
- *
- * and it satisfies
- *
- * X^2 = a^q * a * y^k
- * = a,
- *
- * so it is the square root that we are looking for.
- */
+ /*-
+ * Now we know that (if p is indeed prime) there is an integer
+ * k, 0 <= k < 2^e, such that
+ *
+ * a^q * y^k == 1 (mod p).
+ *
+ * As a^q is a square and y is not, k must be even.
+ * q+1 is even, too, so there is an element
+ *
+ * X := a^((q+1)/2) * y^(k/2),
+ *
+ * and it satisfies
+ *
+ * X^2 = a^q * a * y^k
+ * = a,
+ *
+ * so it is the square root that we are looking for.
+ */
/* t := (q-1)/2 (note that q is odd) */
if (!BN_rshift1(t, q))
@@ -333,15 +333,15 @@ BIGNUM *BN_mod_sqrt(BIGNUM *in, const BIGNUM *a, const BIGNUM *p, BN_CTX *ctx)
goto end;
while (1) {
- /*-
- * Now b is a^q * y^k for some even k (0 <= k < 2^E
- * where E refers to the original value of e, which we
- * don't keep in a variable), and x is a^((q+1)/2) * y^(k/2).
- *
- * We have a*b = x^2,
- * y^2^(e-1) = -1,
- * b^2^(e-1) = 1.
- */
+ /*-
+ * Now b is a^q * y^k for some even k (0 <= k < 2^E
+ * where E refers to the original value of e, which we
+ * don't keep in a variable), and x is a^((q+1)/2) * y^(k/2).
+ *
+ * We have a*b = x^2,
+ * y^2^(e-1) = -1,
+ * b^2^(e-1) = 1.
+ */
if (BN_is_one(b)) {
if (!BN_copy(ret, x))
diff --git a/crypto/cast/casttest.c b/crypto/cast/casttest.c
index b3bd85c520..dc31bc6604 100644
--- a/crypto/cast/casttest.c
+++ b/crypto/cast/casttest.c
@@ -134,9 +134,9 @@ static unsigned char cfb_cipher64[CFB_TEST_SIZE] = {
0x59, 0xD8, 0xE2, 0x65, 0x00, 0x58, 0x6C, 0x3F,
0x2C, 0x17, 0x25, 0xD0, 0x1A, 0x38, 0xB7, 0x2A,
0x39, 0x61, 0x37, 0xDC, 0x79, 0xFB, 0x9F, 0x45
-/*- 0xF9,0x78,0x32,0xB5,0x42,0x1A,0x6B,0x38,
- 0x9A,0x44,0xD6,0x04,0x19,0x43,0xC4,0xD9,
- 0x3D,0x1E,0xAE,0x47,0xFC,0xCF,0x29,0x0B,*/
+/*- 0xF9,0x78,0x32,0xB5,0x42,0x1A,0x6B,0x38,
+ 0x9A,0x44,0xD6,0x04,0x19,0x43,0xC4,0xD9,
+ 0x3D,0x1E,0xAE,0x47,0xFC,0xCF,0x29,0x0B,*/
};
# endif
diff --git a/crypto/conf/conf_def.c b/crypto/conf/conf_def.c
index 30a1992150..e3ffeb21ef 100644
--- a/crypto/conf/conf_def.c
+++ b/crypto/conf/conf_def.c
@@ -567,16 +567,16 @@ static int str_copy(CONF *conf, char *section, char **pto, char *from)
}
e++;
}
- /*-
- * So at this point we have
- * np which is the start of the name string which is
- * '\0' terminated.
- * cp which is the start of the section string which is
- * '\0' terminated.
- * e is the 'next point after'.
- * r and rr are the chars replaced by the '\0'
- * rp and rrp is where 'r' and 'rr' came from.
- */
+ /*-
+ * So at this point we have
+ * np which is the start of the name string which is
+ * '\0' terminated.
+ * cp which is the start of the section string which is
+ * '\0' terminated.
+ * e is the 'next point after'.
+ * r and rr are the chars replaced by the '\0'
+ * rp and rrp is where 'r' and 'rr' came from.
+ */
p = _CONF_get_string(conf, cp, np);
if (rrp != NULL)
*rrp = rr;
diff --git a/crypto/des/des.c b/crypto/des/des.c
index 402695160d..2bff281258 100644
--- a/crypto/des/des.c
+++ b/crypto/des/des.c
@@ -228,14 +228,14 @@ int main(int argc, char **argv)
}
if (error)
usage();
- /*-
- * We either
- * do checksum or
- * do encrypt or
- * do decrypt or
- * do decrypt then ckecksum or
- * do checksum then encrypt
- */
+ /*-
+ * We either
+ * do checksum or
+ * do encrypt or
+ * do decrypt or
+ * do decrypt then ckecksum or
+ * do checksum then encrypt
+ */
if (((eflag + dflag) == 1) || cflag) {
if (eflag)
do_encrypt = DES_ENCRYPT;
diff --git a/crypto/des/destest.c b/crypto/des/destest.c
index 994eeefd53..c6be342038 100644
--- a/crypto/des/destest.c
+++ b/crypto/des/destest.c
@@ -404,13 +404,13 @@ int main(int argc, char *argv[])
DES_ENCRYPT);
DES_ede3_cbcm_encrypt(&cbc_data[16], &cbc_out[16], i - 16, &ks, &ks2,
&ks3, &iv3, &iv2, DES_ENCRYPT);
- /*- if (memcmp(cbc_out,cbc3_ok,
- (unsigned int)(strlen((char *)cbc_data)+1+7)/8*8) != 0)
- {
- printf("des_ede3_cbc_encrypt encrypt error\n");
- err=1;
- }
- */
+/*- if (memcmp(cbc_out,cbc3_ok,
+ (unsigned int)(strlen((char *)cbc_data)+1+7)/8*8) != 0)
+ {
+ printf("des_ede3_cbc_encrypt encrypt error\n");
+ err=1;
+ }
+*/
memcpy(iv3, cbc_iv, sizeof(cbc_iv));
memset(iv2, '\0', sizeof iv2);
DES_ede3_cbcm_encrypt(cbc_out, cbc_in, i, &ks, &ks2, &ks3, &iv3, &iv2,
diff --git a/crypto/des/enc_read.c b/crypto/des/enc_read.c
index ed44abcaea..fcb66541bd 100644
--- a/crypto/des/enc_read.c
+++ b/crypto/des/enc_read.c
@@ -205,12 +205,12 @@ int DES_enc_read(int fd, void *buf, int len, DES_key_schedule *sched,
*/
num = len;
} else {
- /*-
- * >output is a multiple of 8 byes, if len < rnum
- * >we must be careful. The user must be aware that this
- * >routine will write more bytes than he asked for.
- * >The length of the buffer must be correct.
- * FIXED - Should be ok now 18-9-90 - eay */
+ /*-
+ * >output is a multiple of 8 byes, if len < rnum
+ * >we must be careful. The user must be aware that this
+ * >routine will write more bytes than he asked for.
+ * >The length of the buffer must be correct.
+ * FIXED - Should be ok now 18-9-90 - eay */
if (len < rnum) {
if (DES_rw_mode & DES_PCBC_MODE)
diff --git a/crypto/des/ofb64ede.c b/crypto/des/ofb64ede.c
index 03399bc297..45c67505a6 100644
--- a/crypto/des/ofb64ede.c
+++ b/crypto/des/ofb64ede.c
@@ -102,8 +102,8 @@ void DES_ede3_ofb64_encrypt(register const unsigned char *in,
n = (n + 1) & 0x07;
}
if (save) {
-/*- v0=ti[0];
- v1=ti[1];*/
+/*- v0=ti[0];
+ v1=ti[1];*/
iv = &(*ivec)[0];
l2c(v0, iv);
l2c(v1, iv);
diff --git a/crypto/dsa/dsa_ameth.c b/crypto/dsa/dsa_ameth.c
index 3fa64328f6..61a73b34c6 100644
--- a/crypto/dsa/dsa_ameth.c
+++ b/crypto/dsa/dsa_ameth.c
@@ -200,11 +200,11 @@ static int dsa_priv_decode(EVP_PKEY *pkey, PKCS8_PRIV_KEY_INFO *p8)
goto decerr;
if (sk_ASN1_TYPE_num(ndsa) != 2)
goto decerr;
- /*-
- * Handle Two broken types:
- * SEQUENCE {parameters, priv_key}
- * SEQUENCE {pub_key, priv_key}
- */
+ /*-
+ * Handle Two broken types:
+ * SEQUENCE {parameters, priv_key}
+ * SEQUENCE {pub_key, priv_key}
+ */
t1 = sk_ASN1_TYPE_value(ndsa, 0);
t2 = sk_ASN1_TYPE_value(ndsa, 1);
diff --git a/crypto/dso/dso_vms.c b/crypto/dso/dso_vms.c
index 511858a681..14d885df15 100644
--- a/crypto/dso/dso_vms.c
+++ b/crypto/dso/dso_vms.c
@@ -178,23 +178,23 @@ static int vms_load(DSO *dso)
goto err;
}
- /*-
- * A file specification may look like this:
- *
- * node::dev:[dir-spec]name.type;ver
- *
- * or (for compatibility with TOPS-20):
- *
- * node::dev:<dir-spec>name.type;ver
- *
- * and the dir-spec uses '.' as separator. Also, a dir-spec
- * may consist of several parts, with mixed use of [] and <>:
- *
- * [dir1.]<dir2>
- *
- * We need to split the file specification into the name and
- * the rest (both before and after the name itself).
- */
+ /*-
+ * A file specification may look like this:
+ *
+ * node::dev:[dir-spec]name.type;ver
+ *
+ * or (for compatibility with TOPS-20):
+ *
+ * node::dev:<dir-spec>name.type;ver
+ *
+ * and the dir-spec uses '.' as separator. Also, a dir-spec
+ * may consist of several parts, with mixed use of [] and <>:
+ *
+ * [dir1.]<dir2>
+ *
+ * We need to split the file specification into the name and
+ * the rest (both before and after the name itself).
+ */
/*
* Start with trying to find the end of a dir-spec, and save the position
* of the byte after in sp1
diff --git a/crypto/ec/ec.h b/crypto/ec/ec.h
index 390d973850..c4e7aea938 100644
--- a/crypto/ec/ec.h
+++ b/crypto/ec/ec.h
@@ -116,14 +116,14 @@ typedef enum {
typedef struct ec_method_st EC_METHOD;
typedef struct ec_group_st
- /*-
- EC_METHOD *meth;
- -- field definition
- -- curve coefficients
- -- optional generator with associated information (order, cofactor)
- -- optional extra data (precomputed table for fast computation of multiples of generator)
- -- ASN1 stuff
- */
+ /*-
+ EC_METHOD *meth;
+ -- field definition
+ -- curve coefficients
+ -- optional generator with associated information (order, cofactor)
+ -- optional extra data (precomputed table for fast computation of multiples of generator)
+ -- ASN1 stuff
+ */
EC_GROUP;
typedef struct ec_point_st EC_POINT;
diff --git a/crypto/ec/ec2_smpl.c b/crypto/ec/ec2_smpl.c
index 9a39477f30..077c7fc8dd 100644
--- a/crypto/ec/ec2_smpl.c
+++ b/crypto/ec/ec2_smpl.c
@@ -632,12 +632,12 @@ int ec_GF2m_simple_is_on_curve(const EC_GROUP *group, const EC_POINT *point,
if (lh == NULL)
goto err;
- /*-
- * We have a curve defined by a Weierstrass equation
- * y^2 + x*y = x^3 + a*x^2 + b.
- * <=> x^3 + a*x^2 + x*y + b + y^2 = 0
- * <=> ((x + a) * x + y ) * x + b + y^2 = 0
- */
+ /*-
+ * We have a curve defined by a Weierstrass equation
+ * y^2 + x*y = x^3 + a*x^2 + b.
+ * <=> x^3 + a*x^2 + x*y + b + y^2 = 0
+ * <=> ((x + a) * x + y ) * x + b + y^2 = 0
+ */
if (!BN_GF2m_add(lh, &point->X, &group->a))
goto err;
if (!field_mul(group, lh, lh, &point->X, ctx))
diff --git a/crypto/ec/ec_lcl.h b/crypto/ec/ec_lcl.h
index fb6a43e6ad..319e651f67 100644
--- a/crypto/ec/ec_lcl.h
+++ b/crypto/ec/ec_lcl.h
@@ -120,14 +120,14 @@ struct ec_method_st {
void (*point_finish) (EC_POINT *);
void (*point_clear_finish) (EC_POINT *);
int (*point_copy) (EC_POINT *, const EC_POINT *);
- /*-
- * used by EC_POINT_set_to_infinity,
- * EC_POINT_set_Jprojective_coordinates_GFp,
- * EC_POINT_get_Jprojective_coordinates_GFp,
- * EC_POINT_set_affine_coordinates_GFp, ..._GF2m,
- * EC_POINT_get_affine_coordinates_GFp, ..._GF2m,
- * EC_POINT_set_compressed_coordinates_GFp, ..._GF2m:
- */
+ /*-
+ * used by EC_POINT_set_to_infinity,
+ * EC_POINT_set_Jprojective_coordinates_GFp,
+ * EC_POINT_get_Jprojective_coordinates_GFp,
+ * EC_POINT_set_affine_coordinates_GFp, ..._GF2m,
+ * EC_POINT_get_affine_coordinates_GFp, ..._GF2m,
+ * EC_POINT_set_compressed_coordinates_GFp, ..._GF2m:
+ */
int (*point_set_to_infinity) (const EC_GROUP *, EC_POINT *);
int (*point_set_Jprojective_coordinates_GFp) (const EC_GROUP *,
EC_POINT *, const BIGNUM *x,
diff --git a/crypto/ec/ec_mult.c b/crypto/ec/ec_mult.c
index 807641a0f4..23b8c3089b 100644
--- a/crypto/ec/ec_mult.c
+++ b/crypto/ec/ec_mult.c
@@ -602,13 +602,13 @@ int ec_wNAF_mul(const EC_GROUP *group, EC_POINT *r, const BIGNUM *scalar,
if (!(tmp = EC_POINT_new(group)))
goto err;
- /*-
- * prepare precomputed values:
- * val_sub[i][0] := points[i]
- * val_sub[i][1] := 3 * points[i]
- * val_sub[i][2] := 5 * points[i]
- * ...
- */
+ /*-
+ * prepare precomputed values:
+ * val_sub[i][0] := points[i]
+ * val_sub[i][1] := 3 * points[i]
+ * val_sub[i][2] := 5 * points[i]
+ * ...
+ */
for (i = 0; i < num + num_scalar; i++) {
if (i < num) {
if (!EC_POINT_copy(val_sub[i][0], points[i]))
diff --git a/crypto/ec/ecp_nistp224.c b/crypto/ec/ecp_nistp224.c
index ece7b75400..9a59ef0c19 100644
--- a/crypto/ec/ecp_nistp224.c
+++ b/crypto/ec/ecp_nistp224.c
@@ -618,11 +618,11 @@ static void felem_reduce(felem out, const widefelem in)
/* output[3] <= 2^56 + 2^16 */
out[2] = output[2] & 0x00ffffffffffffff;
- /*-
- * out[0] < 2^56, out[1] < 2^56, out[2] < 2^56,
- * out[3] <= 2^56 + 2^16 (due to final carry),
- * so out < 2*p
- */
+ /*-
+ * out[0] < 2^56, out[1] < 2^56, out[2] < 2^56,
+ * out[3] <= 2^56 + 2^16 (due to final carry),
+ * so out < 2*p
+ */
out[3] = output[3];
}
@@ -1048,10 +1048,10 @@ static void point_add(felem x3, felem y3, felem z3,
felem_scalar(ftmp5, 2);
/* ftmp5[i] < 2 * 2^57 = 2^58 */
- /*-
- * x_out = (z1^3*y2 - z2^3*y1)^2 - (z1^2*x2 - z2^2*x1)^3 -
- * 2*z2^2*x1*(z1^2*x2 - z2^2*x1)^2
- */
+ /*-
+ * x_out = (z1^3*y2 - z2^3*y1)^2 - (z1^2*x2 - z2^2*x1)^3 -
+ * 2*z2^2*x1*(z1^2*x2 - z2^2*x1)^2
+ */
felem_diff_128_64(tmp2, ftmp5);
/* tmp2[i] < 2^117 + 2^64 + 8 < 2^118 */
felem_reduce(x_out, tmp2);
@@ -1066,10 +1066,10 @@ static void point_add(felem x3, felem y3, felem z3,
felem_mul(tmp2, ftmp3, ftmp2);
/* tmp2[i] < 4 * 2^57 * 2^59 = 2^118 */
- /*-
- * y_out = (z1^3*y2 - z2^3*y1)*(z2^2*x1*(z1^2*x2 - z2^2*x1)^2 - x_out) -
- * z2^3*y1*(z1^2*x2 - z2^2*x1)^3
- */
+ /*-
+ * y_out = (z1^3*y2 - z2^3*y1)*(z2^2*x1*(z1^2*x2 - z2^2*x1)^2 - x_out) -
+ * z2^3*y1*(z1^2*x2 - z2^2*x1)^3
+ */
widefelem_diff(tmp2, tmp);
/* tmp2[i] < 2^118 + 2^120 < 2^121 */
felem_reduce(y_out, tmp2);
diff --git a/crypto/ec/ecp_nistp256.c b/crypto/ec/ecp_nistp256.c
index ea63c10f49..a5887086c6 100644
--- a/crypto/ec/ecp_nistp256.c
+++ b/crypto/ec/ecp_nistp256.c
@@ -437,25 +437,25 @@ static void felem_shrink(smallfelem out, const felem in)
/* As tmp[3] < 2^65, high is either 1 or 0 */
high <<= 63;
high >>= 63;
- /*-
- * high is:
- * all ones if the high word of tmp[3] is 1
- * all zeros if the high word of tmp[3] if 0 */
+ /*-
+ * high is:
+ * all ones if the high word of tmp[3] is 1
+ * all zeros if the high word of tmp[3] if 0 */
low = tmp[3];
mask = low >> 63;
- /*-
- * mask is:
- * all ones if the MSB of low is 1
- * all zeros if the MSB of low if 0 */
+ /*-
+ * mask is:
+ * all ones if the MSB of low is 1
+ * all zeros if the MSB of low if 0 */
low &= bottom63bits;
low -= kPrime3Test;
/* if low was greater than kPrime3Test then the MSB is zero */
low = ~low;
low >>= 63;
- /*-
- * low is:
- * all ones if low was > kPrime3Test
- * all zeros if low was <= kPrime3Test */
+ /*-
+ * low is:
+ * all ones if low was > kPrime3Test
+ * all zeros if low was <= kPrime3Test */
mask = (mask & low) | high;
tmp[0] -= mask & kPrime[0];
tmp[1] -= mask & kPrime[1];
@@ -795,17 +795,17 @@ static void felem_reduce(felem out, const longfelem in)
felem_reduce_(out, in);
- /*-
- * out[0] > 2^100 - 2^36 - 2^4 - 3*2^64 - 3*2^96 - 2^64 - 2^96 > 0
- * out[1] > 2^100 - 2^64 - 7*2^96 > 0
- * out[2] > 2^100 - 2^36 + 2^4 - 5*2^64 - 5*2^96 > 0
- * out[3] > 2^100 - 2^36 + 2^4 - 7*2^64 - 5*2^96 - 3*2^96 > 0
- *
- * out[0] < 2^100 + 2^64 + 7*2^64 + 5*2^96 < 2^101
- * out[1] < 2^100 + 3*2^64 + 5*2^64 + 3*2^97 < 2^101
- * out[2] < 2^100 + 5*2^64 + 2^64 + 3*2^65 + 2^97 < 2^101
- * out[3] < 2^100 + 7*2^64 + 7*2^96 + 3*2^64 < 2^101
- */
+ /*-
+ * out[0] > 2^100 - 2^36 - 2^4 - 3*2^64 - 3*2^96 - 2^64 - 2^96 > 0
+ * out[1] > 2^100 - 2^64 - 7*2^96 > 0
+ * out[2] > 2^100 - 2^36 + 2^4 - 5*2^64 - 5*2^96 > 0
+ * out[3] > 2^100 - 2^36 + 2^4 - 7*2^64 - 5*2^96 - 3*2^96 > 0
+ *
+ * out[0] < 2^100 + 2^64 + 7*2^64 + 5*2^96 < 2^101
+ * out[1] < 2^100 + 3*2^64 + 5*2^64 + 3*2^97 < 2^101
+ * out[2] < 2^100 + 5*2^64 + 2^64 + 3*2^65 + 2^97 < 2^101
+ * out[3] < 2^100 + 7*2^64 + 7*2^96 + 3*2^64 < 2^101
+ */
}
/*-
@@ -824,17 +824,17 @@ static void felem_reduce_zero105(felem out, const longfelem in)
felem_reduce_(out, in);
- /*-
- * out[0] > 2^105 - 2^41 - 2^9 - 2^71 - 2^103 - 2^71 - 2^103 > 0
- * out[1] > 2^105 - 2^71 - 2^103 > 0
- * out[2] > 2^105 - 2^41 + 2^9 - 2^71 - 2^103 > 0
- * out[3] > 2^105 - 2^41 + 2^9 - 2^71 - 2^103 - 2^103 > 0
- *
- * out[0] < 2^105 + 2^71 + 2^71 + 2^103 < 2^106
- * out[1] < 2^105 + 2^71 + 2^71 + 2^103 < 2^106
- * out[2] < 2^105 + 2^71 + 2^71 + 2^71 + 2^103 < 2^106
- * out[3] < 2^105 + 2^71 + 2^103 + 2^71 < 2^106
- */
+ /*-
+ * out[0] > 2^105 - 2^41 - 2^9 - 2^71 - 2^103 - 2^71 - 2^103 > 0
+ * out[1] > 2^105 - 2^71 - 2^103 > 0
+ * out[2] > 2^105 - 2^41 + 2^9 - 2^71 - 2^103 > 0
+ * out[3] > 2^105 - 2^41 + 2^9 - 2^71 - 2^103 - 2^103 > 0
+ *
+ * out[0] < 2^105 + 2^71 + 2^71 + 2^103 < 2^106
+ * out[1] < 2^105 + 2^71 + 2^71 + 2^103 < 2^106
+ * out[2] < 2^105 + 2^71 + 2^71 + 2^71 + 2^103 < 2^106
+ * out[3] < 2^105 + 2^71 + 2^103 + 2^71 < 2^106
+ */
}
/*
@@ -1099,7 +1099,8 @@ static void smallfelem_inv_contract(smallfelem out, const smallfelem in)
*
* Building on top of the field operations we have the operations on the
* elliptic curve group itself. Points on the curve are represented in Jacobian
- * coordinates */
+ * coordinates
+ */
/*-
* point_double calculates 2*(x_in, y_in, z_in)
@@ -1108,7 +1109,8 @@ static void smallfelem_inv_contract(smallfelem out, const smallfelem in)
* http://hyperelliptic.org/EFD/g1p/auto-shortw-jacobian-3.html#doubling-dbl-2001-b
*
* Outputs can equal corresponding inputs, i.e., x_out == x_in is allowed.
- * while x_out == y_in is not (maybe this works, but it's not tested). */
+ * while x_out == y_in is not (maybe this works, but it's not tested).
+ */
static void
point_double(felem x_out, felem y_out, felem z_out,
const felem x_in, const felem y_in, const felem z_in)
@@ -1239,7 +1241,8 @@ static void copy_small_conditional(felem out, const smallfelem in, limb mask)
* This function includes a branch for checking whether the two input points
* are equal, (while not equal to the point at infinity). This case never
* happens during single point multiplication, so there is no timing leak for
- * ECDH or ECDSA signing. */
+ * ECDH or ECDSA signing.
+ */
static void point_add(felem x3, felem y3, felem z3,
const felem x1, const felem y1, const felem z1,
const int mixed, const smallfelem x2,
diff --git a/crypto/ec/ecp_nistp521.c b/crypto/ec/ecp_nistp521.c
index c1ef3fedac..cc7634512d 100644
--- a/crypto/ec/ecp_nistp521.c
+++ b/crypto/ec/ecp_nistp521.c
@@ -414,15 +414,16 @@ static void felem_square(largefelem out, const felem in)
felem_scalar(inx2, in, 2);
felem_scalar(inx4, in, 4);
- /*-
- * We have many cases were we want to do
- * in[x] * in[y] +
- * in[y] * in[x]
- * This is obviously just
- * 2 * in[x] * in[y]
- * However, rather than do the doubling on the 128 bit result, we
- * double one of the inputs to the multiplication by reading from
- * |inx2| */
+ /*-
+ * We have many cases were we want to do
+ * in[x] * in[y] +
+ * in[y] * in[x]
+ * This is obviously just
+ * 2 * in[x] * in[y]
+ * However, rather than do the doubling on the 128 bit result, we
+ * double one of the inputs to the multiplication by reading from
+ * |inx2|
+ */
out[0] = ((uint128_t) in[0]) * in[0];
out[1] = ((uint128_t) in[0]) * inx2[1];
@@ -610,10 +611,10 @@ static void felem_reduce(felem out, const largefelem in)
out[1] += ((limb) in[0]) >> 58;
out[1] += (((limb) (in[0] >> 64)) & bottom52bits) << 6;
- /*-
- * out[1] < 2^58 + 2^6 + 2^58
- * = 2^59 + 2^6
- */
+ /*-
+ * out[1] < 2^58 + 2^6 + 2^58
+ * = 2^59 + 2^6
+ */
out[2] += ((limb) (in[0] >> 64)) >> 52;
out[2] += ((limb) in[1]) >> 58;
@@ -642,10 +643,10 @@ static void felem_reduce(felem out, const largefelem in)
out[8] += ((limb) in[7]) >> 58;
out[8] += (((limb) (in[7] >> 64)) & bottom52bits) << 6;
- /*-
- * out[x > 1] < 2^58 + 2^6 + 2^58 + 2^12
- * < 2^59 + 2^13
- */
+ /*-
+ * out[x > 1] < 2^58 + 2^6 + 2^58 + 2^12
+ * < 2^59 + 2^13
+ */
overflow1 = ((limb) (in[7] >> 64)) >> 52;
overflow1 += ((limb) in[8]) >> 58;
@@ -660,11 +661,11 @@ static void felem_reduce(felem out, const largefelem in)
out[1] += out[0] >> 58;
out[0] &= bottom58bits;
- /*-
- * out[0] < 2^58
- * out[1] < 2^59 + 2^6 + 2^13 + 2^2
- * < 2^59 + 2^14
- */
+ /*-
+ * out[0] < 2^58
+ * out[1] < 2^59 + 2^6 + 2^13 + 2^2
+ * < 2^59 + 2^14
+ */
}
static void felem_square_reduce(felem out, const felem in)
@@ -1055,13 +1056,13 @@ point_double(felem x_out, felem y_out, felem z_out,
felem_scalar64(ftmp2, 3);
/* ftmp2[i] < 3*2^60 + 3*2^15 */
felem_mul(tmp, ftmp, ftmp2);
- /*-
- * tmp[i] < 17(3*2^121 + 3*2^76)
- * = 61*2^121 + 61*2^76
- * < 64*2^121 + 64*2^76
- * = 2^127 + 2^82
- * < 2^128
- */
+ /*-
+ * tmp[i] < 17(3*2^121 + 3*2^76)
+ * = 61*2^121 + 61*2^76
+ * < 64*2^121 + 64*2^76
+ * = 2^127 + 2^82
+ * < 2^128
+ */
felem_reduce(alpha, tmp);
/* x' = alpha^2 - 8*beta */
@@ -1096,30 +1097,30 @@ point_double(felem x_out, felem y_out, felem z_out,
felem_diff64(beta, x_out);
/* beta[i] < 2^61 + 2^60 + 2^16 */
felem_mul(tmp, alpha, beta);
- /*-
- * tmp[i] < 17*((2^59 + 2^14)(2^61 + 2^60 + 2^16))
- * = 17*(2^120 + 2^75 + 2^119 + 2^74 + 2^75 + 2^30)
- * = 17*(2^120 + 2^119 + 2^76 + 2^74 + 2^30)
- * < 2^128
- */
+ /*-
+ * tmp[i] < 17*((2^59 + 2^14)(2^61 + 2^60 + 2^16))
+ * = 17*(2^120 + 2^75 + 2^119 + 2^74 + 2^75 + 2^30)
+ * = 17*(2^120 + 2^119 + 2^76 + 2^74 + 2^30)
+ * < 2^128
+ */
felem_square(tmp2, gamma);
- /*-
- * tmp2[i] < 17*(2^59 + 2^14)^2
- * = 17*(2^118 + 2^74 + 2^28)
- */
+ /*-
+ * tmp2[i] < 17*(2^59 + 2^14)^2
+ * = 17*(2^118 + 2^74 + 2^28)
+ */
felem_scalar128(tmp2, 8);
- /*-
- * tmp2[i] < 8*17*(2^118 + 2^74 + 2^28)
- * = 2^125 + 2^121 + 2^81 + 2^77 + 2^35 + 2^31
- * < 2^126
- */
+ /*-
+ * tmp2[i] < 8*17*(2^118 + 2^74 + 2^28)
+ * = 2^125 + 2^121 + 2^81 + 2^77 + 2^35 + 2^31
+ * < 2^126
+ */
felem_diff128(tmp, tmp2);
- /*-
- * tmp[i] < 2^127 - 2^69 + 17(2^120 + 2^119 + 2^76 + 2^74 + 2^30)
- * = 2^127 + 2^124 + 2^122 + 2^120 + 2^118 + 2^80 + 2^78 + 2^76 +
- * 2^74 + 2^69 + 2^34 + 2^30
- * < 2^128
- */
+ /*-
+ * tmp[i] < 2^127 - 2^69 + 17(2^120 + 2^119 + 2^76 + 2^74 + 2^30)
+ * = 2^127 + 2^124 + 2^122 + 2^120 + 2^118 + 2^80 + 2^78 + 2^76 +
+ * 2^74 + 2^69 + 2^34 + 2^30
+ * < 2^128
+ */
felem_reduce(y_out, tmp);
}
@@ -1281,11 +1282,11 @@ static void point_add(felem x3, felem y3, felem z3,
felem_scalar128(tmp2, 2);
/* tmp2[i] < 17*2^121 */
felem_diff128(tmp, tmp2);
- /*-
- * tmp[i] < 2^127 - 2^69 + 17*2^122
- * = 2^126 - 2^122 - 2^6 - 2^2 - 1
- * < 2^127
- */
+ /*-
+ * tmp[i] < 2^127 - 2^69 + 17*2^122
+ * = 2^126 - 2^122 - 2^6 - 2^2 - 1
+ * < 2^127
+ */
felem_reduce(y_out, tmp);
copy_conditional(x_out, x2, z1_is_zero);
diff --git a/crypto/ec/ecp_oct.c b/crypto/ec/ecp_oct.c
index 77627bb5c4..e5cec8be82 100644
--- a/crypto/ec/ecp_oct.c
+++ b/crypto/ec/ecp_oct.c
@@ -96,11 +96,11 @@ int ec_GFp_simple_set_compressed_coordinates(const EC_GROUP *group,
if (y == NULL)
goto err;
- /*-
- * Recover y. We have a Weierstrass equation
- * y^2 = x^3 + a*x + b,
- * so y is one of the square roots of x^3 + a*x + b.
- */
+ /*-
+ * Recover y. We have a Weierstrass equation
+ * y^2 = x^3 + a*x + b,
+ * so y is one of the square roots of x^3 + a*x + b.
+ */
/* tmp1 := x^3 */
if (!BN_nnmod(x, x_, &group->field, ctx))
diff --git a/crypto/ec/ecp_smpl.c b/crypto/ec/ecp_smpl.c
index d196dedfb3..2b848216d7 100644
--- a/crypto/ec/ecp_smpl.c
+++ b/crypto/ec/ecp_smpl.c
@@ -320,11 +320,11 @@ int ec_GFp_simple_group_check_discriminant(const EC_GROUP *group, BN_CTX *ctx)
goto err;
}
- /*-
- * check the discriminant:
- * y^2 = x^3 + a*x + b is an elliptic curve <=> 4*a^3 + 27*b^2 != 0 (mod p)
- * 0 =< a, b < p
- */
+ /*-
+ * check the discriminant:
+ * y^2 = x^3 + a*x + b is an elliptic curve <=> 4*a^3 + 27*b^2 != 0 (mod p)
+ * 0 =< a, b < p
+ */
if (BN_is_zero(a)) {
if (BN_is_zero(b))
goto err;
@@ -900,10 +900,10 @@ int ec_GFp_simple_dbl(const EC_GROUP *group, EC_POINT *r, const EC_POINT *a,
goto err;
if (!BN_mod_add_quick(n1, n0, n1, p))
goto err;
- /*-
- * n1 = 3 * (X_a + Z_a^2) * (X_a - Z_a^2)
- * = 3 * X_a^2 - 3 * Z_a^4
- */
+ /*-
+ * n1 = 3 * (X_a + Z_a^2) * (X_a - Z_a^2)
+ * = 3 * X_a^2 - 3 * Z_a^4
+ */
} else {
if (!field_sqr(group, n0, &a->X, ctx))
goto err;
@@ -1024,15 +1024,15 @@ int ec_GFp_simple_is_on_curve(const EC_GROUP *group, const EC_POINT *point,
if (Z6 == NULL)
goto err;
- /*-
- * We have a curve defined by a Weierstrass equation
- * y^2 = x^3 + a*x + b.
- * The point to consider is given in Jacobian projective coordinates
- * where (X, Y, Z) represents (x, y) = (X/Z^2, Y/Z^3).
- * Substituting this and multiplying by Z^6 transforms the above equation into
- * Y^2 = X^3 + a*X*Z^4 + b*Z^6.
- * To test this, we add up the right-hand side in 'rh'.
- */
+ /*-
+ * We have a curve defined by a Weierstrass equation
+ * y^2 = x^3 + a*x + b.
+ * The point to consider is given in Jacobian projective coordinates
+ * where (X, Y, Z) represents (x, y) = (X/Z^2, Y/Z^3).
+ * Substituting this and multiplying by Z^6 transforms the above equation into
+ * Y^2 = X^3 + a*X*Z^4 + b*Z^6.
+ * To test this, we add up the right-hand side in 'rh'.
+ */
/* rh := X^2 */
if (!field_sqr(group, rh, &point->X, ctx))
@@ -1099,12 +1099,12 @@ int ec_GFp_simple_is_on_curve(const EC_GROUP *group, const EC_POINT *point,
int ec_GFp_simple_cmp(const EC_GROUP *group, const EC_POINT *a,
const EC_POINT *b, BN_CTX *ctx)
{
- /*-
- * return values:
- * -1 error
- * 0 equal (in affine coordinates)
- * 1 not equal
- */
+ /*-
+ * return values:
+ * -1 error
+ * 0 equal (in affine coordinates)
+ * 1 not equal
+ */
int (*field_mul) (const EC_GROUP *, BIGNUM *, const BIGNUM *,
const BIGNUM *, BN_CTX *);
@@ -1143,12 +1143,12 @@ int ec_GFp_simple_cmp(const EC_GROUP *group, const EC_POINT *a,
if (Zb23 == NULL)
goto end;
- /*-
- * We have to decide whether
- * (X_a/Z_a^2, Y_a/Z_a^3) = (X_b/Z_b^2, Y_b/Z_b^3),
- * or equivalently, whether
- * (X_a*Z_b^2, Y_a*Z_b^3) = (X_b*Z_a^2, Y_b*Z_a^3).
- */
+ /*-
+ * We have to decide whether
+ * (X_a/Z_a^2, Y_a/Z_a^3) = (X_b/Z_b^2, Y_b/Z_b^3),
+ * or equivalently, whether
+ * (X_a*Z_b^2, Y_a*Z_b^3) = (X_b*Z_a^2, Y_b*Z_a^3).
+ */
if (!b->Z_is_one) {
if (!field_sqr(group, Zb23, &b->Z, ctx))
diff --git a/crypto/idea/ideatest.c b/crypto/idea/ideatest.c
index d137a8f03a..a967dd58a7 100644
--- a/crypto/idea/ideatest.c
+++ b/crypto/idea/ideatest.c
@@ -102,9 +102,9 @@ static unsigned char cfb_cipher64[CFB_TEST_SIZE] = {
0x59, 0xD8, 0xE2, 0x65, 0x00, 0x58, 0x6C, 0x3F,
0x2C, 0x17, 0x25, 0xD0, 0x1A, 0x38, 0xB7, 0x2A,
0x39, 0x61, 0x37, 0xDC, 0x79, 0xFB, 0x9F, 0x45
-/*- 0xF9,0x78,0x32,0xB5,0x42,0x1A,0x6B,0x38,
- 0x9A,0x44,0xD6,0x04,0x19,0x43,0xC4,0xD9,
- 0x3D,0x1E,0xAE,0x47,0xFC,0xCF,0x29,0x0B,*/
+/*- 0xF9,0x78,0x32,0xB5,0x42,0x1A,0x6B,0x38,
+ 0x9A,0x44,0xD6,0x04,0x19,0x43,0xC4,0xD9,
+ 0x3D,0x1E,0xAE,0x47,0xFC,0xCF,0x29,0x0B,*/
};
static int cfb64_test(unsigned char *cfb_cipher);
diff --git a/crypto/lhash/lhash.c b/crypto/lhash/lhash.c
index e33b6fba48..53c5c138bb 100644
--- a/crypto/lhash/lhash.c
+++ b/crypto/lhash/lhash.c
@@ -434,9 +434,9 @@ unsigned long lh_strhash(const char *c)
if ((c == NULL) || (*c == '\0'))
return (ret);
/*-
- unsigned char b[16];
- MD5(c,strlen(c),b);
- return(b[0]|(b[1]<<8)|(b[2]<<16)|(b[3]<<24));
+ unsigned char b[16];
+ MD5(c,strlen(c),b);
+ return(b[0]|(b[1]<<8)|(b[2]<<16)|(b[3]<<24));
*/
n = 0x100;
diff --git a/crypto/o_time.c b/crypto/o_time.c
index 292bf07ee1..e18b71d484 100644
--- a/crypto/o_time.c
+++ b/crypto/o_time.c
@@ -159,30 +159,30 @@ struct tm *OPENSSL_gmtime(const time_t *timer, struct tm *result)
* do it the hard way.
*/
{
- /*-
- * The VMS epoch is the astronomical Smithsonian date,
- if I remember correctly, which is November 17, 1858.
- Furthermore, time is measure in thenths of microseconds
- and stored in quadwords (64 bit integers). unix_epoch
- below is January 1st 1970 expressed as a VMS time. The
- following code was used to get this number:
-
- #include <stdio.h>
- #include <stdlib.h>
- #include <lib$routines.h>
- #include <starlet.h>
-
- main()
- {
- unsigned long systime[2];
- unsigned short epoch_values[7] =
- { 1970, 1, 1, 0, 0, 0, 0 };
-
- lib$cvt_vectim(epoch_values, systime);
-
- printf("%u %u", systime[0], systime[1]);
- }
- */
+ /*-
+ * The VMS epoch is the astronomical Smithsonian date,
+ if I remember correctly, which is November 17, 1858.
+ Furthermore, time is measure in thenths of microseconds
+ and stored in quadwords (64 bit integers). unix_epoch
+ below is January 1st 1970 expressed as a VMS time. The
+ following code was used to get this number:
+
+ #include <stdio.h>
+ #include <stdlib.h>
+ #include <lib$routines.h>
+ #include <starlet.h>
+
+ main()
+ {
+ unsigned long systime[2];
+ unsigned short epoch_values[7] =
+ { 1970, 1, 1, 0, 0, 0, 0 };
+
+ lib$cvt_vectim(epoch_values, systime);
+
+ printf("%u %u", systime[0], systime[1]);
+ }
+ */
unsigned long unix_epoch[2] = { 1273708544, 8164711 };
unsigned long deltatime[2];
unsigned long systime[2];
diff --git a/crypto/pem/pem.h b/crypto/pem/pem.h
index 000a6ca72c..a33025809e 100644
--- a/crypto/pem/pem.h
+++ b/crypto/pem/pem.h
@@ -173,8 +173,8 @@ typedef struct pem_ctx_st {
struct {
int cipher;
/*-
- unused, and wrong size
- unsigned char iv[8]; */
+ unused, and wrong size
+ unsigned char iv[8]; */
} DEK_info;
PEM_USER *originator;
@@ -182,9 +182,9 @@ typedef struct pem_ctx_st {
int num_recipient;
PEM_USER **recipient;
- /*-
- XXX(ben): don#t think this is used!
- STACK *x509_chain; / * certificate chain */
+/*-
+ XXX(ben): don#t think this is used!
+ STACK *x509_chain; / * certificate chain */
EVP_MD *md; /* signature type */
int md_enc; /* is the md encrypted or not? */
@@ -194,9 +194,9 @@ typedef struct pem_ctx_st {
EVP_CIPHER *dec; /* date encryption cipher */
int key_len; /* key length */
unsigned char *key; /* key */
- /*-
- unused, and wrong size
- unsigned char iv[8]; */
+ /*-
+ unused, and wrong size
+ unsigned char iv[8]; */
int data_enc; /* is the data encrypted */
int data_len;
diff --git a/crypto/rand/randfile.c b/crypto/rand/randfile.c
index 8abb536d20..9537c56a78 100644
--- a/crypto/rand/randfile.c
+++ b/crypto/rand/randfile.c
@@ -112,10 +112,10 @@ static FILE *(*const vms_fopen)(const char *, const char *, ...) =
int RAND_load_file(const char *file, long bytes)
{
- /*-
- * If bytes >= 0, read up to 'bytes' bytes.
- * if bytes == -1, read complete file.
- */
+ /*-
+ * If bytes >= 0, read up to 'bytes' bytes.
+ * if bytes == -1, read complete file.
+ */
MS_STATIC unsigned char buf[BUFSIZE];
#ifndef OPENSSL_NO_POSIX_IO
diff --git a/crypto/rc2/rc2test.c b/crypto/rc2/rc2test.c
index 8347356312..e61df342ea 100644
--- a/crypto/rc2/rc2test.c
+++ b/crypto/rc2/rc2test.c
@@ -134,9 +134,9 @@ static unsigned char cfb_cipher64[CFB_TEST_SIZE] = {
0x59, 0xD8, 0xE2, 0x65, 0x00, 0x58, 0x6C, 0x3F,
0x2C, 0x17, 0x25, 0xD0, 0x1A, 0x38, 0xB7, 0x2A,
0x39, 0x61, 0x37, 0xDC, 0x79, 0xFB, 0x9F, 0x45
-/*- 0xF9,0x78,0x32,0xB5,0x42,0x1A,0x6B,0x38,
- 0x9A,0x44,0xD6,0x04,0x19,0x43,0xC4,0xD9,
- 0x3D,0x1E,0xAE,0x47,0xFC,0xCF,0x29,0x0B,*/
+/*- 0xF9,0x78,0x32,0xB5,0x42,0x1A,0x6B,0x38,
+ 0x9A,0x44,0xD6,0x04,0x19,0x43,0xC4,0xD9,
+ 0x3D,0x1E,0xAE,0x47,0xFC,0xCF,0x29,0x0B,*/
};
/*
diff --git a/crypto/rc4/rc4_enc.c b/crypto/rc4/rc4_enc.c
index 583e8e5de6..6ebd54d46c 100644
--- a/crypto/rc4/rc4_enc.c
+++ b/crypto/rc4/rc4_enc.c
@@ -80,36 +80,36 @@ void RC4(RC4_KEY *key, size_t len, const unsigned char *indata,
d = key->data;
#if defined(RC4_CHUNK)
- /*-
- * The original reason for implementing this(*) was the fact that
- * pre-21164a Alpha CPUs don't have byte load/store instructions
- * and e.g. a byte store has to be done with 64-bit load, shift,
- * and, or and finally 64-bit store. Peaking data and operating
- * at natural word size made it possible to reduce amount of
- * instructions as well as to perform early read-ahead without
- * suffering from RAW (read-after-write) hazard. This resulted
- * in ~40%(**) performance improvement on 21064 box with gcc.
- * But it's not only Alpha users who win here:-) Thanks to the
- * early-n-wide read-ahead this implementation also exhibits
- * >40% speed-up on SPARC and 20-30% on 64-bit MIPS (depending
- * on sizeof(RC4_INT)).
- *
- * (*) "this" means code which recognizes the case when input
- * and output pointers appear to be aligned at natural CPU
- * word boundary
- * (**) i.e. according to 'apps/openssl speed rc4' benchmark,
- * crypto/rc4/rc4speed.c exhibits almost 70% speed-up...
- *
- * Cavets.
- *
- * - RC4_CHUNK="unsigned long long" should be a #1 choice for
- * UltraSPARC. Unfortunately gcc generates very slow code
- * (2.5-3 times slower than one generated by Sun's WorkShop
- * C) and therefore gcc (at least 2.95 and earlier) should
- * always be told that RC4_CHUNK="unsigned long".
- *
- * <appro@fy.chalmers.se>
- */
+ /*-
+ * The original reason for implementing this(*) was the fact that
+ * pre-21164a Alpha CPUs don't have byte load/store instructions
+ * and e.g. a byte store has to be done with 64-bit load, shift,
+ * and, or and finally 64-bit store. Peaking data and operating
+ * at natural word size made it possible to reduce amount of
+ * instructions as well as to perform early read-ahead without
+ * suffering from RAW (read-after-write) hazard. This resulted
+ * in ~40%(**) performance improvement on 21064 box with gcc.
+ * But it's not only Alpha users who win here:-) Thanks to the
+ * early-n-wide read-ahead this implementation also exhibits
+ * >40% speed-up on SPARC and 20-30% on 64-bit MIPS (depending
+ * on sizeof(RC4_INT)).
+ *
+ * (*) "this" means code which recognizes the case when input
+ * and output pointers appear to be aligned at natural CPU
+ * word boundary
+ * (**) i.e. according to 'apps/openssl speed rc4' benchmark,
+ * crypto/rc4/rc4speed.c exhibits almost 70% speed-up...
+ *
+ * Caveats.
+ *
+ * - RC4_CHUNK="unsigned long long" should be a #1 choice for
+ * UltraSPARC. Unfortunately gcc generates very slow code
+ * (2.5-3 times slower than one generated by Sun's WorkShop
+ * C) and therefore gcc (at least 2.95 and earlier) should
+ * always be told that RC4_CHUNK="unsigned long".
+ *
+ * <appro@fy.chalmers.se>
+ */
# define RC4_STEP ( \
x=(x+1) &0xff, \
@@ -131,34 +131,34 @@ void RC4(RC4_KEY *key, size_t len, const unsigned char *indata,
1
};
- /*-
- * I reckon we can afford to implement both endian
- * cases and to decide which way to take at run-time
- * because the machine code appears to be very compact
- * and redundant 1-2KB is perfectly tolerable (i.e.
- * in case the compiler fails to eliminate it:-). By
- * suggestion from Terrel Larson <terr@terralogic.net>
- * who also stands for the is_endian union:-)
- *
- * Special notes.
- *
- * - is_endian is declared automatic as doing otherwise
- * (declaring static) prevents gcc from eliminating
- * the redundant code;
- * - compilers (those I've tried) don't seem to have
- * problems eliminating either the operators guarded
- * by "if (sizeof(RC4_CHUNK)==8)" or the condition
- * expressions themselves so I've got 'em to replace
- * corresponding #ifdefs from the previous version;
- * - I chose to let the redundant switch cases when
- * sizeof(RC4_CHUNK)!=8 be (were also #ifdefed
- * before);
- * - in case you wonder "&(sizeof(RC4_CHUNK)*8-1)" in
- * [LB]ESHFT guards against "shift is out of range"
- * warnings when sizeof(RC4_CHUNK)!=8
- *
- * <appro@fy.chalmers.se>
- */
+ /*-
+ * I reckon we can afford to implement both endian
+ * cases and to decide which way to take at run-time
+ * because the machine code appears to be very compact
+ * and redundant 1-2KB is perfectly tolerable (i.e.
+ * in case the compiler fails to eliminate it:-). By
+ * suggestion from Terrel Larson <terr@terralogic.net>
+ * who also stands for the is_endian union:-)
+ *
+ * Special notes.
+ *
+ * - is_endian is declared automatic as doing otherwise
+ * (declaring static) prevents gcc from eliminating
+ * the redundant code;
+ * - compilers (those I've tried) don't seem to have
+ * problems eliminating either the operators guarded
+ * by "if (sizeof(RC4_CHUNK)==8)" or the condition
+ * expressions themselves so I've got 'em to replace
+ * corresponding #ifdefs from the previous version;
+ * - I chose to let the redundant switch cases when
+ * sizeof(RC4_CHUNK)!=8 be (were also #ifdefed
+ * before);
+ * - in case you wonder "&(sizeof(RC4_CHUNK)*8-1)" in
+ * [LB]ESHFT guards against "shift is out of range"
+ * warnings when sizeof(RC4_CHUNK)!=8
+ *
+ * <appro@fy.chalmers.se>
+ */
if (!is_endian.little) { /* BIG-ENDIAN CASE */
# define BESHFT(c) (((sizeof(RC4_CHUNK)-(c)-1)*8)&(sizeof(RC4_CHUNK)*8-1))
for (; len & (0 - sizeof(RC4_CHUNK)); len -= sizeof(RC4_CHUNK)) {
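
The two comment blocks above describe the word-at-a-time RC4 path and the run-time endianness test built around the 'is_endian' union. The union trick itself can be shown in isolation; this is a stand-alone sketch with illustrative names, not the OpenSSL code:

    #include <stdio.h>

    static int is_little_endian(void)
    {
        /* As the note above points out, the union is declared automatic
         * (not static) so the compiler can fold the whole test away.
         * Initialising the word member to 1 leaves a non-zero first
         * byte only on a little-endian machine. */
        const union {
            unsigned long word;
            unsigned char bytes[sizeof(unsigned long)];
        } probe = { 1 };

        return probe.bytes[0] != 0;
    }

    int main(void)
    {
        printf("%s-endian\n", is_little_endian() ? "little" : "big");
        return 0;
    }
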
diff --git a/crypto/rsa/rsa_pss.c b/crypto/rsa/rsa_pss.c
index b6a28cc3ff..41bc0844e4 100644
--- a/crypto/rsa/rsa_pss.c
+++ b/crypto/rsa/rsa_pss.c
@@ -97,12 +97,12 @@ int RSA_verify_PKCS1_PSS_mgf1(RSA *rsa, const unsigned char *mHash,
hLen = EVP_MD_size(Hash);
if (hLen < 0)
goto err;
- /*-
- * Negative sLen has special meanings:
- * -1 sLen == hLen
- * -2 salt length is autorecovered from signature
- * -N reserved
- */
+ /*-
+ * Negative sLen has special meanings:
+ * -1 sLen == hLen
+ * -2 salt length is autorecovered from signature
+ * -N reserved
+ */
if (sLen == -1)
sLen = hLen;
else if (sLen == -2)
@@ -201,12 +201,12 @@ int RSA_padding_add_PKCS1_PSS_mgf1(RSA *rsa, unsigned char *EM,
hLen = EVP_MD_size(Hash);
if (hLen < 0)
goto err;
- /*-
- * Negative sLen has special meanings:
- * -1 sLen == hLen
- * -2 salt length is maximized
- * -N reserved
- */
+ /*-
+ * Negative sLen has special meanings:
+ * -1 sLen == hLen
+ * -2 salt length is maximized
+ * -N reserved
+ */
if (sLen == -1)
sLen = hLen;
else if (sLen == -2)
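
Both rsa_pss.c hunks carry the same convention for a negative sLen: -1 asks for a salt as long as the digest, -2 asks for auto-recovery (verify) or a maximal salt (sign), and anything below that is reserved. A hedged sketch that pads a SHA-1 hash with PSS and immediately re-checks the encoding, exercising sLen == -1; the buffer size, helper name and trimmed error handling are illustrative:

    #include <openssl/evp.h>
    #include <openssl/rsa.h>
    #include <openssl/sha.h>

    int pss_selftest(RSA *rsa, const unsigned char mHash[SHA_DIGEST_LENGTH])
    {
        unsigned char EM[512];              /* must hold RSA_size(rsa) bytes */

        if (RSA_size(rsa) > (int)sizeof(EM))
            return 0;

        /* sLen == -1: salt length equals the digest length. */
        if (RSA_padding_add_PKCS1_PSS_mgf1(rsa, EM, mHash,
                                           EVP_sha1(), NULL, -1) <= 0)
            return 0;

        /* sLen == -2 here would auto-recover the salt length from EM. */
        return RSA_verify_PKCS1_PSS_mgf1(rsa, mHash, EVP_sha1(), NULL,
                                         EM, -1);
    }
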
diff --git a/crypto/threads/mttest.c b/crypto/threads/mttest.c
index 34664f0fc8..8f67db6ee3 100644
--- a/crypto/threads/mttest.c
+++ b/crypto/threads/mttest.c
@@ -333,8 +333,8 @@ int main(int argc, char *argv[])
fprintf(stderr, "-----\n");
lh_stats(SSL_CTX_sessions(s_ctx), stderr);
fprintf(stderr, "-----\n");
- /*- lh_node_stats(SSL_CTX_sessions(s_ctx),stderr);
- fprintf(stderr,"-----\n"); */
+ /*- lh_node_stats(SSL_CTX_sessions(s_ctx),stderr);
+ fprintf(stderr,"-----\n"); */
lh_node_usage_stats(SSL_CTX_sessions(s_ctx), stderr);
fprintf(stderr, "-----\n");
}
@@ -369,11 +369,11 @@ int ndoit(SSL_CTX *ssl_ctx[2])
fprintf(stdout, "started thread %lu\n", CRYPTO_thread_id());
for (i = 0; i < number_of_loops; i++) {
-/*- fprintf(stderr,"%4d %2d ctx->ref (%3d,%3d)\n",
- CRYPTO_thread_id(),i,
- ssl_ctx[0]->references,
- ssl_ctx[1]->references); */
- /* pthread_delay_np(&tm); */
+/*- fprintf(stderr,"%4d %2d ctx->ref (%3d,%3d)\n",
+ CRYPTO_thread_id(),i,
+ ssl_ctx[0]->references,
+ ssl_ctx[1]->references); */
+/* pthread_delay_np(&tm); */
ret = doit(ctx);
if (ret != 0) {
@@ -801,23 +801,23 @@ void solaris_locking_callback(int mode, int type, char *file, int line)
(type & CRYPTO_READ) ? "r" : "w", file, line);
# endif
- /*-
- if (CRYPTO_LOCK_SSL_CERT == type)
- fprintf(stderr,"(t,m,f,l) %ld %d %s %d\n",
- CRYPTO_thread_id(),
- mode,file,line);
- */
+ /*-
+ if (CRYPTO_LOCK_SSL_CERT == type)
+ fprintf(stderr,"(t,m,f,l) %ld %d %s %d\n",
+ CRYPTO_thread_id(),
+ mode,file,line);
+ */
if (mode & CRYPTO_LOCK) {
/*-
- if (mode & CRYPTO_READ)
- rw_rdlock(&(lock_cs[type]));
- else
- rw_wrlock(&(lock_cs[type])); */
+ if (mode & CRYPTO_READ)
+ rw_rdlock(&(lock_cs[type]));
+ else
+ rw_wrlock(&(lock_cs[type])); */
mutex_lock(&(lock_cs[type]));
lock_count[type]++;
} else {
-/* rw_unlock(&(lock_cs[type])); */
+/* rw_unlock(&(lock_cs[type])); */
mutex_unlock(&(lock_cs[type]));
}
}
@@ -987,10 +987,10 @@ void pthreads_locking_callback(int mode, int type, char *file, int line)
(type & CRYPTO_READ) ? "r" : "w", file, line);
# endif
/*-
- if (CRYPTO_LOCK_SSL_CERT == type)
- fprintf(stderr,"(t,m,f,l) %ld %d %s %d\n",
- CRYPTO_thread_id(),
- mode,file,line);
+ if (CRYPTO_LOCK_SSL_CERT == type)
+ fprintf(stderr,"(t,m,f,l) %ld %d %s %d\n",
+ CRYPTO_thread_id(),
+ mode,file,line);
*/
if (mode & CRYPTO_LOCK) {
pthread_mutex_lock(&(lock_cs[type]));
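
The mttest.c hunks above touch the Solaris and pthreads locking callbacks used with the 1.0.x threading API. A minimal pthreads setup in the same spirit; the function names are illustrative, and the pthread_self() cast is the usual shortcut rather than a portable guarantee:

    #include <pthread.h>
    #include <openssl/crypto.h>

    static pthread_mutex_t *lock_cs;

    /* Same shape as pthreads_locking_callback() above: take or release
     * the mutex selected by 'type' depending on CRYPTO_LOCK. */
    static void locking_cb(int mode, int type, const char *file, int line)
    {
        if (mode & CRYPTO_LOCK)
            pthread_mutex_lock(&lock_cs[type]);
        else
            pthread_mutex_unlock(&lock_cs[type]);
    }

    static unsigned long id_cb(void)
    {
        return (unsigned long)pthread_self();
    }

    int thread_setup(void)
    {
        int i;

        lock_cs = OPENSSL_malloc(CRYPTO_num_locks() * sizeof(*lock_cs));
        if (lock_cs == NULL)
            return 0;
        for (i = 0; i < CRYPTO_num_locks(); i++)
            pthread_mutex_init(&lock_cs[i], NULL);

        CRYPTO_set_id_callback(id_cb);
        CRYPTO_set_locking_callback(locking_cb);
        return 1;
    }
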
diff --git a/crypto/whrlpool/wp_dgst.c b/crypto/whrlpool/wp_dgst.c
index 24f7bb9d38..e33bb4f833 100644
--- a/crypto/whrlpool/wp_dgst.c
+++ b/crypto/whrlpool/wp_dgst.c
@@ -132,18 +132,18 @@ void WHIRLPOOL_BitUpdate(WHIRLPOOL_CTX *c, const void *_inp, size_t bits)
} else /* bit-oriented loop */
#endif
{
- /*-
- inp
- |
- +-------+-------+-------
- |||||||||||||||||||||
- +-------+-------+-------
- +-------+-------+-------+-------+-------
- |||||||||||||| c->data
- +-------+-------+-------+-------+-------
- |
- c->bitoff/8
- */
+ /*-
+ inp
+ |
+ +-------+-------+-------
+ |||||||||||||||||||||
+ +-------+-------+-------
+ +-------+-------+-------+-------+-------
+ |||||||||||||| c->data
+ +-------+-------+-------+-------+-------
+ |
+ c->bitoff/8
+ */
while (bits) {
unsigned int byteoff = bitoff / 8;
unsigned char b;
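
The re-flowed diagram documents how WHIRLPOOL_BitUpdate() merges partial bytes into c->data at the current bit offset. A short usage sketch hashing a 12-bit message, i.e. a length that is not a whole number of bytes; the input value is arbitrary:

    #include <stdio.h>
    #include <openssl/whrlpool.h>

    int main(void)
    {
        /* 12 message bits: one full byte plus the top 4 bits of the next. */
        static const unsigned char msg[2] = { 0xAB, 0xC0 };
        unsigned char md[WHIRLPOOL_DIGEST_LENGTH];
        WHIRLPOOL_CTX c;
        int i;

        WHIRLPOOL_Init(&c);
        WHIRLPOOL_BitUpdate(&c, msg, 12);   /* the length is in bits, not bytes */
        WHIRLPOOL_Final(md, &c);

        for (i = 0; i < WHIRLPOOL_DIGEST_LENGTH; i++)
            printf("%02x", md[i]);
        printf("\n");
        return 0;
    }
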
diff --git a/crypto/x509/by_dir.c b/crypto/x509/by_dir.c
index 90a3784858..9ee8f8d859 100644
--- a/crypto/x509/by_dir.c
+++ b/crypto/x509/by_dir.c
@@ -424,8 +424,8 @@ static int get_cert_by_subject(X509_LOOKUP *xl, int type, X509_NAME *name,
* If we were going to up the reference count, we would need to
* do it on a per 'type' basis
*/
- /*- CRYPTO_add(&tmp->data.x509->references,1,
- CRYPTO_LOCK_X509);*/
+ /*- CRYPTO_add(&tmp->data.x509->references,1,
+ CRYPTO_LOCK_X509);*/
goto finish;
}
}
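
The commented-out CRYPTO_add() call above is the 1.0.x idiom for taking an extra reference on an X509 under the per-type lock. A small sketch of that idiom; the wrapper name is made up, and the caller is expected to drop the reference with X509_free():

    #include <openssl/x509.h>

    /* Hand out an extra reference to 'x'; X509_free() on the returned
     * pointer decrements the same counter again. */
    X509 *x509_up_ref_compat(X509 *x)
    {
        CRYPTO_add(&x->references, 1, CRYPTO_LOCK_X509);
        return x;
    }
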
diff --git a/crypto/x509/x509_lu.c b/crypto/x509/x509_lu.c
index b42409fc76..a910636f82 100644
--- a/crypto/x509/x509_lu.c
+++ b/crypto/x509/x509_lu.c
@@ -309,8 +309,8 @@ int X509_STORE_get_by_subject(X509_STORE_CTX *vs, int type, X509_NAME *name,
return 0;
}
-/*- if (ret->data.ptr != NULL)
- X509_OBJECT_free_contents(ret); */
+/*- if (ret->data.ptr != NULL)
+ X509_OBJECT_free_contents(ret); */
ret->type = tmp->type;
ret->data.ptr = tmp->data.ptr;
diff --git a/crypto/x509/x509_r2x.c b/crypto/x509/x509_r2x.c
index 40b23e512a..0ff439c99f 100644
--- a/crypto/x509/x509_r2x.c
+++ b/crypto/x509/x509_r2x.c
@@ -84,8 +84,8 @@ X509 *X509_REQ_to_X509(X509_REQ *r, int days, EVP_PKEY *pkey)
goto err;
if (!ASN1_INTEGER_set(xi->version, 2))
goto err;
-/*- xi->extensions=ri->attributes; <- bad, should not ever be done
- ri->attributes=NULL; */
+/*- xi->extensions=ri->attributes; <- bad, should not ever be done
+ ri->attributes=NULL; */
}
xn = X509_REQ_get_subject_name(r);
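
X509_REQ_to_X509() above builds a throw-away certificate from a request, and the struck-out lines warn against aliasing the request's attributes into it. A brief usage sketch, assuming a ready X509_REQ and signing key; the wrapper name is illustrative:

    #include <openssl/x509.h>

    /* Turn a certificate request into a certificate valid for 'days'
     * days, signed with 'pkey'; returns NULL on error. */
    X509 *quick_cert(X509_REQ *req, EVP_PKEY *pkey, int days)
    {
        X509 *x = X509_REQ_to_X509(req, days, pkey);

        /* The result carries the request's subject as both subject and
         * issuer; the caller frees it with X509_free(). */
        return x;
    }
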
diff --git a/crypto/x509/x509_vfy.c b/crypto/x509/x509_vfy.c
index d02eebfc7a..136bfbda61 100644
--- a/crypto/x509/x509_vfy.c
+++ b/crypto/x509/x509_vfy.c
@@ -461,15 +461,15 @@ static int check_chain_extensions(X509_STORE_CTX *ctx)
int allow_proxy_certs;
cb = ctx->verify_cb;
- /*-
- * must_be_ca can have 1 of 3 values:
- * -1: we accept both CA and non-CA certificates, to allow direct
- * use of self-signed certificates (which are marked as CA).
- * 0: we only accept non-CA certificates. This is currently not
- * used, but the possibility is present for future extensions.
- * 1: we only accept CA certificates. This is currently used for
- * all certificates in the chain except the leaf certificate.
- */
+ /*-
+ * must_be_ca can have 1 of 3 values:
+ * -1: we accept both CA and non-CA certificates, to allow direct
+ * use of self-signed certificates (which are marked as CA).
+ * 0: we only accept non-CA certificates. This is currently not
+ * used, but the possibility is present for future extensions.
+ * 1: we only accept CA certificates. This is currently used for
+ * all certificates in the chain except the leaf certificate.
+ */
must_be_ca = -1;
/* CRL path validation */
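
The must_be_ca comment above documents a three-state flag. The following is only a simplified illustration of how such a flag can be paired with X509_check_ca() when walking a chain leaf-first; it is not the actual check_chain_extensions() logic, and the function name is made up:

    #include <openssl/x509v3.h>

    static int chain_ca_flags_ok(STACK_OF(X509) *chain)
    {
        int i, must_be_ca = -1;         /* the leaf may be CA or non-CA */

        for (i = 0; i < sk_X509_num(chain); i++) {
            X509 *x = sk_X509_value(chain, i);
            int is_ca = X509_check_ca(x) != 0;

            if (must_be_ca == 0 && is_ca)
                return 0;               /* only non-CA accepted here */
            if (must_be_ca == 1 && !is_ca)
                return 0;               /* only CA accepted here */
            must_be_ca = 1;             /* everything above the leaf must be a CA */
        }
        return 1;
    }
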
diff --git a/crypto/x509/x509name.c b/crypto/x509/x509name.c
index fc0475f7c2..6ea601f962 100644
--- a/crypto/x509/x509name.c
+++ b/crypto/x509/x509name.c
@@ -162,16 +162,16 @@ X509_NAME_ENTRY *X509_NAME_delete_entry(X509_NAME *name, int loc)
set_prev = ret->set - 1;
set_next = sk_X509_NAME_ENTRY_value(sk, loc)->set;
- /*-
- * set_prev is the previous set
- * set is the current set
- * set_next is the following
- * prev 1 1 1 1 1 1 1 1
- * set 1 1 2 2
- * next 1 1 2 2 2 2 3 2
- * so basically only if prev and next differ by 2, then
- * re-number down by 1
- */
+ /*-
+ * set_prev is the previous set
+ * set is the current set
+ * set_next is the following
+ * prev 1 1 1 1 1 1 1 1
+ * set 1 1 2 2
+ * next 1 1 2 2 2 2 3 2
+ * so basically only if prev and next differ by 2, then
+ * re-number down by 1
+ */
if (set_prev + 1 < set_next)
for (i = loc; i < n; i++)
sk_X509_NAME_ENTRY_value(sk, i)->set--;
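
The renumbering rule above keeps the RDN 'set' numbers dense after X509_NAME_delete_entry() removes an entry. A short usage sketch; the field values are arbitrary and the helper name is made up:

    #include <openssl/x509.h>

    static X509_NAME *demo_name(void)
    {
        X509_NAME *name = X509_NAME_new();
        X509_NAME_ENTRY *gone;

        if (name == NULL)
            return NULL;
        X509_NAME_add_entry_by_txt(name, "O", MBSTRING_ASC,
                                   (const unsigned char *)"Example", -1, -1, 0);
        X509_NAME_add_entry_by_txt(name, "CN", MBSTRING_ASC,
                                   (const unsigned char *)"host", -1, -1, 0);

        /* The deleted entry is returned, not freed, so release it here;
         * the sets of the remaining entries are renumbered as described. */
        gone = X509_NAME_delete_entry(name, 0);
        X509_NAME_ENTRY_free(gone);
        return name;
    }
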