| author    | Torbjorn Granlund <tege@gmplib.org> | 2011-11-21 21:15:18 +0100 |
| committer | Torbjorn Granlund <tege@gmplib.org> | 2011-11-21 21:15:18 +0100 |
| commit    | f24a8deaf598267ea9c57ba93e9e6a94038bc8f3 (patch) | |
| tree      | ab76a1173414ebafa1aa42527df660603053d18a | |
| parent    | 15a7619b6229dea0d8d895aaa5506e40304dcb3f (diff) | |
| download  | gmp-f24a8deaf598267ea9c57ba93e9e6a94038bc8f3.tar.gz | |
Spacing cleanup.
33 files changed, 85 insertions, 85 deletions
@@ -58,5 +58,5 @@
 David Harvey      mpn/generic/add_err1_n.c, add_err2_n.c, aors_err2_n.asm,
                   aors_err3_n.asm, mulmid_basecase.asm,
                   mpn/x86_64/core2/aors_err1_n.asm.
-
+
 Martin Boij       mpn/generic/perfpow.c
@@ -1535,7 +1535,7 @@ __GMP_DECLSPEC mp_limb_t mpn_divrem_2 __GMP_PROTO ((mp_ptr, mp_size_t, mp_ptr, m
 #define mpn_div_qr_2 __MPN(div_qr_2)
 __GMP_DECLSPEC mp_limb_t mpn_div_qr_2 __GMP_PROTO ((mp_ptr, mp_ptr, mp_srcptr, mp_size_t, mp_srcptr));
-
+
 #define mpn_gcd __MPN(gcd)
 __GMP_DECLSPEC mp_size_t mpn_gcd __GMP_PROTO ((mp_ptr, mp_ptr, mp_size_t, mp_ptr, mp_size_t));
@@ -616,7 +616,7 @@ struct __gmp_binary_divides
       }
     else
 #endif
-      mpz_tdiv_q_ui(z, w, l);
+      mpz_tdiv_q_ui(z, w, l);
   }
   static void eval(mpz_ptr z, unsigned long int l, mpz_srcptr w)
   {
diff --git a/mpn/generic/gcd_subdiv_step.c b/mpn/generic/gcd_subdiv_step.c
index 11c00bb6a..3db34073c 100644
--- a/mpn/generic/gcd_subdiv_step.c
+++ b/mpn/generic/gcd_subdiv_step.c
@@ -185,7 +185,7 @@ mpn_gcd_subdiv_step (mp_ptr ap, mp_ptr bp, mp_size_t n, mp_size_t s,
 	}
       else
 	MPN_COPY (bp, ap, an);
-
+
       MPN_DECR_U (tp, qn, 1);
     }
diff --git a/mpn/generic/hgcd_appr.c b/mpn/generic/hgcd_appr.c
index 8454f9da5..f7c7eb2c9 100644
--- a/mpn/generic/hgcd_appr.c
+++ b/mpn/generic/hgcd_appr.c
@@ -72,7 +72,7 @@ mpn_hgcd_appr (mp_ptr ap, mp_ptr bp, mp_size_t n,
      we discard some of the least significant limbs, we must keep one
      additional bit to account for the truncation error. We maintain the
      GMP_NUMB_BITS * s - extra_bits as the current target size. */
-
+
   s = n/2 + 1;
   if (BELOW_THRESHOLD (n, HGCD_APPR_THRESHOLD))
     {
@@ -155,7 +155,7 @@ mpn_hgcd_appr (mp_ptr ap, mp_ptr bp, mp_size_t n,
 	  ASSERT (n <= 2*s);
 	  nn = mpn_hgcd_step (n, ap, bp, s, M, tp);
-
+
 	  if (!nn)
 	    return 1;
@@ -249,7 +249,7 @@ mpn_hgcd_appr (mp_ptr ap, mp_ptr bp, mp_size_t n,
 	  ASSERT (n <= 2*s);
 	  nn = mpn_hgcd_step (n, ap, bp, s, M, tp);
-
+
 	  if (!nn)
 	    return success;
diff --git a/mpn/generic/hgcd_jacobi.c b/mpn/generic/hgcd_jacobi.c
index 2dce43b99..0d4cb021c 100644
--- a/mpn/generic/hgcd_jacobi.c
+++ b/mpn/generic/hgcd_jacobi.c
@@ -26,7 +26,7 @@ along with the GNU MP Library.  If not, see http://www.gnu.org/licenses/.  */
 #include "longlong.h"
 /* This file is almost a copy of hgcd.c, with some added calls to
-   mpn_jacobi_update */
+   mpn_jacobi_update */
 struct hgcd_jacobi_ctx
 {
@@ -127,7 +127,7 @@ hgcd_jacobi_step (mp_size_t n, mp_ptr ap, mp_ptr bp, mp_size_t s,
       struct hgcd_jacobi_ctx ctx;
       ctx.M = M;
       ctx.bitsp = bitsp;
-
+
       return mpn_gcd_subdiv_step (ap, bp, n, s, hgcd_jacobi_hook, &ctx, tp);
     }
 }
diff --git a/mpn/generic/hgcd_reduce.c b/mpn/generic/hgcd_reduce.c
index 142d44a30..89240af4d 100644
--- a/mpn/generic/hgcd_reduce.c
+++ b/mpn/generic/hgcd_reduce.c
@@ -38,7 +38,7 @@ submul (mp_ptr rp, mp_size_t rn,
   ASSERT (an >= bn);
   ASSERT (rn >= an);
   ASSERT (an + bn <= rn + 1);
-
+
   TMP_MARK;
   tp = TMP_ALLOC_LIMBS (an + bn);
@@ -61,7 +61,7 @@ submul (mp_ptr rp, mp_size_t rn,
 /* FIXME:
    x Take scratch parameter, and figure out scratch need.
-   x Use some fallback for small M->n?
+   x Use some fallback for small M->n?
 */
 static mp_size_t
 hgcd_matrix_apply (const struct hgcd_matrix *M,
@@ -83,7 +83,7 @@ hgcd_matrix_apply (const struct hgcd_matrix *M,
   MPN_NORMALIZE (ap, an);
   bn = n;
   MPN_NORMALIZE (bp, bn);
-
+
   for (i = 0; i < 2; i++)
     for (j = 0; j < 2; j++)
       {
@@ -102,7 +102,7 @@ hgcd_matrix_apply (const struct hgcd_matrix *M,
   if (mn[0][1] == 0)
     {
       mp_size_t qn;
-
+
       /* A unchanged, M = (1, 0; q, 1) */
       ASSERT (mn[0][0] == 1);
       ASSERT (M->p[0][0][0] == 1);
@@ -121,7 +121,7 @@ hgcd_matrix_apply (const struct hgcd_matrix *M,
       ASSERT (M->p[1][1][0] == 1);
       /* Put A <-- A - q * B */
-      nn = submul (ap, an, bp, bn, M->p[0][1], mn[0][1]);
+      nn = submul (ap, an, bp, bn, M->p[0][1], mn[0][1]);
     }
   else
     {
@@ -159,7 +159,7 @@ hgcd_matrix_apply (const struct hgcd_matrix *M,
 	MPN_ZERO (tp + n + mn[1][1], modn - n - mn[1][1]);
       if (n + mn[0][1] < modn)
 	MPN_ZERO (sp + n + mn[0][1], modn - n - mn[0][1]);
-
+
       cy = mpn_sub_n (tp, tp, sp, modn);
       MPN_DECR_U (tp, modn, cy);
@@ -209,7 +209,7 @@ mpn_hgcd_reduce_itch (mp_size_t n, mp_size_t p)
       itch = 2*(n-p) + mpn_hgcd_itch (n-p);
       /* Currently, hgcd_matrix_apply allocates its own storage. */
     }
-  return itch;
+  return itch;
 }
 /* FIXME: Document storage need. */
diff --git a/mpn/generic/hgcd_step.c b/mpn/generic/hgcd_step.c
index 0e56be39e..dbc757935 100644
--- a/mpn/generic/hgcd_step.c
+++ b/mpn/generic/hgcd_step.c
@@ -112,7 +112,7 @@ mpn_hgcd_step (mp_size_t n, mp_ptr ap, mp_ptr bp, mp_size_t s,
       /* Multiply M1^{-1} (a;b) */
       return mpn_matrix22_mul1_inverse_vector (&M1, ap, tp, bp, n);
     }
-
+
  subtract:
   return mpn_gcd_subdiv_step (ap, bp, n, s, hgcd_hook, M, tp);
diff --git a/mpn/powerpc64/mode64/aorsmul_1.asm b/mpn/powerpc64/mode64/aorsmul_1.asm
index 658a2d941..4b843a044 100644
--- a/mpn/powerpc64/mode64/aorsmul_1.asm
+++ b/mpn/powerpc64/mode64/aorsmul_1.asm
@@ -54,7 +54,7 @@ ifdef(`OPERATION_submul_1',`
 ')
 MULFUNC_PROLOGUE(mpn_addmul_1 mpn_submul_1)
-
+
 ASM_START()
 PROLOGUE(func_nc)
 EPILOGUE()
diff --git a/mpn/s390_32/lshift.asm b/mpn/s390_32/lshift.asm
index 335a5f77a..17e52655f 100644
--- a/mpn/s390_32/lshift.asm
+++ b/mpn/s390_32/lshift.asm
@@ -126,7 +126,7 @@ L(top):	l	%r10, 16(up)
 L(end):	l	%r10, 16(up)
 	sll	%r10, 0(cnt)
 	st	%r10, 12(rp)
-
+
 	lr	%r2, %r12
 	lm	%r6, %r12, 24(%r15)
 	br	%r14
diff --git a/mpn/s390_32/lshiftc.asm b/mpn/s390_32/lshiftc.asm
index b42bc715b..9bdd0d798 100644
--- a/mpn/s390_32/lshiftc.asm
+++ b/mpn/s390_32/lshiftc.asm
@@ -138,7 +138,7 @@ L(end):	l	%r10, 16(up)
 	sll	%r10, 0(cnt)
 	xr	%r10, %r13
 	st	%r10, 12(rp)
-
+
 	lr	%r2, %r12
 	lm	%r6, %r13, 24(%r15)
 	br	%r14
diff --git a/mpn/s390_32/rshift.asm b/mpn/s390_32/rshift.asm
index ec32fa764..becbe1893 100644
--- a/mpn/s390_32/rshift.asm
+++ b/mpn/s390_32/rshift.asm
@@ -120,7 +120,7 @@ L(top):	l	%r11, 0(up)
 L(end):	l	%r11, 0(up)
 	srl	%r11, 0(cnt)
 	st	%r11, 0(rp)
-
+
 	lr	%r2, %r12
 	lm	%r6, %r12, 24(%r15)
 	br	%r14
diff --git a/mpn/x86/atom/lshift.asm b/mpn/x86/atom/lshift.asm
index d8cb8b505..1005cce59 100644
--- a/mpn/x86/atom/lshift.asm
+++ b/mpn/x86/atom/lshift.asm
@@ -160,7 +160,7 @@ deflit(`FRAME',4)
 	shr	$2, %eax		C (size + 3) / 4
 	and	$3, %edx		C (size - 1) % 4
 	jz	L(goloop)		C jmp if size == 1 (mod 4)
-	shr	%edx
+	shr	%edx
 	jnc	L(odd)			C jum if size == 3 (mod 4)
 	add	%ecx, %ecx
@@ -173,7 +173,7 @@ deflit(`FRAME',4)
 	jnz	L(goloop)		C jump if size == 0 (mod 4)
 L(odd):	lea	-8(up), up
 	lea	-8(rp), rp
-	jmp	L(sentry)		C reached if size == 2 or 3 (mod 4)
+	jmp	L(sentry)		C reached if size == 2 or 3 (mod 4)
 L(sloop):
 	adc	%ecx, %ecx
diff --git a/mpn/x86/atom/sse2/mul_1.asm b/mpn/x86/atom/sse2/mul_1.asm
index dd9b95366..5cd86caec 100644
--- a/mpn/x86/atom/sse2/mul_1.asm
+++ b/mpn/x86/atom/sse2/mul_1.asm
@@ -62,7 +62,7 @@ EPILOGUE()
 PROLOGUE(mpn_mul_1)
 	pxor	%mm6, %mm6
 L(ent):	push	%esi		FRAME_pushl()
-	mov	PARAM_SRC, up
+	mov	PARAM_SRC, up
 	mov	PARAM_SIZE, %eax	C size
 	movd	PARAM_MUL, %mm7
 	movd	(up), %mm0
diff --git a/mpn/x86/bdiv_dbm1c.asm b/mpn/x86/bdiv_dbm1c.asm
index 201ef173d..ac9faf270 100644
--- a/mpn/x86/bdiv_dbm1c.asm
+++ b/mpn/x86/bdiv_dbm1c.asm
@@ -24,10 +24,10 @@ C P5
 C P6 model 0-8,10-12)
 C P6 model 9  (Banias)
 C P6 model 13 (Dothan)        5.1
-C P4 model 0  (Willamette)
+C P4 model 0  (Willamette)
 C P4 model 1  (?)
 C P4 model 2  (Northwood)     13.67
-C P4 model 3  (Prescott)
+C P4 model 3  (Prescott)
 C P4 model 4  (Nocona)
 C Intel Atom
 C AMD K6
diff --git a/mpn/x86/bdiv_q_1.asm b/mpn/x86/bdiv_q_1.asm
index 2528d01f7..7f344ab57 100644
--- a/mpn/x86/bdiv_q_1.asm
+++ b/mpn/x86/bdiv_q_1.asm
@@ -30,7 +30,7 @@ C K6      14.0
 C K7      12.0
 C P4      42.0
-MULFUNC_PROLOGUE(mpn_bdiv_q_1 mpn_pi1_bdiv_q_1)
+MULFUNC_PROLOGUE(mpn_bdiv_q_1 mpn_pi1_bdiv_q_1)
 defframe(PARAM_SHIFT,  24)
 defframe(PARAM_INVERSE,20)
diff --git a/mpn/x86/k7/addlsh1_n.asm b/mpn/x86/k7/addlsh1_n.asm
index e5163b676..05df4a740 100644
--- a/mpn/x86/k7/addlsh1_n.asm
+++ b/mpn/x86/k7/addlsh1_n.asm
@@ -44,14 +44,14 @@ C AMD K8
 C This is a basic addlsh1_n for k7, atom, and perhaps some other x86-32
 C processors.  It uses 2*3-way unrolling, for good reasons.  Unfortunately,
 C that means we need an initial magic multiply.
-C
+C
 C It is not clear how to do sublsh1_n or rsblsh1_n using the same pattern.  We
 C cannot do rsblsh1_n since we feed carry from the shift blocks to the
 C add/subtract blocks, which is right for addition but reversed for
 C subtraction.  We could perhaps do sublsh1_n, with some extra move insns,
 C without losing any time, since we're not issue limited but carry recurrency
 C latency.
-C
+C
 C Breaking carry recurrency might be a good idea.  We would then need separate
 C registers for the shift carry and add/subtract carry, which in turn would
 C force is to 2*2-way unrolling.
@@ -120,7 +120,7 @@ ifdef(`CPU_P6',`
 L(exact):	incl	VAR_COUNT
 	jz	L(end)
-
+
 	ALIGN(16)
 L(top):
 ifdef(`CPU_P6',`
diff --git a/mpn/x86/k7/invert_limb.asm b/mpn/x86/k7/invert_limb.asm
index da6f28397..435fa96d0 100644
--- a/mpn/x86/k7/invert_limb.asm
+++ b/mpn/x86/k7/invert_limb.asm
@@ -60,7 +60,7 @@ ifdef(`DARWIN',`
 PROLOGUE(mpn_invert_limb)
 deflit(`FRAME', 0)
 	mov	PARAM_DIVISOR, %eax
-	C Avoid push/pop on k7.
+	C Avoid push/pop on k7.
 	sub	$8, %esp	FRAME_subl_esp(8)
 	mov	%ebx, (%esp)
 	mov	%edi, 4(%esp)
diff --git a/mpn/x86/k7/sublsh1_n.asm b/mpn/x86/k7/sublsh1_n.asm
index 41993f99a..965348586 100644
--- a/mpn/x86/k7/sublsh1_n.asm
+++ b/mpn/x86/k7/sublsh1_n.asm
@@ -30,7 +30,7 @@ C cycles/limb
 C P5
 C P6 model 0-8,10-12
 C P6 model 9  (Banias)
-C P6 model 13 (Dothan)
+C P6 model 13 (Dothan)
 C P4 model 0  (Willamette)
 C P4 model 1  (?)
 C P4 model 2  (Northwood)
@@ -38,12 +38,12 @@ C P4 model 3  (Prescott)
 C P4 model 4  (Nocona)
 C Intel Atom                  6.75
 C AMD K6
-C AMD K7
+C AMD K7
 C AMD K8
 C This is a basic sublsh1_n for k7, atom, and perhaps some other x86-32
 C processors.  It uses 2*4-way unrolling, for good reasons.
-C
+C
 C Breaking carry recurrency might be a good idea.  We would then need separate
 C registers for the shift carry and add/subtract carry, which in turn would
 C force is to 2*2-way unrolling.
@@ -114,7 +114,7 @@ ifdef(`CPU_P6',`
 	adc	%ebp, %ebp
 	rcr	%edx			C restore 1st saved carry bit
-
+
 	sbb	%eax, (rp)
 	sbb	%ebx, 4(rp)
 	sbb	%ecx, 8(rp)
diff --git a/mpn/x86/p6/bdiv_q_1.asm b/mpn/x86/p6/bdiv_q_1.asm
index 3a8733a0d..0ffbc78e4 100644
--- a/mpn/x86/p6/bdiv_q_1.asm
+++ b/mpn/x86/p6/bdiv_q_1.asm
@@ -25,7 +25,7 @@ include(`../config.m4')
 C          odd    even   divisor
 C P6:     10.0    12.0   cycles/limb
-C MULFUNC_PROLOGUE(mpn_bdiv_q_1 mpn_pi1_bdiv_q_1)
+C MULFUNC_PROLOGUE(mpn_bdiv_q_1 mpn_pi1_bdiv_q_1)
 C The odd case is basically the same as mpn_modexact_1_odd, just with an
 C extra store, and it runs at the same 10 cycles which is the dependent
@@ -269,7 +269,7 @@ ifdef(`PIC',`
 	imull	%edx, %eax		C inv*inv*d
 	subl	%eax, %ebp		C inv = 2*inv - inv*inv*d
-
+
 	jmp	L(common)
 EPILOGUE()
diff --git a/mpn/x86/pentium/bdiv_q_1.asm b/mpn/x86/pentium/bdiv_q_1.asm
index 965173d1c..7e84fc817 100644
--- a/mpn/x86/pentium/bdiv_q_1.asm
+++ b/mpn/x86/pentium/bdiv_q_1.asm
@@ -27,7 +27,7 @@ C          odd    even
 C P54:    24.5    30.5   cycles/limb
 C P55:    23.0    28.0
-MULFUNC_PROLOGUE(mpn_bdiv_q_1 mpn_pi1_bdiv_q_1)
+MULFUNC_PROLOGUE(mpn_bdiv_q_1 mpn_pi1_bdiv_q_1)
 C The P55 speeds noted above, 23 cycles odd or 28 cycles even, are as
 C expected.  On P54 in the even case the shrdl pairing nonsense (see
diff --git a/mpn/x86_64/div_qr_2n_pi1.asm b/mpn/x86_64/div_qr_2n_pi1.asm
index 9f23012da..c28d0a02c 100644
--- a/mpn/x86_64/div_qr_2n_pi1.asm
+++ b/mpn/x86_64/div_qr_2n_pi1.asm
@@ -44,7 +44,7 @@ C TODO
 C  * Store qh in the same stack slot as di_param, instead of pushing
 C    it.  (we could put it in register %rbp, but then we would need to
 C    save and restore that instead, which doesn't seem like a win).
-
+
 ASM_START()
 	TEXT
 	ALIGN(16)
@@ -56,7 +56,7 @@ PROLOGUE(mpn_div_qr_2n_pi1)
 	push	%r13
 	push	%r12
 	push	%rbx
-
+
 	mov	-16(up, un, 8), u1
 	mov	-8(up, un, 8), u2
@@ -135,5 +135,5 @@ L(fix):	C Unlikely update. u2 >= d1
 	inc	t1
 	sub	d0, u1
 	sbb	d1, u2
-	jmp	L(bck)
+	jmp	L(bck)
 EPILOGUE()
diff --git a/mpn/x86_64/div_qr_2u_pi1.asm b/mpn/x86_64/div_qr_2u_pi1.asm
index cfc7712d5..bdb64c148 100644
--- a/mpn/x86_64/div_qr_2u_pi1.asm
+++ b/mpn/x86_64/div_qr_2u_pi1.asm
@@ -66,7 +66,7 @@ deflit(`FRAME', 56)
 	movl	shift_param, R32(%rcx)
 	C FIXME: Different code for SHLD_SLOW
-
+
 	xor	R32(u2), R32(u2)
 	mov	8(up, un, 8), u1
 	shld	%cl, u1, u2
@@ -173,7 +173,7 @@ L(fix):	C Unlikely update. u2 >= d1
 	inc	t1
 	sub	d0, u1
 	sbb	d1, u2
-	jmp	L(bck)
+	jmp	L(bck)
 C Duplicated, just jumping back to a different address.
 L(fix_qh):	C Unlikely update. u2 >= d1
@@ -185,5 +185,5 @@ L(fix_qh):	C Unlikely update. u2 >= d1
 	inc	t1
 	sub	d0, u1
 	sbb	d1, u2
-	jmp	L(bck_qh)
+	jmp	L(bck_qh)
 EPILOGUE()
diff --git a/mpn/x86_64/mod_1_1.asm b/mpn/x86_64/mod_1_1.asm
index 6b233e074..56f708a75 100644
--- a/mpn/x86_64/mod_1_1.asm
+++ b/mpn/x86_64/mod_1_1.asm
@@ -51,7 +51,7 @@ C Note: This implementation needs B1modb only when cnt > 0
 C The iteration is almost as follows,
 C
 C	r_2 B^3 + r_1 B^2 + r_0 B + u = r_1 B2modb + (r_0 + r_2 B2mod) B + u
-C
+C
 C where r2 is a single bit represented as a mask.  But to make sure that the
 C result fits in two limbs and a bit, carry from the addition
 C
@@ -206,7 +206,7 @@ ifdef(`SHLD_SLOW',`
 ')
 	imul	%rdx, %r8
 	shr	R8(%rcx), %r8
-	mov	%r8, 16(%rbx)		C store B1modb
+	mov	%r8, 16(%rbx)		C store B1modb
 L(z):	pop	%r12
 	pop	%rbx
diff --git a/mpz/jacobi.c b/mpz/jacobi.c
index afd9a49b4..8bfb2e92b 100644
--- a/mpz/jacobi.c
+++ b/mpz/jacobi.c
@@ -110,7 +110,7 @@ mpz_jacobi (mpz_srcptr a, mpz_srcptr b)
       result_bit1 ^= JACOBI_N1B_BIT1(blow);
       asize = -asize;
     }
-
+
   JACOBI_STRIP_LOW_ZEROS (result_bit1, blow, asrcp, asize, alow);
   /* Ensure asize >= bsize. Take advantage of the generalized
@@ -147,7 +147,7 @@ mpz_jacobi (mpz_srcptr a, mpz_srcptr b)
       result_bit1 ^= JACOBI_RECIP_UU_BIT1 (alow, blow);
     }
-
+
   if (bsize == 1)
     {
       result_bit1 ^= JACOBI_TWOS_U_BIT1(btwos, alow);
@@ -165,7 +165,7 @@ mpz_jacobi (mpz_srcptr a, mpz_srcptr b)
      % B, but when A is much larger than B, we have to allocate space
      for the large quotient.  We use the same area, pointed to by bp,
     for both the quotient A/B and the working copy of B. */
-
+
   TMP_MARK;
   if (asize >= 2*bsize)
@@ -189,7 +189,7 @@ mpz_jacobi (mpz_srcptr a, mpz_srcptr b)
 	  result_bit1 ^= JACOBI_TWOS_U_BIT1(btwos, alow);
 	  ASSERT_NOCARRY (mpn_rshift (bp, bsrcp, bsize, btwos));
-	  bsize -= (ap[bsize-1] | bp[bsize-1]) == 0;
+	  bsize -= (ap[bsize-1] | bp[bsize-1]) == 0;
 	}
       else
 	MPN_COPY (bp, bsrcp, bsize);
diff --git a/tests/cxx/t-ops2.cc b/tests/cxx/t-ops2.cc
index 9a6e7e020..4967ed208 100644
--- a/tests/cxx/t-ops2.cc
+++ b/tests/cxx/t-ops2.cc
@@ -148,18 +148,18 @@ void checkqf (){
   CHECK_SI(T,0,3,*);
   CHECK_ALL_COMPARISONS(T,5.,2);
   CHECK_ALL_SIGNS_COMPARISONS(T,11.,3);
-  CHECK_MPZ(T,5,-2,<);
-  CHECK_MPZ(T,5,-2,>);
+  CHECK_MPZ(T,5,-2,<);
+  CHECK_MPZ(T,5,-2,>);
   CHECK_MPZ(T,5,-2,<=);
   CHECK_MPZ(T,5,-2,>=);
   CHECK_MPZ(T,5,-2,==);
   CHECK_MPZ(T,5,-2,!=);
-  CHECK_MPZ(T,0,0,<);
-  CHECK_MPZ(T,0,0,>);
-  CHECK_MPZ(T,0,0,<=);
-  CHECK_MPZ(T,0,0,>=);
-  CHECK_MPZ(T,0,0,==);
-  CHECK_MPZ(T,0,0,!=);
+  CHECK_MPZ(T,0,0,<);
+  CHECK_MPZ(T,0,0,>);
+  CHECK_MPZ(T,0,0,<=);
+  CHECK_MPZ(T,0,0,>=);
+  CHECK_MPZ(T,0,0,==);
+  CHECK_MPZ(T,0,0,!=);
   ASSERT_ALWAYS(T(6)<<2==6.*4);
   ASSERT_ALWAYS(T(6)>>2==6./4);
   ASSERT_ALWAYS(T(-13)<<2==-13.*4);
@@ -217,18 +217,18 @@ void checkf (){
   CHECK_MPQ(mpf_class,-5.5,-2.25,-);
   CHECK_MPQ(mpf_class,-5.5,-2.25,*);
   CHECK_MPQ(mpf_class,-5.25,-0.5,/);
-  CHECK_MPQ(mpf_class,5,-2,<);
-  CHECK_MPQ(mpf_class,5,-2,>);
-  CHECK_MPQ(mpf_class,5,-2,<=);
-  CHECK_MPQ(mpf_class,5,-2,>=);
-  CHECK_MPQ(mpf_class,5,-2,==);
-  CHECK_MPQ(mpf_class,5,-2,!=);
-  CHECK_MPQ(mpf_class,0,0,<);
-  CHECK_MPQ(mpf_class,0,0,>);
-  CHECK_MPQ(mpf_class,0,0,<=);
-  CHECK_MPQ(mpf_class,0,0,>=);
-  CHECK_MPQ(mpf_class,0,0,==);
-  CHECK_MPQ(mpf_class,0,0,!=);
+  CHECK_MPQ(mpf_class,5,-2,<);
+  CHECK_MPQ(mpf_class,5,-2,>);
+  CHECK_MPQ(mpf_class,5,-2,<=);
+  CHECK_MPQ(mpf_class,5,-2,>=);
+  CHECK_MPQ(mpf_class,5,-2,==);
+  CHECK_MPQ(mpf_class,5,-2,!=);
+  CHECK_MPQ(mpf_class,0,0,<);
+  CHECK_MPQ(mpf_class,0,0,>);
+  CHECK_MPQ(mpf_class,0,0,<=);
+  CHECK_MPQ(mpf_class,0,0,>=);
+  CHECK_MPQ(mpf_class,0,0,==);
+  CHECK_MPQ(mpf_class,0,0,!=);
 }
 int
diff --git a/tests/devel/try.c b/tests/devel/try.c
index bf09dd829..7ccb9de0b 100644
--- a/tests/devel/try.c
+++ b/tests/devel/try.c
@@ -459,7 +459,7 @@ validate_bdiv_q_1
   refmpn_mul_1 (tp, dst, size, divisor);
   /* Set ignored low bits */
-  tp[0] |= (src[0] & LOW_ZEROS_MASK (divisor));
+  tp[0] |= (src[0] & LOW_ZEROS_MASK (divisor));
   if (! refmpn_equal_anynail (tp, src, size))
     {
       printf ("Bdiv wrong: res * divisor != src (mod B^size)\n");
diff --git a/tests/mpn/t-hgcd_appr.c b/tests/mpn/t-hgcd_appr.c
index 912a1fde0..486b13061 100644
--- a/tests/mpn/t-hgcd_appr.c
+++ b/tests/mpn/t-hgcd_appr.c
@@ -261,7 +261,7 @@ one_test (mpz_t a, mpz_t b, int i)
 	       "after tp: %Mx\n"
 	       "expected: %Mx\n",
 	       hgcd_tp[hgcd_scratch], marker[3]);
-
+
       abort ();
     }
@@ -424,7 +424,7 @@ hgcd_appr_valid_p (mpz_t a, mpz_t b, mp_size_t res0,
   mp_bitcnt_t dbits, abits, margin;
   mpz_t appr_r0, appr_r1, t, q;
   struct hgcd_ref appr;
-
+
   if (!res0)
     {
       if (!res1)
@@ -433,7 +433,7 @@ hgcd_appr_valid_p (mpz_t a, mpz_t b, mp_size_t res0,
 	  fprintf (stderr, "mpn_hgcd_appr returned 1 when no reduction possible.\n");
 	  return 0;
 	}
-
+
   /* NOTE: No *_clear calls on error return, since we're going to
      abort anyway. */
   mpz_init (t);
@@ -441,7 +441,7 @@ hgcd_appr_valid_p (mpz_t a, mpz_t b, mp_size_t res0,
   hgcd_ref_init (&appr);
   mpz_init (appr_r0);
   mpz_init (appr_r1);
-
+
   if (mpz_size (ref_r0) <= s)
     {
       fprintf (stderr, "ref_r0 too small!!!: "); debug_mp (ref_r0, 16);
@@ -460,7 +460,7 @@ hgcd_appr_valid_p (mpz_t a, mpz_t b, mp_size_t res0,
       fprintf (stderr, "ref |r0 - r1| too large!!!: "); debug_mp (t, 16);
       return 0;
     }
-
+
   if (!res1)
     {
       mpz_set (appr_r0, a);
@@ -473,7 +473,7 @@ hgcd_appr_valid_p (mpz_t a, mpz_t b, mp_size_t res0,
       for (i = 0; i<2; i++)
 	{
 	  unsigned j;
-
+
 	  for (j = 0; j<2; j++)
 	    {
 	      mp_size_t mn = hgcd->n;
@@ -567,7 +567,7 @@ hgcd_appr_valid_p (mpz_t a, mpz_t b, mp_size_t res0,
       fprintf (stderr, "appr_r1: "); debug_mp (appr_r1, 16);
       fprintf (stderr, "ref_r1: "); debug_mp (ref_r1, 16);
-
+
       return 0;
     }
   mpz_clear (t);
diff --git a/tests/mpn/t-mod_1.c b/tests/mpn/t-mod_1.c
index f1966154d..2f86ba277 100644
--- a/tests/mpn/t-mod_1.c
+++ b/tests/mpn/t-mod_1.c
@@ -90,7 +90,7 @@ main (int argc, char **argv)
   rands = RANDS;
   mpz_init (a);
   mpz_init (b);
-
+
   for (i = 0; i < 300; i++)
     {
       mp_size_t asize;
diff --git a/tests/mpn/t-mulmid.c b/tests/mpn/t-mulmid.c
index ab224acea..a946aefe8 100644
--- a/tests/mpn/t-mulmid.c
+++ b/tests/mpn/t-mulmid.c
@@ -52,7 +52,7 @@ main (int argc, char **argv)
   bp = TMP_ALLOC_LIMBS (MAX_N);
   rp = TMP_ALLOC_LIMBS (MAX_N + 2);
   refp = TMP_ALLOC_LIMBS (MAX_N + 2);
-
+
   for (test = 0; test < COUNT; test++)
     {
       mp_size_t an, bn, rn;
diff --git a/tests/mpz/t-jac.c b/tests/mpz/t-jac.c
index 5d8cad177..34cd82e78 100644
--- a/tests/mpz/t-jac.c
+++ b/tests/mpz/t-jac.c
@@ -921,7 +921,7 @@ mpz_nextprime_step (mpz_ptr p, mpz_srcptr n, mpz_srcptr step_in)
   mpz_gcd (gcd, p, step);
   ASSERT_ALWAYS (mpz_cmp_ui (gcd, 1) == 0);
   mpz_clear (gcd);
-
+
   pn = SIZ(p);
   count_leading_zeros (cnt, PTR(p)[pn - 1]);
   nbits = pn * GMP_NUMB_BITS - (cnt - GMP_NAIL_BITS);
@@ -1016,7 +1016,7 @@ check_large_quotients (void)
       mpz_set_ui (op1, 0);
       mpz_urandomb (bs, rands, 32);
       mpz_urandomb (bs, rands, mpz_get_ui (bs) % 10 + 1);
-
+
       gcd_size = 1 + mpz_get_ui (bs);
       if (gcd_size & 1)
 	{
diff --git a/tune/tune-gcd-p.c b/tune/tune-gcd-p.c
index 3c3815bd2..6d8863178 100644
--- a/tune/tune-gcd-p.c
+++ b/tune/tune-gcd-p.c
@@ -39,7 +39,7 @@ search (double *minp, double (*f)(void *, int), void *ctx, int start, int end)
   double y[4];
   int best_i;
-
+
   x[0] = start;
   x[3] = end;
@@ -60,7 +60,7 @@ search (double *minp, double (*f)(void *, int), void *ctx, int start, int end)
 #if 0
   printf("%d: %f, %d: %f, %d:, %f %d: %f\n",
 	 x[0], y[0], x[1], y[1], x[2], y[2], x[3], y[3]);
-#endif
+#endif
   for (best_i = 0, i = 1; i < 4; i++)
     if (y[i] < y[best_i])
       best_i = i;
diff --git a/tune/tuneup.c b/tune/tuneup.c
index 444e5e429..4cc75eed1 100644
--- a/tune/tuneup.c
+++ b/tune/tuneup.c
@@ -203,7 +203,7 @@ mp_size_t divrem_1_norm_threshold = MP_SIZE_T_MAX;
 mp_size_t divrem_1_unnorm_threshold = MP_SIZE_T_MAX;
 mp_size_t mod_1_norm_threshold = MP_SIZE_T_MAX;
 mp_size_t mod_1_unnorm_threshold = MP_SIZE_T_MAX;
-int mod_1_1p_method = 0;
+int mod_1_1p_method = 0;
 mp_size_t mod_1n_to_mod_1_1_threshold = MP_SIZE_T_MAX;
 mp_size_t mod_1u_to_mod_1_1_threshold = MP_SIZE_T_MAX;
 mp_size_t mod_1_1_to_mod_1_2_threshold = MP_SIZE_T_MAX;