author     tege <tege@gmplib.org>   2002-04-10 01:17:56 +0200
committer  tege <tege@gmplib.org>   2002-04-10 01:17:56 +0200
commit     60b3c48a4dcd9ba0c10ec0020854cee12cd51ee0 (patch)
tree       bce41ad4f97ac44abb3546bd45a6616193f2d7b8 /mpn
parent     22fd3eee4abec4feabaeeb56ac70360a5d76396c (diff)
download   gmp-60b3c48a4dcd9ba0c10ec0020854cee12cd51ee0.tar.gz
Nailify.
Update mp_size_t variables to use `n' suffix instead of `size' suffix.
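A note for readers new to nails: with GMP_NAIL_BITS != 0 each stored limb keeps its top GMP_NAIL_BITS bits zero and carries only GMP_NUMB_BITS of data. The pattern this patch wraps around udiv_qrnnd is to shift the divisor and each data limb up by the nail width, divide, then shift the remainder back down. The sketch below is a minimal, self-contained illustration of that pattern, not GMP code: the names NAIL_BITS, NUMB_BITS, udiv_2by1 and nail_divrem_1 are local to the example, it assumes a GCC/Clang-style unsigned __int128, and it ignores the normalization and preinverted-divisor paths of the real mpn_divrem_1.

#include <stdint.h>
#include <stdio.h>

#define LIMB_BITS 64
#define NAIL_BITS 4                        /* example nail width, >= 1 */
#define NUMB_BITS (LIMB_BITS - NAIL_BITS)  /* data bits per stored limb */

/* Stand-in for udiv_qrnnd: divide the two-limb value (n1,n0) by d,
   assuming the quotient fits in one limb (n1 < d).  Real GMP uses
   inline asm or longlong.h here; this is just an illustration. */
static void
udiv_2by1 (uint64_t *q, uint64_t *r, uint64_t n1, uint64_t n0, uint64_t d)
{
  unsigned __int128 n = ((unsigned __int128) n1 << 64) | n0;
  *q = (uint64_t) (n / d);
  *r = (uint64_t) (n % d);
}

/* Divide the nailed number {up, un} (least significant limb first, each
   limb < 2^NUMB_BITS) by a divisor d that also fits in NUMB_BITS,
   writing nailed quotient limbs to qp and returning the remainder. */
static uint64_t
nail_divrem_1 (uint64_t *qp, const uint64_t *up, int un, uint64_t d)
{
  uint64_t r = 0, q;
  int i;

  d <<= NAIL_BITS;                /* pre-shift the divisor once */
  for (i = un - 1; i >= 0; i--)
    {
      /* The two-limb dividend (r, up[i] << NAIL_BITS) divided by the
         shifted d gives the true quotient limb; the remainder comes back
         scaled by 2^NAIL_BITS, so shift it down again. */
      udiv_2by1 (&q, &r, r, up[i] << NAIL_BITS, d);
      r >>= NAIL_BITS;
      qp[i] = q;                  /* already < 2^NUMB_BITS, nail is clear */
    }
  return r;
}

int
main (void)
{
  uint64_t up[2] = { 123, 3 };    /* 3*2^60 + 123, least significant limb first */
  uint64_t qp[2];
  uint64_t r = nail_divrem_1 (qp, up, 2, 1000);

  printf ("q = {%llu, %llu}  r = %llu\n",
          (unsigned long long) qp[1], (unsigned long long) qp[0],
          (unsigned long long) r);
  return 0;
}

Shifting d once up front keeps the inner loop identical to the nail-free case apart from a single extra right shift of the remainder per limb, which is why the hunks below are mostly one-line additions of `r >>= GMP_NAIL_BITS`.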
Diffstat (limited to 'mpn')
-rw-r--r--  mpn/generic/divrem_1.c | 98
-rw-r--r--  mpn/generic/divrem_2.c | 50
2 files changed, 94 insertions, 54 deletions
diff --git a/mpn/generic/divrem_1.c b/mpn/generic/divrem_1.c
index d2d6c071d..eda1487cd 100644
--- a/mpn/generic/divrem_1.c
+++ b/mpn/generic/divrem_1.c
@@ -78,52 +78,57 @@ MA 02111-1307, USA. */
    the dependent chain.  */
 
 mp_limb_t
-mpn_divrem_1 (mp_ptr qp, mp_size_t xsize,
-              mp_srcptr ap, mp_size_t size, mp_limb_t d)
+mpn_divrem_1 (mp_ptr qp, mp_size_t qxn,
+              mp_srcptr up, mp_size_t un, mp_limb_t d)
 {
-  mp_size_t total_size;
+  mp_size_t n;
   mp_size_t i;
   mp_limb_t n1, n0;
   mp_limb_t r = 0;
 
-  ASSERT (xsize >= 0);
-  ASSERT (size >= 0);
+  ASSERT (qxn >= 0);
+  ASSERT (un >= 0);
   ASSERT (d != 0);
-  /* FIXME: What's the correct overlap rule when xsize!=0? */
-  ASSERT (MPN_SAME_OR_SEPARATE_P (qp+xsize, ap, size));
+  /* FIXME: What's the correct overlap rule when qxn!=0? */
+  ASSERT (MPN_SAME_OR_SEPARATE_P (qp+qxn, up, un));
 
-  total_size = size + xsize;
-  if (total_size == 0)
+  n = un + qxn;
+  if (n == 0)
     return 0;
 
-  qp += (total_size - 1);   /* dest high limb */
+  d <<= GMP_NAIL_BITS;
+
+  qp += (n - 1);   /* Make qp point at most significant quotient limb */
 
   if ((d & MP_LIMB_T_HIGHBIT) != 0)
     {
-      if (size != 0)
+      if (un != 0)
        {
-         /* High quotient limb is 0 or 1, and skip a divide step. */
-         mp_limb_t q;
-         r = ap[size-1];
+         /* High quotient limb is 0 or 1, skip a divide step. */
+         mp_limb_t q;
+         r = up[un - 1] << GMP_NAIL_BITS;
          q = (r >= d);
          *qp-- = q;
          r -= (d & -q);
-         total_size--;
-         size--;
+         r >>= GMP_NAIL_BITS;
+         n--;
+         un--;
        }
 
-      if (BELOW_THRESHOLD (total_size, DIVREM_1_NORM_THRESHOLD))
+      if (BELOW_THRESHOLD (n, DIVREM_1_NORM_THRESHOLD))
        {
        plain:
-         for (i = size-1; i >= 0; i--)
+         for (i = un - 1; i >= 0; i--)
            {
-             n0 = ap[i];
+             n0 = up[i] << GMP_NAIL_BITS;
              udiv_qrnnd (*qp, r, r, n0, d);
+             r >>= GMP_NAIL_BITS;
              qp--;
            }
-         for (i = 0; i < xsize; i++)
+         for (i = qxn - 1; i >= 0; i--)
            {
              udiv_qrnnd (*qp, r, r, 0, d);
+             r >>= GMP_NAIL_BITS;
              qp--;
            }
          return r;
@@ -134,15 +139,17 @@ mpn_divrem_1 (mp_ptr qp, mp_size_t xsize,
        {
          mp_limb_t dinv;
          invert_limb (dinv, d);
-         for (i = size-1; i >= 0; i--)
+         for (i = un - 1; i >= 0; i--)
            {
-             n0 = ap[i];
+             n0 = up[i] << GMP_NAIL_BITS;
              udiv_qrnnd_preinv (*qp, r, r, n0, d, dinv);
+             r >>= GMP_NAIL_BITS;
              qp--;
            }
-         for (i = 0; i < xsize; i++)
+         for (i = qxn - 1; i >= 0; i--)
            {
              udiv_qrnnd_preinv (*qp, r, r, 0, d, dinv);
+             r >>= GMP_NAIL_BITS;
              qp--;
            }
          return r;
@@ -150,26 +157,27 @@ mpn_divrem_1 (mp_ptr qp, mp_size_t xsize,
     }
   else
     {
-      int norm;
+      /* Most significant bit of divisor == 0.  */
+      int norm;
 
       /* Skip a division if high < divisor (high quotient 0).  Testing here
         before before normalizing will still skip as often as possible.  */
-      if (size != 0)
+      if (un != 0)
        {
-         n1 = ap[size-1];
+         n1 = up[un - 1] << GMP_NAIL_BITS;
          if (n1 < d)
            {
-             r = n1;
+             r = n1 >> GMP_NAIL_BITS;
              *qp-- = 0;
-             total_size--;
-             if (total_size == 0)
+             n--;
+             if (n == 0)
                return r;
-             size--;
+             un--;
            }
        }
 
       if (! UDIV_NEEDS_NORMALIZATION
-         && BELOW_THRESHOLD (total_size, DIVREM_1_UNNORM_THRESHOLD))
+         && BELOW_THRESHOLD (n, DIVREM_1_UNNORM_THRESHOLD))
        goto plain;
 
      count_leading_zeros (norm, d);
@@ -179,25 +187,28 @@ mpn_divrem_1 (mp_ptr qp, mp_size_t xsize,
 #define EXTRACT   ((n1 << norm) | (n0 >> (BITS_PER_MP_LIMB - norm)))
 
      if (UDIV_NEEDS_NORMALIZATION
-         && BELOW_THRESHOLD (total_size, DIVREM_1_UNNORM_THRESHOLD))
+         && BELOW_THRESHOLD (n, DIVREM_1_UNNORM_THRESHOLD))
        {
-         if (size != 0)
+         if (un != 0)
            {
-             n1 = ap[size-1];
+             n1 = up[un - 1] << GMP_NAIL_BITS;
              r |= (n1 >> (BITS_PER_MP_LIMB - norm));
-             for (i = size-2; i >= 0; i--)
+             for (i = un - 2; i >= 0; i--)
                {
-                 n0 = ap[i];
+                 n0 = up[i] << GMP_NAIL_BITS;
                  udiv_qrnnd (*qp, r, r, EXTRACT, d);
+                 r >>= GMP_NAIL_BITS;
                  qp--;
                  n1 = n0;
                }
              udiv_qrnnd (*qp, r, r, n1 << norm, d);
+             r >>= GMP_NAIL_BITS;
              qp--;
            }
-         for (i = 0; i < xsize; i++)
+         for (i = qxn - 1; i >= 0; i--)
            {
              udiv_qrnnd (*qp, r, r, 0, d);
+             r >>= GMP_NAIL_BITS;
              qp--;
            }
          return r >> norm;
@@ -206,23 +217,26 @@ mpn_divrem_1 (mp_ptr qp, mp_size_t xsize,
        {
          mp_limb_t dinv;
          invert_limb (dinv, d);
-         if (size != 0)
+         if (un != 0)
            {
-             n1 = ap[size-1];
+             n1 = up[un - 1] << GMP_NAIL_BITS;
              r |= (n1 >> (BITS_PER_MP_LIMB - norm));
-             for (i = size-2; i >= 0; i--)
+             for (i = un - 2; i >= 0; i--)
                {
-                 n0 = ap[i];
+                 n0 = up[i] << GMP_NAIL_BITS;
                  udiv_qrnnd_preinv (*qp, r, r, EXTRACT, d, dinv);
+                 r >>= GMP_NAIL_BITS;
                  qp--;
                  n1 = n0;
                }
              udiv_qrnnd_preinv (*qp, r, r, n1 << norm, d, dinv);
+             r >>= GMP_NAIL_BITS;
              qp--;
            }
-         for (i = 0; i < xsize; i++)
+         for (i = qxn - 1; i >= 0; i--)
            {
              udiv_qrnnd_preinv (*qp, r, r, 0, d, dinv);
+             r >>= GMP_NAIL_BITS;
              qp--;
            }
          return r >> norm;
diff --git a/mpn/generic/divrem_2.c b/mpn/generic/divrem_2.c
index 47f7209a4..1052cb13d 100644
--- a/mpn/generic/divrem_2.c
+++ b/mpn/generic/divrem_2.c
@@ -63,7 +63,7 @@ MA 02111-1307, USA. */
 
 mp_limb_t
 mpn_divrem_2 (mp_ptr qp, mp_size_t qxn,
-             mp_ptr np, mp_size_t nsize,
+             mp_ptr np, mp_size_t nn,
              mp_srcptr dp)
 {
   mp_limb_t most_significant_q_limb = 0;
@@ -73,12 +73,12 @@ mpn_divrem_2 (mp_ptr qp, mp_size_t qxn,
   mp_limb_t d1inv;
   int use_preinv;
 
-  ASSERT (nsize >= 2);
+  ASSERT (nn >= 2);
   ASSERT (qxn >= 0);
   ASSERT (dp[1] & MP_LIMB_T_HIGHBIT);
-  ASSERT (! MPN_OVERLAP_P (qp, nsize-2+qxn, np, nsize) || qp+2 >= np);
+  ASSERT (! MPN_OVERLAP_P (qp, nn-2+qxn, np, nn) || qp+2 >= np);
 
-  np += nsize - 2;
+  np += nn - 2;
   d1 = dp[1];
   d0 = dp[0];
   n1 = np[1];
@@ -86,15 +86,21 @@ mpn_divrem_2 (mp_ptr qp, mp_size_t qxn,
 
   if (n1 >= d1 && (n1 > d1 || n0 >= d0))
     {
+#if GMP_NAIL_BITS == 0
      sub_ddmmss (n1, n0, n1, n0, d1, d0);
+#else
+      n0 = n0 - d0;
+      n1 = n1 - d1 - (n0 >> GMP_LIMB_BITS - 1);
+      n0 &= GMP_NUMB_MASK;
+#endif
      most_significant_q_limb = 1;
    }
 
-  use_preinv = ABOVE_THRESHOLD (qxn + nsize - 2, DIVREM_2_THRESHOLD);
+  use_preinv = ABOVE_THRESHOLD (qxn + nn - 2, DIVREM_2_THRESHOLD);
  if (use_preinv)
    invert_limb (d1inv, d1);
 
-  for (i = qxn + nsize - 2 - 1; i >= 0; i--)
+  for (i = qxn + nn - 2 - 1; i >= 0; i--)
    {
      mp_limb_t q;
      mp_limb_t r;
@@ -106,27 +112,35 @@ mpn_divrem_2 (mp_ptr qp, mp_size_t qxn,
 
      if (n1 == d1)
        {
-         /* Q should be either 111..111 or 111..110.  Need special treatment
+         /* Q should be either 111..111 or 111..110.  Need special handling
            of this rare case as normal division would give overflow.  */
-         q = ~(mp_limb_t) 0;
+         q = GMP_NUMB_MASK;
 
-         r = n0 + d1;
+         r = (n0 + d1) & GMP_NUMB_MASK;
          if (r < d1)           /* Carry in the addition? */
            {
+#if GMP_NAIL_BITS == 0
              add_ssaaaa (n1, n0, r - d0, np[0], 0, d0);
+#else
+             n0 = np[0] + d0;
+             n1 = (r - d0 + (n0 >> GMP_NUMB_BITS)) & GMP_NUMB_MASK;
+             n0 &= GMP_NUMB_MASK;
+#endif
              qp[i] = q;
              continue;
            }
          n1 = d0 - (d0 != 0);
-         n0 = -d0;
+         n0 = -d0 & GMP_NUMB_MASK;
        }
      else
        {
          if (use_preinv)
            udiv_qrnnd_preinv (q, r, n1, n0, d1, d1inv);
          else
-           udiv_qrnnd (q, r, n1, n0, d1);
-         umul_ppmm (n1, n0, d0, q);
+           udiv_qrnnd (q, r, n1, n0 << GMP_NAIL_BITS, d1 << GMP_NAIL_BITS);
+         r >>= GMP_NAIL_BITS;
+         umul_ppmm (n1, n0, d0, q << GMP_NAIL_BITS);
+         n0 >>= GMP_NAIL_BITS;
        }
 
      n2 = np[0];
@@ -137,14 +151,26 @@ mpn_divrem_2 (mp_ptr qp, mp_size_t qxn,
          /* The estimated Q was too large. */
          q--;
+#if GMP_NAIL_BITS == 0
          sub_ddmmss (n1, n0, n1, n0, 0, d0);
+#else
+         n0 = n0 - d0;
+         n1 = n1 - (n0 >> GMP_LIMB_BITS - 1);
+         n0 &= GMP_NUMB_MASK;
+#endif
          r += d1;
          if (r >= d1)          /* If not carry, test Q again. */
            goto q_test;
        }
 
      qp[i] = q;
+#if GMP_NAIL_BITS == 0
      sub_ddmmss (n1, n0, r, n2, n1, n0);
+#else
+      n0 = n2 - n0;
+      n1 = r - n1 - (n0 >> GMP_LIMB_BITS - 1);
+      n0 &= GMP_NUMB_MASK;
+#endif
    }
 
  np[1] = n1;
  np[0] = n0;
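The divrem_2.c hunks replace sub_ddmmss with open-coded two-limb subtraction when nails are enabled: because the operands keep their top nail bits clear, a borrow out of the low limb shows up as a set top bit in the wrapped difference, so `n0 >> GMP_LIMB_BITS - 1` recovers the borrow and a final mask restores the nail. The standalone check below illustrates why that works; the names (NAIL_BITS, sub_ddmmss_nails) are local to the example, and unsigned __int128 is used only for the reference computation, so this is a sketch rather than GMP's actual macros.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define LIMB_BITS 64
#define NAIL_BITS 4
#define NUMB_BITS (LIMB_BITS - NAIL_BITS)
#define NUMB_MASK ((~(uint64_t) 0) >> NAIL_BITS)

/* (h,l) = (ah,al) - (bh,bl) on nailed limbs, following the patch's #else
   arm.  Valid only when NAIL_BITS >= 1 and the overall result is
   non-negative, as in the call sites of the patch. */
static void
sub_ddmmss_nails (uint64_t *h, uint64_t *l,
                  uint64_t ah, uint64_t al, uint64_t bh, uint64_t bl)
{
  uint64_t n0 = al - bl;                      /* may wrap; borrow lands in the top bit */
  uint64_t n1 = ah - bh - (n0 >> (LIMB_BITS - 1));
  *l = n0 & NUMB_MASK;                        /* clear the nail again */
  *h = n1 & NUMB_MASK;
}

int
main (void)
{
  /* Compare against plain 128-bit arithmetic for a couple of cases. */
  uint64_t cases[][4] = {
    { 7, 5, 3, 9 },                           /* low limb needs a borrow */
    { NUMB_MASK, 0, 1, NUMB_MASK },           /* borrow ripples out of a zero low limb */
  };
  for (unsigned k = 0; k < 2; k++)
    {
      uint64_t ah = cases[k][0], al = cases[k][1];
      uint64_t bh = cases[k][2], bl = cases[k][3];
      uint64_t h, l;
      sub_ddmmss_nails (&h, &l, ah, al, bh, bl);
      unsigned __int128 a = ((unsigned __int128) ah << NUMB_BITS) | al;
      unsigned __int128 b = ((unsigned __int128) bh << NUMB_BITS) | bl;
      unsigned __int128 ref = a - b;          /* a >= b in both cases */
      assert (l == (uint64_t) (ref & NUMB_MASK));
      assert (h == (uint64_t) ((ref >> NUMB_BITS) & NUMB_MASK));
    }
  puts ("nail-aware two-limb subtraction agrees with the 128-bit reference");
  return 0;
}

The same top-bit trick drives every `(n0 >> GMP_LIMB_BITS - 1)` term in the patch, and it only works because the minuend's nail bits start out zero, which is why the plain sub_ddmmss and add_ssaaaa calls are kept behind `#if GMP_NAIL_BITS == 0`.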