Diffstat (limited to 'round_prec.c')
-rw-r--r--  round_prec.c  154
1 file changed, 24 insertions, 130 deletions
diff --git a/round_prec.c b/round_prec.c
index d938ffb84..514629143 100644
--- a/round_prec.c
+++ b/round_prec.c
@@ -25,129 +25,25 @@ MA 02111-1307, USA. */
#include "mpfr.h"
#include "mpfr-impl.h"
-/*
- * If flag = 0, puts in y the value of xp (with precision xprec and
- * sign 1 if negative=0, -1 otherwise) rounded to precision yprec and
- * direction rnd_mode. Supposes x is not zero nor NaN nor +/- Infinity
- * (i.e. *xp != 0).
- *
- * If inexp != NULL, computes the inexact flag of the rounding.
- * (In case of even rounding when rnd = GMP_RNDN, puts 2 or -2 in *inexp.)
- *
- * If flag = 1, just returns whether one should add 1 or not for rounding.
- *
- * Note: yprec may be < MPFR_PREC_MIN; in particular, it may be equal
- * to 1. In this case, the even rounding is done away from 0, which is
- * a natural generalization. Indeed, a number with 1-bit precision can
- * be seen as a denormalized number with more precision.
- */
-
-int
-mpfr_round_raw_generic(mp_limb_t *yp, mp_limb_t *xp, mp_prec_t xprec,
-                       int neg, mp_prec_t yprec, mp_rnd_t rnd_mode,
-                       int *inexp, int flag)
-{
-  mp_size_t xsize, nw;
-  mp_limb_t himask, lomask;
-  int rw, carry = 0;
-
-  xsize = (xprec-1)/BITS_PER_MP_LIMB + 1;
-
-  nw = yprec / BITS_PER_MP_LIMB;
-  rw = yprec & (BITS_PER_MP_LIMB - 1);
-
-  if (flag && !inexp && (rnd_mode==GMP_RNDZ || xprec <= yprec
-                         || (rnd_mode==GMP_RNDU && neg)
-                         || (rnd_mode==GMP_RNDD && neg==0)))
-    return 0;
-
-  if (rw)
-    {
-      nw++;
-      lomask = ((MP_LIMB_T_ONE << (BITS_PER_MP_LIMB - rw)) - MP_LIMB_T_ONE);
-      himask = ~lomask;
-    }
-  else
-    {
-      lomask = -1;
-      himask = -1;
-    }
-  MPFR_ASSERTD(nw >= 1);
-
-  if (xprec <= yprec)
-    { /* No rounding is necessary. */
-      /* if yp=xp, maybe an overlap: MPN_COPY_DECR is ok when src <= dst */
-      MPFR_ASSERTD(nw >= xsize);
-      if (inexp)
-        *inexp = 0;
-      if (flag)
-        return 0;
-      MPN_COPY_DECR(yp + (nw - xsize), xp, xsize);
-      MPN_ZERO(yp, nw - xsize);
-    }
-  else
-    {
-      mp_limb_t sb;
-
-      if ((rnd_mode == GMP_RNDU && neg) ||
-          (rnd_mode == GMP_RNDD && !neg))
-        rnd_mode = GMP_RNDZ;
-
-      if (inexp || rnd_mode != GMP_RNDZ)
-        {
-          mp_size_t k;
-
-          k = xsize - nw;
-          if (!rw)
-            k--;
-          MPFR_ASSERTD(k >= 0);
-          sb = xp[k] & lomask; /* First non-significant bits */
-          if (rnd_mode == GMP_RNDN)
-            {
-              mp_limb_t rbmask = MP_LIMB_T_ONE << (BITS_PER_MP_LIMB - rw - 1);
-              if (sb & rbmask) /* rounding bit */
-                sb &= ~rbmask; /* it is 1, clear it */
-              else
-                rnd_mode = GMP_RNDZ; /* it is 0, behave like rounding to 0 */
-            }
-          while (sb == 0 && k > 0)
-            sb = xp[--k];
-          if (rnd_mode == GMP_RNDN)
-            { /* rounding to nearest, with rounding bit = 1 */
-              if (sb == 0) /* Even rounding. */
-                {
-                  sb = xp[xsize - nw] & (himask ^ (himask << 1));
-                  if (inexp)
-                    *inexp = ((neg != 0) ^ (sb != 0))
-                             ? MPFR_EVEN_INEX : -MPFR_EVEN_INEX;
-                }
-              else /* sb != 0 */
-                {
-                  if (inexp)
-                    *inexp = (neg == 0) ? 1 : -1;
-                }
-            }
-          else if (inexp)
-            *inexp = sb == 0 ? 0
-                     : (((neg != 0) ^ (rnd_mode != GMP_RNDZ)) ? 1 : -1);
-        }
-      else
-        sb = 0;
-
-      if (flag)
-        return sb != 0 && rnd_mode != GMP_RNDZ;
-
-      if (sb != 0 && rnd_mode != GMP_RNDZ)
-        carry = mpn_add_1(yp, xp + xsize - nw, nw,
-                          rw ? MP_LIMB_T_ONE << (BITS_PER_MP_LIMB - rw) : 1);
-      else
-        MPN_COPY_INCR(yp, xp + xsize - nw, nw);
-
-      yp[0] &= himask;
-    }
-
-  return carry;
-}
+#define mpfr_round_raw_generic mpfr_round_raw
+#define flag 0
+#define use_inexp 1
+#include "round_raw_generic.c"
+
+#define mpfr_round_raw_generic mpfr_round_raw_2
+#define flag 1
+#define use_inexp 0
+#include "round_raw_generic.c"
+
+#define mpfr_round_raw_generic mpfr_round_raw_3
+#define flag 1
+#define use_inexp 1
+#include "round_raw_generic.c"
+
+#define mpfr_round_raw_generic mpfr_round_raw_4
+#define flag 0
+#define use_inexp 0
+#include "round_raw_generic.c"
int
mpfr_prec_round (mpfr_ptr x, mp_prec_t prec, mp_rnd_t rnd_mode)
@@ -168,7 +64,7 @@ mpfr_prec_round (mpfr_ptr x, mp_prec_t prec, mp_rnd_t rnd_mode)
      /* Realloc mantissa */
      mp_ptr tmp = (mp_ptr) (*__gmp_reallocate_func)
        (MPFR_GET_REAL_PTR(x), MPFR_ALLOC_SIZE(ow), MPFR_ALLOC_SIZE(nw));
-      MPFR_SET_MANT_PTR(x, tmp); /* mant pre must be set before alloc size */
+      MPFR_SET_MANT_PTR(x, tmp); /* mant ptr must be set before alloc size */
      MPFR_SET_ALLOC_SIZE(x, nw); /* new number of allocated limbs */
    }
@@ -252,6 +148,7 @@ mpfr_can_round_raw (mp_limb_t *bp, mp_size_t bn, int neg, mp_exp_t err0,
/* can round only in rounding to the nearest and err0 >= prec + 2 */
}
+ MPFR_ASSERT_SIGN(neg);
neg = MPFR_IS_NEG_SIGN(neg);
/* if the error is smaller than ulp(b), then anyway it will propagate
@@ -261,15 +158,12 @@ mpfr_can_round_raw (mp_limb_t *bp, mp_size_t bn, int neg, mp_exp_t err0,
  /* warning: if k = m*BITS_PER_MP_LIMB, consider limb m-1 and not m */
  k = (err - 1) / BITS_PER_MP_LIMB;
-  s = err % BITS_PER_MP_LIMB;
-  if (s)
-    s = BITS_PER_MP_LIMB - s;
+  MPFR_UNSIGNED_MINUS_MODULO(s, err);
+
  /* the error corresponds to bit s in limb k, the most significant limb
     being limb 0 */
  k1 = (prec - 1) / BITS_PER_MP_LIMB;
-  s1 = prec % BITS_PER_MP_LIMB;
-  if (s1)
-    s1 = BITS_PER_MP_LIMB - s1;
+  MPFR_UNSIGNED_MINUS_MODULO(s1, prec);
  /* the last significant bit is bit s1 in limb k1 */
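
The four #define/#include blocks in the first hunk are a preprocessor "template" idiom: round_raw_generic.c is included once per (flag, use_inexp) combination, and the mpfr_round_raw_generic macro renames the function it defines, so one generic body yields mpfr_round_raw, mpfr_round_raw_2, mpfr_round_raw_3 and mpfr_round_raw_4. Presumably round_raw_generic.c undefines flag and use_inexp at its end, since they are re-#defined before each further inclusion. The sketch below shows the idiom in a minimal, self-contained form; the file and identifier names (scale_generic.c, scale_func, FACTOR, ...) are illustrative stand-ins, not MPFR's actual code.

/* ---- scale_generic.c: the "template" file, analogous to round_raw_generic.c.
   The includer must #define scale_func (name of the function to emit), FACTOR
   and FACTOR_IS_NEGATIVE; the file emits one specialized function and then
   undefines its configuration macros so it can be included again. ---- */
#ifndef scale_func
# error "scale_func must be defined before including scale_generic.c"
#endif

static long
scale_func (long x)
{
#if FACTOR_IS_NEGATIVE   /* compile-time variant selection, like flag/use_inexp */
  return -(x * FACTOR);
#else
  return x * FACTOR;
#endif
}

#undef scale_func
#undef FACTOR
#undef FACTOR_IS_NEGATIVE

/* ---- scale.c: instantiate the template twice, the way round_prec.c now
   instantiates mpfr_round_raw .. mpfr_round_raw_4. ---- */
#include <stdio.h>

#define scale_func scale_by_2
#define FACTOR 2
#define FACTOR_IS_NEGATIVE 0
#include "scale_generic.c"

#define scale_func scale_by_minus_3
#define FACTOR 3
#define FACTOR_IS_NEGATIVE 1
#include "scale_generic.c"

int
main (void)
{
  printf ("%ld %ld\n", scale_by_2 (10L), scale_by_minus_3 (10L)); /* 20 -30 */
  return 0;
}

The dispatch on flag/use_inexp happens entirely in the preprocessor, so each generated function is as specialized as a hand-written variant would be, with no run-time cost for sharing the generic source.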