author:    tege <tege@gmplib.org>  2002-04-05 21:46:16 +0200
committer: tege <tege@gmplib.org>  2002-04-05 21:46:16 +0200
commit:    ec8e4471edc756b9a4951496fe743e990fb79297 (patch)
tree:      ab258f722ea1d16af6484b3b83067e48a6f135ce /mpn/generic/addmul_1.c
parent:    d32c80190fcf89403def0e769692b2c4a5729690 (diff)
Nailify. Revamp non-nail code.
Diffstat (limited to 'mpn/generic/addmul_1.c')
-rw-r--r--  mpn/generic/addmul_1.c  |  93
1 file changed, 60 insertions(+), 33 deletions(-)
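
The function this commit revamps computes rp[0..n-1] += up[0..n-1] * vl and returns the most significant limb of the product, adjusted for carry-out from the addition (the new header comment in the diff below says the same). As a minimal reference model of that contract, a sketch assuming 64-bit limbs and a GCC/Clang-style unsigned __int128 in place of umul_ppmm (ref_addmul_1 is an illustrative name, not part of GMP):

#include <stdint.h>
#include <stddef.h>

/* Hypothetical reference model of the mpn_addmul_1 contract, not GMP code:
   rp[0..n-1] += up[0..n-1] * vl, returning the carry-out limb.  */
static uint64_t
ref_addmul_1 (uint64_t *rp, const uint64_t *up, size_t n, uint64_t vl)
{
  uint64_t cl = 0;                      /* carry limb between iterations */
  for (size_t i = 0; i < n; i++)
    {
      /* 64x64 -> 128-bit product plus old rp limb plus carry; the maximum,
         (2^64-1)^2 + 2*(2^64-1) = 2^128 - 1, cannot overflow 128 bits.  */
      unsigned __int128 t = (unsigned __int128) up[i] * vl + rp[i] + cl;
      rp[i] = (uint64_t) t;             /* low half back into rp */
      cl = (uint64_t) (t >> 64);        /* high half carries forward */
    }
  return cl;                            /* most significant product limb */
}
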
diff --git a/mpn/generic/addmul_1.c b/mpn/generic/addmul_1.c
index d5c18957b..f33f50318 100644
--- a/mpn/generic/addmul_1.c
+++ b/mpn/generic/addmul_1.c
@@ -1,8 +1,7 @@
-/* mpn_addmul_1, mpn_submul_1 -- multiply the S1_SIZE long limb vector
- pointed to by S1_PTR by S2_LIMB, add/subtract the S1_SIZE least
- significant limbs of the product to/from the limb vector pointed to by
- RES_PTR. Return the most significant limb of the product, adjusted for
- carry-out from the addition.
+/* mpn_addmul_1 -- multiply the N long limb vector pointed to by UP by VL,
+ add the N least significant limbs of the product to the limb vector
+ pointed to by RP. Return the most significant limb of the product,
+ adjusted for carry-out from the addition.
Copyright 1992, 1993, 1994, 1996, 2000, 2002 Free Software Foundation, Inc.
@@ -28,42 +27,70 @@ MA 02111-1307, USA. */
#include "longlong.h"
+#if GMP_NAIL_BITS == 0
+
mp_limb_t
-mpn_addmul_1 (register mp_ptr res_ptr,
- register mp_srcptr s1_ptr,
- mp_size_t s1_size,
- register mp_limb_t s2_limb)
+mpn_addmul_1 (mp_ptr rp, mp_srcptr up, mp_size_t n, mp_limb_t vl)
{
- register mp_limb_t cy_limb;
- register mp_size_t j;
- register mp_limb_t prod_high, prod_low;
- register mp_limb_t x;
-
- ASSERT (s1_size >= 1);
- ASSERT (MPN_SAME_OR_SEPARATE_P (res_ptr, s1_ptr, s1_size));
-
- /* The loop counter and index J goes from -SIZE to -1. This way
- the loop becomes faster. */
- j = -s1_size;
+ mp_limb_t ul, cl, hpl, lpl, rl;
- /* Offset the base pointers to compensate for the negative indices. */
- res_ptr -= j;
- s1_ptr -= j;
+ ASSERT (n >= 1);
+ ASSERT (MPN_SAME_OR_SEPARATE_P (rp, up, n));
- cy_limb = 0;
+ cl = 0;
do
{
- umul_ppmm (prod_high, prod_low, s1_ptr[j], s2_limb);
+ ul = *up++;
+ umul_ppmm (hpl, lpl, ul, vl);
- prod_low += cy_limb;
- cy_limb = (prod_low < cy_limb) + prod_high;
+ lpl += cl;
+ cl = (lpl < cl) + hpl;
- x = res_ptr[j];
- prod_low = x + prod_low;
- cy_limb += prod_low < x;;
- res_ptr[j] = prod_low;
+ rl = *rp;
+ lpl = rl + lpl;
+ cl += lpl < rl;
+ *rp++ = lpl;
}
- while (++j != 0);
+ while (--n != 0);
- return cy_limb;
+ return cl;
}
+
+#endif
+
+#if GMP_NAIL_BITS == 1
+you lose
+#endif
+
+#if GMP_NAIL_BITS >= 2
+
+mp_limb_t
+mpn_addmul_1 (mp_ptr rp, mp_srcptr up, mp_size_t n, mp_limb_t vl)
+{
+ mp_limb_t shifted_vl, ul, rl, lpl, hpl, prev_hpl, xw, cl, xl;
+
+ ASSERT (vl >> GMP_NUMB_BITS == 0);
+ ASSERT (n >= 1);
+ ASSERT (MPN_SAME_OR_SEPARATE_P (rp, up, n));
+
+ shifted_vl = vl << GMP_NAIL_BITS;
+ cl = 0;
+ prev_hpl = 0;
+ do
+ {
+ ul = *up++;
+ rl = *rp;
+ umul_ppmm (hpl, lpl, ul, shifted_vl);
+ lpl >>= GMP_NAIL_BITS;
+ xw = prev_hpl + lpl + rl + cl;
+ cl = xw >> GMP_NUMB_BITS;
+ xl = xw & GMP_NUMB_MASK;
+ *rp++ = xl;
+ prev_hpl = hpl;
+ }
+ while (--n != 0);
+
+ return prev_hpl + cl;
+}
+
+#endif
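
In the nail branch above (GMP_NAIL_BITS >= 2), every limb keeps its top GMP_NAIL_BITS zero and only the low GMP_NUMB_BITS carry data. Pre-shifting vl left by GMP_NAIL_BITS makes umul_ppmm deliver a high word that is already a valid numb, so the loop needs only one extra shift on the low word; the guard GMP_NAIL_BITS >= 2 guarantees the four-term sum xw fits in a limb. A self-contained sketch of that loop under toy parameters (16-bit limbs, 2 nail bits) with illustrative names (nail_addmul_1, NUMB_BITS, and so on are not GMP's), checked in main() against plain 64-bit arithmetic:

#include <assert.h>
#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

#define LIMB_BITS 16
#define NAIL_BITS 2                        /* stand-in for GMP_NAIL_BITS >= 2 */
#define NUMB_BITS (LIMB_BITS - NAIL_BITS)
#define NUMB_MASK ((1u << NUMB_BITS) - 1)

/* Model of the nail-case loop above: rp[0..n-1] += up[0..n-1] * vl,
   all values held as numbs (top NAIL_BITS of every limb zero).  */
static uint16_t
nail_addmul_1 (uint16_t *rp, const uint16_t *up, size_t n, uint16_t vl)
{
  uint16_t shifted_vl = vl << NAIL_BITS;   /* pre-shift, as in the diff */
  uint16_t cl = 0, prev_hpl = 0;
  for (size_t i = 0; i < n; i++)
    {
      uint32_t prod = (uint32_t) up[i] * shifted_vl;  /* umul_ppmm stand-in */
      uint16_t hpl = prod >> LIMB_BITS;    /* high word: already a numb */
      uint16_t lpl = ((uint16_t) prod) >> NAIL_BITS;  /* low word, realigned */
      uint32_t xw = (uint32_t) prev_hpl + lpl + rp[i] + cl;
      cl = xw >> NUMB_BITS;
      rp[i] = xw & NUMB_MASK;
      prev_hpl = hpl;                      /* high part lands one limb later */
    }
  return prev_hpl + cl;
}

int
main (void)
{
  uint16_t rp[2] = { 0x3FFF, 0x3FFF };     /* maximal 14-bit numbs */
  uint16_t up[2] = { 0x1234, 0x0ABC };
  uint16_t vl = 0x3FFF;
  /* Reference result computed in one 64-bit integer, base 2^NUMB_BITS.  */
  uint64_t u = up[0] | ((uint64_t) up[1] << NUMB_BITS);
  uint64_t r = rp[0] | ((uint64_t) rp[1] << NUMB_BITS);
  uint64_t want = r + u * vl;
  uint16_t cy = nail_addmul_1 (rp, up, 2, vl);
  uint64_t got = rp[0] | ((uint64_t) rp[1] << NUMB_BITS)
                 | ((uint64_t) cy << (2 * NUMB_BITS));
  assert (got == want);
  printf ("ok: carry limb = 0x%04x\n", (unsigned) cy);
  return 0;
}

With these parameters the worst case for xw is about 3 * 2^14, comfortably below 2^16, which is exactly the slack the GMP_NAIL_BITS >= 2 requirement provides in general: up to four numb-sized terms sum without overflowing the limb.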