author    Torbjorn Granlund <tege@gmplib.org>  2011-11-22 22:05:25 +0100
committer Torbjorn Granlund <tege@gmplib.org>  2011-11-22 22:05:25 +0100
commit    17a8a01f86586cbe7436565a7d22764f8f5988ea (patch)
tree      85532578b6471bb13d24510ca3ee36c8fda095a6
parent    042073d276059b723232c6db58c005645131d167 (diff)
download  gmp-17a8a01f86586cbe7436565a7d22764f8f5988ea.tar.gz
Generalise the new power6 addmul_1 to also support submul_1.
-rw-r--r--  ChangeLog                                                                               |  4
-rw-r--r--  mpn/powerpc64/mode64/p6/aorsmul_1.asm (renamed from mpn/powerpc64/mode64/p6/addmul_1.asm) | 77
2 files changed, 53 insertions, 28 deletions
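
For reference, the two entry points this file now serves have the same shape: multiply the n-limb operand {up, n} by the single limb v0, then either add the product to or subtract it from {rp, n}, returning the high limb that falls off the top. The sketch below is a plain C model of that contract, not GMP code; it uses 32-bit limbs so the double-limb products fit in uint64_t, whereas the assembly below works with 64-bit limbs, and the ref_* names are illustrative only.

#include <stdint.h>
#include <stddef.h>

typedef uint32_t limb;   /* one limb (the asm uses 64-bit limbs) */
typedef uint64_t dlimb;  /* double limb, holds a full limb product */

/* {rp,n} += {up,n} * v0; return the carry limb that falls off the top. */
limb ref_addmul_1 (limb *rp, const limb *up, size_t n, limb v0)
{
  limb cy = 0;
  for (size_t i = 0; i < n; i++)
    {
      dlimb p = (dlimb) up[i] * v0;                 /* hi:lo partial product */
      dlimb s = (dlimb) rp[i] + (limb) p + cy;      /* add low part plus carry in */
      rp[i] = (limb) s;
      cy = (limb) (p >> 32) + (limb) (s >> 32);     /* propagate the high part */
    }
  return cy;
}

/* {rp,n} -= {up,n} * v0; return the borrow limb that falls off the top. */
limb ref_submul_1 (limb *rp, const limb *up, size_t n, limb v0)
{
  limb bw = 0;
  for (size_t i = 0; i < n; i++)
    {
      dlimb p = (dlimb) up[i] * v0;
      limb lo = (limb) p + bw;                      /* low product plus borrow in */
      bw = (limb) (p >> 32) + (lo < (limb) p);      /* high part plus carry of that add */
      limb r = rp[i] - lo;
      bw += r > rp[i];                              /* borrow from the subtraction */
      rp[i] = r;
    }
  return bw;
}
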
diff --git a/ChangeLog b/ChangeLog
index 1d6a44512..80e0f7a32 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,12 +1,12 @@
2011-11-22 Torbjorn Granlund <tege@gmplib.org>
+ * mpn/powerpc64/mode64/p6/aorsmul_1.asm: New file.
+
* configure.in: Don't fail fat builds under 64-bit DOS.
* mpn/powerpc64/mode64/aors_n.asm: Align loop for slightly better
power5 performance.
- * mpn/powerpc64/mode64/p6/addmul_1.asm: New file.
-
2011-11-21 Torbjorn Granlund <tege@gmplib.org>
* gmp-h.in (__GNU_MP_RELEASE): Renamed from typo name.
diff --git a/mpn/powerpc64/mode64/p6/addmul_1.asm b/mpn/powerpc64/mode64/p6/aorsmul_1.asm
index bffa6f308..4bd508488 100644
--- a/mpn/powerpc64/mode64/p6/addmul_1.asm
+++ b/mpn/powerpc64/mode64/p6/aorsmul_1.asm
@@ -1,4 +1,4 @@
-dnl PowerPC-64 mpn_addmul_1 optimised for power6.
+dnl PowerPC-64 mpn_addmul_1 and mpn_submul_1 optimised for power6.
dnl Copyright 1999, 2000, 2001, 2003, 2004, 2005, 2006, 2008, 2010, 2011
dnl Free Software Foundation, Inc.
@@ -20,19 +20,20 @@ dnl along with the GNU MP Library. If not, see http://www.gnu.org/licenses/.
include(`../config.m4')
-C cycles/limb
-C POWER3/PPC630 ?
-C POWER4/PPC970 ?
-C POWER5 ?
-C POWER6 12.25
-C POWER7 ?
+C mpn_addmul_1 mpn_submul_1
+C cycles/limb cycles/limb
+C POWER3/PPC630 ? ?
+C POWER4/PPC970 ? ?
+C POWER5 ? ?
+C POWER6 12.25 12.8
+C POWER7 ? ?
C TODO
C * Reduce register usage.
C * Schedule function entry code.
C * Unroll more. 8-way unrolling would bring us to 10 c/l, 16-way unrolling
C would bring us to 9 c/l.
-C * Generalise to handle submul_1.
C * Handle n = 1 and perhaps n = 2 separately, without saving any registers.
C INPUT PARAMETERS
define(`rp', `r3')
@@ -40,8 +41,27 @@ define(`up', `r4')
define(`n', `r5')
define(`v0', `r6')
+ifdef(`OPERATION_addmul_1',`
+ define(ADDSUBC, adde)
+ define(ADDSUB, addc)
+ define(func, mpn_addmul_1)
+ define(func_nc, mpn_addmul_1c) C FIXME: not really supported
+ define(AM, `$1')
+ define(SM, `')
+ define(CLRRSC, `addic $1, r0, 0')
+')
+ifdef(`OPERATION_submul_1',`
+ define(ADDSUBC, subfe)
+ define(ADDSUB, subfc)
+ define(func, mpn_submul_1)
+ define(func_nc, mpn_submul_1c) C FIXME: not really supported
+ define(AM, `')
+ define(SM, `$1')
+ define(CLRRSC, `subfc $1, r0, r0')
+')
+
ASM_START()
-PROLOGUE(mpn_addmul_1)
+PROLOGUE(func)
std r31, -8(r1)
std r30, -16(r1)
std r29, -24(r1)
@@ -57,11 +77,11 @@ PROLOGUE(mpn_addmul_1)
blt cr6, L(b1)
beq cr6, L(b2)
-L(b3): addi up, up, 16
+L(b3): ld r8, 0(up)
+ ld r7, 8(up)
+ ld r27, 16(up)
+ addi up, up, 16
addi rp, rp, 16
- ld r8, -16(up)
- ld r7, -8(up)
- ld r27, 0(up)
mulld r5, r8, v0
mulhdu r8, r8, v0
mulld r9, r7, v0
@@ -74,13 +94,13 @@ L(b3): addi up, up, 16
addc r9, r9, r8
adde r11, r11, r7
addze r12, r27
- addc r5, r5, r29
+ ADDSUB r5, r5, r29
b L(l3)
-L(b2): addi up, up, 8
+L(b2): ld r7, 0(up)
+ ld r27, 8(up)
+ addi up, up, 8
addi rp, rp, 8
- ld r7, -8(up)
- ld r27, 0(up)
mulld r9, r7, v0
mulhdu r7, r7, v0
mulld r11, r27, v0
@@ -89,22 +109,25 @@ L(b2): addi up, up, 8
ld r31, 0(rp)
addc r11, r11, r7
addze r12, r27
- addc r9, r9, r30
+ ADDSUB r9, r9, r30
b L(l2)
L(b1): ld r27, 0(up)
ld r31, 0(rp)
mulld r11, r27, v0
mulhdu r12, r27, v0
- addc r11, r11, r31
+ ADDSUB r11, r11, r31
b L(l1)
L(b0): addi up, up, -8
addi rp, rp, -8
- addic r12, r0, 0 C clear r12 and cy (use that r0 = 0)
+ CLRRSC( r12) C clear r12 and clr/set cy
ALIGN(32)
-L(top): ld r10, 8(up)
+L(top):
+SM(` subfe r11, r0, r0') C complement...
+SM(` addic r11, r11, 1') C ...carry flag
+ ld r10, 8(up)
ld r8, 16(up)
ld r7, 24(up)
ld r27, 32(up)
@@ -127,18 +150,20 @@ L(top): ld r10, 8(up)
adde r9, r9, r8
adde r11, r11, r7
addze r12, r27
- addc r0, r0, r28
+ ADDSUB r0, r0, r28
std r0, -24(rp)
- adde r5, r5, r29
+ ADDSUBC r5, r5, r29
L(l3): std r5, -16(rp)
- adde r9, r9, r30
+ ADDSUBC r9, r9, r30
L(l2): std r9, -8(rp)
- adde r11, r11, r31
+ ADDSUBC r11, r11, r31
L(l1): std r11, 0(rp)
bdnz L(top)
- addze r3, r12
+AM(` addze r3, r12')
+SM(` subfe r11, r0, r0') C complement...
ld r31, -8(r1)
+SM(` subf r3, r11, r12')
ld r30, -16(r1)
ld r29, -24(r1)
ld r28, -32(r1)
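
A note on the SM(...) carry fixups above: PowerPC's subfc/subfe set CA to the complement of the borrow (CA = 1 means "no borrow"), while the product-accumulation chain wants a true borrow to fold in. The CLRRSC seed (subfc r12, r0, r0 leaves CA set), the two SM lines at L(top), and the closing SM subfe/subf pair all translate between those two conventions. The following is a rough C model of that bookkeeping, again with illustrative 32-bit limbs; model_submul_1 is not a GMP function.

#include <stdint.h>
#include <stddef.h>

typedef uint32_t limb;
typedef uint64_t dlimb;

limb model_submul_1 (limb *rp, const limb *up, size_t n, limb v0)
{
  unsigned ca = 1;      /* CA after CLRRSC's subfc: set, i.e. "no borrow" */
  limb hi = 0;          /* high product limb carried between iterations (r12) */
  for (size_t i = 0; i < n; i++)
    {
      unsigned borrow = 1 - ca;                     /* the SM complement step at L(top) */
      dlimb p = (dlimb) up[i] * v0 + hi + borrow;   /* fold high limb and borrow in */
      limb lo = (limb) p;
      hi = (limb) (p >> 32);
      limb r = rp[i] - lo;                          /* the subfc/subfe chain */
      ca = rp[i] >= lo;                             /* CA = 1 when there is no borrow */
      rp[i] = r;
    }
  return hi + (1 - ca);   /* matches the final subfe/subf: high limb plus borrow */
}
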