summary refs log tree commit diff
diff options
context:
space:
mode:
author	Kevin Ryde <user42@zip.com.au>	2000-04-16 00:46:46 +0200
committer	Kevin Ryde <user42@zip.com.au>	2000-04-16 00:46:46 +0200
commit	b411202ebcc8f817116fe7422683b52293b522f6 (patch)
tree	98ad8121f767fce9c2b64dfa315ed22100719103
parent	733ba8f43f4f96953fe416250cf8390571b69947 (diff)
download	gmp-b411202ebcc8f817116fe7422683b52293b522f6.tar.gz
Change __mpn to __gmpn.
-rw-r--r--	mpn/a29k/add_n.s	6
-rw-r--r--	mpn/a29k/addmul_1.s	6
-rw-r--r--	mpn/a29k/lshift.s	6
-rw-r--r--	mpn/a29k/mul_1.s	6
-rw-r--r--	mpn/a29k/rshift.s	6
-rw-r--r--	mpn/a29k/sub_n.s	6
-rw-r--r--	mpn/a29k/submul_1.s	6
-rw-r--r--	mpn/alpha/addmul_1.asm	2
-rw-r--r--	mpn/alpha/ev5/add_n.asm	2
-rw-r--r--	mpn/alpha/ev5/lshift.asm	2
-rw-r--r--	mpn/alpha/ev5/rshift.asm	2
-rw-r--r--	mpn/alpha/ev5/sub_n.asm	2
-rw-r--r--	mpn/alpha/mul_1.asm	2
-rw-r--r--	mpn/alpha/submul_1.asm	2
-rw-r--r--	mpn/arm/add_n.S	8
-rw-r--r--	mpn/arm/addmul_1.S	8
-rw-r--r--	mpn/arm/mul_1.S	8
-rw-r--r--	mpn/arm/sub_n.S	8
-rw-r--r--	mpn/clipper/add_n.s	6
-rw-r--r--	mpn/clipper/mul_1.s	6
-rw-r--r--	mpn/clipper/sub_n.s	6
-rw-r--r--	mpn/hppa/add_n.s	6
-rw-r--r--	mpn/hppa/hppa1_1/addmul_1.s	6
-rw-r--r--	mpn/hppa/hppa1_1/mul_1.s	6
-rw-r--r--	mpn/hppa/hppa1_1/pa7100/add_n.s	6
-rw-r--r--	mpn/hppa/hppa1_1/pa7100/addmul_1.S	6
-rw-r--r--	mpn/hppa/hppa1_1/pa7100/lshift.s	6
-rw-r--r--	mpn/hppa/hppa1_1/pa7100/rshift.s	6
-rw-r--r--	mpn/hppa/hppa1_1/pa7100/sub_n.s	6
-rw-r--r--	mpn/hppa/hppa1_1/pa7100/submul_1.S	6
-rw-r--r--	mpn/hppa/hppa1_1/submul_1.s	8
-rw-r--r--	mpn/hppa/hppa2_0/add_n.s	6
-rw-r--r--	mpn/hppa/hppa2_0/sub_n.s	6
-rw-r--r--	mpn/hppa/lshift.s	6
-rw-r--r--	mpn/hppa/rshift.s	6
-rw-r--r--	mpn/hppa/sub_n.s	6
-rw-r--r--	mpn/i960/add_n.s	6
-rw-r--r--	mpn/i960/addmul_1.s	6
-rw-r--r--	mpn/i960/mul_1.s	6
-rw-r--r--	mpn/i960/sub_n.s	6
-rw-r--r--	mpn/m68k/add_n.S	10
-rw-r--r--	mpn/m68k/lshift.S	10
-rw-r--r--	mpn/m68k/mc68020/addmul_1.S	10
-rw-r--r--	mpn/m68k/mc68020/mul_1.S	10
-rw-r--r--	mpn/m68k/mc68020/submul_1.S	10
-rw-r--r--	mpn/m68k/rshift.S	10
-rw-r--r--	mpn/m68k/sub_n.S	10
-rw-r--r--	mpn/m88k/add_n.s	6
-rw-r--r--	mpn/m88k/mc88110/add_n.S	6
-rw-r--r--	mpn/m88k/mc88110/addmul_1.s	6
-rw-r--r--	mpn/m88k/mc88110/mul_1.s	6
-rw-r--r--	mpn/m88k/mc88110/sub_n.S	6
-rw-r--r--	mpn/m88k/mul_1.s	6
-rw-r--r--	mpn/m88k/sub_n.s	6
-rw-r--r--	mpn/sh/add_n.s	6
-rw-r--r--	mpn/sh/sh2/addmul_1.s	6
-rw-r--r--	mpn/sh/sh2/mul_1.s	6
-rw-r--r--	mpn/sh/sh2/submul_1.s	6
-rw-r--r--	mpn/sh/sub_n.s	6
-rw-r--r--	mpn/sparc64/add_n.asm	2
-rw-r--r--	mpn/sparc64/copyi.asm	2
-rw-r--r--	mpn/sparc64/lshift.asm	2
-rw-r--r--	mpn/sparc64/rshift.asm	2
-rw-r--r--	mpn/sparc64/sub_n.asm	2
-rw-r--r--	mpn/x86/add_n.S	18
-rw-r--r--	mpn/x86/addmul_1.S	10
-rw-r--r--	mpn/x86/lshift.S	10
-rw-r--r--	mpn/x86/mul_1.S	10
-rw-r--r--	mpn/x86/mul_basecase.S	8
-rw-r--r--	mpn/x86/pentium/add_n.S	18
-rw-r--r--	mpn/x86/pentium/addmul_1.S	10
-rw-r--r--	mpn/x86/pentium/lshift.S	10
-rw-r--r--	mpn/x86/pentium/mul_1.S	10
-rw-r--r--	mpn/x86/pentium/mul_basecase.S	8
-rw-r--r--	mpn/x86/pentium/rshift.S	10
-rw-r--r--	mpn/x86/pentium/sub_n.S	18
-rw-r--r--	mpn/x86/pentium/submul_1.S	10
-rw-r--r--	mpn/x86/rshift.S	10
-rw-r--r--	mpn/x86/sub_n.S	18
-rw-r--r--	mpn/x86/submul_1.S	10
-rw-r--r--	mpn/z8000/add_n.s	6
-rw-r--r--	mpn/z8000/mul_1.s	6
82 files changed, 287 insertions, 287 deletions
diff --git a/mpn/a29k/add_n.s b/mpn/a29k/add_n.s
index 74c20e3f7..0732e5779 100644
--- a/mpn/a29k/add_n.s
+++ b/mpn/a29k/add_n.s
@@ -1,4 +1,4 @@
-; 29000 __mpn_add -- Add two limb vectors of the same length > 0 and store
+; 29000 __gmpn_add -- Add two limb vectors of the same length > 0 and store
; sum in a third limb vector.
; Copyright (C) 1992, 1994 Free Software Foundation, Inc.
@@ -38,9 +38,9 @@
.sect .lit,lit
.text
.align 4
- .global ___mpn_add_n
+ .global ___gmpn_add_n
.word 0x60000
-___mpn_add_n:
+___gmpn_add_n:
srl gr117,lr5,3
sub gr118,gr117,1
jmpt gr118,Ltail
diff --git a/mpn/a29k/addmul_1.s b/mpn/a29k/addmul_1.s
index 8c0ec96ce..f21ef2622 100644
--- a/mpn/a29k/addmul_1.s
+++ b/mpn/a29k/addmul_1.s
@@ -1,4 +1,4 @@
-; 29000 __mpn_addmul_1 -- Multiply a limb vector with a single limb and
+; 29000 __gmpn_addmul_1 -- Multiply a limb vector with a single limb and
; add the product to a second limb vector.
; Copyright (C) 1992, 1994 Free Software Foundation, Inc.
@@ -31,9 +31,9 @@
.sect .lit,lit
.text
.align 4
- .global ___mpn_addmul_1
+ .global ___gmpn_addmul_1
.word 0x60000
-___mpn_addmul_1:
+___gmpn_addmul_1:
sub lr4,lr4,8
jmpt lr4,Ltail
const gr120,0 ; init cylimb reg
diff --git a/mpn/a29k/lshift.s b/mpn/a29k/lshift.s
index 7554e2cbb..fbf69da7d 100644
--- a/mpn/a29k/lshift.s
+++ b/mpn/a29k/lshift.s
@@ -1,4 +1,4 @@
-; 29000 __mpn_lshift --
+; 29000 __gmpn_lshift --
; Copyright (C) 1992, 1994 Free Software Foundation, Inc.
@@ -32,9 +32,9 @@
.sect .lit,lit
.text
.align 4
- .global ___mpn_lshift
+ .global ___gmpn_lshift
.word 0x60000
-___mpn_lshift:
+___gmpn_lshift:
sll gr116,lr4,2
add lr3,gr116,lr3
add lr2,gr116,lr2
diff --git a/mpn/a29k/mul_1.s b/mpn/a29k/mul_1.s
index 5d120f48e..43798aae9 100644
--- a/mpn/a29k/mul_1.s
+++ b/mpn/a29k/mul_1.s
@@ -1,4 +1,4 @@
-; 29000 __mpn_mul_1 -- Multiply a limb vector with a single limb and
+; 29000 __gmpn_mul_1 -- Multiply a limb vector with a single limb and
; store the product in a second limb vector.
; Copyright (C) 1992, 1994 Free Software Foundation, Inc.
@@ -31,9 +31,9 @@
.sect .lit,lit
.text
.align 4
- .global ___mpn_mul_1
+ .global ___gmpn_mul_1
.word 0x60000
-___mpn_mul_1:
+___gmpn_mul_1:
sub lr4,lr4,8
jmpt lr4,Ltail
const gr120,0 ; init cylimb reg
diff --git a/mpn/a29k/rshift.s b/mpn/a29k/rshift.s
index fe53b71e2..caed7e5b1 100644
--- a/mpn/a29k/rshift.s
+++ b/mpn/a29k/rshift.s
@@ -1,4 +1,4 @@
-; 29000 __mpn_rshift --
+; 29000 __gmpn_rshift --
; Copyright (C) 1992, 1994 Free Software Foundation, Inc.
@@ -32,9 +32,9 @@
.sect .lit,lit
.text
.align 4
- .global ___mpn_rshift
+ .global ___gmpn_rshift
.word 0x60000
-___mpn_rshift:
+___gmpn_rshift:
load 0,0,gr119,lr3
add lr3,lr3,4
diff --git a/mpn/a29k/sub_n.s b/mpn/a29k/sub_n.s
index 3c8d61065..4a512eff8 100644
--- a/mpn/a29k/sub_n.s
+++ b/mpn/a29k/sub_n.s
@@ -1,4 +1,4 @@
-; 29000 __mpn_sub -- Subtract two limb vectors of the same length > 0 and
+; 29000 __gmpn_sub -- Subtract two limb vectors of the same length > 0 and
; store difference in a third limb vector.
; Copyright (C) 1992, 1994 Free Software Foundation, Inc.
@@ -38,9 +38,9 @@
.sect .lit,lit
.text
.align 4
- .global ___mpn_sub_n
+ .global ___gmpn_sub_n
.word 0x60000
-___mpn_sub_n:
+___gmpn_sub_n:
srl gr117,lr5,3
sub gr118,gr117,1
jmpt gr118,Ltail
diff --git a/mpn/a29k/submul_1.s b/mpn/a29k/submul_1.s
index ca2ef72a9..87f6965a1 100644
--- a/mpn/a29k/submul_1.s
+++ b/mpn/a29k/submul_1.s
@@ -1,4 +1,4 @@
-; 29000 __mpn_submul_1 -- Multiply a limb vector with a single limb and
+; 29000 __gmpn_submul_1 -- Multiply a limb vector with a single limb and
; subtract the product from a second limb vector.
; Copyright (C) 1992, 1994 Free Software Foundation, Inc.
@@ -31,9 +31,9 @@
.sect .lit,lit
.text
.align 4
- .global ___mpn_submul_1
+ .global ___gmpn_submul_1
.word 0x60000
-___mpn_submul_1:
+___gmpn_submul_1:
sub lr4,lr4,8
jmpt lr4,Ltail
const gr120,0 ; init cylimb reg
diff --git a/mpn/alpha/addmul_1.asm b/mpn/alpha/addmul_1.asm
index 0a42326b3..c6d547391 100644
--- a/mpn/alpha/addmul_1.asm
+++ b/mpn/alpha/addmul_1.asm
@@ -1,4 +1,4 @@
-dnl Alpha __mpn_addmul_1 -- Multiply a limb vector with a limb and add
+dnl Alpha __gmpn_addmul_1 -- Multiply a limb vector with a limb and add
dnl the result to a second limb vector.
dnl Copyright (C) 1992, 1994, 1995, 2000 Free Software Foundation, Inc.
diff --git a/mpn/alpha/ev5/add_n.asm b/mpn/alpha/ev5/add_n.asm
index 9b3484aa9..0109cd6e1 100644
--- a/mpn/alpha/ev5/add_n.asm
+++ b/mpn/alpha/ev5/add_n.asm
@@ -1,4 +1,4 @@
-dnl Alpha EV5 __mpn_add_n -- Add two limb vectors of the same length > 0 and
+dnl Alpha EV5 __gmpn_add_n -- Add two limb vectors of the same length > 0 and
dnl store sum in a third limb vector.
dnl Copyright (C) 1995, 1999, 2000 Free Software Foundation, Inc.
diff --git a/mpn/alpha/ev5/lshift.asm b/mpn/alpha/ev5/lshift.asm
index 23b9e8a10..d18bff0a2 100644
--- a/mpn/alpha/ev5/lshift.asm
+++ b/mpn/alpha/ev5/lshift.asm
@@ -1,4 +1,4 @@
-dnl Alpha EV5 __mpn_lshift -- Shift a number left.
+dnl Alpha EV5 __gmpn_lshift -- Shift a number left.
dnl Copyright (C) 1994, 1995, 2000 Free Software Foundation, Inc.
diff --git a/mpn/alpha/ev5/rshift.asm b/mpn/alpha/ev5/rshift.asm
index c3325579f..8d0ea9980 100644
--- a/mpn/alpha/ev5/rshift.asm
+++ b/mpn/alpha/ev5/rshift.asm
@@ -1,4 +1,4 @@
-dnl Alpha EV5 __mpn_rshift -- Shift a number right.
+dnl Alpha EV5 __gmpn_rshift -- Shift a number right.
dnl Copyright (C) 1994, 1995, 2000 Free Software Foundation, Inc.
diff --git a/mpn/alpha/ev5/sub_n.asm b/mpn/alpha/ev5/sub_n.asm
index 213c2c885..89076e4c6 100644
--- a/mpn/alpha/ev5/sub_n.asm
+++ b/mpn/alpha/ev5/sub_n.asm
@@ -1,4 +1,4 @@
-dnl Alpha EV5 __mpn_sub_n -- Subtract two limb vectors of the same length > 0
+dnl Alpha EV5 __gmpn_sub_n -- Subtract two limb vectors of the same length > 0
dnl and store difference in a third limb vector.
dnl Copyright (C) 1995, 1999, 2000 Free Software Foundation, Inc.
diff --git a/mpn/alpha/mul_1.asm b/mpn/alpha/mul_1.asm
index 94cd55c9c..81b089c37 100644
--- a/mpn/alpha/mul_1.asm
+++ b/mpn/alpha/mul_1.asm
@@ -1,4 +1,4 @@
-dnl Alpha __mpn_mul_1 -- Multiply a limb vector with a limb and store
+dnl Alpha __gmpn_mul_1 -- Multiply a limb vector with a limb and store
dnl the result in a second limb vector.
dnl Copyright (C) 1992, 1994, 1995, 2000 Free Software Foundation, Inc.
diff --git a/mpn/alpha/submul_1.asm b/mpn/alpha/submul_1.asm
index 5122d9e80..59a8cf7f7 100644
--- a/mpn/alpha/submul_1.asm
+++ b/mpn/alpha/submul_1.asm
@@ -1,4 +1,4 @@
-dnl Alpha __mpn_submul_1 -- Multiply a limb vector with a limb and
+dnl Alpha __gmpn_submul_1 -- Multiply a limb vector with a limb and
dnl subtract the result from a second limb vector.
dnl Copyright (C) 1992, 1994, 1995, 2000 Free Software Foundation, Inc.
diff --git a/mpn/arm/add_n.S b/mpn/arm/add_n.S
index 01521e191..a6bcbcbb1 100644
--- a/mpn/arm/add_n.S
+++ b/mpn/arm/add_n.S
@@ -35,9 +35,9 @@
.text
.align 0
- .global __mpn_add_n
- .type __mpn_add_n,%function
-__mpn_add_n:
+ .global __gmpn_add_n
+ .type __gmpn_add_n,%function
+__gmpn_add_n:
stmfd sp!, { r8, r9, lr }
movs n, n, lsr #1
bcc Lskip1
@@ -73,4 +73,4 @@ Lreturn:
adc r0, n, #0
ldmfd sp!, { r8, r9, pc }^
Lend:
- .size __mpn_add_n, Lend - __mpn_add_n
+ .size __gmpn_add_n, Lend - __gmpn_add_n
diff --git a/mpn/arm/addmul_1.S b/mpn/arm/addmul_1.S
index c5daee1b7..e9b878eaa 100644
--- a/mpn/arm/addmul_1.S
+++ b/mpn/arm/addmul_1.S
@@ -35,9 +35,9 @@
.text
.align 0
- .global __mpn_addmul_1
- .type __mpn_addmul_1,%function
-__mpn_addmul_1:
+ .global __gmpn_addmul_1
+ .type __gmpn_addmul_1,%function
+__gmpn_addmul_1:
stmfd sp!, { r7, r8, r9, lr }
mov lr, #0
movs n, n, lsr #1
@@ -95,4 +95,4 @@ Lreturn:
mov r0, lr
ldmfd sp!, { r7, r8, r9, pc }^
Lend:
- .size __mpn_addmul_1, Lend - __mpn_addmul_1
+ .size __gmpn_addmul_1, Lend - __gmpn_addmul_1
diff --git a/mpn/arm/mul_1.S b/mpn/arm/mul_1.S
index 1a5d28175..4ef4d8393 100644
--- a/mpn/arm/mul_1.S
+++ b/mpn/arm/mul_1.S
@@ -35,9 +35,9 @@
.text
.align 0
- .global __mpn_mul_1
- .type __mpn_mul_1,%function
-__mpn_mul_1:
+ .global __gmpn_mul_1
+ .type __gmpn_mul_1,%function
+__gmpn_mul_1:
stmfd sp!, { r7, r8, r9, lr }
mov lr, #0
movs n, n, lsr #1
@@ -79,4 +79,4 @@ Lreturn:
mov r0, lr
ldmfd sp!, { r7, r8, r9, pc }^
Lend:
- .size __mpn_mul_1, Lend - __mpn_mul_1
+ .size __gmpn_mul_1, Lend - __gmpn_mul_1
diff --git a/mpn/arm/sub_n.S b/mpn/arm/sub_n.S
index a2d891131..32a0dbc31 100644
--- a/mpn/arm/sub_n.S
+++ b/mpn/arm/sub_n.S
@@ -35,9 +35,9 @@
.text
.align 0
- .global __mpn_sub_n
- .type __mpn_sub_n,%function
-__mpn_sub_n:
+ .global __gmpn_sub_n
+ .type __gmpn_sub_n,%function
+__gmpn_sub_n:
stmfd sp!, { r8, r9, lr }
subs ip, ip, ip
tst n, #1
@@ -75,4 +75,4 @@ Lreturn:
and r0, r0, #1
ldmfd sp!, { r8, r9, pc }^
Lend:
- .size __mpn_sub_n, Lend - __mpn_sub_n
+ .size __gmpn_sub_n, Lend - __gmpn_sub_n
diff --git a/mpn/clipper/add_n.s b/mpn/clipper/add_n.s
index 8d9b98673..e6a83c3d4 100644
--- a/mpn/clipper/add_n.s
+++ b/mpn/clipper/add_n.s
@@ -1,4 +1,4 @@
-; Clipper __mpn_add_n -- Add two limb vectors of the same length > 0 and store
+; Clipper __gmpn_add_n -- Add two limb vectors of the same length > 0 and store
; sum in a third limb vector.
; Copyright (C) 1995 Free Software Foundation, Inc.
@@ -22,8 +22,8 @@
.text
.align 16
-.globl ___mpn_add_n
-___mpn_add_n:
+.globl ___gmpn_add_n
+___gmpn_add_n:
subq $8,sp
storw r6,(sp)
loadw 12(sp),r2
diff --git a/mpn/clipper/mul_1.s b/mpn/clipper/mul_1.s
index 44d92c3d5..2246c555d 100644
--- a/mpn/clipper/mul_1.s
+++ b/mpn/clipper/mul_1.s
@@ -1,4 +1,4 @@
-; Clipper __mpn_mul_1 -- Multiply a limb vector with a limb and store
+; Clipper __gmpn_mul_1 -- Multiply a limb vector with a limb and store
; the result in a second limb vector.
; Copyright (C) 1995 Free Software Foundation, Inc.
@@ -22,8 +22,8 @@
.text
.align 16
-.globl ___mpn_mul_1
-___mpn_mul_1:
+.globl ___gmpn_mul_1
+___gmpn_mul_1:
subq $8,sp
storw r6,(sp)
loadw 12(sp),r2
diff --git a/mpn/clipper/sub_n.s b/mpn/clipper/sub_n.s
index 882c99104..6d5798b80 100644
--- a/mpn/clipper/sub_n.s
+++ b/mpn/clipper/sub_n.s
@@ -1,4 +1,4 @@
-; Clipper __mpn_sub_n -- Subtract two limb vectors of the same length > 0 and
+; Clipper __gmpn_sub_n -- Subtract two limb vectors of the same length > 0 and
; store difference in a third limb vector.
; Copyright (C) 1995 Free Software Foundation, Inc.
@@ -22,8 +22,8 @@
.text
.align 16
-.globl ___mpn_sub_n
-___mpn_sub_n:
+.globl ___gmpn_sub_n
+___gmpn_sub_n:
subq $8,sp
storw r6,(sp)
loadw 12(sp),r2
diff --git a/mpn/hppa/add_n.s b/mpn/hppa/add_n.s
index b4a142836..f326504a5 100644
--- a/mpn/hppa/add_n.s
+++ b/mpn/hppa/add_n.s
@@ -1,4 +1,4 @@
-; HP-PA __mpn_add_n -- Add two limb vectors of the same length > 0 and store
+; HP-PA __gmpn_add_n -- Add two limb vectors of the same length > 0 and store
; sum in a third limb vector.
; Copyright (C) 1992, 1994 Free Software Foundation, Inc.
@@ -32,8 +32,8 @@
; unrolling useless. We can't come under 5 cycles/limb anyway.
.code
- .export __mpn_add_n
-__mpn_add_n
+ .export __gmpn_add_n
+__gmpn_add_n
.proc
.callinfo frame=0,no_calls
.entry
diff --git a/mpn/hppa/hppa1_1/addmul_1.s b/mpn/hppa/hppa1_1/addmul_1.s
index 0fdcb3cb2..e3a1dc260 100644
--- a/mpn/hppa/hppa1_1/addmul_1.s
+++ b/mpn/hppa/hppa1_1/addmul_1.s
@@ -1,4 +1,4 @@
-; HP-PA-1.1 __mpn_addmul_1 -- Multiply a limb vector with a limb and
+; HP-PA-1.1 __gmpn_addmul_1 -- Multiply a limb vector with a limb and
; add the result to a second limb vector.
; Copyright (C) 1992, 1993, 1994 Free Software Foundation, Inc.
@@ -37,8 +37,8 @@
; There are some ideas described in mul_1.s that applies to this code too.
.code
- .export __mpn_addmul_1
-__mpn_addmul_1
+ .export __gmpn_addmul_1
+__gmpn_addmul_1
.proc
.callinfo frame=64,no_calls
.entry
diff --git a/mpn/hppa/hppa1_1/mul_1.s b/mpn/hppa/hppa1_1/mul_1.s
index cdd0c1d7f..c192df5b5 100644
--- a/mpn/hppa/hppa1_1/mul_1.s
+++ b/mpn/hppa/hppa1_1/mul_1.s
@@ -1,4 +1,4 @@
-; HP-PA-1.1 __mpn_mul_1 -- Multiply a limb vector with a limb and store
+; HP-PA-1.1 __gmpn_mul_1 -- Multiply a limb vector with a limb and store
; the result in a second limb vector.
; Copyright (C) 1992, 1993, 1994 Free Software Foundation, Inc.
@@ -45,8 +45,8 @@
; in the cache.)
.code
- .export __mpn_mul_1
-__mpn_mul_1
+ .export __gmpn_mul_1
+__gmpn_mul_1
.proc
.callinfo frame=64,no_calls
.entry
diff --git a/mpn/hppa/hppa1_1/pa7100/add_n.s b/mpn/hppa/hppa1_1/pa7100/add_n.s
index 21fe16154..844fc3b11 100644
--- a/mpn/hppa/hppa1_1/pa7100/add_n.s
+++ b/mpn/hppa/hppa1_1/pa7100/add_n.s
@@ -1,4 +1,4 @@
-; HP-PA __mpn_add_n -- Add two limb vectors of the same length > 0 and store
+; HP-PA __gmpn_add_n -- Add two limb vectors of the same length > 0 and store
; sum in a third limb vector.
; This is optimized for the PA7100, where is runs at 4.25 cycles/limb
@@ -29,8 +29,8 @@
; size gr23
.code
- .export __mpn_add_n
-__mpn_add_n
+ .export __gmpn_add_n
+__gmpn_add_n
.proc
.callinfo frame=0,no_calls
.entry
diff --git a/mpn/hppa/hppa1_1/pa7100/addmul_1.S b/mpn/hppa/hppa1_1/pa7100/addmul_1.S
index eb1d12bf6..f640ccb03 100644
--- a/mpn/hppa/hppa1_1/pa7100/addmul_1.S
+++ b/mpn/hppa/hppa1_1/pa7100/addmul_1.S
@@ -1,4 +1,4 @@
-; HP-PA 7100/7200 __mpn_addmul_1 -- Multiply a limb vector with a limb and
+; HP-PA 7100/7200 __gmpn_addmul_1 -- Multiply a limb vector with a limb and
; add the result to a second limb vector.
; Copyright (C) 1995 Free Software Foundation, Inc.
@@ -41,8 +41,8 @@
#define hi3 %r1
.code
- .export __mpn_addmul_1
-__mpn_addmul_1
+ .export __gmpn_addmul_1
+__gmpn_addmul_1
.proc
.callinfo frame=128,no_calls
.entry
diff --git a/mpn/hppa/hppa1_1/pa7100/lshift.s b/mpn/hppa/hppa1_1/pa7100/lshift.s
index 4c74a505a..0d095818a 100644
--- a/mpn/hppa/hppa1_1/pa7100/lshift.s
+++ b/mpn/hppa/hppa1_1/pa7100/lshift.s
@@ -1,4 +1,4 @@
-; HP-PA __mpn_lshift --
+; HP-PA __gmpn_lshift --
; This is optimized for the PA7100, where is runs at 3.25 cycles/limb
; Copyright (C) 1992, 1994 Free Software Foundation, Inc.
@@ -28,8 +28,8 @@
; cnt gr23
.code
- .export __mpn_lshift
-__mpn_lshift
+ .export __gmpn_lshift
+__gmpn_lshift
.proc
.callinfo frame=64,no_calls
.entry
diff --git a/mpn/hppa/hppa1_1/pa7100/rshift.s b/mpn/hppa/hppa1_1/pa7100/rshift.s
index 845418c53..b5cf2ff82 100644
--- a/mpn/hppa/hppa1_1/pa7100/rshift.s
+++ b/mpn/hppa/hppa1_1/pa7100/rshift.s
@@ -1,4 +1,4 @@
-; HP-PA __mpn_rshift --
+; HP-PA __gmpn_rshift --
; This is optimized for the PA7100, where is runs at 3.25 cycles/limb
; Copyright (C) 1992, 1994 Free Software Foundation, Inc.
@@ -28,8 +28,8 @@
; cnt gr23
.code
- .export __mpn_rshift
-__mpn_rshift
+ .export __gmpn_rshift
+__gmpn_rshift
.proc
.callinfo frame=64,no_calls
.entry
diff --git a/mpn/hppa/hppa1_1/pa7100/sub_n.s b/mpn/hppa/hppa1_1/pa7100/sub_n.s
index 1e1ebcf91..b7862d3a1 100644
--- a/mpn/hppa/hppa1_1/pa7100/sub_n.s
+++ b/mpn/hppa/hppa1_1/pa7100/sub_n.s
@@ -1,4 +1,4 @@
-; HP-PA __mpn_sub_n -- Subtract two limb vectors of the same length > 0 and
+; HP-PA __gmpn_sub_n -- Subtract two limb vectors of the same length > 0 and
; store difference in a third limb vector.
; This is optimized for the PA7100, where is runs at 4.25 cycles/limb
@@ -29,8 +29,8 @@
; size gr23
.code
- .export __mpn_sub_n
-__mpn_sub_n
+ .export __gmpn_sub_n
+__gmpn_sub_n
.proc
.callinfo frame=0,no_calls
.entry
diff --git a/mpn/hppa/hppa1_1/pa7100/submul_1.S b/mpn/hppa/hppa1_1/pa7100/submul_1.S
index a71176e68..c5a06ea91 100644
--- a/mpn/hppa/hppa1_1/pa7100/submul_1.S
+++ b/mpn/hppa/hppa1_1/pa7100/submul_1.S
@@ -1,4 +1,4 @@
-; HP-PA 7100/7200 __mpn_submul_1 -- Multiply a limb vector with a limb and
+; HP-PA 7100/7200 __gmpn_submul_1 -- Multiply a limb vector with a limb and
; subtract the result from a second limb vector.
; Copyright (C) 1995 Free Software Foundation, Inc.
@@ -41,8 +41,8 @@
#define hi3 %r1
.code
- .export __mpn_submul_1
-__mpn_submul_1
+ .export __gmpn_submul_1
+__gmpn_submul_1
.proc
.callinfo frame=128,no_calls
.entry
diff --git a/mpn/hppa/hppa1_1/submul_1.s b/mpn/hppa/hppa1_1/submul_1.s
index a4a385467..c19a9bc08 100644
--- a/mpn/hppa/hppa1_1/submul_1.s
+++ b/mpn/hppa/hppa1_1/submul_1.s
@@ -1,4 +1,4 @@
-; HP-PA-1.1 __mpn_submul_1 -- Multiply a limb vector with a limb and
+; HP-PA-1.1 __gmpn_submul_1 -- Multiply a limb vector with a limb and
; subtract the result from a second limb vector.
; Copyright (C) 1992, 1993, 1994 Free Software Foundation, Inc.
@@ -36,14 +36,14 @@
; There are some ideas described in mul_1.s that applies to this code too.
-; It seems possible to make this run as fast as __mpn_addmul_1, if we use
+; It seems possible to make this run as fast as __gmpn_addmul_1, if we use
; sub,>>= %r29,%r19,%r22
; addi 1,%r28,%r28
; but that requires reworking the hairy software pipeline...
.code
- .export __mpn_submul_1
-__mpn_submul_1
+ .export __gmpn_submul_1
+__gmpn_submul_1
.proc
.callinfo frame=64,no_calls
.entry
diff --git a/mpn/hppa/hppa2_0/add_n.s b/mpn/hppa/hppa2_0/add_n.s
index 373128340..655a4d816 100644
--- a/mpn/hppa/hppa2_0/add_n.s
+++ b/mpn/hppa/hppa2_0/add_n.s
@@ -1,4 +1,4 @@
-; HP-PA 2.0 32-bit __mpn_add_n -- Add two limb vectors of the same length > 0
+; HP-PA 2.0 32-bit __gmpn_add_n -- Add two limb vectors of the same length > 0
; and store sum in a third limb vector.
; Copyright (C) 1997, 1998 Free Software Foundation, Inc.
@@ -30,8 +30,8 @@
; This runs at 2 cycles/limb on PA8000.
.code
- .export __mpn_add_n
-__mpn_add_n
+ .export __gmpn_add_n
+__gmpn_add_n
.proc
.callinfo frame=0,no_calls
.entry
diff --git a/mpn/hppa/hppa2_0/sub_n.s b/mpn/hppa/hppa2_0/sub_n.s
index ce81037fb..d61af0dd3 100644
--- a/mpn/hppa/hppa2_0/sub_n.s
+++ b/mpn/hppa/hppa2_0/sub_n.s
@@ -1,4 +1,4 @@
-; HP-PA 2.0 32-bit __mpn_sub_n -- Subtract two limb vectors of the same
+; HP-PA 2.0 32-bit __gmpn_sub_n -- Subtract two limb vectors of the same
; length > 0 and store difference in a third limb vector.
; Copyright (C) 1997, 1998 Free Software Foundation, Inc.
@@ -30,8 +30,8 @@
; This runs at 2 cycles/limb on PA8000.
.code
- .export __mpn_sub_n
-__mpn_sub_n
+ .export __gmpn_sub_n
+__gmpn_sub_n
.proc
.callinfo frame=0,no_calls
.entry
diff --git a/mpn/hppa/lshift.s b/mpn/hppa/lshift.s
index abac6ec20..20cfb8fae 100644
--- a/mpn/hppa/lshift.s
+++ b/mpn/hppa/lshift.s
@@ -1,4 +1,4 @@
-; HP-PA __mpn_lshift --
+; HP-PA __gmpn_lshift --
; Copyright (C) 1992, 1994 Free Software Foundation, Inc.
@@ -27,8 +27,8 @@
; cnt gr23
.code
- .export __mpn_lshift
-__mpn_lshift
+ .export __gmpn_lshift
+__gmpn_lshift
.proc
.callinfo frame=64,no_calls
.entry
diff --git a/mpn/hppa/rshift.s b/mpn/hppa/rshift.s
index c1480e5ab..fabe4d415 100644
--- a/mpn/hppa/rshift.s
+++ b/mpn/hppa/rshift.s
@@ -1,4 +1,4 @@
-; HP-PA __mpn_rshift --
+; HP-PA __gmpn_rshift --
; Copyright (C) 1992, 1994 Free Software Foundation, Inc.
@@ -27,8 +27,8 @@
; cnt gr23
.code
- .export __mpn_rshift
-__mpn_rshift
+ .export __gmpn_rshift
+__gmpn_rshift
.proc
.callinfo frame=64,no_calls
.entry
diff --git a/mpn/hppa/sub_n.s b/mpn/hppa/sub_n.s
index 04fa3e1e3..486df5b6d 100644
--- a/mpn/hppa/sub_n.s
+++ b/mpn/hppa/sub_n.s
@@ -1,4 +1,4 @@
-; HP-PA __mpn_sub_n -- Subtract two limb vectors of the same length > 0 and
+; HP-PA __gmpn_sub_n -- Subtract two limb vectors of the same length > 0 and
; store difference in a third limb vector.
; Copyright (C) 1992, 1994 Free Software Foundation, Inc.
@@ -32,8 +32,8 @@
; unrolling useless. We can't come under 5 cycles/limb anyway.
.code
- .export __mpn_sub_n
-__mpn_sub_n
+ .export __gmpn_sub_n
+__gmpn_sub_n
.proc
.callinfo frame=0,no_calls
.entry
diff --git a/mpn/i960/add_n.s b/mpn/i960/add_n.s
index 6e674822c..2f23fc504 100644
--- a/mpn/i960/add_n.s
+++ b/mpn/i960/add_n.s
@@ -1,4 +1,4 @@
-# I960 __mpn_add_n -- Add two limb vectors of the same length > 0 and store
+# I960 __gmpn_add_n -- Add two limb vectors of the same length > 0 and store
# sum in a third limb vector.
# Copyright (C) 1995 Free Software Foundation, Inc.
@@ -22,8 +22,8 @@
.text
.align 4
- .globl ___mpn_add_n
-___mpn_add_n:
+ .globl ___gmpn_add_n
+___gmpn_add_n:
mov 0,g6 # clear carry-save register
cmpo 1,0 # clear cy
diff --git a/mpn/i960/addmul_1.s b/mpn/i960/addmul_1.s
index db53f64e2..b3962c1ca 100644
--- a/mpn/i960/addmul_1.s
+++ b/mpn/i960/addmul_1.s
@@ -1,4 +1,4 @@
-# I960 __mpn_addmul_1 -- Multiply a limb vector with a limb and add
+# I960 __gmpn_addmul_1 -- Multiply a limb vector with a limb and add
# the result to a second limb vector.
# Copyright (C) 1995 Free Software Foundation, Inc.
@@ -22,8 +22,8 @@
.text
.align 4
- .globl ___mpn_mul_1
-___mpn_mul_1:
+ .globl ___gmpn_mul_1
+___gmpn_mul_1:
subo g2,0,g2
shlo 2,g2,g4
subo g4,g1,g1
diff --git a/mpn/i960/mul_1.s b/mpn/i960/mul_1.s
index 4ccaeabc4..543278c0b 100644
--- a/mpn/i960/mul_1.s
+++ b/mpn/i960/mul_1.s
@@ -1,4 +1,4 @@
-# I960 __mpn_mul_1 -- Multiply a limb vector with a limb and store
+# I960 __gmpn_mul_1 -- Multiply a limb vector with a limb and store
# the result in a second limb vector.
# Copyright (C) 1995 Free Software Foundation, Inc.
@@ -22,8 +22,8 @@
.text
.align 4
- .globl ___mpn_mul_1
-___mpn_mul_1:
+ .globl ___gmpn_mul_1
+___gmpn_mul_1:
subo g2,0,g2
shlo 2,g2,g4
subo g4,g1,g1
diff --git a/mpn/i960/sub_n.s b/mpn/i960/sub_n.s
index 01b94a172..278a4aa3d 100644
--- a/mpn/i960/sub_n.s
+++ b/mpn/i960/sub_n.s
@@ -1,4 +1,4 @@
-# I960 __mpn_sub_n -- Subtract two limb vectors of the same length > 0 and
+# I960 __gmpn_sub_n -- Subtract two limb vectors of the same length > 0 and
# store difference in a third limb vector.
# Copyright (C) 1995 Free Software Foundation, Inc.
@@ -22,8 +22,8 @@
.text
.align 4
- .globl ___mpn_sub_n
-___mpn_sub_n:
+ .globl ___gmpn_sub_n
+___gmpn_sub_n:
mov 1,g6 # set carry-save register
cmpo 1,0 # clear cy
diff --git a/mpn/m68k/add_n.S b/mpn/m68k/add_n.S
index f14167092..a7318d298 100644
--- a/mpn/m68k/add_n.S
+++ b/mpn/m68k/add_n.S
@@ -1,4 +1,4 @@
-/* mc68020 __mpn_add_n -- Add two limb vectors of the same length > 0 and store
+/* mc68020 __gmpn_add_n -- Add two limb vectors of the same length > 0 and store
sum in a third limb vector.
Copyright (C) 1992, 1994, 1996, 1999 Free Software Foundation, Inc.
@@ -32,10 +32,10 @@ MA 02111-1307, USA. */
TEXT
ALIGN
- GLOBL C_SYMBOL_NAME(__mpn_add_n)
+ GLOBL C_SYMBOL_NAME(__gmpn_add_n)
-C_SYMBOL_NAME(__mpn_add_n:)
-PROLOG(__mpn_add_n)
+C_SYMBOL_NAME(__gmpn_add_n:)
+PROLOG(__gmpn_add_n)
/* Save used registers on the stack. */
movel R(d2),MEM_PREDEC(sp)
movel R(a2),MEM_PREDEC(sp)
@@ -76,4 +76,4 @@ L(L2:)
movel MEM_POSTINC(sp),R(d2)
rts
-EPILOG(__mpn_add_n)
+EPILOG(__gmpn_add_n)
diff --git a/mpn/m68k/lshift.S b/mpn/m68k/lshift.S
index fcd2fb3fd..5c02d0d0a 100644
--- a/mpn/m68k/lshift.S
+++ b/mpn/m68k/lshift.S
@@ -1,4 +1,4 @@
-/* mc68020 __mpn_lshift -- Shift left a low-level natural-number integer.
+/* mc68020 __gmpn_lshift -- Shift left a low-level natural-number integer.
Copyright (C) 1996, 1999 Free Software Foundation, Inc.
@@ -36,10 +36,10 @@ MA 02111-1307, USA. */
TEXT
ALIGN
- GLOBL C_SYMBOL_NAME(__mpn_lshift)
+ GLOBL C_SYMBOL_NAME(__gmpn_lshift)
-C_SYMBOL_NAME(__mpn_lshift:)
-PROLOG(__mpn_lshift)
+C_SYMBOL_NAME(__gmpn_lshift:)
+PROLOG(__gmpn_lshift)
/* Save used registers on the stack. */
moveml R(d2)-R(d6)/R(a2),MEM_PREDEC(sp)
@@ -147,4 +147,4 @@ L(LLend:)
/* Restore used registers from stack frame. */
moveml MEM_POSTINC(sp),R(d2)-R(d6)/R(a2)
rts
-EPILOG(__mpn_lshift)
+EPILOG(__gmpn_lshift)
diff --git a/mpn/m68k/mc68020/addmul_1.S b/mpn/m68k/mc68020/addmul_1.S
index c59ab5614..408d354a3 100644
--- a/mpn/m68k/mc68020/addmul_1.S
+++ b/mpn/m68k/mc68020/addmul_1.S
@@ -1,4 +1,4 @@
-/* mc68020 __mpn_addmul_1 -- Multiply a limb vector with a limb and add
+/* mc68020 __gmpn_addmul_1 -- Multiply a limb vector with a limb and add
the result to a second limb vector.
Copyright (C) 1992, 1994, 1996, 1999 Free Software Foundation, Inc.
@@ -32,10 +32,10 @@ MA 02111-1307, USA. */
TEXT
ALIGN
- GLOBL C_SYMBOL_NAME(__mpn_addmul_1)
+ GLOBL C_SYMBOL_NAME(__gmpn_addmul_1)
-C_SYMBOL_NAME(__mpn_addmul_1:)
-PROLOG(__mpn_addmul_1)
+C_SYMBOL_NAME(__gmpn_addmul_1:)
+PROLOG(__gmpn_addmul_1)
#define res_ptr a0
#define s1_ptr a1
@@ -80,4 +80,4 @@ L(L1:) movel MEM_POSTINC(s1_ptr),R(d3)
moveml MEM_POSTINC(sp),R(d2)-R(d5)
rts
-EPILOG(__mpn_addmul_1)
+EPILOG(__gmpn_addmul_1)
diff --git a/mpn/m68k/mc68020/mul_1.S b/mpn/m68k/mc68020/mul_1.S
index cf05bb407..dce2a47bf 100644
--- a/mpn/m68k/mc68020/mul_1.S
+++ b/mpn/m68k/mc68020/mul_1.S
@@ -1,4 +1,4 @@
-/* mc68020 __mpn_mul_1 -- Multiply a limb vector with a limb and store
+/* mc68020 __gmpn_mul_1 -- Multiply a limb vector with a limb and store
the result in a second limb vector.
Copyright (C) 1992, 1994, 1996, 1999 Free Software Foundation, Inc.
@@ -32,10 +32,10 @@ MA 02111-1307, USA. */
TEXT
ALIGN
- GLOBL C_SYMBOL_NAME(__mpn_mul_1)
+ GLOBL C_SYMBOL_NAME(__gmpn_mul_1)
-C_SYMBOL_NAME(__mpn_mul_1:)
-PROLOG(__mpn_mul_1)
+C_SYMBOL_NAME(__gmpn_mul_1:)
+PROLOG(__gmpn_mul_1)
#define res_ptr a0
#define s1_ptr a1
@@ -87,4 +87,4 @@ L(L1:) movel MEM_POSTINC(s1_ptr),R(d3)
movel MEM_POSTINC(sp),R(d2)
#endif
rts
-EPILOG(__mpn_mul_1)
+EPILOG(__gmpn_mul_1)
diff --git a/mpn/m68k/mc68020/submul_1.S b/mpn/m68k/mc68020/submul_1.S
index d2d1b1462..2f1ddc28b 100644
--- a/mpn/m68k/mc68020/submul_1.S
+++ b/mpn/m68k/mc68020/submul_1.S
@@ -1,4 +1,4 @@
-/* mc68020 __mpn_submul_1 -- Multiply a limb vector with a limb and subtract
+/* mc68020 __gmpn_submul_1 -- Multiply a limb vector with a limb and subtract
the result from a second limb vector.
Copyright (C) 1992, 1994, 1996, 1999 Free Software Foundation, Inc.
@@ -32,10 +32,10 @@ MA 02111-1307, USA. */
TEXT
ALIGN
- GLOBL C_SYMBOL_NAME(__mpn_submul_1)
+ GLOBL C_SYMBOL_NAME(__gmpn_submul_1)
-C_SYMBOL_NAME(__mpn_submul_1:)
-PROLOG(__mpn_submul_1)
+C_SYMBOL_NAME(__gmpn_submul_1:)
+PROLOG(__gmpn_submul_1)
#define res_ptr a0
#define s1_ptr a1
@@ -80,4 +80,4 @@ L(L1:) movel MEM_POSTINC(s1_ptr),R(d3)
moveml MEM_POSTINC(sp),R(d2)-R(d5)
rts
-EPILOG(__mpn_submul_1)
+EPILOG(__gmpn_submul_1)
diff --git a/mpn/m68k/rshift.S b/mpn/m68k/rshift.S
index 23ceb7c0b..188ab4c01 100644
--- a/mpn/m68k/rshift.S
+++ b/mpn/m68k/rshift.S
@@ -1,4 +1,4 @@
-/* mc68020 __mpn_rshift -- Shift right a low-level natural-number integer.
+/* mc68020 __gmpn_rshift -- Shift right a low-level natural-number integer.
Copyright (C) 1996, 1999 Free Software Foundation, Inc.
@@ -36,10 +36,10 @@ MA 02111-1307, USA. */
TEXT
ALIGN
- GLOBL C_SYMBOL_NAME(__mpn_rshift)
+ GLOBL C_SYMBOL_NAME(__gmpn_rshift)
-C_SYMBOL_NAME(__mpn_rshift:)
-PROLOG(__mpn_rshift)
+C_SYMBOL_NAME(__gmpn_rshift:)
+PROLOG(__gmpn_rshift)
/* Save used registers on the stack. */
moveml R(d2)-R(d6)/R(a2),MEM_PREDEC(sp)
@@ -146,4 +146,4 @@ L(LLend:)
/* Restore used registers from stack frame. */
moveml MEM_POSTINC(sp),R(d2)-R(d6)/R(a2)
rts
-EPILOG(__mpn_rshift)
+EPILOG(__gmpn_rshift)
diff --git a/mpn/m68k/sub_n.S b/mpn/m68k/sub_n.S
index 468179073..eea9b8555 100644
--- a/mpn/m68k/sub_n.S
+++ b/mpn/m68k/sub_n.S
@@ -1,4 +1,4 @@
-/* mc68020 __mpn_sub_n -- Subtract two limb vectors of the same length > 0 and
+/* mc68020 __gmpn_sub_n -- Subtract two limb vectors of the same length > 0 and
store difference in a third limb vector.
Copyright (C) 1992, 1994, 1996, 1999 Free Software Foundation, Inc.
@@ -32,10 +32,10 @@ MA 02111-1307, USA. */
TEXT
ALIGN
- GLOBL C_SYMBOL_NAME(__mpn_sub_n)
+ GLOBL C_SYMBOL_NAME(__gmpn_sub_n)
-C_SYMBOL_NAME(__mpn_sub_n:)
-PROLOG(__mpn_sub_n)
+C_SYMBOL_NAME(__gmpn_sub_n:)
+PROLOG(__gmpn_sub_n)
/* Save used registers on the stack. */
movel R(d2),MEM_PREDEC(sp)
movel R(a2),MEM_PREDEC(sp)
@@ -76,4 +76,4 @@ L(L2:)
movel MEM_POSTINC(sp),R(d2)
rts
-EPILOG(__mpn_sub_n)
+EPILOG(__gmpn_sub_n)
diff --git a/mpn/m88k/add_n.s b/mpn/m88k/add_n.s
index 1b09ccef8..19dafa75c 100644
--- a/mpn/m88k/add_n.s
+++ b/mpn/m88k/add_n.s
@@ -1,4 +1,4 @@
-; mc88100 __mpn_add -- Add two limb vectors of the same length > 0 and store
+; mc88100 __gmpn_add -- Add two limb vectors of the same length > 0 and store
; sum in a third limb vector.
; Copyright (C) 1992, 1994, 1995 Free Software Foundation, Inc.
@@ -36,8 +36,8 @@
text
align 16
- global ___mpn_add_n
-___mpn_add_n:
+ global ___gmpn_add_n
+___gmpn_add_n:
ld r6,r3,0 ; read first limb from s1_ptr
extu r10,r5,3
ld r7,r4,0 ; read first limb from s2_ptr
diff --git a/mpn/m88k/mc88110/add_n.S b/mpn/m88k/mc88110/add_n.S
index 39a44e557..649273f6b 100644
--- a/mpn/m88k/mc88110/add_n.S
+++ b/mpn/m88k/mc88110/add_n.S
@@ -1,4 +1,4 @@
-; mc88110 __mpn_add_n -- Add two limb vectors of the same length > 0 and store
+; mc88110 __gmpn_add_n -- Add two limb vectors of the same length > 0 and store
; sum in a third limb vector.
; Copyright (C) 1995, 1996 Free Software Foundation, Inc.
@@ -31,8 +31,8 @@
text
align 16
- global C_SYMBOL_NAME(__mpn_add_n)
-C_SYMBOL_NAME(__mpn_add_n):
+ global C_SYMBOL_NAME(__gmpn_add_n)
+C_SYMBOL_NAME(__gmpn_add_n):
addu.co r0,r0,r0 ; clear cy flag
xor r12,s2_ptr,res_ptr
bb1 2,r12,L1
diff --git a/mpn/m88k/mc88110/addmul_1.s b/mpn/m88k/mc88110/addmul_1.s
index 2bd6f21af..d8dfa17c2 100644
--- a/mpn/m88k/mc88110/addmul_1.s
+++ b/mpn/m88k/mc88110/addmul_1.s
@@ -1,4 +1,4 @@
-; mc88110 __mpn_addmul_1 -- Multiply a limb vector with a single limb and
+; mc88110 __gmpn_addmul_1 -- Multiply a limb vector with a single limb and
; store the product in a second limb vector.
; Copyright (C) 1996 Free Software Foundation, Inc.
@@ -29,8 +29,8 @@
text
align 16
- global ___mpn_addmul_1
-___mpn_addmul_1:
+ global ___gmpn_addmul_1
+___gmpn_addmul_1:
lda r3,r3[r4]
lda r8,r2[r4] ; RES_PTR in r8 since r2 is retval
subu r4,r0,r4
diff --git a/mpn/m88k/mc88110/mul_1.s b/mpn/m88k/mc88110/mul_1.s
index 151890060..bfac61cde 100644
--- a/mpn/m88k/mc88110/mul_1.s
+++ b/mpn/m88k/mc88110/mul_1.s
@@ -1,4 +1,4 @@
-; mc88110 __mpn_mul_1 -- Multiply a limb vector with a single limb and
+; mc88110 __gmpn_mul_1 -- Multiply a limb vector with a single limb and
; store the product in a second limb vector.
; Copyright (C) 1992, 1994, 1995 Free Software Foundation, Inc.
@@ -29,8 +29,8 @@
text
align 16
- global ___mpn_mul_1
-___mpn_mul_1:
+ global ___gmpn_mul_1
+___gmpn_mul_1:
; Make S1_PTR and RES_PTR point at the end of their blocks
; and negate SIZE.
lda r3,r3[r4]
diff --git a/mpn/m88k/mc88110/sub_n.S b/mpn/m88k/mc88110/sub_n.S
index 685f024fd..1bff7f76e 100644
--- a/mpn/m88k/mc88110/sub_n.S
+++ b/mpn/m88k/mc88110/sub_n.S
@@ -1,4 +1,4 @@
-; mc88110 __mpn_sub_n -- Subtract two limb vectors of the same length > 0 and
+; mc88110 __gmpn_sub_n -- Subtract two limb vectors of the same length > 0 and
; store difference in a third limb vector.
; Copyright (C) 1995, 1996 Free Software Foundation, Inc.
@@ -31,8 +31,8 @@
text
align 16
- global C_SYMBOL_NAME(__mpn_sub_n)
-C_SYMBOL_NAME(__mpn_sub_n):
+ global C_SYMBOL_NAME(__gmpn_sub_n)
+C_SYMBOL_NAME(__gmpn_sub_n):
subu.co r0,r0,r0 ; set cy flag
xor r12,s2_ptr,res_ptr
bb1 2,r12,L1
diff --git a/mpn/m88k/mul_1.s b/mpn/m88k/mul_1.s
index 26626bf95..bd1bff2b8 100644
--- a/mpn/m88k/mul_1.s
+++ b/mpn/m88k/mul_1.s
@@ -1,4 +1,4 @@
-; mc88100 __mpn_mul_1 -- Multiply a limb vector with a single limb and
+; mc88100 __gmpn_mul_1 -- Multiply a limb vector with a single limb and
; store the product in a second limb vector.
; Copyright (C) 1992, 1994, 1995 Free Software Foundation, Inc.
@@ -50,8 +50,8 @@
text
align 16
- global ___mpn_mul_1
-___mpn_mul_1:
+ global ___gmpn_mul_1
+___gmpn_mul_1:
; Make S1_PTR and RES_PTR point at the end of their blocks
; and negate SIZE.
diff --git a/mpn/m88k/sub_n.s b/mpn/m88k/sub_n.s
index 7dfffc980..d6ffa0941 100644
--- a/mpn/m88k/sub_n.s
+++ b/mpn/m88k/sub_n.s
@@ -1,4 +1,4 @@
-; mc88100 __mpn_sub -- Subtract two limb vectors of the same length > 0 and
+; mc88100 __gmpn_sub_n -- Subtract two limb vectors of the same length > 0 and
; store difference in a third limb vector.
; Copyright (C) 1992, 1994, 1996 Free Software Foundation, Inc.
@@ -36,8 +36,8 @@
text
align 16
- global ___mpn_sub_n
-___mpn_sub_n:
+ global ___gmpn_sub_n
+___gmpn_sub_n:
ld r6,r3,0 ; read first limb from s1_ptr
extu r10,r5,3
ld r7,r4,0 ; read first limb from s2_ptr
diff --git a/mpn/sh/add_n.s b/mpn/sh/add_n.s
index c7c03ff7f..ed908433b 100644
--- a/mpn/sh/add_n.s
+++ b/mpn/sh/add_n.s
@@ -1,4 +1,4 @@
-! SH __mpn_add_n -- Add two limb vectors of the same length > 0 and store
+! SH __gmpn_add_n -- Add two limb vectors of the same length > 0 and store
! sum in a third limb vector.
! Copyright (C) 1995, 1997 Free Software Foundation, Inc.
@@ -29,8 +29,8 @@
.text
.align 2
- .global ___mpn_add_n
-___mpn_add_n:
+ .global ___gmpn_add_n
+___gmpn_add_n:
mov #0,r3 ! clear cy save reg
Loop: mov.l @r5+,r1
diff --git a/mpn/sh/sh2/addmul_1.s b/mpn/sh/sh2/addmul_1.s
index 19d81da3d..c91eaa1ba 100644
--- a/mpn/sh/sh2/addmul_1.s
+++ b/mpn/sh/sh2/addmul_1.s
@@ -1,4 +1,4 @@
-! SH2 __mpn_addmul_1 -- Multiply a limb vector with a limb and add
+! SH2 __gmpn_addmul_1 -- Multiply a limb vector with a limb and add
! the result to a second limb vector.
! Copyright (C) 1995 Free Software Foundation, Inc.
@@ -29,8 +29,8 @@
.text
.align 1
- .global ___mpn_addmul_1
-___mpn_addmul_1:
+ .global ___gmpn_addmul_1
+___gmpn_addmul_1:
mov #0,r2 ! cy_limb = 0
mov #0,r0 ! Keep r0 = 0 for entire loop
clrt
diff --git a/mpn/sh/sh2/mul_1.s b/mpn/sh/sh2/mul_1.s
index 7ca275671..15dd4a317 100644
--- a/mpn/sh/sh2/mul_1.s
+++ b/mpn/sh/sh2/mul_1.s
@@ -1,4 +1,4 @@
-! SH2 __mpn_mul_1 -- Multiply a limb vector with a limb and store
+! SH2 __gmpn_mul_1 -- Multiply a limb vector with a limb and store
! the result in a second limb vector.
! Copyright (C) 1995 Free Software Foundation, Inc.
@@ -29,8 +29,8 @@
.text
.align 1
- .global ___mpn_mul_1
-___mpn_mul_1:
+ .global ___gmpn_mul_1
+___gmpn_mul_1:
mov #0,r2 ! cy_limb = 0
mov #0,r0 ! Keep r0 = 0 for entire loop
clrt
diff --git a/mpn/sh/sh2/submul_1.s b/mpn/sh/sh2/submul_1.s
index 9ef380ced..53ff7ed81 100644
--- a/mpn/sh/sh2/submul_1.s
+++ b/mpn/sh/sh2/submul_1.s
@@ -1,4 +1,4 @@
-! SH2 __mpn_submul_1 -- Multiply a limb vector with a limb and subtract
+! SH2 __gmpn_submul_1 -- Multiply a limb vector with a limb and subtract
! the result from a second limb vector.
! Copyright (C) 1995 Free Software Foundation, Inc.
@@ -29,8 +29,8 @@
.text
.align 1
- .global ___mpn_submul_1
-___mpn_submul_1:
+ .global ___gmpn_submul_1
+___gmpn_submul_1:
mov #0,r2 ! cy_limb = 0
mov #0,r0 ! Keep r0 = 0 for entire loop
clrt
diff --git a/mpn/sh/sub_n.s b/mpn/sh/sub_n.s
index f52e30aee..f3c0ca921 100644
--- a/mpn/sh/sub_n.s
+++ b/mpn/sh/sub_n.s
@@ -1,4 +1,4 @@
-! SH __mpn_sub_n -- Subtract two limb vectors of the same length > 0 and store
+! SH __gmpn_sub_n -- Subtract two limb vectors of the same length > 0 and store
! difference in a third limb vector.
! Copyright (C) 1995, 1997 Free Software Foundation, Inc.
@@ -29,8 +29,8 @@
.text
.align 2
- .global ___mpn_sub_n
-___mpn_sub_n:
+ .global ___gmpn_sub_n
+___gmpn_sub_n:
mov #0,r3 ! clear cy save reg
Loop: mov.l @r5+,r1
diff --git a/mpn/sparc64/add_n.asm b/mpn/sparc64/add_n.asm
index ea9d8a9d8..ee625c36f 100644
--- a/mpn/sparc64/add_n.asm
+++ b/mpn/sparc64/add_n.asm
@@ -1,4 +1,4 @@
-! SPARC v9 __mpn_add_n -- Add two limb vectors of the same length > 0 and store
+! SPARC v9 __gmpn_add_n -- Add two limb vectors of the same length > 0 and store
! sum in a third limb vector.
! Copyright (C) 1999, 2000 Free Software Foundation, Inc.
diff --git a/mpn/sparc64/copyi.asm b/mpn/sparc64/copyi.asm
index da66401ef..81d96ac07 100644
--- a/mpn/sparc64/copyi.asm
+++ b/mpn/sparc64/copyi.asm
@@ -1,4 +1,4 @@
-! SPARC v9 __mpn_copy -- Copy a limb vector.
+! SPARC v9 __gmpn_copy -- Copy a limb vector.
! Copyright (C) 1999, 2000 Free Software Foundation, Inc.
diff --git a/mpn/sparc64/lshift.asm b/mpn/sparc64/lshift.asm
index 344b11e3d..5510c1b28 100644
--- a/mpn/sparc64/lshift.asm
+++ b/mpn/sparc64/lshift.asm
@@ -1,4 +1,4 @@
-! SPARC v9 __mpn_lshift --
+! SPARC v9 __gmpn_lshift --
! Copyright (C) 1996, 2000 Free Software Foundation, Inc.
diff --git a/mpn/sparc64/rshift.asm b/mpn/sparc64/rshift.asm
index 58d85fbbc..2dc087f71 100644
--- a/mpn/sparc64/rshift.asm
+++ b/mpn/sparc64/rshift.asm
@@ -1,4 +1,4 @@
-! SPARC v9 __mpn_rshift --
+! SPARC v9 __gmpn_rshift --
! Copyright (C) 1996, 2000 Free Software Foundation, Inc.
diff --git a/mpn/sparc64/sub_n.asm b/mpn/sparc64/sub_n.asm
index 0177042e9..3eaf5c12d 100644
--- a/mpn/sparc64/sub_n.asm
+++ b/mpn/sparc64/sub_n.asm
@@ -1,4 +1,4 @@
-! SPARC v9 __mpn_sub_n -- Subtract two limb vectors of the same length > 0 and
+! SPARC v9 __gmpn_sub_n -- Subtract two limb vectors of the same length > 0 and
! store difference in a third limb vector.
! Copyright (C) 1999, 2000 Free Software Foundation, Inc.
diff --git a/mpn/x86/add_n.S b/mpn/x86/add_n.S
index 862550598..7c547e438 100644
--- a/mpn/x86/add_n.S
+++ b/mpn/x86/add_n.S
@@ -1,4 +1,4 @@
-/* i80386 __mpn_add_n -- Add two limb vectors of the same length > 0 and store
+/* i80386 __gmpn_add_n -- Add two limb vectors of the same length > 0 and store
sum in a third limb vector.
Copyright (C) 1992, 1994, 1995, 1996, 1999, 2000 Free Software Foundation,
@@ -33,9 +33,9 @@ MA 02111-1307, USA. */
.text
ALIGN (3)
- .globl C_SYMBOL_NAME(__mpn_add_nc)
-C_SYMBOL_NAME(__mpn_add_nc:)
- PROLOGUE(C_SYMBOL_NAME(__mpn_add_nc))
+ .globl C_SYMBOL_NAME(__gmpn_add_nc)
+C_SYMBOL_NAME(__gmpn_add_nc:)
+ PROLOGUE(C_SYMBOL_NAME(__gmpn_add_nc))
pushl %edi
pushl %esi
@@ -77,9 +77,9 @@ L0a: leal (%eax,%eax,8),%eax
jmp *%eax /* jump into loop */
ALIGN (3)
- .globl C_SYMBOL_NAME(__mpn_add_n)
-C_SYMBOL_NAME(__mpn_add_n:)
- PROLOGUE(C_SYMBOL_NAME(__mpn_add_n))
+ .globl C_SYMBOL_NAME(__gmpn_add_n)
+C_SYMBOL_NAME(__gmpn_add_n:)
+ PROLOGUE(C_SYMBOL_NAME(__gmpn_add_n))
pushl %edi
pushl %esi
@@ -155,5 +155,5 @@ Loop: movl (%esi),%eax
popl %esi
popl %edi
ret
- EPILOGUE(C_SYMBOL_NAME(__mpn_add_nc))
- EPILOGUE(C_SYMBOL_NAME(__mpn_add_n))
+ EPILOGUE(C_SYMBOL_NAME(__gmpn_add_nc))
+ EPILOGUE(C_SYMBOL_NAME(__gmpn_add_n))
diff --git a/mpn/x86/addmul_1.S b/mpn/x86/addmul_1.S
index 5b808d3ee..0686066f9 100644
--- a/mpn/x86/addmul_1.S
+++ b/mpn/x86/addmul_1.S
@@ -1,4 +1,4 @@
-/* x86 __mpn_addmul_1 (for 386 and 486) -- Multiply a limb vector
+/* x86 __gmpn_addmul_1 (for 386 and 486) -- Multiply a limb vector
with a limb and add the result to a second limb vector.
Copyright (C) 1992, 1994, 1997, 1999, 2000 Free Software Foundation, Inc.
@@ -37,9 +37,9 @@ MA 02111-1307, USA. */
TEXT
ALIGN (3)
-PROLOGUE(C_SYMBOL_NAME(__mpn_addmul_1))
- GLOBL C_SYMBOL_NAME(__mpn_addmul_1)
-C_SYMBOL_NAME(__mpn_addmul_1:)
+PROLOGUE(C_SYMBOL_NAME(__gmpn_addmul_1))
+ GLOBL C_SYMBOL_NAME(__gmpn_addmul_1)
+C_SYMBOL_NAME(__gmpn_addmul_1:)
INSN1(push,l ,R(edi))
INSN1(push,l ,R(esi))
@@ -115,4 +115,4 @@ Lend: movl %ebx,%eax
INSN1(pop,l ,R(edi))
ret
-EPILOGUE(C_SYMBOL_NAME(__mpn_addmul_1))
+EPILOGUE(C_SYMBOL_NAME(__gmpn_addmul_1))
diff --git a/mpn/x86/lshift.S b/mpn/x86/lshift.S
index 329c60105..59fb39fe2 100644
--- a/mpn/x86/lshift.S
+++ b/mpn/x86/lshift.S
@@ -1,4 +1,4 @@
-/* i80386 __mpn_lshift --
+/* i80386 __gmpn_lshift --
Copyright (C) 1992, 1994, 1996, 1999, 2000 Free Software Foundation, Inc.
@@ -31,9 +31,9 @@ MA 02111-1307, USA. */
.text
ALIGN (3)
-PROLOGUE(C_SYMBOL_NAME(__mpn_lshift))
- .globl C_SYMBOL_NAME(__mpn_lshift)
-C_SYMBOL_NAME(__mpn_lshift:)
+PROLOGUE(C_SYMBOL_NAME(__gmpn_lshift))
+ .globl C_SYMBOL_NAME(__gmpn_lshift)
+C_SYMBOL_NAME(__gmpn_lshift:)
pushl %edi
pushl %esi
pushl %ebx
@@ -84,4 +84,4 @@ Lend: shll %cl,%ebx /* compute least significant limb */
popl %edi
ret
-EPILOGUE(C_SYMBOL_NAME(__mpn_lshift))
+EPILOGUE(C_SYMBOL_NAME(__gmpn_lshift))
diff --git a/mpn/x86/mul_1.S b/mpn/x86/mul_1.S
index 2eb541a3d..af637b1d8 100644
--- a/mpn/x86/mul_1.S
+++ b/mpn/x86/mul_1.S
@@ -1,4 +1,4 @@
-/* x86 __mpn_mul_1 (for 386, 486, and Pentium Pro) -- Multiply a limb vector
+/* x86 __gmpn_mul_1 (for 386, 486, and Pentium Pro) -- Multiply a limb vector
with a limb and store the result in a second limb vector.
Copyright (C) 1992, 1994, 1997, 1998, 1999, 2000 Free Software Foundation,
@@ -38,9 +38,9 @@ MA 02111-1307, USA. */
TEXT
ALIGN (3)
-PROLOGUE(C_SYMBOL_NAME(__mpn_mul_1))
- GLOBL C_SYMBOL_NAME(__mpn_mul_1)
-C_SYMBOL_NAME(__mpn_mul_1:)
+PROLOGUE(C_SYMBOL_NAME(__gmpn_mul_1))
+ GLOBL C_SYMBOL_NAME(__gmpn_mul_1)
+C_SYMBOL_NAME(__gmpn_mul_1:)
INSN1(push,l ,R(edi))
INSN1(push,l ,R(esi))
@@ -116,4 +116,4 @@ Lend: movl %ebx,%eax
INSN1(pop,l ,R(edi))
ret
-EPILOGUE(C_SYMBOL_NAME(__mpn_mul_1))
+EPILOGUE(C_SYMBOL_NAME(__gmpn_mul_1))
diff --git a/mpn/x86/mul_basecase.S b/mpn/x86/mul_basecase.S
index 753c260c5..61be79328 100644
--- a/mpn/x86/mul_basecase.S
+++ b/mpn/x86/mul_basecase.S
@@ -51,9 +51,9 @@ sp => saved reg
.text
ALIGN (3)
-PROLOGUE(C_SYMBOL_NAME(__mpn_mul_basecase))
- .globl C_SYMBOL_NAME(__mpn_mul_basecase)
-C_SYMBOL_NAME(__mpn_mul_basecase:)
+PROLOGUE(C_SYMBOL_NAME(__gmpn_mul_basecase))
+ .globl C_SYMBOL_NAME(__gmpn_mul_basecase)
+C_SYMBOL_NAME(__gmpn_mul_basecase:)
subl $8,%esp
pushl %esi
pushl %ebp
@@ -192,4 +192,4 @@ Ldone: movl %edx,4(%edi) /* store to rptr[1] */
addl $8,%esp
ret
-EPILOGUE(C_SYMBOL_NAME(__mpn_mul_basecase))
+EPILOGUE(C_SYMBOL_NAME(__gmpn_mul_basecase))
diff --git a/mpn/x86/pentium/add_n.S b/mpn/x86/pentium/add_n.S
index e20fec75a..e8df25ebc 100644
--- a/mpn/x86/pentium/add_n.S
+++ b/mpn/x86/pentium/add_n.S
@@ -1,4 +1,4 @@
-/* Pentium __mpn_add_n -- Add two limb vectors of the same length > 0 and store
+/* Pentium __gmpn_add_n -- Add two limb vectors of the same length > 0 and store
sum in a third limb vector.
Copyright (C) 1992, 1994, 1995, 1996, 1999, 2000 Free Software Foundation,
@@ -33,9 +33,9 @@ MA 02111-1307, USA. */
.text
ALIGN (3)
-PROLOGUE(C_SYMBOL_NAME(__mpn_add_nc))
- .globl C_SYMBOL_NAME(__mpn_add_nc)
-C_SYMBOL_NAME(__mpn_add_nc:)
+PROLOGUE(C_SYMBOL_NAME(__gmpn_add_nc))
+ .globl C_SYMBOL_NAME(__gmpn_add_nc)
+C_SYMBOL_NAME(__gmpn_add_nc:)
pushl %edi
pushl %esi
pushl %ebx
@@ -64,13 +64,13 @@ Lendgo: movl 36(%esp),%eax
shrl $1,%eax /* shift bit 0 into carry */
jmp Lend
-EPILOGUE(C_SYMBOL_NAME(__mpn_add_nc))
+EPILOGUE(C_SYMBOL_NAME(__gmpn_add_nc))
ALIGN (3)
-PROLOGUE(C_SYMBOL_NAME(__mpn_add_n))
- .globl C_SYMBOL_NAME(__mpn_add_n)
-C_SYMBOL_NAME(__mpn_add_n:)
+PROLOGUE(C_SYMBOL_NAME(__gmpn_add_n))
+ .globl C_SYMBOL_NAME(__gmpn_add_n)
+C_SYMBOL_NAME(__gmpn_add_n:)
pushl %edi
pushl %esi
pushl %ebx
@@ -165,4 +165,4 @@ Lend2:
popl %edi
ret
-EPILOGUE(C_SYMBOL_NAME(__mpn_add_n))
+EPILOGUE(C_SYMBOL_NAME(__gmpn_add_n))
diff --git a/mpn/x86/pentium/addmul_1.S b/mpn/x86/pentium/addmul_1.S
index cb8f9287c..4441d9b56 100644
--- a/mpn/x86/pentium/addmul_1.S
+++ b/mpn/x86/pentium/addmul_1.S
@@ -1,4 +1,4 @@
-/* Pentium __mpn_addmul_1 -- Multiply a limb vector with a limb and add
+/* Pentium __gmpn_addmul_1 -- Multiply a limb vector with a limb and add
the result to a second limb vector.
Copyright (C) 1992, 1994, 1996, 1999, 2000 Free Software Foundation, Inc.
@@ -37,9 +37,9 @@ MA 02111-1307, USA. */
TEXT
ALIGN (3)
-PROLOGUE(C_SYMBOL_NAME(__mpn_addmul_1))
- GLOBL C_SYMBOL_NAME(__mpn_addmul_1)
-C_SYMBOL_NAME(__mpn_addmul_1:)
+PROLOGUE(C_SYMBOL_NAME(__gmpn_addmul_1))
+ GLOBL C_SYMBOL_NAME(__gmpn_addmul_1)
+C_SYMBOL_NAME(__gmpn_addmul_1:)
INSN1(push,l ,R(edi))
INSN1(push,l ,R(esi))
@@ -82,4 +82,4 @@ Loop: INSN2(adc,l ,R(ebx),$0)
INSN1(pop,l ,R(edi))
ret
-EPILOGUE(C_SYMBOL_NAME(__mpn_addmul_1))
+EPILOGUE(C_SYMBOL_NAME(__gmpn_addmul_1))
diff --git a/mpn/x86/pentium/lshift.S b/mpn/x86/pentium/lshift.S
index ff44c3a79..028648519 100644
--- a/mpn/x86/pentium/lshift.S
+++ b/mpn/x86/pentium/lshift.S
@@ -1,4 +1,4 @@
-/* Pentium optimized __mpn_lshift --
+/* Pentium optimized __gmpn_lshift --
Copyright (C) 1992, 1994, 1995, 1996, 1999, 2000 Free Software Foundation,
Inc.
@@ -32,9 +32,9 @@ MA 02111-1307, USA. */
.text
ALIGN (3)
-PROLOGUE(C_SYMBOL_NAME(__mpn_lshift))
- .globl C_SYMBOL_NAME(__mpn_lshift)
-C_SYMBOL_NAME(__mpn_lshift:)
+PROLOGUE(C_SYMBOL_NAME(__gmpn_lshift))
+ .globl C_SYMBOL_NAME(__gmpn_lshift)
+C_SYMBOL_NAME(__gmpn_lshift:)
pushl %edi
pushl %esi
pushl %ebx
@@ -217,4 +217,4 @@ LL1: movl %edx,(%edi) /* store last limb */
popl %edi
ret
-EPILOGUE(C_SYMBOL_NAME(__mpn_lshift))
+EPILOGUE(C_SYMBOL_NAME(__gmpn_lshift))
diff --git a/mpn/x86/pentium/mul_1.S b/mpn/x86/pentium/mul_1.S
index bdc3a2304..8574ed100 100644
--- a/mpn/x86/pentium/mul_1.S
+++ b/mpn/x86/pentium/mul_1.S
@@ -1,4 +1,4 @@
-/* Pentium __mpn_mul_1 -- Multiply a limb vector with a limb and store
+/* Pentium __gmpn_mul_1 -- Multiply a limb vector with a limb and store
the result in a second limb vector.
Copyright (C) 1992, 1994, 1996, 1999, 2000 Free Software Foundation, Inc.
@@ -37,9 +37,9 @@ MA 02111-1307, USA. */
TEXT
ALIGN (3)
-PROLOGUE(C_SYMBOL_NAME(__mpn_mul_1))
- GLOBL C_SYMBOL_NAME(__mpn_mul_1)
-C_SYMBOL_NAME(__mpn_mul_1:)
+PROLOGUE(C_SYMBOL_NAME(__gmpn_mul_1))
+ GLOBL C_SYMBOL_NAME(__gmpn_mul_1)
+C_SYMBOL_NAME(__gmpn_mul_1:)
INSN1(push,l ,R(edi))
INSN1(push,l ,R(esi))
@@ -78,4 +78,4 @@ Loop: INSN2(adc,l ,R(ebx),$0)
INSN1(pop,l ,R(edi))
ret
-EPILOGUE(C_SYMBOL_NAME(__mpn_mul_1))
+EPILOGUE(C_SYMBOL_NAME(__gmpn_mul_1))
diff --git a/mpn/x86/pentium/mul_basecase.S b/mpn/x86/pentium/mul_basecase.S
index 8e8464a30..1d9970ec0 100644
--- a/mpn/x86/pentium/mul_basecase.S
+++ b/mpn/x86/pentium/mul_basecase.S
@@ -38,9 +38,9 @@ sp => saved reg
.text
ALIGN (3)
-PROLOGUE(C_SYMBOL_NAME(__mpn_mul_basecase))
- .globl C_SYMBOL_NAME(__mpn_mul_basecase)
-C_SYMBOL_NAME(__mpn_mul_basecase:)
+PROLOGUE(C_SYMBOL_NAME(__gmpn_mul_basecase))
+ .globl C_SYMBOL_NAME(__gmpn_mul_basecase)
+C_SYMBOL_NAME(__gmpn_mul_basecase:)
pushl %eax /* dummy push for allocating stack slot */
pushl %esi
pushl %ebp
@@ -123,4 +123,4 @@ Ldone: movl %edx,4(%edi) /* store to rptr[1] */
popl %eax /* dummy pop for deallocating stack slot */
ret
-EPILOGUE(C_SYMBOL_NAME(__mpn_mul_basecase))
+EPILOGUE(C_SYMBOL_NAME(__gmpn_mul_basecase))
diff --git a/mpn/x86/pentium/rshift.S b/mpn/x86/pentium/rshift.S
index ed5dcd30e..884548350 100644
--- a/mpn/x86/pentium/rshift.S
+++ b/mpn/x86/pentium/rshift.S
@@ -1,4 +1,4 @@
-/* Pentium optimized __mpn_rshift -- mpn right shift.
+/* Pentium optimized __gmpn_rshift -- mpn right shift.
Copyright (C) 1992, 1994, 1995, 1996, 1999, 2000 Free Software Foundation,
Inc.
@@ -32,9 +32,9 @@ MA 02111-1307, USA. */
.text
ALIGN (3)
-PROLOGUE(C_SYMBOL_NAME(__mpn_rshift))
- .globl C_SYMBOL_NAME(__mpn_rshift)
-C_SYMBOL_NAME(__mpn_rshift:)
+PROLOGUE(C_SYMBOL_NAME(__gmpn_rshift))
+ .globl C_SYMBOL_NAME(__gmpn_rshift)
+C_SYMBOL_NAME(__gmpn_rshift:)
pushl %edi
pushl %esi
pushl %ebx
@@ -217,4 +217,4 @@ LL1: movl %edx,(%edi) /* store last limb */
popl %edi
ret
-EPILOGUE(C_SYMBOL_NAME(__mpn_rshift))
+EPILOGUE(C_SYMBOL_NAME(__gmpn_rshift))
diff --git a/mpn/x86/pentium/sub_n.S b/mpn/x86/pentium/sub_n.S
index 91f76a70f..ad9c1ccb1 100644
--- a/mpn/x86/pentium/sub_n.S
+++ b/mpn/x86/pentium/sub_n.S
@@ -1,4 +1,4 @@
-/* Pentium __mpn_sub_n -- Subtract two limb vectors of the same length > 0
+/* Pentium __gmpn_sub_n -- Subtract two limb vectors of the same length > 0
and store difference in a third limb vector.
Copyright (C) 1992, 1994, 1995, 1996, 1999, 2000 Free Software Foundation,
@@ -33,9 +33,9 @@ MA 02111-1307, USA. */
.text
ALIGN (3)
-PROLOGUE(C_SYMBOL_NAME(__mpn_sub_nc))
- .globl C_SYMBOL_NAME(__mpn_sub_nc)
-C_SYMBOL_NAME(__mpn_sub_nc:)
+PROLOGUE(C_SYMBOL_NAME(__gmpn_sub_nc))
+ .globl C_SYMBOL_NAME(__gmpn_sub_nc)
+C_SYMBOL_NAME(__gmpn_sub_nc:)
pushl %edi
pushl %esi
pushl %ebx
@@ -64,13 +64,13 @@ Lendgo: movl 36(%esp),%eax
shrl $1,%eax /* shift bit 0 into carry */
jmp Lend
-EPILOGUE(C_SYMBOL_NAME(__mpn_sub_nc))
+EPILOGUE(C_SYMBOL_NAME(__gmpn_sub_nc))
ALIGN (3)
-PROLOGUE(C_SYMBOL_NAME(__mpn_sub_n))
- .globl C_SYMBOL_NAME(__mpn_sub_n)
-C_SYMBOL_NAME(__mpn_sub_n:)
+PROLOGUE(C_SYMBOL_NAME(__gmpn_sub_n))
+ .globl C_SYMBOL_NAME(__gmpn_sub_n)
+C_SYMBOL_NAME(__gmpn_sub_n:)
pushl %edi
pushl %esi
pushl %ebx
@@ -165,4 +165,4 @@ Lend2:
popl %edi
ret
-EPILOGUE(C_SYMBOL_NAME(__mpn_sub_n))
+EPILOGUE(C_SYMBOL_NAME(__gmpn_sub_n))
diff --git a/mpn/x86/pentium/submul_1.S b/mpn/x86/pentium/submul_1.S
index 05aade979..17547b773 100644
--- a/mpn/x86/pentium/submul_1.S
+++ b/mpn/x86/pentium/submul_1.S
@@ -1,4 +1,4 @@
-/* Pentium __mpn_submul_1 -- Multiply a limb vector with a limb and subtract
+/* Pentium __gmpn_submul_1 -- Multiply a limb vector with a limb and subtract
the result from a second limb vector.
Copyright (C) 1992, 1994, 1996, 1999, 2000 Free Software Foundation, Inc.
@@ -37,9 +37,9 @@ MA 02111-1307, USA. */
TEXT
ALIGN (3)
-PROLOGUE(C_SYMBOL_NAME(__mpn_submul_1))
- GLOBL C_SYMBOL_NAME(__mpn_submul_1)
-C_SYMBOL_NAME(__mpn_submul_1:)
+PROLOGUE(C_SYMBOL_NAME(__gmpn_submul_1))
+ GLOBL C_SYMBOL_NAME(__gmpn_submul_1)
+C_SYMBOL_NAME(__gmpn_submul_1:)
INSN1(push,l ,R(edi))
INSN1(push,l ,R(esi))
@@ -82,4 +82,4 @@ Loop: INSN2(adc,l ,R(ebx),$0)
INSN1(pop,l ,R(edi))
ret
-EPILOGUE(C_SYMBOL_NAME(__mpn_submul_1))
+EPILOGUE(C_SYMBOL_NAME(__gmpn_submul_1))
diff --git a/mpn/x86/rshift.S b/mpn/x86/rshift.S
index 0da4bfda9..ea03fe5c1 100644
--- a/mpn/x86/rshift.S
+++ b/mpn/x86/rshift.S
@@ -1,4 +1,4 @@
-/* i80386 __mpn_rshift --
+/* i80386 __gmpn_rshift --
Copyright (C) 1992, 1994, 1996, 1999, 2000 Free Software Foundation, Inc.
@@ -31,9 +31,9 @@ MA 02111-1307, USA. */
.text
ALIGN (3)
-PROLOGUE(C_SYMBOL_NAME(__mpn_rshift))
- .globl C_SYMBOL_NAME(__mpn_rshift)
-C_SYMBOL_NAME(__mpn_rshift:)
+PROLOGUE(C_SYMBOL_NAME(__gmpn_rshift))
+ .globl C_SYMBOL_NAME(__gmpn_rshift)
+C_SYMBOL_NAME(__gmpn_rshift:)
pushl %edi
pushl %esi
pushl %ebx
@@ -86,4 +86,4 @@ Lend: shrl %cl,%ebx /* compute most significant limb */
popl %edi
ret
-EPILOGUE(C_SYMBOL_NAME(__mpn_rshift))
+EPILOGUE(C_SYMBOL_NAME(__gmpn_rshift))
diff --git a/mpn/x86/sub_n.S b/mpn/x86/sub_n.S
index 52a101961..d5e4c6804 100644
--- a/mpn/x86/sub_n.S
+++ b/mpn/x86/sub_n.S
@@ -1,4 +1,4 @@
-/* i80386 __mpn_sub_n -- Add two limb vectors of the same length > 0 and store
+/* i80386 __gmpn_sub_n -- Add two limb vectors of the same length > 0 and store
sum in a third limb vector.
Copyright (C) 1992, 1994, 1995, 1996, 1999, 2000 Free Software Foundation,
@@ -33,9 +33,9 @@ MA 02111-1307, USA. */
.text
ALIGN (3)
- .globl C_SYMBOL_NAME(__mpn_sub_nc)
-C_SYMBOL_NAME(__mpn_sub_nc:)
- PROLOGUE(C_SYMBOL_NAME(__mpn_sub_nc))
+ .globl C_SYMBOL_NAME(__gmpn_sub_nc)
+C_SYMBOL_NAME(__gmpn_sub_nc:)
+ PROLOGUE(C_SYMBOL_NAME(__gmpn_sub_nc))
pushl %edi
pushl %esi
@@ -77,9 +77,9 @@ L0a: leal (%eax,%eax,8),%eax
jmp *%eax /* jump into loop */
ALIGN (3)
- .globl C_SYMBOL_NAME(__mpn_sub_n)
-C_SYMBOL_NAME(__mpn_sub_n:)
- PROLOGUE(C_SYMBOL_NAME(__mpn_sub_n))
+ .globl C_SYMBOL_NAME(__gmpn_sub_n)
+C_SYMBOL_NAME(__gmpn_sub_n:)
+ PROLOGUE(C_SYMBOL_NAME(__gmpn_sub_n))
pushl %edi
pushl %esi
@@ -155,5 +155,5 @@ Loop: movl (%esi),%eax
popl %esi
popl %edi
ret
- EPILOGUE(C_SYMBOL_NAME(__mpn_sub_nc))
- EPILOGUE(C_SYMBOL_NAME(__mpn_sub_n))
+ EPILOGUE(C_SYMBOL_NAME(__gmpn_sub_nc))
+ EPILOGUE(C_SYMBOL_NAME(__gmpn_sub_n))
diff --git a/mpn/x86/submul_1.S b/mpn/x86/submul_1.S
index 280e7906b..7d3f6b454 100644
--- a/mpn/x86/submul_1.S
+++ b/mpn/x86/submul_1.S
@@ -1,4 +1,4 @@
-/* x86 __mpn_submul_1 (for 386 and 486) -- Multiply a limb vector
+/* x86 __gmpn_submul_1 (for 386 and 486) -- Multiply a limb vector
with a limb and store the result in a second limb vector.
Copyright (C) 1992, 1994, 1997, 1999, 2000 Free Software Foundation, Inc.
@@ -37,9 +37,9 @@ MA 02111-1307, USA. */
TEXT
ALIGN (3)
-PROLOGUE(C_SYMBOL_NAME(__mpn_submul_1))
- GLOBL C_SYMBOL_NAME(__mpn_submul_1)
-C_SYMBOL_NAME(__mpn_submul_1:)
+PROLOGUE(C_SYMBOL_NAME(__gmpn_submul_1))
+ GLOBL C_SYMBOL_NAME(__gmpn_submul_1)
+C_SYMBOL_NAME(__gmpn_submul_1:)
INSN1(push,l ,R(edi))
INSN1(push,l ,R(esi))
@@ -115,4 +115,4 @@ Lend: movl %ebx,%eax
INSN1(pop,l ,R(edi))
ret
-EPILOGUE(C_SYMBOL_NAME(__mpn_submul_1))
+EPILOGUE(C_SYMBOL_NAME(__gmpn_submul_1))
diff --git a/mpn/z8000/add_n.s b/mpn/z8000/add_n.s
index a50fc3ef5..538184fa0 100644
--- a/mpn/z8000/add_n.s
+++ b/mpn/z8000/add_n.s
@@ -1,4 +1,4 @@
-! Z8000 __mpn_add_n -- Add two limb vectors of equal, non-zero length.
+! Z8000 __gmpn_add_n -- Add two limb vectors of equal, non-zero length.
! Copyright (C) 1993, 1994 Free Software Foundation, Inc.
@@ -33,8 +33,8 @@
unseg
.text
even
- global ___mpn_add_n
-___mpn_add_n:
+ global ___gmpn_add_n
+___gmpn_add_n:
pop r0,@r6
pop r1,@r5
add r0,r1
diff --git a/mpn/z8000/mul_1.s b/mpn/z8000/mul_1.s
index f1126b5ab..1664a81d9 100644
--- a/mpn/z8000/mul_1.s
+++ b/mpn/z8000/mul_1.s
@@ -1,4 +1,4 @@
-! Z8000 __mpn_mul_1 -- Multiply a limb vector with a limb and store
+! Z8000 __gmpn_mul_1 -- Multiply a limb vector with a limb and store
! the result in a second limb vector.
! Copyright (C) 1993, 1994, 1995 Free Software Foundation, Inc.
@@ -30,8 +30,8 @@
unseg
.text
even
- global ___mpn_mul_1
-___mpn_mul_1:
+ global ___gmpn_mul_1
+___gmpn_mul_1:
sub r2,r2 ! zero carry limb
and r4,r4
jr mi,Lneg