From 92a17b1594132bf5f8c7f9782d85ad5b5bb2eba8 Mon Sep 17 00:00:00 2001
From: Kevin Ryde
Date: Sun, 16 Apr 2000 01:20:01 +0200
Subject: Change __mpn to __gmpn.

---
 mpn/mips2/add_n.s    | 10 +++++-----
 mpn/mips2/addmul_1.s | 10 +++++-----
 mpn/mips2/lshift.s   | 10 +++++-----
 mpn/mips2/mul_1.s    | 10 +++++-----
 mpn/mips2/rshift.s   | 10 +++++-----
 mpn/mips2/sub_n.s    | 10 +++++-----
 mpn/mips2/submul_1.s | 10 +++++-----
 mpn/mips3/add_n.s    | 10 +++++-----
 mpn/mips3/addmul_1.s | 10 +++++-----
 mpn/mips3/lshift.s   | 10 +++++-----
 mpn/mips3/mul_1.s    | 10 +++++-----
 mpn/mips3/rshift.s   | 10 +++++-----
 mpn/mips3/sub_n.s    | 10 +++++-----
 mpn/mips3/submul_1.s | 10 +++++-----
 mpn/ns32k/add_n.s    |  6 +++---
 mpn/ns32k/addmul_1.s |  6 +++---
 mpn/ns32k/mul_1.s    |  6 +++---
 mpn/ns32k/sub_n.s    |  6 +++---
 mpn/ns32k/submul_1.s |  6 +++---
 mpn/pa64/add_n.s     |  6 +++---
 mpn/pa64/addmul_1.S  |  6 +++---
 mpn/pa64/lshift.s    |  6 +++---
 mpn/pa64/mul_1.S     |  6 +++---
 mpn/pa64/rshift.s    |  6 +++---
 mpn/pa64/sub_n.s     |  6 +++---
 mpn/pa64/submul_1.S  |  6 +++---
 mpn/power/add_n.s    | 14 +++++++-------
 mpn/power/addmul_1.s | 14 +++++++-------
 mpn/power/lshift.s   | 14 +++++++-------
 mpn/power/mul_1.s    | 14 +++++++-------
 mpn/power/rshift.s   | 14 +++++++-------
 mpn/power/sub_n.s    | 14 +++++++-------
 mpn/power/submul_1.s | 16 ++++++++--------
 mpn/pyr/add_n.s      |  6 +++---
 mpn/pyr/addmul_1.s   |  6 +++---
 mpn/pyr/mul_1.s      |  6 +++---
 mpn/pyr/sub_n.s      |  6 +++---
 mpn/tests/copy.c     |  4 ++--
 mpn/thumb/add_n.s    |  6 +++---
 mpn/thumb/sub_n.s    |  6 +++---
 mpn/vax/add_n.s      |  6 +++---
 mpn/vax/addmul_1.s   |  6 +++---
 mpn/vax/lshift.s     |  6 +++---
 mpn/vax/mul_1.s      |  6 +++---
 mpn/vax/rshift.s     |  6 +++---
 mpn/vax/sub_n.s      |  6 +++---
 mpn/vax/submul_1.s   |  6 +++---
 47 files changed, 197 insertions(+), 197 deletions(-)

diff --git a/mpn/mips2/add_n.s b/mpn/mips2/add_n.s
index f5525cec4..214e8bf4a 100644
--- a/mpn/mips2/add_n.s
+++ b/mpn/mips2/add_n.s
@@ -1,4 +1,4 @@
- # MIPS2 __mpn_add_n -- Add two limb vectors of the same length > 0 and
+ # MIPS2 __gmpn_add_n -- Add two limb vectors of the same length > 0 and
 # store sum in a third limb vector.

 # Copyright (C) 1995 Free Software Foundation, Inc.
@@ -29,9 +29,9 @@
 .text
 .align 2
- .globl __mpn_add_n
- .ent __mpn_add_n
-__mpn_add_n:
+ .globl __gmpn_add_n
+ .ent __gmpn_add_n
+__gmpn_add_n:
 .set noreorder
 .set nomacro
@@ -117,4 +117,4 @@ __mpn_add_n:
 j $31
 or $2,$2,$8
- .end __mpn_add_n
+ .end __gmpn_add_n
diff --git a/mpn/mips2/addmul_1.s b/mpn/mips2/addmul_1.s
index 6145771e3..8500e2d04 100644
--- a/mpn/mips2/addmul_1.s
+++ b/mpn/mips2/addmul_1.s
@@ -1,4 +1,4 @@
- # MIPS __mpn_addmul_1 -- Multiply a limb vector with a single limb and
+ # MIPS __gmpn_addmul_1 -- Multiply a limb vector with a single limb and
 # add the product to a second limb vector.

 # Copyright (C) 1992, 1994, 1996 Free Software Foundation, Inc.
@@ -29,9 +29,9 @@
 .text
 .align 4
- .globl __mpn_addmul_1
- .ent __mpn_addmul_1
-__mpn_addmul_1:
+ .globl __gmpn_addmul_1
+ .ent __gmpn_addmul_1
+__gmpn_addmul_1:
 .set noreorder
 .set nomacro
@@ -94,4 +94,4 @@ $LC0:
 lw $10,0($4)
 j $31
 addu $2,$9,$2 # add high product limb and carry from addition
- .end __mpn_addmul_1
+ .end __gmpn_addmul_1
diff --git a/mpn/mips2/lshift.s b/mpn/mips2/lshift.s
index ee92d7916..4fc874777 100644
--- a/mpn/mips2/lshift.s
+++ b/mpn/mips2/lshift.s
@@ -1,4 +1,4 @@
- # MIPS2 __mpn_lshift --
+ # MIPS2 __gmpn_lshift --

 # Copyright (C) 1995 Free Software Foundation, Inc.
@@ -28,9 +28,9 @@
 .text
 .align 2
- .globl __mpn_lshift
- .ent __mpn_lshift
-__mpn_lshift:
+ .globl __gmpn_lshift
+ .ent __gmpn_lshift
+__gmpn_lshift:
 .set noreorder
 .set nomacro
@@ -92,4 +92,4 @@ __mpn_lshift:
 .Lend: sll $8,$10,$7
 j $31
 sw $8,-4($4)
- .end __mpn_lshift
+ .end __gmpn_lshift
diff --git a/mpn/mips2/mul_1.s b/mpn/mips2/mul_1.s
index d006fa122..d9feaaf82 100644
--- a/mpn/mips2/mul_1.s
+++ b/mpn/mips2/mul_1.s
@@ -1,4 +1,4 @@
- # MIPS __mpn_mul_1 -- Multiply a limb vector with a single limb and
+ # MIPS __gmpn_mul_1 -- Multiply a limb vector with a single limb and
 # store the product in a second limb vector.

 # Copyright (C) 1992, 1994, 1996 Free Software Foundation, Inc.
@@ -29,9 +29,9 @@
 .text
 .align 4
- .globl __mpn_mul_1
- .ent __mpn_mul_1
-__mpn_mul_1:
+ .globl __gmpn_mul_1
+ .ent __gmpn_mul_1
+__gmpn_mul_1:
 .set noreorder
 .set nomacro
@@ -82,4 +82,4 @@ $LC0:
 mflo $10
 j $31
 addu $2,$9,$2 # add high product limb and carry from addition
- .end __mpn_mul_1
+ .end __gmpn_mul_1
diff --git a/mpn/mips2/rshift.s b/mpn/mips2/rshift.s
index a8beb4057..93bf739f3 100644
--- a/mpn/mips2/rshift.s
+++ b/mpn/mips2/rshift.s
@@ -1,4 +1,4 @@
- # MIPS2 __mpn_rshift --
+ # MIPS2 __gmpn_rshift --

 # Copyright (C) 1995 Free Software Foundation, Inc.
@@ -28,9 +28,9 @@
 .text
 .align 2
- .globl __mpn_rshift
- .ent __mpn_rshift
-__mpn_rshift:
+ .globl __gmpn_rshift
+ .ent __gmpn_rshift
+__gmpn_rshift:
 .set noreorder
 .set nomacro
@@ -89,4 +89,4 @@ __mpn_rshift:
 .Lend: srl $8,$10,$7
 j $31
 sw $8,0($4)
- .end __mpn_rshift
+ .end __gmpn_rshift
diff --git a/mpn/mips2/sub_n.s b/mpn/mips2/sub_n.s
index 3368ef29d..3f51c3627 100644
--- a/mpn/mips2/sub_n.s
+++ b/mpn/mips2/sub_n.s
@@ -1,4 +1,4 @@
- # MIPS2 __mpn_sub_n -- Subtract two limb vectors of the same length > 0 and
+ # MIPS2 __gmpn_sub_n -- Subtract two limb vectors of the same length > 0 and
 # store difference in a third limb vector.

 # Copyright (C) 1995 Free Software Foundation, Inc.
@@ -29,9 +29,9 @@
 .text
 .align 2
- .globl __mpn_sub_n
- .ent __mpn_sub_n
-__mpn_sub_n:
+ .globl __gmpn_sub_n
+ .ent __gmpn_sub_n
+__gmpn_sub_n:
 .set noreorder
 .set nomacro
@@ -117,4 +117,4 @@ __mpn_sub_n:
 j $31
 or $2,$2,$8
- .end __mpn_sub_n
+ .end __gmpn_sub_n
diff --git a/mpn/mips2/submul_1.s b/mpn/mips2/submul_1.s
index 1324b6609..41ff1e523 100644
--- a/mpn/mips2/submul_1.s
+++ b/mpn/mips2/submul_1.s
@@ -1,4 +1,4 @@
- # MIPS __mpn_submul_1 -- Multiply a limb vector with a single limb and
+ # MIPS __gmpn_submul_1 -- Multiply a limb vector with a single limb and
 # subtract the product from a second limb vector.

 # Copyright (C) 1992, 1994, 1996 Free Software Foundation, Inc.
@@ -29,9 +29,9 @@
 .text
 .align 4
- .globl __mpn_submul_1
- .ent __mpn_submul_1
-__mpn_submul_1:
+ .globl __gmpn_submul_1
+ .ent __gmpn_submul_1
+__gmpn_submul_1:
 .set noreorder
 .set nomacro
@@ -94,4 +94,4 @@ $LC0:
 lw $10,0($4)
 j $31
 addu $2,$9,$2 # add high product limb and carry from addition
- .end __mpn_submul_1
+ .end __gmpn_submul_1
diff --git a/mpn/mips3/add_n.s b/mpn/mips3/add_n.s
index 996a449eb..ab60c1072 100644
--- a/mpn/mips3/add_n.s
+++ b/mpn/mips3/add_n.s
@@ -1,4 +1,4 @@
- # MIPS3 __mpn_add_n -- Add two limb vectors of the same length > 0 and
+ # MIPS3 __gmpn_add_n -- Add two limb vectors of the same length > 0 and
 # store sum in a third limb vector.

 # Copyright (C) 1995 Free Software Foundation, Inc.
@@ -29,9 +29,9 @@
 .text
 .align 2
- .globl __mpn_add_n
- .ent __mpn_add_n
-__mpn_add_n:
+ .globl __gmpn_add_n
+ .ent __gmpn_add_n
+__gmpn_add_n:
 .set noreorder
 .set nomacro
@@ -117,4 +117,4 @@ __mpn_add_n:
 j $31
 or $2,$2,$8
- .end __mpn_add_n
+ .end __gmpn_add_n
diff --git a/mpn/mips3/addmul_1.s b/mpn/mips3/addmul_1.s
index cd75c1801..2ecf830fe 100644
--- a/mpn/mips3/addmul_1.s
+++ b/mpn/mips3/addmul_1.s
@@ -1,4 +1,4 @@
- # MIPS3 __mpn_addmul_1 -- Multiply a limb vector with a single limb and
+ # MIPS3 __gmpn_addmul_1 -- Multiply a limb vector with a single limb and
 # add the product to a second limb vector.

 # Copyright (C) 1992, 1994, 1995 Free Software Foundation, Inc.
@@ -29,9 +29,9 @@
 .text
 .align 4
- .globl __mpn_addmul_1
- .ent __mpn_addmul_1
-__mpn_addmul_1:
+ .globl __gmpn_addmul_1
+ .ent __gmpn_addmul_1
+__gmpn_addmul_1:
 .set noreorder
 .set nomacro
@@ -94,4 +94,4 @@ $LC0:
 ld $10,0($4)
 j $31
 daddu $2,$9,$2 # add high product limb and carry from addition
- .end __mpn_addmul_1
+ .end __gmpn_addmul_1
diff --git a/mpn/mips3/lshift.s b/mpn/mips3/lshift.s
index 324a6020c..5acee84d2 100644
--- a/mpn/mips3/lshift.s
+++ b/mpn/mips3/lshift.s
@@ -1,4 +1,4 @@
- # MIPS3 __mpn_lshift --
+ # MIPS3 __gmpn_lshift --

 # Copyright (C) 1995 Free Software Foundation, Inc.
@@ -28,9 +28,9 @@
 .text
 .align 2
- .globl __mpn_lshift
- .ent __mpn_lshift
-__mpn_lshift:
+ .globl __gmpn_lshift
+ .ent __gmpn_lshift
+__gmpn_lshift:
 .set noreorder
 .set nomacro
@@ -92,4 +92,4 @@ __mpn_lshift:
 .Lend: dsll $8,$10,$7
 j $31
 sd $8,-8($4)
- .end __mpn_lshift
+ .end __gmpn_lshift
diff --git a/mpn/mips3/mul_1.s b/mpn/mips3/mul_1.s
index 281d0574a..9d71c469b 100644
--- a/mpn/mips3/mul_1.s
+++ b/mpn/mips3/mul_1.s
@@ -1,4 +1,4 @@
- # MIPS3 __mpn_mul_1 -- Multiply a limb vector with a single limb and
+ # MIPS3 __gmpn_mul_1 -- Multiply a limb vector with a single limb and
 # store the product in a second limb vector.

 # Copyright (C) 1992, 1994, 1995 Free Software Foundation, Inc.
@@ -29,9 +29,9 @@
 .text
 .align 4
- .globl __mpn_mul_1
- .ent __mpn_mul_1
-__mpn_mul_1:
+ .globl __gmpn_mul_1
+ .ent __gmpn_mul_1
+__gmpn_mul_1:
 .set noreorder
 .set nomacro
@@ -82,4 +82,4 @@ $LC0:
 mflo $10
 j $31
 daddu $2,$9,$2 # add high product limb and carry from addition
- .end __mpn_mul_1
+ .end __gmpn_mul_1
diff --git a/mpn/mips3/rshift.s b/mpn/mips3/rshift.s
index 9920e1a9e..1568969b4 100644
--- a/mpn/mips3/rshift.s
+++ b/mpn/mips3/rshift.s
@@ -1,4 +1,4 @@
- # MIPS3 __mpn_rshift --
+ # MIPS3 __gmpn_rshift --

 # Copyright (C) 1995 Free Software Foundation, Inc.
@@ -28,9 +28,9 @@
 .text
 .align 2
- .globl __mpn_rshift
- .ent __mpn_rshift
-__mpn_rshift:
+ .globl __gmpn_rshift
+ .ent __gmpn_rshift
+__gmpn_rshift:
 .set noreorder
 .set nomacro
@@ -89,4 +89,4 @@ __mpn_rshift:
 .Lend: dsrl $8,$10,$7
 j $31
 sd $8,0($4)
- .end __mpn_rshift
+ .end __gmpn_rshift
diff --git a/mpn/mips3/sub_n.s b/mpn/mips3/sub_n.s
index 56c77d8bc..366b3b014 100644
--- a/mpn/mips3/sub_n.s
+++ b/mpn/mips3/sub_n.s
@@ -1,4 +1,4 @@
- # MIPS3 __mpn_sub_n -- Subtract two limb vectors of the same length > 0 and
+ # MIPS3 __gmpn_sub_n -- Subtract two limb vectors of the same length > 0 and
 # store difference in a third limb vector.

 # Copyright (C) 1995 Free Software Foundation, Inc.
@@ -29,9 +29,9 @@
 .text
 .align 2
- .globl __mpn_sub_n
- .ent __mpn_sub_n
-__mpn_sub_n:
+ .globl __gmpn_sub_n
+ .ent __gmpn_sub_n
+__gmpn_sub_n:
 .set noreorder
 .set nomacro
@@ -117,4 +117,4 @@ __mpn_sub_n:
 j $31
 or $2,$2,$8
- .end __mpn_sub_n
+ .end __gmpn_sub_n
diff --git a/mpn/mips3/submul_1.s b/mpn/mips3/submul_1.s
index a9c9fa251..bfd93b2ef 100644
--- a/mpn/mips3/submul_1.s
+++ b/mpn/mips3/submul_1.s
@@ -1,4 +1,4 @@
- # MIPS3 __mpn_submul_1 -- Multiply a limb vector with a single limb and
+ # MIPS3 __gmpn_submul_1 -- Multiply a limb vector with a single limb and
 # subtract the product from a second limb vector.

 # Copyright (C) 1992, 1994, 1995 Free Software Foundation, Inc.
@@ -29,9 +29,9 @@
 .text
 .align 4
- .globl __mpn_submul_1
- .ent __mpn_submul_1
-__mpn_submul_1:
+ .globl __gmpn_submul_1
+ .ent __gmpn_submul_1
+__gmpn_submul_1:
 .set noreorder
 .set nomacro
@@ -94,4 +94,4 @@ $LC0:
 ld $10,0($4)
 j $31
 daddu $2,$9,$2 # add high product limb and carry from addition
- .end __mpn_submul_1
+ .end __gmpn_submul_1
diff --git a/mpn/ns32k/add_n.s b/mpn/ns32k/add_n.s
index dde2e15b5..9a38fdc21 100644
--- a/mpn/ns32k/add_n.s
+++ b/mpn/ns32k/add_n.s
@@ -1,4 +1,4 @@
-# ns32000 __mpn_add_n -- Add two limb vectors of the same length > 0 and store
+# ns32000 __gmpn_add_n -- Add two limb vectors of the same length > 0 and store
 # sum in a third limb vector.

 # Copyright (C) 1992, 1994 Free Software Foundation, Inc.
@@ -22,8 +22,8 @@
 .align 1
-.globl ___mpn_add_n
-___mpn_add_n:
+.globl ___gmpn_add_n
+___gmpn_add_n:
 save [r3,r4,r5]
 negd 28(sp),r3
 movd r3,r0
diff --git a/mpn/ns32k/addmul_1.s b/mpn/ns32k/addmul_1.s
index 205bfe3b3..96995144a 100644
--- a/mpn/ns32k/addmul_1.s
+++ b/mpn/ns32k/addmul_1.s
@@ -1,4 +1,4 @@
-# ns32000 __mpn_addmul_1 -- Multiply a limb vector with a limb and add
+# ns32000 __gmpn_addmul_1 -- Multiply a limb vector with a limb and add
 # the result to a second limb vector.

 # Copyright (C) 1992, 1994 Free Software Foundation, Inc.
@@ -22,8 +22,8 @@
 .align 1
-.globl ___mpn_addmul_1
-___mpn_addmul_1:
+.globl ___gmpn_addmul_1
+___gmpn_addmul_1:
 save [r3,r4,r5,r6,r7]
 negd 24(sp),r4
 movd r4,r0
diff --git a/mpn/ns32k/mul_1.s b/mpn/ns32k/mul_1.s
index 64e4abbba..50c9b9b3b 100644
--- a/mpn/ns32k/mul_1.s
+++ b/mpn/ns32k/mul_1.s
@@ -1,4 +1,4 @@
-# ns32000 __mpn_mul_1 -- Multiply a limb vector with a limb and store
+# ns32000 __gmpn_mul_1 -- Multiply a limb vector with a limb and store
 # the result in a second limb vector.

 # Copyright (C) 1992, 1994 Free Software Foundation, Inc.
@@ -22,8 +22,8 @@
 .align 1
-.globl ___mpn_mul_1
-___mpn_mul_1:
+.globl ___gmpn_mul_1
+___gmpn_mul_1:
 save [r3,r4,r5,r6,r7]
 negd 24(sp),r4
 movd r4,r0
diff --git a/mpn/ns32k/sub_n.s b/mpn/ns32k/sub_n.s
index ef6c889c5..46909f50d 100644
--- a/mpn/ns32k/sub_n.s
+++ b/mpn/ns32k/sub_n.s
@@ -1,4 +1,4 @@
-# ns32000 __mpn_sub_n -- Subtract two limb vectors of the same length > 0 and
+# ns32000 __gmpn_sub_n -- Subtract two limb vectors of the same length > 0 and
 # store difference in a third limb vector.

 # Copyright (C) 1992, 1994 Free Software Foundation, Inc.
@@ -22,8 +22,8 @@
 .align 1
-.globl ___mpn_sub_n
-___mpn_sub_n:
+.globl ___gmpn_sub_n
+___gmpn_sub_n:
 save [r3,r4,r5]
 negd 28(sp),r3
 movd r3,r0
diff --git a/mpn/ns32k/submul_1.s b/mpn/ns32k/submul_1.s
index 509309532..62168daa7 100644
--- a/mpn/ns32k/submul_1.s
+++ b/mpn/ns32k/submul_1.s
@@ -1,4 +1,4 @@
-# ns32000 __mpn_submul_1 -- Multiply a limb vector with a limb and subtract
+# ns32000 __gmpn_submul_1 -- Multiply a limb vector with a limb and subtract
 # the result from a second limb vector.

 # Copyright (C) 1992, 1994 Free Software Foundation, Inc.
@@ -22,8 +22,8 @@
 .align 1
-.globl ___mpn_submul_1
-___mpn_submul_1:
+.globl ___gmpn_submul_1
+___gmpn_submul_1:
 save [r3,r4,r5,r6,r7]
 negd 24(sp),r4
 movd r4,r0
diff --git a/mpn/pa64/add_n.s b/mpn/pa64/add_n.s
index 0bb1ab051..3d3e467d0 100644
--- a/mpn/pa64/add_n.s
+++ b/mpn/pa64/add_n.s
@@ -1,4 +1,4 @@
-; HP-PA 2.0 __mpn_add_n -- Add two limb vectors of the same length > 0 and
+; HP-PA 2.0 __gmpn_add_n -- Add two limb vectors of the same length > 0 and
 ; store sum in a third limb vector.

 ; Copyright (C) 1997 Free Software Foundation, Inc.
@@ -31,8 +31,8 @@
 .level 2.0n
 .code
- .export __mpn_add_n
-__mpn_add_n
+ .export __gmpn_add_n
+__gmpn_add_n
 .proc
 .callinfo frame=0,args_saved
 .entry
diff --git a/mpn/pa64/addmul_1.S b/mpn/pa64/addmul_1.S
index 956561240..a174d4f86 100644
--- a/mpn/pa64/addmul_1.S
+++ b/mpn/pa64/addmul_1.S
@@ -1,4 +1,4 @@
-; HP-PA 2.0 64-bit __mpn_addmul_1 -- Multiply a limb vector with a limb and
+; HP-PA 2.0 64-bit __gmpn_addmul_1 -- Multiply a limb vector with a limb and
 ; add the result to a second limb vector.

 ; Copyright (C) 1998, 1999 Free Software Foundation, Inc.
@@ -43,8 +43,8 @@
 #define t4 %r31
 .level 2.0n
 .code
- .export __mpn_addmul_1
-__mpn_addmul_1
+ .export __gmpn_addmul_1
+__gmpn_addmul_1
 .proc
 .callinfo frame=128,no_calls
 .entry
diff --git a/mpn/pa64/lshift.s b/mpn/pa64/lshift.s
index eaefec8c2..3a126624d 100644
--- a/mpn/pa64/lshift.s
+++ b/mpn/pa64/lshift.s
@@ -1,4 +1,4 @@
-; HP-PA 2.0 __mpn_lshift --
+; HP-PA 2.0 __gmpn_lshift --

 ; Copyright (C) 1997 Free Software Foundation, Inc.
@@ -30,8 +30,8 @@
 .level 2.0n
 .code
- .export __mpn_lshift
-__mpn_lshift
+ .export __gmpn_lshift
+__gmpn_lshift
 .proc
 .callinfo frame=0,args_saved
 .entry
diff --git a/mpn/pa64/mul_1.S b/mpn/pa64/mul_1.S
index 20d0d8718..b670a71ae 100644
--- a/mpn/pa64/mul_1.S
+++ b/mpn/pa64/mul_1.S
@@ -1,4 +1,4 @@
-; HP-PA 2.0 64-bit __mpn_mul_1 -- Multiply a limb vector with a limb and
+; HP-PA 2.0 64-bit __gmpn_mul_1 -- Multiply a limb vector with a limb and
 ; store the result in a second limb vector.

 ; Copyright (C) 1998, 1999 Free Software Foundation, Inc.
@@ -43,8 +43,8 @@
 #define t4 %r31
 .level 2.0n
 .code
- .export __mpn_mul_1
-__mpn_mul_1
+ .export __gmpn_mul_1
+__gmpn_mul_1
 .proc
 .callinfo frame=128,no_calls
 .entry
diff --git a/mpn/pa64/rshift.s b/mpn/pa64/rshift.s
index 5ff956b4c..904ba920a 100644
--- a/mpn/pa64/rshift.s
+++ b/mpn/pa64/rshift.s
@@ -1,4 +1,4 @@
-; HP-PA 2.0 __mpn_rshift --
+; HP-PA 2.0 __gmpn_rshift --

 ; Copyright (C) 1997 Free Software Foundation, Inc.
@@ -30,8 +30,8 @@
 .level 2.0n
 .code
- .export __mpn_rshift
-__mpn_rshift
+ .export __gmpn_rshift
+__gmpn_rshift
 .proc
 .callinfo frame=0,args_saved
 .entry
diff --git a/mpn/pa64/sub_n.s b/mpn/pa64/sub_n.s
index 23a0462d0..59babf342 100644
--- a/mpn/pa64/sub_n.s
+++ b/mpn/pa64/sub_n.s
@@ -1,4 +1,4 @@
-; HP-PA 2.0 __mpn_sub_n -- Subtract two limb vectors of the same length > 0
+; HP-PA 2.0 __gmpn_sub_n -- Subtract two limb vectors of the same length > 0
 ; and store difference in a third limb vector.

 ; Copyright (C) 1997 Free Software Foundation, Inc.
@@ -31,8 +31,8 @@
 .level 2.0n
 .code
- .export __mpn_sub_n
-__mpn_sub_n
+ .export __gmpn_sub_n
+__gmpn_sub_n
 .proc
 .callinfo frame=0,args_saved
 .entry
diff --git a/mpn/pa64/submul_1.S b/mpn/pa64/submul_1.S
index bf875eedc..2a9402f22 100644
--- a/mpn/pa64/submul_1.S
+++ b/mpn/pa64/submul_1.S
@@ -1,4 +1,4 @@
-; HP-PA 2.0 64-bit __mpn_submul_1 -- Multiply a limb vector with a limb and
+; HP-PA 2.0 64-bit __gmpn_submul_1 -- Multiply a limb vector with a limb and
 ; subtract the result from a second limb vector.

 ; Copyright (C) 1998, 1999 Free Software Foundation, Inc.
@@ -43,8 +43,8 @@
 #define t4 %r31
 .level 2.0n
 .code
- .export __mpn_submul_1
-__mpn_submul_1
+ .export __gmpn_submul_1
+__gmpn_submul_1
 .proc
 .callinfo frame=128,no_calls
 .entry
diff --git a/mpn/power/add_n.s b/mpn/power/add_n.s
index c9ec184aa..bad45c6d9 100644
--- a/mpn/power/add_n.s
+++ b/mpn/power/add_n.s
@@ -1,4 +1,4 @@
-# IBM POWER __mpn_add_n -- Add two limb vectors of equal, non-zero length.
+# IBM POWER __gmpn_add_n -- Add two limb vectors of equal, non-zero length.

 # Copyright (C) 1992, 1994, 1995, 1996, 1999 Free Software Foundation, Inc.
@@ -27,14 +27,14 @@
 # size r6
 .toc
- .globl __mpn_add_n
- .globl .__mpn_add_n
- .csect __mpn_add_n[DS]
-__mpn_add_n:
- .long .__mpn_add_n, TOC[tc0], 0
+ .globl __gmpn_add_n
+ .globl .__gmpn_add_n
+ .csect __gmpn_add_n[DS]
+__gmpn_add_n:
+ .long .__gmpn_add_n, TOC[tc0], 0
 .csect .text[PR]
 .align 2
-.__mpn_add_n:
+.__gmpn_add_n:
 andil. 10,6,1 # odd or even number of limbs?
 l 8,0(4) # load least significant s1 limb
 l 0,0(5) # load least significant s2 limb
diff --git a/mpn/power/addmul_1.s b/mpn/power/addmul_1.s
index 4a3945882..b7744c0a9 100644
--- a/mpn/power/addmul_1.s
+++ b/mpn/power/addmul_1.s
@@ -1,4 +1,4 @@
-# IBM POWER __mpn_addmul_1 -- Multiply a limb vector with a limb and add
+# IBM POWER __gmpn_addmul_1 -- Multiply a limb vector with a limb and add
 # the result to a second limb vector.

 # Copyright (C) 1992, 1994, 1999 Free Software Foundation, Inc.
@@ -38,14 +38,14 @@
 # we can branch in zero cycles, so that's how we perform the additions.
 .toc
- .globl __mpn_addmul_1
- .globl .__mpn_addmul_1
- .csect __mpn_addmul_1[DS]
-__mpn_addmul_1:
- .long .__mpn_addmul_1[PR], TOC[tc0], 0
+ .globl __gmpn_addmul_1
+ .globl .__gmpn_addmul_1
+ .csect __gmpn_addmul_1[DS]
+__gmpn_addmul_1:
+ .long .__gmpn_addmul_1[PR], TOC[tc0], 0
 .csect .text[PR]
 .align 2
-.__mpn_addmul_1:
+.__gmpn_addmul_1:
 cal 3,-4(3)
 l 0,0(4)
diff --git a/mpn/power/lshift.s b/mpn/power/lshift.s
index c11f6d283..b0b27cb48 100644
--- a/mpn/power/lshift.s
+++ b/mpn/power/lshift.s
@@ -1,4 +1,4 @@
-# IBM POWER __mpn_lshift --
+# IBM POWER __gmpn_lshift --

 # Copyright (C) 1992, 1994, 1999 Free Software Foundation, Inc.
@@ -27,14 +27,14 @@
 # cnt r6
 .toc
- .globl __mpn_lshift
- .globl .__mpn_lshift
- .csect __mpn_lshift[DS]
-__mpn_lshift:
- .long .__mpn_lshift, TOC[tc0], 0
+ .globl __gmpn_lshift
+ .globl .__gmpn_lshift
+ .csect __gmpn_lshift[DS]
+__gmpn_lshift:
+ .long .__gmpn_lshift, TOC[tc0], 0
 .csect .text[PR]
 .align 2
-.__mpn_lshift:
+.__gmpn_lshift:
 sli 0,5,2
 cax 9,3,0
 cax 4,4,0
diff --git a/mpn/power/mul_1.s b/mpn/power/mul_1.s
index 097a41278..827ead0f0 100644
--- a/mpn/power/mul_1.s
+++ b/mpn/power/mul_1.s
@@ -1,4 +1,4 @@
-# IBM POWER __mpn_mul_1 -- Multiply a limb vector with a limb and store
+# IBM POWER __gmpn_mul_1 -- Multiply a limb vector with a limb and store
 # the result in a second limb vector.

 # Copyright (C) 1992, 1994, 1999 Free Software Foundation, Inc.
@@ -38,14 +38,14 @@
 # we can branch in zero cycles, so that's how we perform the additions.
 .toc
- .globl __mpn_mul_1
- .globl .__mpn_mul_1
- .csect __mpn_mul_1[DS]
-__mpn_mul_1:
- .long .__mpn_mul_1[PR], TOC[tc0], 0
+ .globl __gmpn_mul_1
+ .globl .__gmpn_mul_1
+ .csect __gmpn_mul_1[DS]
+__gmpn_mul_1:
+ .long .__gmpn_mul_1[PR], TOC[tc0], 0
 .csect .text[PR]
 .align 2
-.__mpn_mul_1:
+.__gmpn_mul_1:
 cal 3,-4(3)
 l 0,0(4)
diff --git a/mpn/power/rshift.s b/mpn/power/rshift.s
index 42cf6b89b..0bd15bae2 100644
--- a/mpn/power/rshift.s
+++ b/mpn/power/rshift.s
@@ -1,4 +1,4 @@
-# IBM POWER __mpn_rshift --
+# IBM POWER __gmpn_rshift --

 # Copyright (C) 1992, 1994, 1999 Free Software Foundation, Inc.
@@ -27,14 +27,14 @@
 # cnt r6
 .toc
- .globl __mpn_rshift
- .globl .__mpn_rshift
- .csect __mpn_rshift[DS]
-__mpn_rshift:
- .long .__mpn_rshift, TOC[tc0], 0
+ .globl __gmpn_rshift
+ .globl .__gmpn_rshift
+ .csect __gmpn_rshift[DS]
+__gmpn_rshift:
+ .long .__gmpn_rshift, TOC[tc0], 0
 .csect .text[PR]
 .align 2
-.__mpn_rshift:
+.__gmpn_rshift:
 sfi 8,6,32
 mtctr 5 # put limb count in CTR loop register
 l 0,0(4) # read least significant limb
diff --git a/mpn/power/sub_n.s b/mpn/power/sub_n.s
index 3b5c8029f..6dfbb8877 100644
--- a/mpn/power/sub_n.s
+++ b/mpn/power/sub_n.s
@@ -1,4 +1,4 @@
-# IBM POWER __mpn_sub_n -- Subtract two limb vectors of equal, non-zero length.
+# IBM POWER __gmpn_sub_n -- Subtract two limb vectors of equal, non-zero length.

 # Copyright (C) 1992, 1994, 1995, 1996, 1999 Free Software Foundation, Inc.
@@ -27,14 +27,14 @@
 # size r6
 .toc
- .globl __mpn_sub_n
- .globl .__mpn_sub_n
- .csect __mpn_sub_n[DS]
-__mpn_sub_n:
- .long .__mpn_sub_n, TOC[tc0], 0
+ .globl __gmpn_sub_n
+ .globl .__gmpn_sub_n
+ .csect __gmpn_sub_n[DS]
+__gmpn_sub_n:
+ .long .__gmpn_sub_n, TOC[tc0], 0
 .csect .text[PR]
 .align 2
-.__mpn_sub_n:
+.__gmpn_sub_n:
 andil. 10,6,1 # odd or even number of limbs?
 l 8,0(4) # load least significant s1 limb
 l 0,0(5) # load least significant s2 limb
diff --git a/mpn/power/submul_1.s b/mpn/power/submul_1.s
index e1f73dc71..26083e4be 100644
--- a/mpn/power/submul_1.s
+++ b/mpn/power/submul_1.s
@@ -1,4 +1,4 @@
-# IBM POWER __mpn_submul_1 -- Multiply a limb vector with a limb and subtract
+# IBM POWER __gmpn_submul_1 -- Multiply a limb vector with a limb and subtract
 # the result from a second limb vector.

 # Copyright (C) 1992, 1994, 1999 Free Software Foundation, Inc.
@@ -38,14 +38,14 @@
 # we can branch in zero cycles, so that's how we perform the additions.
 .toc
- .globl __mpn_submul_1
- .globl .__mpn_submul_1
- .csect __mpn_submul_1[DS]
-__mpn_submul_1:
- .long .__mpn_submul_1[PR], TOC[tc0], 0
- .csect .__mpn_submul_1[PR]
+ .globl __gmpn_submul_1
+ .globl .__gmpn_submul_1
+ .csect __gmpn_submul_1[DS]
+__gmpn_submul_1:
+ .long .__gmpn_submul_1[PR], TOC[tc0], 0
+ .csect .__gmpn_submul_1[PR]
 .align 2
-.__mpn_submul_1:
+.__gmpn_submul_1:
 cal 3,-4(3)
 l 0,0(4)
diff --git a/mpn/pyr/add_n.s b/mpn/pyr/add_n.s
index 416c66020..bb3b92ae9 100644
--- a/mpn/pyr/add_n.s
+++ b/mpn/pyr/add_n.s
@@ -1,4 +1,4 @@
-# Pyramid __mpn_add_n -- Add two limb vectors of the same length > 0 and store
+# Pyramid __gmpn_add_n -- Add two limb vectors of the same length > 0 and store
 # sum in a third limb vector.

 # Copyright (C) 1995 Free Software Foundation, Inc.
@@ -22,8 +22,8 @@
 .text
 .align 2
-.globl ___mpn_add_n
-___mpn_add_n:
+.globl ___gmpn_add_n
+___gmpn_add_n:
 movw $-1,tr0 # representation for carry clear
 movw pr3,tr2
diff --git a/mpn/pyr/addmul_1.s b/mpn/pyr/addmul_1.s
index a1495cac8..7eb372395 100644
--- a/mpn/pyr/addmul_1.s
+++ b/mpn/pyr/addmul_1.s
@@ -1,4 +1,4 @@
-# Pyramid __mpn_addmul_1 -- Multiply a limb vector with a limb and add
+# Pyramid __gmpn_addmul_1 -- Multiply a limb vector with a limb and add
 # the result to a second limb vector.

 # Copyright (C) 1995 Free Software Foundation, Inc.
@@ -22,8 +22,8 @@
 .text
 .align 2
-.globl ___mpn_addmul_1
-___mpn_addmul_1:
+.globl ___gmpn_addmul_1
+___gmpn_addmul_1:
 mova (pr0)[pr2*4],pr0
 mova (pr1)[pr2*4],pr1
 mnegw pr2,pr2
diff --git a/mpn/pyr/mul_1.s b/mpn/pyr/mul_1.s
index e6b97910f..2c41bde46 100644
--- a/mpn/pyr/mul_1.s
+++ b/mpn/pyr/mul_1.s
@@ -1,4 +1,4 @@
-# Pyramid __mpn_mul_1 -- Multiply a limb vector with a limb and store
+# Pyramid __gmpn_mul_1 -- Multiply a limb vector with a limb and store
 # the result in a second limb vector.

 # Copyright (C) 1995 Free Software Foundation, Inc.
@@ -22,8 +22,8 @@
 .text
 .align 2
-.globl ___mpn_mul_1
-___mpn_mul_1:
+.globl ___gmpn_mul_1
+___gmpn_mul_1:
 mova (pr0)[pr2*4],pr0
 mova (pr1)[pr2*4],pr1
 mnegw pr2,pr2
diff --git a/mpn/pyr/sub_n.s b/mpn/pyr/sub_n.s
index 5664859cf..e8e3c028d 100644
--- a/mpn/pyr/sub_n.s
+++ b/mpn/pyr/sub_n.s
@@ -1,4 +1,4 @@
-# Pyramid __mpn_sub_n -- Subtract two limb vectors of the same length > 0 and
+# Pyramid __gmpn_sub_n -- Subtract two limb vectors of the same length > 0 and
 # store difference in a third limb vector.

 # Copyright (C) 1995 Free Software Foundation, Inc.
@@ -22,8 +22,8 @@
 .text
 .align 2
-.globl ___mpn_sub_n
-___mpn_sub_n:
+.globl ___gmpn_sub_n
+___gmpn_sub_n:
 movw $-1,tr0 # representation for carry clear
 movw pr3,tr2
diff --git a/mpn/tests/copy.c b/mpn/tests/copy.c
index dc1487bfb..2f0898d4b 100644
--- a/mpn/tests/copy.c
+++ b/mpn/tests/copy.c
@@ -151,7 +151,7 @@ main (argc, argv)
 t0 = cputime();
 for (i = 0; i < TIMES; i++)
- __mpn_copy (dx+1, s1, size);
+ __gmpn_copy (dx+1, s1, size);
 t = cputime() - t0;
 printf ("mpn_copy: %ldms (%.2f cycles/limb)\n",
 t, ((double) t * CLOCK) / (OPS * 1000.0));
@@ -172,7 +172,7 @@
 }
 refmpn_copy (dx+1, s1, size);
- __mpn_copy (dy+1, s1, size);
+ __gmpn_copy (dy+1, s1, size);
 #ifdef PRINT
 mpn_print (dx+1, size);
 mpn_print (dy+1, size);
diff --git a/mpn/thumb/add_n.s b/mpn/thumb/add_n.s
index 0642060cf..61907ef47 100644
--- a/mpn/thumb/add_n.s
+++ b/mpn/thumb/add_n.s
@@ -1,4 +1,4 @@
-@ ARM/Thumb __mpn_add -- Add two limb vectors of the same length > 0 and store
+@ ARM/Thumb __gmpn_add -- Add two limb vectors of the same length > 0 and store
 @ sum in a third limb vector.

 @ Copyright (C) 1997 Free Software Foundation, Inc.
@@ -32,8 +32,8 @@
 .text
 .thumb
 .align 0
- .global ___mpn_add_n
-___mpn_add_n:
+ .global ___gmpn_add_n
+___gmpn_add_n:
 push {r4, r5, r6, lr}
 mov r6, #1 @ init carry save register
diff --git a/mpn/thumb/sub_n.s b/mpn/thumb/sub_n.s
index ef2c90108..86ffe1ea8 100644
--- a/mpn/thumb/sub_n.s
+++ b/mpn/thumb/sub_n.s
@@ -1,4 +1,4 @@
-@ ARM/Thumb __mpn_sub -- Subtract two limb vectors of the same length > 0 and
+@ ARM/Thumb __gmpn_sub -- Subtract two limb vectors of the same length > 0 and
 @ store difference in a third limb vector.

 @ Copyright (C) 1997 Free Software Foundation, Inc.
@@ -32,8 +32,8 @@
 .text
 .thumb
 .align 0
- .global ___mpn_sub_n
-___mpn_sub_n:
+ .global ___gmpn_sub_n
+___gmpn_sub_n:
 push {r4, r5, r6, lr}
 mov r6, #1 @ init carry save register
diff --git a/mpn/vax/add_n.s b/mpn/vax/add_n.s
index e24e1b6fc..e640413bd 100644
--- a/mpn/vax/add_n.s
+++ b/mpn/vax/add_n.s
@@ -1,4 +1,4 @@
-# VAX __mpn_add_n -- Add two limb vectors of the same length > 0 and store
+# VAX __gmpn_add_n -- Add two limb vectors of the same length > 0 and store
 # sum in a third limb vector.

 # Copyright (C) 1999 Free Software Foundation, Inc.
@@ -29,8 +29,8 @@
 .text
 .align 1
-.globl ___mpn_add_n
-___mpn_add_n:
+.globl ___gmpn_add_n
+___gmpn_add_n:
 .word 0x0
 movl 16(ap),r0
 movl 12(ap),r1
diff --git a/mpn/vax/addmul_1.s b/mpn/vax/addmul_1.s
index 70cd0451c..38ff93d98 100644
--- a/mpn/vax/addmul_1.s
+++ b/mpn/vax/addmul_1.s
@@ -1,4 +1,4 @@
-# VAX __mpn_addmul_1 -- Multiply a limb vector with a limb and add
+# VAX __gmpn_addmul_1 -- Multiply a limb vector with a limb and add
 # the result to a second limb vector.

 # Copyright (C) 1992, 1994, 1996 Free Software Foundation, Inc.
@@ -29,8 +29,8 @@
 .text
 .align 1
-.globl ___mpn_addmul_1
-___mpn_addmul_1:
+.globl ___gmpn_addmul_1
+___gmpn_addmul_1:
 .word 0xfc0
 movl 12(ap),r4
 movl 8(ap),r8
diff --git a/mpn/vax/lshift.s b/mpn/vax/lshift.s
index 7b9942df9..b870fc7c1 100644
--- a/mpn/vax/lshift.s
+++ b/mpn/vax/lshift.s
@@ -1,4 +1,4 @@
-# VAX __mpn_lshift -- left shift.
+# VAX __gmpn_lshift -- left shift.

 # Copyright (C) 1999 Free Software Foundation, Inc.
@@ -30,8 +30,8 @@
 .text
 .align 1
-.globl ___mpn_lshift
-___mpn_lshift:
+.globl ___gmpn_lshift
+___gmpn_lshift:
 .word 0x1c0
 movl 4(ap),r7
 movl 8(ap),r6
diff --git a/mpn/vax/mul_1.s b/mpn/vax/mul_1.s
index 5a9b65b6e..b018e98a0 100644
--- a/mpn/vax/mul_1.s
+++ b/mpn/vax/mul_1.s
@@ -1,4 +1,4 @@
-# VAX __mpn_mul_1 -- Multiply a limb vector with a limb and store
+# VAX __gmpn_mul_1 -- Multiply a limb vector with a limb and store
 # the result in a second limb vector.

 # Copyright (C) 1992, 1994, 1996 Free Software Foundation, Inc.
@@ -29,8 +29,8 @@
 .text
 .align 1
-.globl ___mpn_mul_1
-___mpn_mul_1:
+.globl ___gmpn_mul_1
+___gmpn_mul_1:
 .word 0xfc0
 movl 12(ap),r4
 movl 8(ap),r8
diff --git a/mpn/vax/rshift.s b/mpn/vax/rshift.s
index bb8c4fb2d..f11d8044b 100644
--- a/mpn/vax/rshift.s
+++ b/mpn/vax/rshift.s
@@ -1,4 +1,4 @@
-# VAX __mpn_rshift -- right shift.
+# VAX __gmpn_rshift -- right shift.

 # Copyright (C) 1999 Free Software Foundation, Inc.
@@ -30,8 +30,8 @@
 .text
 .align 1
-.globl ___mpn_rshift
-___mpn_rshift:
+.globl ___gmpn_rshift
+___gmpn_rshift:
 .word 0x1c0
 movl 4(ap),r7
 movl 8(ap),r6
diff --git a/mpn/vax/sub_n.s b/mpn/vax/sub_n.s
index ed68b0f16..7c486c679 100644
--- a/mpn/vax/sub_n.s
+++ b/mpn/vax/sub_n.s
@@ -1,4 +1,4 @@
-# VAX __mpn_sub_n -- Subtract two limb vectors of the same length > 0 and store
+# VAX __gmpn_sub_n -- Subtract two limb vectors of the same length > 0 and store
 # difference in a third limb vector.

 # Copyright (C) 1999 Free Software Foundation, Inc.
@@ -29,8 +29,8 @@
 .text
 .align 1
-.globl ___mpn_sub_n
-___mpn_sub_n:
+.globl ___gmpn_sub_n
+___gmpn_sub_n:
 .word 0x0
 movl 16(ap),r0
 movl 12(ap),r1
diff --git a/mpn/vax/submul_1.s b/mpn/vax/submul_1.s
index fa39d11e4..765793517 100644
--- a/mpn/vax/submul_1.s
+++ b/mpn/vax/submul_1.s
@@ -1,4 +1,4 @@
-# VAX __mpn_submul_1 -- Multiply a limb vector with a limb and subtract
+# VAX __gmpn_submul_1 -- Multiply a limb vector with a limb and subtract
 # the result from a second limb vector.

 # Copyright (C) 1992, 1994, 1996 Free Software Foundation, Inc.
@@ -29,8 +29,8 @@
 .text
 .align 1
-.globl ___mpn_submul_1
-___mpn_submul_1:
+.globl ___gmpn_submul_1
+___gmpn_submul_1:
 .word 0xfc0
 movl 12(ap),r4
 movl 8(ap),r8
--
cgit v1.2.1