author     Michael B. Brutman <brutman@us.ibm.com>   2010-10-24 21:47:33 -0400
committer  Luis Machado <luisgpm@br.ibm.com>         2010-11-01 00:48:37 -0200
commit     e938cde08cd24f40db4e6e421148ba32f660d2f9 (patch)
tree       1c4b1c5c6fbd9e320828050904bfcf37df64c664
parent     f17cdc83d319b9b6ce10c04944c7a3dd3e3b2ff6 (diff)
download   glibc-e938cde08cd24f40db4e6e421148ba32f660d2f9.tar.gz
powerpc: PPCA2/A2 optimized memcpy function
(cherry picked from commit d0b9e94faf18f2585e7d0ae0b10daefb12f93059)
-rw-r--r--  ChangeLog                                             |  10
-rw-r--r--  sysdeps/powerpc/dl-procinfo.c                         |   5
-rw-r--r--  sysdeps/powerpc/dl-procinfo.h                         |   7
-rw-r--r--  sysdeps/powerpc/powerpc32/a2/memcpy.S                 | 511
-rw-r--r--  sysdeps/powerpc/powerpc64/a2/memcpy.S                 | 501
-rw-r--r--  sysdeps/unix/sysv/linux/powerpc/powerpc32/a2/Implies  |   2
-rw-r--r--  sysdeps/unix/sysv/linux/powerpc/powerpc64/a2/Implies  |   2
7 files changed, 1035 insertions(+), 3 deletions(-)
diff --git a/ChangeLog b/ChangeLog
index 42c8f96633..d0fd53ed07 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,13 @@
+2010-10-20 Michael B. Brutman <brutman@us.ibm.com>
+
+ * sysdeps/powerpc/dl-procinfo.c: Add support for ppca2 platform
+ * sysdeps/powerpc/dl-procinfo.h: Add support for ppca2 platform
+ * sysdeps/powerpc/powerpc32/a2/memcpy.S: New file.
+ * sysdeps/powerpc/powerpc64/a2/memcpy.S: Likewise.
+ * sysdeps/unix/sysv/linux/powerpc/powerpc32/a2/Implies: New
+ submachine.
+ * sysdeps/unix/sysv/linux/powerpc/powerpc64/a2/Implies: Likewise.
+
2010-05-01 Alan Modra <amodra@gmail.com>
* sysdeps/powerpc/powerpc32/power4/memcmp.S: Correct cfi for r24.
diff --git a/sysdeps/powerpc/dl-procinfo.c b/sysdeps/powerpc/dl-procinfo.c
index 1c74c2a905..2ca76d8b70 100644
--- a/sysdeps/powerpc/dl-procinfo.c
+++ b/sysdeps/powerpc/dl-procinfo.c
@@ -68,7 +68,7 @@ PROCINFO_CLASS const char _dl_powerpc_cap_flags[25][10]
#if !defined PROCINFO_DECL && defined SHARED
._dl_powerpc_platforms
#else
-PROCINFO_CLASS const char _dl_powerpc_platforms[8][12]
+PROCINFO_CLASS const char _dl_powerpc_platforms[9][12]
#endif
#ifndef PROCINFO_DECL
= {
@@ -79,7 +79,8 @@ PROCINFO_CLASS const char _dl_powerpc_platforms[8][12]
[PPC_PLATFORM_POWER6] = "power6",
[PPC_PLATFORM_CELL_BE] = "ppc-cell-be",
[PPC_PLATFORM_POWER6X] = "power6x",
- [PPC_PLATFORM_POWER7] = "power7"
+ [PPC_PLATFORM_POWER7] = "power7",
+ [PPC_PLATFORM_PPCA2] = "ppca2"
}
#endif
#if !defined SHARED || defined PROCINFO_DECL
diff --git a/sysdeps/powerpc/dl-procinfo.h b/sysdeps/powerpc/dl-procinfo.h
index 254195a94c..2ae35644a8 100644
--- a/sysdeps/powerpc/dl-procinfo.h
+++ b/sysdeps/powerpc/dl-procinfo.h
@@ -31,7 +31,7 @@
#define HWCAP_IMPORTANT (PPC_FEATURE_HAS_ALTIVEC \
+ PPC_FEATURE_HAS_DFP)
-#define _DL_PLATFORMS_COUNT 8
+#define _DL_PLATFORMS_COUNT 9
#define _DL_FIRST_PLATFORM 32
/* Mask to filter out platforms. */
@@ -47,6 +47,7 @@
#define PPC_PLATFORM_CELL_BE 5
#define PPC_PLATFORM_POWER6X 6
#define PPC_PLATFORM_POWER7 7
+#define PPC_PLATFORM_PPCA2 8
static inline const char *
__attribute__ ((unused))
@@ -123,6 +124,10 @@ _dl_string_platform (const char *str)
GLRO(dl_powerpc_platforms)[PPC_PLATFORM_CELL_BE] + 3)
== 0)
return _DL_FIRST_PLATFORM + PPC_PLATFORM_CELL_BE;
+ else if (strcmp (str + 3,
+ GLRO(dl_powerpc_platforms)[PPC_PLATFORM_PPCA2] + 3)
+ == 0)
+ return _DL_FIRST_PLATFORM + PPC_PLATFORM_PPCA2;
}
return -1;
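
The _dl_string_platform hunk above relies on the surrounding code having already matched the leading "ppc" of str, so each candidate name is compared only by its tail at offset 3 ("a2" for "ppca2"). A minimal stand-alone C sketch of that suffix trick follows; the names here are local to the example (the real table is _dl_powerpc_platforms, filled in by the dl-procinfo.c hunk above):

    /* Suffix-match sketch for the _dl_string_platform change above.
       Stand-alone illustration with a trimmed-down platform table.  */
    #include <stdio.h>
    #include <string.h>

    #define PPC_PLATFORM_CELL_BE 5
    #define PPC_PLATFORM_PPCA2   8
    #define _DL_FIRST_PLATFORM   32

    static const char platforms[9][12] =
      {
        [PPC_PLATFORM_CELL_BE] = "ppc-cell-be",
        [PPC_PLATFORM_PPCA2]   = "ppca2",
      };

    static int
    string_platform (const char *str)
    {
      /* The real code only takes this path once the "ppc" prefix has
         matched, so the tails at offset 3 decide the platform.  */
      if (str != NULL && strncmp (str, "ppc", 3) == 0)
        {
          if (strcmp (str + 3, platforms[PPC_PLATFORM_PPCA2] + 3) == 0)
            return _DL_FIRST_PLATFORM + PPC_PLATFORM_PPCA2;
          if (strcmp (str + 3, platforms[PPC_PLATFORM_CELL_BE] + 3) == 0)
            return _DL_FIRST_PLATFORM + PPC_PLATFORM_CELL_BE;
        }
      return -1;
    }

    int
    main (void)
    {
      printf ("%d\n", string_platform ("ppca2"));  /* prints 40 (32 + 8) */
      return 0;
    }
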
diff --git a/sysdeps/powerpc/powerpc32/a2/memcpy.S b/sysdeps/powerpc/powerpc32/a2/memcpy.S
new file mode 100644
index 0000000000..472f7a393b
--- /dev/null
+++ b/sysdeps/powerpc/powerpc32/a2/memcpy.S
@@ -0,0 +1,511 @@
+/* Optimized memcpy implementation for PowerPC A2.
+ Copyright (C) 2010 Free Software Foundation, Inc.
+ Contributed by Michael Brutman <brutman@us.ibm.com>.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, write to the Free
+ Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307 USA. */
+
+#include <sysdep.h>
+#include <bp-sym.h>
+#include <bp-asm.h>
+
+#define PREFETCH_AHEAD 4 /* number of cache lines to prefetch ahead of SRC */
+#define ZERO_AHEAD 2 /* number of cache lines to zero ahead of DST */
+
+ .machine a2
+EALIGN (BP_SYM (memcpy), 5, 0)
+ CALL_MCOUNT
+
+ dcbt 0,r4 /* Prefetch ONE SRC cacheline */
+ cmplwi cr1,r5,16 /* is size < 16 ? */
+ mr r6,r3 /* Copy dest reg to r6; */
+ blt+ cr1,L(shortcopy)
+
+
+ /* Big copy (16 bytes or more)
+
+ Figure out how far to the nearest quadword boundary, or if we are
+ on one already.
+
+ r3 - return value (always)
+ r4 - current source addr
+ r5 - copy length
+ r6 - current dest addr
+ */
+
+ neg r8,r3 /* LS 4 bits = # bytes to 16-byte dest bdry */
+ clrlwi r8,r8,32-4 /* align to 16-byte boundary */
+ sub r7,r4,r3 /* compute offset to src from dest */
+ cmplwi cr0,r8,0 /* Were we aligned on a 16 byte bdy? */
+ beq+ L(dst_aligned)
+
+
+
+ /* Destination is not aligned on quadword boundary. Get us to one.
+
+ r3 - return value (always)
+ r4 - current source addr
+ r5 - copy length
+ r6 - current dest addr
+ r7 - offset to src from dest
+ r8 - number of bytes to quadword boundary
+ */
+
+ mtcrf 0x01,r8 /* put #bytes to boundary into cr7 */
+ subf r5,r8,r5 /* adjust remaining len */
+
+ bf cr7*4+3,1f
+ lbzx r0,r7,r6 /* copy 1 byte addr */
+ stb r0,0(r6)
+ addi r6,r6,1
+1:
+ bf cr7*4+2,2f
+ lhzx r0,r7,r6 /* copy 2 byte addr */
+ sth r0,0(r6)
+ addi r6,r6,2
+2:
+ bf cr7*4+1,4f
+ lwzx r0,r7,r6 /* copy 4 byte addr */
+ stw r0,0(r6)
+ addi r6,r6,4
+4:
+ bf cr7*4+0,8f
+ lfdx r0,r7,r6 /* copy 8 byte addr */
+ stfd r0,0(r6)
+ addi r6,r6,8
+8:
+ add r4,r7,r6 /* update src addr */
+
+
+
+ /* Dest is quadword aligned now.
+
+ Lots of decisions to make. If we are copying less than a cache
+ line we won't be here long. If we are not on a cache line
+ boundary we need to get there. And then we need to figure out
+ how many cache lines ahead to pre-touch.
+
+ r3 - return value (always)
+ r4 - current source addr
+ r5 - copy length
+ r6 - current dest addr
+ */
+
+
+ .align 4
+L(dst_aligned):
+
+
+#ifdef SHARED
+ mflr r0
+/* Establish GOT addressability so we can load __cache_line_size
+   from static storage.  The value was set from the aux vector
+   during startup. */
+ bcl 20,31,1f
+1:
+ mflr r9
+ addis r9,r9,__cache_line_size-1b@ha
+ lwz r9,__cache_line_size-1b@l(r9)
+ mtlr r0
+#else
+/* Load __cache_line_size from static storage.  The value was set
+   from the aux vector during startup. */
+ lis r9,__cache_line_size@ha
+ lwz r9,__cache_line_size@l(r9)
+#endif
+
+ cmplwi cr5, r9, 0
+ bne+ cr5,L(cachelineset)
+ li r9,64
+
+
+
+L(cachelineset):
+
+ addi r10,r9,-1
+
+ cmpw cr5,r5,r10 /* Less than a cacheline to go? */
+
+ neg r7,r6 /* How far to next cacheline bdy? */
+
+ addi r6,r6,-8 /* prepare for stdu */
+ cmpwi cr0,r9,128
+ addi r4,r4,-8 /* prepare for ldu */
+
+
+ ble+ cr5,L(lessthancacheline)
+
+ beq- cr0,L(big_lines) /* 128 byte line code */
+
+
+
+
+ /* More than a cacheline left to go, and using 64 byte cachelines */
+
+ clrlwi r7,r7,32-6 /* How far to next cacheline bdy? */
+
+ cmplwi cr6,r7,0 /* Are we on a cacheline bdy already? */
+
+ /* Reduce total len by what it takes to get to the next cache line */
+ subf r5,r7,r5
+ srwi r7,r7,4 /* How many qws to get to the line bdy? */
+
+ /* How many full cache lines to copy after getting to a line bdy? */
+ srwi r10,r5,6
+
+ cmplwi r10,0 /* If no full cache lines to copy ... */
+ li r11,0 /* number cachelines to copy with prefetch */
+ beq L(nocacheprefetch)
+
+
+ /* We are here because we have at least one full cache line to copy,
+ and therefore some pre-touching to do. */
+
+ cmplwi r10,PREFETCH_AHEAD
+ li r12,64+8 /* prefetch distance */
+ ble L(lessthanmaxprefetch)
+
+ /* We can only do so much pre-fetching. R11 will have the count of
+ lines left to prefetch after the initial batch of prefetches
+ are executed. */
+
+ subi r11,r10,PREFETCH_AHEAD
+ li r10,PREFETCH_AHEAD
+
+L(lessthanmaxprefetch):
+ mtctr r10
+
+ /* At this point r10/ctr hold the number of lines to prefetch in this
+ initial batch, and r11 holds any remainder. */
+
+L(prefetchSRC):
+ dcbt r12,r4
+ addi r12,r12,64
+ bdnz L(prefetchSRC)
+
+
+ /* Prefetching is done, or was not needed.
+
+ cr6 - are we on a cacheline boundary already?
+ r7 - number of quadwords to the next cacheline boundary
+ */
+
+L(nocacheprefetch):
+ mtctr r7
+
+ cmplwi cr1,r5,64 /* Less than a cache line to copy? */
+
+ /* How many bytes are left after we copy whatever full
+ cache lines we can get? */
+ clrlwi r5,r5,32-6
+
+ beq cr6,L(cachelinealigned)
+
+
+ /* Copy quadwords up to the next cacheline boundary */
+
+L(aligntocacheline):
+ lfd fp9,0x08(r4)
+ lfdu fp10,0x10(r4)
+ stfd fp9,0x08(r6)
+ stfdu fp10,0x10(r6)
+ bdnz L(aligntocacheline)
+
+
+ .align 4
+L(cachelinealigned): /* copy whole cache lines */
+
+ blt- cr1,L(lessthancacheline) /* size <64 */
+
+L(outerloop):
+ cmpwi r11,0
+ mtctr r11
+ beq- L(endloop)
+
+ li r11,64*ZERO_AHEAD +8 /* DCBZ dist */
+
+ .align 4
+ /* Copy whole cachelines, optimized by prefetching SRC cacheline */
+L(loop): /* Copy aligned body */
+ dcbt r12,r4 /* PREFETCH SOURCE some cache lines ahead */
+ lfd fp9, 0x08(r4)
+ dcbz r11,r6
+ lfd fp10, 0x10(r4)
+ lfd fp11, 0x18(r4)
+ lfd fp12, 0x20(r4)
+ stfd fp9, 0x08(r6)
+ stfd fp10, 0x10(r6)
+ stfd fp11, 0x18(r6)
+ stfd fp12, 0x20(r6)
+ lfd fp9, 0x28(r4)
+ lfd fp10, 0x30(r4)
+ lfd fp11, 0x38(r4)
+ lfdu fp12, 0x40(r4)
+ stfd fp9, 0x28(r6)
+ stfd fp10, 0x30(r6)
+ stfd fp11, 0x38(r6)
+ stfdu fp12, 0x40(r6)
+
+ bdnz L(loop)
+
+
+L(endloop):
+ cmpwi r10,0
+ beq- L(endloop2)
+ mtctr r10
+
+L(loop2): /* Copy aligned body */
+ lfd fp9, 0x08(r4)
+ lfd fp10, 0x10(r4)
+ lfd fp11, 0x18(r4)
+ lfd fp12, 0x20(r4)
+ stfd fp9, 0x08(r6)
+ stfd fp10, 0x10(r6)
+ stfd fp11, 0x18(r6)
+ stfd fp12, 0x20(r6)
+ lfd fp9, 0x28(r4)
+ lfd fp10, 0x30(r4)
+ lfd fp11, 0x38(r4)
+ lfdu fp12, 0x40(r4)
+ stfd fp9, 0x28(r6)
+ stfd fp10, 0x30(r6)
+ stfd fp11, 0x38(r6)
+ stfdu fp12, 0x40(r6)
+
+ bdnz L(loop2)
+L(endloop2):
+
+
+ .align 4
+L(lessthancacheline): /* Was there less than a cache line to do? */
+ cmplwi cr0,r5,16
+ srwi r7,r5,4 /* divide size by 16 */
+ blt- L(do_lt16)
+ mtctr r7
+
+L(copy_remaining):
+ lfd fp9, 0x08(r4)
+ lfdu fp10, 0x10(r4)
+ stfd fp9, 0x08(r6)
+ stfdu fp10, 0x10(r6)
+ bdnz L(copy_remaining)
+
+L(do_lt16): /* less than 16 ? */
+ cmplwi cr0,r5,0 /* copy remaining bytes (0-15) */
+ beqlr+ /* no rest to copy */
+ addi r4,r4,8
+ addi r6,r6,8
+
+L(shortcopy): /* SIMPLE COPY to handle size <= 15 bytes */
+ mtcrf 0x01,r5
+ sub r7,r4,r6
+ bf- cr7*4+0,8f
+ lfdx fp9,r7,r6 /* copy 8 byte */
+ stfd fp9,0(r6)
+ addi r6,r6,8
+8:
+ bf cr7*4+1,4f
+ lwzx r0,r7,r6 /* copy 4 byte */
+ stw r0,0(r6)
+ addi r6,r6,4
+4:
+ bf cr7*4+2,2f
+ lhzx r0,r7,r6 /* copy 2 byte */
+ sth r0,0(r6)
+ addi r6,r6,2
+2:
+ bf cr7*4+3,1f
+ lbzx r0,r7,r6 /* copy 1 byte */
+ stb r0,0(r6)
+1:
+ blr
+
+
+
+
+
+ /* Similar to above, but for use with 128 byte lines. */
+
+
+L(big_lines):
+
+ clrlwi r7,r7,32-7 /* How far to next cacheline bdy? */
+
+ cmplwi cr6,r7,0 /* Are we on a cacheline bdy already? */
+
+ /* Reduce total len by what it takes to get to the next cache line */
+ subf r5,r7,r5
+ srwi r7,r7,4 /* How many qw to get to the line bdy? */
+
+ /* How many full cache lines to copy after getting to a line bdy? */
+ srwi r10,r5,7
+
+ cmplwi r10,0 /* If no full cache lines to copy ... */
+ li r11,0 /* number cachelines to copy with prefetch */
+ beq L(nocacheprefetch_128)
+
+
+ /* We are here because we have at least one full cache line to copy,
+ and therefore some pre-touching to do. */
+
+ cmplwi r10,PREFETCH_AHEAD
+ li r12,128+8 /* prefetch distance */
+ ble L(lessthanmaxprefetch_128)
+
+ /* We can only do so much pre-fetching. R11 will have the count of
+ lines left to prefetch after the initial batch of prefetches
+ are executed. */
+
+ subi r11,r10,PREFETCH_AHEAD
+ li r10,PREFETCH_AHEAD
+
+L(lessthanmaxprefetch_128):
+ mtctr r10
+
+ /* At this point r10/ctr hold the number of lines to prefetch in this
+ initial batch, and r11 holds any remainder. */
+
+L(prefetchSRC_128):
+ dcbt r12,r4
+ addi r12,r12,128
+ bdnz L(prefetchSRC_128)
+
+
+ /* Prefetching is done, or was not needed.
+
+ cr6 - are we on a cacheline boundary already?
+ r7 - number of quadwords to the next cacheline boundary
+ */
+
+L(nocacheprefetch_128):
+ mtctr r7
+
+ cmplwi cr1,r5,128 /* Less than a cache line to copy? */
+
+ /* How many bytes are left after we copy whatever full
+ cache lines we can get? */
+ clrlwi r5,r5,32-7
+
+ beq cr6,L(cachelinealigned_128)
+
+
+ /* Copy quadwords up to the next cacheline boundary */
+
+L(aligntocacheline_128):
+ lfd fp9,0x08(r4)
+ lfdu fp10,0x10(r4)
+ stfd fp9,0x08(r6)
+ stfdu fp10,0x10(r6)
+ bdnz L(aligntocacheline_128)
+
+
+L(cachelinealigned_128): /* copy whole cache lines */
+
+ blt- cr1,L(lessthancacheline) /* size <128 */
+
+L(outerloop_128):
+ cmpwi r11,0
+ mtctr r11
+ beq- L(endloop_128)
+
+ li r11,128*ZERO_AHEAD +8 /* DCBZ dist */
+
+ .align 4
+ /* Copy whole cachelines, optimized by prefetching SRC cacheline */
+L(loop_128): /* Copy aligned body */
+ dcbt r12,r4 /* PREFETCH SOURCE some cache lines ahead */
+ lfd fp9, 0x08(r4)
+ dcbz r11,r6
+ lfd fp10, 0x10(r4)
+ lfd fp11, 0x18(r4)
+ lfd fp12, 0x20(r4)
+ stfd fp9, 0x08(r6)
+ stfd fp10, 0x10(r6)
+ stfd fp11, 0x18(r6)
+ stfd fp12, 0x20(r6)
+ lfd fp9, 0x28(r4)
+ lfd fp10, 0x30(r4)
+ lfd fp11, 0x38(r4)
+ lfd fp12, 0x40(r4)
+ stfd fp9, 0x28(r6)
+ stfd fp10, 0x30(r6)
+ stfd fp11, 0x38(r6)
+ stfd fp12, 0x40(r6)
+ lfd fp9, 0x48(r4)
+ lfd fp10, 0x50(r4)
+ lfd fp11, 0x58(r4)
+ lfd fp12, 0x60(r4)
+ stfd fp9, 0x48(r6)
+ stfd fp10, 0x50(r6)
+ stfd fp11, 0x58(r6)
+ stfd fp12, 0x60(r6)
+ lfd fp9, 0x68(r4)
+ lfd fp10, 0x70(r4)
+ lfd fp11, 0x78(r4)
+ lfdu fp12, 0x80(r4)
+ stfd fp9, 0x68(r6)
+ stfd fp10, 0x70(r6)
+ stfd fp11, 0x78(r6)
+ stfdu fp12, 0x80(r6)
+
+ bdnz L(loop_128)
+
+
+L(endloop_128):
+ cmpwi r10,0
+ beq- L(endloop2_128)
+ mtctr r10
+
+L(loop2_128): /* Copy aligned body */
+ lfd fp9, 0x08(r4)
+ lfd fp10, 0x10(r4)
+ lfd fp11, 0x18(r4)
+ lfd fp12, 0x20(r4)
+ stfd fp9, 0x08(r6)
+ stfd fp10, 0x10(r6)
+ stfd fp11, 0x18(r6)
+ stfd fp12, 0x20(r6)
+ lfd fp9, 0x28(r4)
+ lfd fp10, 0x30(r4)
+ lfd fp11, 0x38(r4)
+ lfd fp12, 0x40(r4)
+ stfd fp9, 0x28(r6)
+ stfd fp10, 0x30(r6)
+ stfd fp11, 0x38(r6)
+ stfd fp12, 0x40(r6)
+ lfd fp9, 0x48(r4)
+ lfd fp10, 0x50(r4)
+ lfd fp11, 0x58(r4)
+ lfd fp12, 0x60(r4)
+ stfd fp9, 0x48(r6)
+ stfd fp10, 0x50(r6)
+ stfd fp11, 0x58(r6)
+ stfd fp12, 0x60(r6)
+ lfd fp9, 0x68(r4)
+ lfd fp10, 0x70(r4)
+ lfd fp11, 0x78(r4)
+ lfdu fp12, 0x80(r4)
+ stfd fp9, 0x68(r6)
+ stfd fp10, 0x70(r6)
+ stfd fp11, 0x78(r6)
+ stfdu fp12, 0x80(r6)
+ bdnz L(loop2_128)
+L(endloop2_128):
+
+ b L(lessthancacheline)
+
+
+END (BP_SYM (memcpy))
+libc_hidden_builtin_def (memcpy)
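
For orientation, the staging in the file above (mirrored by the 64-bit version that follows) can be paraphrased in C. This is only a sketch under stated simplifications: it moves bytes with memcpy chunks instead of 8-byte floating-point loads/stores, and it omits the dcbt source prefetch and dcbz destination-zeroing that the assembly interleaves with the copy loops:

    /* Sketch of the copy staging used by the A2 memcpy above.
       Illustrative only: the assembly copies 8 bytes at a time through
       FP registers, prefetches the source with dcbt (up to
       PREFETCH_AHEAD lines ahead), and zeroes destination lines with
       dcbz (ZERO_AHEAD lines ahead).  */
    #include <stddef.h>
    #include <stdint.h>
    #include <string.h>

    void *
    a2_memcpy_sketch (void *dst, const void *src, size_t len)
    {
      unsigned char *d = dst;
      const unsigned char *s = src;
      const size_t line = 64;    /* __cache_line_size; 128 on some parts */

      if (len >= 16)
        {
          /* Stage 1: 1/2/4/8-byte moves until dst is 16-byte aligned.  */
          size_t head = -(uintptr_t) d & 15;
          memcpy (d, s, head); d += head; s += head; len -= head;

          if (len >= line)
            {
              /* Stage 2: quadwords up to the next cache-line boundary.  */
              size_t fixup = -(uintptr_t) d & (line - 1);
              memcpy (d, s, fixup); d += fixup; s += fixup; len -= fixup;

              /* Stage 3: whole cache lines (the prefetched fast path).  */
              while (len >= line)
                {
                  memcpy (d, s, line); d += line; s += line; len -= line;
                }
            }
        }

      /* Stage 4: 16-byte chunks, then an 8/4/2/1-byte tail.  */
      memcpy (d, s, len);
      return dst;
    }
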
diff --git a/sysdeps/powerpc/powerpc64/a2/memcpy.S b/sysdeps/powerpc/powerpc64/a2/memcpy.S
new file mode 100644
index 0000000000..ac95171aa8
--- /dev/null
+++ b/sysdeps/powerpc/powerpc64/a2/memcpy.S
@@ -0,0 +1,501 @@
+/* Optimized memcpy implementation for PowerPC A2.
+ Copyright (C) 2010 Free Software Foundation, Inc.
+ Contributed by Michael Brutman <brutman@us.ibm.com>.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, write to the Free
+ Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307 USA. */
+
+#include <sysdep.h>
+#include <bp-sym.h>
+#include <bp-asm.h>
+
+#define PREFETCH_AHEAD 4 /* number of cache lines to prefetch ahead of SRC */
+#define ZERO_AHEAD 2 /* number of cache lines to zero ahead of DST */
+
+ .section ".toc","aw"
+.LC0:
+ .tc __cache_line_size[TC],__cache_line_size
+ .section ".text"
+ .align 2
+
+
+ .machine a2
+EALIGN (BP_SYM (memcpy), 5, 0)
+ CALL_MCOUNT 3
+
+ dcbt 0,r4 /* Prefetch ONE SRC cacheline */
+ cmpldi cr1,r5,16 /* is size < 16 ? */
+ mr r6,r3 /* Copy dest reg to r6; */
+ blt+ cr1,L(shortcopy)
+
+
+ /* Big copy (16 bytes or more)
+
+ Figure out how far to the nearest quadword boundary, or if we are
+ on one already. Also get the cache line size.
+
+ r3 - return value (always)
+ r4 - current source addr
+ r5 - copy length
+ r6 - current dest addr
+ */
+
+ neg r8,r3 /* LS 4 bits = # bytes to 16-byte dest bdry */
+ ld r9,.LC0@toc(r2) /* Get cache line size (part 1) */
+ clrldi r8,r8,64-4 /* align to 16-byte boundary */
+ sub r7,r4,r3 /* compute offset to src from dest */
+ lwz r9,0(r9) /* Get cache line size (part 2) */
+ cmpldi cr0,r8,0 /* Were we aligned on a 16 byte bdy? */
+ addi r10,r9,-1 /* Cache line mask */
+ beq+ L(dst_aligned)
+
+
+
+ /* Destination is not aligned on quadword boundary. Get us to one.
+
+ r3 - return value (always)
+ r4 - current source addr
+ r5 - copy length
+ r6 - current dest addr
+ r7 - offset to src from dest
+ r8 - number of bytes to quadword boundary
+ */
+
+ mtcrf 0x01,r8 /* put #bytes to boundary into cr7 */
+ subf r5,r8,r5 /* adjust remaining len */
+
+ bf cr7*4+3,1f
+ lbzx r0,r7,r6 /* copy 1 byte addr */
+ stb r0,0(r6)
+ addi r6,r6,1
+1:
+ bf cr7*4+2,2f
+ lhzx r0,r7,r6 /* copy 2 byte addr */
+ sth r0,0(r6)
+ addi r6,r6,2
+2:
+ bf cr7*4+1,4f
+ lwzx r0,r7,r6 /* copy 4 byte addr */
+ stw r0,0(r6)
+ addi r6,r6,4
+4:
+ bf cr7*4+0,8f
+ ldx r0,r7,r6 /* copy 8 byte addr */
+ std r0,0(r6)
+ addi r6,r6,8
+8:
+ add r4,r7,r6 /* update src addr */
+
+
+
+ /* Dest is quadword aligned now.
+
+ Lots of decisions to make. If we are copying less than a cache
+ line we won't be here long. If we are not on a cache line
+ boundary we need to get there. And then we need to figure out
+ how many cache lines ahead to pre-touch.
+
+ r3 - return value (always)
+ r4 - current source addr
+ r5 - copy length
+ r6 - current dest addr
+ */
+
+
+ .align 4
+L(dst_aligned):
+
+
+ cmpd cr5,r5,r10 /* Less than a cacheline to go? */
+
+ neg r7,r6 /* How far to next cacheline bdy? */
+
+ addi r6,r6,-8 /* prepare for stdu */
+ cmpdi cr0,r9,128
+ addi r4,r4,-8 /* prepare for ldu */
+
+
+ ble+ cr5,L(lessthancacheline)
+
+ beq- cr0,L(big_lines) /* 128 byte line code */
+
+
+
+ /* More than a cacheline left to go, and using 64 byte cachelines */
+
+ clrldi r7,r7,64-6 /* How far to next cacheline bdy? */
+
+ cmpldi cr6,r7,0 /* Are we on a cacheline bdy already? */
+
+ /* Reduce total len by what it takes to get to the next cache line */
+ subf r5,r7,r5
+ srdi r7,r7,4 /* How many qws to get to the line bdy? */
+
+ /* How many full cache lines to copy after getting to a line bdy? */
+ srdi r10,r5,6
+
+ cmpldi r10,0 /* If no full cache lines to copy ... */
+ li r11,0 /* number cachelines to copy with prefetch */
+ beq L(nocacheprefetch)
+
+
+ /* We are here because we have at least one full cache line to copy,
+ and therefore some pre-touching to do. */
+
+ cmpldi r10,PREFETCH_AHEAD
+ li r12,64+8 /* prefetch distance */
+ ble L(lessthanmaxprefetch)
+
+ /* We can only do so much pre-fetching. R11 will have the count of
+ lines left to prefetch after the initial batch of prefetches
+ are executed. */
+
+ subi r11,r10,PREFETCH_AHEAD
+ li r10,PREFETCH_AHEAD
+
+L(lessthanmaxprefetch):
+ mtctr r10
+
+ /* At this point r10/ctr hold the number of lines to prefetch in this
+ initial batch, and r11 holds any remainder. */
+
+L(prefetchSRC):
+ dcbt r12,r4
+ addi r12,r12,64
+ bdnz L(prefetchSRC)
+
+
+ /* Prefetching is done, or was not needed.
+
+ cr6 - are we on a cacheline boundary already?
+ r7 - number of quadwords to the next cacheline boundary
+ */
+
+L(nocacheprefetch):
+ mtctr r7
+
+ cmpldi cr1,r5,64 /* Less than a cache line to copy? */
+
+ /* How many bytes are left after we copy whatever full
+ cache lines we can get? */
+ clrldi r5,r5,64-6
+
+ beq cr6,L(cachelinealigned)
+
+
+ /* Copy quadwords up to the next cacheline boundary */
+
+L(aligntocacheline):
+ ld r9,0x08(r4)
+ ld r7,0x10(r4)
+ addi r4,r4,0x10
+ std r9,0x08(r6)
+ stdu r7,0x10(r6)
+ bdnz L(aligntocacheline)
+
+
+ .align 4
+L(cachelinealigned): /* copy whole cache lines */
+
+ blt- cr1,L(lessthancacheline) /* size <64 */
+
+L(outerloop):
+ cmpdi r11,0
+ mtctr r11
+ beq- L(endloop)
+
+ li r11,64*ZERO_AHEAD +8 /* DCBZ dist */
+
+ .align 4
+ /* Copy whole cachelines, optimized by prefetching SRC cacheline */
+L(loop): /* Copy aligned body */
+ dcbt r12,r4 /* PREFETCH SOURCE some cache lines ahead */
+ ld r9, 0x08(r4)
+ dcbz r11,r6
+ ld r7, 0x10(r4)
+ ld r8, 0x18(r4)
+ ld r0, 0x20(r4)
+ std r9, 0x08(r6)
+ std r7, 0x10(r6)
+ std r8, 0x18(r6)
+ std r0, 0x20(r6)
+ ld r9, 0x28(r4)
+ ld r7, 0x30(r4)
+ ld r8, 0x38(r4)
+ ld r0, 0x40(r4)
+ addi r4, r4,0x40
+ std r9, 0x28(r6)
+ std r7, 0x30(r6)
+ std r8, 0x38(r6)
+ stdu r0, 0x40(r6)
+
+ bdnz L(loop)
+
+
+L(endloop):
+ cmpdi r10,0
+ beq- L(endloop2)
+ mtctr r10
+
+L(loop2): /* Copy aligned body */
+ ld r9, 0x08(r4)
+ ld r7, 0x10(r4)
+ ld r8, 0x18(r4)
+ ld r0, 0x20(r4)
+ std r9, 0x08(r6)
+ std r7, 0x10(r6)
+ std r8, 0x18(r6)
+ std r0, 0x20(r6)
+ ld r9, 0x28(r4)
+ ld r7, 0x30(r4)
+ ld r8, 0x38(r4)
+ ld r0, 0x40(r4)
+ addi r4, r4,0x40
+ std r9, 0x28(r6)
+ std r7, 0x30(r6)
+ std r8, 0x38(r6)
+ stdu r0, 0x40(r6)
+
+ bdnz L(loop2)
+L(endloop2):
+
+
+ .align 4
+L(lessthancacheline): /* Was there less than a cache line to do? */
+ cmpldi cr0,r5,16
+ srdi r7,r5,4 /* divide size by 16 */
+ blt- L(do_lt16)
+ mtctr r7
+
+L(copy_remaining):
+ ld r8,0x08(r4)
+ ld r7,0x10(r4)
+ addi r4,r4,0x10
+ std r8,0x08(r6)
+ stdu r7,0x10(r6)
+ bdnz L(copy_remaining)
+
+L(do_lt16): /* less than 16 ? */
+ cmpldi cr0,r5,0 /* copy remaining bytes (0-15) */
+ beqlr+ /* no rest to copy */
+ addi r4,r4,8
+ addi r6,r6,8
+
+L(shortcopy): /* SIMPLE COPY to handle size <= 15 bytes */
+ mtcrf 0x01,r5
+ sub r7,r4,r6
+ bf- cr7*4+0,8f
+ ldx r0,r7,r6 /* copy 8 byte */
+ std r0,0(r6)
+ addi r6,r6,8
+8:
+ bf cr7*4+1,4f
+ lwzx r0,r7,r6 /* copy 4 byte */
+ stw r0,0(r6)
+ addi r6,r6,4
+4:
+ bf cr7*4+2,2f
+ lhzx r0,r7,r6 /* copy 2 byte */
+ sth r0,0(r6)
+ addi r6,r6,2
+2:
+ bf cr7*4+3,1f
+ lbzx r0,r7,r6 /* copy 1 byte */
+ stb r0,0(r6)
+1:
+ blr
+
+
+
+
+
+ /* Similar to above, but for use with 128 byte lines. */
+
+
+L(big_lines):
+
+ clrldi r7,r7,64-7 /* How far to next cacheline bdy? */
+
+ cmpldi cr6,r7,0 /* Are we on a cacheline bdy already? */
+
+ /* Reduce total len by what it takes to get to the next cache line */
+ subf r5,r7,r5
+ srdi r7,r7,4 /* How many qws to get to the line bdy? */
+
+ /* How many full cache lines to copy after getting to a line bdy? */
+ srdi r10,r5,7
+
+ cmpldi r10,0 /* If no full cache lines to copy ... */
+ li r11,0 /* number cachelines to copy with prefetch */
+ beq L(nocacheprefetch_128)
+
+
+ /* We are here because we have at least one full cache line to copy,
+ and therefore some pre-touching to do. */
+
+ cmpldi r10,PREFETCH_AHEAD
+ li r12,128+8 /* prefetch distance */
+ ble L(lessthanmaxprefetch_128)
+
+ /* We can only do so much pre-fetching. R11 will have the count of
+ lines left to prefetch after the initial batch of prefetches
+ are executed. */
+
+ subi r11,r10,PREFETCH_AHEAD
+ li r10,PREFETCH_AHEAD
+
+L(lessthanmaxprefetch_128):
+ mtctr r10
+
+ /* At this point r10/ctr hold the number of lines to prefetch in this
+ initial batch, and r11 holds any remainder. */
+
+L(prefetchSRC_128):
+ dcbt r12,r4
+ addi r12,r12,128
+ bdnz L(prefetchSRC_128)
+
+
+ /* Prefetching is done, or was not needed.
+
+ cr6 - are we on a cacheline boundary already?
+ r7 - number of quadwords to the next cacheline boundary
+ */
+
+L(nocacheprefetch_128):
+ mtctr r7
+
+ cmpldi cr1,r5,128 /* Less than a cache line to copy? */
+
+ /* How many bytes are left after we copy whatever full
+ cache lines we can get? */
+ clrldi r5,r5,64-7
+
+ beq cr6,L(cachelinealigned_128)
+
+
+ /* Copy quadwords up to the next cacheline boundary */
+
+L(aligntocacheline_128):
+ ld r9,0x08(r4)
+ ld r7,0x10(r4)
+ addi r4,r4,0x10
+ std r9,0x08(r6)
+ stdu r7,0x10(r6)
+ bdnz L(aligntocacheline_128)
+
+
+L(cachelinealigned_128): /* copy whole cache lines */
+
+ blt- cr1,L(lessthancacheline) /* size <128 */
+
+L(outerloop_128):
+ cmpdi r11,0
+ mtctr r11
+ beq- L(endloop_128)
+
+ li r11,128*ZERO_AHEAD +8 /* DCBZ dist */
+
+ .align 4
+ /* Copy whole cachelines, optimized by prefetching SRC cacheline */
+L(loop_128): /* Copy aligned body */
+ dcbt r12,r4 /* PREFETCH SOURCE some cache lines ahead */
+ ld r9, 0x08(r4)
+ dcbz r11,r6
+ ld r7, 0x10(r4)
+ ld r8, 0x18(r4)
+ ld r0, 0x20(r4)
+ std r9, 0x08(r6)
+ std r7, 0x10(r6)
+ std r8, 0x18(r6)
+ std r0, 0x20(r6)
+ ld r9, 0x28(r4)
+ ld r7, 0x30(r4)
+ ld r8, 0x38(r4)
+ ld r0, 0x40(r4)
+ std r9, 0x28(r6)
+ std r7, 0x30(r6)
+ std r8, 0x38(r6)
+ std r0, 0x40(r6)
+ ld r9, 0x48(r4)
+ ld r7, 0x50(r4)
+ ld r8, 0x58(r4)
+ ld r0, 0x60(r4)
+ std r9, 0x48(r6)
+ std r7, 0x50(r6)
+ std r8, 0x58(r6)
+ std r0, 0x60(r6)
+ ld r9, 0x68(r4)
+ ld r7, 0x70(r4)
+ ld r8, 0x78(r4)
+ ld r0, 0x80(r4)
+ addi r4, r4,0x80
+ std r9, 0x68(r6)
+ std r7, 0x70(r6)
+ std r8, 0x78(r6)
+ stdu r0, 0x80(r6)
+
+ bdnz L(loop_128)
+
+
+L(endloop_128):
+ cmpdi r10,0
+ beq- L(endloop2_128)
+ mtctr r10
+
+L(loop2_128): /* Copy aligned body */
+ ld r9, 0x08(r4)
+ ld r7, 0x10(r4)
+ ld r8, 0x18(r4)
+ ld r0, 0x20(r4)
+ std r9, 0x08(r6)
+ std r7, 0x10(r6)
+ std r8, 0x18(r6)
+ std r0, 0x20(r6)
+ ld r9, 0x28(r4)
+ ld r7, 0x30(r4)
+ ld r8, 0x38(r4)
+ ld r0, 0x40(r4)
+ std r9, 0x28(r6)
+ std r7, 0x30(r6)
+ std r8, 0x38(r6)
+ std r0, 0x40(r6)
+ ld r9, 0x48(r4)
+ ld r7, 0x50(r4)
+ ld r8, 0x58(r4)
+ ld r0, 0x60(r4)
+ std r9, 0x48(r6)
+ std r7, 0x50(r6)
+ std r8, 0x58(r6)
+ std r0, 0x60(r6)
+ ld r9, 0x68(r4)
+ ld r7, 0x70(r4)
+ ld r8, 0x78(r4)
+ ld r0, 0x80(r4)
+ addi r4, r4,0x80
+ std r9, 0x68(r6)
+ std r7, 0x70(r6)
+ std r8, 0x78(r6)
+ stdu r0, 0x80(r6)
+
+ bdnz L(loop2_128)
+L(endloop2_128):
+
+ b L(lessthancacheline)
+
+
+END_GEN_TB (BP_SYM (memcpy),TB_TOCLESS)
+libc_hidden_builtin_def (memcpy)
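
Apart from copying through GPRs with ld/std instead of FP registers, the 64-bit variant differs mainly in how it finds __cache_line_size: it loads the address from its TOC entry (.LC0) rather than using the GOT-relative sequence of the 32-bit SHARED path. The prefetch bookkeeping is identical in both files: the initial dcbt batch covers at most PREFETCH_AHEAD lines, which are then copied last in L(loop2) without further prefetching, while any remainder is copied in L(loop), which issues one dcbt per line as it goes. A small C sketch of that split, using a hypothetical helper that mirrors registers r10 (initial batch) and r11 (remainder):

    /* Sketch of the prefetch split shared by both memcpy variants.
       Hypothetical helper; in the assembly this is the cmplwi/subi/li
       sequence before L(lessthanmaxprefetch).  */
    #define PREFETCH_AHEAD 4

    struct batch { unsigned long first, rest; };

    static struct batch
    split_prefetch (unsigned long full_lines)   /* len / cache-line size */
    {
      struct batch b = { full_lines, 0 };
      if (full_lines > PREFETCH_AHEAD)
        {
          b.rest  = full_lines - PREFETCH_AHEAD; /* subi r11,r10,PREFETCH_AHEAD */
          b.first = PREFETCH_AHEAD;              /* li   r10,PREFETCH_AHEAD */
        }
      return b;
    }
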
diff --git a/sysdeps/unix/sysv/linux/powerpc/powerpc32/a2/Implies b/sysdeps/unix/sysv/linux/powerpc/powerpc32/a2/Implies
new file mode 100644
index 0000000000..6d72414e55
--- /dev/null
+++ b/sysdeps/unix/sysv/linux/powerpc/powerpc32/a2/Implies
@@ -0,0 +1,2 @@
+powerpc/powerpc32/a2/fpu
+powerpc/powerpc32/a2
diff --git a/sysdeps/unix/sysv/linux/powerpc/powerpc64/a2/Implies b/sysdeps/unix/sysv/linux/powerpc/powerpc64/a2/Implies
new file mode 100644
index 0000000000..39b19e9c1f
--- /dev/null
+++ b/sysdeps/unix/sysv/linux/powerpc/powerpc64/a2/Implies
@@ -0,0 +1,2 @@
+powerpc/powerpc64/a2/fpu
+powerpc/powerpc64/a2
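
The two Implies files splice the new submachine directories into the sysdeps search path, so a glibc built for the a2 submachine picks up the memcpy above while falling back to the generic powerpc directories for everything else; at run time, the dl-procinfo changes let the dynamic linker recognize an AT_PLATFORM value of "ppca2" the same way. Roughly, for a 64-bit a2 build the relevant part of the sysdeps search order becomes (abridged, illustrative):

    sysdeps/unix/sysv/linux/powerpc/powerpc64/a2   (Implies file above)
    sysdeps/powerpc/powerpc64/a2/fpu
    sysdeps/powerpc/powerpc64/a2                   (provides memcpy.S)
    sysdeps/powerpc/powerpc64/fpu
    sysdeps/powerpc/powerpc64
    ...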