author     joseph <joseph@7b3dc134-2b1b-0410-93df-9e9f96275f8d>  2010-11-07 12:11:40 +0000
committer  joseph <joseph@7b3dc134-2b1b-0410-93df-9e9f96275f8d>  2010-11-07 12:11:40 +0000
commit     486caa1f257e98014ac78f7f7715876f705c17dd (patch)
tree       99cad456a5df680ad632040afc583c4862c83b47 /libc/sysdeps/powerpc
parent     e2dc087599c61bfc5ed2ab08b70d4fa69836b8b5 (diff)
download   eglibc2-486caa1f257e98014ac78f7f7715876f705c17dd.tar.gz
Merge changes between r11842 and r11980 from /fsf/trunk.
git-svn-id: svn://svn.eglibc.org/trunk@11981 7b3dc134-2b1b-0410-93df-9e9f96275f8d
Diffstat (limited to 'libc/sysdeps/powerpc')
-rw-r--r--  libc/sysdeps/powerpc/dl-procinfo.c                 5
-rw-r--r--  libc/sysdeps/powerpc/dl-procinfo.h                 7
-rw-r--r--  libc/sysdeps/powerpc/powerpc32/a2/memcpy.S       511
-rw-r--r--  libc/sysdeps/powerpc/powerpc32/power7/mempcpy.S  471
-rw-r--r--  libc/sysdeps/powerpc/powerpc64/a2/memcpy.S       501
-rw-r--r--  libc/sysdeps/powerpc/powerpc64/power7/mempcpy.S  458
6 files changed, 1950 insertions, 3 deletions
diff --git a/libc/sysdeps/powerpc/dl-procinfo.c b/libc/sysdeps/powerpc/dl-procinfo.c
index 1c74c2a90..2ca76d8b7 100644
--- a/libc/sysdeps/powerpc/dl-procinfo.c
+++ b/libc/sysdeps/powerpc/dl-procinfo.c
@@ -68,7 +68,7 @@ PROCINFO_CLASS const char _dl_powerpc_cap_flags[25][10]
#if !defined PROCINFO_DECL && defined SHARED
._dl_powerpc_platforms
#else
-PROCINFO_CLASS const char _dl_powerpc_platforms[8][12]
+PROCINFO_CLASS const char _dl_powerpc_platforms[9][12]
#endif
#ifndef PROCINFO_DECL
= {
@@ -79,7 +79,8 @@ PROCINFO_CLASS const char _dl_powerpc_platforms[8][12]
[PPC_PLATFORM_POWER6] = "power6",
[PPC_PLATFORM_CELL_BE] = "ppc-cell-be",
[PPC_PLATFORM_POWER6X] = "power6x",
- [PPC_PLATFORM_POWER7] = "power7"
+ [PPC_PLATFORM_POWER7] = "power7",
+ [PPC_PLATFORM_PPCA2] = "ppca2"
}
#endif
#if !defined SHARED || defined PROCINFO_DECL
diff --git a/libc/sysdeps/powerpc/dl-procinfo.h b/libc/sysdeps/powerpc/dl-procinfo.h
index 254195a94..2ae35644a 100644
--- a/libc/sysdeps/powerpc/dl-procinfo.h
+++ b/libc/sysdeps/powerpc/dl-procinfo.h
@@ -31,7 +31,7 @@
#define HWCAP_IMPORTANT (PPC_FEATURE_HAS_ALTIVEC \
+ PPC_FEATURE_HAS_DFP)
-#define _DL_PLATFORMS_COUNT 8
+#define _DL_PLATFORMS_COUNT 9
#define _DL_FIRST_PLATFORM 32
/* Mask to filter out platforms. */
@@ -47,6 +47,7 @@
#define PPC_PLATFORM_CELL_BE 5
#define PPC_PLATFORM_POWER6X 6
#define PPC_PLATFORM_POWER7 7
+#define PPC_PLATFORM_PPCA2 8
static inline const char *
__attribute__ ((unused))
@@ -123,6 +124,10 @@ _dl_string_platform (const char *str)
GLRO(dl_powerpc_platforms)[PPC_PLATFORM_CELL_BE] + 3)
== 0)
return _DL_FIRST_PLATFORM + PPC_PLATFORM_CELL_BE;
+ else if (strcmp (str + 3,
+ GLRO(dl_powerpc_platforms)[PPC_PLATFORM_PPCA2] + 3)
+ == 0)
+ return _DL_FIRST_PLATFORM + PPC_PLATFORM_PPCA2;
}
return -1;
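
The new PPC_PLATFORM_PPCA2 entry lets the dynamic loader recognize the "ppca2" AT_PLATFORM string. Since every entry in _dl_powerpc_platforms starts with "ppc", _dl_string_platform compares only the suffix, which is what the "str + 3" in the hunk above does. A minimal, self-contained sketch of that comparison (illustrative names, not the glibc internals, which work on GLRO(dl_powerpc_platforms)):

/* Sketch of how the "str + 3" comparison matches the new "ppca2" entry.  */
#include <string.h>

#define _DL_FIRST_PLATFORM 32
#define PPC_PLATFORM_PPCA2 8

static const char platforms[9][12] = {
  /* ... other entries as in dl-procinfo.c ... */
  [PPC_PLATFORM_PPCA2] = "ppca2",
};

static int
string_platform (const char *str)
{
  /* All table entries share the "ppc" prefix; only the suffix differs.  */
  if (str != NULL && strncmp (str, "ppc", 3) == 0
      && strcmp (str + 3, platforms[PPC_PLATFORM_PPCA2] + 3) == 0)
    return _DL_FIRST_PLATFORM + PPC_PLATFORM_PPCA2;
  return -1;
}

int
main (void)
{
  return string_platform ("ppca2") == 40 ? 0 : 1;   /* 32 + 8 */
}
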
diff --git a/libc/sysdeps/powerpc/powerpc32/a2/memcpy.S b/libc/sysdeps/powerpc/powerpc32/a2/memcpy.S
new file mode 100644
index 000000000..472f7a393
--- /dev/null
+++ b/libc/sysdeps/powerpc/powerpc32/a2/memcpy.S
@@ -0,0 +1,511 @@
+/* Optimized memcpy implementation for PowerPC A2.
+ Copyright (C) 2010 Free Software Foundation, Inc.
+ Contributed by Michael Brutman <brutman@us.ibm.com>.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, write to the Free
+ Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307 USA. */
+
+#include <sysdep.h>
+#include <bp-sym.h>
+#include <bp-asm.h>
+
+#define PREFETCH_AHEAD 4 /* no cache lines SRC prefetching ahead */
+#define ZERO_AHEAD 2 /* no cache lines DST zeroing ahead */
+
+ .machine a2
+EALIGN (BP_SYM (memcpy), 5, 0)
+ CALL_MCOUNT
+
+ dcbt 0,r4 /* Prefetch ONE SRC cacheline */
+ cmplwi cr1,r5,16 /* is size < 16 ? */
+ mr r6,r3 /* Copy dest reg to r6; */
+ blt+ cr1,L(shortcopy)
+
+
+ /* Big copy (16 bytes or more)
+
+ Figure out how far to the nearest quadword boundary, or if we are
+ on one already.
+
+ r3 - return value (always)
+ r4 - current source addr
+ r5 - copy length
+ r6 - current dest addr
+ */
+
+ neg r8,r3 /* LS 4 bits = # bytes to 8-byte dest bdry */
+ clrlwi r8,r8,32-4 /* align to 16byte boundary */
+ sub r7,r4,r3 /* compute offset to src from dest */
+ cmplwi cr0,r8,0 /* Were we aligned on a 16 byte bdy? */
+ beq+ L(dst_aligned)
+
+
+
+ /* Destination is not aligned on quadword boundary. Get us to one.
+
+ r3 - return value (always)
+ r4 - current source addr
+ r5 - copy length
+ r6 - current dest addr
+ r7 - offset to src from dest
+ r8 - number of bytes to quadword boundary
+ */
+
+ mtcrf 0x01,r8 /* put #bytes to boundary into cr7 */
+ subf r5,r8,r5 /* adjust remaining len */
+
+ bf cr7*4+3,1f
+ lbzx r0,r7,r6 /* copy 1 byte addr */
+ stb r0,0(r6)
+ addi r6,r6,1
+1:
+ bf cr7*4+2,2f
+ lhzx r0,r7,r6 /* copy 2 byte addr */
+ sth r0,0(r6)
+ addi r6,r6,2
+2:
+ bf cr7*4+1,4f
+ lwzx r0,r7,r6 /* copy 4 byte addr */
+ stw r0,0(r6)
+ addi r6,r6,4
+4:
+ bf cr7*4+0,8f
+ lfdx r0,r7,r6 /* copy 8 byte addr */
+ stfd r0,0(r6)
+ addi r6,r6,8
+8:
+ add r4,r7,r6 /* update src addr */
+
+
+
+ /* Dest is quadword aligned now.
+
+ Lots of decisions to make. If we are copying less than a cache
+ line we won't be here long. If we are not on a cache line
+ boundary we need to get there. And then we need to figure out
+ how many cache lines ahead to pre-touch.
+
+ r3 - return value (always)
+ r4 - current source addr
+ r5 - copy length
+ r6 - current dest addr
+ */
+
+
+ .align 4
+L(dst_aligned):
+
+
+#ifdef SHARED
+ mflr r0
+/* Establishes GOT addressability so we can load __cache_line_size
+ from static. This value was set from the aux vector during startup. */
+ bcl 20,31,1f
+1:
+ mflr r9
+ addis r9,r9,__cache_line_size-1b@ha
+ lwz r9,__cache_line_size-1b@l(r9)
+ mtlr r0
+#else
+/* Load __cache_line_size from static. This value was set from the
+ aux vector during startup. */
+ lis r9,__cache_line_size@ha
+ lwz r9,__cache_line_size@l(r9)
+#endif
+
+ cmplwi cr5, r9, 0
+ bne+ cr5,L(cachelineset)
+ li r9,64
+
+
+
+L(cachelineset):
+
+ addi r10,r9,-1
+
+ cmpw cr5,r5,r10 /* Less than a cacheline to go? */
+
+ neg r7,r6 /* How far to next cacheline bdy? */
+
+ addi r6,r6,-8 /* prepare for stdu */
+ cmpwi cr0,r9,128
+ addi r4,r4,-8 /* prepare for ldu */
+
+
+ ble+ cr5,L(lessthancacheline)
+
+ beq- cr0,L(big_lines) /* 128 byte line code */
+
+
+
+
+ /* More than a cacheline left to go, and using 64 byte cachelines */
+
+ clrlwi r7,r7,32-6 /* How far to next cacheline bdy? */
+
+ cmplwi cr6,r7,0 /* Are we on a cacheline bdy already? */
+
+ /* Reduce total len by what it takes to get to the next cache line */
+ subf r5,r7,r5
+ srwi r7,r7,4 /* How many qws to get to the line bdy? */
+
+ /* How many full cache lines to copy after getting to a line bdy? */
+ srwi r10,r5,6
+
+ cmplwi r10,0 /* If no full cache lines to copy ... */
+ li r11,0 /* number cachelines to copy with prefetch */
+ beq L(nocacheprefetch)
+
+
+ /* We are here because we have at least one full cache line to copy,
+ and therefore some pre-touching to do. */
+
+ cmplwi r10,PREFETCH_AHEAD
+ li r12,64+8 /* prefetch distance */
+ ble L(lessthanmaxprefetch)
+
+ /* We can only do so much pre-fetching. R11 will have the count of
+ lines left to prefetch after the initial batch of prefetches
+ are executed. */
+
+ subi r11,r10,PREFETCH_AHEAD
+ li r10,PREFETCH_AHEAD
+
+L(lessthanmaxprefetch):
+ mtctr r10
+
+ /* At this point r10/ctr hold the number of lines to prefetch in this
+ initial batch, and r11 holds any remainder. */
+
+L(prefetchSRC):
+ dcbt r12,r4
+ addi r12,r12,64
+ bdnz L(prefetchSRC)
+
+
+ /* Prefetching is done, or was not needed.
+
+ cr6 - are we on a cacheline boundary already?
+ r7 - number of quadwords to the next cacheline boundary
+ */
+
+L(nocacheprefetch):
+ mtctr r7
+
+ cmplwi cr1,r5,64 /* Less than a cache line to copy? */
+
+ /* How many bytes are left after we copy whatever full
+ cache lines we can get? */
+ clrlwi r5,r5,32-6
+
+ beq cr6,L(cachelinealigned)
+
+
+ /* Copy quadwords up to the next cacheline boundary */
+
+L(aligntocacheline):
+ lfd fp9,0x08(r4)
+ lfdu fp10,0x10(r4)
+ stfd fp9,0x08(r6)
+ stfdu fp10,0x10(r6)
+ bdnz L(aligntocacheline)
+
+
+ .align 4
+L(cachelinealigned): /* copy while cache lines */
+
+ blt- cr1,L(lessthancacheline) /* size <64 */
+
+L(outerloop):
+ cmpwi r11,0
+ mtctr r11
+ beq- L(endloop)
+
+ li r11,64*ZERO_AHEAD +8 /* DCBZ dist */
+
+ .align 4
+ /* Copy whole cachelines, optimized by prefetching SRC cacheline */
+L(loop): /* Copy aligned body */
+ dcbt r12,r4 /* PREFETCH SOURCE some cache lines ahead */
+ lfd fp9, 0x08(r4)
+ dcbz r11,r6
+ lfd fp10, 0x10(r4)
+ lfd fp11, 0x18(r4)
+ lfd fp12, 0x20(r4)
+ stfd fp9, 0x08(r6)
+ stfd fp10, 0x10(r6)
+ stfd fp11, 0x18(r6)
+ stfd fp12, 0x20(r6)
+ lfd fp9, 0x28(r4)
+ lfd fp10, 0x30(r4)
+ lfd fp11, 0x38(r4)
+ lfdu fp12, 0x40(r4)
+ stfd fp9, 0x28(r6)
+ stfd fp10, 0x30(r6)
+ stfd fp11, 0x38(r6)
+ stfdu fp12, 0x40(r6)
+
+ bdnz L(loop)
+
+
+L(endloop):
+ cmpwi r10,0
+ beq- L(endloop2)
+ mtctr r10
+
+L(loop2): /* Copy aligned body */
+ lfd fp9, 0x08(r4)
+ lfd fp10, 0x10(r4)
+ lfd fp11, 0x18(r4)
+ lfd fp12, 0x20(r4)
+ stfd fp9, 0x08(r6)
+ stfd fp10, 0x10(r6)
+ stfd fp11, 0x18(r6)
+ stfd fp12, 0x20(r6)
+ lfd fp9, 0x28(r4)
+ lfd fp10, 0x30(r4)
+ lfd fp11, 0x38(r4)
+ lfdu fp12, 0x40(r4)
+ stfd fp9, 0x28(r6)
+ stfd fp10, 0x30(r6)
+ stfd fp11, 0x38(r6)
+ stfdu fp12, 0x40(r6)
+
+ bdnz L(loop2)
+L(endloop2):
+
+
+ .align 4
+L(lessthancacheline): /* Was there less than cache to do ? */
+ cmplwi cr0,r5,16
+ srwi r7,r5,4 /* divide size by 16 */
+ blt- L(do_lt16)
+ mtctr r7
+
+L(copy_remaining):
+ lfd fp9, 0x08(r4)
+ lfdu fp10, 0x10(r4)
+ stfd fp9, 0x08(r6)
+ stfdu fp10, 0x10(r6)
+ bdnz L(copy_remaining)
+
+L(do_lt16): /* less than 16 ? */
+ cmplwi cr0,r5,0 /* copy remaining bytes (0-15) */
+ beqlr+ /* no rest to copy */
+ addi r4,r4,8
+ addi r6,r6,8
+
+L(shortcopy): /* SIMPLE COPY to handle size =< 15 bytes */
+ mtcrf 0x01,r5
+ sub r7,r4,r6
+ bf- cr7*4+0,8f
+ lfdx fp9,r7,r6 /* copy 8 byte */
+ stfd fp9,0(r6)
+ addi r6,r6,8
+8:
+ bf cr7*4+1,4f
+ lwzx r0,r7,r6 /* copy 4 byte */
+ stw r0,0(r6)
+ addi r6,r6,4
+4:
+ bf cr7*4+2,2f
+ lhzx r0,r7,r6 /* copy 2 byte */
+ sth r0,0(r6)
+ addi r6,r6,2
+2:
+ bf cr7*4+3,1f
+ lbzx r0,r7,r6 /* copy 1 byte */
+ stb r0,0(r6)
+1:
+ blr
+
+
+
+
+
+ /* Similar to above, but for use with 128 byte lines. */
+
+
+L(big_lines):
+
+ clrlwi r7,r7,32-7 /* How far to next cacheline bdy? */
+
+ cmplwi cr6,r7,0 /* Are we on a cacheline bdy already? */
+
+ /* Reduce total len by what it takes to get to the next cache line */
+ subf r5,r7,r5
+ srwi r7,r7,4 /* How many qw to get to the line bdy? */
+
+ /* How many full cache lines to copy after getting to a line bdy? */
+ srwi r10,r5,7
+
+ cmplwi r10,0 /* If no full cache lines to copy ... */
+ li r11,0 /* number cachelines to copy with prefetch */
+ beq L(nocacheprefetch_128)
+
+
+ /* We are here because we have at least one full cache line to copy,
+ and therefore some pre-touching to do. */
+
+ cmplwi r10,PREFETCH_AHEAD
+ li r12,128+8 /* prefetch distance */
+ ble L(lessthanmaxprefetch_128)
+
+ /* We can only do so much pre-fetching. R11 will have the count of
+ lines left to prefetch after the initial batch of prefetches
+ are executed. */
+
+ subi r11,r10,PREFETCH_AHEAD
+ li r10,PREFETCH_AHEAD
+
+L(lessthanmaxprefetch_128):
+ mtctr r10
+
+ /* At this point r10/ctr hold the number of lines to prefetch in this
+ initial batch, and r11 holds any remainder. */
+
+L(prefetchSRC_128):
+ dcbt r12,r4
+ addi r12,r12,128
+ bdnz L(prefetchSRC_128)
+
+
+ /* Prefetching is done, or was not needed.
+
+ cr6 - are we on a cacheline boundary already?
+ r7 - number of quadwords to the next cacheline boundary
+ */
+
+L(nocacheprefetch_128):
+ mtctr r7
+
+ cmplwi cr1,r5,128 /* Less than a cache line to copy? */
+
+ /* How many bytes are left after we copy whatever full
+ cache lines we can get? */
+ clrlwi r5,r5,32-7
+
+ beq cr6,L(cachelinealigned_128)
+
+
+ /* Copy quadwords up to the next cacheline boundary */
+
+L(aligntocacheline_128):
+ lfd fp9,0x08(r4)
+ lfdu fp10,0x10(r4)
+ stfd fp9,0x08(r6)
+ stfdu fp10,0x10(r6)
+ bdnz L(aligntocacheline_128)
+
+
+L(cachelinealigned_128): /* copy while cache lines */
+
+ blt- cr1,L(lessthancacheline) /* size <128 */
+
+L(outerloop_128):
+ cmpwi r11,0
+ mtctr r11
+ beq- L(endloop_128)
+
+ li r11,128*ZERO_AHEAD +8 /* DCBZ dist */
+
+ .align 4
+ /* Copy whole cachelines, optimized by prefetching SRC cacheline */
+L(loop_128): /* Copy aligned body */
+ dcbt r12,r4 /* PREFETCH SOURCE some cache lines ahead */
+ lfd fp9, 0x08(r4)
+ dcbz r11,r6
+ lfd fp10, 0x10(r4)
+ lfd fp11, 0x18(r4)
+ lfd fp12, 0x20(r4)
+ stfd fp9, 0x08(r6)
+ stfd fp10, 0x10(r6)
+ stfd fp11, 0x18(r6)
+ stfd fp12, 0x20(r6)
+ lfd fp9, 0x28(r4)
+ lfd fp10, 0x30(r4)
+ lfd fp11, 0x38(r4)
+ lfd fp12, 0x40(r4)
+ stfd fp9, 0x28(r6)
+ stfd fp10, 0x30(r6)
+ stfd fp11, 0x38(r6)
+ stfd fp12, 0x40(r6)
+ lfd fp9, 0x48(r4)
+ lfd fp10, 0x50(r4)
+ lfd fp11, 0x58(r4)
+ lfd fp12, 0x60(r4)
+ stfd fp9, 0x48(r6)
+ stfd fp10, 0x50(r6)
+ stfd fp11, 0x58(r6)
+ stfd fp12, 0x60(r6)
+ lfd fp9, 0x68(r4)
+ lfd fp10, 0x70(r4)
+ lfd fp11, 0x78(r4)
+ lfdu fp12, 0x80(r4)
+ stfd fp9, 0x68(r6)
+ stfd fp10, 0x70(r6)
+ stfd fp11, 0x78(r6)
+ stfdu fp12, 0x80(r6)
+
+ bdnz L(loop_128)
+
+
+L(endloop_128):
+ cmpwi r10,0
+ beq- L(endloop2_128)
+ mtctr r10
+
+L(loop2_128): /* Copy aligned body */
+ lfd fp9, 0x08(r4)
+ lfd fp10, 0x10(r4)
+ lfd fp11, 0x18(r4)
+ lfd fp12, 0x20(r4)
+ stfd fp9, 0x08(r6)
+ stfd fp10, 0x10(r6)
+ stfd fp11, 0x18(r6)
+ stfd fp12, 0x20(r6)
+ lfd fp9, 0x28(r4)
+ lfd fp10, 0x30(r4)
+ lfd fp11, 0x38(r4)
+ lfd fp12, 0x40(r4)
+ stfd fp9, 0x28(r6)
+ stfd fp10, 0x30(r6)
+ stfd fp11, 0x38(r6)
+ stfd fp12, 0x40(r6)
+ lfd fp9, 0x48(r4)
+ lfd fp10, 0x50(r4)
+ lfd fp11, 0x58(r4)
+ lfd fp12, 0x60(r4)
+ stfd fp9, 0x48(r6)
+ stfd fp10, 0x50(r6)
+ stfd fp11, 0x58(r6)
+ stfd fp12, 0x60(r6)
+ lfd fp9, 0x68(r4)
+ lfd fp10, 0x70(r4)
+ lfd fp11, 0x78(r4)
+ lfdu fp12, 0x80(r4)
+ stfd fp9, 0x68(r6)
+ stfd fp10, 0x70(r6)
+ stfd fp11, 0x78(r6)
+ stfdu fp12, 0x80(r6)
+ bdnz L(loop2_128)
+L(endloop2_128):
+
+ b L(lessthancacheline)
+
+
+END (BP_SYM (memcpy))
+libc_hidden_builtin_def (memcpy)
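
The routine above follows a cache-line-oriented strategy: align the destination to a 16-byte boundary, then to a cache-line boundary, prefetch the source PREFETCH_AHEAD lines ahead with dcbt, establish each destination line with dcbz just before overwriting it, and finish with 16/8/4/2/1-byte tails. A rough C rendering of that control flow, as an illustrative sketch only (the real work is done by the lfd/stfd copy bodies and the separate 64- and 128-byte line paths above):

/* C-level sketch of the copy strategy; not the glibc code.
   'line' stands for the runtime __cache_line_size (64 or 128).  */
#include <stddef.h>
#include <stdint.h>

#define PREFETCH_AHEAD 4   /* cache lines of SRC prefetched ahead */

static void *
memcpy_sketch (void *dst, const void *src, size_t len, size_t line)
{
  unsigned char *d = dst;
  const unsigned char *s = src;

  if (len >= 16)
    {
      /* Step 1: move single bytes until DST is 16-byte aligned
	 (the mtcrf/bf 1/2/4/8-byte sequence above).  */
      while (((uintptr_t) d & 15) != 0 && len > 0)
	{ *d++ = *s++; len--; }

      /* Step 2: whole cache lines, with SRC prefetched PREFETCH_AHEAD
	 lines ahead (dcbt) and the DST line established (dcbz) just
	 before it is overwritten.  */
      while (len >= line)
	{
	  __builtin_prefetch (s + PREFETCH_AHEAD * line, 0 /* read */);
	  for (size_t i = 0; i < line; i++)   /* stands in for the lfd/stfd body */
	    d[i] = s[i];
	  d += line; s += line; len -= line;
	}
    }

  /* Step 3: 16/8/4/2/1-byte tail (L(lessthancacheline) / L(shortcopy)).  */
  while (len > 0)
    { *d++ = *s++; len--; }

  return dst;
}
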
diff --git a/libc/sysdeps/powerpc/powerpc32/power7/mempcpy.S b/libc/sysdeps/powerpc/powerpc32/power7/mempcpy.S
new file mode 100644
index 000000000..5e0525645
--- /dev/null
+++ b/libc/sysdeps/powerpc/powerpc32/power7/mempcpy.S
@@ -0,0 +1,471 @@
+/* Optimized mempcpy implementation for POWER7.
+ Copyright (C) 2010 Free Software Foundation, Inc.
+ Contributed by Luis Machado <luisgpm@br.ibm.com>.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, write to the Free
+ Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA
+ 02110-1301 USA. */
+
+#include <sysdep.h>
+#include <bp-sym.h>
+#include <bp-asm.h>
+
+/* __ptr_t [r3] __mempcpy (__ptr_t dst [r3], __ptr_t src [r4], size_t len [r5]);
+ Returns 'dst' + 'len'. */
+
+ .machine power7
+EALIGN (BP_SYM (__mempcpy), 5, 0)
+ CALL_MCOUNT
+
+ stwu 1,-32(1)
+ cfi_adjust_cfa_offset(32)
+ stw 30,20(1)
+ cfi_offset(30,(20-32))
+ stw 31,24(1)
+ mr 30,3
+ cmplwi cr1,5,31
+ neg 0,3
+ cfi_offset(31,-8)
+ ble cr1,L(copy_LT_32) /* If move < 32 bytes use short move
+ code. */
+
+ andi. 11,3,7 /* Check alignment of DST. */
+ clrlwi 10,4,29 /* Check alignment of SRC. */
+ cmplw cr6,10,11 /* SRC and DST alignments match? */
+ mr 12,4
+ mr 31,5
+ bne cr6,L(copy_GE_32_unaligned)
+
+ srwi 9,5,3 /* Number of full quadwords remaining. */
+
+ beq L(copy_GE_32_aligned_cont)
+
+ clrlwi 0,0,29
+ mtcrf 0x01,0
+ subf 31,0,5
+
+ /* Get the SRC aligned to 8 bytes. */
+
+1: bf 31,2f
+ lbz 6,0(12)
+ addi 12,12,1
+ stb 6,0(3)
+ addi 3,3,1
+2: bf 30,4f
+ lhz 6,0(12)
+ addi 12,12,2
+ sth 6,0(3)
+ addi 3,3,2
+4: bf 29,0f
+ lwz 6,0(12)
+ addi 12,12,4
+ stw 6,0(3)
+ addi 3,3,4
+0:
+ clrlwi 10,12,29 /* Check alignment of SRC again. */
+ srwi 9,31,3 /* Number of full doublewords remaining. */
+
+L(copy_GE_32_aligned_cont):
+
+ clrlwi 11,31,29
+ mtcrf 0x01,9
+
+ srwi 8,31,5
+ cmplwi cr1,9,4
+ cmplwi cr6,11,0
+ mr 11,12
+
+ /* Copy 1~3 doublewords so the main loop starts
+ at a multiple of 32 bytes. */
+
+ bf 30,1f
+ lfd 6,0(12)
+ lfd 7,8(12)
+ addi 11,12,16
+ mtctr 8
+ stfd 6,0(3)
+ stfd 7,8(3)
+ addi 10,3,16
+ bf 31,4f
+ lfd 0,16(12)
+ stfd 0,16(3)
+ blt cr1,3f
+ addi 11,12,24
+ addi 10,3,24
+ b 4f
+
+ .align 4
+1: /* Copy 1 doubleword and set the counter. */
+ mr 10,3
+ mtctr 8
+ bf 31,4f
+ lfd 6,0(12)
+ addi 11,12,8
+ stfd 6,0(3)
+ addi 10,3,8
+
+ .align 4
+4: /* Main aligned copy loop. Copies 32-bytes at a time. */
+ lfd 6,0(11)
+ lfd 7,8(11)
+ lfd 8,16(11)
+ lfd 0,24(11)
+ addi 11,11,32
+
+ stfd 6,0(10)
+ stfd 7,8(10)
+ stfd 8,16(10)
+ stfd 0,24(10)
+ addi 10,10,32
+ bdnz 4b
+3:
+
+ /* Check for tail bytes. */
+
+ clrrwi 0,31,3
+ mtcrf 0x01,31
+ beq cr6,0f
+
+.L9:
+ add 3,3,0
+ add 12,12,0
+
+ /* At this point we have a tail of 0-7 bytes and we know that the
+ destination is doubleword-aligned. */
+4: /* Copy 4 bytes. */
+ bf 29,2f
+
+ lwz 6,0(12)
+ addi 12,12,4
+ stw 6,0(3)
+ addi 3,3,4
+2: /* Copy 2 bytes. */
+ bf 30,1f
+
+ lhz 6,0(12)
+ addi 12,12,2
+ sth 6,0(3)
+ addi 3,3,2
+1: /* Copy 1 byte. */
+ bf 31,0f
+
+ lbz 6,0(12)
+ stb 6,0(3)
+0: /* Return DST + LEN pointer. */
+ add 3,30,5
+ lwz 30,20(1)
+ lwz 31,24(1)
+ addi 1,1,32
+ blr
+
+ /* Handle copies of 0~31 bytes. */
+ .align 4
+L(copy_LT_32):
+ cmplwi cr6,5,8
+ mr 12,4
+ mtcrf 0x01,5
+ ble cr6,L(copy_LE_8)
+
+ /* At least 9 bytes to go. */
+ neg 8,4
+ clrrwi 11,4,2
+ andi. 0,8,3
+ cmplwi cr1,5,16
+ mr 10,5
+ beq L(copy_LT_32_aligned)
+
+ /* Force 4-bytes alignment for SRC. */
+ mtocrf 0x01,0
+ subf 10,0,5
+2: bf 30,1f
+
+ lhz 6,0(12)
+ addi 12,12,2
+ sth 6,0(3)
+ addi 3,3,2
+1: bf 31,L(end_4bytes_alignment)
+
+ lbz 6,0(12)
+ addi 12,12,1
+ stb 6,0(3)
+ addi 3,3,1
+
+ .align 4
+L(end_4bytes_alignment):
+ cmplwi cr1,10,16
+ mtcrf 0x01,10
+
+L(copy_LT_32_aligned):
+ /* At least 6 bytes to go, and SRC is word-aligned. */
+ blt cr1,8f
+
+ /* Copy 16 bytes. */
+ lwz 6,0(12)
+ lwz 7,4(12)
+ stw 6,0(3)
+ lwz 8,8(12)
+ stw 7,4(3)
+ lwz 6,12(12)
+ addi 12,12,16
+ stw 8,8(3)
+ stw 6,12(3)
+ addi 3,3,16
+8: /* Copy 8 bytes. */
+ bf 28,4f
+
+ lwz 6,0(12)
+ lwz 7,4(12)
+ addi 12,12,8
+ stw 6,0(3)
+ stw 7,4(3)
+ addi 3,3,8
+4: /* Copy 4 bytes. */
+ bf 29,2f
+
+ lwz 6,0(12)
+ addi 12,12,4
+ stw 6,0(3)
+ addi 3,3,4
+2: /* Copy 2-3 bytes. */
+ bf 30,1f
+
+ lhz 6,0(12)
+ sth 6,0(3)
+ bf 31,0f
+ lbz 7,2(12)
+ stb 7,2(3)
+
+ /* Return DST + LEN pointer. */
+ add 3,30,5
+ lwz 30,20(1)
+ addi 1,1,32
+ blr
+
+ .align 4
+1: /* Copy 1 byte. */
+ bf 31,0f
+
+ lbz 6,0(12)
+ stb 6,0(3)
+0: /* Return DST + LEN pointer. */
+ add 3,30,5
+ lwz 30,20(1)
+ addi 1,1,32
+ blr
+
+ /* Handles copies of 0~8 bytes. */
+ .align 4
+L(copy_LE_8):
+ bne cr6,4f
+
+ /* Though we could've used lfd/stfd here, they are still
+ slow for unaligned cases. */
+
+ lwz 6,0(4)
+ lwz 7,4(4)
+ stw 6,0(3)
+ stw 7,4(3)
+
+ /* Return DST + LEN pointer. */
+ add 3,30,5
+ lwz 30,20(1)
+ addi 1,1,32
+ blr
+
+ .align 4
+4: /* Copies 4~7 bytes. */
+ bf 29,2b
+
+ lwz 6,0(4)
+ stw 6,0(3)
+ bf 30,5f
+ lhz 7,4(4)
+ sth 7,4(3)
+ bf 31,0f
+ lbz 8,6(4)
+ stb 8,6(3)
+
+ /* Return DST + LEN pointer. */
+ add 3,30,5
+ lwz 30,20(1)
+ addi 1,1,32
+ blr
+
+ .align 4
+5: /* Copy 1 byte. */
+ bf 31,0f
+
+ lbz 6,4(4)
+ stb 6,4(3)
+
+0: /* Return DST + LEN pointer. */
+ add 3,30,5
+ lwz 30,20(1)
+ addi 1,1,32
+ blr
+
+ /* Handle copies of 32+ bytes where DST is aligned (to quadword) but
+ SRC is not. Use aligned quadword loads from SRC, shifted to realign
+ the data, allowing for aligned DST stores. */
+ .align 4
+L(copy_GE_32_unaligned):
+ andi. 11,3,15 /* Check alignment of DST. */
+ clrlwi 0,0,28 /* Number of bytes until the 1st
+ quadword of DST. */
+ srwi 9,5,4 /* Number of full quadwords remaining. */
+
+ beq L(copy_GE_32_unaligned_cont)
+
+ /* SRC is not quadword aligned, get it aligned. */
+
+ mtcrf 0x01,0
+ subf 31,0,5
+
+ /* Vector instructions work best when proper alignment (16-bytes)
+ is present. Move 0~15 bytes as needed to get DST quadword-aligned. */
+1: /* Copy 1 byte. */
+ bf 31,2f
+
+ lbz 6,0(12)
+ addi 12,12,1
+ stb 6,0(3)
+ addi 3,3,1
+2: /* Copy 2 bytes. */
+ bf 30,4f
+
+ lhz 6,0(12)
+ addi 12,12,2
+ sth 6,0(3)
+ addi 3,3,2
+4: /* Copy 4 bytes. */
+ bf 29,8f
+
+ lwz 6,0(12)
+ addi 12,12,4
+ stw 6,0(3)
+ addi 3,3,4
+8: /* Copy 8 bytes. */
+ bf 28,0f
+
+ lfd 6,0(12)
+ addi 12,12,8
+ stfd 6,0(3)
+ addi 3,3,8
+0:
+ clrlwi 10,12,28 /* Check alignment of SRC. */
+ srwi 9,31,4 /* Number of full quadwords remaining. */
+
+ /* The proper alignment is present, it is OK to copy the bytes now. */
+L(copy_GE_32_unaligned_cont):
+
+ /* Setup two indexes to speed up the indexed vector operations. */
+ clrlwi 11,31,28
+ li 6,16 /* Index for 16-bytes offsets. */
+ li 7,32 /* Index for 32-bytes offsets. */
+ cmplwi cr1,11,0
+ srwi 8,31,5 /* Setup the loop counter. */
+ mr 10,3
+ mr 11,12
+ mtcrf 0x01,9
+ cmplwi cr6,9,1
+ lvsl 5,0,12
+ lvx 3,0,12
+ bf 31,L(setup_unaligned_loop)
+
+ /* Copy another 16 bytes to align to 32-bytes due to the loop . */
+ lvx 4,12,6
+ vperm 6,3,4,5
+ addi 11,12,16
+ addi 10,3,16
+ stvx 6,0,3
+ vor 3,4,4
+
+L(setup_unaligned_loop):
+ mtctr 8
+ ble cr6,L(end_unaligned_loop)
+
+ /* Copy 32 bytes at a time using vector instructions. */
+ .align 4
+L(unaligned_loop):
+
+ /* Note: vr6/vr10 may contain data that was already copied,
+ but in order to get proper alignment, we may have to copy
+ some portions again. This is faster than having unaligned
+ vector instructions though. */
+
+ lvx 4,11,6 /* vr4 = r11+16. */
+ vperm 6,3,4,5 /* Merge the correctly-aligned portions
+ of vr3/vr4 into vr6. */
+ lvx 3,11,7 /* vr3 = r11+32. */
+ vperm 10,4,3,5 /* Merge the correctly-aligned portions
+ of vr3/vr4 into vr10. */
+ addi 11,11,32
+ stvx 6,0,10
+ stvx 10,10,6
+ addi 10,10,32
+
+ bdnz L(unaligned_loop)
+
+ .align 4
+L(end_unaligned_loop):
+
+ /* Check for tail bytes. */
+ clrrwi 0,31,4
+ mtcrf 0x01,31
+ beq cr1,0f
+
+ add 3,3,0
+ add 12,12,0
+
+ /* We have 1~15 tail bytes to copy, and DST is quadword aligned. */
+8: /* Copy 8 bytes. */
+ bf 28,4f
+
+ lwz 6,0(12)
+ lwz 7,4(12)
+ addi 12,12,8
+ stw 6,0(3)
+ stw 7,4(3)
+ addi 3,3,8
+4: /* Copy 4 bytes. */
+ bf 29,2f
+
+ lwz 6,0(12)
+ addi 12,12,4
+ stw 6,0(3)
+ addi 3,3,4
+2: /* Copy 2~3 bytes. */
+ bf 30,1f
+
+ lhz 6,0(12)
+ addi 12,12,2
+ sth 6,0(3)
+ addi 3,3,2
+1: /* Copy 1 byte. */
+ bf 31,0f
+
+ lbz 6,0(12)
+ stb 6,0(3)
+0: /* Return DST + LEN pointer. */
+ add 3,30,5
+ lwz 30,20(1)
+ lwz 31,24(1)
+ addi 1,1,32
+ blr
+
+END (BP_SYM (__mempcpy))
+libc_hidden_def (BP_SYM (__mempcpy))
+weak_alias (BP_SYM (__mempcpy), BP_SYM (mempcpy))
+libc_hidden_builtin_def (mempcpy)
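
Unlike memcpy, mempcpy returns DST + LEN rather than DST, which is why the routine saves the incoming destination (r30) and adds the length back before every return. A minimal usage example showing the semantics (any conforming mempcpy; _GNU_SOURCE is needed because mempcpy is a GNU extension):

#define _GNU_SOURCE
#include <string.h>
#include <stdio.h>

int
main (void)
{
  char buf[16];
  char *p = buf;
  p = mempcpy (p, "abc", 3);   /* p now points just past "abc" */
  p = mempcpy (p, "def", 4);   /* copy "def" plus its terminating NUL */
  printf ("%s\n", buf);        /* prints "abcdef" */
  return 0;
}
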
diff --git a/libc/sysdeps/powerpc/powerpc64/a2/memcpy.S b/libc/sysdeps/powerpc/powerpc64/a2/memcpy.S
new file mode 100644
index 000000000..ac95171aa
--- /dev/null
+++ b/libc/sysdeps/powerpc/powerpc64/a2/memcpy.S
@@ -0,0 +1,501 @@
+/* Optimized memcpy implementation for PowerPC A2.
+ Copyright (C) 2010 Free Software Foundation, Inc.
+ Contributed by Michael Brutman <brutman@us.ibm.com>.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, write to the Free
+ Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307 USA. */
+
+#include <sysdep.h>
+#include <bp-sym.h>
+#include <bp-asm.h>
+
+#define PREFETCH_AHEAD 4 /* no cache lines SRC prefetching ahead */
+#define ZERO_AHEAD 2 /* no cache lines DST zeroing ahead */
+
+ .section ".toc","aw"
+.LC0:
+ .tc __cache_line_size[TC],__cache_line_size
+ .section ".text"
+ .align 2
+
+
+ .machine a2
+EALIGN (BP_SYM (memcpy), 5, 0)
+ CALL_MCOUNT 3
+
+ dcbt 0,r4 /* Prefetch ONE SRC cacheline */
+ cmpldi cr1,r5,16 /* is size < 16 ? */
+ mr r6,r3 /* Copy dest reg to r6; */
+ blt+ cr1,L(shortcopy)
+
+
+ /* Big copy (16 bytes or more)
+
+ Figure out how far to the nearest quadword boundary, or if we are
+ on one already. Also get the cache line size.
+
+ r3 - return value (always)
+ r4 - current source addr
+ r5 - copy length
+ r6 - current dest addr
+ */
+
+ neg r8,r3 /* LS 4 bits = # bytes to 8-byte dest bdry */
+ ld r9,.LC0@toc(r2) /* Get cache line size (part 1) */
+ clrldi r8,r8,64-4 /* align to 16byte boundary */
+ sub r7,r4,r3 /* compute offset to src from dest */
+ lwz r9,0(r9) /* Get cache line size (part 2) */
+ cmpldi cr0,r8,0 /* Were we aligned on a 16 byte bdy? */
+ addi r10,r9,-1 /* Cache line mask */
+ beq+ L(dst_aligned)
+
+
+
+ /* Destination is not aligned on quadword boundary. Get us to one.
+
+ r3 - return value (always)
+ r4 - current source addr
+ r5 - copy length
+ r6 - current dest addr
+ r7 - offset to src from dest
+ r8 - number of bytes to quadword boundary
+ */
+
+ mtcrf 0x01,r8 /* put #bytes to boundary into cr7 */
+ subf r5,r8,r5 /* adjust remaining len */
+
+ bf cr7*4+3,1f
+ lbzx r0,r7,r6 /* copy 1 byte addr */
+ stb r0,0(r6)
+ addi r6,r6,1
+1:
+ bf cr7*4+2,2f
+ lhzx r0,r7,r6 /* copy 2 byte addr */
+ sth r0,0(r6)
+ addi r6,r6,2
+2:
+ bf cr7*4+1,4f
+ lwzx r0,r7,r6 /* copy 4 byte addr */
+ stw r0,0(r6)
+ addi r6,r6,4
+4:
+ bf cr7*4+0,8f
+ ldx r0,r7,r6 /* copy 8 byte addr */
+ std r0,0(r6)
+ addi r6,r6,8
+8:
+ add r4,r7,r6 /* update src addr */
+
+
+
+ /* Dest is quadword aligned now.
+
+ Lots of decisions to make. If we are copying less than a cache
+ line we won't be here long. If we are not on a cache line
+ boundary we need to get there. And then we need to figure out
+ how many cache lines ahead to pre-touch.
+
+ r3 - return value (always)
+ r4 - current source addr
+ r5 - copy length
+ r6 - current dest addr
+ */
+
+
+ .align 4
+L(dst_aligned):
+
+
+ cmpd cr5,r5,r10 /* Less than a cacheline to go? */
+
+ neg r7,r6 /* How far to next cacheline bdy? */
+
+ addi r6,r6,-8 /* prepare for stdu */
+ cmpdi cr0,r9,128
+ addi r4,r4,-8 /* prepare for ldu */
+
+
+ ble+ cr5,L(lessthancacheline)
+
+ beq- cr0,L(big_lines) /* 128 byte line code */
+
+
+
+ /* More than a cacheline left to go, and using 64 byte cachelines */
+
+ clrldi r7,r7,64-6 /* How far to next cacheline bdy? */
+
+ cmpldi cr6,r7,0 /* Are we on a cacheline bdy already? */
+
+ /* Reduce total len by what it takes to get to the next cache line */
+ subf r5,r7,r5
+ srdi r7,r7,4 /* How many qws to get to the line bdy? */
+
+ /* How many full cache lines to copy after getting to a line bdy? */
+ srdi r10,r5,6
+
+ cmpldi r10,0 /* If no full cache lines to copy ... */
+ li r11,0 /* number cachelines to copy with prefetch */
+ beq L(nocacheprefetch)
+
+
+ /* We are here because we have at least one full cache line to copy,
+ and therefore some pre-touching to do. */
+
+ cmpldi r10,PREFETCH_AHEAD
+ li r12,64+8 /* prefetch distance */
+ ble L(lessthanmaxprefetch)
+
+ /* We can only do so much pre-fetching. R11 will have the count of
+ lines left to prefetch after the initial batch of prefetches
+ are executed. */
+
+ subi r11,r10,PREFETCH_AHEAD
+ li r10,PREFETCH_AHEAD
+
+L(lessthanmaxprefetch):
+ mtctr r10
+
+ /* At this point r10/ctr hold the number of lines to prefetch in this
+ initial batch, and r11 holds any remainder. */
+
+L(prefetchSRC):
+ dcbt r12,r4
+ addi r12,r12,64
+ bdnz L(prefetchSRC)
+
+
+ /* Prefetching is done, or was not needed.
+
+ cr6 - are we on a cacheline boundary already?
+ r7 - number of quadwords to the next cacheline boundary
+ */
+
+L(nocacheprefetch):
+ mtctr r7
+
+ cmpldi cr1,r5,64 /* Less than a cache line to copy? */
+
+ /* How many bytes are left after we copy whatever full
+ cache lines we can get? */
+ clrldi r5,r5,64-6
+
+ beq cr6,L(cachelinealigned)
+
+
+ /* Copy quadwords up to the next cacheline boundary */
+
+L(aligntocacheline):
+ ld r9,0x08(r4)
+ ld r7,0x10(r4)
+ addi r4,r4,0x10
+ std r9,0x08(r6)
+ stdu r7,0x10(r6)
+ bdnz L(aligntocacheline)
+
+
+ .align 4
+L(cachelinealigned): /* copy while cache lines */
+
+ blt- cr1,L(lessthancacheline) /* size <64 */
+
+L(outerloop):
+ cmpdi r11,0
+ mtctr r11
+ beq- L(endloop)
+
+ li r11,64*ZERO_AHEAD +8 /* DCBZ dist */
+
+ .align 4
+ /* Copy whole cachelines, optimized by prefetching SRC cacheline */
+L(loop): /* Copy aligned body */
+ dcbt r12,r4 /* PREFETCH SOURCE some cache lines ahead */
+ ld r9, 0x08(r4)
+ dcbz r11,r6
+ ld r7, 0x10(r4)
+ ld r8, 0x18(r4)
+ ld r0, 0x20(r4)
+ std r9, 0x08(r6)
+ std r7, 0x10(r6)
+ std r8, 0x18(r6)
+ std r0, 0x20(r6)
+ ld r9, 0x28(r4)
+ ld r7, 0x30(r4)
+ ld r8, 0x38(r4)
+ ld r0, 0x40(r4)
+ addi r4, r4,0x40
+ std r9, 0x28(r6)
+ std r7, 0x30(r6)
+ std r8, 0x38(r6)
+ stdu r0, 0x40(r6)
+
+ bdnz L(loop)
+
+
+L(endloop):
+ cmpdi r10,0
+ beq- L(endloop2)
+ mtctr r10
+
+L(loop2): /* Copy aligned body */
+ ld r9, 0x08(r4)
+ ld r7, 0x10(r4)
+ ld r8, 0x18(r4)
+ ld r0, 0x20(r4)
+ std r9, 0x08(r6)
+ std r7, 0x10(r6)
+ std r8, 0x18(r6)
+ std r0, 0x20(r6)
+ ld r9, 0x28(r4)
+ ld r7, 0x30(r4)
+ ld r8, 0x38(r4)
+ ld r0, 0x40(r4)
+ addi r4, r4,0x40
+ std r9, 0x28(r6)
+ std r7, 0x30(r6)
+ std r8, 0x38(r6)
+ stdu r0, 0x40(r6)
+
+ bdnz L(loop2)
+L(endloop2):
+
+
+ .align 4
+L(lessthancacheline): /* Was there less than cache to do ? */
+ cmpldi cr0,r5,16
+ srdi r7,r5,4 /* divide size by 16 */
+ blt- L(do_lt16)
+ mtctr r7
+
+L(copy_remaining):
+ ld r8,0x08(r4)
+ ld r7,0x10(r4)
+ addi r4,r4,0x10
+ std r8,0x08(r6)
+ stdu r7,0x10(r6)
+ bdnz L(copy_remaining)
+
+L(do_lt16): /* less than 16 ? */
+ cmpldi cr0,r5,0 /* copy remaining bytes (0-15) */
+ beqlr+ /* no rest to copy */
+ addi r4,r4,8
+ addi r6,r6,8
+
+L(shortcopy): /* SIMPLE COPY to handle size =< 15 bytes */
+ mtcrf 0x01,r5
+ sub r7,r4,r6
+ bf- cr7*4+0,8f
+ ldx r0,r7,r6 /* copy 8 byte */
+ std r0,0(r6)
+ addi r6,r6,8
+8:
+ bf cr7*4+1,4f
+ lwzx r0,r7,r6 /* copy 4 byte */
+ stw r0,0(r6)
+ addi r6,r6,4
+4:
+ bf cr7*4+2,2f
+ lhzx r0,r7,r6 /* copy 2 byte */
+ sth r0,0(r6)
+ addi r6,r6,2
+2:
+ bf cr7*4+3,1f
+ lbzx r0,r7,r6 /* copy 1 byte */
+ stb r0,0(r6)
+1:
+ blr
+
+
+
+
+
+ /* Similar to above, but for use with 128 byte lines. */
+
+
+L(big_lines):
+
+ clrldi r7,r7,64-7 /* How far to next cacheline bdy? */
+
+ cmpldi cr6,r7,0 /* Are we on a cacheline bdy already? */
+
+ /* Reduce total len by what it takes to get to the next cache line */
+ subf r5,r7,r5
+ srdi r7,r7,4 /* How many qws to get to the line bdy? */
+
+ /* How many full cache lines to copy after getting to a line bdy? */
+ srdi r10,r5,7
+
+ cmpldi r10,0 /* If no full cache lines to copy ... */
+ li r11,0 /* number cachelines to copy with prefetch */
+ beq L(nocacheprefetch_128)
+
+
+ /* We are here because we have at least one full cache line to copy,
+ and therefore some pre-touching to do. */
+
+ cmpldi r10,PREFETCH_AHEAD
+ li r12,128+8 /* prefetch distance */
+ ble L(lessthanmaxprefetch_128)
+
+ /* We can only do so much pre-fetching. R11 will have the count of
+ lines left to prefetch after the initial batch of prefetches
+ are executed. */
+
+ subi r11,r10,PREFETCH_AHEAD
+ li r10,PREFETCH_AHEAD
+
+L(lessthanmaxprefetch_128):
+ mtctr r10
+
+ /* At this point r10/ctr hold the number of lines to prefetch in this
+ initial batch, and r11 holds any remainder. */
+
+L(prefetchSRC_128):
+ dcbt r12,r4
+ addi r12,r12,128
+ bdnz L(prefetchSRC_128)
+
+
+ /* Prefetching is done, or was not needed.
+
+ cr6 - are we on a cacheline boundary already?
+ r7 - number of quadwords to the next cacheline boundary
+ */
+
+L(nocacheprefetch_128):
+ mtctr r7
+
+ cmpldi cr1,r5,128 /* Less than a cache line to copy? */
+
+ /* How many bytes are left after we copy whatever full
+ cache lines we can get? */
+ clrldi r5,r5,64-7
+
+ beq cr6,L(cachelinealigned_128)
+
+
+ /* Copy quadwords up to the next cacheline boundary */
+
+L(aligntocacheline_128):
+ ld r9,0x08(r4)
+ ld r7,0x10(r4)
+ addi r4,r4,0x10
+ std r9,0x08(r6)
+ stdu r7,0x10(r6)
+ bdnz L(aligntocacheline_128)
+
+
+L(cachelinealigned_128): /* copy while cache lines */
+
+ blt- cr1,L(lessthancacheline) /* size <128 */
+
+L(outerloop_128):
+ cmpdi r11,0
+ mtctr r11
+ beq- L(endloop_128)
+
+ li r11,128*ZERO_AHEAD +8 /* DCBZ dist */
+
+ .align 4
+ /* Copy whole cachelines, optimized by prefetching SRC cacheline */
+L(loop_128): /* Copy aligned body */
+ dcbt r12,r4 /* PREFETCH SOURCE some cache lines ahead */
+ ld r9, 0x08(r4)
+ dcbz r11,r6
+ ld r7, 0x10(r4)
+ ld r8, 0x18(r4)
+ ld r0, 0x20(r4)
+ std r9, 0x08(r6)
+ std r7, 0x10(r6)
+ std r8, 0x18(r6)
+ std r0, 0x20(r6)
+ ld r9, 0x28(r4)
+ ld r7, 0x30(r4)
+ ld r8, 0x38(r4)
+ ld r0, 0x40(r4)
+ std r9, 0x28(r6)
+ std r7, 0x30(r6)
+ std r8, 0x38(r6)
+ std r0, 0x40(r6)
+ ld r9, 0x48(r4)
+ ld r7, 0x50(r4)
+ ld r8, 0x58(r4)
+ ld r0, 0x60(r4)
+ std r9, 0x48(r6)
+ std r7, 0x50(r6)
+ std r8, 0x58(r6)
+ std r0, 0x60(r6)
+ ld r9, 0x68(r4)
+ ld r7, 0x70(r4)
+ ld r8, 0x78(r4)
+ ld r0, 0x80(r4)
+ addi r4, r4,0x80
+ std r9, 0x68(r6)
+ std r7, 0x70(r6)
+ std r8, 0x78(r6)
+ stdu r0, 0x80(r6)
+
+ bdnz L(loop_128)
+
+
+L(endloop_128):
+ cmpdi r10,0
+ beq- L(endloop2_128)
+ mtctr r10
+
+L(loop2_128): /* Copy aligned body */
+ ld r9, 0x08(r4)
+ ld r7, 0x10(r4)
+ ld r8, 0x18(r4)
+ ld r0, 0x20(r4)
+ std r9, 0x08(r6)
+ std r7, 0x10(r6)
+ std r8, 0x18(r6)
+ std r0, 0x20(r6)
+ ld r9, 0x28(r4)
+ ld r7, 0x30(r4)
+ ld r8, 0x38(r4)
+ ld r0, 0x40(r4)
+ std r9, 0x28(r6)
+ std r7, 0x30(r6)
+ std r8, 0x38(r6)
+ std r0, 0x40(r6)
+ ld r9, 0x48(r4)
+ ld r7, 0x50(r4)
+ ld r8, 0x58(r4)
+ ld r0, 0x60(r4)
+ std r9, 0x48(r6)
+ std r7, 0x50(r6)
+ std r8, 0x58(r6)
+ std r0, 0x60(r6)
+ ld r9, 0x68(r4)
+ ld r7, 0x70(r4)
+ ld r8, 0x78(r4)
+ ld r0, 0x80(r4)
+ addi r4, r4,0x80
+ std r9, 0x68(r6)
+ std r7, 0x70(r6)
+ std r8, 0x78(r6)
+ stdu r0, 0x80(r6)
+
+ bdnz L(loop2_128)
+L(endloop2_128):
+
+ b L(lessthancacheline)
+
+
+END_GEN_TB (BP_SYM (memcpy),TB_TOCLESS)
+libc_hidden_builtin_def (memcpy)
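
The 64-bit A2 version differs from the 32-bit one mainly in how it reaches __cache_line_size: instead of a GOT-relative sequence it loads the variable's address from a TOC entry (.LC0 above). Per the comments in the 32-bit file, the value itself is filled in from the kernel's auxiliary vector during startup. As an aside, user code can inspect the same kernel-provided value; a small sketch assuming getauxval (a glibc addition later than this commit) and the AT_DCACHEBSIZE tag:

#include <sys/auxv.h>   /* getauxval, glibc 2.16+ */
#include <elf.h>        /* AT_DCACHEBSIZE */
#include <stdio.h>

int
main (void)
{
  unsigned long line = getauxval (AT_DCACHEBSIZE);   /* 0 if the kernel did not provide it */
  printf ("D-cache block size from the aux vector: %lu\n", line);
  return 0;
}
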
diff --git a/libc/sysdeps/powerpc/powerpc64/power7/mempcpy.S b/libc/sysdeps/powerpc/powerpc64/power7/mempcpy.S
new file mode 100644
index 000000000..09c08ab51
--- /dev/null
+++ b/libc/sysdeps/powerpc/powerpc64/power7/mempcpy.S
@@ -0,0 +1,458 @@
+/* Optimized mempcpy implementation for POWER7.
+ Copyright (C) 2010 Free Software Foundation, Inc.
+ Contributed by Luis Machado <luisgpm@br.ibm.com>.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, write to the Free
+ Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA
+ 02110-1301 USA. */
+
+#include <sysdep.h>
+#include <bp-sym.h>
+#include <bp-asm.h>
+
+
+/* __ptr_t [r3] __mempcpy (__ptr_t dst [r3], __ptr_t src [r4], size_t len [r5]);
+ Returns 'dst' + 'len'. */
+
+ .machine power7
+EALIGN (BP_SYM (__mempcpy), 5, 0)
+ CALL_MCOUNT 3
+
+ cmpldi cr1,5,31
+ neg 0,3
+ std 3,-16(1)
+ std 31,-8(1)
+ cfi_offset(31,-8)
+ ble cr1,L(copy_LT_32) /* If move < 32 bytes use short move
+ code. */
+
+ andi. 11,3,7 /* Check alignment of DST. */
+
+
+ clrldi 10,4,61 /* Check alignment of SRC. */
+ cmpld cr6,10,11 /* SRC and DST alignments match? */
+ mr 12,4
+ mr 31,5
+ bne cr6,L(copy_GE_32_unaligned)
+
+ srdi 9,5,3 /* Number of full quadwords remaining. */
+
+ beq L(copy_GE_32_aligned_cont)
+
+ clrldi 0,0,61
+ mtcrf 0x01,0
+ subf 31,0,5
+
+ /* Get the SRC aligned to 8 bytes. */
+
+1: bf 31,2f
+ lbz 6,0(12)
+ addi 12,12,1
+ stb 6,0(3)
+ addi 3,3,1
+2: bf 30,4f
+ lhz 6,0(12)
+ addi 12,12,2
+ sth 6,0(3)
+ addi 3,3,2
+4: bf 29,0f
+ lwz 6,0(12)
+ addi 12,12,4
+ stw 6,0(3)
+ addi 3,3,4
+0:
+ clrldi 10,12,61 /* Check alignment of SRC again. */
+ srdi 9,31,3 /* Number of full doublewords remaining. */
+
+L(copy_GE_32_aligned_cont):
+
+ clrldi 11,31,61
+ mtcrf 0x01,9
+
+ srdi 8,31,5
+ cmpldi cr1,9,4
+ cmpldi cr6,11,0
+ mr 11,12
+
+ /* Copy 1~3 doublewords so the main loop starts
+ at a multiple of 32 bytes. */
+
+ bf 30,1f
+ ld 6,0(12)
+ ld 7,8(12)
+ addi 11,12,16
+ mtctr 8
+ std 6,0(3)
+ std 7,8(3)
+ addi 10,3,16
+ bf 31,4f
+ ld 0,16(12)
+ std 0,16(3)
+ blt cr1,3f
+ addi 11,12,24
+ addi 10,3,24
+ b 4f
+
+ .align 4
+1: /* Copy 1 doubleword and set the counter. */
+ mr 10,3
+ mtctr 8
+ bf 31,4f
+ ld 6,0(12)
+ addi 11,12,8
+ std 6,0(3)
+ addi 10,3,8
+
+ /* Main aligned copy loop. Copies 32-bytes at a time. */
+ .align 4
+4:
+ ld 6,0(11)
+ ld 7,8(11)
+ ld 8,16(11)
+ ld 0,24(11)
+ addi 11,11,32
+
+ std 6,0(10)
+ std 7,8(10)
+ std 8,16(10)
+ std 0,24(10)
+ addi 10,10,32
+ bdnz 4b
+3:
+
+ /* Check for tail bytes. */
+ rldicr 0,31,0,60
+ mtcrf 0x01,31
+ beq cr6,0f
+
+.L9:
+ add 3,3,0
+ add 12,12,0
+
+ /* At this point we have a tail of 0-7 bytes and we know that the
+ destination is doubleword-aligned. */
+4: /* Copy 4 bytes. */
+ bf 29,2f
+
+ lwz 6,0(12)
+ addi 12,12,4
+ stw 6,0(3)
+ addi 3,3,4
+2: /* Copy 2 bytes. */
+ bf 30,1f
+
+ lhz 6,0(12)
+ addi 12,12,2
+ sth 6,0(3)
+ addi 3,3,2
+1: /* Copy 1 byte. */
+ bf 31,0f
+
+ lbz 6,0(12)
+ stb 6,0(3)
+0: /* Return DST + LEN pointer. */
+ ld 31,-8(1)
+ ld 3,-16(1)
+ add 3,3,5
+ blr
+
+ /* Handle copies of 0~31 bytes. */
+ .align 4
+L(copy_LT_32):
+ cmpldi cr6,5,8
+ mr 12,4
+ mtcrf 0x01,5
+ ble cr6,L(copy_LE_8)
+
+ /* At least 9 bytes to go. */
+ neg 8,4
+ clrrdi 11,4,2
+ andi. 0,8,3
+ cmpldi cr1,5,16
+ mr 10,5
+ beq L(copy_LT_32_aligned)
+
+ /* Force 4-bytes alignment for SRC. */
+ mtocrf 0x01,0
+ subf 10,0,5
+2: bf 30,1f
+
+ lhz 6,0(12)
+ addi 12,12,2
+ sth 6,0(3)
+ addi 3,3,2
+1: bf 31,L(end_4bytes_alignment)
+
+ lbz 6,0(12)
+ addi 12,12,1
+ stb 6,0(3)
+ addi 3,3,1
+
+ .align 4
+L(end_4bytes_alignment):
+ cmpldi cr1,10,16
+ mtcrf 0x01,10
+
+L(copy_LT_32_aligned):
+ /* At least 6 bytes to go, and SRC is word-aligned. */
+ blt cr1,8f
+
+ /* Copy 16 bytes. */
+ lwz 6,0(12)
+ lwz 7,4(12)
+ stw 6,0(3)
+ lwz 8,8(12)
+ stw 7,4(3)
+ lwz 6,12(12)
+ addi 12,12,16
+ stw 8,8(3)
+ stw 6,12(3)
+ addi 3,3,16
+8: /* Copy 8 bytes. */
+ bf 28,4f
+
+ lwz 6,0(12)
+ lwz 7,4(12)
+ addi 12,12,8
+ stw 6,0(3)
+ stw 7,4(3)
+ addi 3,3,8
+4: /* Copy 4 bytes. */
+ bf 29,2f
+
+ lwz 6,0(12)
+ addi 12,12,4
+ stw 6,0(3)
+ addi 3,3,4
+2: /* Copy 2-3 bytes. */
+ bf 30,1f
+
+ lhz 6,0(12)
+ sth 6,0(3)
+ bf 31,0f
+ lbz 7,2(12)
+ stb 7,2(3)
+ ld 3,-16(1)
+ add 3,3,5
+ blr
+
+ .align 4
+1: /* Copy 1 byte. */
+ bf 31,0f
+
+ lbz 6,0(12)
+ stb 6,0(3)
+0: /* Return DST + LEN pointer. */
+ ld 3,-16(1)
+ add 3,3,5
+ blr
+
+ /* Handles copies of 0~8 bytes. */
+ .align 4
+L(copy_LE_8):
+ bne cr6,4f
+
+ /* Though we could've used ld/std here, they are still
+ slow for unaligned cases. */
+
+ lwz 6,0(4)
+ lwz 7,4(4)
+ stw 6,0(3)
+ stw 7,4(3)
+ ld 3,-16(1) /* Return DST + LEN pointer. */
+ add 3,3,5
+ blr
+
+ .align 4
+4: /* Copies 4~7 bytes. */
+ bf 29,2b
+
+ lwz 6,0(4)
+ stw 6,0(3)
+ bf 30,5f
+ lhz 7,4(4)
+ sth 7,4(3)
+ bf 31,0f
+ lbz 8,6(4)
+ stb 8,6(3)
+ ld 3,-16(1)
+ add 3,3,5
+ blr
+
+ .align 4
+5: /* Copy 1 byte. */
+ bf 31,0f
+
+ lbz 6,4(4)
+ stb 6,4(3)
+
+0: /* Return DST + LEN pointer. */
+ ld 3,-16(1)
+ add 3,3,5
+ blr
+
+ /* Handle copies of 32+ bytes where DST is aligned (to quadword) but
+ SRC is not. Use aligned quadword loads from SRC, shifted to realign
+ the data, allowing for aligned DST stores. */
+ .align 4
+L(copy_GE_32_unaligned):
+ clrldi 0,0,60 /* Number of bytes until the 1st
+ quadword. */
+ andi. 11,3,15 /* Check alignment of DST (against
+ quadwords). */
+ srdi 9,5,4 /* Number of full quadwords remaining. */
+
+ beq L(copy_GE_32_unaligned_cont)
+
+ /* SRC is not quadword aligned, get it aligned. */
+
+ mtcrf 0x01,0
+ subf 31,0,5
+
+ /* Vector instructions work best when proper alignment (16-bytes)
+ is present. Move 0~15 bytes as needed to get DST quadword-aligned. */
+1: /* Copy 1 byte. */
+ bf 31,2f
+
+ lbz 6,0(12)
+ addi 12,12,1
+ stb 6,0(3)
+ addi 3,3,1
+2: /* Copy 2 bytes. */
+ bf 30,4f
+
+ lhz 6,0(12)
+ addi 12,12,2
+ sth 6,0(3)
+ addi 3,3,2
+4: /* Copy 4 bytes. */
+ bf 29,8f
+
+ lwz 6,0(12)
+ addi 12,12,4
+ stw 6,0(3)
+ addi 3,3,4
+8: /* Copy 8 bytes. */
+ bf 28,0f
+
+ ld 6,0(12)
+ addi 12,12,8
+ std 6,0(3)
+ addi 3,3,8
+0:
+ clrldi 10,12,60 /* Check alignment of SRC. */
+ srdi 9,31,4 /* Number of full quadwords remaining. */
+
+ /* The proper alignment is present, it is OK to copy the bytes now. */
+L(copy_GE_32_unaligned_cont):
+
+ /* Setup two indexes to speed up the indexed vector operations. */
+ clrldi 11,31,60
+ li 6,16 /* Index for 16-bytes offsets. */
+ li 7,32 /* Index for 32-bytes offsets. */
+ cmpldi cr1,11,0
+ srdi 8,31,5 /* Setup the loop counter. */
+ mr 10,3
+ mr 11,12
+ mtcrf 0x01,9
+ cmpldi cr6,9,1
+ lvsl 5,0,12
+ lvx 3,0,12
+ bf 31,L(setup_unaligned_loop)
+
+ /* Copy another 16 bytes to align to 32-bytes due to the loop . */
+ lvx 4,12,6
+ vperm 6,3,4,5
+ addi 11,12,16
+ addi 10,3,16
+ stvx 6,0,3
+ vor 3,4,4
+
+L(setup_unaligned_loop):
+ mtctr 8
+ ble cr6,L(end_unaligned_loop)
+
+ /* Copy 32 bytes at a time using vector instructions. */
+ .align 4
+L(unaligned_loop):
+
+ /* Note: vr6/vr10 may contain data that was already copied,
+ but in order to get proper alignment, we may have to copy
+ some portions again. This is faster than having unaligned
+ vector instructions though. */
+
+ lvx 4,11,6 /* vr4 = r11+16. */
+ vperm 6,3,4,5 /* Merge the correctly-aligned portions
+ of vr3/vr4 into vr6. */
+ lvx 3,11,7 /* vr3 = r11+32. */
+ vperm 10,4,3,5 /* Merge the correctly-aligned portions
+ of vr3/vr4 into vr10. */
+ addi 11,11,32
+ stvx 6,0,10
+ stvx 10,10,6
+ addi 10,10,32
+
+ bdnz L(unaligned_loop)
+
+ .align 4
+L(end_unaligned_loop):
+
+ /* Check for tail bytes. */
+ rldicr 0,31,0,59
+ mtcrf 0x01,31
+ beq cr1,0f
+
+ add 3,3,0
+ add 12,12,0
+
+ /* We have 1~15 tail bytes to copy, and DST is quadword aligned. */
+8: /* Copy 8 bytes. */
+ bf 28,4f
+
+ lwz 6,0(12)
+ lwz 7,4(12)
+ addi 12,12,8
+ stw 6,0(3)
+ stw 7,4(3)
+ addi 3,3,8
+4: /* Copy 4 bytes. */
+ bf 29,2f
+
+ lwz 6,0(12)
+ addi 12,12,4
+ stw 6,0(3)
+ addi 3,3,4
+2: /* Copy 2~3 bytes. */
+ bf 30,1f
+
+ lhz 6,0(12)
+ addi 12,12,2
+ sth 6,0(3)
+ addi 3,3,2
+1: /* Copy 1 byte. */
+ bf 31,0f
+
+ lbz 6,0(12)
+ stb 6,0(3)
+0: /* Return DST + LEN pointer. */
+ ld 31,-8(1)
+ ld 3,-16(1)
+ add 3,3,5
+ blr
+
+END_GEN_TB (BP_SYM (__mempcpy),TB_TOCLESS)
+libc_hidden_def (BP_SYM (__mempcpy))
+weak_alias (BP_SYM (__mempcpy), BP_SYM (mempcpy))
+libc_hidden_builtin_def (mempcpy)
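
The L(unaligned_loop) in both POWER7 mempcpy variants uses the classic Altivec realignment idiom the comments describe: two aligned lvx loads straddling the unaligned source are merged by vperm under a control vector produced by lvsl, and the result is stored with an aligned stvx. A standalone sketch of the same idea using Altivec intrinsics (compile with -maltivec on a PowerPC target; this is an illustration, not the library code, and like any lvx-based scheme it reads ahead into the aligned quadword past the source data, so the buffer below has slack):

#include <altivec.h>
#include <stddef.h>

/* DST must be 16-byte aligned and n a multiple of 16.  */
static void
copy_realign (unsigned char *dst, const unsigned char *src, size_t n)
{
  vector unsigned char perm = vec_lvsl (0, src);   /* permute control from SRC misalignment */
  vector unsigned char prev = vec_ld (0, src);     /* aligned load rounds the address down */

  for (size_t off = 0; off < n; off += 16)
    {
      vector unsigned char next = vec_ld (off + 16, src);
      /* Merge the correctly aligned portions of prev/next (vperm).  */
      vector unsigned char out = vec_perm (prev, next, perm);
      vec_st (out, off, dst);                      /* aligned store */
      prev = next;
    }
}

int
main (void)
{
  static unsigned char src[96];                    /* slack for the read-ahead */
  unsigned char dst[64] __attribute__ ((aligned (16)));
  for (int i = 0; i < 96; i++)
    src[i] = (unsigned char) i;
  copy_realign (dst, src + 1, 64);                 /* deliberately misaligned source */
  return dst[0] == 1 ? 0 : 1;
}
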