diff options
author | Alan Modra <amodra@gmail.com> | 2013-08-17 18:47:22 +0930 |
---|---|---|
committer | Alan Modra <amodra@gmail.com> | 2013-10-04 10:41:24 +0930 |
commit | 759cfef3ac4c07dba1ece0bbc1207e099348816d (patch) | |
tree | a0e8cadce4426afb90d39b330dd50688b8975484 /sysdeps/powerpc/powerpc64/power4 | |
parent | fe6e95d7171eba5f3e07848f081676fae4e86322 (diff) | |
download | glibc-759cfef3ac4c07dba1ece0bbc1207e099348816d.tar.gz |
PowerPC LE memcpy
http://sourceware.org/ml/libc-alpha/2013-08/msg00103.html
Little-endian support for memcpy. I spent some time cleaning up the
64-bit power7 memcpy, in order to avoid the extra alignment traps
power7 takes for little-endian. It probably would have been better
to copy the linux kernel version of memcpy.
* sysdeps/powerpc/powerpc32/power4/memcpy.S: Add little endian support.
* sysdeps/powerpc/powerpc32/power6/memcpy.S: Likewise.
* sysdeps/powerpc/powerpc32/power7/memcpy.S: Likewise.
* sysdeps/powerpc/powerpc32/power7/mempcpy.S: Likewise.
* sysdeps/powerpc/powerpc64/memcpy.S: Likewise.
* sysdeps/powerpc/powerpc64/power4/memcpy.S: Likewise.
* sysdeps/powerpc/powerpc64/power6/memcpy.S: Likewise.
* sysdeps/powerpc/powerpc64/power7/memcpy.S: Likewise.
* sysdeps/powerpc/powerpc64/power7/mempcpy.S: Likewise. Make better
use of regs. Use power7 mtocrf. Tidy function tails.
Diffstat (limited to 'sysdeps/powerpc/powerpc64/power4')
-rw-r--r-- | sysdeps/powerpc/powerpc64/power4/memcpy.S | 61 |
1 file changed, 60 insertions, 1 deletion
diff --git a/sysdeps/powerpc/powerpc64/power4/memcpy.S b/sysdeps/powerpc/powerpc64/power4/memcpy.S index 4317c7e786..f9a7260dcb 100644 --- a/sysdeps/powerpc/powerpc64/power4/memcpy.S +++ b/sysdeps/powerpc/powerpc64/power4/memcpy.S @@ -214,15 +214,28 @@ EALIGN (memcpy, 5, 0) blt cr6,5f srdi 7,6,16 bgt cr6,3f +#ifdef __LITTLE_ENDIAN__ + sth 7,0(3) +#else sth 6,0(3) +#endif b 7f .align 4 3: +#ifdef __LITTLE_ENDIAN__ + rotlwi 6,6,24 + stb 6,0(3) + sth 7,1(3) +#else stb 7,0(3) sth 6,1(3) +#endif b 7f .align 4 5: +#ifdef __LITTLE_ENDIAN__ + rotlwi 6,6,8 +#endif stb 6,0(3) 7: cmpldi cr1,10,16 @@ -334,13 +347,23 @@ EALIGN (memcpy, 5, 0) bf 30,1f /* there are at least two DWs to copy */ +#ifdef __LITTLE_ENDIAN__ + srd 0,6,10 + sld 8,7,9 +#else sld 0,6,10 srd 8,7,9 +#endif or 0,0,8 ld 6,16(5) std 0,0(4) +#ifdef __LITTLE_ENDIAN__ + srd 0,7,10 + sld 8,6,9 +#else sld 0,7,10 srd 8,6,9 +#endif or 0,0,8 ld 7,24(5) std 0,8(4) @@ -349,8 +372,13 @@ EALIGN (memcpy, 5, 0) blt cr6,8f /* if total DWs = 3, then bypass loop */ bf 31,4f /* there is a third DW to copy */ +#ifdef __LITTLE_ENDIAN__ + srd 0,6,10 + sld 8,7,9 +#else sld 0,6,10 srd 8,7,9 +#endif or 0,0,8 std 0,0(4) mr 6,7 @@ -361,8 +389,13 @@ EALIGN (memcpy, 5, 0) b 4f .align 4 1: +#ifdef __LITTLE_ENDIAN__ + srd 0,6,10 + sld 8,7,9 +#else sld 0,6,10 srd 8,7,9 +#endif addi 5,5,16 or 0,0,8 bf 31,4f @@ -373,23 +406,44 @@ EALIGN (memcpy, 5, 0) addi 4,4,8 .align 4 /* copy 32 bytes at a time */ -4: sld 0,6,10 +4: +#ifdef __LITTLE_ENDIAN__ + srd 0,6,10 + sld 8,7,9 +#else + sld 0,6,10 srd 8,7,9 +#endif or 0,0,8 ld 6,0(5) std 0,0(4) +#ifdef __LITTLE_ENDIAN__ + srd 0,7,10 + sld 8,6,9 +#else sld 0,7,10 srd 8,6,9 +#endif or 0,0,8 ld 7,8(5) std 0,8(4) +#ifdef __LITTLE_ENDIAN__ + srd 0,6,10 + sld 8,7,9 +#else sld 0,6,10 srd 8,7,9 +#endif or 0,0,8 ld 6,16(5) std 0,16(4) +#ifdef __LITTLE_ENDIAN__ + srd 0,7,10 + sld 8,6,9 +#else sld 0,7,10 srd 8,6,9 +#endif or 0,0,8 ld 7,24(5) std 0,24(4) @@ -399,8 +453,13 @@ EALIGN (memcpy, 5, 0) .align 4 8: /* 
calculate and store the final DW */ +#ifdef __LITTLE_ENDIAN__ + srd 0,6,10 + sld 8,7,9 +#else sld 0,6,10 srd 8,7,9 +#endif or 0,0,8 std 0,0(4) 3: |