From bfe6f5fa899ebc951222e5c29d33c9c223824a1e Mon Sep 17 00:00:00 2001
From: Ulrich Drepper
Date: Mon, 21 May 2007 19:21:48 +0000
Subject: * sysdeps/unix/sysv/linux/x86_64/sysconf.c: Move cache information
 handling to ...
 * sysdeps/x86_64/cacheinfo.c: ... here.  New file.
 * sysdeps/x86_64/Makefile [subdir=string] (sysdep_routines): Add cacheinfo.
 * sysdeps/x86_64/memcpy.S: Complete rewrite.
 * sysdeps/x86_64/mempcpy.S: Adjust appropriately.
 Patch by Evandro Menezes.
 * sysdeps/unix/sysv/linux/i386/epoll_pwait.S: New file.
---
 sysdeps/x86_64/memcpy.S | 571 +++++++++++++++++++++++++++++++++++++++++++-----
 1 file changed, 522 insertions(+), 49 deletions(-)
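[Overview, not part of the patch: the rewrite replaces the old align-then-rep-movsb code with a ladder of size-tiered copy strategies. A rough C sketch of the tier boundaries follows. The two cache limits are hypothetical stand-ins for the values the new cacheinfo.c publishes as __x86_64_core_cache_size_half and __x86_64_shared_cache_size_half, and in the real code a large block passes through several tiers, each copying up to its budget and handing the remainder down.]

    #include <stdio.h>
    #include <stddef.h>

    static const size_t l1_half = 16 * 1024;   /* assumed ~1/2 L1d; glibc reads it at runtime */
    static const size_t l2_half = 512 * 1024;  /* assumed ~1/2 L2; glibc reads it at runtime */

    /* Which strategy of the new memcpy an n-byte block ends up in.  */
    static const char *strategy (size_t n)
    {
      if (n < 32)       return "L(1):    unrolled 1-, 2-, 4-, 8- and 16-byte moves";
      if (n <= 1024)    return "L(32):   32-byte load/store loop";
      if (n <= l1_half) return "L(fast): rep movsq";
      if (n <= l2_half) return "L(pre):  64-byte loop with prefetcht0/prefetchw";
      return "L(NT):   non-temporal 128-byte loop, then sfence";
    }

    int main (void)
    {
      printf ("%s\n", strategy (24));        /* tiny block */
      printf ("%s\n", strategy (64 * 1024)); /* really streams through L(fast), then L(pre) */
      return 0;
    }

Inside ld.so (NOT_IN_libc) everything past the rep-movsq tier is compiled out, so L(fast) also takes the largest blocks there.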
diff --git a/sysdeps/x86_64/memcpy.S b/sysdeps/x86_64/memcpy.S
index 5f06198b5d..231329864f 100644
--- a/sysdeps/x86_64/memcpy.S
+++ b/sysdeps/x86_64/memcpy.S
@@ -1,7 +1,10 @@
-/* Highly optimized version for x86-64.
-   Copyright (C) 1997, 2000, 2002, 2003, 2004 Free Software Foundation, Inc.
+/*
+   Optimized memcpy for x86-64.
+
+   Copyright (C) 2007 Free Software Foundation, Inc.
+   Contributed by Evandro Menezes, 2007.
+
    This file is part of the GNU C Library.
-   Based on i586 version contributed by Ulrich Drepper, 1997.
 
    The GNU C Library is free software; you can redistribute it and/or
    modify it under the terms of the GNU Lesser General Public
@@ -16,86 +19,556 @@
    You should have received a copy of the GNU Lesser General Public
    License along with the GNU C Library; if not, write to the Free
    Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
-   02111-1307 USA.  */
+   02111-1307 USA.
+*/
 
 #include <sysdep.h>
 #include "asm-syntax.h"
-#include "bp-sym.h"
-#include "bp-asm.h"
 
-/* BEWARE: `#ifdef memcpy' means that memcpy is redefined as `mempcpy',
-   and the return value is the byte after the last one copied in
-   the destination. */
-#define MEMPCPY_P (defined memcpy)
+/* Stack slots in the red-zone. */
+
+#ifdef USE_AS_MEMPCPY
+# define RETVAL	(0)
+#else
+# define RETVAL	(-8)
+#endif
+#define SAVE0	(RETVAL - 8)
+#define SAVE1	(SAVE0	- 8)
+#define SAVE2	(SAVE1	- 8)
+#define SAVE3	(SAVE2	- 8)
 
 	.text
+
 #if defined PIC && !defined NOT_IN_libc
 ENTRY (__memcpy_chk)
+
 	cmpq	%rdx, %rcx
 	jb	HIDDEN_JUMPTARGET (__chk_fail)
+
 END (__memcpy_chk)
 #endif
 
-ENTRY (BP_SYM (memcpy))
-	/* Cutoff for the big loop is a size of 32 bytes since otherwise
-	   the loop will never be entered.  */
+
+ENTRY(memcpy)				/* (void *, const void*, size_t) */
+
+/* Handle tiny blocks. */
+
+L(1try):				/* up to 32B */
 	cmpq	$32, %rdx
-	movq	%rdx, %rcx
-#if !MEMPCPY_P
-	movq	%rdi, %r10	/* Save value.  */
+#ifndef USE_AS_MEMPCPY
+	movq	%rdi, %rax		/* save return value */
 #endif
+	jae	L(1after)
 
-	/* We need this in any case.  */
-	cld
+L(1):					/* 1-byte once */
+	testb	$1, %dl
+	jz	L(1a)
 
-	jbe	1f
+	movzbl	(%rsi), %ecx
+	movb	%cl, (%rdi)
 
-	/* Align destination.  */
-	movq	%rdi, %rax
-	negq	%rax
-	andq	$7, %rax
-	subq	%rax, %rcx
-	xchgq	%rax, %rcx
+	incq	%rsi
+	incq	%rdi
+
+	.p2align 4,, 4
+
+L(1a):					/* 2-byte once */
+	testb	$2, %dl
+	jz	L(1b)
+
+	movzwl	(%rsi), %ecx
+	movw	%cx, (%rdi)
+
+	addq	$2, %rsi
+	addq	$2, %rdi
+
+	.p2align 4,, 4
+
+L(1b):					/* 4-byte once */
+	testb	$4, %dl
+	jz	L(1c)
+
+	movl	(%rsi), %ecx
+	movl	%ecx, (%rdi)
+
+	addq	$4, %rsi
+	addq	$4, %rdi
+
+	.p2align 4,, 4
+
+L(1c):					/* 8-byte once */
+	testb	$8, %dl
+	jz	L(1d)
+
+	movq	(%rsi), %rcx
+	movq	%rcx, (%rdi)
+
+	addq	$8, %rsi
+	addq	$8, %rdi
+
+	.p2align 4,, 4
+
+L(1d):					/* 16-byte loop */
+	andl	$0xf0, %edx
+	jz	L(exit)
+
+	.p2align 4
+
+L(1loop):
+	movq	(%rsi), %rcx
+	movq	8 (%rsi), %r8
+	movq	%rcx, (%rdi)
+	movq	%r8, 8 (%rdi)
+
+	subl	$16, %edx
+
+	leaq	16 (%rsi), %rsi
+	leaq	16 (%rdi), %rdi
+
+	jnz	L(1loop)
+
+	.p2align 4,, 4
+
+L(exit):				/* exit */
+#ifdef USE_AS_MEMPCPY
+	movq	%rdi, %rax		/* return value */
+#else
+	rep
+#endif
+	retq
+
+	.p2align 4
+
+L(1after):
+#ifndef USE_AS_MEMPCPY
+	movq	%rax, RETVAL (%rsp)	/* save return value */
+#endif
+
+/* Align to the natural word size. */
+
+L(aligntry):
+	movl	%esi, %ecx		/* align by source */
+
+	andl	$7, %ecx
+	jz	L(alignafter)		/* already aligned */
+
+L(align):				/* align */
+	leaq	-8 (%rcx, %rdx), %rdx	/* calculate remaining bytes */
+	subl	$8, %ecx
+
+	.p2align 4
+
+L(alignloop):				/* 1-byte alignment loop */
+	movzbl	(%rsi), %eax
+	movb	%al, (%rdi)
+
+	incl	%ecx
+
+	leaq	1 (%rsi), %rsi
+	leaq	1 (%rdi), %rdi
-	rep; movsb
+
+	jnz	L(alignloop)
 
-	movq	%rax, %rcx
-	subq	$32, %rcx
-	js	2f
+	.p2align 4
+
+L(alignafter):
+
+/* Loop to handle mid-sized blocks. */
+
+L(32try):				/* up to 1KB */
+	cmpq	$1024, %rdx
+	ja	L(32after)
+
+L(32):					/* 32-byte loop */
+	movl	%edx, %ecx
+	shrl	$5, %ecx
+	jz	L(32skip)
 
 	.p2align 4
-3:
-	/* Now correct the loop counter.  Please note that in the following
-	   code the flags are not changed anymore.  */
-	subq	$32, %rcx
 
+L(32loop):
+	decl	%ecx
+
 	movq	(%rsi), %rax
-	movq	8(%rsi), %rdx
-	movq	16(%rsi), %r8
-	movq	24(%rsi), %r9
+	movq	8 (%rsi), %r8
+	movq	16 (%rsi), %r9
+	movq	24 (%rsi), %r10
+
 	movq	%rax, (%rdi)
-	movq	%rdx, 8(%rdi)
-	movq	%r8, 16(%rdi)
-	movq	%r9, 24(%rdi)
+	movq	%r8, 8 (%rdi)
+	movq	%r9, 16 (%rdi)
+	movq	%r10, 24 (%rdi)
+
 	leaq	32(%rsi), %rsi
 	leaq	32(%rdi), %rdi
 
-	jns	3b
+	jz	L(32skip)		/* help out smaller blocks */
+
+	decl	%ecx
+
+	movq	(%rsi), %rax
+	movq	8 (%rsi), %r8
+	movq	16 (%rsi), %r9
+	movq	24 (%rsi), %r10
+
+	movq	%rax, (%rdi)
+	movq	%r8, 8 (%rdi)
+	movq	%r9, 16 (%rdi)
+	movq	%r10, 24 (%rdi)
+
+	leaq	32 (%rsi), %rsi
+	leaq	32 (%rdi), %rdi
 
-	/* Correct extra loop counter modification.  */
-2:	addq	$32, %rcx
-1:	rep; movsb
+	jnz	L(32loop)
 
-#if MEMPCPY_P
-	movq	%rdi, %rax	/* Set return value.  */
+	.p2align 4
+
+L(32skip):
+	andl	$31, %edx		/* check for left overs */
+#ifdef USE_AS_MEMPCPY
+	jnz	L(1)
+
+	movq	%rdi, %rax
 #else
-	movq	%r10, %rax	/* Set return value.  */
+	movq	RETVAL (%rsp), %rax
+	jnz	L(1)
+
+	rep
+#endif
+	retq				/* exit */
+
+	.p2align 4
+
+L(32after):
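[Aside: the mid-sized and larger tiers all hand their leftover bytes back to L(1) with the low bits of the count in %edx. That tail handler peels one bit of the size at a time instead of looping bytewise. A minimal C sketch of the same technique; copy_tail is a made-up name, not a glibc function:]

    #include <stddef.h>
    #include <string.h>

    /* Copy the n < 128 leftover bytes the way L(1)..L(1d) do: test each
       low bit of n exactly once, then finish 16 bytes per iteration.  */
    static void copy_tail (unsigned char *d, const unsigned char *s, size_t n)
    {
      if (n & 1) { *d = *s; d += 1; s += 1; }            /* L(1)  */
      if (n & 2) { memcpy (d, s, 2); d += 2; s += 2; }   /* L(1a) */
      if (n & 4) { memcpy (d, s, 4); d += 4; s += 4; }   /* L(1b) */
      if (n & 8) { memcpy (d, s, 8); d += 8; s += 8; }   /* L(1c) */
      for (n &= 0xf0; n != 0; n -= 16)                   /* L(1d): andl $0xf0 */
        {
          memcpy (d, s, 16);
          d += 16;
          s += 16;
        }
    }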
+
+/*
+   In order to minimize code-size in RTLD, algorithms specific for
+   larger blocks are excluded when building for RTLD.
+*/
+
+/* Handle large blocks smaller than 1/2 L1. */
+
+L(fasttry):				/* first 1/2 L1 */
+#ifndef NOT_IN_libc			/* only up to this algorithm outside of libc.so */
+	movq	__x86_64_core_cache_size_half (%rip), %r11
+	cmpq	%rdx, %r11		/* calculate the smaller of */
+	cmovaq	%rdx, %r11		/* remaining bytes and 1/2 L1 */
+#endif
+
+L(fast):				/* good ol' MOVS */
+#ifndef NOT_IN_libc
+	movq	%r11, %rcx
+	andq	$-8, %r11
+#else
+	movq	%rdx, %rcx
+#endif
+	shrq	$3, %rcx
+	jz	L(fastskip)
+
+	rep
+	movsq
+
+	.p2align 4,, 4
+
+L(fastskip):
+#ifndef NOT_IN_libc
+	subq	%r11, %rdx		/* check for more */
+	testq	$-8, %rdx
+	jnz	L(fastafter)
+#endif
+
+	andl	$7, %edx		/* check for left overs */
+#ifdef USE_AS_MEMPCPY
+	jnz	L(1)
+
+	movq	%rdi, %rax
+#else
+	movq	RETVAL (%rsp), %rax
+	jnz	L(1)
+
+	rep
+#endif
+	retq				/* exit */
+
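[Aside: the sections that follow stream 64-byte cache lines while prefetching 896 bytes, fourteen lines, ahead of the copy, using PREFETCHW for the destination when the CPU supports it. A C sketch of the prefetch-ahead pattern using SSE intrinsics rather than glibc's asm; the 896-byte distance is the patch's constant, the rest is illustrative and assumes n is a multiple of 64:]

    #include <stddef.h>
    #include <string.h>
    #include <xmmintrin.h>   /* _mm_prefetch */

    static void copy_with_prefetch (unsigned char *d, const unsigned char *s,
                                    size_t n)
    {
      for (; n != 0; n -= 64, s += 64, d += 64)
        {
          /* Request the line 14 iterations ahead; a prefetch past the end
             of the buffer never faults on x86.  */
          _mm_prefetch ((const char *) s + 896, _MM_HINT_T0);
          memcpy (d, s, 64);   /* stands in for the eight movq pairs */
        }
    }

PREFETCHW additionally takes the destination line in a writable state up front, which saves a later ownership transaction, the "less cache-probe traffic in MP systems" remark in the hunk below.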
+#ifndef NOT_IN_libc			/* none of the algorithms below for RTLD */
+
+	.p2align 4
+
+L(fastafter):
+
+/* Handle large blocks smaller than 1/2 L2. */
+
+L(pretry):				/* first 1/2 L2 */
+	movq	__x86_64_shared_cache_size_half (%rip), %r8
+	cmpq	%rdx, %r8		/* calculate the lesser of */
+	cmovaq	%rdx, %r8		/* remaining bytes and 1/2 L2 */
+
+L(pre):					/* 64-byte with prefetching */
+	movq	%r8, %rcx
+	andq	$-64, %r8
+	shrq	$6, %rcx
+	jz	L(preskip)
+
+	movq	%r14, SAVE0 (%rsp)
+	cfi_rel_offset (%r14, SAVE0)
+	movq	%r13, SAVE1 (%rsp)
+	cfi_rel_offset (%r13, SAVE1)
+	movq	%r12, SAVE2 (%rsp)
+	cfi_rel_offset (%r12, SAVE2)
+	movq	%rbx, SAVE3 (%rsp)
+	cfi_rel_offset (%rbx, SAVE3)
+
+	cmpl	$0, __x86_64_prefetchw (%rip)
+	jz	L(preloop)		/* check if PREFETCHW OK */
+
+	.p2align 4
+
+/* ... when PREFETCHW is available (less cache-probe traffic in MP systems). */
+
+L(prewloop):				/* cache-line in state M */
+	decq	%rcx
+
+	movq	(%rsi), %rax
+	movq	8 (%rsi), %rbx
+	movq	16 (%rsi), %r9
+	movq	24 (%rsi), %r10
+	movq	32 (%rsi), %r11
+	movq	40 (%rsi), %r12
+	movq	48 (%rsi), %r13
+	movq	56 (%rsi), %r14
+
+	prefetcht0	 0 + 896 (%rsi)
+	prefetcht0	64 + 896 (%rsi)
+
+	movq	%rax, (%rdi)
+	movq	%rbx, 8 (%rdi)
+	movq	%r9, 16 (%rdi)
+	movq	%r10, 24 (%rdi)
+	movq	%r11, 32 (%rdi)
+	movq	%r12, 40 (%rdi)
+	movq	%r13, 48 (%rdi)
+	movq	%r14, 56 (%rdi)
+
+	leaq	64 (%rsi), %rsi
+	leaq	64 (%rdi), %rdi
+
+	jz	L(prebail)
+
+	decq	%rcx
+
+	movq	(%rsi), %rax
+	movq	8 (%rsi), %rbx
+	movq	16 (%rsi), %r9
+	movq	24 (%rsi), %r10
+	movq	32 (%rsi), %r11
+	movq	40 (%rsi), %r12
+	movq	48 (%rsi), %r13
+	movq	56 (%rsi), %r14
+
+	movq	%rax, (%rdi)
+	movq	%rbx, 8 (%rdi)
+	movq	%r9, 16 (%rdi)
+	movq	%r10, 24 (%rdi)
+	movq	%r11, 32 (%rdi)
+	movq	%r12, 40 (%rdi)
+	movq	%r13, 48 (%rdi)
+	movq	%r14, 56 (%rdi)
+
+	prefetchw	896 - 64 (%rdi)
+	prefetchw	896 -  0 (%rdi)
+
+	leaq	64 (%rsi), %rsi
+	leaq	64 (%rdi), %rdi
+
+	jnz	L(prewloop)
+	jmp	L(prebail)
+
+	.p2align 4
+
+/* ... when PREFETCHW is not available. */
+
+L(preloop):				/* cache-line in state E */
+	decq	%rcx
+
+	movq	(%rsi), %rax
+	movq	8 (%rsi), %rbx
+	movq	16 (%rsi), %r9
+	movq	24 (%rsi), %r10
+	movq	32 (%rsi), %r11
+	movq	40 (%rsi), %r12
+	movq	48 (%rsi), %r13
+	movq	56 (%rsi), %r14
+
+	prefetcht0	896 +  0 (%rsi)
+	prefetcht0	896 + 64 (%rsi)
+
+	movq	%rax, (%rdi)
+	movq	%rbx, 8 (%rdi)
+	movq	%r9, 16 (%rdi)
+	movq	%r10, 24 (%rdi)
+	movq	%r11, 32 (%rdi)
+	movq	%r12, 40 (%rdi)
+	movq	%r13, 48 (%rdi)
+	movq	%r14, 56 (%rdi)
+
+	leaq	64 (%rsi), %rsi
+	leaq	64 (%rdi), %rdi
+
+	jz	L(prebail)
+
+	decq	%rcx
+
+	movq	(%rsi), %rax
+	movq	8 (%rsi), %rbx
+	movq	16 (%rsi), %r9
+	movq	24 (%rsi), %r10
+	movq	32 (%rsi), %r11
+	movq	40 (%rsi), %r12
+	movq	48 (%rsi), %r13
+	movq	56 (%rsi), %r14
+
+	prefetcht0	896 - 64 (%rdi)
+	prefetcht0	896 -  0 (%rdi)
+
+	movq	%rax, (%rdi)
+	movq	%rbx, 8 (%rdi)
+	movq	%r9, 16 (%rdi)
+	movq	%r10, 24 (%rdi)
+	movq	%r11, 32 (%rdi)
+	movq	%r12, 40 (%rdi)
+	movq	%r13, 48 (%rdi)
+	movq	%r14, 56 (%rdi)
+
+	leaq	64 (%rsi), %rsi
+	leaq	64 (%rdi), %rdi
+
+	jnz	L(preloop)
+
+L(prebail):
+	movq	SAVE3 (%rsp), %rbx
+	cfi_restore (%rbx)
+	movq	SAVE2 (%rsp), %r12
+	cfi_restore (%r12)
+	movq	SAVE1 (%rsp), %r13
+	cfi_restore (%r13)
+	movq	SAVE0 (%rsp), %r14
+	cfi_restore (%r14)
+
+/*	.p2align 4 */
+
+L(preskip):
+	subq	%r8, %rdx		/* check for more */
+	testq	$-64, %rdx
+	jnz	L(preafter)
+
+	andl	$63, %edx		/* check for left overs */
+#ifdef USE_AS_MEMPCPY
+	jnz	L(1)
+
+	movq	%rdi, %rax
+#else
+	movq	RETVAL (%rsp), %rax
+	jnz	L(1)
+
+	rep
+#endif
+	retq				/* exit */
+
+	.p2align 4
+
+L(preafter):
+
+/* Loop to handle huge blocks. */
+
+L(NTtry):
+
+L(NT):					/* non-temporal 128-byte */
+	movq	%rdx, %rcx
+	shrq	$7, %rcx
+	jz	L(NTskip)
+
+	movq	%r14, SAVE0 (%rsp)
+	cfi_rel_offset (%r14, SAVE0)
+	movq	%r13, SAVE1 (%rsp)
+	cfi_rel_offset (%r13, SAVE1)
+	movq	%r12, SAVE2 (%rsp)
+	cfi_rel_offset (%r12, SAVE2)
+
+	.p2align 4
+
+L(NTloop):
+	prefetchnta	768 (%rsi)
+	prefetchnta	832 (%rsi)
+
+	decq	%rcx
+
+	movq	(%rsi), %rax
+	movq	8 (%rsi), %r8
+	movq	16 (%rsi), %r9
+	movq	24 (%rsi), %r10
+	movq	32 (%rsi), %r11
+	movq	40 (%rsi), %r12
+	movq	48 (%rsi), %r13
+	movq	56 (%rsi), %r14
+
+	movntiq	%rax, (%rdi)
+	movntiq	%r8, 8 (%rdi)
+	movntiq	%r9, 16 (%rdi)
+	movntiq	%r10, 24 (%rdi)
+	movntiq	%r11, 32 (%rdi)
+	movntiq	%r12, 40 (%rdi)
+	movntiq	%r13, 48 (%rdi)
+	movntiq	%r14, 56 (%rdi)
+
+	movq	64 (%rsi), %rax
+	movq	72 (%rsi), %r8
+	movq	80 (%rsi), %r9
+	movq	88 (%rsi), %r10
+	movq	96 (%rsi), %r11
+	movq	104 (%rsi), %r12
+	movq	112 (%rsi), %r13
+	movq	120 (%rsi), %r14
+
+	movntiq	%rax, 64 (%rdi)
+	movntiq	%r8, 72 (%rdi)
+	movntiq	%r9, 80 (%rdi)
+	movntiq	%r10, 88 (%rdi)
+	movntiq	%r11, 96 (%rdi)
+	movntiq	%r12, 104 (%rdi)
+	movntiq	%r13, 112 (%rdi)
+	movntiq	%r14, 120 (%rdi)
+
+	leaq	128 (%rsi), %rsi
+	leaq	128 (%rdi), %rdi
+
+	jnz	L(NTloop)
+
+	sfence				/* serialize memory stores */
+
+	movq	SAVE2 (%rsp), %r12
+	cfi_restore (%r12)
+	movq	SAVE1 (%rsp), %r13
+	cfi_restore (%r13)
+	movq	SAVE0 (%rsp), %r14
+	cfi_restore (%r14)
+
+L(NTskip):
+	andl	$127, %edx		/* check for left overs */
+#ifdef USE_AS_MEMPCPY
+	jnz	L(1)
+
+	movq	%rdi, %rax
+#else
+	movq	RETVAL (%rsp), %rax
+	jnz	L(1)
+
+	rep
+#endif
 #endif
-	ret
+	retq				/* exit */
+
+#endif /* !NOT_IN_libc */
+
+END(memcpy)
-END (BP_SYM (memcpy))
 
-#if !MEMPCPY_P
+#ifndef USE_AS_MEMPCPY
 libc_hidden_builtin_def (memcpy)
 #endif
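[Aside: for blocks past 1/2 L2 the L(NT) loop above switches to MOVNTI, which stores around the cache so a huge copy does not evict the working set, and the single SFENCE afterwards orders the weakly-ordered streaming stores before the function returns. The same idea with SSE2 intrinsics, illustrative only; assumes 8-byte-aligned pointers and a byte count n that is a multiple of 8:]

    #include <stddef.h>
    #include <emmintrin.h>   /* _mm_stream_si64; pulls in _mm_sfence */

    static void copy_nontemporal (long long *d, const long long *s, size_t n)
    {
      for (; n != 0; n -= 8)
        _mm_stream_si64 (d++, *s++);   /* movnti: bypass the cache */
      _mm_sfence ();                   /* make the streaming stores globally visible in order */
    }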