author     Andrew Senkevich <andrew.senkevich@intel.com>   2015-06-18 20:11:27 +0300
committer  Andrew Senkevich <andrew.senkevich@intel.com>   2015-06-18 20:11:27 +0300
commit     a6336cc446a7ed682cb9dbc47cc56ebf9f9a4229
tree       3b89c96ee406327a8ad942cb1f4923fe33c0558e /sysdeps/x86_64/fpu/svml_s_wrapper_impl.h
parent     c9a8c526acd185176e486bee4624039740f8c435
Vector sincosf for x86_64 and tests.
Here is an implementation of vectorized sincosf with SSE, AVX, AVX2 and AVX512 versions, following the Vector ABI <https://groups.google.com/forum/#!topic/x86-64-abi/LmppCfN1rZ4>.

* NEWS: Mention addition of x86_64 vector sincosf.
* math/test-float-vlen16.h: Added wrapper for sincosf tests.
* math/test-float-vlen4.h: Likewise.
* math/test-float-vlen8.h: Likewise.
* sysdeps/unix/sysv/linux/x86_64/libmvec.abilist: New symbols added.
* sysdeps/x86/fpu/bits/math-vector.h: Added sincosf SIMD declaration.
* sysdeps/x86_64/fpu/Makefile (libmvec-support): Added new files.
* sysdeps/x86_64/fpu/Versions: New versions added.
* sysdeps/x86_64/fpu/libm-test-ulps: Regenerated.
* sysdeps/x86_64/fpu/multiarch/Makefile (libmvec-sysdep_routines): Added build of SSE, AVX2 and AVX512 IFUNC versions.
* sysdeps/x86_64/fpu/multiarch/svml_s_sincosf16_core.S
* sysdeps/x86_64/fpu/multiarch/svml_s_sincosf16_core_avx512.S
* sysdeps/x86_64/fpu/multiarch/svml_s_sincosf4_core.S
* sysdeps/x86_64/fpu/multiarch/svml_s_sincosf4_core_sse4.S
* sysdeps/x86_64/fpu/multiarch/svml_s_sincosf8_core.S
* sysdeps/x86_64/fpu/multiarch/svml_s_sincosf8_core_avx2.S
* sysdeps/x86_64/fpu/svml_s_sincosf16_core.S
* sysdeps/x86_64/fpu/svml_s_sincosf4_core.S
* sysdeps/x86_64/fpu/svml_s_sincosf8_core.S
* sysdeps/x86_64/fpu/svml_s_sincosf8_core_avx.S
* sysdeps/x86_64/fpu/svml_s_sincosf_data.S: New file.
* sysdeps/x86_64/fpu/svml_s_sincosf_data.h: New file.
* sysdeps/x86_64/fpu/svml_s_wrapper_impl.h: Added 3 argument wrappers.
* sysdeps/x86_64/fpu/test-float-vlen16.c: Vector sincosf tests.
* sysdeps/x86_64/fpu/test-float-vlen16-wrappers.c: Likewise.
* sysdeps/x86_64/fpu/test-float-vlen4-wrappers.c: Likewise.
* sysdeps/x86_64/fpu/test-float-vlen4.c: Likewise.
* sysdeps/x86_64/fpu/test-float-vlen8-avx2-wrappers.c: Likewise.
* sysdeps/x86_64/fpu/test-float-vlen8-avx2.c: Likewise.
* sysdeps/x86_64/fpu/test-float-vlen8-wrappers.c: Likewise.
* sysdeps/x86_64/fpu/test-float-vlen8.c: Likewise.
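For illustration only (not part of this commit), one of the new libmvec entry points can be called directly, much as the test wrappers added here do. The symbol _ZGVbN4vvv_sincosf is among the entries added to libmvec.abilist; the file name and build command below are assumptions.

/* sincosf-vec-demo.c -- illustrative sketch, not glibc code.
   Assumed build line: gcc -O2 sincosf-vec-demo.c -lmvec -lm  */
#include <stdio.h>
#include <xmmintrin.h>

/* 4-lane single-precision sincos (SSE variant per the Vector ABI).
   The pointer-argument convention follows the test wrappers added by
   this commit: each pointer refers to a vector of 4 results.  */
extern void _ZGVbN4vvv_sincosf (__m128, __m128 *, __m128 *);

int
main (void)
{
  __m128 x = _mm_set_ps (3.0f, 2.0f, 1.0f, 0.0f); /* lanes 0..3 hold 0,1,2,3 */
  __m128 s, c;

  _ZGVbN4vvv_sincosf (x, &s, &c);

  const float *sf = (const float *) &s;
  const float *cf = (const float *) &c;
  for (int i = 0; i < 4; i++)
    printf ("lane %d: sin=%f cos=%f\n", i, sf[i], cf[i]);
  return 0;
}

With a vectorizing compiler, the same variants can also be reached from plain sincosf calls inside a loop, via the SIMD declaration this commit adds to sysdeps/x86/fpu/bits/math-vector.h.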
Diffstat (limited to 'sysdeps/x86_64/fpu/svml_s_wrapper_impl.h')
-rw-r--r--  sysdeps/x86_64/fpu/svml_s_wrapper_impl.h | 193
1 file changed, 154 insertions(+), 39 deletions(-)
diff --git a/sysdeps/x86_64/fpu/svml_s_wrapper_impl.h b/sysdeps/x86_64/fpu/svml_s_wrapper_impl.h
index f88e30f054..66bb081c9d 100644
--- a/sysdeps/x86_64/fpu/svml_s_wrapper_impl.h
+++ b/sysdeps/x86_64/fpu/svml_s_wrapper_impl.h
@@ -76,6 +76,67 @@
ret
.endm
+/* 3 argument SSE2 ISA version as wrapper to scalar. */
+.macro WRAPPER_IMPL_SSE2_fFF callee
+ pushq %rbp
+ cfi_adjust_cfa_offset (8)
+ cfi_rel_offset (%rbp, 0)
+ pushq %rbx
+ cfi_adjust_cfa_offset (8)
+ cfi_rel_offset (%rbx, 0)
+ movq %rdi, %rbp
+ movq %rsi, %rbx
+ subq $40, %rsp
+ cfi_adjust_cfa_offset(40)
+ leaq 24(%rsp), %rsi
+ leaq 28(%rsp), %rdi
+ movaps %xmm0, (%rsp)
+ call \callee@PLT
+ leaq 24(%rsp), %rsi
+ leaq 28(%rsp), %rdi
+ movss 28(%rsp), %xmm0
+ movss %xmm0, 0(%rbp)
+ movaps (%rsp), %xmm1
+ movss 24(%rsp), %xmm0
+ movss %xmm0, (%rbx)
+ movaps %xmm1, %xmm0
+ shufps $85, %xmm1, %xmm0
+ call \callee@PLT
+ movss 28(%rsp), %xmm0
+ leaq 24(%rsp), %rsi
+ movss %xmm0, 4(%rbp)
+ leaq 28(%rsp), %rdi
+ movaps (%rsp), %xmm1
+ movss 24(%rsp), %xmm0
+ movss %xmm0, 4(%rbx)
+ movaps %xmm1, %xmm0
+ unpckhps %xmm1, %xmm0
+ call \callee@PLT
+ movaps (%rsp), %xmm1
+ leaq 24(%rsp), %rsi
+ leaq 28(%rsp), %rdi
+ movss 28(%rsp), %xmm0
+ shufps $255, %xmm1, %xmm1
+ movss %xmm0, 8(%rbp)
+ movss 24(%rsp), %xmm0
+ movss %xmm0, 8(%rbx)
+ movaps %xmm1, %xmm0
+ call \callee@PLT
+ movss 28(%rsp), %xmm0
+ movss %xmm0, 12(%rbp)
+ movss 24(%rsp), %xmm0
+ movss %xmm0, 12(%rbx)
+ addq $40, %rsp
+ cfi_adjust_cfa_offset(-40)
+ popq %rbx
+ cfi_adjust_cfa_offset (-8)
+ cfi_restore (%rbx)
+ popq %rbp
+ cfi_adjust_cfa_offset (-8)
+ cfi_restore (%rbp)
+ ret
+.endm
+
/* AVX/AVX2 ISA version as wrapper to SSE ISA version. */
.macro WRAPPER_IMPL_AVX callee
pushq %rbp
@@ -130,6 +191,52 @@
ret
.endm
+/* 3 argument AVX/AVX2 ISA version as wrapper to SSE ISA version. */
+.macro WRAPPER_IMPL_AVX_fFF callee
+ pushq %rbp
+ cfi_adjust_cfa_offset (8)
+ cfi_rel_offset (%rbp, 0)
+ movq %rsp, %rbp
+ cfi_def_cfa_register (%rbp)
+ andq $-32, %rsp
+ pushq %r13
+ cfi_adjust_cfa_offset (8)
+ cfi_rel_offset (%r13, 0)
+ pushq %r14
+ cfi_adjust_cfa_offset (8)
+ cfi_rel_offset (%r14, 0)
+ subq $48, %rsp
+ movq %rsi, %r14
+ vmovaps %ymm0, (%rsp)
+ movq %rdi, %r13
+ vmovaps 16(%rsp), %xmm1
+ vmovaps %xmm1, 32(%rsp)
+ vzeroupper
+ vmovaps (%rsp), %xmm0
+ call HIDDEN_JUMPTARGET(\callee)
+ vmovaps 32(%rsp), %xmm0
+ lea (%rsp), %rdi
+ lea 16(%rsp), %rsi
+ call HIDDEN_JUMPTARGET(\callee)
+ vmovaps (%rsp), %xmm0
+ vmovaps 16(%rsp), %xmm1
+ vmovaps %xmm0, 16(%r13)
+ vmovaps %xmm1, 16(%r14)
+ addq $48, %rsp
+ popq %r14
+ cfi_adjust_cfa_offset (-8)
+ cfi_restore (%r14)
+ popq %r13
+ cfi_adjust_cfa_offset (-8)
+ cfi_restore (%r13)
+ movq %rbp, %rsp
+ cfi_def_cfa_register (%rsp)
+ popq %rbp
+ cfi_adjust_cfa_offset (-8)
+ cfi_restore (%rbp)
+ ret
+.endm
+
/* AVX512 ISA version as wrapper to AVX2 ISA version. */
.macro WRAPPER_IMPL_AVX512 callee
pushq %rbp
@@ -147,20 +254,9 @@
.byte 0x29
.byte 0x04
.byte 0x24
-/* Below is encoding for vmovaps (%rsp), %ymm0. */
- .byte 0xc5
- .byte 0xfc
- .byte 0x28
- .byte 0x04
- .byte 0x24
+ vmovaps (%rsp), %ymm0
call HIDDEN_JUMPTARGET(\callee)
-/* Below is encoding for vmovaps 32(%rsp), %ymm0. */
- .byte 0xc5
- .byte 0xfc
- .byte 0x28
- .byte 0x44
- .byte 0x24
- .byte 0x20
+ vmovaps 32(%rsp), %ymm0
call HIDDEN_JUMPTARGET(\callee)
movq %rbp, %rsp
cfi_def_cfa_register (%rsp)
@@ -195,38 +291,57 @@
.byte 0x29
.byte 0x4c
.byte 0x24
-/* Below is encoding for vmovaps (%rsp), %ymm0. */
- .byte 0xc5
- .byte 0xfc
- .byte 0x28
+ vmovaps (%rsp), %ymm0
+ vmovaps 64(%rsp), %ymm1
+ call HIDDEN_JUMPTARGET(\callee)
+ vmovaps 32(%rsp), %ymm0
+ vmovaps 96(%rsp), %ymm1
+ call HIDDEN_JUMPTARGET(\callee)
+ movq %rbp, %rsp
+ cfi_def_cfa_register (%rsp)
+ popq %rbp
+ cfi_adjust_cfa_offset (-8)
+ cfi_restore (%rbp)
+ ret
+.endm
+
+/* 3 argument AVX512 ISA version as wrapper to AVX2 ISA version. */
+.macro WRAPPER_IMPL_AVX512_fFF callee
+ pushq %rbp
+ cfi_adjust_cfa_offset (8)
+ cfi_rel_offset (%rbp, 0)
+ movq %rsp, %rbp
+ cfi_def_cfa_register (%rbp)
+ andq $-64, %rsp
+ pushq %r12
+ pushq %r13
+ subq $176, %rsp
+ movq %rsi, %r13
+/* Below is encoding for vmovaps %zmm0, (%rsp). */
+ .byte 0x62
+ .byte 0xf1
+ .byte 0x7c
+ .byte 0x48
+ .byte 0x29
.byte 0x04
.byte 0x24
-/* Below is encoding for vmovaps 64(%rsp), %ymm1. */
- .byte 0xc5
- .byte 0xfc
- .byte 0x28
- .byte 0x4c
- .byte 0x24
- .byte 0x40
+ movq %rdi, %r12
+ vmovaps (%rsp), %ymm0
call HIDDEN_JUMPTARGET(\callee)
-/* Below is encoding for vmovaps 32(%rsp), %ymm0. */
- .byte 0xc5
- .byte 0xfc
- .byte 0x28
- .byte 0x44
- .byte 0x24
- .byte 0x20
-/* Below is encoding for vmovaps 96(%rsp), %ymm1. */
- .byte 0xc5
- .byte 0xfc
- .byte 0x28
- .byte 0x4c
- .byte 0x24
- .byte 0x60
+ vmovaps 32(%rsp), %ymm0
+ lea 64(%rsp), %rdi
+ lea 96(%rsp), %rsi
call HIDDEN_JUMPTARGET(\callee)
+ vmovaps 64(%rsp), %ymm0
+ vmovaps 96(%rsp), %ymm1
+ vmovaps %ymm0, 32(%r12)
+ vmovaps %ymm1, 32(%r13)
+ addq $176, %rsp
+ popq %r13
+ popq %r12
movq %rbp, %rsp
cfi_def_cfa_register (%rsp)
- popq %rbp
+ popq %rbp
cfi_adjust_cfa_offset (-8)
cfi_restore (%rbp)
ret
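
As an aid to reading the assembly above, here is a C sketch (illustrative only, not part of the patch; the helper name is invented) of what the new 3-argument SSE2 wrapper WRAPPER_IMPL_SSE2_fFF does: one scalar call per lane, with lane i's sine and cosine stored at index i of the two output arrays passed in %rdi and %rsi.

#include <math.h>
#include <xmmintrin.h>

/* Stand-in for the scalar callee the macro wraps.  */
static void
scalar_sincosf (float x, float *sinp, float *cosp)
{
  *sinp = sinf (x);
  *cosp = cosf (x);
}

/* C equivalent of WRAPPER_IMPL_SSE2_fFF: split the 4-lane input and
   scatter each pair of results through the two output pointers,
   matching the movss stores in the macro above.  */
static void
wrapper_sse2_fFF (__m128 x, float *sinp, float *cosp)
{
  const float *lane = (const float *) &x;

  for (int i = 0; i < 4; i++)
    scalar_sincosf (lane[i], &sinp[i], &cosp[i]);
}

The AVX and AVX512 fFF wrappers follow the same pattern one level down: they split a 256-bit or 512-bit register into two halves and call the narrower ISA version twice.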