summaryrefslogtreecommitdiff
path: root/rtkaio/sysdeps
diff options
context:
space:
mode:
authorJakub Jelinek <jakub@redhat.com>2007-07-12 18:26:36 +0000
committerJakub Jelinek <jakub@redhat.com>2007-07-12 18:26:36 +0000
commit0ecb606cb6cf65de1d9fc8a919bceb4be476c602 (patch)
tree2ea1f8305970753e4a657acb2ccc15ca3eec8e2c /rtkaio/sysdeps
parent7d58530341304d403a6626d7f7a1913165fe2f32 (diff)
downloadglibc-0ecb606cb6cf65de1d9fc8a919bceb4be476c602.tar.gz
2.5-18.1
Diffstat (limited to 'rtkaio/sysdeps')
-rw-r--r--rtkaio/sysdeps/pthread/Makefile39
-rw-r--r--rtkaio/sysdeps/pthread/Versions7
-rw-r--r--rtkaio/sysdeps/pthread/kaio_timer_routines.c1
-rw-r--r--rtkaio/sysdeps/pthread/librtkaio-cancellation.c1
-rw-r--r--rtkaio/sysdeps/pthread/rtkaio-unwind-resume.c1
-rw-r--r--rtkaio/sysdeps/pthread/tst-cancel17.c1
-rw-r--r--rtkaio/sysdeps/pthread/tst-cancelx17.c1
-rw-r--r--rtkaio/sysdeps/pthread/tst-mqueue8x.c1
-rw-r--r--rtkaio/sysdeps/pthread/tst-timer.c1
-rw-r--r--rtkaio/sysdeps/unix/alpha/Makefile3
-rw-r--r--rtkaio/sysdeps/unix/alpha/rtkaio-sysdep.S1
-rw-r--r--rtkaio/sysdeps/unix/sysv/linux/Implies1
-rw-r--r--rtkaio/sysdeps/unix/sysv/linux/Makefile4
-rw-r--r--rtkaio/sysdeps/unix/sysv/linux/alpha/Versions6
-rw-r--r--rtkaio/sysdeps/unix/sysv/linux/alpha/kaio_cancel.c33
-rw-r--r--rtkaio/sysdeps/unix/sysv/linux/ia64/Makefile3
-rw-r--r--rtkaio/sysdeps/unix/sysv/linux/ia64/Versions9
-rw-r--r--rtkaio/sysdeps/unix/sysv/linux/ia64/rtkaio-sysdep.S1
-rw-r--r--rtkaio/sysdeps/unix/sysv/linux/kaio_cancel.c238
-rw-r--r--rtkaio/sysdeps/unix/sysv/linux/kaio_error.c54
-rw-r--r--rtkaio/sysdeps/unix/sysv/linux/kaio_fsync.c6
-rw-r--r--rtkaio/sysdeps/unix/sysv/linux/kaio_misc.c1130
-rw-r--r--rtkaio/sysdeps/unix/sysv/linux/kaio_misc.h328
-rw-r--r--rtkaio/sysdeps/unix/sysv/linux/kaio_notify.c2
-rw-r--r--rtkaio/sysdeps/unix/sysv/linux/kaio_read.c2
-rw-r--r--rtkaio/sysdeps/unix/sysv/linux/kaio_read64.c2
-rw-r--r--rtkaio/sysdeps/unix/sysv/linux/kaio_return.c50
-rw-r--r--rtkaio/sysdeps/unix/sysv/linux/kaio_sigqueue.c2
-rw-r--r--rtkaio/sysdeps/unix/sysv/linux/kaio_suspend.c327
-rw-r--r--rtkaio/sysdeps/unix/sysv/linux/kaio_write.c2
-rw-r--r--rtkaio/sysdeps/unix/sysv/linux/kaio_write64.c2
-rw-r--r--rtkaio/sysdeps/unix/sysv/linux/klio_listio.c322
-rw-r--r--rtkaio/sysdeps/unix/sysv/linux/klio_listio64.c40
-rw-r--r--rtkaio/sysdeps/unix/sysv/linux/powerpc/Makefile3
-rw-r--r--rtkaio/sysdeps/unix/sysv/linux/powerpc/powerpc64/Versions9
-rw-r--r--rtkaio/sysdeps/unix/sysv/linux/powerpc/rtkaio-sysdep.c1
-rw-r--r--rtkaio/sysdeps/unix/sysv/linux/s390/Makefile3
-rw-r--r--rtkaio/sysdeps/unix/sysv/linux/s390/rtkaio-sysdep.S1
-rw-r--r--rtkaio/sysdeps/unix/sysv/linux/s390/s390-64/Versions9
-rw-r--r--rtkaio/sysdeps/unix/sysv/linux/sparc/Makefile3
-rw-r--r--rtkaio/sysdeps/unix/sysv/linux/sparc/Versions6
-rw-r--r--rtkaio/sysdeps/unix/sysv/linux/sparc/kaio_cancel.c33
-rw-r--r--rtkaio/sysdeps/unix/sysv/linux/sparc/rtkaio-sysdep.c1
-rw-r--r--rtkaio/sysdeps/unix/sysv/linux/sparc/sparc64/Versions9
-rw-r--r--rtkaio/sysdeps/unix/sysv/linux/syscalls.list5
-rw-r--r--rtkaio/sysdeps/unix/sysv/linux/x86_64/Versions9
46 files changed, 2713 insertions, 0 deletions
diff --git a/rtkaio/sysdeps/pthread/Makefile b/rtkaio/sysdeps/pthread/Makefile
new file mode 100644
index 0000000000..7ced3eb5a9
--- /dev/null
+++ b/rtkaio/sysdeps/pthread/Makefile
@@ -0,0 +1,39 @@
+ifeq ($(filter nptl,$(sorted-subdirs)),nptl)
+
+ifeq ($(subdir),rtkaio)
+librtkaio-sysdep_routines += kaio_timer_routines librtkaio-cancellation rtkaio-unwind-resume
+librtkaio-shared-only-routines += rtkaio-unwind-resume
+tests += tst-cancel17 tst-cancelx17
+CPPFLAGS-kaio_timer_routines.c = -I../nptl
+CFLAGS-librtkaio-cancellation.c += -fexceptions -fasynchronous-unwind-tables
+CFLAGS-rtkaio-unwind-resume.c += -fexceptions -fasynchronous-unwind-tables
+CFLAGS-tst-cancelx17.c += -fexceptions
+
+ifeq (yes,$(build-shared))
+$(objpfx)tst-timer: $(objpfx)librtkaio.so $(shared-thread-library)
+else
+$(objpfx)tst-timer: $(objpfx)librtkaio.a $(static-thread-library)
+endif
+
+ifeq ($(have-forced-unwind),yes)
+tests += tst-mqueue8x
+CFLAGS-tst-mqueue8x.c += -fexceptions
+endif
+endif
+
+endif
+
+ifeq ($(filter linuxthreads,$(sorted-subdirs)),linuxthreads)
+
+ifeq ($(subdir),rtkaio)
+librtkaio-sysdep_routines += kaio_timer_routines
+CPPFLAGS += -DBROKEN_THREAD_SIGNALS
+
+ifeq (yes,$(build-shared))
+$(objpfx)tst-timer: $(objpfx)librtkaio.so $(shared-thread-library)
+else
+$(objpfx)tst-timer: $(objpfx)librtkaio.a $(static-thread-library)
+endif
+endif
+
+endif
diff --git a/rtkaio/sysdeps/pthread/Versions b/rtkaio/sysdeps/pthread/Versions
new file mode 100644
index 0000000000..7677b76687
--- /dev/null
+++ b/rtkaio/sysdeps/pthread/Versions
@@ -0,0 +1,7 @@
+%ifdef HAVE_FORCED_UNWIND
+librtkaio {
+ GLIBC_2.4 {
+ lio_listio; lio_listio64;
+ }
+}
+%endif
diff --git a/rtkaio/sysdeps/pthread/kaio_timer_routines.c b/rtkaio/sysdeps/pthread/kaio_timer_routines.c
new file mode 100644
index 0000000000..3aa70ede12
--- /dev/null
+++ b/rtkaio/sysdeps/pthread/kaio_timer_routines.c
@@ -0,0 +1 @@
+#include <timer_routines.c>
diff --git a/rtkaio/sysdeps/pthread/librtkaio-cancellation.c b/rtkaio/sysdeps/pthread/librtkaio-cancellation.c
new file mode 100644
index 0000000000..77c0870e32
--- /dev/null
+++ b/rtkaio/sysdeps/pthread/librtkaio-cancellation.c
@@ -0,0 +1 @@
+#include <librt-cancellation.c>
diff --git a/rtkaio/sysdeps/pthread/rtkaio-unwind-resume.c b/rtkaio/sysdeps/pthread/rtkaio-unwind-resume.c
new file mode 100644
index 0000000000..09499a51a9
--- /dev/null
+++ b/rtkaio/sysdeps/pthread/rtkaio-unwind-resume.c
@@ -0,0 +1 @@
+#include <rt-unwind-resume.c>
diff --git a/rtkaio/sysdeps/pthread/tst-cancel17.c b/rtkaio/sysdeps/pthread/tst-cancel17.c
new file mode 100644
index 0000000000..3a7e7ef1ab
--- /dev/null
+++ b/rtkaio/sysdeps/pthread/tst-cancel17.c
@@ -0,0 +1 @@
+#include <nptl/tst-cancel17.c>
diff --git a/rtkaio/sysdeps/pthread/tst-cancelx17.c b/rtkaio/sysdeps/pthread/tst-cancelx17.c
new file mode 100644
index 0000000000..c6c833b60c
--- /dev/null
+++ b/rtkaio/sysdeps/pthread/tst-cancelx17.c
@@ -0,0 +1 @@
+#include "tst-cancel17.c"
diff --git a/rtkaio/sysdeps/pthread/tst-mqueue8x.c b/rtkaio/sysdeps/pthread/tst-mqueue8x.c
new file mode 100644
index 0000000000..1259ebdf50
--- /dev/null
+++ b/rtkaio/sysdeps/pthread/tst-mqueue8x.c
@@ -0,0 +1 @@
+#include_next <tst-mqueue8x.c>
diff --git a/rtkaio/sysdeps/pthread/tst-timer.c b/rtkaio/sysdeps/pthread/tst-timer.c
new file mode 100644
index 0000000000..9f38f9b2da
--- /dev/null
+++ b/rtkaio/sysdeps/pthread/tst-timer.c
@@ -0,0 +1 @@
+#include_next <tst-timer.c>
diff --git a/rtkaio/sysdeps/unix/alpha/Makefile b/rtkaio/sysdeps/unix/alpha/Makefile
new file mode 100644
index 0000000000..223ec37222
--- /dev/null
+++ b/rtkaio/sysdeps/unix/alpha/Makefile
@@ -0,0 +1,3 @@
+ifeq ($(subdir),rtkaio)
+librtkaio-sysdep_routines += rtkaio-sysdep
+endif
diff --git a/rtkaio/sysdeps/unix/alpha/rtkaio-sysdep.S b/rtkaio/sysdeps/unix/alpha/rtkaio-sysdep.S
new file mode 100644
index 0000000000..11ee214b23
--- /dev/null
+++ b/rtkaio/sysdeps/unix/alpha/rtkaio-sysdep.S
@@ -0,0 +1 @@
+#include <rt-sysdep.S>
diff --git a/rtkaio/sysdeps/unix/sysv/linux/Implies b/rtkaio/sysdeps/unix/sysv/linux/Implies
new file mode 100644
index 0000000000..329b6a20ca
--- /dev/null
+++ b/rtkaio/sysdeps/unix/sysv/linux/Implies
@@ -0,0 +1 @@
+rtkaio
diff --git a/rtkaio/sysdeps/unix/sysv/linux/Makefile b/rtkaio/sysdeps/unix/sysv/linux/Makefile
new file mode 100644
index 0000000000..9ed3d7551a
--- /dev/null
+++ b/rtkaio/sysdeps/unix/sysv/linux/Makefile
@@ -0,0 +1,4 @@
+ifeq ($(subdir),rtkaio)
+CFLAGS-kaio_mq_send.c += -fexceptions
+CFLAGS-kaio_mq_receive.c += -fexceptions
+endif
diff --git a/rtkaio/sysdeps/unix/sysv/linux/alpha/Versions b/rtkaio/sysdeps/unix/sysv/linux/alpha/Versions
new file mode 100644
index 0000000000..b2d59a9140
--- /dev/null
+++ b/rtkaio/sysdeps/unix/sysv/linux/alpha/Versions
@@ -0,0 +1,6 @@
+librtkaio {
+ GLIBC_2.3 {
+ # AIO functions.
+ aio_cancel; aio_cancel64;
+ }
+}
diff --git a/rtkaio/sysdeps/unix/sysv/linux/alpha/kaio_cancel.c b/rtkaio/sysdeps/unix/sysv/linux/alpha/kaio_cancel.c
new file mode 100644
index 0000000000..6e345e1643
--- /dev/null
+++ b/rtkaio/sysdeps/unix/sysv/linux/alpha/kaio_cancel.c
@@ -0,0 +1,33 @@
+#include <shlib-compat.h>
+
+#define aio_cancel64 XXX
+#include <aio.h>
+#undef aio_cancel64
+#include <errno.h>
+
+extern __typeof (aio_cancel) __new_aio_cancel;
+extern __typeof (aio_cancel) __old_aio_cancel;
+
+#define aio_cancel __new_aio_cancel
+
+#include <sysdeps/unix/sysv/linux/kaio_cancel.c>
+
+#undef aio_cancel
+strong_alias (__new_aio_cancel, __new_aio_cancel64);
+versioned_symbol (librt, __new_aio_cancel, aio_cancel, GLIBC_2_3);
+versioned_symbol (librt, __new_aio_cancel64, aio_cancel64, GLIBC_2_3);
+
+#if SHLIB_COMPAT (librt, GLIBC_2_1, GLIBC_2_3)
+
+#undef ECANCELED
+#define aio_cancel __old_aio_cancel
+#define ECANCELED 125
+
+#include <sysdeps/unix/sysv/linux/kaio_cancel.c>
+
+#undef aio_cancel
+strong_alias (__old_aio_cancel, __old_aio_cancel64);
+compat_symbol (librt, __old_aio_cancel, aio_cancel, GLIBC_2_1);
+compat_symbol (librt, __old_aio_cancel64, aio_cancel64, GLIBC_2_1);
+
+#endif
diff --git a/rtkaio/sysdeps/unix/sysv/linux/ia64/Makefile b/rtkaio/sysdeps/unix/sysv/linux/ia64/Makefile
new file mode 100644
index 0000000000..ead21fb111
--- /dev/null
+++ b/rtkaio/sysdeps/unix/sysv/linux/ia64/Makefile
@@ -0,0 +1,3 @@
+ifeq ($(subdir),rtkaio)
+librtkaio-routines += rtkaio-sysdep
+endif
diff --git a/rtkaio/sysdeps/unix/sysv/linux/ia64/Versions b/rtkaio/sysdeps/unix/sysv/linux/ia64/Versions
new file mode 100644
index 0000000000..7443c81d6a
--- /dev/null
+++ b/rtkaio/sysdeps/unix/sysv/linux/ia64/Versions
@@ -0,0 +1,9 @@
+%ifdef HAVE_FORCED_UNWIND
+librtkaio {
+ GLIBC_2.3.3 {
+ # Changed timer_t.
+ timer_create; timer_delete; timer_getoverrun; timer_gettime;
+ timer_settime;
+ }
+}
+%endif
diff --git a/rtkaio/sysdeps/unix/sysv/linux/ia64/rtkaio-sysdep.S b/rtkaio/sysdeps/unix/sysv/linux/ia64/rtkaio-sysdep.S
new file mode 100644
index 0000000000..11ee214b23
--- /dev/null
+++ b/rtkaio/sysdeps/unix/sysv/linux/ia64/rtkaio-sysdep.S
@@ -0,0 +1 @@
+#include <rt-sysdep.S>
diff --git a/rtkaio/sysdeps/unix/sysv/linux/kaio_cancel.c b/rtkaio/sysdeps/unix/sysv/linux/kaio_cancel.c
new file mode 100644
index 0000000000..7d2738d5bc
--- /dev/null
+++ b/rtkaio/sysdeps/unix/sysv/linux/kaio_cancel.c
@@ -0,0 +1,238 @@
+/* Cancel requests associated with given file descriptor.
+ Copyright (C) 1997, 1998, 2000, 2002, 2003, 2005 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+ Contributed by Ulrich Drepper <drepper@cygnus.com>, 1997.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, write to the Free
+ Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307 USA. */
+
+/* We use an UGLY hack to prevent gcc from finding us cheating. The
+ implementation of aio_cancel and aio_cancel64 are identical and so
+ we want to avoid code duplication by using aliases. But gcc sees
+ the different parameter lists and prints a warning. We define here
+ a function so that aio_cancel64 has no prototype. */
+#ifndef aio_cancel
+#define aio_cancel64 XXX
+#include <aio.h>
+/* And undo the hack. */
+#undef aio_cancel64
+#endif
+
+#include <kaio_misc.h>
+
+#ifndef USE_KAIO
+#include <aio_cancel.c>
+#else
+
+#include <assert.h>
+#include <errno.h>
+
+int
+aio_cancel (fildes, aiocbp)
+ int fildes;
+ struct aiocb *aiocbp;
+{
+ struct requestlist *req = NULL;
+ int result = AIO_ALLDONE;
+
+ /* If fildes is invalid, error. */
+ if (fcntl (fildes, F_GETFL) < 0)
+ {
+ __set_errno (EBADF);
+ return -1;
+ }
+
+ /* Request the mutex. */
+ pthread_mutex_lock (&__aio_requests_mutex);
+
+ /* We are asked to cancel a specific AIO request. */
+ if (aiocbp != NULL)
+ {
+ /* If the AIO request is not for this descriptor it has no value
+ to look for the request block. */
+ if (aiocbp->aio_fildes != fildes)
+ {
+ pthread_mutex_unlock (&__aio_requests_mutex);
+ __set_errno (EINVAL);
+ return -1;
+ }
+ else if (aiocbp->__error_code == EINPROGRESS)
+ {
+ struct requestlist *last = NULL;
+
+ req = __aio_find_req_fd (fildes);
+
+ if (req != NULL)
+ while (req->aiocbp != (aiocb_union *) aiocbp)
+ {
+ last = req;
+ req = req->next_prio;
+ if (req == NULL)
+ break;
+ }
+
+ if (req != NULL)
+ {
+ /* Don't remove the entry if a thread is already working on
+ it. */
+ if (req->running == allocated)
+ {
+ result = AIO_NOTCANCELED;
+ req = NULL;
+ }
+ else
+ {
+ /* We can remove the entry. */
+ __aio_remove_request (last, req, 0);
+
+ result = AIO_CANCELED;
+
+ req->next_prio = NULL;
+ }
+ }
+ else
+ {
+ /* Try kernel requests. */
+ req = __aio_find_req ((aiocb_union *) aiocbp);
+
+ if (req == NULL)
+ {
+ pthread_mutex_unlock (&__aio_requests_mutex);
+ __set_errno (EINVAL);
+ return -1;
+ }
+
+ assert (req->kioctx != KCTX_NONE);
+
+ /* If kernel is working on it, try cancelling it. */
+ if (req->running == allocated)
+ {
+ struct kio_event ev;
+ INTERNAL_SYSCALL_DECL (err);
+ int res;
+
+ res = INTERNAL_SYSCALL (io_cancel, err, 3, __aio_kioctx,
+ &req->kiocb, &ev);
+ if (INTERNAL_SYSCALL_ERROR_P (res, err))
+ {
+ result = AIO_NOTCANCELED;
+ req = NULL;
+ }
+ else
+ req->running = queued;
+ }
+ if (req != NULL)
+ {
+ /* We can remove the entry. */
+ __aio_remove_krequest (req);
+ result = AIO_CANCELED;
+ req->next_prio = NULL;
+ }
+ }
+ }
+ }
+ else
+ {
+      /* Find the beginning of the list of all requests for this
+	 descriptor. */
+ req = __aio_find_req_fd (fildes);
+
+ /* If any request is worked on by a thread it must be the first.
+ So either we can delete all requests or all but the first. */
+ if (req != NULL)
+ {
+ if (req->running == allocated)
+ {
+ struct requestlist *old = req;
+ req = req->next_prio;
+ old->next_prio = NULL;
+
+ result = AIO_NOTCANCELED;
+
+ if (req != NULL)
+ __aio_remove_request (old, req, 1);
+ }
+ else
+ {
+ result = AIO_CANCELED;
+
+ /* We can remove the entry. */
+ __aio_remove_request (NULL, req, 1);
+ }
+ }
+
+ if (result != AIO_NOTCANCELED)
+ {
+ /* Try to cancel kernel requests. */
+ struct requestlist *kreq = __aio_find_kreq_fd (fildes);
+
+ while (kreq)
+ {
+ struct requestlist *next;
+
+ /* If kernel is working on it, try cancelling it. */
+ if (kreq->running == allocated)
+ {
+ struct kio_event ev;
+ INTERNAL_SYSCALL_DECL (err);
+ int res;
+
+ res = INTERNAL_SYSCALL (io_cancel, err, 3, __aio_kioctx,
+ &kreq->kiocb, &ev);
+ if (INTERNAL_SYSCALL_ERROR_P (res, err))
+ {
+ result = AIO_NOTCANCELED;
+ break;
+ }
+ else
+ kreq->running = queued;
+ }
+ next = kreq->next_prio;
+ __aio_remove_krequest (kreq);
+ result = AIO_CANCELED;
+ kreq->next_prio = NULL;
+ assert (kreq->running == yes || kreq->running == queued);
+ kreq->aiocbp->aiocb.__error_code = ECANCELED;
+ kreq->aiocbp->aiocb.__return_value = -1;
+ __aio_notify (kreq);
+ __aio_free_request (kreq);
+ kreq = next;
+ }
+ }
+ }
+
+ /* Mark requests as canceled and send signal. */
+ while (req != NULL)
+ {
+ struct requestlist *old = req;
+ assert (req->running == yes || req->running == queued);
+ req->aiocbp->aiocb.__error_code = ECANCELED;
+ req->aiocbp->aiocb.__return_value = -1;
+ __aio_notify (req);
+ req = req->next_prio;
+ __aio_free_request (old);
+ }
+
+ /* Release the mutex. */
+ pthread_mutex_unlock (&__aio_requests_mutex);
+
+ return result;
+}
+
+#ifndef aio_cancel
+weak_alias (aio_cancel, aio_cancel64)
+#endif
+
+#endif
diff --git a/rtkaio/sysdeps/unix/sysv/linux/kaio_error.c b/rtkaio/sysdeps/unix/sysv/linux/kaio_error.c
new file mode 100644
index 0000000000..23859c363a
--- /dev/null
+++ b/rtkaio/sysdeps/unix/sysv/linux/kaio_error.c
@@ -0,0 +1,54 @@
+/* Return error status of asynchronous I/O request.
+ Copyright (C) 1997, 2002, 2003 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+ Contributed by Ulrich Drepper <drepper@cygnus.com>, 1997.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, write to the Free
+ Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307 USA. */
+
+/* We use an UGLY hack to prevent gcc from finding us cheating. The
+ implementation of aio_error and aio_error64 are identical and so
+ we want to avoid code duplication by using aliases. But gcc sees
+ the different parameter lists and prints a warning. We define here
+ a function so that aio_error64 has no prototype. */
+#define aio_error64 XXX
+#include <aio.h>
+/* And undo the hack. */
+#undef aio_error64
+
+#include <kaio_misc.h>
+
+#ifndef USE_KAIO
+#include <aio_error.c>
+#else
+
+#include <errno.h>
+
+int
+aio_error (aiocbp)
+ const struct aiocb *aiocbp;
+{
+ int ret = aiocbp->__error_code;
+
+ if (ret == EINPROGRESS)
+ {
+ __aio_read_one_event ();
+ ret = aiocbp->__error_code;
+ }
+ return ret;
+}
+
+weak_alias (aio_error, aio_error64)
+#endif
diff --git a/rtkaio/sysdeps/unix/sysv/linux/kaio_fsync.c b/rtkaio/sysdeps/unix/sysv/linux/kaio_fsync.c
new file mode 100644
index 0000000000..d51bbe0135
--- /dev/null
+++ b/rtkaio/sysdeps/unix/sysv/linux/kaio_fsync.c
@@ -0,0 +1,6 @@
+#define aio_fsync64 XXX
+#include <aio.h>
+/* And undo the hack. */
+#undef aio_fsync64
+#include <kaio_misc.h>
+#include <aio_fsync.c>
diff --git a/rtkaio/sysdeps/unix/sysv/linux/kaio_misc.c b/rtkaio/sysdeps/unix/sysv/linux/kaio_misc.c
new file mode 100644
index 0000000000..76e0c430b7
--- /dev/null
+++ b/rtkaio/sysdeps/unix/sysv/linux/kaio_misc.c
@@ -0,0 +1,1130 @@
+/* Handle general operations.
+ Copyright (C) 1997,1998,1999,2000,2001,2002,2003,2006
+ Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+ Contributed by Ulrich Drepper <drepper@cygnus.com>, 1997.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, write to the Free
+ Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307 USA. */
+
+#include <kaio_misc.h>
+
+#ifndef USE_KAIO
+#include <aio_misc.c>
+#else
+
+#include <aio.h>
+#include <assert.h>
+#include <atomic.h>
+#include <errno.h>
+#include <limits.h>
+#include <pthread.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <sys/stat.h>
+#include <sys/time.h>
+#include <sys/sysmacros.h>
+
+#ifndef aio_create_helper_thread
+# define aio_create_helper_thread __aio_create_helper_thread
+
+extern inline int
+__aio_create_helper_thread (pthread_t *threadp, void *(*tf) (void *), void *arg)
+{
+ pthread_attr_t attr;
+
+ /* Make sure the thread is created detached. */
+ pthread_attr_init (&attr);
+ pthread_attr_setdetachstate (&attr, PTHREAD_CREATE_DETACHED);
+
+ int ret = pthread_create (threadp, &attr, tf, arg);
+
+ (void) pthread_attr_destroy (&attr);
+ return ret;
+}
+
+#endif
+
+
+static void add_request_to_runlist (struct requestlist *newrequest)
+ internal_function;
+static int add_request_to_list (struct requestlist *newrequest, int fildes,
+ int prio)
+ internal_function;
+static void * handle_kernel_aio (void *arg);
+static void kernel_callback (kctx_t ctx, struct kiocb *kiocb, long res,
+ long res2);
+
+/* Pool of request list entries. */
+static struct requestlist **pool;
+
+/* Number of total and allocated pool entries. */
+static size_t pool_max_size;
+static size_t pool_size;
+
+/* Kernel AIO context. */
+kctx_t __aio_kioctx = KCTX_NONE;
+int __have_no_kernel_aio;
+int __kernel_thread_started;
+
+/* We implement a two dimensional array but allocate each row separately.
+ The macro below determines how many entries should be used per row.
+ It should better be a power of two. */
+#define ENTRIES_PER_ROW 32
+
+/* How many rows we allocate at once. */
+#define ROWS_STEP 8
+
+/* List of available entries. */
+static struct requestlist *freelist;
+
+/* List of request waiting to be processed. */
+static struct requestlist *runlist;
+
+/* Structure list of all currently processed requests. */
+static struct requestlist *requests, *krequests;
+
+/* Number of threads currently running. */
+static int nthreads;
+
+/* Number of threads waiting for work to arrive. */
+static int idle_thread_count;
+
+
+/* These are the values used to optimize the use of AIO. The user can
+ overwrite them by using the `aio_init' function. */
+static struct aioinit optim =
+{
+ 20, /* int aio_threads; Maximal number of threads. */
+ 64, /* int aio_num; Number of expected simultaneous requests. */
+ 0,
+ 0,
+ 0,
+ 0,
+ 1,
+ 0
+};
+
+
+/* Since the list is global we need a mutex protecting it. */
+pthread_mutex_t __aio_requests_mutex = PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP;
+
+/* When you add a request to the list and there are idle threads present,
+ you signal this condition variable. When a thread finishes work, it waits
+ on this condition variable for a time before it actually exits. */
+pthread_cond_t __aio_new_request_notification = PTHREAD_COND_INITIALIZER;
+
+
+/* Functions to handle request list pool. */
+static struct requestlist *
+get_elem (void)
+{
+ struct requestlist *result;
+
+ if (freelist == NULL)
+ {
+ struct requestlist *new_row;
+ int cnt;
+
+ assert (sizeof (struct aiocb) == sizeof (struct aiocb64));
+
+ if (pool_size + 1 >= pool_max_size)
+ {
+ size_t new_max_size = pool_max_size + ROWS_STEP;
+ struct requestlist **new_tab;
+
+ new_tab = (struct requestlist **)
+ realloc (pool, new_max_size * sizeof (struct requestlist *));
+
+ if (new_tab == NULL)
+ return NULL;
+
+ pool_max_size = new_max_size;
+ pool = new_tab;
+ }
+
+ /* Allocate the new row. */
+ cnt = pool_size == 0 ? optim.aio_num : ENTRIES_PER_ROW;
+ new_row = (struct requestlist *) calloc (cnt,
+ sizeof (struct requestlist));
+ if (new_row == NULL)
+ return NULL;
+
+ pool[pool_size++] = new_row;
+
+ /* Put all the new entries in the freelist. */
+ do
+ {
+ new_row->next_prio = freelist;
+ freelist = new_row++;
+ }
+ while (--cnt > 0);
+ }
+
+ result = freelist;
+ freelist = freelist->next_prio;
+
+ return result;
+}
+
+
+void
+internal_function
+__aio_free_request (struct requestlist *elem)
+{
+ elem->running = no;
+ elem->next_prio = freelist;
+ freelist = elem;
+}
+
+
+struct requestlist *
+internal_function
+__aio_find_req (aiocb_union *elem)
+{
+ struct requestlist *runp;
+ int fildes = elem->aiocb.aio_fildes;
+ int i;
+
+ for (i = 0; i < 2; i++)
+ {
+ runp = i ? requests : krequests;
+
+ while (runp != NULL && runp->aiocbp->aiocb.aio_fildes < fildes)
+ runp = runp->next_fd;
+
+ if (runp != NULL)
+ {
+ if (runp->aiocbp->aiocb.aio_fildes != fildes)
+ runp = NULL;
+ else
+ while (runp != NULL && runp->aiocbp != elem)
+ runp = runp->next_prio;
+ if (runp != NULL)
+ return runp;
+ }
+ }
+
+ return NULL;
+}
+
+
+struct requestlist *
+internal_function
+__aio_find_req_fd (int fildes)
+{
+ struct requestlist *runp = requests;
+
+ while (runp != NULL && runp->aiocbp->aiocb.aio_fildes < fildes)
+ runp = runp->next_fd;
+
+ return (runp != NULL && runp->aiocbp->aiocb.aio_fildes == fildes
+ ? runp : NULL);
+}
+
+
+struct requestlist *
+internal_function
+__aio_find_kreq_fd (int fildes)
+{
+ struct requestlist *runp = krequests;
+
+ while (runp != NULL && runp->aiocbp->aiocb.aio_fildes < fildes)
+ runp = runp->next_fd;
+
+ return (runp != NULL && runp->aiocbp->aiocb.aio_fildes == fildes
+ ? runp : NULL);
+}
+
+
+void
+internal_function
+__aio_remove_request (struct requestlist *last, struct requestlist *req,
+ int all)
+{
+ assert (req->running == yes || req->running == queued
+ || req->running == done);
+ assert (req->kioctx == KCTX_NONE);
+
+ if (last != NULL)
+ last->next_prio = all ? NULL : req->next_prio;
+ else
+ {
+ if (all || req->next_prio == NULL)
+ {
+ if (req->last_fd != NULL)
+ req->last_fd->next_fd = req->next_fd;
+ else
+ requests = req->next_fd;
+ if (req->next_fd != NULL)
+ req->next_fd->last_fd = req->last_fd;
+ }
+ else
+ {
+ if (req->last_fd != NULL)
+ req->last_fd->next_fd = req->next_prio;
+ else
+ requests = req->next_prio;
+
+ if (req->next_fd != NULL)
+ req->next_fd->last_fd = req->next_prio;
+
+ req->next_prio->last_fd = req->last_fd;
+ req->next_prio->next_fd = req->next_fd;
+
+ /* Mark this entry as runnable. */
+ req->next_prio->running = yes;
+ }
+
+ if (req->running == yes)
+ {
+ struct requestlist *runp = runlist;
+
+ last = NULL;
+ while (runp != NULL)
+ {
+ if (runp == req)
+ {
+ if (last == NULL)
+ runlist = runp->next_run;
+ else
+ last->next_run = runp->next_run;
+ break;
+ }
+ last = runp;
+ runp = runp->next_run;
+ }
+ }
+ }
+}
+
+void
+internal_function
+__aio_remove_krequest (struct requestlist *req)
+{
+ assert (req->running == yes || req->running == queued
+ || req->running == done);
+ assert (req->kioctx != KCTX_NONE);
+
+ if (req->prev_prio != NULL)
+ {
+ req->prev_prio->next_prio = req->next_prio;
+ if (req->next_prio != NULL)
+ req->next_prio->prev_prio = req->prev_prio;
+ }
+ else if (req->next_prio == NULL)
+ {
+ if (req->last_fd != NULL)
+ req->last_fd->next_fd = req->next_fd;
+ else
+ krequests = req->next_fd;
+ if (req->next_fd != NULL)
+ req->next_fd->last_fd = req->last_fd;
+ }
+ else
+ {
+ if (req->last_fd != NULL)
+ req->last_fd->next_fd = req->next_prio;
+ else
+ krequests = req->next_prio;
+ if (req->next_fd != NULL)
+ req->next_fd->last_fd = req->next_prio;
+
+ req->next_prio->prev_prio = NULL;
+ req->next_prio->last_fd = req->last_fd;
+ req->next_prio->next_fd = req->next_fd;
+ }
+}
+
+
+/* The thread handler. */
+static void *handle_fildes_io (void *arg);
+static int wait_for_kernel_requests (int fildes);
+
+
+/* User optimization. */
+void
+__aio_init (const struct aioinit *init)
+{
+ /* Get the mutex. */
+ pthread_mutex_lock (&__aio_requests_mutex);
+
+ /* Only allow writing new values if the table is not yet allocated. */
+ if (pool == NULL)
+ {
+ optim.aio_threads = init->aio_threads < 1 ? 1 : init->aio_threads;
+ optim.aio_num = (init->aio_num < ENTRIES_PER_ROW
+ ? ENTRIES_PER_ROW
+ : init->aio_num & ~ENTRIES_PER_ROW);
+ }
+
+ if (init->aio_idle_time != 0)
+ optim.aio_idle_time = init->aio_idle_time;
+
+ /* Release the mutex. */
+ pthread_mutex_unlock (&__aio_requests_mutex);
+}
+weak_alias (__aio_init, aio_init)
+
+static void
+kernel_callback (kctx_t ctx, struct kiocb *kiocb, long res, long res2)
+{
+ struct requestlist *req = (struct requestlist *)kiocb;
+ long errcode = 0;
+
+ if (res < 0 && res > -1000)
+ {
+ errcode = -res;
+ res = -1;
+ }
+ req->aiocbp->aiocb.__return_value = res;
+ atomic_write_barrier ();
+ req->aiocbp->aiocb.__error_code = errcode;
+ __aio_notify (req);
+ assert (req->running == allocated);
+ req->running = done;
+ __aio_remove_krequest (req);
+ __aio_free_request (req);
+}
+
+void
+internal_function
+__aio_read_one_event (void)
+{
+ struct kio_event ev[10];
+ struct timespec ts;
+ int count, i;
+
+ if (__aio_kioctx == KCTX_NONE)
+ return;
+ ts.tv_sec = 0;
+ ts.tv_nsec = 0;
+ do
+ {
+ INTERNAL_SYSCALL_DECL (err);
+ count = INTERNAL_SYSCALL (io_getevents, err, 5, __aio_kioctx, 0, 10,
+ ev, &ts);
+ if (INTERNAL_SYSCALL_ERROR_P (count, err) || count == 0)
+ break;
+ pthread_mutex_lock (&__aio_requests_mutex);
+ for (i = 0; i < count; i++)
+ {
+ void (*cb)(kctx_t, struct kiocb *, long, long);
+
+ cb = (void *) (uintptr_t) ev[i].kioe_data;
+ cb (__aio_kioctx, (struct kiocb *) (uintptr_t) ev[i].kioe_obj,
+ ev[i].kioe_res, ev[i].kioe_res2);
+ }
+ pthread_mutex_unlock (&__aio_requests_mutex);
+ }
+ while (count == 10);
+}
+
+int
+internal_function
+__aio_wait_for_events (kctx_t kctx, const struct timespec *timespec)
+{
+ int ret, i;
+ struct kio_event ev[10];
+ struct timespec ts;
+ INTERNAL_SYSCALL_DECL (err);
+
+ pthread_mutex_unlock (&__aio_requests_mutex);
+ ts.tv_sec = 0;
+ ts.tv_nsec = 0;
+ do
+ {
+ ret = INTERNAL_SYSCALL (io_getevents, err, 5, kctx, 1, 10, ev,
+ timespec);
+ if (INTERNAL_SYSCALL_ERROR_P (ret, err) || ret == 0)
+ break;
+
+ pthread_mutex_lock (&__aio_requests_mutex);
+ for (i = 0; i < ret; i++)
+ {
+ void (*cb)(kctx_t, struct kiocb *, long, long);
+
+ cb = (void *) (uintptr_t) ev[i].kioe_data;
+ cb (kctx, (struct kiocb *) (uintptr_t) ev[i].kioe_obj,
+ ev[i].kioe_res, ev[i].kioe_res2);
+ }
+ if (ret < 10)
+ return 0;
+ pthread_mutex_unlock (&__aio_requests_mutex);
+ timespec = &ts;
+ }
+ while (1);
+
+ pthread_mutex_lock (&__aio_requests_mutex);
+ return (timespec != &ts
+ && INTERNAL_SYSCALL_ERROR_P (ret, err)
+ && INTERNAL_SYSCALL_ERRNO (ret, err) == ETIMEDOUT) ? ETIMEDOUT : 0;
+}
+
+int
+internal_function
+__aio_create_kernel_thread (void)
+{
+ pthread_t thid;
+
+ if (__kernel_thread_started)
+ return 0;
+
+ if (aio_create_helper_thread (&thid, handle_kernel_aio, NULL) != 0)
+ return -1;
+ __kernel_thread_started = 1;
+ return 0;
+}
+
+static void *
+handle_kernel_aio (void *arg __attribute__((unused)))
+{
+ int ret, i;
+ INTERNAL_SYSCALL_DECL (err);
+ struct kio_event ev[10];
+
+ for (;;)
+ {
+ ret = INTERNAL_SYSCALL (io_getevents, err, 5, __aio_kioctx, 1, 10, ev,
+ NULL);
+ if (INTERNAL_SYSCALL_ERROR_P (ret, err) || ret == 0)
+ continue;
+ pthread_mutex_lock (&__aio_requests_mutex);
+ for (i = 0; i < ret; i++)
+ {
+ void (*cb)(kctx_t, struct kiocb *, long, long);
+
+ cb = (void *) (uintptr_t) ev[i].kioe_data;
+ cb (__aio_kioctx, (struct kiocb *) (uintptr_t) ev[i].kioe_obj,
+ ev[i].kioe_res, ev[i].kioe_res2);
+ }
+ pthread_mutex_unlock (&__aio_requests_mutex);
+ }
+ return NULL;
+}
+
+static int
+internal_function
+add_request_to_list (struct requestlist *newp, int fildes, int prio)
+{
+ struct requestlist *last, *runp, *reqs;
+
+ last = NULL;
+ reqs = newp->kioctx != KCTX_NONE ? krequests : requests;
+ runp = reqs;
+
+ /* First look whether the current file descriptor is currently
+ worked with. */
+ while (runp != NULL
+ && runp->aiocbp->aiocb.aio_fildes < fildes)
+ {
+ last = runp;
+ runp = runp->next_fd;
+ }
+
+ if (runp != NULL
+ && runp->aiocbp->aiocb.aio_fildes == fildes)
+ {
+ /* The current file descriptor is worked on. It makes no sense
+ to start another thread since this new thread would fight
+ with the running thread for the resources. But we also cannot
+	 say that the thread processing this descriptor shall immediately
+ after finishing the current job process this request if there
+ are other threads in the running queue which have a higher
+ priority. */
+
+ /* Simply enqueue it after the running one according to the
+ priority. */
+ while (runp->next_prio != NULL
+ && runp->next_prio->aiocbp->aiocb.__abs_prio >= prio)
+ runp = runp->next_prio;
+
+ newp->next_prio = runp->next_prio;
+ runp->next_prio = newp;
+ if (newp->kioctx != KCTX_NONE)
+ {
+ newp->prev_prio = runp;
+ if (newp->next_prio != NULL)
+ newp->next_prio->prev_prio = newp;
+ }
+ return queued;
+ }
+ else
+ {
+ /* Enqueue this request for a new descriptor. */
+ if (last == NULL)
+ {
+ newp->last_fd = NULL;
+ newp->next_fd = reqs;
+ if (reqs != NULL)
+ reqs->last_fd = newp;
+ if (newp->kioctx != KCTX_NONE)
+ krequests = newp;
+ else
+ requests = newp;
+ }
+ else
+ {
+ newp->next_fd = last->next_fd;
+ newp->last_fd = last;
+ last->next_fd = newp;
+ if (newp->next_fd != NULL)
+ newp->next_fd->last_fd = newp;
+ }
+
+ newp->next_prio = NULL;
+ if (newp->kioctx != KCTX_NONE)
+ newp->prev_prio = NULL;
+ return yes;
+ }
+}
+
+/* Hand NEWP over to the user-space thread pool: link it into the
+   request lists and either start a new helper thread, wake an idle
+   one, or leave the request in the run queue.  Called with
+   __aio_requests_mutex held.  Returns 0 on success, -1 when no thread
+   could be created and none is running (errno left as set by the
+   thread-creation attempt, e.g. EAGAIN for temporary failure).  */
+static int
+internal_function
+__aio_enqueue_user_request (struct requestlist *newp)
+{
+ int result = 0;
+ int running = add_request_to_list (newp, newp->aiocbp->aiocb.aio_fildes,
+ newp->aiocbp->aiocb.__abs_prio);
+
+ if (running == yes)
+ {
+ /* We try to create a new thread for this file descriptor. The
+ function which gets called will handle all available requests
+ for this descriptor and when all are processed it will
+ terminate.
+
+ If no new thread can be created or if the specified limit of
+ threads for AIO is reached we queue the request. */
+
+ /* See if we need to and are able to create a thread. */
+ if (nthreads < optim.aio_threads && idle_thread_count == 0)
+ {
+ pthread_t thid;
+
+ running = newp->running = allocated;
+
+ /* Now try to start a thread. */
+ if (aio_create_helper_thread (&thid, handle_fildes_io, newp) == 0)
+ /* We managed to enqueue the request. All errors which can
+ happen now can be recognized by calls to `aio_return' and
+ `aio_error'. */
+ ++nthreads;
+ else
+ {
+ /* Reset the running flag. The new request is not running. */
+ running = newp->running = yes;
+
+ if (nthreads == 0)
+ /* We cannot create a thread in the moment and there is
+ also no thread running. This is a problem. `errno' is
+ set to EAGAIN if this is only a temporary problem. */
+ result = -1;
+ }
+ }
+ }
+
+ /* Enqueue the request in the run queue if it is not yet running. */
+ if (running == yes && result == 0)
+ {
+ add_request_to_runlist (newp);
+
+ /* If there is a thread waiting for work, then let it know that we
+ have just given it something to do. */
+ if (idle_thread_count > 0)
+ pthread_cond_signal (&__aio_new_request_notification);
+ }
+
+ if (result == 0)
+ newp->running = running;
+ return result;
+}
+
+/* The main function of the async I/O handling. It enqueues requests
+ and if necessary starts and handles threads. OPERATION is one of
+ the LIO_* opcodes possibly or'ed with LIO_KTHREAD flags; KCTX is an
+ existing kernel AIO context or KCTX_NONE. Returns the new request
+ entry, or NULL with errno and the aiocb error fields set. */
+struct requestlist *
+internal_function
+__aio_enqueue_request_ctx (aiocb_union *aiocbp, int operation, kctx_t kctx)
+{
+ int policy, prio;
+ struct sched_param param;
+ struct requestlist *newp;
+ int op = (operation & 0xffff);
+
+ if (op == LIO_SYNC || op == LIO_DSYNC)
+ {
+ aiocbp->aiocb.aio_reqprio = 0;
+ /* FIXME: Kernel doesn't support sync yet. */
+ operation &= ~LIO_KTHREAD;
+ kctx = KCTX_NONE;
+ }
+ else if (aiocbp->aiocb.aio_reqprio < 0
+ || aiocbp->aiocb.aio_reqprio > AIO_PRIO_DELTA_MAX)
+ {
+ /* Invalid priority value. */
+ __set_errno (EINVAL);
+ aiocbp->aiocb.__error_code = EINVAL;
+ aiocbp->aiocb.__return_value = -1;
+ return NULL;
+ }
+
+ if ((operation & LIO_KTHREAD) || kctx != KCTX_NONE)
+ {
+ /* io_* is only really asynchronous for O_DIRECT or /dev/raw*. */
+ int fl = __fcntl (aiocbp->aiocb.aio_fildes, F_GETFL);
+ if (fl < 0 || (fl & O_DIRECT) == 0)
+ {
+ struct stat64 st;
+ /* Major 162 is the raw character device; anything else falls
+ back to the user-space thread implementation.  */
+ if (__fxstat64 (_STAT_VER, aiocbp->aiocb.aio_fildes, &st) < 0
+ || ! S_ISCHR (st.st_mode)
+ || major (st.st_rdev) != 162)
+ {
+ operation &= ~LIO_KTHREAD;
+ kctx = KCTX_NONE;
+ }
+ }
+ }
+
+ /* Compute priority for this request. */
+ pthread_getschedparam (pthread_self (), &policy, &param);
+ prio = param.sched_priority - aiocbp->aiocb.aio_reqprio;
+
+ /* Get the mutex. */
+ pthread_mutex_lock (&__aio_requests_mutex);
+
+ if (operation & LIO_KTHREAD)
+ {
+ /* Lazily create the kernel AIO context on first use; if io_setup
+ fails once we remember that and never try again.  */
+ if (__aio_kioctx == KCTX_NONE && !__have_no_kernel_aio)
+ {
+ int res;
+ INTERNAL_SYSCALL_DECL (err);
+
+ __aio_kioctx = 0;
+ do
+ res = INTERNAL_SYSCALL (io_setup, err, 2, 1024, &__aio_kioctx);
+ while (INTERNAL_SYSCALL_ERROR_P (res, err)
+ && INTERNAL_SYSCALL_ERRNO (res, err) == EINTR);
+ if (INTERNAL_SYSCALL_ERROR_P (res, err))
+ {
+ __have_no_kernel_aio = 1;
+ __aio_kioctx = KCTX_NONE;
+ }
+ }
+
+ kctx = __aio_kioctx;
+
+ /* Start the event-collecting thread if notification is needed
+ and it is not running yet.  */
+ if (kctx != KCTX_NONE && !__kernel_thread_started
+ && ((operation & LIO_KTHREAD_REQUIRED)
+ || aiocbp->aiocb.aio_sigevent.sigev_notify != SIGEV_NONE))
+ {
+ if (__aio_create_kernel_thread () < 0)
+ kctx = KCTX_NONE;
+ }
+ }
+
+ /* Get a new element for the waiting list. */
+ newp = get_elem ();
+ if (newp == NULL)
+ {
+ pthread_mutex_unlock (&__aio_requests_mutex);
+ __set_errno (EAGAIN);
+ return NULL;
+ }
+ newp->aiocbp = aiocbp;
+#ifdef BROKEN_THREAD_SIGNALS
+ newp->caller_pid = (aiocbp->aiocb.aio_sigevent.sigev_notify == SIGEV_SIGNAL
+ ? getpid () : 0);
+#endif
+ newp->waiting = NULL;
+ newp->kioctx = kctx;
+
+ aiocbp->aiocb.__abs_prio = prio;
+ aiocbp->aiocb.__policy = policy;
+ aiocbp->aiocb.aio_lio_opcode = op;
+ aiocbp->aiocb.__error_code = EINPROGRESS;
+ aiocbp->aiocb.__return_value = 0;
+
+ /* Try to submit the request directly to the kernel; on failure fall
+ through to the user-space implementation below.  */
+ if (newp->kioctx != KCTX_NONE)
+ {
+ int res;
+ INTERNAL_SYSCALL_DECL (err);
+
+ aiocb_union *aiocbp = newp->aiocbp;
+ struct kiocb *kiocbs[] __attribute__((unused)) = { &newp->kiocb };
+
+ newp->kiocb.kiocb_data = (uintptr_t) kernel_callback;
+ switch (op & 127)
+ {
+ case LIO_READ: newp->kiocb.kiocb_lio_opcode = IO_CMD_PREAD; break;
+ case LIO_WRITE: newp->kiocb.kiocb_lio_opcode = IO_CMD_PWRITE; break;
+ case LIO_SYNC:
+ case LIO_DSYNC: newp->kiocb.kiocb_lio_opcode = IO_CMD_FSYNC; break;
+ }
+ /* Bit 128 marks the 64-bit (aiocb64) variants of the opcodes.  */
+ if (op & 128)
+ newp->kiocb.kiocb_offset = aiocbp->aiocb64.aio_offset;
+ else
+ newp->kiocb.kiocb_offset = aiocbp->aiocb.aio_offset;
+ newp->kiocb.kiocb_fildes = aiocbp->aiocb.aio_fildes;
+ newp->kiocb.kiocb_buf = (uintptr_t) aiocbp->aiocb.aio_buf;
+ newp->kiocb.kiocb_nbytes = aiocbp->aiocb.aio_nbytes;
+ /* FIXME. */
+ newp->kiocb.kiocb_req_prio = 0;
+ res = INTERNAL_SYSCALL (io_submit, err, 3, newp->kioctx, 1, kiocbs);
+ if (! INTERNAL_SYSCALL_ERROR_P (res, err))
+ {
+ newp->running = allocated;
+ add_request_to_list (newp, aiocbp->aiocb.aio_fildes, prio);
+ /* Release the mutex. */
+ pthread_mutex_unlock (&__aio_requests_mutex);
+ return newp;
+ }
+ newp->kioctx = KCTX_NONE;
+ }
+
+ if (__aio_enqueue_user_request (newp))
+ {
+ /* Something went wrong. */
+ __aio_free_request (newp);
+ newp = NULL;
+ }
+
+ /* Release the mutex. */
+ pthread_mutex_unlock (&__aio_requests_mutex);
+
+ return newp;
+}
+
+
+/* Block until every kernel-served request currently allocated for
+   FILDES has completed; used to order fsync/fdatasync after pending
+   kernel I/O.  Returns 0 on success (or if there was nothing to wait
+   for), -1 when the kernel event thread could not be started.  */
+static int
+wait_for_kernel_requests (int fildes)
+{
+ pthread_mutex_lock (&__aio_requests_mutex);
+
+ struct requestlist *kreq = __aio_find_kreq_fd (fildes), *req;
+ int nent = 0;
+ int ret = 0;
+
+ /* Count the in-flight (allocated) kernel requests for FILDES.  */
+ req = kreq;
+ while (req)
+ {
+ if (req->running == allocated)
+ ++nent;
+ req = req->next_prio;
+ }
+
+ if (nent)
+ {
+ /* The completions are delivered by the kernel event thread, so it
+ must be running before we sleep.  */
+ if (__aio_create_kernel_thread () < 0)
+ {
+ pthread_mutex_unlock (&__aio_requests_mutex);
+ return -1;
+ }
+
+#ifndef DONT_NEED_AIO_MISC_COND
+ pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
+#endif
+ struct waitlist waitlist[nent];
+ int cnt = 0;
+
+ /* Attach one waitlist entry to each pending request; NENT doubles
+ as the shared countdown the notifier decrements.  */
+ while (kreq)
+ {
+ if (kreq->running == allocated)
+ {
+#ifndef DONT_NEED_AIO_MISC_COND
+ waitlist[cnt].cond = &cond;
+#endif
+ waitlist[cnt].result = NULL;
+ waitlist[cnt].next = kreq->waiting;
+ waitlist[cnt].counterp = &nent;
+ waitlist[cnt].sigevp = NULL;
+#ifdef BROKEN_THREAD_SIGNALS
+ waitlist[cnt].caller_pid = 0; /* Not needed. */
+#endif
+ kreq->waiting = &waitlist[cnt++];
+ }
+ kreq = kreq->next_prio;
+ }
+
+#ifdef DONT_NEED_AIO_MISC_COND
+ AIO_MISC_WAIT (ret, nent, NULL, 0);
+#else
+ do
+ pthread_cond_wait (&cond, &__aio_requests_mutex);
+ while (nent);
+
+ pthread_cond_destroy (&cond);
+#endif
+ }
+
+ pthread_mutex_unlock (&__aio_requests_mutex);
+ return ret;
+}
+
+
+/* Worker-thread entry point for user-space served requests.  ARG is
+   either the request to process first or NULL to only drain the run
+   queue.  The thread loops: execute one request, publish its result,
+   pick the next entry off the run queue (sleeping up to
+   optim.aio_idle_time for new work), and exits when the queue stays
+   empty.  */
+static void *
+handle_fildes_io (void *arg)
+{
+ pthread_t self = pthread_self ();
+ struct sched_param param;
+ struct requestlist *runp = (struct requestlist *) arg;
+ aiocb_union *aiocbp;
+ int policy;
+ int fildes;
+
+ pthread_getschedparam (self, &policy, &param);
+
+ do
+ {
+ /* If runp is NULL, then we were created to service the work queue
+ in general, not to handle any particular request. In that case we
+ skip the "do work" stuff on the first pass, and go directly to the
+ "get work off the work queue" part of this loop, which is near the
+ end. */
+ if (runp == NULL)
+ pthread_mutex_lock (&__aio_requests_mutex);
+ else
+ {
+ /* Hopefully this request is marked as running. */
+ assert (runp->running == allocated);
+
+ /* Update our variables. */
+ aiocbp = runp->aiocbp;
+ fildes = aiocbp->aiocb.aio_fildes;
+
+ /* Change the priority to the requested value (if necessary). */
+ if (aiocbp->aiocb.__abs_prio != param.sched_priority
+ || aiocbp->aiocb.__policy != policy)
+ {
+ param.sched_priority = aiocbp->aiocb.__abs_prio;
+ policy = aiocbp->aiocb.__policy;
+ pthread_setschedparam (self, policy, &param);
+ }
+
+ /* Process request pointed to by RUNP. We must not be disturbed
+ by signals.  Opcode bit 128 selects the aiocb64 variant. */
+ if ((aiocbp->aiocb.aio_lio_opcode & 127) == LIO_READ)
+ {
+ if (aiocbp->aiocb.aio_lio_opcode & 128)
+ aiocbp->aiocb.__return_value =
+ TEMP_FAILURE_RETRY (__pread64 (fildes, (void *)
+ aiocbp->aiocb64.aio_buf,
+ aiocbp->aiocb64.aio_nbytes,
+ aiocbp->aiocb64.aio_offset));
+ else
+ aiocbp->aiocb.__return_value =
+ TEMP_FAILURE_RETRY (pread (fildes,
+ (void *) aiocbp->aiocb.aio_buf,
+ aiocbp->aiocb.aio_nbytes,
+ aiocbp->aiocb.aio_offset));
+
+ if (aiocbp->aiocb.__return_value == -1 && errno == ESPIPE)
+ /* The Linux kernel is different from others. It returns
+ ESPIPE if using pread on a socket. Other platforms
+ simply ignore the offset parameter and behave like
+ read. */
+ aiocbp->aiocb.__return_value =
+ TEMP_FAILURE_RETRY (read (fildes,
+ (void *) aiocbp->aiocb64.aio_buf,
+ aiocbp->aiocb64.aio_nbytes));
+ }
+ else if ((aiocbp->aiocb.aio_lio_opcode & 127) == LIO_WRITE)
+ {
+ if (aiocbp->aiocb.aio_lio_opcode & 128)
+ aiocbp->aiocb.__return_value =
+ TEMP_FAILURE_RETRY (__pwrite64 (fildes, (const void *)
+ aiocbp->aiocb64.aio_buf,
+ aiocbp->aiocb64.aio_nbytes,
+ aiocbp->aiocb64.aio_offset));
+ else
+ aiocbp->aiocb.__return_value =
+ TEMP_FAILURE_RETRY (__libc_pwrite (fildes, (const void *)
+ aiocbp->aiocb.aio_buf,
+ aiocbp->aiocb.aio_nbytes,
+ aiocbp->aiocb.aio_offset));
+
+ if (aiocbp->aiocb.__return_value == -1 && errno == ESPIPE)
+ /* The Linux kernel is different from others. It returns
+ ESPIPE if using pwrite on a socket. Other platforms
+ simply ignore the offset parameter and behave like
+ write. */
+ aiocbp->aiocb.__return_value =
+ TEMP_FAILURE_RETRY (write (fildes,
+ (void *) aiocbp->aiocb64.aio_buf,
+ aiocbp->aiocb64.aio_nbytes));
+ }
+ else if (aiocbp->aiocb.aio_lio_opcode == LIO_DSYNC
+ || aiocbp->aiocb.aio_lio_opcode == LIO_SYNC)
+ {
+ /* Let pending kernel I/O on this descriptor finish before
+ syncing.  */
+ if (wait_for_kernel_requests (fildes) < 0)
+ {
+ aiocbp->aiocb.__return_value = -1;
+ __set_errno (ENOMEM);
+ }
+ else if (aiocbp->aiocb.aio_lio_opcode == LIO_DSYNC)
+ aiocbp->aiocb.__return_value =
+ TEMP_FAILURE_RETRY (fdatasync (fildes));
+ else
+ aiocbp->aiocb.__return_value =
+ TEMP_FAILURE_RETRY (fsync (fildes));
+ }
+ else
+ {
+ /* This is an invalid opcode. */
+ aiocbp->aiocb.__return_value = -1;
+ __set_errno (EINVAL);
+ }
+
+ /* Get the mutex. */
+ pthread_mutex_lock (&__aio_requests_mutex);
+
+ /* In theory we would need here a write memory barrier since the
+ callers test using aio_error() whether the request finished
+ and once this value != EINPROGRESS the field __return_value
+ must be committed to memory.
+
+ But since the pthread_mutex_lock call involves write memory
+ barriers as well it is not necessary. */
+
+ if (aiocbp->aiocb.__return_value == -1)
+ aiocbp->aiocb.__error_code = errno;
+ else
+ aiocbp->aiocb.__error_code = 0;
+
+ /* Send the signal to notify about finished processing of the
+ request. */
+ __aio_notify (runp);
+
+ /* For debugging purposes we reset the running flag of the
+ finished request. */
+ assert (runp->running == allocated);
+ runp->running = done;
+
+ /* Now dequeue the current request. */
+ __aio_remove_request (NULL, runp, 0);
+ if (runp->next_prio != NULL)
+ add_request_to_runlist (runp->next_prio);
+
+ /* Free the old element. */
+ __aio_free_request (runp);
+ }
+
+ runp = runlist;
+
+ /* If the runlist is empty, then we sleep for a while, waiting for
+ something to arrive in it. */
+ if (runp == NULL && optim.aio_idle_time >= 0)
+ {
+ struct timeval now;
+ struct timespec wakeup_time;
+
+ ++idle_thread_count;
+ gettimeofday (&now, NULL);
+ wakeup_time.tv_sec = now.tv_sec + optim.aio_idle_time;
+ wakeup_time.tv_nsec = now.tv_usec * 1000;
+ if (wakeup_time.tv_nsec > 1000000000)
+ {
+ wakeup_time.tv_nsec -= 1000000000;
+ ++wakeup_time.tv_sec;
+ }
+ pthread_cond_timedwait (&__aio_new_request_notification,
+ &__aio_requests_mutex,
+ &wakeup_time);
+ --idle_thread_count;
+ runp = runlist;
+ }
+
+ if (runp == NULL)
+ --nthreads;
+ else
+ {
+ assert (runp->running == yes);
+ runp->running = allocated;
+ runlist = runp->next_run;
+
+ /* If we have a request to process, and there's still another in
+ the run list, then we need to either wake up or create a new
+ thread to service the request that is still in the run list. */
+ if (runlist != NULL)
+ {
+ /* There are at least two items in the work queue to work on.
+ If there are other idle threads, then we should wake them
+ up for these other work elements; otherwise, we should try
+ to create a new thread. */
+ if (idle_thread_count > 0)
+ pthread_cond_signal (&__aio_new_request_notification);
+ else if (nthreads < optim.aio_threads)
+ {
+ pthread_t thid;
+
+ /* Now try to start a thread. If we fail, no big deal,
+ because we know that there is at least one thread (us)
+ that is working on AIO operations. */
+ if (aio_create_helper_thread (&thid, handle_fildes_io, NULL)
+ == 0)
+ ++nthreads;
+ }
+ }
+ }
+
+ /* Release the mutex. */
+ pthread_mutex_unlock (&__aio_requests_mutex);
+ }
+ while (runp != NULL);
+
+ return NULL;
+}
+
+
+/* Free allocated resources (libc_freeres hook): release every row of
+   the request pool and then the row-pointer table itself. */
+libc_freeres_fn (free_res)
+{
+ size_t row;
+
+ for (row = 0; row < pool_max_size; ++row)
+ free (pool[row]);
+
+ free (pool);
+}
+
+
+/* Add newrequest to the runlist. The __abs_prio flag of newrequest must
+ be correctly set to do this. Also, you had better set newrequest's
+ "running" flag to "yes" before you release your lock or you'll throw an
+ assertion. */
+static void
+internal_function
+add_request_to_runlist (struct requestlist *newrequest)
+{
+ int prio = newrequest->aiocbp->aiocb.__abs_prio;
+ struct requestlist *runp;
+
+ /* The run queue is kept sorted by descending __abs_prio.  */
+ if (runlist == NULL || runlist->aiocbp->aiocb.__abs_prio < prio)
+ {
+ newrequest->next_run = runlist;
+ runlist = newrequest;
+ }
+ else
+ {
+ runp = runlist;
+
+ /* Find the last entry with priority >= PRIO and insert after it.  */
+ while (runp->next_run != NULL
+ && runp->next_run->aiocbp->aiocb.__abs_prio >= prio)
+ runp = runp->next_run;
+
+ newrequest->next_run = runp->next_run;
+ runp->next_run = newrequest;
+ }
+}
+#endif
diff --git a/rtkaio/sysdeps/unix/sysv/linux/kaio_misc.h b/rtkaio/sysdeps/unix/sysv/linux/kaio_misc.h
new file mode 100644
index 0000000000..5e0ca19c31
--- /dev/null
+++ b/rtkaio/sysdeps/unix/sysv/linux/kaio_misc.h
@@ -0,0 +1,328 @@
+/* Copyright (C) 1997,1999,2000,2001,2002,2003,2006
+ Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, write to the Free
+ Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307 USA. */
+
+#ifndef _AIO_MISC_H
+
+#include <sysdep.h>
+
+#if !defined __NR_io_setup || !defined __NR_io_destroy \
+ || !defined __NR_io_getevents || !defined __NR_io_submit \
+ || !defined __NR_io_cancel
+
+#include <aio_misc.h>
+
+#else
+
+#define _AIO_MISC_H 1
+#define USE_KAIO 1
+
+#include <aio.h>
+#include <pthread.h>
+#include <stdint.h>
+#include <signal.h>
+#include <sysdep.h>
+#include <limits.h>
+
+#ifdef HAVE_FORCED_UNWIND
+
+/* We define a special synchronization primitive for AIO. POSIX
+ conditional variables would be ideal but the pthread_cond_*wait
+ operations do not return on EINTR. This is a requirement for
+ correct aio_suspend and lio_listio implementations. */
+
+#include <assert.h>
+#include <pthreadP.h>
+#include <lowlevellock.h>
+
+# define DONT_NEED_AIO_MISC_COND 1
+
+/* Decrement the waiter's shared counter and wake one futex waiter once
+   it reaches zero.  Futex-based replacement for the condition-variable
+   notification used when DONT_NEED_AIO_MISC_COND is not defined.  */
+# define AIO_MISC_NOTIFY(waitlist) \
+ do { \
+ if (*waitlist->counterp > 0 && --*waitlist->counterp == 0) \
+ lll_futex_wake (waitlist->counterp, 1); \
+ } while (0)
+
+/* Sleep until FUTEX becomes zero or TIMEOUT expires, releasing
+   __aio_requests_mutex around the wait and re-acquiring it afterward.
+   If CANCEL is nonzero the wait is an async-cancellation point.
+   Sets RESULT to EINTR when interrupted and EAGAIN on timeout;
+   unlike pthread_cond_*wait this does return on EINTR, which
+   aio_suspend/lio_listio require.  */
+# define AIO_MISC_WAIT(result, futex, timeout, cancel) \
+ do { \
+ volatile int *futexaddr = &futex; \
+ int oldval = futex; \
+ \
+ if (oldval != 0) \
+ { \
+ pthread_mutex_unlock (&__aio_requests_mutex); \
+ \
+ int oldtype; \
+ if (cancel) \
+ oldtype = LIBC_CANCEL_ASYNC (); \
+ \
+ int status; \
+ do \
+ { \
+ status = lll_futex_timed_wait (futexaddr, oldval, timeout); \
+ if (status != -EWOULDBLOCK) \
+ break; \
+ \
+ oldval = *futexaddr; \
+ } \
+ while (oldval != 0); \
+ \
+ if (cancel) \
+ LIBC_CANCEL_RESET (oldtype); \
+ \
+ if (status == -EINTR) \
+ result = EINTR; \
+ else if (status == -ETIMEDOUT) \
+ result = EAGAIN; \
+ else \
+ assert (status == 0 || status == -EWOULDBLOCK); \
+ \
+ pthread_mutex_lock (&__aio_requests_mutex); \
+ } \
+ } while (0)
+
+#endif
+
+typedef unsigned long kctx_t;
+#define KCTX_NONE ~0UL
+extern kctx_t __aio_kioctx;
+
+/* Kernel AIO command codes stored in kiocb_lio_opcode.  NOTE(review):
+   values must match the kernel's IOCB_CMD_* numbering -- confirm
+   against <linux/aio_abi.h>.  */
+enum
+{
+ IO_CMD_PREAD,
+ IO_CMD_PWRITE,
+ IO_CMD_FSYNC,
+ IO_CMD_FDSYNC,
+ IO_CMD_PREADX,
+ IO_CMD_POLL
+};
+
+/* User-space control block submitted to the kernel via io_submit.
+   All fields are fixed-width so the layout is the same on 32- and
+   64-bit ABIs.  NOTE(review): presumably mirrors the kernel's
+   struct iocb -- verify field order against <linux/aio_abi.h>.  */
+struct kiocb
+{
+ uint64_t kiocb_data; /* Opaque cookie; here: completion callback.  */
+ uint64_t kiocb_key;
+ uint16_t kiocb_lio_opcode; /* One of IO_CMD_*.  */
+ int16_t kiocb_req_prio;
+ uint32_t kiocb_fildes;
+ uint64_t kiocb_buf; /* User buffer address.  */
+ uint64_t kiocb_nbytes;
+ int64_t kiocb_offset;
+ int64_t __pad3, __pad4;
+};
+
+/* Completion record returned by io_getevents: the cookie and iocb
+   address from the submitted kiocb plus the two result words.  */
+struct kio_event
+{
+ uint64_t kioe_data; /* kiocb_data of the finished request.  */
+ uint64_t kioe_obj; /* Address of the submitted kiocb.  */
+ int64_t kioe_res;
+ int64_t kioe_res2;
+};
+
+/* Extend the operation enum. */
+enum
+{
+ LIO_DSYNC = LIO_NOP + 1,
+ LIO_SYNC,
+ LIO_READ64 = LIO_READ | 128,
+ LIO_WRITE64 = LIO_WRITE | 128,
+ LIO_KTHREAD = 0x10000,
+ LIO_KTHREAD_REQUIRED = 0x20000
+};
+
+
+/* Union of the two request types. */
+typedef union
+ {
+ struct aiocb aiocb;
+ struct aiocb64 aiocb64;
+ } aiocb_union;
+
+
+/* Used to synchronize waiters with request completion.  Entries are
+   chained off a request's `waiting' list.  */
+struct waitlist
+ {
+ struct waitlist *next;
+
+ /* The next two fields are used in synchronous `lio_listio' operations. */
+#ifndef DONT_NEED_AIO_MISC_COND
+ pthread_cond_t *cond;
+#endif
+ int *result;
+
+ /* Countdown shared by all entries of one waiter; completion
+ notification decrements it.  */
+ volatile int *counterp;
+ /* The next field is used in asynchronous `lio_listio' operations. */
+ struct sigevent *sigevp;
+#ifdef BROKEN_THREAD_SIGNALS
+ /* XXX See requestlist, it's used to work around the broken signal
+ handling in Linux. */
+ pid_t caller_pid;
+#endif
+ };
+
+
+/* Status of a request (the `running' field of struct requestlist).  */
+enum
+{
+ no, /* Not queued / not found.  */
+ queued, /* Chained behind another request for the same descriptor.  */
+ yes, /* Sitting in the run queue, waiting for a thread.  */
+ allocated, /* Being processed by a thread or by the kernel.  */
+ done /* Processing finished.  */
+};
+
+
+/* Used to queue requests.. */
+struct requestlist
+ {
+ struct kiocb kiocb;
+ kctx_t kioctx;
+
+ int running;
+
+ struct requestlist *last_fd;
+ struct requestlist *next_fd;
+ struct requestlist *next_prio;
+ struct requestlist *next_run;
+ /* For kioctx != KCTX_NONE requests we are doubly linked. */
+#define prev_prio next_run
+
+ /* Pointer to the actual data. */
+ aiocb_union *aiocbp;
+
+#ifdef BROKEN_THREAD_SIGNALS
+ /* PID of the initiator thread.
+ XXX This is only necessary for the broken signal handling on Linux. */
+ pid_t caller_pid;
+#endif
+
+ /* List of waiting processes. */
+ struct waitlist *waiting;
+ };
+
+
+/* Lock for global I/O list of requests. */
+extern pthread_mutex_t __aio_requests_mutex attribute_hidden;
+
+
+/* Enqueue request. */
+extern struct requestlist *__aio_enqueue_request_ctx (aiocb_union *aiocbp,
+ int operation,
+ kctx_t kctx)
+ attribute_hidden internal_function;
+
+#define __aio_enqueue_request(aiocbp, operation) \
+ __aio_enqueue_request_ctx (aiocbp, operation | LIO_KTHREAD, KCTX_NONE)
+
+/* Find request entry for given AIO control block. */
+extern struct requestlist *__aio_find_req (aiocb_union *elem)
+ attribute_hidden internal_function;
+
+/* Find request entry for given file descriptor. */
+extern struct requestlist *__aio_find_req_fd (int fildes)
+ attribute_hidden internal_function;
+
+/* Find request entry for given file descriptor. */
+extern struct requestlist *__aio_find_kreq_fd (int fildes)
+ attribute_hidden internal_function;
+
+/* Remove request from the list. */
+extern void __aio_remove_request (struct requestlist *last,
+ struct requestlist *req, int all)
+ attribute_hidden internal_function;
+
+extern void __aio_remove_krequest (struct requestlist *req)
+ attribute_hidden internal_function;
+
+/* Release the entry for the request. */
+extern void __aio_free_request (struct requestlist *req)
+ attribute_hidden internal_function;
+
+/* Notify initiator of request and tell this everybody listening. */
+extern void __aio_notify (struct requestlist *req)
+ attribute_hidden internal_function;
+
+/* Notify initiator of request. */
+#ifdef BROKEN_THREAD_SIGNALS
+extern int __aio_notify_only (struct sigevent *sigev, pid_t caller_pid)
+ attribute_hidden internal_function;
+#else
+extern int __aio_notify_only (struct sigevent *sigev)
+ attribute_hidden internal_function;
+#endif
+
+/* Send the signal. */
+extern int __aio_sigqueue (int sig, const union sigval val, pid_t caller_pid)
+ attribute_hidden internal_function;
+
+extern int __aio_wait_for_events (kctx_t kctx, const struct timespec *timeout)
+ attribute_hidden internal_function;
+
+extern void __aio_read_one_event (void) attribute_hidden internal_function;
+
+extern int __aio_create_kernel_thread (void)
+ attribute_hidden internal_function;
+
+extern int __have_no_kernel_aio attribute_hidden;
+extern int __kernel_thread_started attribute_hidden;
+
+#ifndef BROKEN_THREAD_SIGNALS
+# define aio_start_notify_thread __aio_start_notify_thread
+# define aio_create_helper_thread __aio_create_helper_thread
+
+/* Run at the start of a notification thread: reset its signal mask to
+   empty, since helper threads are created with every signal blocked
+   (see __aio_create_helper_thread below).  */
+extern inline void
+__aio_start_notify_thread (void)
+{
+ sigset_t ss;
+ sigemptyset (&ss);
+ INTERNAL_SYSCALL_DECL (err);
+ INTERNAL_SYSCALL (rt_sigprocmask, err, 4, SIG_SETMASK, &ss, NULL, _NSIG / 8);
+}
+
+/* Start a detached helper thread running TF(ARG) with a minimal stack
+   and all signals blocked (the creator's mask is saved and restored
+   around pthread_create so the new thread inherits the full block).
+   Stores the thread id in *THREADP and returns pthread_create's
+   result (0 on success).  */
+extern inline int
+__aio_create_helper_thread (pthread_t *threadp, void *(*tf) (void *), void *arg)
+{
+ pthread_attr_t attr;
+
+ /* Make sure the thread is created detached. */
+ pthread_attr_init (&attr);
+ pthread_attr_setdetachstate (&attr, PTHREAD_CREATE_DETACHED);
+
+ /* The helper thread needs only very little resources. */
+ (void) pthread_attr_setstacksize (&attr, PTHREAD_STACK_MIN);
+
+ /* Block all signals in the helper thread. To do this thoroughly we
+ temporarily have to block all signals here. */
+ sigset_t ss;
+ sigset_t oss;
+ sigfillset (&ss);
+ INTERNAL_SYSCALL_DECL (err);
+ INTERNAL_SYSCALL (rt_sigprocmask, err, 4, SIG_SETMASK, &ss, &oss, _NSIG / 8);
+
+ int ret = pthread_create (threadp, &attr, tf, arg);
+
+ /* Restore the signal mask. */
+ INTERNAL_SYSCALL (rt_sigprocmask, err, 4, SIG_SETMASK, &oss, NULL,
+ _NSIG / 8);
+
+ (void) pthread_attr_destroy (&attr);
+ return ret;
+}
+#endif
+
+#endif
+#endif /* aio_misc.h */
diff --git a/rtkaio/sysdeps/unix/sysv/linux/kaio_notify.c b/rtkaio/sysdeps/unix/sysv/linux/kaio_notify.c
new file mode 100644
index 0000000000..8b03909bc4
--- /dev/null
+++ b/rtkaio/sysdeps/unix/sysv/linux/kaio_notify.c
@@ -0,0 +1,2 @@
+#include <kaio_misc.h>
+#include <aio_notify.c>
diff --git a/rtkaio/sysdeps/unix/sysv/linux/kaio_read.c b/rtkaio/sysdeps/unix/sysv/linux/kaio_read.c
new file mode 100644
index 0000000000..0c5db34879
--- /dev/null
+++ b/rtkaio/sysdeps/unix/sysv/linux/kaio_read.c
@@ -0,0 +1,2 @@
+#include <kaio_misc.h>
+#include <aio_read.c>
diff --git a/rtkaio/sysdeps/unix/sysv/linux/kaio_read64.c b/rtkaio/sysdeps/unix/sysv/linux/kaio_read64.c
new file mode 100644
index 0000000000..62db3d45bd
--- /dev/null
+++ b/rtkaio/sysdeps/unix/sysv/linux/kaio_read64.c
@@ -0,0 +1,2 @@
+#include <kaio_misc.h>
+#include <aio_read64.c>
diff --git a/rtkaio/sysdeps/unix/sysv/linux/kaio_return.c b/rtkaio/sysdeps/unix/sysv/linux/kaio_return.c
new file mode 100644
index 0000000000..1d9f6cbabb
--- /dev/null
+++ b/rtkaio/sysdeps/unix/sysv/linux/kaio_return.c
@@ -0,0 +1,50 @@
+/* Return exit value of asynchronous I/O request.
+ Copyright (C) 1997, 2002, 2003 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+ Contributed by Ulrich Drepper <drepper@cygnus.com>, 1997.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, write to the Free
+ Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307 USA. */
+
+
+/* We use an UGLY hack to prevent gcc from finding us cheating. The
+ implementation of aio_return and aio_return64 are identical and so
+ we want to avoid code duplication by using aliases. But gcc sees
+ the different parameter lists and prints a warning. We define here
+ a function so that aio_return64 has no prototype. */
+#define aio_return64 XXX
+#include <aio.h>
+/* And undo the hack. */
+#undef aio_return64
+
+#include <kaio_misc.h>
+
+#ifndef USE_KAIO
+#include <aio_return.c>
+#else
+
+#include <errno.h>
+
+/* Return the result value of the asynchronous request AIOCBP.  If the
+   request still appears in progress, first drain one pending kernel
+   completion event, which may retire it even when no kernel event
+   thread is running.  */
+ssize_t
+aio_return (aiocbp)
+ struct aiocb *aiocbp;
+{
+ if (aiocbp->__error_code == EINPROGRESS)
+ __aio_read_one_event ();
+ return aiocbp->__return_value;
+}
+
+weak_alias (aio_return, aio_return64)
+#endif
diff --git a/rtkaio/sysdeps/unix/sysv/linux/kaio_sigqueue.c b/rtkaio/sysdeps/unix/sysv/linux/kaio_sigqueue.c
new file mode 100644
index 0000000000..3a5c5efb0d
--- /dev/null
+++ b/rtkaio/sysdeps/unix/sysv/linux/kaio_sigqueue.c
@@ -0,0 +1,2 @@
+#include <kaio_misc.h>
+#include <aio_sigqueue.c>
diff --git a/rtkaio/sysdeps/unix/sysv/linux/kaio_suspend.c b/rtkaio/sysdeps/unix/sysv/linux/kaio_suspend.c
new file mode 100644
index 0000000000..2400c5223c
--- /dev/null
+++ b/rtkaio/sysdeps/unix/sysv/linux/kaio_suspend.c
@@ -0,0 +1,327 @@
+/* Suspend until termination of one of the given requests.
+ Copyright (C) 1997, 1998, 1999, 2000, 2002, 2003, 2006
+ Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+ Contributed by Ulrich Drepper <drepper@cygnus.com>, 1997.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, write to the Free
+ Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307 USA. */
+
+
+/* We use an UGLY hack to prevent gcc from finding us cheating. The
+ implementations of aio_suspend and aio_suspend64 are identical and so
+ we want to avoid code duplication by using aliases. But gcc sees
+ the different parameter lists and prints a warning. We define here
+ a function so that aio_suspend64 has no prototype. */
+#define aio_suspend64 XXX
+#include <aio.h>
+/* And undo the hack. */
+#undef aio_suspend64
+
+#include <kaio_misc.h>
+
+#ifndef USE_KAIO
+#include <aio_suspend.c>
+#else
+
+#include <assert.h>
+#include <errno.h>
+#include <stdbool.h>
+#include <stdlib.h>
+#include <sys/time.h>
+
+#include <bits/libc-lock.h>
+#include <sysdep-cancel.h>
+
+
+/* Argument bundle for the cancellation cleanup handler installed by
+   aio_suspend: everything needed to unlink its waitlist entries.  */
+struct clparam
+{
+ const struct aiocb *const *list; /* The caller's request array.  */
+ struct waitlist *waitlist; /* Our on-stack waitlist entries.  */
+ struct requestlist *requestlist; /* Matching request entries.  */
+#ifndef DONT_NEED_AIO_MISC_COND
+ pthread_cond_t *cond;
+#endif
+ int nent; /* Number of array elements.  */
+};
+
+
+/* Cancellation cleanup handler for aio_suspend: re-acquire the mutex
+   if the futex path dropped it, unlink our waitlist entries from every
+   request that has not finished, destroy the condition variable, and
+   release the mutex.  ARG is a struct clparam.  */
+static void
+cleanup (void *arg)
+{
+#ifdef DONT_NEED_AIO_MISC_COND
+ /* Acquire the mutex. If pthread_cond_*wait is used this would
+ happen implicitly. */
+ pthread_mutex_lock (&__aio_requests_mutex);
+#endif
+
+ const struct clparam *param = (const struct clparam *) arg;
+
+ /* Now remove the entry in the waiting list for all requests
+ which didn't terminate. */
+ int cnt = param->nent;
+ while (cnt-- > 0)
+ if (param->list[cnt] != NULL
+ && param->list[cnt]->__error_code == EINPROGRESS)
+ {
+ struct waitlist **listp;
+
+ assert (param->requestlist[cnt] != NULL);
+
+ /* There is the chance that we cannot find our entry anymore. This
+ could happen if the request terminated and restarted again. */
+ listp = &param->requestlist[cnt]->waiting;
+ while (*listp != NULL && *listp != &param->waitlist[cnt])
+ listp = &(*listp)->next;
+
+ if (*listp != NULL)
+ *listp = (*listp)->next;
+ }
+
+#ifndef DONT_NEED_AIO_MISC_COND
+ /* Release the condition variable. */
+ (void) pthread_cond_destroy (param->cond);
+#endif
+
+ /* Release the mutex. */
+ pthread_mutex_unlock (&__aio_requests_mutex);
+}
+
+
+/* Suspend the caller until at least one request in LIST (NENT entries,
+   NULL entries ignored) has finished, or until the relative TIMEOUT
+   expires.  Returns 0 on success; -1 with errno set to EINVAL (bad
+   NENT), EAGAIN (timeout) or EINTR.  */
+int
+aio_suspend (list, nent, timeout)
+ const struct aiocb *const list[];
+ int nent;
+ const struct timespec *timeout;
+{
+ if (__builtin_expect (nent < 0, 0))
+ {
+ __set_errno (EINVAL);
+ return -1;
+ }
+
+ struct waitlist waitlist[nent];
+ struct requestlist *requestlist[nent];
+#ifndef DONT_NEED_AIO_MISC_COND
+ pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
+#endif
+ int cnt;
+ int result = 0;
+ int cntr = 1;
+ int total = 0, ktotal = 0;
+
+ /* Request the mutex. */
+ pthread_mutex_lock (&__aio_requests_mutex);
+
+ /* There is not yet a finished request. Signal the request that
+ we are working for it. */
+ for (cnt = 0; cnt < nent; ++cnt)
+ if (list[cnt] != NULL)
+ {
+ if (list[cnt]->__error_code == EINPROGRESS)
+ {
+ requestlist[cnt] = __aio_find_req ((aiocb_union *) list[cnt]);
+
+ if (requestlist[cnt] != NULL)
+ {
+#ifndef DONT_NEED_AIO_MISC_COND
+ waitlist[cnt].cond = &cond;
+#endif
+ waitlist[cnt].result = NULL;
+ waitlist[cnt].next = requestlist[cnt]->waiting;
+ waitlist[cnt].counterp = &cntr;
+ waitlist[cnt].sigevp = NULL;
+#ifdef BROKEN_THREAD_SIGNALS
+ waitlist[cnt].caller_pid = 0; /* Not needed. */
+#endif
+ requestlist[cnt]->waiting = &waitlist[cnt];
+ total++;
+ /* Track how many of the pending requests are served by
+ the kernel context.  */
+ if (requestlist[cnt]->kioctx != KCTX_NONE)
+ ktotal++;
+ }
+ else
+ /* We will never suspend. */
+ break;
+ }
+ else
+ /* We will never suspend. */
+ break;
+ }
+
+
+ /* Only wait if no entry was NULL or already finished. */
+ if (cnt == nent && total)
+ {
+ struct clparam clparam =
+ {
+ .list = list,
+ .waitlist = waitlist,
+ .requestlist = requestlist,
+#ifndef DONT_NEED_AIO_MISC_COND
+ .cond = &cond,
+#endif
+ .nent = nent
+ };
+
+ pthread_cleanup_push (cleanup, &clparam);
+
+ if (!__kernel_thread_started && ktotal)
+ {
+ /* If the kernel aio thread was not started yet all requests
+ are served by the kernel and there are no other threads running,
+ read events with mutex hold, so that nobody else can get them
+ instead of us here. */
+ if (SINGLE_THREAD_P && total == ktotal)
+ {
+ if (timeout == NULL)
+ {
+ while (cntr == 1)
+ __aio_wait_for_events (__aio_kioctx, NULL);
+ }
+ else
+ {
+ /* Convert the relative timeout to an absolute deadline
+ and poll the kernel, shrinking the remaining timeout
+ each round.  */
+ struct timeval now;
+ struct timespec abstime, ts;
+
+ __gettimeofday (&now, NULL);
+ abstime.tv_nsec = timeout->tv_nsec + now.tv_usec * 1000;
+ abstime.tv_sec = timeout->tv_sec + now.tv_sec;
+ if (abstime.tv_nsec >= 1000000000)
+ {
+ abstime.tv_nsec -= 1000000000;
+ abstime.tv_sec += 1;
+ }
+
+ for (;;)
+ {
+ result = __aio_wait_for_events (__aio_kioctx, timeout);
+ if (cntr < 1)
+ break;
+ if (result == ETIMEDOUT)
+ break;
+
+ __gettimeofday (&now, NULL);
+ if (now.tv_sec > abstime.tv_sec
+ || (now.tv_sec == abstime.tv_sec
+ && now.tv_usec * 1000 >= abstime.tv_nsec))
+ break;
+
+ ts.tv_nsec = abstime.tv_nsec - now.tv_usec * 1000;
+ ts.tv_sec = abstime.tv_sec - now.tv_sec;
+ if (abstime.tv_nsec < now.tv_usec * 1000)
+ {
+ ts.tv_nsec += 1000000000;
+ ts.tv_sec -= 1;
+ }
+ timeout = &ts;
+ }
+
+ if (cntr < 1)
+ result = 0;
+ else
+ result = ETIMEDOUT;
+ }
+ total = 0;
+ }
+ else if (__aio_create_kernel_thread () < 0)
+ {
+ total = 0;
+ __set_errno (ENOMEM);
+ result = -1;
+ }
+ }
+
+ if (total == 0)
+ /* Suspending was handled above. */
+ ;
+#ifdef DONT_NEED_AIO_MISC_COND
+ else
+ AIO_MISC_WAIT (result, cntr, timeout, 1);
+#else
+ else if (timeout == NULL)
+ result = pthread_cond_wait (&cond, &__aio_requests_mutex);
+ else
+ {
+ /* We have to convert the relative timeout value into an
+ absolute time value which pthread_cond_timedwait expects. */
+ struct timeval now;
+ struct timespec abstime;
+
+ __gettimeofday (&now, NULL);
+ abstime.tv_nsec = timeout->tv_nsec + now.tv_usec * 1000;
+ abstime.tv_sec = timeout->tv_sec + now.tv_sec;
+ if (abstime.tv_nsec >= 1000000000)
+ {
+ abstime.tv_nsec -= 1000000000;
+ abstime.tv_sec += 1;
+ }
+
+ result = pthread_cond_timedwait (&cond, &__aio_requests_mutex,
+ &abstime);
+ }
+#endif
+
+ pthread_cleanup_pop (0);
+ }
+
+ /* Now remove the entry in the waiting list for all requests
+ which didn't terminate. */
+ while (cnt-- > 0)
+ if (list[cnt] != NULL && list[cnt]->__error_code == EINPROGRESS)
+ {
+ struct waitlist **listp;
+
+ assert (requestlist[cnt] != NULL);
+
+ /* There is the chance that we cannot find our entry anymore. This
+ could happen if the request terminated and restarted again. */
+ listp = &requestlist[cnt]->waiting;
+ while (*listp != NULL && *listp != &waitlist[cnt])
+ listp = &(*listp)->next;
+
+ if (*listp != NULL)
+ *listp = (*listp)->next;
+ }
+
+#ifndef DONT_NEED_AIO_MISC_COND
+ /* Release the condition variable. */
+ if (__builtin_expect (pthread_cond_destroy (&cond) != 0, 0))
+ /* This must never happen. */
+ abort ();
+#endif
+
+ if (result != 0)
+ {
+#ifndef DONT_NEED_AIO_MISC_COND
+ /* An error occurred. Possibly it's ETIMEDOUT. We have to translate
+ the timeout error report of `pthread_cond_timedwait' to the
+ form expected from `aio_suspend'. */
+ if (result == ETIMEDOUT)
+ __set_errno (EAGAIN);
+ else
+#endif
+ __set_errno (result);
+
+ result = -1;
+ }
+
+ /* Release the mutex. */
+ pthread_mutex_unlock (&__aio_requests_mutex);
+
+ return result;
+}
+
+weak_alias (aio_suspend, aio_suspend64)
+#endif
diff --git a/rtkaio/sysdeps/unix/sysv/linux/kaio_write.c b/rtkaio/sysdeps/unix/sysv/linux/kaio_write.c
new file mode 100644
index 0000000000..96863b2cdd
--- /dev/null
+++ b/rtkaio/sysdeps/unix/sysv/linux/kaio_write.c
@@ -0,0 +1,2 @@
+#include <kaio_misc.h>
+#include <aio_write.c>
diff --git a/rtkaio/sysdeps/unix/sysv/linux/kaio_write64.c b/rtkaio/sysdeps/unix/sysv/linux/kaio_write64.c
new file mode 100644
index 0000000000..1fa7121dfd
--- /dev/null
+++ b/rtkaio/sysdeps/unix/sysv/linux/kaio_write64.c
@@ -0,0 +1,2 @@
+#include <kaio_misc.h>
+#include <aio_write64.c>
diff --git a/rtkaio/sysdeps/unix/sysv/linux/klio_listio.c b/rtkaio/sysdeps/unix/sysv/linux/klio_listio.c
new file mode 100644
index 0000000000..b7676c9f03
--- /dev/null
+++ b/rtkaio/sysdeps/unix/sysv/linux/klio_listio.c
@@ -0,0 +1,322 @@
+/* Enqueue and list of read or write requests.
+ Copyright (C) 1997,1998,1999,2000,2001,2002,2003,2005,2006
+ Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+ Contributed by Ulrich Drepper <drepper@cygnus.com>, 1997.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, write to the Free
+ Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307 USA. */
+
+#include <kaio_misc.h>
+
+#ifndef USE_KAIO
+#include <lio_listio.c>
+#else
+
+#ifndef lio_listio
+#include <aio.h>
+#include <assert.h>
+#include <errno.h>
+#include <stdlib.h>
+#include <unistd.h>
+
+#define LIO_OPCODE_BASE 0
+#endif
+
+#include <shlib-compat.h>
+
+
+/* We need this special structure to handle asynchronous I/O. */
+struct async_waitlist
+ {
+ int counter;
+ struct sigevent sigev;
+ struct waitlist list[0];
+ };
+
+
+/* The code in glibc 2.1 to glibc 2.4 issued only one event when all
+ requests submitted with lio_listio finished. The existing practice
+ is to issue events for the individual requests as well. This is
+ what the new code does. */
+#if SHLIB_COMPAT (librt, GLIBC_2_1, GLIBC_2_4)
+# define LIO_MODE(mode) ((mode) & 127)
+# define NO_INDIVIDUAL_EVENT_P(mode) ((mode) & 128)
+#else
+# define LIO_MODE(mode) mode
+# define NO_INDIVIDUAL_EVENT_P(mode) 0
+#endif
+
+
+static int
+lio_listio_internal (int mode, struct aiocb *const list[], int nent,
+ struct sigevent *sig)
+{
+ struct sigevent defsigev;
+ struct requestlist *requests[nent];
+ int cnt;
+ volatile int total = 0;
+ int result = 0, op = 0;
+ kctx_t kctx = KCTX_NONE;
+
+ if (sig == NULL)
+ {
+ defsigev.sigev_notify = SIGEV_NONE;
+ sig = &defsigev;
+ }
+
+ /* Request the mutex. */
+ pthread_mutex_lock (&__aio_requests_mutex);
+
+ if (LIO_MODE (mode) == LIO_WAIT && ! __have_no_kernel_aio && nent > 0)
+ {
+ int res;
+ INTERNAL_SYSCALL_DECL (err);
+
+ kctx = 0;
+ do
+ res = INTERNAL_SYSCALL (io_setup, err, 2, nent, &kctx);
+ while (INTERNAL_SYSCALL_ERROR_P (res, err)
+ && INTERNAL_SYSCALL_ERRNO (res, err) == EINTR);
+ if (INTERNAL_SYSCALL_ERROR_P (res, err))
+ {
+ kctx = KCTX_NONE;
+ if (INTERNAL_SYSCALL_ERRNO (res, err) == ENOSYS)
+ __have_no_kernel_aio = 1;
+ }
+ }
+ else if (LIO_MODE (mode) == LIO_NOWAIT)
+ {
+ op = LIO_KTHREAD;
+ if (sig->sigev_notify != SIGEV_NONE)
+ op = LIO_KTHREAD | LIO_KTHREAD_REQUIRED;
+ }
+ op |= LIO_OPCODE_BASE;
+
+ /* Now we can enqueue all requests. Since we already acquired the
+ mutex the enqueue function need not do this. */
+ for (cnt = 0; cnt < nent; ++cnt)
+ if (list[cnt] != NULL && list[cnt]->aio_lio_opcode != LIO_NOP)
+ {
+ if (NO_INDIVIDUAL_EVENT_P (mode))
+ list[cnt]->aio_sigevent.sigev_notify = SIGEV_NONE;
+ requests[cnt]
+ = __aio_enqueue_request_ctx ((aiocb_union *) list[cnt],
+ list[cnt]->aio_lio_opcode | op,
+ kctx);
+
+ if (requests[cnt] != NULL)
+ /* Successfully enqueued. */
+ ++total;
+ else
+ /* Signal that we've seen an error. `errno' and the error code
+ of the aiocb will tell more. */
+ result = -1;
+ }
+ else
+ requests[cnt] = NULL;
+
+ if (total == 0)
+ {
+ /* We don't have anything to do except signalling if we work
+ asynchronously. */
+
+ if (kctx != KCTX_NONE)
+ {
+ INTERNAL_SYSCALL_DECL (err);
+ INTERNAL_SYSCALL (io_destroy, err, 1, kctx);
+ }
+
+ /* Release the mutex. We do this before raising a signal since the
+ signal handler might do a `siglongjmp' and then the mutex is
+ locked forever. */
+ pthread_mutex_unlock (&__aio_requests_mutex);
+
+ if (LIO_MODE (mode) == LIO_NOWAIT)
+ {
+#ifdef BROKEN_THREAD_SIGNALS
+ __aio_notify_only (sig,
+ sig->sigev_notify == SIGEV_SIGNAL ? getpid () : 0);
+#else
+ __aio_notify_only (sig);
+#endif
+ }
+
+ return result;
+ }
+ else if (LIO_MODE (mode) == LIO_WAIT)
+ {
+#ifndef DONT_NEED_AIO_MISC_COND
+ pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
+ int oldstate;
+#endif
+ struct waitlist waitlist[nent];
+ volatile int ktotal = 0;
+
+ total = 0;
+ for (cnt = 0; cnt < nent; ++cnt)
+ {
+ assert (requests[cnt] == NULL || list[cnt] != NULL);
+
+ if (requests[cnt] != NULL && list[cnt]->aio_lio_opcode != LIO_NOP)
+ {
+ if (requests[cnt]->kioctx != KCTX_NONE)
+ {
+ assert (requests[cnt]->kioctx == kctx);
+ waitlist[cnt].counterp = &ktotal;
+ ++ktotal;
+ }
+ else
+ {
+ waitlist[cnt].counterp = &total;
+ ++total;
+ }
+#ifndef DONT_NEED_AIO_MISC_COND
+ waitlist[cnt].cond = &cond;
+#endif
+ waitlist[cnt].result = &result;
+ waitlist[cnt].next = requests[cnt]->waiting;
+ waitlist[cnt].sigevp = NULL;
+#ifdef BROKEN_THREAD_SIGNALS
+ waitlist[cnt].caller_pid = 0; /* Not needed. */
+#endif
+ requests[cnt]->waiting = &waitlist[cnt];
+ }
+ }
+
+ while (ktotal > 0)
+ __aio_wait_for_events (kctx, NULL);
+#ifdef DONT_NEED_AIO_MISC_COND
+ AIO_MISC_WAIT (result, total, NULL, 0);
+#else
+ /* Since `pthread_cond_wait'/`pthread_cond_timedwait' are cancellation
+ points we must be careful. We added entries to the waiting lists
+ which we must remove. So defer cancellation for now. */
+ pthread_setcancelstate (PTHREAD_CANCEL_DISABLE, &oldstate);
+
+ while (total > 0)
+ pthread_cond_wait (&cond, &__aio_requests_mutex);
+
+ /* Now it's time to restore the cancellation state. */
+ pthread_setcancelstate (oldstate, NULL);
+
+ /* Release the conditional variable. */
+ if (pthread_cond_destroy (&cond) != 0)
+ /* This must never happen. */
+ abort ();
+#endif
+
+ if (kctx != KCTX_NONE)
+ {
+ INTERNAL_SYSCALL_DECL (err);
+ INTERNAL_SYSCALL (io_destroy, err, 1, kctx);
+ }
+
+ /* If any of the I/O requests failed, return -1 and set errno. */
+ if (result != 0)
+ {
+ __set_errno (result == EINTR ? EINTR : EIO);
+ result = -1;
+ }
+ }
+ else if (sig->sigev_notify != SIGEV_NONE)
+ {
+ struct async_waitlist *waitlist;
+
+ waitlist = (struct async_waitlist *)
+ malloc (sizeof (struct async_waitlist)
+ + (nent * sizeof (struct waitlist)));
+
+ if (waitlist == NULL)
+ {
+ __set_errno (EAGAIN);
+ result = -1;
+ }
+ else
+ {
+#ifdef BROKEN_THREAD_SIGNALS
+ pid_t caller_pid = sig->sigev_notify == SIGEV_SIGNAL ? getpid () : 0;
+#endif
+ total = 0;
+
+ for (cnt = 0; cnt < nent; ++cnt)
+ {
+ assert (requests[cnt] == NULL || list[cnt] != NULL);
+
+ if (requests[cnt] != NULL
+ && list[cnt]->aio_lio_opcode != LIO_NOP)
+ {
+#ifndef DONT_NEED_AIO_MISC_COND
+ waitlist->list[cnt].cond = NULL;
+#endif
+ waitlist->list[cnt].result = NULL;
+ waitlist->list[cnt].next = requests[cnt]->waiting;
+ waitlist->list[cnt].counterp = &waitlist->counter;
+ waitlist->list[cnt].sigevp = &waitlist->sigev;
+#ifdef BROKEN_THREAD_SIGNALS
+ waitlist->list[cnt].caller_pid = caller_pid;
+#endif
+ requests[cnt]->waiting = &waitlist->list[cnt];
+ ++total;
+ }
+ }
+
+ waitlist->counter = total;
+ waitlist->sigev = *sig;
+ }
+ }
+
+ /* Release the mutex. */
+ pthread_mutex_unlock (&__aio_requests_mutex);
+
+ return result;
+}
+
+
+#if SHLIB_COMPAT (librt, GLIBC_2_1, GLIBC_2_4)
+int
+attribute_compat_text_section
+__lio_listio_21 (int mode, struct aiocb *const list[], int nent,
+ struct sigevent *sig)
+{
+ /* Check arguments. */
+ if (mode != LIO_WAIT && mode != LIO_NOWAIT)
+ {
+ __set_errno (EINVAL);
+ return -1;
+ }
+
+ return lio_listio_internal (mode | LIO_NO_INDIVIDUAL_EVENT, list, nent, sig);
+}
+compat_symbol (librt, __lio_listio_21, lio_listio, GLIBC_2_1);
+#endif
+
+
+int
+__lio_listio_item_notify (int mode, struct aiocb *const list[], int nent,
+ struct sigevent *sig)
+{
+ /* Check arguments. */
+ if (mode != LIO_WAIT && mode != LIO_NOWAIT)
+ {
+ __set_errno (EINVAL);
+ return -1;
+ }
+
+ return lio_listio_internal (mode, list, nent, sig);
+}
+versioned_symbol (librt, __lio_listio_item_notify, lio_listio, GLIBC_2_4);
+
+#endif
diff --git a/rtkaio/sysdeps/unix/sysv/linux/klio_listio64.c b/rtkaio/sysdeps/unix/sysv/linux/klio_listio64.c
new file mode 100644
index 0000000000..937cf1a9f0
--- /dev/null
+++ b/rtkaio/sysdeps/unix/sysv/linux/klio_listio64.c
@@ -0,0 +1,40 @@
+/* Enqueue and list of read or write requests, 64bit offset version.
+ Copyright (C) 1997, 1998, 1999, 2003, 2006 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+ Contributed by Ulrich Drepper <drepper@cygnus.com>, 1997.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, write to the Free
+ Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307 USA. */
+
+#include <kaio_misc.h>
+
+#ifndef USE_KAIO
+#include <lio_listio64.c>
+#else
+
+#include <aio.h>
+#include <assert.h>
+#include <errno.h>
+#include <stdlib.h>
+#include <unistd.h>
+
+#define lio_listio lio_listio64
+#define __lio_listio_21 __lio_listio64_21
+#define __lio_listio_item_notify __lio_listio64_item_notify
+#define aiocb aiocb64
+#define LIO_OPCODE_BASE 128
+#include <klio_listio.c>
+
+#endif
diff --git a/rtkaio/sysdeps/unix/sysv/linux/powerpc/Makefile b/rtkaio/sysdeps/unix/sysv/linux/powerpc/Makefile
new file mode 100644
index 0000000000..ead21fb111
--- /dev/null
+++ b/rtkaio/sysdeps/unix/sysv/linux/powerpc/Makefile
@@ -0,0 +1,3 @@
+ifeq ($(subdir),rtkaio)
+librtkaio-routines += rtkaio-sysdep
+endif
diff --git a/rtkaio/sysdeps/unix/sysv/linux/powerpc/powerpc64/Versions b/rtkaio/sysdeps/unix/sysv/linux/powerpc/powerpc64/Versions
new file mode 100644
index 0000000000..7443c81d6a
--- /dev/null
+++ b/rtkaio/sysdeps/unix/sysv/linux/powerpc/powerpc64/Versions
@@ -0,0 +1,9 @@
+%ifdef HAVE_FORCED_UNWIND
+librtkaio {
+ GLIBC_2.3.3 {
+ # Changed timer_t.
+ timer_create; timer_delete; timer_getoverrun; timer_gettime;
+ timer_settime;
+ }
+}
+%endif
diff --git a/rtkaio/sysdeps/unix/sysv/linux/powerpc/rtkaio-sysdep.c b/rtkaio/sysdeps/unix/sysv/linux/powerpc/rtkaio-sysdep.c
new file mode 100644
index 0000000000..ff0440aa83
--- /dev/null
+++ b/rtkaio/sysdeps/unix/sysv/linux/powerpc/rtkaio-sysdep.c
@@ -0,0 +1 @@
+#include <rt-sysdep.c>
diff --git a/rtkaio/sysdeps/unix/sysv/linux/s390/Makefile b/rtkaio/sysdeps/unix/sysv/linux/s390/Makefile
new file mode 100644
index 0000000000..ead21fb111
--- /dev/null
+++ b/rtkaio/sysdeps/unix/sysv/linux/s390/Makefile
@@ -0,0 +1,3 @@
+ifeq ($(subdir),rtkaio)
+librtkaio-routines += rtkaio-sysdep
+endif
diff --git a/rtkaio/sysdeps/unix/sysv/linux/s390/rtkaio-sysdep.S b/rtkaio/sysdeps/unix/sysv/linux/s390/rtkaio-sysdep.S
new file mode 100644
index 0000000000..11ee214b23
--- /dev/null
+++ b/rtkaio/sysdeps/unix/sysv/linux/s390/rtkaio-sysdep.S
@@ -0,0 +1 @@
+#include <rt-sysdep.S>
diff --git a/rtkaio/sysdeps/unix/sysv/linux/s390/s390-64/Versions b/rtkaio/sysdeps/unix/sysv/linux/s390/s390-64/Versions
new file mode 100644
index 0000000000..7443c81d6a
--- /dev/null
+++ b/rtkaio/sysdeps/unix/sysv/linux/s390/s390-64/Versions
@@ -0,0 +1,9 @@
+%ifdef HAVE_FORCED_UNWIND
+librtkaio {
+ GLIBC_2.3.3 {
+ # Changed timer_t.
+ timer_create; timer_delete; timer_getoverrun; timer_gettime;
+ timer_settime;
+ }
+}
+%endif
diff --git a/rtkaio/sysdeps/unix/sysv/linux/sparc/Makefile b/rtkaio/sysdeps/unix/sysv/linux/sparc/Makefile
new file mode 100644
index 0000000000..ead21fb111
--- /dev/null
+++ b/rtkaio/sysdeps/unix/sysv/linux/sparc/Makefile
@@ -0,0 +1,3 @@
+ifeq ($(subdir),rtkaio)
+librtkaio-routines += rtkaio-sysdep
+endif
diff --git a/rtkaio/sysdeps/unix/sysv/linux/sparc/Versions b/rtkaio/sysdeps/unix/sysv/linux/sparc/Versions
new file mode 100644
index 0000000000..b2d59a9140
--- /dev/null
+++ b/rtkaio/sysdeps/unix/sysv/linux/sparc/Versions
@@ -0,0 +1,6 @@
+librtkaio {
+ GLIBC_2.3 {
+ # AIO functions.
+ aio_cancel; aio_cancel64;
+ }
+}
diff --git a/rtkaio/sysdeps/unix/sysv/linux/sparc/kaio_cancel.c b/rtkaio/sysdeps/unix/sysv/linux/sparc/kaio_cancel.c
new file mode 100644
index 0000000000..6e345e1643
--- /dev/null
+++ b/rtkaio/sysdeps/unix/sysv/linux/sparc/kaio_cancel.c
@@ -0,0 +1,33 @@
+#include <shlib-compat.h>
+
+#define aio_cancel64 XXX
+#include <aio.h>
+#undef aio_cancel64
+#include <errno.h>
+
+extern __typeof (aio_cancel) __new_aio_cancel;
+extern __typeof (aio_cancel) __old_aio_cancel;
+
+#define aio_cancel __new_aio_cancel
+
+#include <sysdeps/unix/sysv/linux/kaio_cancel.c>
+
+#undef aio_cancel
+strong_alias (__new_aio_cancel, __new_aio_cancel64);
+versioned_symbol (librt, __new_aio_cancel, aio_cancel, GLIBC_2_3);
+versioned_symbol (librt, __new_aio_cancel64, aio_cancel64, GLIBC_2_3);
+
+#if SHLIB_COMPAT (librt, GLIBC_2_1, GLIBC_2_3)
+
+#undef ECANCELED
+#define aio_cancel __old_aio_cancel
+#define ECANCELED 125
+
+#include <sysdeps/unix/sysv/linux/kaio_cancel.c>
+
+#undef aio_cancel
+strong_alias (__old_aio_cancel, __old_aio_cancel64);
+compat_symbol (librt, __old_aio_cancel, aio_cancel, GLIBC_2_1);
+compat_symbol (librt, __old_aio_cancel64, aio_cancel64, GLIBC_2_1);
+
+#endif
diff --git a/rtkaio/sysdeps/unix/sysv/linux/sparc/rtkaio-sysdep.c b/rtkaio/sysdeps/unix/sysv/linux/sparc/rtkaio-sysdep.c
new file mode 100644
index 0000000000..ff0440aa83
--- /dev/null
+++ b/rtkaio/sysdeps/unix/sysv/linux/sparc/rtkaio-sysdep.c
@@ -0,0 +1 @@
+#include <rt-sysdep.c>
diff --git a/rtkaio/sysdeps/unix/sysv/linux/sparc/sparc64/Versions b/rtkaio/sysdeps/unix/sysv/linux/sparc/sparc64/Versions
new file mode 100644
index 0000000000..7443c81d6a
--- /dev/null
+++ b/rtkaio/sysdeps/unix/sysv/linux/sparc/sparc64/Versions
@@ -0,0 +1,9 @@
+%ifdef HAVE_FORCED_UNWIND
+librtkaio {
+ GLIBC_2.3.3 {
+ # Changed timer_t.
+ timer_create; timer_delete; timer_getoverrun; timer_gettime;
+ timer_settime;
+ }
+}
+%endif
diff --git a/rtkaio/sysdeps/unix/sysv/linux/syscalls.list b/rtkaio/sysdeps/unix/sysv/linux/syscalls.list
new file mode 100644
index 0000000000..686b8d5d2e
--- /dev/null
+++ b/rtkaio/sysdeps/unix/sysv/linux/syscalls.list
@@ -0,0 +1,5 @@
+# File name Caller Syscall name Args Strong name Weak names
+
+kaio_mq_timedsend - mq_timedsend Ci:ipiip __GI_mq_timedsend mq_timedsend
+kaio_mq_timedreceive - mq_timedreceive Ci:ipipp __GI_mq_timedreceive mq_timedreceive
+kaio_mq_setattr - mq_getsetattr i:ipp __GI_mq_setattr mq_setattr
diff --git a/rtkaio/sysdeps/unix/sysv/linux/x86_64/Versions b/rtkaio/sysdeps/unix/sysv/linux/x86_64/Versions
new file mode 100644
index 0000000000..7443c81d6a
--- /dev/null
+++ b/rtkaio/sysdeps/unix/sysv/linux/x86_64/Versions
@@ -0,0 +1,9 @@
+%ifdef HAVE_FORCED_UNWIND
+librtkaio {
+ GLIBC_2.3.3 {
+ # Changed timer_t.
+ timer_create; timer_delete; timer_getoverrun; timer_gettime;
+ timer_settime;
+ }
+}
+%endif