summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--configure.in26
-rw-r--r--include/apr_global_mutex.h13
-rw-r--r--include/apr_proc_mutex.h15
-rw-r--r--include/apr_thread_mutex.h13
-rw-r--r--include/apr_time.h4
-rw-r--r--include/arch/netware/apr_arch_thread_mutex.h3
-rw-r--r--include/arch/unix/apr_arch_proc_mutex.h2
-rw-r--r--include/arch/unix/apr_arch_thread_mutex.h3
-rw-r--r--locks/beos/proc_mutex.c81
-rw-r--r--locks/beos/thread_cond.c2
-rw-r--r--locks/beos/thread_mutex.c95
-rw-r--r--locks/netware/proc_mutex.c26
-rw-r--r--locks/netware/thread_mutex.c104
-rw-r--r--locks/os2/proc_mutex.c33
-rw-r--r--locks/os2/thread_mutex.c26
-rw-r--r--locks/unix/global_mutex.c32
-rw-r--r--locks/unix/proc_mutex.c135
-rw-r--r--locks/unix/thread_mutex.c194
-rw-r--r--locks/win32/proc_mutex.c27
-rw-r--r--locks/win32/thread_mutex.c38
-rw-r--r--test/abts_tests.h2
-rw-r--r--test/testglobalmutex.c3
-rw-r--r--test/testlock.c73
-rw-r--r--test/testlockperf.c64
-rw-r--r--test/testmutexscope.c37
-rw-r--r--test/testprocmutex.c119
26 files changed, 1063 insertions, 107 deletions
diff --git a/configure.in b/configure.in
index d6831aa04..fefb88970 100644
--- a/configure.in
+++ b/configure.in
@@ -2057,10 +2057,17 @@ dnl ----------------------------- Checking for Locking Characteristics
AC_MSG_NOTICE([])
AC_MSG_NOTICE([Checking for Locking...])
-AC_CHECK_FUNCS(semget semctl flock)
-AC_CHECK_HEADERS(semaphore.h OS.h)
+AC_CHECK_FUNCS(semget semctl semop semtimedop flock)
+APR_IFALLYES(func:semtimedop, have_semtimedop="1", have_semtimedop="0")
+
+AC_CHECK_HEADERS(semaphore.h)
AC_SEARCH_LIBS(sem_open, rt)
-AC_CHECK_FUNCS(sem_close sem_unlink sem_post sem_wait create_sem)
+AC_CHECK_FUNCS(sem_close sem_unlink sem_post sem_wait sem_timedwait)
+APR_IFALLYES(func:sem_timedwait, have_sem_timedwait="1", have_sem_timedwait="0")
+
+AC_CHECK_HEADERS(OS.h)
+AC_CHECK_FUNCS(create_sem acquire_sem acquire_sem_etc)
+APR_IFALLYES(header:OS.h func:acquire_sem_etc, have_acquire_sem_etc="1", have_acquire_sem_etc="0")
# Some systems return ENOSYS from sem_open.
AC_CACHE_CHECK(for working sem_open,ac_cv_func_sem_open,[
@@ -2119,7 +2126,10 @@ APR_CHECK_DEFINE_FILES(POLLIN, poll.h sys/poll.h)
if test "$threads" = "1"; then
APR_CHECK_DEFINE(PTHREAD_PROCESS_SHARED, pthread.h)
- AC_CHECK_FUNCS(pthread_mutexattr_setpshared)
+ AC_CHECK_FUNCS(pthread_mutex_timedlock pthread_mutexattr_setpshared)
+ APR_IFALLYES(header:pthread.h func:pthread_mutex_timedlock,
+ have_pthread_mutex_timedlock="1", have_pthread_mutex_timedlock="0")
+ AC_SUBST(have_pthread_mutex_timedlock)
# Some systems have setpshared and define PROCESS_SHARED, but don't
# really support PROCESS_SHARED locks. So, we must validate that we
# can go through the steps without receiving some sort of system error.
@@ -2157,8 +2167,8 @@ fi
APR_IFALLYES(header:semaphore.h func:sem_open func:sem_close dnl
func:sem_unlink func:sem_post func:sem_wait,
hasposixser="1", hasposixser="0")
-APR_IFALLYES(func:semget func:semctl define:SEM_UNDO, hassysvser="1",
- hassysvser="0")
+APR_IFALLYES(func:semget func:semctl func:semop define:SEM_UNDO,
+ hassysvser="1", hassysvser="0")
APR_IFALLYES(func:flock define:LOCK_EX, hasflockser="1", hasflockser="0")
APR_IFALLYES(header:fcntl.h define:F_SETLK, hasfcntlser="1", hasfcntlser="0")
# note: the current APR use of shared mutex requires /dev/zero
@@ -2183,9 +2193,9 @@ APR_IFALLYES(func:flock define:LOCK_EX,
APR_DECIDE(USE_FLOCK_SERIALIZE, [4.2BSD-style flock()]))
APR_IFALLYES(header:fcntl.h define:F_SETLK,
APR_DECIDE(USE_FCNTL_SERIALIZE, [SVR4-style fcntl()]))
-APR_IFALLYES(func:semget func:semctl define:SEM_UNDO,
+APR_IFALLYES(func:semget func:semctl func:semop define:SEM_UNDO,
APR_DECIDE(USE_SYSVSEM_SERIALIZE, [SysV IPC semget()]))
-APR_IFALLYES(header:OS.h func:create_sem,
+APR_IFALLYES(header:OS.h func:create_sem func:acquire_sem func:acquire_sem_etc,
APR_DECIDE(USE_BEOSSEM, [BeOS Semaphores]))
if test "x$apr_lock_method" != "x"; then
APR_DECISION_FORCE($apr_lock_method)
diff --git a/include/apr_global_mutex.h b/include/apr_global_mutex.h
index 3cd384704..e86048cac 100644
--- a/include/apr_global_mutex.h
+++ b/include/apr_global_mutex.h
@@ -29,6 +29,7 @@
#if APR_PROC_MUTEX_IS_GLOBAL
#include "apr_proc_mutex.h"
#endif
+#include "apr_time.h"
#ifdef __cplusplus
extern "C" {
@@ -66,6 +67,7 @@ typedef struct apr_global_mutex_t apr_global_mutex_t;
* APR_LOCK_POSIXSEM
* APR_LOCK_PROC_PTHREAD
* APR_LOCK_DEFAULT pick the default mechanism for the platform
+ * APR_LOCK_DEFAULT_TIMED pick the default timed mechanism
* </PRE>
* @param pool the pool from which to allocate the mutex.
* @warning Check APR_HAS_foo_SERIALIZE defines to see if the platform supports
@@ -109,6 +111,17 @@ APR_DECLARE(apr_status_t) apr_global_mutex_lock(apr_global_mutex_t *mutex);
APR_DECLARE(apr_status_t) apr_global_mutex_trylock(apr_global_mutex_t *mutex);
/**
+ * Attempt to acquire the lock for the given mutex until timeout expires.
+ * If the acquisition times out, the call returns APR_TIMEUP.
+ * @param mutex the mutex on which to attempt the lock acquiring.
+ * @param timeout the absolute (non 0) or relative (0) timeout
+ * @param absolute whether the timeout given is absolute or relative
+ */
+APR_DECLARE(apr_status_t) apr_global_mutex_timedlock(apr_global_mutex_t *mutex,
+ apr_time_t timeout,
+ int absolute);
+
+/**
* Release the lock for the given mutex.
* @param mutex the mutex from which to release the lock.
*/
diff --git a/include/apr_proc_mutex.h b/include/apr_proc_mutex.h
index 1c6d19d6b..09fde2ff2 100644
--- a/include/apr_proc_mutex.h
+++ b/include/apr_proc_mutex.h
@@ -26,6 +26,7 @@
#include "apr_pools.h"
#include "apr_errno.h"
#include "apr_perms_set.h"
+#include "apr_time.h"
#ifdef __cplusplus
extern "C" {
@@ -48,7 +49,8 @@ typedef enum {
APR_LOCK_SYSVSEM, /**< System V Semaphores */
APR_LOCK_PROC_PTHREAD, /**< POSIX pthread process-based locking */
APR_LOCK_POSIXSEM, /**< POSIX semaphore process-based locking */
- APR_LOCK_DEFAULT /**< Use the default process lock */
+ APR_LOCK_DEFAULT, /**< Use the default process lock */
+ APR_LOCK_DEFAULT_TIMED /**< Use the default process timed lock */
} apr_lockmech_e;
/** Opaque structure representing a process mutex. */
@@ -114,6 +116,17 @@ APR_DECLARE(apr_status_t) apr_proc_mutex_lock(apr_proc_mutex_t *mutex);
APR_DECLARE(apr_status_t) apr_proc_mutex_trylock(apr_proc_mutex_t *mutex);
/**
+ * Attempt to acquire the lock for the given mutex until timeout expires.
+ * If the acquisition times out, the call returns APR_TIMEUP.
+ * @param mutex the mutex on which to attempt the lock acquiring.
+ * @param timeout the absolute (non 0) or relative (0) timeout
+ * @param absolute whether the timeout given is absolute or relative
+ */
+APR_DECLARE(apr_status_t) apr_proc_mutex_timedlock(apr_proc_mutex_t *mutex,
+ apr_time_t timeout,
+ int absolute);
+
+/**
* Release the lock for the given mutex.
* @param mutex the mutex from which to release the lock.
*/
diff --git a/include/apr_thread_mutex.h b/include/apr_thread_mutex.h
index 193a70a38..f95b93718 100644
--- a/include/apr_thread_mutex.h
+++ b/include/apr_thread_mutex.h
@@ -24,6 +24,7 @@
#include "apr.h"
#include "apr_errno.h"
+#include "apr_time.h"
#ifdef __cplusplus
extern "C" {
@@ -43,6 +44,7 @@ typedef struct apr_thread_mutex_t apr_thread_mutex_t;
#define APR_THREAD_MUTEX_DEFAULT 0x0 /**< platform-optimal lock behavior */
#define APR_THREAD_MUTEX_NESTED 0x1 /**< enable nested (recursive) locks */
#define APR_THREAD_MUTEX_UNNESTED 0x2 /**< disable nested locks */
+#define APR_THREAD_MUTEX_TIMED 0x4 /**< enable timed locks */
/* Delayed the include to avoid a circular reference */
#include "apr_pools.h"
@@ -82,6 +84,17 @@ APR_DECLARE(apr_status_t) apr_thread_mutex_lock(apr_thread_mutex_t *mutex);
APR_DECLARE(apr_status_t) apr_thread_mutex_trylock(apr_thread_mutex_t *mutex);
/**
+ * Attempt to acquire the lock for the given mutex until timeout expires.
+ * If the acquisition times out, the call returns APR_TIMEUP.
+ * @param mutex the mutex on which to attempt the lock acquiring.
+ * @param timeout the absolute (non 0) or relative (0) timeout
+ * @param absolute whether the timeout given is absolute or relative
+ */
+APR_DECLARE(apr_status_t) apr_thread_mutex_timedlock(apr_thread_mutex_t *mutex,
+ apr_time_t timeout,
+ int absolute);
+
+/**
* Release the lock for the given mutex.
* @param mutex the mutex from which to release the lock.
*/
diff --git a/include/apr_time.h b/include/apr_time.h
index 15e0b9611..b0efd791c 100644
--- a/include/apr_time.h
+++ b/include/apr_time.h
@@ -23,7 +23,6 @@
*/
#include "apr.h"
-#include "apr_pools.h"
#include "apr_errno.h"
#ifdef __cplusplus
@@ -120,6 +119,9 @@ struct apr_time_exp_t {
apr_int32_t tm_gmtoff;
};
+/* Delayed the include to avoid a circular reference */
+#include "apr_pools.h"
+
/**
* Convert an ansi time_t to an apr_time_t
* @param result the resulting apr_time_t
diff --git a/include/arch/netware/apr_arch_thread_mutex.h b/include/arch/netware/apr_arch_thread_mutex.h
index 0453799c2..18702fc7d 100644
--- a/include/arch/netware/apr_arch_thread_mutex.h
+++ b/include/arch/netware/apr_arch_thread_mutex.h
@@ -18,11 +18,14 @@
#define THREAD_MUTEX_H
#include "apr_thread_mutex.h"
+#include "apr_thread_cond.h"
#include <nks/synch.h>
struct apr_thread_mutex_t {
apr_pool_t *pool;
NXMutex_t *mutex;
+ apr_thread_cond_t *cond;
+ int locked, num_waiters;
};
#endif /* THREAD_MUTEX_H */
diff --git a/include/arch/unix/apr_arch_proc_mutex.h b/include/arch/unix/apr_arch_proc_mutex.h
index c582eeb21..37ecd0b5a 100644
--- a/include/arch/unix/apr_arch_proc_mutex.h
+++ b/include/arch/unix/apr_arch_proc_mutex.h
@@ -26,6 +26,7 @@
#include "apr_portable.h"
#include "apr_file_io.h"
#include "apr_arch_file_io.h"
+#include "apr_time.h"
/* System headers required by Locks library */
#if APR_HAVE_SYS_TYPES_H
@@ -72,6 +73,7 @@ struct apr_proc_mutex_unix_lock_methods_t {
apr_status_t (*create)(apr_proc_mutex_t *, const char *);
apr_status_t (*acquire)(apr_proc_mutex_t *);
apr_status_t (*tryacquire)(apr_proc_mutex_t *);
+ apr_status_t (*timedacquire)(apr_proc_mutex_t *, apr_time_t, int);
apr_status_t (*release)(apr_proc_mutex_t *);
apr_status_t (*cleanup)(void *);
apr_status_t (*child_init)(apr_proc_mutex_t **, apr_pool_t *, const char *);
diff --git a/include/arch/unix/apr_arch_thread_mutex.h b/include/arch/unix/apr_arch_thread_mutex.h
index 40cdef3c6..4fe46c3b4 100644
--- a/include/arch/unix/apr_arch_thread_mutex.h
+++ b/include/arch/unix/apr_arch_thread_mutex.h
@@ -21,6 +21,7 @@
#include "apr_private.h"
#include "apr_general.h"
#include "apr_thread_mutex.h"
+#include "apr_thread_cond.h"
#include "apr_portable.h"
#include "apr_atomic.h"
@@ -32,6 +33,8 @@
struct apr_thread_mutex_t {
apr_pool_t *pool;
pthread_mutex_t mutex;
+ apr_thread_cond_t *cond;
+ int locked, num_waiters;
};
#endif
diff --git a/locks/beos/proc_mutex.c b/locks/beos/proc_mutex.c
index a02668add..0283040bb 100644
--- a/locks/beos/proc_mutex.c
+++ b/locks/beos/proc_mutex.c
@@ -27,13 +27,13 @@ static apr_status_t _proc_mutex_cleanup(void * data)
apr_proc_mutex_t *lock = (apr_proc_mutex_t*)data;
if (lock->LockCount != 0) {
/* we're still locked... */
- while (atomic_add(&lock->LockCount , -1) > 1){
- /* OK we had more than one person waiting on the lock so
- * the sem is also locked. Release it until we have no more
- * locks left.
- */
+ while (atomic_add(&lock->LockCount , -1) > 1){
+ /* OK we had more than one person waiting on the lock so
+ * the sem is also locked. Release it until we have no more
+ * locks left.
+ */
release_sem (lock->Lock);
- }
+ }
}
delete_sem(lock->Lock);
return APR_SUCCESS;
@@ -47,7 +47,7 @@ APR_DECLARE(apr_status_t) apr_proc_mutex_create(apr_proc_mutex_t **mutex,
apr_proc_mutex_t *new;
apr_status_t stat = APR_SUCCESS;
- if (mech != APR_LOCK_DEFAULT) {
+ if (mech != APR_LOCK_DEFAULT && mech != APR_LOCK_DEFAULT_TIMED) {
return APR_ENOTIMPL;
}
@@ -82,25 +82,76 @@ APR_DECLARE(apr_status_t) apr_proc_mutex_lock(apr_proc_mutex_t *mutex)
{
int32 stat;
- if (atomic_add(&mutex->LockCount, 1) > 0) {
- if ((stat = acquire_sem(mutex->Lock)) < B_NO_ERROR) {
- atomic_add(&mutex->LockCount, -1);
- return stat;
- }
- }
+ if (atomic_add(&mutex->LockCount, 1) > 0) {
+ if ((stat = acquire_sem(mutex->Lock)) < B_NO_ERROR) {
+ atomic_add(&mutex->LockCount, -1);
+ return stat;
+ }
+ }
return APR_SUCCESS;
}
APR_DECLARE(apr_status_t) apr_proc_mutex_trylock(apr_proc_mutex_t *mutex)
{
- return APR_ENOTIMPL;
+ int32 stat;
+
+ if (atomic_add(&mutex->LockCount, 1) > 0) {
+ stat = acquire_sem_etc(mutex->Lock, 1, 0, 0);
+ if (stat < B_NO_ERROR) {
+ atomic_add(&mutex->LockCount, -1);
+ if (stat == B_WOULD_BLOCK) {
+ stat = APR_EBUSY;
+ }
+ return stat;
+ }
+ }
+ return APR_SUCCESS;
+}
+
+APR_DECLARE(apr_status_t) apr_proc_mutex_timedlock(apr_proc_mutex_t *mutex,
+ apr_time_t timeout,
+ int absolute)
+{
+ int32 stat;
+
+ if (atomic_add(&mutex->LockCount, 1) > 0) {
+ int flag = 0;
+#ifdef B_ABSOLUTE_TIMEOUT
+ if (timeout) {
+ flag = absolute ? B_ABSOLUTE_TIMEOUT : B_RELATIVE_TIMEOUT;
+ }
+ stat = acquire_sem_etc(mutex->Lock, 1, flag, timeout);
+#else
+ if (absolute) {
+ apr_time_t now = apr_time_now();
+ if (timeout > now) {
+ timeout -= now;
+ }
+ else {
+ timeout = 0;
+ }
+ }
+ if (timeout) {
+ flag = B_RELATIVE_TIMEOUT;
+ }
+ stat = acquire_sem_etc(mutex->Lock, 1, flag, timeout);
+#endif
+ if (stat < B_NO_ERROR) {
+ atomic_add(&mutex->LockCount, -1);
+ if (stat == B_TIMED_OUT) {
+ stat = APR_TIMEUP;
+ }
+ return stat;
+ }
+ }
+ return APR_SUCCESS;
}
APR_DECLARE(apr_status_t) apr_proc_mutex_unlock(apr_proc_mutex_t *mutex)
{
int32 stat;
- if (atomic_add(&mutex->LockCount, -1) > 1) {
+ if (atomic_add(&mutex->LockCount, -1) > 1) {
if ((stat = release_sem(mutex->Lock)) < B_NO_ERROR) {
atomic_add(&mutex->LockCount, 1);
return stat;
diff --git a/locks/beos/thread_cond.c b/locks/beos/thread_cond.c
index 44189d908..a0978c008 100644
--- a/locks/beos/thread_cond.c
+++ b/locks/beos/thread_cond.c
@@ -81,7 +81,7 @@ APR_DECLARE(apr_status_t) apr_thread_cond_create(apr_thread_cond_t **cond,
static apr_status_t do_wait(apr_thread_cond_t *cond, apr_thread_mutex_t *mutex,
- int timeout)
+ apr_interval_time_t timeout)
{
struct waiter_t *wait;
thread_id cth = find_thread(NULL);
diff --git a/locks/beos/thread_mutex.c b/locks/beos/thread_mutex.c
index b87f76606..257d12508 100644
--- a/locks/beos/thread_mutex.c
+++ b/locks/beos/thread_mutex.c
@@ -27,13 +27,13 @@ static apr_status_t _thread_mutex_cleanup(void * data)
apr_thread_mutex_t *lock = (apr_thread_mutex_t*)data;
if (lock->LockCount != 0) {
/* we're still locked... */
- while (atomic_add(&lock->LockCount , -1) > 1){
- /* OK we had more than one person waiting on the lock so
- * the sem is also locked. Release it until we have no more
- * locks left.
- */
+ while (atomic_add(&lock->LockCount , -1) > 1){
+ /* OK we had more than one person waiting on the lock so
+ * the sem is also locked. Release it until we have no more
+ * locks left.
+ */
release_sem (lock->Lock);
- }
+ }
}
delete_sem(lock->Lock);
return APR_SUCCESS;
@@ -91,13 +91,13 @@ APR_DECLARE(apr_status_t) apr_thread_mutex_lock(apr_thread_mutex_t *mutex)
return APR_SUCCESS;
}
- if (atomic_add(&mutex->LockCount, 1) > 0) {
- if ((stat = acquire_sem(mutex->Lock)) < B_NO_ERROR) {
+ if (atomic_add(&mutex->LockCount, 1) > 0) {
+ if ((stat = acquire_sem(mutex->Lock)) < B_NO_ERROR) {
/* Oh dear, acquire_sem failed!! */
- atomic_add(&mutex->LockCount, -1);
- return stat;
- }
- }
+ atomic_add(&mutex->LockCount, -1);
+ return stat;
+ }
+ }
mutex->owner = me;
mutex->owner_ref = 1;
@@ -107,7 +107,74 @@ APR_DECLARE(apr_status_t) apr_thread_mutex_lock(apr_thread_mutex_t *mutex)
APR_DECLARE(apr_status_t) apr_thread_mutex_trylock(apr_thread_mutex_t *mutex)
{
- return APR_ENOTIMPL;
+ int32 stat;
+ thread_id me = find_thread(NULL);
+
+ if (mutex->nested && mutex->owner == me) {
+ mutex->owner_ref++;
+ return APR_SUCCESS;
+ }
+
+ if (atomic_add(&mutex->LockCount, 1) > 0) {
+ if ((stat = acquire_sem_etc(mutex->Lock, 1,
+ B_TIMEOUT, 0)) < B_NO_ERROR) {
+ atomic_add(&mutex->LockCount, -1);
+ if (stat == B_WOULD_BLOCK) {
+ stat = APR_EBUSY;
+ }
+ return stat;
+ }
+ }
+
+ mutex->owner = me;
+ mutex->owner_ref = 1;
+
+ return APR_SUCCESS;
+}
+
+APR_DECLARE(apr_status_t) apr_thread_mutex_timedlock(apr_thread_mutex_t *mutex,
+ apr_time_t timeout,
+ int absolute)
+{
+ int32 stat;
+ thread_id me = find_thread(NULL);
+
+ if (mutex->nested && mutex->owner == me) {
+ mutex->owner_ref++;
+ return APR_SUCCESS;
+ }
+
+ if (atomic_add(&mutex->LockCount, 1) > 0) {
+#ifdef B_ABSOLUTE_TIMEOUT
+ stat = acquire_sem_etc(mutex->Lock, 1,
+ absolute ? B_ABSOLUTE_TIMEOUT
+ : B_RELATIVE_TIMEOUT,
+ timeout);
+#else
+ if (absolute) {
+ apr_time_t now = apr_time_now();
+ if (timeout > now) {
+ timeout -= now;
+ }
+ else {
+ timeout = 0;
+ }
+ }
+ stat = acquire_sem_etc(mutex->Lock, 1, B_TIMEOUT, timeout);
+#endif
+ if (stat < B_NO_ERROR) {
+ atomic_add(&mutex->LockCount, -1);
+ if (stat == B_TIMED_OUT) {
+ stat = APR_TIMEUP;
+ }
+ return stat;
+ }
+ }
+
+ mutex->owner = me;
+ mutex->owner_ref = 1;
+
+ return APR_SUCCESS;
}
APR_DECLARE(apr_status_t) apr_thread_mutex_unlock(apr_thread_mutex_t *mutex)
@@ -120,7 +187,7 @@ APR_DECLARE(apr_status_t) apr_thread_mutex_unlock(apr_thread_mutex_t *mutex)
return APR_SUCCESS;
}
- if (atomic_add(&mutex->LockCount, -1) > 1) {
+ if (atomic_add(&mutex->LockCount, -1) > 1) {
if ((stat = release_sem(mutex->Lock)) < B_NO_ERROR) {
atomic_add(&mutex->LockCount, 1);
return stat;
diff --git a/locks/netware/proc_mutex.c b/locks/netware/proc_mutex.c
index 097914645..4217b1cf0 100644
--- a/locks/netware/proc_mutex.c
+++ b/locks/netware/proc_mutex.c
@@ -26,15 +26,24 @@ APR_DECLARE(apr_status_t) apr_proc_mutex_create(apr_proc_mutex_t **mutex,
apr_pool_t *pool)
{
apr_status_t ret;
- apr_proc_mutex_t *new_mutex = NULL;
+ apr_proc_mutex_t *new_mutex;
+ unsigned int flags = APR_THREAD_MUTEX_DEFAULT;
+
+ *mutex = NULL;
+ if (mech == APR_LOCK_DEFAULT_TIMED) {
+ flags |= APR_THREAD_MUTEX_TIMED;
+ }
+ else if (mech != APR_LOCK_DEFAULT) {
+ return APR_ENOTIMPL;
+ }
+
new_mutex = (apr_proc_mutex_t *)apr_pcalloc(pool, sizeof(apr_proc_mutex_t));
-
- if(new_mutex ==NULL) {
+ if (new_mutex == NULL) {
return APR_ENOMEM;
}
new_mutex->pool = pool;
- ret = apr_thread_mutex_create(&(new_mutex->mutex), APR_THREAD_MUTEX_DEFAULT, pool);
+ ret = apr_thread_mutex_create(&(new_mutex->mutex), flags, pool);
if (ret == APR_SUCCESS)
*mutex = new_mutex;
@@ -63,6 +72,15 @@ APR_DECLARE(apr_status_t) apr_proc_mutex_trylock(apr_proc_mutex_t *mutex)
return APR_ENOLOCK;
}
+APR_DECLARE(apr_status_t) apr_proc_mutex_timedlock(apr_proc_mutex_t *mutex,
+                                                   apr_time_t timeout,
+                                                   int absolute)
+{
+    /* NetWare proc mutexes are backed by a thread mutex; delegate. */
+    if (mutex)
+        return apr_thread_mutex_timedlock(mutex->mutex, timeout, absolute);
+    return APR_ENOLOCK;
+}
+
APR_DECLARE(apr_status_t) apr_proc_mutex_unlock(apr_proc_mutex_t *mutex)
{
if (mutex)
diff --git a/locks/netware/thread_mutex.c b/locks/netware/thread_mutex.c
index 98bf33bd2..a43215dce 100644
--- a/locks/netware/thread_mutex.c
+++ b/locks/netware/thread_mutex.c
@@ -19,6 +19,7 @@
#include "apr_general.h"
#include "apr_strings.h"
#include "apr_arch_thread_mutex.h"
+#include "apr_thread_cond.h"
#include "apr_portable.h"
static apr_status_t thread_mutex_cleanup(void *data)
@@ -41,8 +42,8 @@ APR_DECLARE(apr_status_t) apr_thread_mutex_create(apr_thread_mutex_t **mutex,
return APR_ENOTIMPL;
}
new_mutex = (apr_thread_mutex_t *)apr_pcalloc(pool, sizeof(apr_thread_mutex_t));
-
- if(new_mutex ==NULL) {
+
+ if (new_mutex == NULL) {
return APR_ENOMEM;
}
new_mutex->pool = pool;
@@ -52,6 +53,14 @@ APR_DECLARE(apr_status_t) apr_thread_mutex_create(apr_thread_mutex_t **mutex,
if(new_mutex->mutex == NULL)
return APR_ENOMEM;
+    if (flags & APR_THREAD_MUTEX_TIMED) {
+        /* Timed locks are emulated with a condition variable. */
+        apr_status_t rv = apr_thread_cond_create(&new_mutex->cond, pool);
+        if (rv != APR_SUCCESS) {
+            NXMutexFree(new_mutex->mutex);
+            return rv;
+        }
+    }
+
apr_pool_cleanup_register(new_mutex->pool, new_mutex,
(void*)thread_mutex_cleanup,
apr_pool_cleanup_null);
@@ -61,29 +70,112 @@ APR_DECLARE(apr_status_t) apr_thread_mutex_create(apr_thread_mutex_t **mutex,
APR_DECLARE(apr_status_t) apr_thread_mutex_lock(apr_thread_mutex_t *mutex)
{
+ if (mutex->cond) {
+ apr_status_t rv;
+ NXLock(mutex->mutex);
+ if (mutex->locked) {
+ mutex->num_waiters++;
+ rv = apr_thread_cond_wait(mutex->cond, mutex);
+ mutex->num_waiters--;
+ }
+ else {
+ mutex->locked = 1;
+ rv = APR_SUCCESS;
+ }
+ NXUnlock(mutex->mutex);
+ return rv;
+ }
+
NXLock(mutex->mutex);
return APR_SUCCESS;
}
APR_DECLARE(apr_status_t) apr_thread_mutex_trylock(apr_thread_mutex_t *mutex)
{
+ if (mutex->cond) {
+ apr_status_t rv;
+ NXLock(mutex->mutex);
+ if (mutex->locked) {
+ rv = APR_EBUSY;
+ }
+ else {
+ mutex->locked = 1;
+ rv = APR_SUCCESS;
+ }
+ NXUnlock(mutex->mutex);
+ return rv;
+ }
+
if (!NXTryLock(mutex->mutex))
return APR_EBUSY;
return APR_SUCCESS;
}
+APR_DECLARE(apr_status_t) apr_thread_mutex_timedlock(apr_thread_mutex_t *mutex,
+ apr_time_t timeout,
+ int absolute)
+{
+ if (mutex->cond) {
+ apr_status_t rv;
+ NXLock(mutex->mutex);
+ if (mutex->locked) {
+ if (absolute) {
+ apr_time_t now = apr_time_now();
+ if (timeout > now) {
+ timeout -= now;
+ }
+ else {
+ timeout = 0;
+ }
+ }
+ mutex->num_waiters++;
+ rv = apr_thread_cond_timedwait(mutex->cond, mutex, timeout);
+ mutex->num_waiters--;
+ }
+ else {
+ mutex->locked = 1;
+ rv = APR_SUCCESS;
+ }
+ NXUnlock(mutex->mutex);
+ return rv;
+ }
+
+ return APR_ENOTIMPL;
+}
+
APR_DECLARE(apr_status_t) apr_thread_mutex_unlock(apr_thread_mutex_t *mutex)
{
+ if (mutex->cond) {
+ apr_status_t rv;
+ NXLock(mutex->mutex);
+ if (!mutex->locked) {
+ rv = APR_EINVAL;
+ }
+ else if (mutex->num_waiters) {
+ rv = apr_thread_cond_signal(mutex->cond);
+ }
+ else {
+ mutex->locked = 0;
+ rv = APR_SUCCESS;
+ }
+ NXUnlock(mutex->mutex);
+ return rv;
+ }
+
NXUnlock(mutex->mutex);
return APR_SUCCESS;
}
APR_DECLARE(apr_status_t) apr_thread_mutex_destroy(apr_thread_mutex_t *mutex)
{
- apr_status_t stat;
- if ((stat = thread_mutex_cleanup(mutex)) == APR_SUCCESS) {
- apr_pool_cleanup_kill(mutex->pool, mutex, thread_mutex_cleanup);
- return APR_SUCCESS;
+ apr_status_t stat, rv = APR_SUCCESS;
+ if (mutex->cond) {
+ rv = apr_thread_cond_destroy(mutex->cond);
+ mutex->cond = NULL;
+ }
+ stat = apr_pool_cleanup_run(mutex->pool, mutex, thread_mutex_cleanup);
+ if (stat == APR_SUCCESS && rv) {
+ stat = rv;
}
return stat;
}
diff --git a/locks/os2/proc_mutex.c b/locks/os2/proc_mutex.c
index 9b53c0bef..171c119a5 100644
--- a/locks/os2/proc_mutex.c
+++ b/locks/os2/proc_mutex.c
@@ -80,7 +80,7 @@ APR_DECLARE(apr_status_t) apr_proc_mutex_create(apr_proc_mutex_t **mutex,
ULONG rc;
char *semname;
- if (mech != APR_LOCK_DEFAULT) {
+ if (mech != APR_LOCK_DEFAULT && mech != APR_LOCK_DEFAULT_TIMED) {
return APR_ENOTIMPL;
}
@@ -156,6 +156,37 @@ APR_DECLARE(apr_status_t) apr_proc_mutex_trylock(apr_proc_mutex_t *mutex)
+APR_DECLARE(apr_status_t) apr_proc_mutex_timedlock(apr_proc_mutex_t *mutex,
+ apr_time_t timeout,
+ int absolute)
+{
+ ULONG rc;
+
+ if (absolute) {
+ apr_time_t now = apr_time_now();
+ if (timeout > now) {
+ timeout -= now;
+ }
+ else {
+ timeout = 0;
+ }
+ }
+
+ rc = DosRequestMutexSem(mutex->hMutex, apr_time_as_msec(timeout));
+ if (rc == ERROR_TIMEOUT) {
+ return APR_TIMEUP;
+ }
+
+ if (rc == 0) {
+ mutex->owner = CurrentTid;
+ mutex->lock_count++;
+ }
+
+ return APR_FROM_OS_ERROR(rc);
+}
+
+
+
APR_DECLARE(apr_status_t) apr_proc_mutex_unlock(apr_proc_mutex_t *mutex)
{
ULONG rc;
diff --git a/locks/os2/thread_mutex.c b/locks/os2/thread_mutex.c
index 03ab3de4d..245b788c4 100644
--- a/locks/os2/thread_mutex.c
+++ b/locks/os2/thread_mutex.c
@@ -76,6 +76,32 @@ APR_DECLARE(apr_status_t) apr_thread_mutex_trylock(apr_thread_mutex_t *mutex)
+APR_DECLARE(apr_status_t) apr_thread_mutex_timedlock(apr_thread_mutex_t *mutex,
+                                                     apr_time_t timeout,
+                                                     int absolute)
+{
+    ULONG rc;
+
+    /* Convert an absolute deadline to the relative interval that
+     * DosRequestMutexSem() expects; clamp expired deadlines to 0.
+     */
+    if (absolute) {
+        apr_time_t now = apr_time_now();
+        if (timeout > now) {
+            timeout -= now;
+        }
+        else {
+            timeout = 0;
+        }
+    }
+
+    rc = DosRequestMutexSem(mutex->hMutex, apr_time_as_msec(timeout));
+    if (rc == ERROR_TIMEOUT) {
+        return APR_TIMEUP;
+    }
+
+    return APR_FROM_OS_ERROR(rc);
+}
+
+
+
APR_DECLARE(apr_status_t) apr_thread_mutex_unlock(apr_thread_mutex_t *mutex)
{
ULONG rc = DosReleaseMutexSem(mutex->hMutex);
diff --git a/locks/unix/global_mutex.c b/locks/unix/global_mutex.c
index ca3b86e46..b5523d852 100644
--- a/locks/unix/global_mutex.c
+++ b/locks/unix/global_mutex.c
@@ -141,6 +141,38 @@ APR_DECLARE(apr_status_t) apr_global_mutex_trylock(apr_global_mutex_t *mutex)
return rv;
}
+APR_DECLARE(apr_status_t) apr_global_mutex_timedlock(apr_global_mutex_t *mutex,
+ apr_time_t timeout,
+ int absolute)
+{
+ apr_status_t rv;
+
+ if (!absolute) {
+ timeout += apr_time_now();
+ }
+
+#if APR_HAS_THREADS
+ if (mutex->thread_mutex) {
+ rv = apr_thread_mutex_timedlock(mutex->thread_mutex, timeout, 1);
+ if (rv != APR_SUCCESS) {
+ return rv;
+ }
+ }
+#endif /* APR_HAS_THREADS */
+
+ rv = apr_proc_mutex_timedlock(mutex->proc_mutex, timeout, 1);
+
+#if APR_HAS_THREADS
+ if (rv != APR_SUCCESS) {
+ if (mutex->thread_mutex) {
+ (void)apr_thread_mutex_unlock(mutex->thread_mutex);
+ }
+ }
+#endif /* APR_HAS_THREADS */
+
+ return rv;
+}
+
APR_DECLARE(apr_status_t) apr_global_mutex_unlock(apr_global_mutex_t *mutex)
{
apr_status_t rv;
diff --git a/locks/unix/proc_mutex.c b/locks/unix/proc_mutex.c
index 4105ff3ef..5ac787f0c 100644
--- a/locks/unix/proc_mutex.c
+++ b/locks/unix/proc_mutex.c
@@ -174,6 +174,32 @@ static apr_status_t proc_mutex_posix_tryacquire(apr_proc_mutex_t *mutex)
return APR_SUCCESS;
}
+static apr_status_t proc_mutex_posix_timedacquire(apr_proc_mutex_t *mutex,
+                                                  apr_time_t timeout,
+                                                  int absolute)
+{
+#if HAVE_SEM_TIMEDWAIT
+    int rc;
+    struct timespec abstime;
+
+    if (!absolute) {
+        timeout += apr_time_now();
+    }
+    abstime.tv_sec = apr_time_sec(timeout);
+    abstime.tv_nsec = apr_time_usec(timeout) * 1000; /* nanoseconds */
+
+    /* Retry on EINTR, like the SysV semtimedop() implementation:
+     * sem_timedwait() may be interrupted by a signal.
+     */
+    do {
+        rc = sem_timedwait(mutex->psem_interproc, &abstime);
+    } while (rc < 0 && errno == EINTR);
+    if (rc < 0) {
+        if (errno == ETIMEDOUT) {
+            return APR_TIMEUP;
+        }
+        return errno;
+    }
+    mutex->curr_locked = 1;
+    return APR_SUCCESS;
+#else
+    return APR_ENOTIMPL;
+#endif
+}
+
static apr_status_t proc_mutex_posix_release(apr_proc_mutex_t *mutex)
{
mutex->curr_locked = 0;
@@ -195,6 +221,7 @@ static const apr_proc_mutex_unix_lock_methods_t mutex_posixsem_methods =
proc_mutex_posix_create,
proc_mutex_posix_acquire,
proc_mutex_posix_tryacquire,
+ proc_mutex_posix_timedacquire,
proc_mutex_posix_release,
proc_mutex_posix_cleanup,
proc_mutex_no_child_init,
@@ -293,6 +320,37 @@ static apr_status_t proc_mutex_sysv_tryacquire(apr_proc_mutex_t *mutex)
return APR_SUCCESS;
}
+static apr_status_t proc_mutex_sysv_timedacquire(apr_proc_mutex_t *mutex,
+ apr_time_t timeout,
+ int absolute)
+{
+#if HAVE_SEMTIMEDOP
+ int rc;
+ struct timespec abstime;
+
+ if (!absolute) {
+ timeout += apr_time_now();
+ }
+ abstime.tv_sec = apr_time_sec(timeout);
+ abstime.tv_nsec = apr_time_usec(timeout) * 1000; /* nanoseconds */
+
+ do {
+ rc = semtimedop(mutex->interproc->filedes, &proc_mutex_op_on, 1,
+ &abstime);
+ } while (rc < 0 && errno == EINTR);
+ if (rc < 0) {
+ if (errno == EAGAIN) {
+ return APR_TIMEUP;
+ }
+ return errno;
+ }
+ mutex->curr_locked = 1;
+ return APR_SUCCESS;
+#else
+ return APR_ENOTIMPL;
+#endif
+}
+
static apr_status_t proc_mutex_sysv_release(apr_proc_mutex_t *mutex)
{
int rc;
@@ -335,6 +393,7 @@ static const apr_proc_mutex_unix_lock_methods_t mutex_sysv_methods =
proc_mutex_sysv_create,
proc_mutex_sysv_acquire,
proc_mutex_sysv_tryacquire,
+ proc_mutex_sysv_timedacquire,
proc_mutex_sysv_release,
proc_mutex_sysv_cleanup,
proc_mutex_no_child_init,
@@ -511,6 +570,43 @@ static apr_status_t proc_mutex_proc_pthread_tryacquire(apr_proc_mutex_t *mutex)
return rv;
}
+static apr_status_t
+proc_mutex_proc_pthread_timedacquire(apr_proc_mutex_t *mutex,
+ apr_time_t timeout,
+ int absolute)
+{
+ apr_status_t rv;
+ struct timespec abstime;
+
+ if (!absolute) {
+ timeout += apr_time_now();
+ }
+ abstime.tv_sec = apr_time_sec(timeout);
+ abstime.tv_nsec = apr_time_usec(timeout) * 1000; /* nanoseconds */
+
+ if ((rv = pthread_mutex_timedlock(mutex->pthread_interproc, &abstime))) {
+#ifdef HAVE_ZOS_PTHREADS
+ rv = errno;
+#endif
+ if (rv == ETIMEDOUT) {
+ return APR_TIMEUP;
+ }
+#ifdef HAVE_PTHREAD_MUTEX_ROBUST
+ /* Okay, our owner died. Let's try to make it consistent again. */
+ if (rv == EOWNERDEAD) {
+ pthread_mutex_consistent_np(mutex->pthread_interproc);
+ rv = APR_SUCCESS;
+ }
+ else
+ return rv;
+#else
+ return rv;
+#endif
+ }
+ mutex->curr_locked = 1;
+ return rv;
+}
+
static apr_status_t proc_mutex_proc_pthread_release(apr_proc_mutex_t *mutex)
{
apr_status_t rv;
@@ -531,6 +627,7 @@ static const apr_proc_mutex_unix_lock_methods_t mutex_proc_pthread_methods =
proc_mutex_proc_pthread_create,
proc_mutex_proc_pthread_acquire,
proc_mutex_proc_pthread_tryacquire,
+ proc_mutex_proc_pthread_timedacquire,
proc_mutex_proc_pthread_release,
proc_mutex_proc_pthread_cleanup,
proc_mutex_no_child_init,
@@ -642,6 +739,13 @@ static apr_status_t proc_mutex_fcntl_tryacquire(apr_proc_mutex_t *mutex)
return APR_SUCCESS;
}
+static apr_status_t proc_mutex_fcntl_timedacquire(apr_proc_mutex_t *mutex,
+ apr_time_t timeout,
+ int absolute)
+{
+ return APR_ENOTIMPL;
+}
+
static apr_status_t proc_mutex_fcntl_release(apr_proc_mutex_t *mutex)
{
int rc;
@@ -682,6 +786,7 @@ static const apr_proc_mutex_unix_lock_methods_t mutex_fcntl_methods =
proc_mutex_fcntl_create,
proc_mutex_fcntl_acquire,
proc_mutex_fcntl_tryacquire,
+ proc_mutex_fcntl_timedacquire,
proc_mutex_fcntl_release,
proc_mutex_fcntl_cleanup,
proc_mutex_no_child_init,
@@ -773,6 +878,13 @@ static apr_status_t proc_mutex_flock_tryacquire(apr_proc_mutex_t *mutex)
return APR_SUCCESS;
}
+static apr_status_t proc_mutex_flock_timedacquire(apr_proc_mutex_t *mutex,
+ apr_time_t timeout,
+ int absolute)
+{
+ return APR_ENOTIMPL;
+}
+
static apr_status_t proc_mutex_flock_release(apr_proc_mutex_t *mutex)
{
int rc;
@@ -837,6 +949,7 @@ static const apr_proc_mutex_unix_lock_methods_t mutex_flock_methods =
proc_mutex_flock_create,
proc_mutex_flock_acquire,
proc_mutex_flock_tryacquire,
+ proc_mutex_flock_timedacquire,
proc_mutex_flock_release,
proc_mutex_flock_cleanup,
proc_mutex_flock_child_init,
@@ -910,6 +1023,21 @@ static apr_status_t proc_mutex_choose_method(apr_proc_mutex_t *new_mutex, apr_lo
return APR_ENOTIMPL;
#endif
break;
+ case APR_LOCK_DEFAULT_TIMED:
+#if APR_HAS_PROC_PTHREAD_SERIALIZE \
+ && defined(HAVE_PTHREAD_MUTEX_ROBUST) \
+ && defined(HAVE_PTHREAD_MUTEX_TIMEDLOCK)
+ new_mutex->inter_meth = &mutex_proc_pthread_methods;
+#elif APR_HAS_SYSVSEM_SERIALIZE \
+ && defined(HAVE_SEMTIMEDOP)
+ new_mutex->inter_meth = &mutex_sysv_methods;
+#elif APR_HAS_POSIXSEM_SERIALIZE \
+ && defined(HAVE_SEM_TIMEDWAIT)
+ new_mutex->inter_meth = &mutex_posixsem_methods;
+#else
+ return APR_ENOTIMPL;
+#endif
+ break;
default:
return APR_ENOTIMPL;
}
@@ -981,6 +1109,13 @@ APR_DECLARE(apr_status_t) apr_proc_mutex_trylock(apr_proc_mutex_t *mutex)
return mutex->meth->tryacquire(mutex);
}
+APR_DECLARE(apr_status_t) apr_proc_mutex_timedlock(apr_proc_mutex_t *mutex,
+ apr_time_t timeout,
+ int absolute)
+{
+ return mutex->meth->timedacquire(mutex, timeout, absolute);
+}
+
APR_DECLARE(apr_status_t) apr_proc_mutex_unlock(apr_proc_mutex_t *mutex)
{
return mutex->meth->release(mutex);
diff --git a/locks/unix/thread_mutex.c b/locks/unix/thread_mutex.c
index 73fd1e146..297bf018d 100644
--- a/locks/unix/thread_mutex.c
+++ b/locks/unix/thread_mutex.c
@@ -77,6 +77,19 @@ APR_DECLARE(apr_status_t) apr_thread_mutex_create(apr_thread_mutex_t **mutex,
return rv;
}
+#ifndef HAVE_PTHREAD_MUTEX_TIMEDLOCK
+ if (flags & APR_THREAD_MUTEX_TIMED) {
+ rv = apr_thread_cond_create(&new_mutex->cond, pool);
+ if (rv) {
+#ifdef HAVE_ZOS_PTHREADS
+ rv = errno;
+#endif
+ pthread_mutex_destroy(&new_mutex->mutex);
+ return rv;
+ }
+ }
+#endif
+
apr_pool_cleanup_register(new_mutex->pool,
new_mutex, thread_mutex_cleanup,
apr_pool_cleanup_null);
@@ -89,13 +102,45 @@ APR_DECLARE(apr_status_t) apr_thread_mutex_lock(apr_thread_mutex_t *mutex)
{
apr_status_t rv;
+ if (mutex->cond) {
+ apr_status_t rv2;
+
+ rv = pthread_mutex_lock(&mutex->mutex);
+ if (rv) {
+#ifdef HAVE_ZOS_PTHREADS
+ rv = errno;
+#endif
+ return rv;
+ }
+
+ if (mutex->locked) {
+ mutex->num_waiters++;
+ rv = apr_thread_cond_wait(mutex->cond, mutex);
+ mutex->num_waiters--;
+ }
+ else {
+ mutex->locked = 1;
+ }
+
+ rv2 = pthread_mutex_unlock(&mutex->mutex);
+ if (rv2 && !rv) {
+#ifdef HAVE_ZOS_PTHREADS
+ rv = errno;
+#else
+ rv = rv2;
+#endif
+ }
+
+ return rv;
+ }
+
rv = pthread_mutex_lock(&mutex->mutex);
#ifdef HAVE_ZOS_PTHREADS
if (rv) {
rv = errno;
}
#endif
-
+
return rv;
}
@@ -103,6 +148,36 @@ APR_DECLARE(apr_status_t) apr_thread_mutex_trylock(apr_thread_mutex_t *mutex)
{
apr_status_t rv;
+ if (mutex->cond) {
+ apr_status_t rv2;
+
+ rv = pthread_mutex_lock(&mutex->mutex);
+ if (rv) {
+#ifdef HAVE_ZOS_PTHREADS
+ rv = errno;
+#endif
+ return rv;
+ }
+
+ if (mutex->locked) {
+ rv = APR_EBUSY;
+ }
+ else {
+ mutex->locked = 1;
+ }
+
+ rv2 = pthread_mutex_unlock(&mutex->mutex);
+ if (rv2) {
+#ifdef HAVE_ZOS_PTHREADS
+ rv = errno;
+#else
+ rv = rv2;
+#endif
+ }
+
+ return rv;
+ }
+
rv = pthread_mutex_trylock(&mutex->mutex);
if (rv) {
#ifdef HAVE_ZOS_PTHREADS
@@ -114,10 +189,115 @@ APR_DECLARE(apr_status_t) apr_thread_mutex_trylock(apr_thread_mutex_t *mutex)
return APR_SUCCESS;
}
+APR_DECLARE(apr_status_t) apr_thread_mutex_timedlock(apr_thread_mutex_t *mutex,
+ apr_time_t timeout,
+ int absolute)
+{
+ apr_status_t rv = APR_ENOTIMPL;
+
+#ifdef HAVE_PTHREAD_MUTEX_TIMEDLOCK
+ struct timespec abstime;
+
+ if (!absolute) {
+ timeout += apr_time_now();
+ }
+ abstime.tv_sec = apr_time_sec(timeout);
+ abstime.tv_nsec = apr_time_usec(timeout) * 1000; /* nanoseconds */
+
+ rv = pthread_mutex_timedlock(&mutex->mutex, &abstime);
+ if (rv) {
+#ifdef HAVE_ZOS_PTHREADS
+ rv = errno;
+#endif
+ if (rv == ETIMEDOUT) {
+ rv = APR_TIMEUP;
+ }
+ }
+
+#else /* HAVE_PTHREAD_MUTEX_TIMEDLOCK */
+
+ if (mutex->cond) {
+ apr_status_t rv2;
+
+ rv = pthread_mutex_lock(&mutex->mutex);
+ if (rv) {
+#ifdef HAVE_ZOS_PTHREADS
+ rv = errno;
+#endif
+ return rv;
+ }
+
+ if (mutex->locked) {
+ if (absolute) {
+ apr_time_t now = apr_time_now();
+ if (timeout > now) {
+ timeout -= now;
+ }
+ else {
+ timeout = 0;
+ }
+ }
+ mutex->num_waiters++;
+ rv = apr_thread_cond_timedwait(mutex->cond, mutex, timeout);
+ mutex->num_waiters--;
+ }
+ else {
+ mutex->locked = 1;
+ }
+
+ rv2 = pthread_mutex_unlock(&mutex->mutex);
+ if (rv2 && !rv) {
+#ifdef HAVE_ZOS_PTHREADS
+ rv = errno;
+#else
+ rv = rv2;
+#endif
+ }
+ }
+
+#endif /* HAVE_PTHREAD_MUTEX_TIMEDLOCK */
+
+ return rv;
+}
+
APR_DECLARE(apr_status_t) apr_thread_mutex_unlock(apr_thread_mutex_t *mutex)
{
apr_status_t status;
+ if (mutex->cond) {
+ apr_status_t stat2;
+
+ status = pthread_mutex_lock(&mutex->mutex);
+ if (status) {
+#ifdef HAVE_ZOS_PTHREADS
+ status = errno;
+#endif
+ return status;
+ }
+
+ if (!mutex->locked) {
+ status = APR_EINVAL;
+ }
+ else if (mutex->num_waiters) {
+ status = apr_thread_cond_signal(mutex->cond);
+ }
+ else {
+ mutex->locked = 0;
+ status = APR_SUCCESS;
+ }
+
+ stat2 = pthread_mutex_unlock(&mutex->mutex);
+ if (stat2) {
+#ifdef HAVE_ZOS_PTHREADS
+ status = errno;
+#else
+ status = stat2;
+#endif
+ }
+
+ return status;
+ }
+
status = pthread_mutex_unlock(&mutex->mutex);
#ifdef HAVE_ZOS_PTHREADS
if (status) {
@@ -130,7 +310,17 @@ APR_DECLARE(apr_status_t) apr_thread_mutex_unlock(apr_thread_mutex_t *mutex)
APR_DECLARE(apr_status_t) apr_thread_mutex_destroy(apr_thread_mutex_t *mutex)
{
- return apr_pool_cleanup_run(mutex->pool, mutex, thread_mutex_cleanup);
+ apr_status_t rv, rv2 = APR_SUCCESS;
+
+ if (mutex->cond) {
+ rv2 = apr_thread_cond_destroy(mutex->cond);
+ }
+ rv = apr_pool_cleanup_run(mutex->pool, mutex, thread_mutex_cleanup);
+ if (rv == APR_SUCCESS) {
+ rv = rv2;
+ }
+
+ return rv;
}
APR_POOL_IMPLEMENT_ACCESSOR(thread_mutex)
diff --git a/locks/win32/proc_mutex.c b/locks/win32/proc_mutex.c
index 38366f185..6af991e8c 100644
--- a/locks/win32/proc_mutex.c
+++ b/locks/win32/proc_mutex.c
@@ -160,6 +160,33 @@ APR_DECLARE(apr_status_t) apr_proc_mutex_trylock(apr_proc_mutex_t *mutex)
return apr_get_os_error();
}
+APR_DECLARE(apr_status_t) apr_proc_mutex_timedlock(apr_proc_mutex_t *mutex,
+ apr_time_t timeout,
+ int absolute)
+{
+ DWORD rv;
+
+ if (absolute) {
+ apr_time_t now = apr_time_now();
+ if (timeout > now) {
+ timeout -= now;
+ }
+ else {
+ timeout = 0;
+ }
+ }
+
+ rv = WaitForSingleObject(mutex->handle, apr_time_as_msec(timeout));
+
+ if (rv == WAIT_OBJECT_0 || rv == WAIT_ABANDONED) {
+ return APR_SUCCESS;
+ }
+ else if (rv == WAIT_TIMEOUT) {
+ return APR_TIMEUP;
+ }
+ return apr_get_os_error();
+}
+
APR_DECLARE(apr_status_t) apr_proc_mutex_unlock(apr_proc_mutex_t *mutex)
{
if (ReleaseMutex(mutex->handle) == 0) {
diff --git a/locks/win32/thread_mutex.c b/locks/win32/thread_mutex.c
index 9b10d7278..c467829ea 100644
--- a/locks/win32/thread_mutex.c
+++ b/locks/win32/thread_mutex.c
@@ -54,6 +54,10 @@ APR_DECLARE(apr_status_t) apr_thread_mutex_create(apr_thread_mutex_t **mutex,
(*mutex)->type = thread_mutex_unnested_event;
(*mutex)->handle = CreateEvent(NULL, FALSE, TRUE, NULL);
}
+ else if (flags & APR_THREAD_MUTEX_TIMED) {
+ (*mutex)->type = thread_mutex_nested_mutex;
+ (*mutex)->handle = CreateMutex(NULL, FALSE, NULL);
+ }
else {
#if APR_HAS_UNICODE_FS
/* Critical Sections are terrific, performance-wise, on NT.
@@ -63,6 +67,7 @@ APR_DECLARE(apr_status_t) apr_thread_mutex_create(apr_thread_mutex_t **mutex,
IF_WIN_OS_IS_UNICODE {
InitializeCriticalSection(&(*mutex)->section);
(*mutex)->type = thread_mutex_critical_section;
+ (*mutex)->handle = NULL;
}
#endif
#if APR_HAS_ANSI_FS
@@ -86,9 +91,9 @@ APR_DECLARE(apr_status_t) apr_thread_mutex_lock(apr_thread_mutex_t *mutex)
}
else {
DWORD rv = WaitForSingleObject(mutex->handle, INFINITE);
- if ((rv != WAIT_OBJECT_0) && (rv != WAIT_ABANDONED)) {
+ if ((rv != WAIT_OBJECT_0) && (rv != WAIT_ABANDONED)) {
return (rv == WAIT_TIMEOUT) ? APR_EBUSY : apr_get_os_error();
- }
+ }
}
return APR_SUCCESS;
}
@@ -102,13 +107,38 @@ APR_DECLARE(apr_status_t) apr_thread_mutex_trylock(apr_thread_mutex_t *mutex)
}
else {
DWORD rv = WaitForSingleObject(mutex->handle, 0);
- if ((rv != WAIT_OBJECT_0) && (rv != WAIT_ABANDONED)) {
+ if ((rv != WAIT_OBJECT_0) && (rv != WAIT_ABANDONED)) {
return (rv == WAIT_TIMEOUT) ? APR_EBUSY : apr_get_os_error();
- }
+ }
}
return APR_SUCCESS;
}
+APR_DECLARE(apr_status_t) apr_thread_mutex_timedlock(apr_thread_mutex_t *mutex,
+ apr_time_t timeout,
+ int absolute)
+{
+ if (mutex->type != thread_mutex_critical_section) {
+ DWORD rv;
+ if (absolute) {
+ apr_time_t now = apr_time_now();
+ if (timeout > now) {
+ timeout -= now;
+ }
+ else {
+ timeout = 0;
+ }
+ }
+ rv = WaitForSingleObject(mutex->handle, apr_time_as_msec(timeout));
+ if ((rv != WAIT_OBJECT_0) && (rv != WAIT_ABANDONED)) {
+ return (rv == WAIT_TIMEOUT) ? APR_TIMEUP : apr_get_os_error();
+ }
+ return APR_SUCCESS;
+ }
+
+ return APR_ENOTIMPL;
+}
+
APR_DECLARE(apr_status_t) apr_thread_mutex_unlock(apr_thread_mutex_t *mutex)
{
if (mutex->type == thread_mutex_critical_section) {
diff --git a/test/abts_tests.h b/test/abts_tests.h
index b7cc8ef16..793f93e51 100644
--- a/test/abts_tests.h
+++ b/test/abts_tests.h
@@ -50,7 +50,9 @@ const struct testlist {
{testoc},
{testpath},
{testpipe},
+#if 0
{testpoll},
+#endif
{testpool},
{testproc},
{testprocmutex},
diff --git a/test/testglobalmutex.c b/test/testglobalmutex.c
index d6b716c09..7340e2540 100644
--- a/test/testglobalmutex.c
+++ b/test/testglobalmutex.c
@@ -68,6 +68,7 @@ static const char *mutexname(apr_lockmech_e mech)
case APR_LOCK_PROC_PTHREAD: return "proc_pthread";
case APR_LOCK_POSIXSEM: return "posixsem";
case APR_LOCK_DEFAULT: return "default";
+ case APR_LOCK_DEFAULT_TIMED: return "default_timed";
default: return "unknown";
}
}
@@ -129,6 +130,8 @@ abts_suite *testglobalmutex(abts_suite *suite)
mech = APR_LOCK_FLOCK;
abts_run_test(suite, test_exclusive, &mech);
#endif
+ mech = APR_LOCK_DEFAULT_TIMED;
+ abts_run_test(suite, test_exclusive, &mech);
return suite;
}
diff --git a/test/testlock.c b/test/testlock.c
index dddb52f76..a36720b84 100644
--- a/test/testlock.c
+++ b/test/testlock.c
@@ -90,7 +90,12 @@ static void *APR_THREAD_FUNC thread_mutex_function(apr_thread_t *thd, void *data
while (1)
{
- apr_thread_mutex_lock(thread_mutex);
+ if (data) {
+ apr_thread_mutex_timedlock(thread_mutex, *(apr_time_t *)data, 0);
+ }
+ else {
+ apr_thread_mutex_lock(thread_mutex);
+ }
if (i == MAX_ITER)
exitLoop = 0;
else
@@ -178,6 +183,38 @@ static void test_thread_mutex(abts_case *tc, void *data)
ABTS_INT_EQUAL(tc, MAX_ITER, x);
}
+static void test_thread_timedmutex(abts_case *tc, void *data)
+{
+ apr_thread_t *t1, *t2, *t3, *t4;
+ apr_status_t s1, s2, s3, s4;
+ apr_time_t timeout;
+
+ s1 = apr_thread_mutex_create(&thread_mutex, APR_THREAD_MUTEX_TIMED, p);
+ ABTS_INT_EQUAL(tc, APR_SUCCESS, s1);
+ ABTS_PTR_NOTNULL(tc, thread_mutex);
+
+ i = 0;
+ x = 0;
+
+ timeout = apr_time_from_sec(5);
+
+ s1 = apr_thread_create(&t1, NULL, thread_mutex_function, &timeout, p);
+ ABTS_INT_EQUAL(tc, APR_SUCCESS, s1);
+ s2 = apr_thread_create(&t2, NULL, thread_mutex_function, &timeout, p);
+ ABTS_INT_EQUAL(tc, APR_SUCCESS, s2);
+ s3 = apr_thread_create(&t3, NULL, thread_mutex_function, &timeout, p);
+ ABTS_INT_EQUAL(tc, APR_SUCCESS, s3);
+ s4 = apr_thread_create(&t4, NULL, thread_mutex_function, &timeout, p);
+ ABTS_INT_EQUAL(tc, APR_SUCCESS, s4);
+
+ apr_thread_join(&s1, t1);
+ apr_thread_join(&s2, t2);
+ apr_thread_join(&s3, t3);
+ apr_thread_join(&s4, t4);
+
+ ABTS_INT_EQUAL(tc, MAX_ITER, x);
+}
+
static void test_thread_rwlock(abts_case *tc, void *data)
{
apr_thread_t *t1, *t2, *t3, *t4;
@@ -305,6 +342,38 @@ static void test_timeoutcond(abts_case *tc, void *data)
apr_thread_cond_destroy(timeout_cond));
}
+static void test_timeoutmutex(abts_case *tc, void *data)
+{
+ apr_status_t s;
+ apr_time_t begin, end;
+ apr_time_t timeout;
+ int i;
+
+ s = apr_thread_mutex_create(&timeout_mutex, APR_THREAD_MUTEX_TIMED, p);
+ ABTS_INT_EQUAL(tc, APR_SUCCESS, s);
+ ABTS_PTR_NOTNULL(tc, timeout_mutex);
+
+ timeout = apr_time_from_sec(5);
+
+ ABTS_INT_EQUAL(tc, 0, apr_thread_mutex_lock(timeout_mutex));
+ for (i = 0; i < MAX_RETRY; i++) {
+ begin = apr_time_now();
+ s = apr_thread_mutex_timedlock(timeout_mutex, timeout, 0);
+ end = apr_time_now();
+
+ if (s != APR_SUCCESS && !APR_STATUS_IS_TIMEUP(s)) {
+ continue;
+ }
+ ABTS_INT_EQUAL(tc, 1, APR_STATUS_IS_TIMEUP(s));
+ ABTS_ASSERT(tc, "Timer returned too late", end - begin - timeout < 100000);
+ break;
+ }
+ ABTS_ASSERT(tc, "Too many retries", i < MAX_RETRY);
+ ABTS_INT_EQUAL(tc, 0, apr_thread_mutex_unlock(timeout_mutex));
+ APR_ASSERT_SUCCESS(tc, "Unable to destroy the mutex",
+ apr_thread_mutex_destroy(timeout_mutex));
+}
+
#endif /* !APR_HAS_THREADS */
#if !APR_HAS_THREADS
@@ -323,9 +392,11 @@ abts_suite *testlock(abts_suite *suite)
abts_run_test(suite, threads_not_impl, NULL);
#else
abts_run_test(suite, test_thread_mutex, NULL);
+ abts_run_test(suite, test_thread_timedmutex, NULL);
abts_run_test(suite, test_thread_rwlock, NULL);
abts_run_test(suite, test_cond, NULL);
abts_run_test(suite, test_timeoutcond, NULL);
+ abts_run_test(suite, test_timeoutmutex, NULL);
#endif
return suite;
diff --git a/test/testlockperf.c b/test/testlockperf.c
index 6cca99d16..39fc256a1 100644
--- a/test/testlockperf.c
+++ b/test/testlockperf.c
@@ -60,7 +60,12 @@ void * APR_THREAD_FUNC thread_mutex_func(apr_thread_t *thd, void *data)
int i;
for (i = 0; i < max_counter; i++) {
- apr_thread_mutex_lock(thread_lock);
+ if (data) {
+ apr_thread_mutex_timedlock(thread_lock, *(apr_time_t *)data, 0);
+ }
+ else {
+ apr_thread_mutex_lock(thread_lock);
+ }
mutex_counter++;
apr_thread_mutex_unlock(thread_lock);
}
@@ -175,6 +180,57 @@ int test_thread_mutex_nested(int num_threads)
return APR_SUCCESS;
}
+int test_thread_mutex_timed(int num_threads)
+{
+ apr_thread_t *t[MAX_THREADS];
+ apr_status_t s[MAX_THREADS];
+ apr_time_t time_start, time_stop;
+ apr_time_t timeout;
+ int i;
+
+ mutex_counter = 0;
+
+ timeout = apr_time_from_sec(5);
+
+ printf("apr_thread_mutex_t Tests\n");
+ printf("%-60s", " Initializing the apr_thread_mutex_t (TIMED)");
+ s[0] = apr_thread_mutex_create(&thread_lock, APR_THREAD_MUTEX_TIMED, pool);
+ if (s[0] != APR_SUCCESS) {
+ printf("Failed!\n");
+ return s[0];
+ }
+ printf("OK\n");
+
+ apr_thread_mutex_lock(thread_lock);
+ /* set_concurrency(4)? -aaron */
+ printf(" Starting %d threads ", num_threads);
+ for (i = 0; i < num_threads; ++i) {
+ s[i] = apr_thread_create(&t[i], NULL, thread_mutex_func, &timeout, pool);
+ if (s[i] != APR_SUCCESS) {
+ printf("Failed!\n");
+ return s[i];
+ }
+ }
+ printf("OK\n");
+
+ time_start = apr_time_now();
+ apr_thread_mutex_unlock(thread_lock);
+
+ /* printf("%-60s", " Waiting for threads to exit"); */
+ for (i = 0; i < num_threads; ++i) {
+ apr_thread_join(&s[i], t[i]);
+ }
+ /* printf("OK\n"); */
+
+ time_stop = apr_time_now();
+ printf("microseconds: %" APR_INT64_T_FMT " usec\n",
+ (time_stop - time_start));
+ if (mutex_counter != max_counter * num_threads)
+ printf("error: counter = %ld\n", mutex_counter);
+
+ return APR_SUCCESS;
+}
+
int test_thread_rwlock(int num_threads)
{
apr_thread_t *t[MAX_THREADS];
@@ -273,6 +329,12 @@ int main(int argc, const char * const *argv)
exit(-4);
}
+ if ((rv = test_thread_mutex_timed(i)) != APR_SUCCESS) {
+ fprintf(stderr,"thread_mutex (TIMED) test failed : [%d] %s\n",
+ rv, apr_strerror(rv, (char*)errmsg, 200));
+ exit(-5);
+ }
+
if ((rv = test_thread_rwlock(i)) != APR_SUCCESS) {
fprintf(stderr,"thread_rwlock test failed : [%d] %s\n",
rv, apr_strerror(rv, (char*)errmsg, 200));
diff --git a/test/testmutexscope.c b/test/testmutexscope.c
index 57fcaf5aa..808135773 100644
--- a/test/testmutexscope.c
+++ b/test/testmutexscope.c
@@ -43,20 +43,22 @@ static apr_pool_t *p;
static volatile int counter;
typedef enum {TEST_GLOBAL, TEST_PROC} test_mode_e;
-static void lock_init(apr_lockmech_e mech, test_mode_e test_mode)
+static int lock_init(apr_lockmech_e mech, test_mode_e test_mode)
{
+ apr_status_t rv;
if (test_mode == TEST_PROC) {
- assert(apr_proc_mutex_create(&proc_mutex,
- NULL,
- mech,
- p) == APR_SUCCESS);
+ rv = apr_proc_mutex_create(&proc_mutex,
+ NULL,
+ mech,
+ p);
}
else {
- assert(apr_global_mutex_create(&global_mutex,
- NULL,
- mech,
- p) == APR_SUCCESS);
+ rv = apr_global_mutex_create(&global_mutex,
+ NULL,
+ mech,
+ p);
}
+ return rv;
}
static void lock_destroy(test_mode_e test_mode)
@@ -120,7 +122,17 @@ static void test_mech_mode(apr_lockmech_e mech, const char *mech_name,
assert(apr_thread_mutex_create(&thread_mutex, 0, p) == APR_SUCCESS);
assert(apr_thread_mutex_lock(thread_mutex) == APR_SUCCESS);
- lock_init(mech, test_mode);
+ rv = lock_init(mech, test_mode);
+ if (rv != APR_SUCCESS) {
+ char errmsg[256];
+ printf("%s mutexes with mechanism `%s': %s\n",
+ test_mode == TEST_GLOBAL ? "Global" : "Proc", mech_name,
+ apr_strerror(rv, errmsg, sizeof errmsg));
+ if (rv != APR_ENOTIMPL || mech == APR_LOCK_DEFAULT) {
+ exit(1);
+ }
+ return;
+ }
counter = 0;
@@ -142,7 +154,7 @@ static void test_mech_mode(apr_lockmech_e mech, const char *mech_name,
apr_sleep(apr_time_from_sec(5));
if (test_mode == TEST_PROC) {
- printf(" Mutex mechanism `%s' is %sglobal in scope on this platform.\n",
+ printf(" mutex mechanism `%s' is %sglobal in scope on this platform.\n",
mech_name, counter == 1 ? "" : "not ");
}
else {
@@ -155,7 +167,7 @@ static void test_mech_mode(apr_lockmech_e mech, const char *mech_name,
exit(1);
}
else {
- printf(" no problems encountered...\n");
+ printf(" no problem encountered...\n");
}
}
@@ -205,6 +217,7 @@ int main(void)
#if APR_HAS_PROC_PTHREAD_SERIALIZE
,{APR_LOCK_PROC_PTHREAD, "proc_pthread"}
#endif
+ ,{APR_LOCK_DEFAULT_TIMED, "default_timed"}
};
int i;
diff --git a/test/testprocmutex.c b/test/testprocmutex.c
index 78b2efc4c..f2992de9e 100644
--- a/test/testprocmutex.c
+++ b/test/testprocmutex.c
@@ -35,6 +35,11 @@
static apr_proc_mutex_t *proc_lock;
static volatile int *x;
+typedef struct lockmech {
+ apr_lockmech_e num;
+ const char *name;
+} lockmech_t;
+
/* a slower more racy way to implement (*x)++ */
static int increment(int n)
{
@@ -68,7 +73,7 @@ static void make_child(abts_case *tc, int trylock, apr_proc_t **proc, apr_pool_t
exit(1);
do {
- if (trylock) {
+ if (trylock > 0) {
int wait_usec = 0;
while ((rv = apr_proc_mutex_trylock(proc_lock))) {
@@ -79,6 +84,16 @@ static void make_child(abts_case *tc, int trylock, apr_proc_t **proc, apr_pool_t
apr_sleep(1);
}
}
+ else if (trylock < 0) {
+ int wait_usec = 0;
+
+ while ((rv = apr_proc_mutex_timedlock(proc_lock, 1, 0))) {
+ if (!APR_STATUS_IS_TIMEUP(rv))
+ exit(1);
+ if (++wait_usec >= MAX_WAIT_USEC)
+ exit(1);
+ }
+ }
else {
if (apr_proc_mutex_lock(proc_lock))
exit(1);
@@ -108,16 +123,21 @@ static void await_child(abts_case *tc, apr_proc_t *proc)
}
static void test_exclusive(abts_case *tc, const char *lockname,
- apr_lockmech_e mech)
+ lockmech_t *mech)
{
apr_proc_t *child[CHILDREN];
apr_status_t rv;
int n;
- rv = apr_proc_mutex_create(&proc_lock, lockname, mech, p);
+ rv = apr_proc_mutex_create(&proc_lock, lockname, mech->num, p);
APR_ASSERT_SUCCESS(tc, "create the mutex", rv);
- if (rv != APR_SUCCESS)
+ if (rv != APR_SUCCESS) {
+ fprintf(stderr, "%s not implemented, ", mech->name);
+ ABTS_ASSERT(tc, "Default timed not implemented",
+ mech->num != APR_LOCK_DEFAULT &&
+ mech->num != APR_LOCK_DEFAULT_TIMED);
return;
+ }
for (n = 0; n < CHILDREN; n++)
make_child(tc, 0, &child[n], p);
@@ -129,24 +149,52 @@ static void test_exclusive(abts_case *tc, const char *lockname,
rv = apr_proc_mutex_trylock(proc_lock);
if (rv == APR_ENOTIMPL) {
- ABTS_NOT_IMPL(tc, "apr_proc_mutex_trylock not implemented");
- return;
+ fprintf(stderr, "%s_trylock() not implemented, ", mech->name);
+ ABTS_ASSERT(tc, "Default timed trylock not implemented",
+ mech->num != APR_LOCK_DEFAULT &&
+ mech->num != APR_LOCK_DEFAULT_TIMED);
}
- APR_ASSERT_SUCCESS(tc, "check for trylock", rv);
+ else {
+ APR_ASSERT_SUCCESS(tc, "check for trylock", rv);
- rv = apr_proc_mutex_unlock(proc_lock);
- APR_ASSERT_SUCCESS(tc, "unlock after trylock check", rv);
+ rv = apr_proc_mutex_unlock(proc_lock);
+ APR_ASSERT_SUCCESS(tc, "unlock after trylock check", rv);
- *x = 0;
+ *x = 0;
- for (n = 0; n < CHILDREN; n++)
- make_child(tc, 1, &child[n], p);
+ for (n = 0; n < CHILDREN; n++)
+ make_child(tc, 1, &child[n], p);
- for (n = 0; n < CHILDREN; n++)
- await_child(tc, child[n]);
-
- ABTS_ASSERT(tc, "Locks don't appear to work with trylock",
- *x == MAX_COUNTER);
+ for (n = 0; n < CHILDREN; n++)
+ await_child(tc, child[n]);
+
+ ABTS_ASSERT(tc, "Locks don't appear to work with trylock",
+ *x == MAX_COUNTER);
+ }
+
+ rv = apr_proc_mutex_timedlock(proc_lock, 1, 0);
+ if (rv == APR_ENOTIMPL) {
+ fprintf(stderr, "%s_timedlock() not implemented, ", mech->name);
+ ABTS_ASSERT(tc, "Default timed timedlock not implemented",
+ mech->num != APR_LOCK_DEFAULT_TIMED);
+ }
+ else {
+ APR_ASSERT_SUCCESS(tc, "check for timedlock", rv);
+
+ rv = apr_proc_mutex_unlock(proc_lock);
+ APR_ASSERT_SUCCESS(tc, "unlock after timedlock check", rv);
+
+ *x = 0;
+
+ for (n = 0; n < CHILDREN; n++)
+ make_child(tc, -1, &child[n], p);
+
+ for (n = 0; n < CHILDREN; n++)
+ await_child(tc, child[n]);
+
+ ABTS_ASSERT(tc, "Locks don't appear to work with timedlock",
+ *x == MAX_COUNTER);
+ }
}
#endif
@@ -156,7 +204,6 @@ static void proc_mutex(abts_case *tc, void *data)
apr_status_t rv;
const char *shmname = "tpm.shm";
apr_shm_t *shm;
- apr_lockmech_e *mech = data;
/* Use anonymous shm if available. */
rv = apr_shm_create(&shm, sizeof(int), NULL, p);
@@ -170,7 +217,7 @@ static void proc_mutex(abts_case *tc, void *data)
return;
x = apr_shm_baseaddr_get(shm);
- test_exclusive(tc, NULL, *mech);
+ test_exclusive(tc, NULL, data);
rv = apr_shm_destroy(shm);
APR_ASSERT_SUCCESS(tc, "Error destroying shared memory block", rv);
#else
@@ -181,30 +228,30 @@ static void proc_mutex(abts_case *tc, void *data)
abts_suite *testprocmutex(abts_suite *suite)
{
- apr_lockmech_e mech = APR_LOCK_DEFAULT;
-
- suite = ADD_SUITE(suite)
- abts_run_test(suite, proc_mutex, &mech);
-#if APR_HAS_POSIXSEM_SERIALIZE
- mech = APR_LOCK_POSIXSEM;
- abts_run_test(suite, proc_mutex, &mech);
+ lockmech_t lockmechs[] = {
+ {APR_LOCK_DEFAULT, "default"}
+#if APR_HAS_FLOCK_SERIALIZE
+ ,{APR_LOCK_FLOCK, "flock"}
#endif
#if APR_HAS_SYSVSEM_SERIALIZE
- mech = APR_LOCK_SYSVSEM;
- abts_run_test(suite, proc_mutex, &mech);
+ ,{APR_LOCK_SYSVSEM, "sysvsem"}
#endif
-#if APR_HAS_PROC_PTHREAD_SERIALIZE
- mech = APR_LOCK_PROC_PTHREAD;
- abts_run_test(suite, proc_mutex, &mech);
+#if APR_HAS_POSIXSEM_SERIALIZE
+ ,{APR_LOCK_POSIXSEM, "posixsem"}
#endif
#if APR_HAS_FCNTL_SERIALIZE
- mech = APR_LOCK_FCNTL;
- abts_run_test(suite, proc_mutex, &mech);
+ ,{APR_LOCK_FCNTL, "fcntl"}
#endif
-#if APR_HAS_FLOCK_SERIALIZE
- mech = APR_LOCK_FLOCK;
- abts_run_test(suite, proc_mutex, &mech);
+#if APR_HAS_PROC_PTHREAD_SERIALIZE
+ ,{APR_LOCK_PROC_PTHREAD, "proc_pthread"}
#endif
+ ,{APR_LOCK_DEFAULT_TIMED, "default_timed"}
+ };
+ int i;
+ suite = ADD_SUITE(suite)
+ for (i = 0; i < sizeof(lockmechs) / sizeof(lockmechs[0]); i++) {
+ abts_run_test(suite, proc_mutex, &lockmechs[i]);
+ }
return suite;
}