author    dreid <dreid@13f79535-47bb-0310-9956-ffa450edef68>  2001-10-28 13:05:51 +0000
committer dreid <dreid@13f79535-47bb-0310-9956-ffa450edef68>  2001-10-28 13:05:51 +0000
commit    a5ff28cc2dac9708cae42067e62fcbfa0559d688 (patch)
tree      019655117484bb20536da7c37a0564460f406009 /locks/beos
parent    54d731405bb81db15a028a7a6e018c96c0e4b2ca (diff)
download  libapr-a5ff28cc2dac9708cae42067e62fcbfa0559d688.tar.gz
Add the actual code for most of the new lock methods. Only the condition
variable implementation is still left to add at some point.

git-svn-id: http://svn.apache.org/repos/asf/apr/apr/trunk@62470 13f79535-47bb-0310-9956-ffa450edef68
Diffstat (limited to 'locks/beos')
-rw-r--r--  locks/beos/proc_mutex.c    |  71
-rw-r--r--  locks/beos/thread_mutex.c  | 112
-rw-r--r--  locks/beos/thread_rwlock.c | 138
3 files changed, 300 insertions(+), 21 deletions(-)
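All three files implement the same BeOS "benaphore" idiom: an atomic counter is the uncontended fast path, and the kernel semaphore is only touched when the counter shows that someone else is already inside. A minimal sketch of the idiom, assuming only the BeOS kernel kit (the benaphore type and ben_* names are illustrative, not APR's):

    #include <OS.h>    /* BeOS kernel kit: sem_id, atomic_add(), create_sem() */

    typedef struct {
        sem_id sem;    /* kernel semaphore; only hit under contention */
        int32  count;  /* atomic counter; 0 means the lock is free    */
    } benaphore;

    static status_t ben_init(benaphore *b, const char *name)
    {
        b->count = 0;
        /* created with a count of 0, so the first locker never blocks */
        b->sem = create_sem(0, name);
        return (b->sem < B_NO_ERROR) ? (status_t)b->sem : B_OK;
    }

    static status_t ben_lock(benaphore *b)
    {
        /* atomic_add() returns the value *before* the add; > 0 means
         * another thread holds the lock, so block on the semaphore */
        if (atomic_add(&b->count, 1) > 0)
            return acquire_sem(b->sem);
        return B_OK;
    }

    static status_t ben_unlock(benaphore *b)
    {
        /* a previous value > 1 means at least one waiter is parked
         * on the semaphore and must be woken */
        if (atomic_add(&b->count, -1) > 1)
            return release_sem(b->sem);
        return B_OK;
    }

Because atomic_add() returns the counter's previous value, exactly one contender sees 0 and skips the semaphore; everyone else pays the kernel round trip. The lock/unlock pairs added below are this pattern with APR status codes layered on top.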
diff --git a/locks/beos/proc_mutex.c b/locks/beos/proc_mutex.c
index c5209831f..b9a1d3170 100644
--- a/locks/beos/proc_mutex.c
+++ b/locks/beos/proc_mutex.c
@@ -60,13 +60,51 @@
#include "apr_strings.h"
#include "apr_portable.h"
+static apr_status_t _proc_mutex_cleanup(void * data)
+{
+    apr_proc_mutex_t *lock = (apr_proc_mutex_t*)data;
+    if (lock->LockCount != 0) {
+        /* we're still locked... */
+        while (atomic_add(&lock->LockCount, -1) > 1) {
+            /* OK we had more than one person waiting on the lock so
+             * the sem is also locked. Release it until we have no more
+             * locks left.
+             */
+            release_sem(lock->Lock);
+        }
+    }
+    delete_sem(lock->Lock);
+    return APR_SUCCESS;
+}
+
APR_DECLARE(apr_status_t) apr_proc_mutex_create(apr_proc_mutex_t **mutex,
                                                const char *fname,
                                                apr_pool_t *pool)
{
-    return APR_ENOTIMPL;
+    apr_proc_mutex_t *new;
+    apr_status_t stat = APR_SUCCESS;
+
+    new = (apr_proc_mutex_t *)apr_pcalloc(pool, sizeof(apr_proc_mutex_t));
+    if (new == NULL) {
+        return APR_ENOMEM;
+    }
+
+    if ((stat = create_sem(0, "APR_Lock")) < B_NO_ERROR) {
+        _proc_mutex_cleanup(new);
+        return stat;
+    }
+    new->LockCount = 0;
+    new->Lock = stat;
+    new->pool = pool;
+
+    apr_pool_cleanup_register(new->pool, (void *)new, _proc_mutex_cleanup,
+                              apr_pool_cleanup_null);
+
+    (*mutex) = new;
+    return APR_SUCCESS;
}
+#if APR_HAS_CREATE_LOCKS_NP
APR_DECLARE(apr_status_t) apr_proc_mutex_create_np(apr_proc_mutex_t **mutex,
                                                   const char *fname,
                                                   apr_lockmech_e_np mech,
@@ -74,6 +112,7 @@ APR_DECLARE(apr_status_t) apr_proc_mutex_create_np(apr_proc_mutex_t **mutex,
{
    return APR_ENOTIMPL;
}
+#endif
APR_DECLARE(apr_status_t) apr_proc_mutex_child_init(apr_proc_mutex_t **mutex,
                                                    const char *fname,
@@ -84,7 +123,15 @@ APR_DECLARE(apr_status_t) apr_proc_mutex_child_init(apr_proc_mutex_t **mutex,
APR_DECLARE(apr_status_t) apr_proc_mutex_lock(apr_proc_mutex_t *mutex)
{
-    return APR_ENOTIMPL;
+    int32 stat;
+
+    if (atomic_add(&mutex->LockCount, 1) > 0) {
+        if ((stat = acquire_sem(mutex->Lock)) < B_NO_ERROR) {
+            atomic_add(&mutex->LockCount, -1);
+            return stat;
+        }
+    }
+    return APR_SUCCESS;
}
APR_DECLARE(apr_status_t) apr_proc_mutex_trylock(apr_proc_mutex_t *mutex)
@@ -94,12 +141,25 @@ APR_DECLARE(apr_status_t) apr_proc_mutex_trylock(apr_proc_mutex_t *mutex)
APR_DECLARE(apr_status_t) apr_proc_mutex_unlock(apr_proc_mutex_t *mutex)
{
-    return APR_ENOTIMPL;
+    int32 stat;
+
+    if (atomic_add(&mutex->LockCount, -1) > 1) {
+        if ((stat = release_sem(mutex->Lock)) < B_NO_ERROR) {
+            atomic_add(&mutex->LockCount, 1);
+            return stat;
+        }
+    }
+    return APR_SUCCESS;
}
APR_DECLARE(apr_status_t) apr_proc_mutex_destroy(apr_proc_mutex_t *mutex)
{
-    return APR_ENOTIMPL;
+    apr_status_t stat;
+    if ((stat = _proc_mutex_cleanup(mutex)) == APR_SUCCESS) {
+        apr_pool_cleanup_kill(mutex->pool, mutex, _proc_mutex_cleanup);
+        return APR_SUCCESS;
+    }
+    return stat;
}
APR_POOL_IMPLEMENT_ACCESSOR(proc_mutex)
@@ -122,8 +182,7 @@ APR_DECLARE(apr_status_t) apr_os_proc_mutex_put(apr_proc_mutex_t **pmutex,
        return APR_ENOPOOL;
    }
    if ((*pmutex) == NULL) {
-        (*pmutex) = (apr_proc_mutex_t *)apr_pcalloc(pool,
-                                                    sizeof(apr_proc_mutex_t));
+        (*pmutex) = (apr_proc_mutex_t *)apr_pcalloc(pool, sizeof(apr_proc_mutex_t));
        (*pmutex)->pool = pool;
    }
    (*pmutex)->Lock = ospmutex->sem;
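For reference, a hedged sketch of how a caller would exercise the new proc-mutex API with the signatures added above (the header name is assumed for this vintage of APR; fname may be NULL because this backend never reads it):

    #include "apr_pools.h"
    #include "apr_proc_mutex.h"   /* header name assumed */

    static void guarded_work(apr_pool_t *pool)
    {
        apr_proc_mutex_t *mutex;

        if (apr_proc_mutex_create(&mutex, NULL, pool) != APR_SUCCESS)
            return;

        if (apr_proc_mutex_lock(mutex) == APR_SUCCESS) {
            /* ... work shared between related processes ... */
            apr_proc_mutex_unlock(mutex);
        }

        /* runs _proc_mutex_cleanup() and unregisters the pool cleanup */
        apr_proc_mutex_destroy(mutex);
    }

Calling destroy is optional here: the cleanup registered in apr_proc_mutex_create() will otherwise delete the semaphore when the pool dies.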
diff --git a/locks/beos/thread_mutex.c b/locks/beos/thread_mutex.c
index 054464f05..03d669885 100644
--- a/locks/beos/thread_mutex.c
+++ b/locks/beos/thread_mutex.c
@@ -60,21 +60,91 @@
#include "apr_strings.h"
#include "apr_portable.h"
-static apr_status_t thread_mutex_cleanup(void *data)
+static apr_status_t _thread_mutex_cleanup(void * data)
{
-    return APR_ENOTIMPL;
-}
+    apr_thread_mutex_t *lock = (apr_thread_mutex_t*)data;
+    if (lock->LockCount != 0) {
+        /* we're still locked... */
+        while (atomic_add(&lock->LockCount, -1) > 1) {
+            /* OK we had more than one person waiting on the lock so
+             * the sem is also locked. Release it until we have no more
+             * locks left.
+             */
+            release_sem(lock->Lock);
+        }
+    }
+    delete_sem(lock->Lock);
+    return APR_SUCCESS;
+}
APR_DECLARE(apr_status_t) apr_thread_mutex_create(apr_thread_mutex_t **mutex,
                                                  unsigned int flags,
                                                  apr_pool_t *pool)
{
-    return APR_ENOTIMPL;
+    apr_thread_mutex_t *new_m;
+    apr_status_t stat = APR_SUCCESS;
+
+    new_m = (apr_thread_mutex_t *)apr_pcalloc(pool, sizeof(apr_thread_mutex_t));
+    if (new_m == NULL) {
+        return APR_ENOMEM;
+    }
+
+    if ((stat = create_sem(0, "APR_Lock")) < B_NO_ERROR) {
+        _thread_mutex_cleanup(new_m);
+        return stat;
+    }
+    new_m->LockCount = 0;
+    new_m->Lock = stat;
+    new_m->pool = pool;
+    new_m->nested = flags & APR_THREAD_MUTEX_NESTED;
+
+    apr_pool_cleanup_register(new_m->pool, (void *)new_m, _thread_mutex_cleanup,
+                              apr_pool_cleanup_null);
+
+    (*mutex) = new_m;
+    return APR_SUCCESS;
}
-APR_DECLARE(apr_status_t) apr_thread_mutex_lock(apr_thread_mutex_t *mutex)
+#if APR_HAS_CREATE_LOCKS_NP
+APR_DECLARE(apr_status_t) apr_thread_mutex_create_np(apr_thread_mutex_t **mutex,
+                                                     const char *fname,
+                                                     apr_lockmech_e_np mech,
+                                                     apr_pool_t *pool)
{
    return APR_ENOTIMPL;
+}
+#endif
+
+APR_DECLARE(apr_status_t) apr_thread_mutex_child_init(apr_thread_mutex_t **mutex,
+                                                      const char *fname,
+                                                      apr_pool_t *pool)
+{
+    return APR_SUCCESS;
+}
+
+APR_DECLARE(apr_status_t) apr_thread_mutex_lock(apr_thread_mutex_t *mutex)
+{
+    int32 stat;
+
+    if (mutex->nested && mutex->owner == find_thread(NULL)) {
+        mutex->owner_ref++;
+        return APR_SUCCESS;
+    }
+
+    if (atomic_add(&mutex->LockCount, 1) > 0) {
+        if ((stat = acquire_sem(mutex->Lock)) < B_NO_ERROR) {
+            /* Oh dear, acquire_sem failed!! */
+            atomic_add(&mutex->LockCount, -1);
+            return stat;
+        }
+    }
+
+    if (mutex->nested) {
+        mutex->owner = find_thread(NULL);
+        mutex->owner_ref = 1;
+    }
+
+    return APR_SUCCESS;
}
APR_DECLARE(apr_status_t) apr_thread_mutex_trylock(apr_thread_mutex_t *mutex)
@@ -84,13 +154,35 @@ APR_DECLARE(apr_status_t) apr_thread_mutex_trylock(apr_thread_mutex_t *mutex)
APR_DECLARE(apr_status_t) apr_thread_mutex_unlock(apr_thread_mutex_t *mutex)
{
-    return APR_ENOTIMPL;
+    int32 stat;
+
+    if (mutex->nested && mutex->owner == find_thread(NULL)) {
+        mutex->owner_ref--;
+        if (mutex->owner_ref > 0)
+            return APR_SUCCESS;
+    }
+
+    if (atomic_add(&mutex->LockCount, -1) > 1) {
+        if ((stat = release_sem(mutex->Lock)) < B_NO_ERROR) {
+            atomic_add(&mutex->LockCount, 1);
+            return stat;
+        }
+    }
+
+    if (mutex->nested) {
+        mutex->owner = -1;
+        mutex->owner_ref = 0;
+    }
+
+    return APR_SUCCESS;
}
APR_DECLARE(apr_status_t) apr_thread_mutex_destroy(apr_thread_mutex_t *mutex)
{
-    return APR_ENOTIMPL;
+    apr_status_t stat;
+    if ((stat = _thread_mutex_cleanup(mutex)) == APR_SUCCESS) {
+        apr_pool_cleanup_kill(mutex->pool, mutex, _thread_mutex_cleanup);
+        return APR_SUCCESS;
+    }
+    return stat;
}
-
-APR_POOL_IMPLEMENT_ACCESSOR(thread_mutex)
-
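The owner/owner_ref bookkeeping above is what APR_THREAD_MUTEX_NESTED buys: a thread that already owns the mutex only bumps a reference count instead of touching the benaphore again, and only the matching final unlock releases it. A hedged sketch of the resulting behaviour (header name assumed):

    #include "apr_thread_mutex.h"   /* header name assumed */

    static void recursive_example(apr_pool_t *pool)
    {
        apr_thread_mutex_t *mutex;

        apr_thread_mutex_create(&mutex, APR_THREAD_MUTEX_NESTED, pool);

        apr_thread_mutex_lock(mutex);    /* takes the benaphore, owner_ref = 1 */
        apr_thread_mutex_lock(mutex);    /* same thread: owner_ref = 2, no sem */
        apr_thread_mutex_unlock(mutex);  /* owner_ref back to 1, still held    */
        apr_thread_mutex_unlock(mutex);  /* owner_ref 0: benaphore released    */

        apr_thread_mutex_destroy(mutex);
    }

Without the NESTED flag the second lock above would deadlock: the thread would see LockCount > 0 and block on its own semaphore.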
diff --git a/locks/beos/thread_rwlock.c b/locks/beos/thread_rwlock.c
index 59a93d0a7..fa5ac911e 100644
--- a/locks/beos/thread_rwlock.c
+++ b/locks/beos/thread_rwlock.c
@@ -60,15 +60,82 @@
#include "apr_strings.h"
#include "apr_portable.h"
+#define BIG_NUM 100000
+
+static apr_status_t _thread_rw_cleanup(void * data)
+{
+    apr_thread_rwlock_t *mutex = (apr_thread_rwlock_t*)data;
+
+    if (mutex->ReadCount != 0) {
+        while (atomic_add(&mutex->ReadCount, -1) > 1) {
+            release_sem(mutex->Read);
+        }
+    }
+    if (mutex->WriteCount != 0) {
+        while (atomic_add(&mutex->WriteCount, -1) > 1) {
+            release_sem(mutex->Write);
+        }
+    }
+    if (mutex->LockCount != 0) {
+        while (atomic_add(&mutex->LockCount, -1) > 1) {
+            release_sem(mutex->Lock);
+        }
+    }
+
+    delete_sem(mutex->Read);
+    delete_sem(mutex->Write);
+    delete_sem(mutex->Lock);
+    return APR_SUCCESS;
+}
+
APR_DECLARE(apr_status_t) apr_thread_rwlock_create(apr_thread_rwlock_t **rwlock,
                                                   apr_pool_t *pool)
{
-    return APR_ENOTIMPL;
+    apr_thread_rwlock_t *new;
+
+    new = (apr_thread_rwlock_t *)apr_pcalloc(pool, sizeof(apr_thread_rwlock_t));
+    if (new == NULL) {
+        return APR_ENOMEM;
+    }
+
+    new->pool = pool;
+    /* we need to make 3 locks... */
+    new->ReadCount = 0;
+    new->WriteCount = 0;
+    new->LockCount = 0;
+    new->Read = create_sem(0, "APR_ReadLock");
+    new->Write = create_sem(0, "APR_WriteLock");
+    new->Lock = create_sem(0, "APR_Lock");
+
+    if (new->Lock < 0 || new->Read < 0 || new->Write < 0) {
+        _thread_rw_cleanup(new);
+        return -1;
+    }
+
+    apr_pool_cleanup_register(new->pool, (void *)new, _thread_rw_cleanup,
+                              apr_pool_cleanup_null);
+    (*rwlock) = new;
+    return APR_SUCCESS;
}
APR_DECLARE(apr_status_t) apr_thread_rwlock_rdlock(apr_thread_rwlock_t *rwlock)
{
-    return APR_ENOTIMPL;
+    int32 rv = APR_SUCCESS;
+
+    if (find_thread(NULL) == rwlock->writer) {
+        /* we're the writer - no problem */
+        rwlock->Nested++;
+    } else {
+        /* we're not the writer */
+        int32 r = atomic_add(&rwlock->ReadCount, 1);
+        if (r < 0) {
+            /* Oh dear, writer holds lock, wait for sem */
+            rv = acquire_sem_etc(rwlock->Read, 1, B_DO_NOT_RESCHEDULE,
+                                 B_INFINITE_TIMEOUT);
+        }
+    }
+
+    return rv;
}
APR_DECLARE(apr_status_t) apr_thread_rwlock_tryrdlock(apr_thread_rwlock_t *rwlock)
@@ -78,7 +145,33 @@ APR_DECLARE(apr_status_t) apr_thread_rwlock_tryrdlock(apr_thread_rwlock_t *rwloc
APR_DECLARE(apr_status_t) apr_thread_rwlock_wrlock(apr_thread_rwlock_t *rwlock)
{
-    return APR_ENOTIMPL;
+    int rv = APR_SUCCESS;
+
+    if (find_thread(NULL) == rwlock->writer) {
+        rwlock->Nested++;
+    } else {
+        /* we're not the writer... */
+        if (atomic_add(&rwlock->LockCount, 1) >= 1) {
+            /* we're locked - acquire the sem */
+            rv = acquire_sem_etc(rwlock->Lock, 1, B_DO_NOT_RESCHEDULE,
+                                 B_INFINITE_TIMEOUT);
+        }
+        if (rv == APR_SUCCESS) {
+            /* decrement the ReadCount to a large -ve number so that
+             * we block on new readers...
+             */
+            int32 readers = atomic_add(&rwlock->ReadCount, -BIG_NUM);
+            if (readers > 0) {
+                /* readers are holding the lock */
+                rv = acquire_sem_etc(rwlock->Write, readers, B_DO_NOT_RESCHEDULE,
+                                     B_INFINITE_TIMEOUT);
+            }
+            if (rv == APR_SUCCESS)
+                rwlock->writer = find_thread(NULL);
+        }
+    }
+
+    return rv;
}
APR_DECLARE(apr_status_t) apr_thread_rwlock_trywrlock(apr_thread_rwlock_t *rwlock)
@@ -88,12 +181,47 @@ APR_DECLARE(apr_status_t) apr_thread_rwlock_trywrlock(apr_thread_rwlock_t *rwloc
APR_DECLARE(apr_status_t) apr_thread_rwlock_unlock(apr_thread_rwlock_t *rwlock)
{
-    return APR_ENOTIMPL;
+    apr_status_t rv = APR_SUCCESS;
+    int32 readers;
+
+    if (find_thread(NULL) == rwlock->writer) {
+        /* we know we hold the lock, so don't check it :) */
+        if (rwlock->Nested > 1) {
+            /* we're recursively locked */
+            rwlock->Nested--;
+            return APR_SUCCESS;
+        }
+        /* OK so we need to release the sem if we have it :) */
+        readers = atomic_add(&rwlock->ReadCount, BIG_NUM) + BIG_NUM;
+        if (readers > 0) {
+            rv = release_sem_etc(rwlock->Read, readers, B_DO_NOT_RESCHEDULE);
+        }
+        if (rv == APR_SUCCESS) {
+            rwlock->writer = -1;
+            if (atomic_add(&rwlock->LockCount, -1) > 1) {
+                rv = release_sem_etc(rwlock->Lock, 1, B_DO_NOT_RESCHEDULE);
+            }
+        }
+    } else {
+        /* We weren't the Writer, so just release the ReadCount... */
+        if (atomic_add(&rwlock->ReadCount, -1) < 0) {
+            /* we have a writer waiting for the lock, so release it */
+            rv = release_sem_etc(rwlock->Write, 1, B_DO_NOT_RESCHEDULE);
+        }
+    }
+
+    return rv;
}
APR_DECLARE(apr_status_t) apr_thread_rwlock_destroy(apr_thread_rwlock_t *rwlock)
{
-    return APR_ENOTIMPL;
+    apr_status_t stat;
+    if ((stat = _thread_rw_cleanup(rwlock)) == APR_SUCCESS) {
+        apr_pool_cleanup_kill(rwlock->pool, rwlock, _thread_rw_cleanup);
+        return APR_SUCCESS;
+    }
+    return stat;
}
APR_POOL_IMPLEMENT_ACCESSOR(thread_rwlock)
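A closing note on the rwlock's write path, which is the least obvious part of the patch: it is the "big negative number" variant of the benaphore. The writer subtracts BIG_NUM (100000) from ReadCount, so any reader arriving afterwards sees a negative previous value and parks on the Read semaphore, while the value the writer got back tells it how many readers it must still wait out on the Write semaphore. A worked trace, assuming three readers hold the lock when the writer arrives and n more turn up while it writes:

    /* start: ReadCount == 3, three readers inside, no writer            */
    writer  : atomic_add(&ReadCount, -BIG_NUM) returns 3
              ReadCount is now 3 - 100000 = -99997
              acquire_sem_etc(Write, 3, ...) blocks until 3 releases
    readers : each exit does atomic_add(&ReadCount, -1), sees < 0,
              so each does release_sem_etc(Write, 1, ...); after all
              three leave, ReadCount == -100000 and the writer runs
    arrivals: each new reader's atomic_add(&ReadCount, 1) returns < 0,
              so all n of them park on acquire_sem_etc(Read, ...)
    unlock  : atomic_add(&ReadCount, BIG_NUM) returns -100000 + n,
              so readers = (-100000 + n) + BIG_NUM = n, and
              release_sem_etc(Read, n, ...) starts all n at once

The scheme holds as long as no more than BIG_NUM readers ever pile up at once; at 100000 that is a safe bet for this platform.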