summaryrefslogtreecommitdiff
path: root/atomic/unix/apr_atomic.c
blob: bba9b033d1341b2a2c294b6b95aa63d2e7ee946c (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
#include "apr.h"
#include "apr_lock.h"
#include "apr_thread_mutex.h"
#include "apr_atomic.h"

#if defined(APR_ATOMIC_NEED_DEFAULT) 

#if APR_HAS_THREADS

/* Number of mutexes guarding the default "atomic" operations; a small
 * prime spreads addresses across the buckets reasonably evenly. */
#define NUM_ATOMIC_HASH 7
/* shift by 2 to get rid of alignment issues */
#define ATOMIC_HASH(x) (int)(((long)x>>2)%NUM_ATOMIC_HASH)
/* Bucket array of mutexes, allocated by apr_atomic_init(); each atomic
 * operation locks the mutex selected by the address of its target word. */
static apr_thread_mutex_t **hash_mutex;

/**
 * Initialize the default (mutex-based) atomic implementation.
 *
 * Allocates the bucket array from @a p and creates one mutex per bucket;
 * the mutexes live (and are cleaned up) with that pool.  Must be called
 * before any other apr_atomic_* function in this file.
 *
 * @param p pool to allocate the mutex table from
 * @return APR_SUCCESS, APR_ENOMEM if the table cannot be allocated, or
 *         the error from apr_thread_mutex_create()
 */
apr_status_t apr_atomic_init(apr_pool_t *p)
{
    int i;
    apr_status_t rv;

    hash_mutex = apr_palloc(p, sizeof(apr_thread_mutex_t *) * NUM_ATOMIC_HASH);
    if (hash_mutex == NULL) {
        /* apr_palloc may return NULL on allocation failure; the original
         * code dereferenced the result unconditionally. */
        return APR_ENOMEM;
    }
    for (i = 0; i < NUM_ATOMIC_HASH; i++) {
        rv = apr_thread_mutex_create(&(hash_mutex[i]),
                                     APR_THREAD_MUTEX_DEFAULT, p);
        if (rv != APR_SUCCESS) {
            return rv;
        }
    }
    return APR_SUCCESS;
}
/**
 * Add @a val to @a *mem under the bucket mutex for @a mem.
 *
 * @return the value @a *mem held before the addition.
 *
 * NOTE(review): if the mutex cannot be locked, the addition is NOT
 * performed and the current (unlocked) value of *mem is returned;
 * callers cannot distinguish this failure from success.  This preserves
 * the original behavior.
 */
apr_uint32_t apr_atomic_add(volatile apr_atomic_t *mem, long val)
{
    apr_thread_mutex_t *lock = hash_mutex[ATOMIC_HASH(mem)];
    apr_uint32_t prev;  /* was 'long': match the return type to avoid
                         * implicit narrowing on LP64 platforms */

    if (apr_thread_mutex_lock(lock) == APR_SUCCESS) {
        prev = *mem;
        *mem += val;
        apr_thread_mutex_unlock(lock);
        return prev;
    }
    return *mem;
}
/**
 * Store @a val into @a *mem under the bucket mutex for @a mem.
 *
 * @return the value @a *mem held before the store.
 *
 * NOTE(review): on lock failure the store is silently skipped and the
 * current (unlocked) value of *mem is returned, matching the original
 * behavior of this default implementation.
 */
apr_uint32_t apr_atomic_set(volatile apr_atomic_t *mem, long val)
{
    apr_thread_mutex_t *lock = hash_mutex[ATOMIC_HASH(mem)];
    apr_uint32_t prev;  /* was 'long': match the return type to avoid
                         * implicit narrowing on LP64 platforms */

    if (apr_thread_mutex_lock(lock) == APR_SUCCESS) {
        prev = *mem;
        *mem = val;
        apr_thread_mutex_unlock(lock);
        return prev;
    }
    return *mem;
}

/**
 * Increment @a *mem by one under the bucket mutex for @a mem.
 *
 * @return the value @a *mem held before the increment.
 *
 * NOTE(review): this prototype takes apr_uint32_t* where its siblings
 * take apr_atomic_t* -- kept as-is to match the declaration in
 * apr_atomic.h.  On lock failure the increment is silently skipped.
 */
apr_uint32_t apr_atomic_inc(volatile apr_uint32_t *mem)
{
    apr_thread_mutex_t *lock = hash_mutex[ATOMIC_HASH(mem)];
    apr_uint32_t prev;  /* was 'long': match the return type to avoid
                         * implicit narrowing on LP64 platforms */

    if (apr_thread_mutex_lock(lock) == APR_SUCCESS) {
        prev = *mem;
        (*mem)++;
        apr_thread_mutex_unlock(lock);
        return prev;
    }
    return *mem;
}
/**
 * Decrement @a *mem by one under the bucket mutex for @a mem.
 *
 * @return the value @a *mem held before the decrement (as the code
 *         shows; later APR revisions changed dec semantics -- confirm
 *         against the matching apr_atomic.h before relying on this).
 *
 * NOTE(review): on lock failure the decrement is silently skipped and
 * the current (unlocked) value of *mem is returned.
 */
apr_uint32_t apr_atomic_dec(volatile apr_atomic_t *mem)
{
    apr_thread_mutex_t *lock = hash_mutex[ATOMIC_HASH(mem)];
    apr_uint32_t prev;  /* was 'long': match the return type to avoid
                         * implicit narrowing on LP64 platforms */

    if (apr_thread_mutex_lock(lock) == APR_SUCCESS) {
        prev = *mem;
        (*mem)--;
        apr_thread_mutex_unlock(lock);
        return prev;
    }
    return *mem;
}
#if 0
/*
 * Compare-and-swap: disabled because (per the original author) Linux
 * offered no easy primitive for it at the time.  Kept for reference;
 * this whole block is compiled out.
 */
apr_uint32_t apr_atomic_cas(volatile apr_atomic_t *mem,apr_uint32_t with, apr_uint32_t cmp)
{
    apr_thread_mutex_t *lock = hash_mutex[ATOMIC_HASH(mem)];
    long prev;

    if (apr_thread_mutex_lock(lock) == APR_SUCCESS) {
        prev = *mem;
        /* swap in 'with' only when the current value equals 'cmp';
         * the pre-swap value is returned either way */
        if ( *mem == cmp) {
            *mem = with;
        }
        apr_thread_mutex_unlock(lock);
        return prev;
    }
    /* lock failure: no swap performed; return the unlocked current value */
    return *mem;
}
#endif
#endif /* APR_HAS_THREADS */

#endif /* APR_ATOMIC_NEED_DEFAULT */