#include <xen/rwlock.h>
#include <xen/irq.h>

/*
 * rspin_until_writer_unlock - spin until the writer is gone.
 * @lock: Pointer to queue rwlock structure.
 * @cnts: Current queue rwlock writer status byte.
 *
 * In interrupt context or at the head of the queue, the reader will just
 * increment the reader count & wait until the writer releases the lock.
 */
static inline void rspin_until_writer_unlock(rwlock_t *lock, u32 cnts)
{
    while ( (cnts & _QW_WMASK) == _QW_LOCKED )
    {
        cpu_relax();
        smp_rmb();
        cnts = atomic_read(&lock->cnts);
    }
}
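
/*
 * Illustrative sketch, not part of the original file: roughly how the
 * reader fast path hands off to queue_read_lock_slowpath() below.  It
 * assumes the usual qrwlock layout, where the reader count sits above the
 * writer byte, so adding _QR_BIAS never disturbs the _QW_WMASK bits.  The
 * helper name _read_lock_sketch() is hypothetical.
 */
static inline void _read_lock_sketch(rwlock_t *lock)
{
    u32 cnts = atomic_add_return(_QR_BIAS, &lock->cnts);

    /* No writer holds or is waiting for the lock: we already own it. */
    if ( likely(!(cnts & _QW_WMASK)) )
        return;

    /* Contended: the slow path drops our bias and queues us fairly. */
    queue_read_lock_slowpath(lock);
}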

/*
 * queue_read_lock_slowpath - acquire read lock of a queue rwlock.
 * @lock: Pointer to queue rwlock structure.
 */
void queue_read_lock_slowpath(rwlock_t *lock)
{
    u32 cnts;

    /*
     * Readers come here when they cannot get the lock without waiting.
     */
    atomic_sub(_QR_BIAS, &lock->cnts);

    /*
     * Put the reader into the wait queue.
     */
    spin_lock(&lock->lock);

    /*
     * At the head of the wait queue now, wait until the writer state
     * goes to 0 and then try to increment the reader count and get
     * the lock. It is possible that an incoming writer may steal the
     * lock in the interim, so it is necessary to check the writer byte
     * to make sure that the write lock isn't taken.
     */
    while ( atomic_read(&lock->cnts) & _QW_WMASK )
        cpu_relax();

    cnts = atomic_add_return(_QR_BIAS, &lock->cnts);
    rspin_until_writer_unlock(lock, cnts);

    /*
     * Signal the next one in queue to become queue head.
     */
    spin_unlock(&lock->lock);

    lock_enter(&lock->lock.debug);
}
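
/*
 * Caller-side sketch, not part of the original file: how the read path is
 * typically used.  The lock, the shared counter, and the initialiser macro
 * below are illustrative assumptions; read_lock()/read_unlock() are taken
 * to be the usual wrappers that try the fast path and fall back to
 * queue_read_lock_slowpath() on contention.
 */
static DEFINE_RWLOCK(example_lock);     /* hypothetical lock */
static unsigned long example_counter;   /* hypothetical shared data */

static unsigned long read_example(void)
{
    unsigned long val;

    read_lock(&example_lock);
    val = example_counter;              /* many readers may run in parallel */
    read_unlock(&example_lock);

    return val;
}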

/*
 * queue_write_lock_slowpath - acquire write lock of a queue rwlock.
 * @lock: Pointer to queue rwlock structure.
 */
void queue_write_lock_slowpath(rwlock_t *lock)
{
    u32 cnts;

    /* Put the writer into the wait queue. */
    spin_lock(&lock->lock);

    /* Try to acquire the lock directly if no reader is present. */
    if ( !atomic_read(&lock->cnts) &&
         (atomic_cmpxchg(&lock->cnts, 0, _write_lock_val()) == 0) )
        goto unlock;

    /*
     * Set the waiting flag to notify readers that a writer is pending,
     * or wait for a previous writer to go away.
     */
    for ( ; ; )
    {
        cnts = atomic_read(&lock->cnts);
        if ( !(cnts & _QW_WMASK) &&
             (atomic_cmpxchg(&lock->cnts, cnts,
                             cnts | _QW_WAITING) == cnts) )
            break;

        cpu_relax();
    }

    /* When no more readers, set the locked flag. */
    for ( ; ; )
    {
        cnts = atomic_read(&lock->cnts);
        if ( (cnts == _QW_WAITING) &&
             (atomic_cmpxchg(&lock->cnts, _QW_WAITING,
                             _write_lock_val()) == _QW_WAITING) )
            break;

        cpu_relax();
    }

 unlock:
    spin_unlock(&lock->lock);

    lock_enter(&lock->lock.debug);
}
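
/*
 * Illustrative sketch, not part of the original file: the writer fast path
 * that precedes queue_write_lock_slowpath().  It assumes _write_lock_val()
 * encodes the _QW_LOCKED state (plus the owning CPU in this tree) and that
 * a lock word of zero means "no readers, no writer".  The helper name
 * _write_lock_sketch() is hypothetical.
 */
static inline void _write_lock_sketch(rwlock_t *lock)
{
    /* Uncontended case: atomically go from "free" to "write locked". */
    if ( likely(atomic_cmpxchg(&lock->cnts, 0, _write_lock_val()) == 0) )
        return;

    /* Readers or another writer are present: queue up behind them. */
    queue_write_lock_slowpath(lock);
}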

static DEFINE_PER_CPU(cpumask_t, percpu_rwlock_readers);

void _percpu_write_lock(percpu_rwlock_t **per_cpudata,
                        percpu_rwlock_t *percpu_rwlock)
{
    unsigned int cpu;
    cpumask_t *rwlock_readers = &this_cpu(percpu_rwlock_readers);

    /* Validate the correct per_cpudata variable has been provided. */
    _percpu_rwlock_owner_check(per_cpudata, percpu_rwlock);

    /*
     * First take the write lock to protect against other writers or slow
     * path readers.
     */
    write_lock(&percpu_rwlock->rwlock);

    /* Now set the global variable so that readers start using read_lock. */
    percpu_rwlock->writer_activating = 1;
    smp_mb();

    /* Using a per cpu cpumask is only safe if there is no nesting. */
    ASSERT(!in_irq());
    cpumask_copy(rwlock_readers, &cpu_online_map);

    /* Check if there are any percpu readers in progress on this rwlock. */
    for ( ; ; )
    {
        for_each_cpu(cpu, rwlock_readers)
        {
            /*
             * Remove any percpu readers not contending on this rwlock
             * from our check mask.
             */
            if ( per_cpu_ptr(per_cpudata, cpu) != percpu_rwlock )
                __cpumask_clear_cpu(cpu, rwlock_readers);
        }

        /* Check if we've cleared all percpu readers from check mask. */
        if ( cpumask_empty(rwlock_readers) )
            break;

        /* Give the coherency fabric a break. */
        cpu_relax();
    }

    lock_enter(&percpu_rwlock->rwlock.lock.debug);
}
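
/*
 * Illustrative sketch, not part of the original file, of the per-CPU reader
 * side that the drain loop above waits for: a reader publishes the lock it
 * is using through its per_cpudata slot, and only falls back to the queued
 * read_lock() path when writer_activating is observed.  The helper name and
 * the exact barrier placement are simplified assumptions, not the header's
 * verbatim code.
 */
static inline void percpu_read_lock_sketch(percpu_rwlock_t **per_cpudata,
                                           percpu_rwlock_t *percpu_rwlock)
{
    /* Fast path: advertise this CPU as a reader of this particular lock. */
    this_cpu_ptr(per_cpudata) = percpu_rwlock;
    smp_mb();

    /* A writer is draining readers: back off and wait on the real rwlock. */
    if ( unlikely(percpu_rwlock->writer_activating) )
    {
        this_cpu_ptr(per_cpudata) = NULL;
        read_lock(&percpu_rwlock->rwlock);
        this_cpu_ptr(per_cpudata) = percpu_rwlock;
        smp_mb();
        read_unlock(&percpu_rwlock->rwlock);
    }
}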