1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
|
#ifndef _ARM_GUEST_ATOMICS_H
#define _ARM_GUEST_ATOMICS_H
#include <xen/bitops.h>
#include <xen/sched.h>
/*
 * The guest atomics helpers all share the same logic: we first try the
 * *_timeout variant of the operation. If it doesn't time out, then we
 * successfully updated the memory. Nothing else to do.
 *
 * If it did time out, then we didn't manage to update the memory. This
 * is possibly because the guest is misbehaving (e.g. a tight store
 * loop), but it can also happen for other reasons (e.g. nested Xen).
 * In that case, pause the domain and retry the operation, this time
 * without a timeout.
 *
 * Note that these helpers rely on other parts of the code to prevent
 * sharing a page between Xen and multiple domains.
 */
DECLARE_PER_CPU(unsigned int, guest_safe_atomic_max);
/*
 * Generate guest_set_bit()/guest_clear_bit()/guest_change_bit():
 * a bounded-retry (*_timeout) attempt first; if that gives up, pause
 * the domain and redo the operation without a bound.
 */
#define guest_bitop(name)                                                   \
static inline void guest_##name(struct domain *d, int nr, volatile void *p) \
{                                                                           \
    perfc_incr(atomics_guest);                                              \
                                                                            \
    /* Fast path: the bounded attempt completed, memory is updated. */      \
    if ( !name##_timeout(nr, p, this_cpu(guest_safe_atomic_max)) )          \
    {                                                                       \
        perfc_incr(atomics_guest_paused);                                   \
                                                                            \
        /* Slow path: with the domain paused the store cannot be            \
         * contended, so the unbounded variant is safe. */                  \
        domain_pause_nosync(d);                                             \
        name(nr, p);                                                        \
        domain_unpause(d);                                                  \
    }                                                                       \
}
/*
 * Generate guest_test_and_set/clear/change_bit(): same bounded-then-
 * pause strategy as guest_bitop(), but returning the bit's old value.
 */
#define guest_testop(name)                                                  \
static inline int guest_##name(struct domain *d, int nr, volatile void *p)  \
{                                                                           \
    int old;                                                                \
                                                                            \
    perfc_incr(atomics_guest);                                              \
                                                                            \
    /* Fast path: the bounded attempt finished; old holds the result. */    \
    if ( name##_timeout(nr, p, &old, this_cpu(guest_safe_atomic_max)) )     \
        return old;                                                         \
                                                                            \
    perfc_incr(atomics_guest_paused);                                       \
                                                                            \
    /* Slow path: pause the domain and retry without a bound. */            \
    domain_pause_nosync(d);                                                 \
    old = name(nr, p);                                                      \
    domain_unpause(d);                                                      \
                                                                            \
    return old;                                                             \
}
/* Instantiate the plain bit-operation helpers. */
guest_bitop(set_bit)
guest_bitop(clear_bit)
guest_bitop(change_bit)

#undef guest_bitop

/* test_bit does not use load-store atomic operations */
#define guest_test_bit(d, nr, p) ((void)(d), test_bit(nr, p))

/* Instantiate the test-and-modify helpers (return the old bit value). */
guest_testop(test_and_set_bit)
guest_testop(test_and_clear_bit)
guest_testop(test_and_change_bit)

#undef guest_testop
/*
 * Atomically clear the bits in @mask at the guest-shared 16-bit
 * location @p.
 *
 * Same strategy as the generated helpers above: try the bounded
 * (*_timeout) variant first; on timeout, pause @d and redo the clear
 * without a bound.
 */
static inline void guest_clear_mask16(struct domain *d, uint16_t mask,
                                      volatile uint16_t *p)
{
    perfc_incr(atomics_guest);

    if ( clear_mask16_timeout(mask, p, this_cpu(guest_safe_atomic_max)) )
        return;

    /*
     * Fix: account the slow path here too — every other helper in this
     * file bumps atomics_guest_paused before pausing, and omitting it
     * makes the perf counter under-report.
     */
    perfc_incr(atomics_guest_paused);

    domain_pause_nosync(d);
    clear_mask16(mask, p);
    domain_unpause(d);
}
/*
 * Compare-and-exchange on guest-shared memory of @size bytes.
 *
 * Bounded attempt first; on timeout, pause the domain and retry
 * unbounded. Returns the value previously held in memory.
 */
static always_inline unsigned long __guest_cmpxchg(struct domain *d,
                                                   volatile void *ptr,
                                                   unsigned long old,
                                                   unsigned long new,
                                                   unsigned int size)
{
    unsigned long seen = old;

    perfc_incr(atomics_guest);

    /* Fast path: the bounded attempt completed; seen holds the old value. */
    if ( __cmpxchg_timeout(ptr, &seen, new, size,
                           this_cpu(guest_safe_atomic_max)) )
        return seen;

    perfc_incr(atomics_guest_paused);

    /* Slow path: with the domain paused the exchange cannot be starved. */
    domain_pause_nosync(d);
    seen = __cmpxchg(ptr, old, new, size);
    domain_unpause(d);

    return seen;
}
/* Type-preserving front end: casts operands for __guest_cmpxchg() and
 * casts the result back to the pointed-to type. */
#define guest_cmpxchg(d, ptr, o, n)                         \
    ((__typeof__(*(ptr)))__guest_cmpxchg(d, ptr,            \
                                         (unsigned long)(o),\
                                         (unsigned long)(n),\
                                         sizeof (*(ptr))))
/*
 * 64-bit compare-and-exchange on guest-shared memory.
 *
 * Same bounded-then-pause strategy as __guest_cmpxchg(), using the
 * dedicated 64-bit primitives. Returns the value previously in memory.
 */
static inline uint64_t guest_cmpxchg64(struct domain *d,
                                       volatile uint64_t *ptr,
                                       uint64_t old,
                                       uint64_t new)
{
    uint64_t seen = old;

    perfc_incr(atomics_guest);

    /* Fast path: bounded attempt finished; seen holds the old value. */
    if ( __cmpxchg64_timeout(ptr, &seen, new,
                             this_cpu(guest_safe_atomic_max)) )
        return seen;

    perfc_incr(atomics_guest_paused);

    /* Slow path: pause the domain and retry without a bound. */
    domain_pause_nosync(d);
    seen = cmpxchg64(ptr, old, new);
    domain_unpause(d);

    return seen;
}
#endif /* _ARM_GUEST_ATOMICS_H */
/*
* Local variables:
* mode: C
* c-file-style: "BSD"
* c-basic-offset: 4
* indent-tabs-mode: nil
* End:
*/
|