/* SPDX-License-Identifier: GPL-2.0 */
/*
 * arch/arm/guest_atomics.c
 */
#include <xen/cpu.h>

#include <asm/guest_atomics.h>
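/*
 * Per-CPU bound on how many times a load/store-exclusive sequence on
 * memory shared with a guest will be attempted before the caller gives
 * up; calibrate_safe_atomic() below fills it in.
 */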
DEFINE_PER_CPU_READ_MOSTLY(unsigned int, guest_safe_atomic_max);
/*
 * Heuristic to find a safe upper limit for load-store exclusive
 * operations on memory shared with a guest.
 *
 * At the moment, we count the number of iterations of a simple
 * load-store exclusive loop that complete in 1us.
 */
static void calibrate_safe_atomic(void)
{
    s_time_t deadline = NOW() + MICROSECS(1);
    unsigned int counter = 0;
    unsigned long mem = 0;

    do
    {
        unsigned long res, tmp;
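        /*
         * One exclusive load / increment / store-exclusive pass.  The
         * store-exclusive status lands in res but is not checked here:
         * only the number of passes completed before the deadline
         * matters.
         */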
#ifdef CONFIG_ARM_32
        asm volatile (" ldrex   %2, %1\n"
                      " add     %2, %2, #1\n"
                      " strex   %0, %2, %1\n"
                      : "=&r" (res), "+Q" (mem), "=&r" (tmp));
#else
        asm volatile (" ldxr    %w2, %1\n"
                      " add     %w2, %w2, #1\n"
                      " stxr    %w0, %w2, %1\n"
                      : "=&r" (res), "+Q" (mem), "=&r" (tmp));
#endif
        counter++;
    } while (NOW() < deadline);
    this_cpu(guest_safe_atomic_max) = counter;

    printk(XENLOG_DEBUG
           "CPU%u: Guest atomics will try %u times before pausing the domain\n",
           smp_processor_id(), counter);
}
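/*
 * CPU_STARTING is notified on the CPU that is coming online, so the
 * calibration runs on, and fills the counter of, each secondary CPU
 * as it boots.
 */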
static int cpu_guest_safe_atomic_callback(struct notifier_block *nfb,
                                          unsigned long action,
                                          void *hcpu)
{
    if ( action == CPU_STARTING )
        calibrate_safe_atomic();

    return NOTIFY_DONE;
}
static struct notifier_block cpu_guest_safe_atomic_nfb = {
    .notifier_call = cpu_guest_safe_atomic_callback,
};
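/*
 * Registered as a pre-SMP initcall: the boot CPU is calibrated here,
 * before secondary CPUs come up, and the notifier above covers every
 * CPU brought online afterwards.
 */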
static int __init guest_safe_atomic_init(void)
{
    register_cpu_notifier(&cpu_guest_safe_atomic_nfb);

    calibrate_safe_atomic();

    return 0;
}
presmp_initcall(guest_safe_atomic_init);
/*
 * Local variables:
 * mode: C
 * c-file-style: "BSD"
 * c-basic-offset: 4
 * indent-tabs-mode: nil
 * End:
 */
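#if 0 /* Illustrative sketch only; not built. */
/*
 * A minimal sketch of how the calibrated bound is typically consumed,
 * matching the message printed above: an exclusive update on guest
 * memory is retried up to guest_safe_atomic_max times and, if it still
 * has not succeeded, the domain is paused so a plain, non-exclusive
 * update cannot race with the guest.  set_bit_timeout() is a
 * hypothetical name for a bounded exclusive set_bit;
 * domain_pause_nosync()/domain_unpause() and set_bit() are real Xen
 * primitives.
 */
static inline void sketch_guest_set_bit(struct domain *d, int nr,
                                        volatile void *p)
{
    /* Bounded attempt: assumed to return true on success. */
    if ( set_bit_timeout(nr, p, this_cpu(guest_safe_atomic_max)) )
        return;

    /* Bound exhausted: stop the guest, do the update, resume the guest. */
    domain_pause_nosync(d);
    set_bit(nr, p);
    domain_unpause(d);
}
#endif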