1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
|
/*
* Helpers to emulate co-processor and system registers
*/
#ifndef __ASM_ARM_VREG__
#define __ASM_ARM_VREG__
/*
 * Callback used to emulate a 64-bit register access. On a write
 * (read == false), *r holds the value the guest is writing; on a read
 * (read == true), the handler stores the value to return in *r.
 * Returns true when the access was handled successfully.
 */
typedef bool (*vreg_reg64_fn_t)(struct cpu_user_regs *regs, uint64_t *r,
bool read);
/*
 * Same contract as vreg_reg64_fn_t, but for a native-width (register_t)
 * register access.
 */
typedef bool (*vreg_reg_fn_t)(struct cpu_user_regs *regs, register_t *r,
bool read);
/*
 * Emulate a trapped 32-bit co-processor access (MCR/MRC) by forwarding
 * it to @fn. Returns true when the access was handled.
 */
static inline bool vreg_emulate_cp32(struct cpu_user_regs *regs, union hsr hsr,
                                     vreg_reg_fn_t fn)
{
    const struct hsr_cp32 cp32 = hsr.cp32;
    /*
     * Start from zero so a buggy handler that forgets to set the value
     * cannot leak stale data back to the guest.
     */
    register_t val = 0;
    bool handled;

    /* On a write, fetch the guest register value to hand to the handler. */
    if ( !cp32.read )
        val = get_user_reg(regs, cp32.reg);

    handled = fn(regs, &val, cp32.read);

    /* On a successful read, propagate the emulated value to the guest. */
    if ( handled && cp32.read )
        set_user_reg(regs, cp32.reg, val);

    return handled;
}
/*
 * Emulate a trapped 64-bit co-processor access (MCRR/MRRC), where the
 * value is split across two general-purpose registers (reg1 = low half,
 * reg2 = high half). Returns true when the access was handled.
 */
static inline bool vreg_emulate_cp64(struct cpu_user_regs *regs, union hsr hsr,
                                     vreg_reg64_fn_t fn)
{
    const struct hsr_cp64 cp64 = hsr.cp64;
    /*
     * Start from zero so a buggy handler that forgets to set the value
     * cannot leak stale data back to the guest.
     */
    uint64_t val = 0;
    bool handled;

    /* On a write, recombine the two 32-bit halves from the guest. */
    if ( !cp64.read )
    {
        uint32_t lo = get_user_reg(regs, cp64.reg1);
        uint32_t hi = get_user_reg(regs, cp64.reg2);

        val = ((uint64_t)hi << 32) | lo;
    }

    handled = fn(regs, &val, cp64.read);

    /* On a successful read, split the result back across both registers. */
    if ( handled && cp64.read )
    {
        set_user_reg(regs, cp64.reg1, (uint32_t)val);
        set_user_reg(regs, cp64.reg2, val >> 32);
    }

    return handled;
}
#ifdef CONFIG_ARM_64
/*
 * Emulate a trapped AArch64 system-register access (MSR/MRS) by
 * forwarding it to @fn. Returns true when the access was handled.
 */
static inline bool vreg_emulate_sysreg(struct cpu_user_regs *regs, union hsr hsr,
                                       vreg_reg_fn_t fn)
{
    const struct hsr_sysreg sysreg = hsr.sysreg;
    /* Zero-initialised so a handler bug cannot leak stale data. */
    register_t val = 0;
    bool handled;

    /* On a write, fetch the value the guest is writing. */
    if ( !sysreg.read )
        val = get_user_reg(regs, sysreg.reg);

    handled = fn(regs, &val, sysreg.read);

    /* On a successful read, hand the emulated value back to the guest. */
    if ( handled && sysreg.read )
        set_user_reg(regs, sysreg.reg, val);

    return handled;
}
#endif
/*
 * Mask with the low ((1 << size) * 8) bits set, i.e. covering an access
 * of (1 << size) bytes.
 * NOTE(review): the shift is undefined if (1 << size) * 8 exceeds
 * BITS_PER_LONG; per the comment below, validating the size is the
 * caller's job — confirm all dabt.size producers respect this.
 */
#define VREG_REG_MASK(size) ((~0UL) >> (BITS_PER_LONG - ((1 << (size)) * 8)))
/*
* The check on the size supported by the register has to be done by
* the caller of vreg_regN_*.
*
* Note that the alignment fault will always be taken in the guest
* (see B3.12.7 DDI0406.b).
*/
/* N-bit register helpers */
/*
 * VREG_REG_HELPERS(sz, offmask) instantiates four accessors for an
 * emulated sz-bit register backed by a uint<sz>_t. The byte offset
 * within the register is taken from the low bits of the faulting guest
 * physical address (info->gpa & offmask) and the access width from
 * info->dabt.size, so sub-register accesses touch only the addressed
 * bytes:
 *
 *  - vreg_reg<sz>_extract():   return the addressed bytes of 'reg'.
 *  - vreg_reg<sz>_update():    replace the addressed bytes with 'val'.
 *  - vreg_reg<sz>_setbits():   OR 'bits' into the addressed bytes.
 *  - vreg_reg<sz>_clearbits(): clear 'bits' within the addressed bytes.
 */
#define VREG_REG_HELPERS(sz, offmask) \
static inline register_t vreg_reg##sz##_extract(uint##sz##_t reg, \
const mmio_info_t *info)\
{ \
unsigned int offset = info->gpa & (offmask); \
\
reg >>= 8 * offset; \
reg &= VREG_REG_MASK(info->dabt.size); \
\
return reg; \
} \
\
static inline void vreg_reg##sz##_update(uint##sz##_t *reg, \
register_t val, \
const mmio_info_t *info) \
{ \
unsigned int offset = info->gpa & (offmask); \
uint##sz##_t mask = VREG_REG_MASK(info->dabt.size); \
unsigned int shift = offset * 8; \
\
*reg &= ~(mask << shift); \
*reg |= ((uint##sz##_t)val & mask) << shift; \
} \
\
static inline void vreg_reg##sz##_setbits(uint##sz##_t *reg, \
register_t bits, \
const mmio_info_t *info) \
{ \
unsigned int offset = info->gpa & (offmask); \
uint##sz##_t mask = VREG_REG_MASK(info->dabt.size); \
unsigned int shift = offset * 8; \
\
*reg |= ((uint##sz##_t)bits & mask) << shift; \
} \
\
static inline void vreg_reg##sz##_clearbits(uint##sz##_t *reg, \
register_t bits, \
const mmio_info_t *info) \
{ \
unsigned int offset = info->gpa & (offmask); \
uint##sz##_t mask = VREG_REG_MASK(info->dabt.size); \
unsigned int shift = offset * 8; \
\
*reg &= ~(((uint##sz##_t)bits & mask) << shift); \
}
/* Instantiate the helpers for 64-bit and 32-bit wide registers. */
VREG_REG_HELPERS(64, 0x7);
VREG_REG_HELPERS(32, 0x3);
#undef VREG_REG_HELPERS
#endif /* __ASM_ARM_VREG__ */
|