#ifndef __ASM_DOMAIN_H__
#define __ASM_DOMAIN_H__

#include <xen/cache.h>
#include <xen/timer.h>
#include <asm/page.h>
#include <asm/p2m.h>
#include <asm/vfp.h>
#include <asm/mmio.h>
#include <asm/gic.h>
#include <asm/vgic.h>
#include <asm/vpl011.h>
#include <public/hvm/params.h>

struct hvm_domain
{
    uint64_t params[HVM_NR_PARAMS];
};

#ifdef CONFIG_ARM_64
enum domain_type {
    DOMAIN_32BIT,
    DOMAIN_64BIT,
};
#define is_32bit_domain(d) ((d)->arch.type == DOMAIN_32BIT)
#define is_64bit_domain(d) ((d)->arch.type == DOMAIN_64BIT)
#else
#define is_32bit_domain(d) (1)
#define is_64bit_domain(d) (0)
#endif

#define is_domain_direct_mapped(d) ((d)->cdf & CDF_directmap)

/*
 * Is the domain using the host memory layout?
 *
 * A direct-mapped domain will always have its RAM mapped with GFN == MFN.
 * To avoid any trouble finding free space, it is easier to force the use
 * of the host memory layout.
 *
 * The hardware domain will use the host layout regardless of whether it
 * is direct-mapped, because some OSes may rely on specific address ranges
 * for the devices.
 */
#define domain_use_host_layout(d) (is_domain_direct_mapped(d) || \
                                   is_hardware_domain(d))
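
/*
 * Illustrative sketch (an assumption, not part of the original header):
 * code placing guest RAM would typically branch on this predicate, e.g.
 *
 *     if ( domain_use_host_layout(d) )
 *         base = host_bank_base;   // hypothetical: mirror the host map
 *     else
 *         base = GUEST_RAM0_BASE;  // fixed guest memory map constant
 */
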
struct vtimer {
    struct vcpu *v;
    int irq;
    struct timer timer;
    register_t ctl;
    uint64_t cval;
};

struct paging_domain {
    spinlock_t lock;
    /* Free P2M pages from the pre-allocated P2M pool */
    struct page_list_head p2m_freelist;
    /* Number of pages from the pre-allocated P2M pool */
    unsigned long p2m_total_pages;
};

struct arch_domain
{
#ifdef CONFIG_ARM_64
    enum domain_type type;
#endif

    /* Virtual MMU */
    struct p2m_domain p2m;

    struct hvm_domain hvm;

    struct paging_domain paging;

    struct vmmio vmmio;

    /* Continuable domain_relinquish_resources(). */
    unsigned int rel_priv;

    /*
     * Base of the virtual timer: the hardware counter value (and its
     * nanosecond equivalent) captured when the domain was created.
     */
    struct {
        uint64_t offset;
        s_time_t nanoseconds;
    } virt_timer_base;

    struct vgic_dist vgic;

    struct vuart {
#define VUART_BUF_SIZE 128
        char *buf;
        int idx;
        const struct vuart_info *info;
        spinlock_t lock;
    } vuart;

    unsigned int evtchn_irq;

#ifdef CONFIG_ACPI
    void *efi_acpi_table;
    paddr_t efi_acpi_gpa;
    paddr_t efi_acpi_len;
#endif

    /* Monitor options */
    struct {
        uint8_t privileged_call_enabled : 1;
    } monitor;

#ifdef CONFIG_SBSA_VUART_CONSOLE
    struct vpl011 vpl011;
#endif

#ifdef CONFIG_TEE
    void *tee;
#endif
} __cacheline_aligned;

struct arch_vcpu
{
    struct {
#ifdef CONFIG_ARM_32
        register_t r4;
        register_t r5;
        register_t r6;
        register_t r7;
        register_t r8;
        register_t r9;
        register_t sl;
#else
        register_t x19;
        register_t x20;
        register_t x21;
        register_t x22;
        register_t x23;
        register_t x24;
        register_t x25;
        register_t x26;
        register_t x27;
        register_t x28;
#endif
        register_t fp;
        register_t sp;
        register_t pc;
    } saved_context;

    void *stack;

    /*
     * Points into ->stack, more convenient than doing pointer
     * arithmetic all the time.
     */
    struct cpu_info *cpu_info;

    /* Fault Status */
#ifdef CONFIG_ARM_32
    uint32_t dfsr;
    uint32_t dfar, ifar;
#else
    uint64_t far;
    uint32_t esr;
#endif

    uint32_t ifsr; /* 32-bit guests only */
    uint32_t afsr0, afsr1;

    /* MMU */
    register_t vbar;
    register_t ttbcr;
    uint64_t ttbr0, ttbr1;
    uint32_t dacr; /* 32-bit guests only */
    uint64_t par;

#ifdef CONFIG_ARM_32
    uint32_t mair0, mair1;
    uint32_t amair0, amair1;
#else
    uint64_t mair;
    uint64_t amair;
#endif

    /* Control Registers */
    register_t sctlr;
    register_t actlr;
    uint32_t cpacr;

    uint32_t contextidr;
    register_t tpidr_el0;
    register_t tpidr_el1;
    register_t tpidrro_el0;

    /* HYP configuration */
    register_t hcr_el2;
    register_t mdcr_el2;

    uint32_t teecr, teehbr; /* ThumbEE, 32-bit guests only */
#ifdef CONFIG_ARM_32
    /*
     * ARMv8 only supports a trivial implementation of Jazelle when in
     * AArch32 mode and therefore has no extended control registers.
     */
    uint32_t joscr, jmcr;
#endif

    /* Floating point */
    struct vfp_state vfp;

    /* CP 15 */
    uint32_t csselr;
    register_t vmpidr;

    /* Holds GIC context data */
    union gic_state_data gic;
    uint64_t lr_mask;

    struct vgic_cpu vgic;

    /* Timer registers */
    register_t cntkctl;

    struct vtimer phys_timer;
    struct vtimer virt_timer;
    bool vtimer_initialized;

    /*
     * The full P2M may require some cleaning (e.g. when emulating
     * set/way cache maintenance instructions). As the action can take
     * a long time, it requires preemption. It is deferred until we
     * return to the guest, where we can more easily check for softirqs
     * and preempt the vCPU safely (see the illustrative sketch after
     * this structure).
     */
    bool need_flush_to_ram;
} __cacheline_aligned;
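
/*
 * Illustrative sketch of how need_flush_to_ram is consumed on the
 * return-to-guest path (an assumption about the call site, not part of
 * this header):
 *
 *     if ( v->arch.need_flush_to_ram )
 *         p2m_flush_vm(v);   // cleans the P2M, checking for softirqs
 *
 * p2m_flush_vm() is the P2M helper performing the actual cleaning.
 */
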
void vcpu_show_registers(const struct vcpu *);
void vcpu_switch_to_aarch64_mode(struct vcpu *);

/*
 * Due to a GICv3 restriction, the number of vCPUs sharing an AFF0 group
 * is limited to 16, so only the first 4 bits of AFF0 are valid. We use
 * the first two affinity levels here, which allows up to 4096 (16 * 256)
 * vCPUs, more than the number of PEs a GIC-500 supports.
 *
 * Since we do not currently store the vCPU topology (affinity) in the
 * vMPIDR, we map the vcpuid to the vMPIDR linearly.
 */
static inline unsigned int vaffinity_to_vcpuid(register_t vaff)
{
    unsigned int vcpuid;

    vaff &= MPIDR_HWID_MASK;

    vcpuid = MPIDR_AFFINITY_LEVEL(vaff, 0);
    vcpuid |= MPIDR_AFFINITY_LEVEL(vaff, 1) << 4;

    return vcpuid;
}

static inline register_t vcpuid_to_vaffinity(unsigned int vcpuid)
{
    register_t vaff;

    /*
     * Right now only AFF0 and AFF1 are supported in virtual affinity.
     * Since only the first 4 bits in AFF0 are used in GICv3, the
     * available bits are 12 (4 + 8).
     */
    BUILD_BUG_ON(!(MAX_VIRT_CPUS < ((1 << 12))));
    vaff = (vcpuid & 0x0f) << MPIDR_LEVEL_SHIFT(0);
    vaff |= ((vcpuid >> 4) & MPIDR_LEVEL_MASK) << MPIDR_LEVEL_SHIFT(1);

    return vaff;
}
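
/*
 * Worked example: vcpuid 18 has AFF0 = 18 & 0xf = 2 and AFF1 = 18 >> 4
 * = 1, giving a virtual affinity value of 0x102 (AFF1 lives at bits
 * [15:8]). vaffinity_to_vcpuid() inverts this: 2 | (1 << 4) = 18.
 */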

static inline struct vcpu_guest_context *alloc_vcpu_guest_context(void)
{
    return xmalloc(struct vcpu_guest_context);
}

static inline void free_vcpu_guest_context(struct vcpu_guest_context *vgc)
{
    xfree(vgc);
}
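
/*
 * Illustrative usage (an assumption, not part of the original header):
 * xmalloc() returns NULL on failure, so a caller is expected to check:
 *
 *     struct vcpu_guest_context *ctxt = alloc_vcpu_guest_context();
 *
 *     if ( !ctxt )
 *         return -ENOMEM;
 *     ...
 *     free_vcpu_guest_context(ctxt);
 */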

static inline void arch_vcpu_block(struct vcpu *v) {}

#define arch_vm_assist_valid_mask(d) (1UL << VMASST_TYPE_runstate_update_flag)

/* vPCI is not available on Arm */
#define has_vpci(d) ({ (void)(d); false; })

struct arch_vcpu_io {
    /* Only valid when the data abort instruction has been decoded. */
    struct instr_details dabt_instr;
};

struct guest_memory_policy {};

static inline void update_guest_memory_policy(struct vcpu *v,
                                              struct guest_memory_policy *gmp)
{}

#endif /* __ASM_DOMAIN_H__ */

/*
* Local variables:
* mode: C
* c-file-style: "BSD"
* c-basic-offset: 4
* tab-width: 4
* indent-tabs-mode: nil
* End:
*/