1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
|
/*
* arch/x86/hvm/monitor.c
*
* Arch-specific hardware virtual machine event abstractions.
*
* Copyright (c) 2004, Intel Corporation.
* Copyright (c) 2005, International Business Machines Corporation.
* Copyright (c) 2008, Citrix Systems, Inc.
* Copyright (c) 2016, Bitdefender S.R.L.
* Copyright (c) 2016, Tamas K Lengyel (tamas@tklengyel.com)
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; If not, see <http://www.gnu.org/licenses/>.
*/
#include <xen/vm_event.h>
#include <xen/mem_access.h>
#include <xen/monitor.h>
#include <asm/hvm/monitor.h>
#include <asm/hvm/nestedhvm.h>
#include <asm/altp2m.h>
#include <asm/monitor.h>
#include <asm/p2m.h>
#include <asm/paging.h>
#include <asm/vm_event.h>
#include <public/vm_event.h>
/*
 * If the vCPU is running in guest mode of a nested-HVM domain, mark the
 * request as pertaining to the nested p2m and record the nested p2m base
 * so the monitor can interpret the reported addresses.
 */
static void set_npt_base(struct vcpu *v, vm_event_request_t *req)
{
    if ( !nestedhvm_enabled(v->domain) || !nestedhvm_vcpu_in_guestmode(v) )
        return;

    req->flags |= VM_EVENT_FLAG_NESTED_P2M;
    req->data.regs.x86.npt_base = nhvm_vcpu_p2m_base(v);
}
bool hvm_monitor_cr(unsigned int index, unsigned long value, unsigned long old)
{
struct vcpu *curr = current;
struct arch_domain *ad = &curr->domain->arch;
unsigned int ctrlreg_bitmask = monitor_ctrlreg_bitmask(index);
if ( (ad->monitor.write_ctrlreg_enabled & ctrlreg_bitmask) &&
(!(ad->monitor.write_ctrlreg_onchangeonly & ctrlreg_bitmask) ||
value != old) &&
((value ^ old) & ~ad->monitor.write_ctrlreg_mask[index]) )
{
bool sync = ad->monitor.write_ctrlreg_sync & ctrlreg_bitmask;
vm_event_request_t req = {
.reason = VM_EVENT_REASON_WRITE_CTRLREG,
.u.write_ctrlreg.index = index,
.u.write_ctrlreg.new_value = value,
.u.write_ctrlreg.old_value = old
};
set_npt_base(curr, &req);
return monitor_traps(curr, sync, &req) >= 0 &&
curr->domain->arch.monitor.control_register_values;
}
return false;
}
bool hvm_monitor_emul_unimplemented(void)
{
struct vcpu *curr = current;
/*
* Send a vm_event to the monitor to signal that the current
* instruction couldn't be emulated.
*/
vm_event_request_t req = {
.reason = VM_EVENT_REASON_EMUL_UNIMPLEMENTED,
.vcpu_id = curr->vcpu_id,
};
set_npt_base(curr, &req);
return curr->domain->arch.monitor.emul_unimplemented_enabled &&
monitor_traps(curr, true, &req) == 1;
}
/*
 * Report a MOV-to-MSR to the monitor, if this MSR is monitored and the
 * write passes the on-change-only filter.
 *
 * Returns true when the event was sent successfully and the monitor is
 * configured to supply register values itself
 * (monitor.control_register_values); false otherwise.
 */
bool hvm_monitor_msr(unsigned int msr, uint64_t new_value, uint64_t old_value)
{
    struct vcpu *curr = current;

    if ( monitored_msr(curr->domain, msr) &&
         (!monitored_msr_onchangeonly(curr->domain, msr) ||
           new_value != old_value) )
    {
        vm_event_request_t req = {
            .reason = VM_EVENT_REASON_MOV_TO_MSR,
            .u.mov_to_msr.msr = msr,
            .u.mov_to_msr.new_value = new_value,
            .u.mov_to_msr.old_value = old_value
        };

        set_npt_base(curr, &req);

        /*
         * monitor_traps()'s second argument is a bool - pass 'true'
         * rather than the literal 1, matching the other call sites.
         * MSR write events are always delivered synchronously.
         */
        return monitor_traps(curr, true, &req) >= 0 &&
               curr->domain->arch.monitor.control_register_values;
    }

    return false;
}
/*
 * Send a (synchronous) descriptor-table access event to the monitor.
 * The raw instruction-info and exit-qualification values are only filled
 * in when running on VMX hardware.
 */
void hvm_monitor_descriptor_access(uint64_t exit_info,
                                   uint64_t vmx_exit_qualification,
                                   uint8_t descriptor, bool is_write)
{
    struct vcpu *curr = current;
    vm_event_request_t req = {};

    req.reason = VM_EVENT_REASON_DESCRIPTOR_ACCESS;
    req.u.desc_access.descriptor = descriptor;
    req.u.desc_access.is_write = is_write;

    if ( cpu_has_vmx )
    {
        req.u.desc_access.arch.vmx.instr_info = exit_info;
        req.u.desc_access.arch.vmx.exit_qualification = vmx_exit_qualification;
    }

    set_npt_base(curr, &req);

    monitor_traps(curr, true, &req);
}
static inline unsigned long gfn_of_rip(unsigned long rip)
{
struct vcpu *curr = current;
struct segment_register sreg;
uint32_t pfec = PFEC_page_present | PFEC_insn_fetch;
if ( hvm_get_cpl(curr) == 3 )
pfec |= PFEC_user_mode;
hvm_get_segment_register(curr, x86_seg_cs, &sreg);
return paging_gva_to_gfn(curr, sreg.base + rip, &pfec);
}
/*
 * Forward a debug-related event (software breakpoint, single step, or
 * debug exception) to the monitor, subject to the corresponding
 * per-domain monitor subscription.
 *
 * Return value (propagated from monitor_traps()):
 *   < 0  - error in monitor/vm_event, caller should crash the domain
 *   == 0 - continue normally
 *   > 0  - vCPU paused waiting for the monitor's response
 */
int hvm_monitor_debug(unsigned long rip, enum hvm_monitor_debug_type type,
                      unsigned int trap_type, unsigned int insn_length,
                      unsigned int pending_dbg)
{
    struct vcpu *curr = current;
    struct arch_domain *ad = &curr->domain->arch;
    vm_event_request_t req = {};
    bool sync = true; /* all cases but DEBUG_EXCEPTION are always sync */

    switch ( type )
    {
    case HVM_MONITOR_SOFTWARE_BREAKPOINT:
        if ( !ad->monitor.software_breakpoint_enabled )
            return 0;

        req.reason = VM_EVENT_REASON_SOFTWARE_BREAKPOINT;
        req.u.software_breakpoint.gfn = gfn_of_rip(rip);
        req.u.software_breakpoint.type = trap_type;
        req.u.software_breakpoint.insn_length = insn_length;
        break;

    case HVM_MONITOR_SINGLESTEP_BREAKPOINT:
        if ( !ad->monitor.singlestep_enabled )
            return 0;

        /*
         * Fast single-stepping raises no event: switch to the recorded
         * altp2m view and disarm single-stepping instead.
         */
        if ( curr->arch.hvm.fast_single_step.enabled )
        {
            p2m_altp2m_check(curr, curr->arch.hvm.fast_single_step.p2midx);
            curr->arch.hvm.single_step = false;
            curr->arch.hvm.fast_single_step.enabled = false;
            curr->arch.hvm.fast_single_step.p2midx = 0;
            return 0;
        }

        req.reason = VM_EVENT_REASON_SINGLESTEP;
        req.u.singlestep.gfn = gfn_of_rip(rip);
        break;

    case HVM_MONITOR_DEBUG_EXCEPTION:
        if ( !ad->monitor.debug_exception_enabled )
            return 0;

        req.reason = VM_EVENT_REASON_DEBUG_EXCEPTION;
        req.u.debug_exception.gfn = gfn_of_rip(rip);
        req.u.debug_exception.pending_dbg = pending_dbg;
        req.u.debug_exception.type = trap_type;
        req.u.debug_exception.insn_length = insn_length;
        sync = ad->monitor.debug_exception_sync;
        break;

    default:
        return -EOPNOTSUPP;
    }

    set_npt_base(curr, &req);

    return monitor_traps(curr, sync, &req);
}
int hvm_monitor_cpuid(unsigned long insn_length, unsigned int leaf,
unsigned int subleaf)
{
struct vcpu *curr = current;
struct arch_domain *ad = &curr->domain->arch;
vm_event_request_t req = {};
if ( !ad->monitor.cpuid_enabled )
return 0;
req.reason = VM_EVENT_REASON_CPUID;
req.u.cpuid.insn_length = insn_length;
req.u.cpuid.leaf = leaf;
req.u.cpuid.subleaf = subleaf;
set_npt_base(curr, &req);
return monitor_traps(curr, 1, &req);
}
/*
 * Report an interrupt/exception delivery to the monitor.  The event is
 * delivered synchronously; the return value of monitor_traps() is
 * deliberately ignored here, matching the void interface.
 */
void hvm_monitor_interrupt(unsigned int vector, unsigned int type,
                           unsigned int err, uint64_t cr2)
{
    struct vcpu *curr = current;
    vm_event_request_t req = {
        .reason = VM_EVENT_REASON_INTERRUPT,
        .u.interrupt.x86.vector = vector,
        .u.interrupt.x86.type = type,
        .u.interrupt.x86.error_code = err,
        .u.interrupt.x86.cr2 = cr2,
    };

    set_npt_base(curr, &req);

    /* Second argument is a bool - pass 'true' rather than the literal 1. */
    monitor_traps(curr, true, &req);
}
/*
* Send memory access vm_events based on pfec. Returns true if the event was
* sent and false for p2m_get_mem_access() error, no violation and event send
* error. Assumes the caller will enable/disable arch.vm_event->send_event.
*/
bool hvm_monitor_check_p2m(unsigned long gla, gfn_t gfn, uint32_t pfec,
                           uint16_t kind)
{
    xenmem_access_t access;
    struct vcpu *curr = current;
    vm_event_request_t req = {};
    /* Guest-physical address: the gfn's address plus the page offset of gla. */
    paddr_t gpa = (gfn_to_gaddr(gfn) | (gla & ~PAGE_MASK));
    int rc;

    ASSERT(curr->arch.vm_event->send_event);

    /*
     * p2m_get_mem_access() can fail from an invalid MFN and return -ESRCH
     * in which case access must be restricted.
     */
    rc = p2m_get_mem_access(curr->domain, gfn, &access, altp2m_vcpu_idx(curr));

    if ( rc == -ESRCH )
        access = XENMEM_access_n;
    else if ( rc )
        return false;

    /*
     * Translate the page-fault error code into the set of access rights
     * that were exercised but are not granted by 'access'.  Only the
     * denied rights end up in the event's flags.
     */
    switch ( access )
    {
    case XENMEM_access_x:      /* No write permission: flag writes. */
    case XENMEM_access_rx:
        if ( pfec & PFEC_write_access )
            req.u.mem_access.flags = MEM_ACCESS_R | MEM_ACCESS_W;
        break;

    case XENMEM_access_w:      /* No execute permission: flag insn fetches. */
    case XENMEM_access_rw:
        if ( pfec & PFEC_insn_fetch )
            req.u.mem_access.flags = MEM_ACCESS_X;
        break;

    case XENMEM_access_r:      /* Neither write nor execute permitted. */
    case XENMEM_access_n:
        if ( pfec & PFEC_write_access )
            req.u.mem_access.flags |= MEM_ACCESS_R | MEM_ACCESS_W;
        if ( pfec & PFEC_insn_fetch )
            req.u.mem_access.flags |= MEM_ACCESS_X;
        break;

    case XENMEM_access_wx:     /* Fully/conditionally permissive types: */
    case XENMEM_access_rwx:    /* nothing to flag here.                 */
    case XENMEM_access_rx2rw:
    case XENMEM_access_n2rwx:
    case XENMEM_access_default:
        break;
    }

    if ( !req.u.mem_access.flags )
        return false; /* no violation */

    /* Record how the guest-linear address relates to the fault, if known. */
    if ( kind == npfec_kind_with_gla )
        req.u.mem_access.flags |= MEM_ACCESS_FAULT_WITH_GLA |
                                  MEM_ACCESS_GLA_VALID;
    else if ( kind == npfec_kind_in_gpt )
        req.u.mem_access.flags |= MEM_ACCESS_FAULT_IN_GPT |
                                  MEM_ACCESS_GLA_VALID;

    req.reason = VM_EVENT_REASON_MEM_ACCESS;
    req.u.mem_access.gfn = gfn_x(gfn);
    req.u.mem_access.gla = gla;
    req.u.mem_access.offset = gpa & ~PAGE_MASK;

    set_npt_base(curr, &req);

    return monitor_traps(curr, true, &req) >= 0;
}
int hvm_monitor_vmexit(unsigned long exit_reason,
unsigned long exit_qualification)
{
struct vcpu *curr = current;
struct arch_domain *ad = &curr->domain->arch;
vm_event_request_t req = {};
ASSERT(ad->monitor.vmexit_enabled);
req.reason = VM_EVENT_REASON_VMEXIT;
req.u.vmexit.arch.vmx.reason = exit_reason;
req.u.vmexit.arch.vmx.qualification = exit_qualification;
set_npt_base(curr, &req);
return monitor_traps(curr, ad->monitor.vmexit_sync, &req);
}
/*
 * Report an I/O (IN/OUT, possibly string) instruction to the monitor,
 * if I/O monitoring is enabled for the domain.
 *
 * Returns monitor_traps()'s result: < 0 on error, 0 to continue
 * normally, > 0 when the vCPU was paused waiting for the monitor.
 */
int hvm_monitor_io(unsigned int port, unsigned int bytes,
                   bool in, bool str)
{
    struct vcpu *curr = current;
    struct arch_domain *ad = &curr->domain->arch;
    vm_event_request_t req;

    /*
     * Check the subscription before constructing the request;
     * previously the request was built even when I/O monitoring was
     * disabled.
     */
    if ( !ad->monitor.io_enabled )
        return 0;

    req = (vm_event_request_t){
        .reason = VM_EVENT_REASON_IO_INSTRUCTION,
        .u.io.bytes = bytes,
        .u.io.port = port,
        .u.io.in = in,
        .u.io.str = str,
    };

    set_npt_base(curr, &req);

    return monitor_traps(curr, true, &req);
}
/*
* Local variables:
* mode: C
* c-file-style: "BSD"
* c-basic-offset: 4
* tab-width: 4
* indent-tabs-mode: nil
* End:
*/
|