/* SPDX-License-Identifier: GPL-2.0-or-later */
/******************************************************************************
* arch/x86/pv/traps.c
*
* PV low level entry points.
*
* Copyright (c) 2017 Citrix Systems Ltd.
 */

#include <xen/event.h>
#include <xen/hypercall.h>
#include <xen/lib.h>
#include <xen/softirq.h>

#include <asm/pv/trace.h>
#include <asm/shared.h>
#include <asm/traps.h>

#include <irq_vectors.h>
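
/*
 * Inject @event into the current vCPU: fill in the vCPU's trap_bounce so
 * that the exit-to-guest path re-enters the guest at the handler it
 * registered for this vector.
 */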
void pv_inject_event(const struct x86_event *event)
{
struct vcpu *curr = current;
struct cpu_user_regs *regs = guest_cpu_user_regs();
struct trap_bounce *tb;
const struct trap_info *ti;
const uint8_t vector = event->vector;
unsigned int error_code = event->error_code;
    bool use_error_code;

    ASSERT(vector == event->vector); /* Confirm no truncation. */
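
    /*
     * Hardware exceptions must be in the range 0-31 and may carry an error
     * code, per the X86_EXC_HAVE_EC bitmap; software interrupts never
     * carry one.
     */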
if ( event->type == X86_EVENTTYPE_HW_EXCEPTION )
{
ASSERT(vector < 32);
use_error_code = X86_EXC_HAVE_EC & (1u << vector);
}
else
{
ASSERT(event->type == X86_EVENTTYPE_SW_INTERRUPT);
use_error_code = false;
}
if ( use_error_code )
ASSERT(error_code != X86_EVENT_NO_EC);
else
ASSERT(error_code == X86_EVENT_NO_EC);
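
    /*
     * Look up the handler the guest registered for this vector (via the
     * set_trap_table hypercall) and prepare the bounce frame targeting it.
     */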
    tb = &curr->arch.pv.trap_bounce;
    ti = &curr->arch.pv.trap_ctxt[vector];

    tb->flags = TBF_EXCEPTION;
    tb->cs    = ti->cs;
    tb->eip   = ti->address;

if ( event->type == X86_EVENTTYPE_HW_EXCEPTION &&
vector == X86_EXC_PF )
{
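        /*
         * For page faults, make the faulting address visible to the guest
         * through its virtual %cr2: both Xen's cached copy and the
         * guest-visible one are updated.
         */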
curr->arch.pv.ctrlreg[2] = event->cr2;
arch_set_cr2(curr, event->cr2);

        /* Re-adjust error_code.user flag appropriately for the guest. */
error_code &= ~PFEC_user_mode;
if ( !guest_kernel_mode(curr, regs) )
error_code |= PFEC_user_mode;
trace_pv_page_fault(event->cr2, error_code);
}
else
trace_pv_trap(vector, regs->rip, use_error_code, error_code);
if ( use_error_code )
{
tb->flags |= TBF_EXCEPTION_ERRCODE;
tb->error_code = error_code;
}
if ( TI_GET_IF(ti) )
tb->flags |= TBF_INTERRUPT;
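
    /*
     * A NULL callback address means the guest never registered a handler
     * for this vector; log details for diagnosis, since the event cannot
     * be delivered normally.
     */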
if ( unlikely(null_trap_bounce(curr, tb)) )
{
gprintk(XENLOG_ERR,
"Unhandled: vec %u, %s[%04x]\n",
vector, vector_name(vector), error_code);
if ( vector == X86_EXC_PF )
show_page_walk(event->cr2);
}
}

/*
 * Called from asm to set up the MCE trapbounce info.
 * Returns false if no callback is set up, else true.
 */
bool set_guest_machinecheck_trapbounce(void)
{
struct vcpu *curr = current;
struct trap_bounce *tb = &curr->arch.pv.trap_bounce;
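
    /*
     * Reuse the regular injection logic to look up the guest's #MC handler
     * and populate the bounce frame.  TBF_EXCEPTION is cleared again, as it
     * is not wanted on the asm-driven MCE delivery path.
     */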
pv_inject_hw_exception(X86_EXC_MC, X86_EVENT_NO_EC);
    tb->flags &= ~TBF_EXCEPTION; /* not needed for MCE delivery path */

    return !null_trap_bounce(curr, tb);
}

/*
* Called from asm to set up the NMI trapbounce info.
* Returns false if no callback is set up, else true.
*/
bool set_guest_nmi_trapbounce(void)
{
struct vcpu *curr = current;
    struct trap_bounce *tb = &curr->arch.pv.trap_bounce;

    pv_inject_hw_exception(X86_EXC_NMI, X86_EVENT_NO_EC);
    tb->flags &= ~TBF_EXCEPTION; /* not needed for NMI delivery path */

    return !null_trap_bounce(curr, tb);
}

static DEFINE_PER_CPU(struct vcpu *, softirq_nmi_vcpu);
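
/*
 * Deferred (softirq) half of pv_raise_nmi(): perform the vcpu wakeup,
 * which is not safe in NMI context, and release the per-CPU slot.
 */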
static void cf_check nmi_softirq(void)
{
    struct vcpu **v_ptr = &this_cpu(softirq_nmi_vcpu);

    BUG_ON(*v_ptr == NULL);

/*
* Only used to defer wakeup of domain/vcpu to a safe (non-NMI)
* context.
*/
vcpu_kick(*v_ptr);
*v_ptr = NULL;
}

void __init pv_trap_init(void)
{
#ifdef CONFIG_PV32
/* The 32-on-64 hypercall vector is only accessible from ring 1. */
_set_gate(idt_table + HYPERCALL_VECTOR,
SYS_DESC_trap_gate, 1, entry_int82);
#endif

    /* Fast trap for int80 (faster than taking the #GP-fixup path). */
_set_gate(idt_table + LEGACY_SYSCALL_VECTOR, SYS_DESC_trap_gate, 3,
              &int80_direct_trap);

    open_softirq(NMI_SOFTIRQ, nmi_softirq);
}

/*
* Deliver NMI to PV guest. Return 0 on success.
* Called in NMI context, so no use of printk().
*/
int pv_raise_nmi(struct vcpu *v)
{
struct vcpu **v_ptr = &per_cpu(softirq_nmi_vcpu, smp_processor_id());
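
    /*
     * Atomically claim this CPU's deferral slot; a non-NULL value means an
     * earlier NMI is still awaiting softirq processing here.
     */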
if ( cmpxchgptr(v_ptr, NULL, v) )
return -EBUSY;
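
    /*
     * Mark the NMI pending for the vcpu.  If the flag was already set, the
     * guest has yet to consume the previous NMI, so back out and fail.
     */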
if ( !test_and_set_bool(v->arch.nmi_pending) )
{
/* Not safe to wake up a vcpu here */
raise_softirq(NMI_SOFTIRQ);
return 0;
}
    *v_ptr = NULL;

    /* Delivery failed: the vcpu already had an NMI pending. */
    return -EIO;
}

/*
* Local variables:
* mode: C
* c-file-style: "BSD"
* c-basic-offset: 4
* tab-width: 4
* indent-tabs-mode: nil
* End:
*/