/*
 * entry.S: SVM architecture-specific entry/exit handling.
 * Copyright (c) 2005-2007, Advanced Micro Devices, Inc.
 * Copyright (c) 2004, Intel Corporation.
 * Copyright (c) 2008, Citrix Systems, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; If not, see <http://www.gnu.org/licenses/>.
 */
        .file "svm/entry.S"

#include <xen/lib.h>

#include <asm/asm_defns.h>
#include <asm/page.h>

ENTRY(svm_asm_do_resume)
        GET_CURRENT(bx)
.Lsvm_do_resume:
        call svm_intr_assist
        call nsvm_vcpu_switch
        ASSERT_NOT_IN_ATOMIC
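
        /*
         * Check for pending softirqs on this CPU: %rdx/%rax index the
         * per-CPU softirq_pending word (irq_stat[] indexed by
         * VCPU_processor << IRQSTAT_shift), which is compared against zero.
         * Interrupt delivery stays blocked from here (CLI, and later CLGI)
         * until the guest is entered, so the check cannot race with a newly
         * raised softirq.
         */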
        mov VCPU_processor(%rbx),%eax
        lea irq_stat+IRQSTAT_softirq_pending(%rip),%rdx
        xor %ecx,%ecx
        shl $IRQSTAT_shift,%eax
        cli
        cmp %ecx,(%rdx,%rax,1)
        jne .Lsvm_process_softirqs
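
        /*
         * Nested HAP check (%ecx is still zero here): if nested HAP is
         * enabled for this vCPU, and it is in nested guest mode with a NULL
         * nested p2m pointer, restart the resume loop (see the comment
         * inside the unlikely block).
         */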
        cmp %cl,VCPU_nsvm_hap_enabled(%rbx)
UNLIKELY_START(ne, nsvm_hap)
        cmp %rcx,VCPU_nhvm_p2m(%rbx)
        sete %al
        test VCPU_nhvm_guestmode(%rbx),%al
        UNLIKELY_DONE(z, nsvm_hap)
        /*
         * Someone shot down our nested p2m table; go round again
         * and nsvm_vcpu_switch() will fix it for us.
         */
        sti
        jmp .Lsvm_do_resume
__UNLIKELY_END(nsvm_hap)

        call svm_vmenter_helper
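
        /*
         * Clear the Global Interrupt Flag: no interrupts or NMIs can be
         * taken between here and VMRUN, so the state set up above cannot
         * change under our feet.
         */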
        clgi

        /* WARNING! `ret`, `call *`, `jmp *` not safe beyond this point. */

        /* SPEC_CTRL_EXIT_TO_SVM Req: b=curr %rsp=regs/cpuinfo, Clob: acd */
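        /*
         * Switch MSR_SPEC_CTRL to the guest's value: load
         * VCPUMSR_spec_ctrl_raw via the VCPU_arch_msrs pointer and write it
         * only if it differs from the last value written on this CPU
         * (cached in CPUINFO_last_spec_ctrl), avoiding a redundant WRMSR.
         */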
        .macro svm_vmentry_spec_ctrl
            mov VCPU_arch_msrs(%rbx), %rax
            movzbl CPUINFO_last_spec_ctrl(%rsp), %edx
            mov VCPUMSR_spec_ctrl_raw(%rax), %eax
            cmp %edx, %eax
            je 1f /* Skip write if value is correct. */
            mov $MSR_SPEC_CTRL, %ecx
            xor %edx, %edx
            wrmsr
            mov %al, CPUINFO_last_spec_ctrl(%rsp)
1:          /* No Spectre v1 concerns. Execution will hit VMRUN imminently. */
        .endm
        ALTERNATIVE "", svm_vmentry_spec_ctrl, X86_FEATURE_SC_MSR_HVM
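
        /*
         * Restore the guest's GPRs from the cpu_user_regs frame (reverse of
         * SAVE_ALL).  %rax is not popped: VMRUN takes the VMCB's physical
         * address in %rax and restores the guest's %rax from the VMCB.
         */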
        pop %r15
        pop %r14
        pop %r13
        pop %r12
        pop %rbp
        mov VCPU_svm_vmcb_pa(%rbx),%rax
        pop %rbx
        pop %r11
        pop %r10
        pop %r9
        pop %r8
        pop %rcx /* Skip %rax: restored by VMRUN. */
        pop %rcx
        pop %rdx
        pop %rsi
        pop %rdi

        sti
        vmrun
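
        /*
         * On #VMEXIT only %rax/%rsp/%rip/%rflags are switched back to host
         * values; the remaining GPRs still hold guest state, so spill them
         * into the cpu_user_regs frame before any further use.
         */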
        SAVE_ALL

        GET_CURRENT(bx)

        /* SPEC_CTRL_ENTRY_FROM_SVM Req: %rsp=regs/cpuinfo, %rdx=0 Clob: acd */
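        /*
         * If SCF_entry_ibpb is set, issue an IBPB (write PRED_CMD_IBPB to
         * MSR_PRED_CMD) straight after the VMExit, so that branch predictor
         * state trained by the guest is flushed before Xen executes any
         * indirect branches or returns.
         */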
        .macro svm_vmexit_cond_ibpb
            testb $SCF_entry_ibpb, CPUINFO_xen_spec_ctrl(%rsp)
            jz .L_skip_ibpb

            mov $MSR_PRED_CMD, %ecx
            mov $PRED_CMD_IBPB, %eax
            wrmsr
.L_skip_ibpb:
        .endm
        ALTERNATIVE "", svm_vmexit_cond_ibpb, X86_FEATURE_IBPB_ENTRY_HVM
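
        /*
         * Overwrite the Return Stack Buffer so that later `ret`s in Xen do
         * not consume return targets predicted from guest execution.
         */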
        ALTERNATIVE "", DO_OVERWRITE_RSB, X86_FEATURE_SC_RSB_HVM
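
        /*
         * Restore Xen's own MSR_SPEC_CTRL setting (CPUINFO_xen_spec_ctrl),
         * again skipping the WRMSR when the cached last-written value
         * already matches.
         */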
        .macro svm_vmexit_spec_ctrl
            movzbl CPUINFO_xen_spec_ctrl(%rsp), %eax
            movzbl CPUINFO_last_spec_ctrl(%rsp), %edx
            cmp %edx, %eax
            je 1f /* Skip write if value is correct. */
            mov $MSR_SPEC_CTRL, %ecx
            xor %edx, %edx
            wrmsr
            mov %al, CPUINFO_last_spec_ctrl(%rsp)
1:
        .endm
        ALTERNATIVE "", svm_vmexit_spec_ctrl, X86_FEATURE_SC_MSR_HVM

        /* WARNING! `ret`, `call *`, `jmp *` not safe before this point. */

        /*
         * STGI is executed unconditionally, and is sufficiently serialising
         * to safely resolve any Spectre-v1 concerns in the above logic.
         */
        stgi
GLOBAL(svm_stgi_label)
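        /* Handle the VMExit in C, then re-enter the guest via the resume path. */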
        call svm_vmexit_handler
        jmp .Lsvm_do_resume

.Lsvm_process_softirqs:
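        /*
         * Softirqs were pending: re-enable interrupts, service them, and
         * retry the resume path from the top.
         */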
        sti
        call do_softirq
        jmp .Lsvm_do_resume

        .type svm_asm_do_resume, @function
        .size svm_asm_do_resume, . - svm_asm_do_resume