/*
* entry.S: SVM architecture-specific entry/exit handling.
* Copyright (c) 2005-2007, Advanced Micro Devices, Inc.
* Copyright (c) 2004, Intel Corporation.
* Copyright (c) 2008, Citrix Systems, Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; If not, see <http://www.gnu.org/licenses/>.
*/
.file "svm/entry.S"
#include <asm/asm_defns.h>
#include <asm/page.h>
#include <asm/spec_ctrl_asm.h>
ENTRY(svm_asm_do_resume)
GET_CURRENT(bx)
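/* %rbx == current vcpu, and must remain intact all the way to VMRUN. */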
.Lsvm_do_resume:
call svm_intr_assist
call nsvm_vcpu_switch
ASSERT_NOT_IN_ATOMIC
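/*
* Check for softirqs pending on this CPU with interrupts disabled, so
* none can be raised between the test and VMRUN. %ecx is zeroed here
* and doubles as the zero operand for the comparisons below.
*/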
mov VCPU_processor(%rbx),%eax
lea irq_stat+IRQSTAT_softirq_pending(%rip),%rdx
xor %ecx,%ecx
shl $IRQSTAT_shift,%eax
cli
cmp %ecx,(%rdx,%rax,1)
jne .Lsvm_process_softirqs
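/*
* Nested virtualisation with HAP only: if this vcpu is in guest mode,
* make sure its nested p2m is still in place before entering the guest.
*/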
cmp %cl,VCPU_nsvm_hap_enabled(%rbx)
UNLIKELY_START(ne, nsvm_hap)
cmp %rcx,VCPU_nhvm_p2m(%rbx)
sete %al
test VCPU_nhvm_guestmode(%rbx),%al
UNLIKELY_DONE(z, nsvm_hap)
/*
* Someone shot down our nested p2m table; go round again
* and nsvm_vcpu_switch() will fix it for us.
*/
sti
jmp .Lsvm_do_resume
__UNLIKELY_END(nsvm_hap)
call svm_vmenter_helper
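/* Clear GIF: no interrupt, NMI or SMI can be taken from here to VMRUN. */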
clgi
/* WARNING! `ret`, `call *`, `jmp *` not safe beyond this point. */
/* SPEC_CTRL_EXIT_TO_SVM Req: b=curr %rsp=regs/cpuinfo, Clob: acd */
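/*
* Switch MSR_SPEC_CTRL to the guest's choice of value, skipping the
* expensive WRMSR when it already matches the last value written on
* this CPU.
*/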
.macro svm_vmentry_spec_ctrl
mov VCPU_arch_msrs(%rbx), %rax
movzbl CPUINFO_last_spec_ctrl(%rsp), %edx
mov VCPUMSR_spec_ctrl_raw(%rax), %eax
cmp %edx, %eax
je 1f /* Skip write if value is correct. */
mov $MSR_SPEC_CTRL, %ecx
xor %edx, %edx
wrmsr
mov %al, CPUINFO_last_spec_ctrl(%rsp)
1: /* No Spectre v1 concerns. Execution will hit VMRUN imminently. */
.endm
ALTERNATIVE "", svm_vmentry_spec_ctrl, X86_FEATURE_SC_MSR_HVM
pop %r15
pop %r14
pop %r13
pop %r12
pop %rbp
mov VCPU_svm_vmcb_pa(%rbx),%rax
pop %rbx
pop %r11
pop %r10
pop %r9
pop %r8
pop %rcx /* Skip %rax: restored by VMRUN. */
pop %rcx
pop %rdx
pop %rsi
pop %rdi
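/*
* Set IF in the host RFLAGS image which VMRUN saves. GIF is still
* clear, so interrupts stay blocked until the STGI on the exit path.
*/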
sti
vmrun
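/*
* #VMEXIT lands here with GIF clear. Spill the guest's GPRs back into
* the cpu_user_regs frame; the guest's %rax is recovered from the VMCB
* by the exit handler.
*/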
SAVE_ALL
GET_CURRENT(bx)
/* SPEC_CTRL_ENTRY_FROM_SVM Req: %rsp=regs/cpuinfo, %rdx=0 Clob: acd */
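/*
* If IBPB-on-entry is active, issue a barrier to flush branch
* prediction state learnt while the guest was running.
*/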
.macro svm_vmexit_cond_ibpb
testb $SCF_entry_ibpb, CPUINFO_xen_spec_ctrl(%rsp)
jz .L_skip_ibpb
mov $MSR_PRED_CMD, %ecx
mov $PRED_CMD_IBPB, %eax
wrmsr
.L_skip_ibpb:
.endm
ALTERNATIVE "", svm_vmexit_cond_ibpb, X86_FEATURE_IBPB_ENTRY_HVM
ALTERNATIVE "", DO_OVERWRITE_RSB, X86_FEATURE_SC_RSB_HVM
.macro svm_vmexit_spec_ctrl
movzbl CPUINFO_xen_spec_ctrl(%rsp), %eax
movzbl CPUINFO_last_spec_ctrl(%rsp), %edx
cmp %edx, %eax
je 1f /* Skip write if value is correct. */
mov $MSR_SPEC_CTRL, %ecx
xor %edx, %edx
wrmsr
mov %al, CPUINFO_last_spec_ctrl(%rsp)
1:
.endm
ALTERNATIVE "", svm_vmexit_spec_ctrl, X86_FEATURE_SC_MSR_HVM
/* WARNING! `ret`, `call *`, `jmp *` not safe before this point. */
/*
* STGI is executed unconditionally, and is sufficiently serialising
* to safely resolve any Spectre-v1 concerns in the above logic.
*/
stgi
GLOBAL(svm_stgi_label)
call svm_vmexit_handler
jmp .Lsvm_do_resume
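/* Softirqs were pending: service them with interrupts enabled, then retry. */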
.Lsvm_process_softirqs:
sti
call do_softirq
jmp .Lsvm_do_resume
.type svm_asm_do_resume, @function
.size svm_asm_do_resume, . - svm_asm_do_resume