path: root/xen/arch/x86/hvm/svm/asid.c
blob: 09f8c23fd99ac7bee65e38ff044626132f94efad
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * asid.c: handling ASIDs in SVM.
 * Copyright (c) 2007, Advanced Micro Devices, Inc.
 */

#include <asm/amd.h>
#include <asm/hvm/nestedhvm.h>
#include <asm/hvm/svm/svm.h>

#include "svm.h"

void svm_asid_init(const struct cpuinfo_x86 *c)
{
    int nasids = 0;

    /* Check for erratum #170, and leave ASIDs disabled if it's present. */
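    /* CPUID leaf 0x8000000A, EBX reports the number of ASIDs supported. */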
    if ( !cpu_has_amd_erratum(c, AMD_ERRATUM_170) )
        nasids = cpuid_ebx(0x8000000A);

    hvm_asid_init(nasids);
}

/*
 * Called directly before VMRUN.  Checks whether the VCPU needs a new ASID,
 * assigns one if necessary, and requests any required TLB flush.
 */
void svm_asid_handle_vmrun(void)
{
    struct vcpu *curr = current;
    struct vmcb_struct *vmcb = curr->arch.hvm.svm.vmcb;
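    /* A nested (L2) guest runs with its own ASID, separate from the L1 ASID. */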
    struct hvm_vcpu_asid *p_asid =
        nestedhvm_vcpu_in_guestmode(curr)
        ? &vcpu_nestedhvm(curr).nv_n2asid : &curr->arch.hvm.n1asid;
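    /*
     * hvm_asid_handle_vmenter() assigns a fresh ASID when necessary and
     * reports whether the TLB must be flushed before entering the guest.
     */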
    bool need_flush = hvm_asid_handle_vmenter(p_asid);

    /* ASID 0 indicates that ASIDs are disabled. */
    if ( p_asid->asid == 0 )
    {
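        /*
         * Without ASID management every guest runs with ASID 1, so the TLB
         * must be flushed on every VMRUN to avoid reusing stale translations.
         */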
        vmcb_set_guest_asid(vmcb, 1);
        vmcb->tlb_control =
            cpu_has_svm_flushbyasid ? TLB_CTRL_FLUSH_ASID : TLB_CTRL_FLUSH_ALL;
        return;
    }

    if ( vmcb_get_guest_asid(vmcb) != p_asid->asid )
        vmcb_set_guest_asid(vmcb, p_asid->asid);

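    /*
     * Skip the flush when none is needed; otherwise prefer a flush by ASID
     * when the hardware supports it, falling back to a full TLB flush.
     */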
    vmcb->tlb_control =
        !need_flush ? TLB_CTRL_NO_FLUSH :
        cpu_has_svm_flushbyasid ? TLB_CTRL_FLUSH_ASID : TLB_CTRL_FLUSH_ALL;
}

/*
 * Local variables:
 * mode: C
 * c-file-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */