path: root/xen/arch/x86/hvm/nestedhvm.c
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Nested HVM
 * Copyright (c) 2011, Advanced Micro Devices, Inc.
 * Author: Christoph Egger <Christoph.Egger@amd.com>
 */

#include <asm/msr.h>
#include <asm/hvm/support.h>
#include <asm/hvm/hvm.h>
#include <asm/p2m.h>    /* for struct p2m_domain */
#include <asm/hvm/nestedhvm.h>
#include <asm/event.h>  /* for local_event_delivery_(en|dis)able */
#include <asm/paging.h> /* for paging_mode_hap() */

static unsigned long *shadow_io_bitmap[3];

/* Nested VCPU */
bool_t
nestedhvm_vcpu_in_guestmode(struct vcpu *v)
{
    return vcpu_nestedhvm(v).nv_guestmode;
}

void
nestedhvm_vcpu_reset(struct vcpu *v)
{
    struct nestedvcpu *nv = &vcpu_nestedhvm(v);

    nv->nv_vmentry_pending = 0;
    nv->nv_vmexit_pending = 0;
    nv->nv_vmswitch_in_progress = 0;
    nv->nv_ioport80 = 0;
    nv->nv_ioportED = 0;

    hvm_unmap_guest_frame(nv->nv_vvmcx, 1);
    nv->nv_vvmcx = NULL;
    nv->nv_vvmcxaddr = INVALID_PADDR;
    nv->nv_flushp2m = 0;
    nv->nv_p2m = NULL;
    nv->stale_np2m = false;
    nv->np2m_generation = 0;

    hvm_asid_flush_vcpu_asid(&nv->nv_n2asid);

    alternative_vcall(hvm_funcs.nhvm_vcpu_reset, v);

    /* vcpu is in host mode */
    nestedhvm_vcpu_exit_guestmode(v);
}

int
nestedhvm_vcpu_initialise(struct vcpu *v)
{
    int rc;

    if ( !shadow_io_bitmap[0] )
        return -ENOMEM;

    rc = alternative_call(hvm_funcs.nhvm_vcpu_initialise, v);
    if ( rc )
        return rc;

    nestedhvm_vcpu_reset(v);
    return 0;
}

void
nestedhvm_vcpu_destroy(struct vcpu *v)
{
    alternative_vcall(hvm_funcs.nhvm_vcpu_destroy, v);
}

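/*
 * IPI handler: if the vCPU currently running on this pCPU belongs to the
 * target domain, flush the pCPU's ASIDs and invalidate the vCPU's cached
 * nested p2m.
 */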
static void cf_check nestedhvm_flushtlb_ipi(void *info)
{
    struct vcpu *v = current;
    struct domain *d = info;

    ASSERT(d != NULL);
    if ( v->domain != d )
    {
        /* This pCPU doesn't belong to the domain. */
        return;
    }

    /*
     * Just flush the ASID (or request a new one).  This is cheaper than
     * flush_tlb_local() and has the same desired effect.
     */
    hvm_asid_flush_core();
    vcpu_nestedhvm(v).nv_p2m = NULL;
    vcpu_nestedhvm(v).stale_np2m = true;
}

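/*
 * Ask every pCPU that has run on this nested p2m to drop its cached
 * state, then clear the dirty mask.
 */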
void
nestedhvm_vmcx_flushtlb(struct p2m_domain *p2m)
{
    on_selected_cpus(p2m->dirty_cpumask, nestedhvm_flushtlb_ipi,
                     p2m->domain, 1);
    cpumask_clear(p2m->dirty_cpumask);
}

/* Common shadow IO Permission bitmap */

/*
 * There are four global io bitmap patterns that each guest can choose
 * from, depending on whether io ports 0x80 and/or 0xED are intercepted
 * (shown in the table below).
 * The users of the bitmap patterns are in SVM/VMX specific code.
 *
 * bitmap        port 0x80  port 0xed
 * hvm_io_bitmap cleared    cleared
 * iomap[0]      cleared    set
 * iomap[1]      set        cleared
 * iomap[2]      set        set
 */

static int __init cf_check nestedhvm_setup(void)
{
    /* Same format and size as hvm_io_bitmap (Intel needs only 2 pages). */
    unsigned nr = cpu_has_vmx ? 2 : 3;
    unsigned int i, order = get_order_from_pages(nr);

    if ( !hvm_funcs.name )
        return 0;

    /*
     * The shadow_io_bitmaps can't be declared static because they must
     * fulfill hardware requirements (page-aligned allocation), and
     * declaring them static triggers the ASSERT(va >= XEN_VIRT_START)
     * in __virt_to_maddr().
     *
     * So as a compromise, pre-allocate them when Xen boots.  This
     * function must be called from within start_xen(), when it is
     * valid to use _xmalloc().
     */

    for ( i = 0; i < ARRAY_SIZE(shadow_io_bitmap); i++ )
    {
        shadow_io_bitmap[i] = alloc_xenheap_pages(order, 0);
        if ( !shadow_io_bitmap[i] )
        {
            while ( i-- )
            {
                free_xenheap_pages(shadow_io_bitmap[i], order);
                shadow_io_bitmap[i] = NULL;
            }
            return -ENOMEM;
        }
        memset(shadow_io_bitmap[i], ~0U, nr << PAGE_SHIFT);
    }

    __clear_bit(0x80, shadow_io_bitmap[0]);
    __clear_bit(0xed, shadow_io_bitmap[1]);

    return 0;
}
__initcall(nestedhvm_setup);

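/*
 * Return the io bitmap matching the required interception of ports 0x80
 * and 0xED (see the table above).  If direct guest access to port 0x80
 * isn't allowed, interception of that port is forced on.
 */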
unsigned long *
nestedhvm_vcpu_iomap_get(bool_t port_80, bool_t port_ed)
{
    int i;

    if ( !hvm_port80_allowed )
        port_80 = 1;

    if ( port_80 == 0 )
    {
        if ( port_ed == 0 )
            return hvm_io_bitmap;
        i = 0;
    }
    else
    {
        if ( port_ed == 0 )
            i = 1;
        else
            i = 2;
    }

    return shadow_io_bitmap[i];
}