1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
|
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* xen/arch/arm/vpci.c
*/
#include <xen/sched.h>
#include <xen/vpci.h>
#include <asm/mmio.h>
/*
 * Translate a guest physical address inside an ECAM window into an SBDF.
 *
 * @bridge: Physical host bridge owning the ECAM window (hardware domain),
 *          or NULL for a guest using the fixed virtual ECAM layout.
 * @gpa:    Guest physical address of the trapped config-space access.
 *
 * Returns the segment/bus/device/function addressed by @gpa.
 */
static pci_sbdf_t vpci_sbdf_from_gpa(const struct pci_host_bridge *bridge,
                                     paddr_t gpa)
{
    pci_sbdf_t sbdf;

    if ( bridge )
    {
        /* Offset into the bridge's physical ECAM window encodes the BDF. */
        sbdf.sbdf = VPCI_ECAM_BDF(gpa - bridge->cfg->phys_addr);
        sbdf.seg = bridge->segment;
        /*
         * NOTE(review): this relies on pci_sbdf_t being a union — the bus
         * field read-modified here aliases bits of the sbdf.sbdf value
         * assigned above. Rebase by the bridge's first bus number.
         */
        sbdf.bus += bridge->cfg->busn_start;
    }
    else
        /*
         * Guest: fixed virtual ECAM base; seg is left as produced by
         * VPCI_ECAM_BDF (presumably 0, the virtual segment — confirm
         * against the pci_sbdf_t/VPCI_ECAM_BDF definitions).
         */
        sbdf.sbdf = VPCI_ECAM_BDF(gpa - GUEST_VPCI_ECAM_BASE);

    return sbdf;
}
/*
 * MMIO read handler for trapped ECAM config-space accesses.
 *
 * On success the read value is placed in *r and 1 is returned; on failure
 * *r is set to all ones (the PCI convention for aborted reads) and 0 is
 * returned.
 */
static int vpci_mmio_read(struct vcpu *v, mmio_info_t *info,
                          register_t *r, void *p)
{
    pci_sbdf_t sbdf = vpci_sbdf_from_gpa(p, info->gpa);
    /* Local needed to prevent a pointer cast on 32bit. */
    unsigned long val;

    if ( !vpci_ecam_read(sbdf, ECAM_REG_OFFSET(info->gpa),
                         1U << info->dabt.size, &val) )
    {
        *r = ~0ul;
        return 0;
    }

    *r = val;
    return 1;
}
/*
 * MMIO write handler for trapped ECAM config-space accesses.
 *
 * Returns the result of vpci_ecam_write() (non-zero on success).
 */
static int vpci_mmio_write(struct vcpu *v, mmio_info_t *info,
                           register_t r, void *p)
{
    const struct pci_host_bridge *bridge = p;
    pci_sbdf_t sbdf = vpci_sbdf_from_gpa(bridge, info->gpa);
    unsigned int len = 1U << info->dabt.size;

    return vpci_ecam_write(sbdf, ECAM_REG_OFFSET(info->gpa), len, r);
}
/*
 * Ops shared by both registrations below: per-physical-bridge windows for
 * the hardware domain, and the single virtual ECAM window for guests.
 */
static const struct mmio_handler_ops vpci_mmio_handler = {
.read = vpci_mmio_read,
.write = vpci_mmio_write,
};
static int vpci_setup_mmio_handler_cb(struct domain *d,
struct pci_host_bridge *bridge)
{
struct pci_config_window *cfg = bridge->cfg;
register_mmio_handler(d, &vpci_mmio_handler,
cfg->phys_addr, cfg->size, bridge);
/* We have registered a single MMIO handler. */
return 1;
}
/*
 * Set up the vPCI MMIO traps for a domain.
 *
 * The hardware domain gets one handler per physical host bridge, covering
 * each bridge's real ECAM window.  Guests get the virtual platform layout:
 * a single virtual host bridge at the fixed guest ECAM address, for now.
 *
 * Returns 0 on success, a negative error from the bridge iterator otherwise.
 */
int domain_vpci_init(struct domain *d)
{
    int rc = 0;

    if ( !has_vpci(d) )
        return 0;

    if ( is_hardware_domain(d) )
    {
        rc = pci_host_iterate_bridges_and_count(d,
                                                vpci_setup_mmio_handler_cb);
        /* A non-negative result is a handler count, not an error. */
        if ( rc >= 0 )
            rc = 0;
    }
    else
        register_mmio_handler(d, &vpci_mmio_handler, GUEST_VPCI_ECAM_BASE,
                              GUEST_VPCI_ECAM_SIZE, NULL);

    return rc;
}
/*
 * Counting callback mirroring vpci_setup_mmio_handler_cb(): each bridge
 * contributes exactly one MMIO handler for its configuration space.
 */
static int vpci_get_num_handlers_cb(struct domain *d,
struct pci_host_bridge *bridge)
{
/* Each bridge has a single MMIO handler for the configuration space. */
return 1;
}
/*
 * Report how many MMIO handlers domain_vpci_init() will register for @d,
 * so the caller can size the handler array accordingly.
 *
 * Returns 0 when the domain has no vPCI; otherwise one handler per physical
 * bridge for the hardware domain, or exactly one for a guest (which only
 * sees a single virtual host bridge at the moment).
 */
unsigned int domain_vpci_get_num_mmio_handlers(struct domain *d)
{
    int count;

    if ( !has_vpci(d) )
        return 0;

    if ( !is_hardware_domain(d) )
        /* One region covering the single virtual host bridge's ECAM. */
        return 1;

    count = pci_host_iterate_bridges_and_count(d, vpci_get_num_handlers_cb);
    if ( count < 0 )
    {
        /* The counting callback cannot fail; an error here is a bug. */
        ASSERT_UNREACHABLE();
        count = 0;
    }

    return count;
}
/*
* Local variables:
* mode: C
* c-file-style: "BSD"
* c-basic-offset: 4
* indent-tabs-mode: nil
* End:
*/
|