/* SPDX-License-Identifier: GPL-2.0-only */
/*
* pv/grant_table.c
*
* Grant table interfaces for PV guests
*
* Copyright (C) 2017 Wei Liu <wei.liu2@citrix.com>
*/
#include <xen/types.h>
#include <public/grant_table.h>
#include <asm/p2m.h>
#include <asm/pv/mm.h>
#include "mm.h"
static unsigned int grant_to_pte_flags(unsigned int grant_flags,
unsigned int cache_flags)
{
unsigned int pte_flags =
_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_GNTTAB | _PAGE_NX;
if ( grant_flags & GNTMAP_application_map )
pte_flags |= _PAGE_USER;
if ( !(grant_flags & GNTMAP_readonly) )
pte_flags |= _PAGE_RW;
pte_flags |= MASK_INSR((grant_flags >> _GNTMAP_guest_avail0), _PAGE_AVAIL);
pte_flags |= cacheattr_to_pte_flags(cache_flags >> 5);
return pte_flags;
}
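
/*
 * Install a grant mapping of 'frame' in the current (PV) guest's pagetables,
 * as described by 'flags' and 'cache_flags'. Returns a GNTST_* status.
 */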
int create_grant_pv_mapping(uint64_t addr, mfn_t frame,
unsigned int flags, unsigned int cache_flags)
{
struct vcpu *curr = current;
struct domain *currd = curr->domain;
l1_pgentry_t nl1e, ol1e = { }, *pl1e;
struct page_info *page;
mfn_t gl1mfn;
int rc = GNTST_general_error;
nl1e = l1e_from_mfn(frame, grant_to_pte_flags(flags, cache_flags));
nl1e = adjust_guest_l1e(nl1e, currd);
/*
* The meaning of addr depends on GNTMAP_contains_pte. It is either a
* machine address of an L1e the guest has nominated to be altered, or a
* linear address we need to look up the appropriate L1e for.
*/
if ( flags & GNTMAP_contains_pte )
{
/* addr must be suitably aligned, or we will corrupt adjacent ptes. */
if ( !IS_ALIGNED(addr, sizeof(nl1e)) )
{
gdprintk(XENLOG_WARNING,
"Misaligned PTE address %"PRIx64"\n", addr);
goto out;
}
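
/* addr is the machine address of the L1e to update: map its page. */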
gl1mfn = _mfn(addr >> PAGE_SHIFT);
page = get_page_from_mfn(gl1mfn, currd);
if ( !page )
goto out;
pl1e = map_domain_page(gl1mfn) + (addr & ~PAGE_MASK);
}
else
{
/* Guest trying to pass an out-of-range linear address? */
if ( is_pv_32bit_domain(currd) && addr != (uint32_t)addr )
goto out;
pl1e = map_guest_l1e(addr, &gl1mfn);
if ( !pl1e )
{
gdprintk(XENLOG_WARNING,
"Could not find L1 PTE for linear address %"PRIx64"\n",
addr);
goto out;
}
page = get_page_from_mfn(gl1mfn, currd);
if ( !page )
goto out_unmap;
}
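
/*
 * We hold a reference to the page containing the L1e. Lock it and confirm
 * it really is (still) in use as an L1 pagetable before writing to it.
 */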
if ( !page_lock(page) )
goto out_put;
if ( (page->u.inuse.type_info & PGT_type_mask) != PGT_l1_page_table )
goto out_unlock;
ol1e = *pl1e;
if ( UPDATE_ENTRY(l1, pl1e, ol1e, nl1e, gl1mfn, curr, 0) )
{
/*
* We always create mappings in this path. However, our caller,
* map_grant_ref(), only passes potentially non-zero cache_flags for
* MMIO frames, so this path doesn't create non-coherent mappings of
* RAM frames and there's no need to calculate PGT_non_coherent.
*/
ASSERT(!cache_flags || is_iomem_page(frame));
rc = GNTST_okay;
}
out_unlock:
page_unlock(page);
out_put:
put_page(page);
out_unmap:
unmap_domain_page(pl1e);
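
/*
 * On success the old entry was replaced, so release any references it was
 * holding.
 */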
if ( rc == GNTST_okay )
put_page_from_l1e(ol1e, currd);
out:
return rc;
}

/*
 * This exists solely for implementing GNTTABOP_unmap_and_replace, the ABI of
 * which is bizarre. This GNTTABOP isn't used any more, but was used by
 * classic-xen kernels and PVOps Linux before the M2P_OVERRIDE infrastructure
 * was replaced with something which actually worked.
 *
 * Look up the L1e mapping 'linear', and zap it. Return the old L1e via *out.
 * Returns a boolean indicating success. On success, the caller becomes
 * responsible for the stolen entry, i.e. for calling put_page_from_l1e() on
 * *out.
 */
static bool steal_linear_address(unsigned long linear, l1_pgentry_t *out)
{
struct vcpu *curr = current;
struct domain *currd = curr->domain;
l1_pgentry_t *pl1e, ol1e;
struct page_info *page;
mfn_t gl1mfn;
bool okay = false;
ASSERT(is_pv_domain(currd));
pl1e = map_guest_l1e(linear, &gl1mfn);
if ( !pl1e )
{
gdprintk(XENLOG_WARNING,
"Could not find L1 PTE for linear %"PRIx64"\n", linear);
goto out;
}
page = get_page_from_mfn(gl1mfn, currd);
if ( !page )
goto out_unmap;
if ( !page_lock(page) )
goto out_put;
if ( (page->u.inuse.type_info & PGT_type_mask) != PGT_l1_page_table )
goto out_unlock;
ol1e = *pl1e;
okay = UPDATE_ENTRY(l1, pl1e, ol1e, l1e_empty(), gl1mfn, curr, 0);
if ( okay )
*out = ol1e;
out_unlock:
page_unlock(page);
out_put:
put_page(page);
out_unmap:
unmap_domain_page(pl1e);
out:
return okay;
}

/*
 * Passing a new_addr of zero is taken to mean destroy. Passing a non-zero
 * new_addr has only ever been available via GNTTABOP_unmap_and_replace, and
 * only when !(flags & GNTMAP_contains_pte).
 */
int replace_grant_pv_mapping(uint64_t addr, mfn_t frame,
uint64_t new_addr, unsigned int flags)
{
struct vcpu *curr = current;
struct domain *currd = curr->domain;
l1_pgentry_t nl1e = l1e_empty(), ol1e, *pl1e;
struct page_info *page;
mfn_t gl1mfn;
int rc = GNTST_general_error;
unsigned int grant_pte_flags = grant_to_pte_flags(flags, 0);
/*
* On top of the explicit settings done by create_grant_pv_mapping()
* also open-code relevant parts of adjust_guest_l1e(). Don't mirror
* available and cacheability flags, though.
*/
if ( !is_pv_32bit_domain(currd) )
grant_pte_flags |= (grant_pte_flags & _PAGE_USER)
? _PAGE_GLOBAL
: _PAGE_GUEST_KERNEL | _PAGE_USER;
/*
* addr comes from Xen's active_entry tracking, and was used successfully
* to create a grant.
*
* The meaning of addr depends on GNTMAP_contains_pte. It is either a
* machine address of an L1e the guest has nominated to be altered, or a
* linear address we need to look up the appropriate L1e for.
*/
if ( flags & GNTMAP_contains_pte )
{
/* Replace not available in this addressing mode. */
if ( new_addr )
goto out;
/* Sanity check that we won't clobber the pagetable. */
if ( !IS_ALIGNED(addr, sizeof(nl1e)) )
{
ASSERT_UNREACHABLE();
goto out;
}
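
/* addr is the machine address of the L1e to replace: map its page. */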
gl1mfn = _mfn(addr >> PAGE_SHIFT);
page = get_page_from_mfn(gl1mfn, currd);
if ( !page )
goto out;
pl1e = map_domain_page(gl1mfn) + (addr & ~PAGE_MASK);
}
else
{
if ( is_pv_32bit_domain(currd) )
{
if ( addr != (uint32_t)addr )
{
ASSERT_UNREACHABLE();
goto out;
}
/* Guest trying to pass an out-of-range linear address? */
if ( new_addr != (uint32_t)new_addr )
goto out;
}
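
/*
 * For GNTTABOP_unmap_and_replace, transplant the L1e currently mapping
 * new_addr: it is zapped there and becomes the entry written at addr below.
 */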
if ( new_addr && !steal_linear_address(new_addr, &nl1e) )
goto out;
pl1e = map_guest_l1e(addr, &gl1mfn);
if ( !pl1e )
goto out;
page = get_page_from_mfn(gl1mfn, currd);
if ( !page )
goto out_unmap;
}
if ( !page_lock(page) )
goto out_put;
if ( (page->u.inuse.type_info & PGT_type_mask) != PGT_l1_page_table )
goto out_unlock;
ol1e = *pl1e;
/*
* Check that the address supplied is actually mapped to frame (with
* appropriate permissions).
*/
if ( unlikely(!mfn_eq(l1e_get_mfn(ol1e), frame)) ||
unlikely((l1e_get_flags(ol1e) ^ grant_pte_flags) &
(_PAGE_PRESENT | _PAGE_RW)) )
{
gdprintk(XENLOG_ERR,
"PTE %"PRIpte" for %"PRIx64" doesn't match grant (%"PRIpte")\n",
l1e_get_intpte(ol1e), addr,
l1e_get_intpte(l1e_from_mfn(frame, grant_pte_flags)));
goto out_unlock;
}
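
/*
 * Warn, but carry on, if any flags other than the available and cache
 * attribute bits differ from what we would have installed.
 */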
if ( unlikely((l1e_get_flags(ol1e) ^ grant_pte_flags) &
~(_PAGE_AVAIL | PAGE_CACHE_ATTRS)) )
gdprintk(XENLOG_WARNING,
"PTE flags %x for %"PRIx64" don't match grant (%x)\n",
l1e_get_flags(ol1e), addr, grant_pte_flags);
if ( UPDATE_ENTRY(l1, pl1e, ol1e, nl1e, gl1mfn, curr, 0) )
{
/*
* Generally, replace_grant_pv_mapping() is used to destroy mappings
* (nl1e = l1e_empty()), but it can be a present mapping on the
* GNTTABOP_unmap_and_replace path.
*
* In such cases, the PTE is fully transplanted from its old location
* via steal_linear_address(), so we need not perform PGT_non_coherent
* checking here.
*/
rc = GNTST_okay;
}
out_unlock:
page_unlock(page);
out_put:
put_page(page);
out_unmap:
unmap_domain_page(pl1e);
out:
/* If there was an error, we are still responsible for the stolen pte. */
if ( rc )
put_page_from_l1e(nl1e, currd);
return rc;
}

/*
* Local variables:
* mode: C
* c-file-style: "BSD"
* c-basic-offset: 4
* tab-width: 4
* indent-tabs-mode: nil
* End:
*/