/******************************************************************************
* Original code extracted from arch/x86/x86_64/mm.c
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; If not, see <http://www.gnu.org/licenses/>.
*/

#include <xen/init.h>
#include <xen/mm.h>
#include <xen/bitops.h>
#include <xen/nospec.h>

/*
 * Parameters for PFN/MADDR compression.  A run of address bits which is
 * zero for all RAM (the "hole") gets squeezed out of page frame numbers,
 * yielding contiguous page indices (PDXes) and hence a smaller frame
 * table.
 */
unsigned long __read_mostly max_pdx;
unsigned long __read_mostly pfn_pdx_bottom_mask = ~0UL;
unsigned long __read_mostly ma_va_bottom_mask = ~0UL;
unsigned long __read_mostly pfn_top_mask = 0;
unsigned long __read_mostly ma_top_mask = 0;
unsigned long __read_mostly pfn_hole_mask = 0;
unsigned int __read_mostly pfn_pdx_hole_shift = 0;
unsigned long __read_mostly pdx_group_valid[BITS_TO_LONGS(
(FRAMETABLE_NR + PDX_GROUP_COUNT - 1) / PDX_GROUP_COUNT)] = { [0] = 1 };
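
/*
 * For reference, a sketch of how the conversion macros in the arch headers
 * consume these parameters:
 *
 *   pfn_to_pdx(pfn) = (pfn & pfn_pdx_bottom_mask) |
 *                     ((pfn & pfn_top_mask) >> pfn_pdx_hole_shift)
 *   pdx_to_pfn(pdx) = (pdx & pfn_pdx_bottom_mask) |
 *                     ((pdx << pfn_pdx_hole_shift) & pfn_top_mask)
 */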

/*
 * An MFN is valid if it lies below max_page, does not fall into the
 * compressed PFN hole, and belongs to a PDX group previously marked
 * valid by set_pdx_range().
 */
bool __mfn_valid(unsigned long mfn)
{
    if ( unlikely(evaluate_nospec(mfn >= max_page)) )
        return false;

    return likely(!(mfn & pfn_hole_mask)) &&
           likely(test_bit(pfn_to_pdx(mfn) / PDX_GROUP_COUNT,
                           pdx_group_valid));
}

/* Sets all bits from the most-significant 1-bit down to the LSB */
static u64 __init fill_mask(u64 mask)
{
    while ( mask & (mask + 1) )
        mask |= mask + 1;

    return mask;
}
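
/*
 * Worked example (hypothetical input): fill_mask(0x14) iterates
 * 0x14 -> 0x15 -> 0x17 -> 0x1f, leaving every bit at or below the
 * most-significant 1-bit set.
 */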

/* We don't want to compress the low MAX_ORDER bits of the addresses. */
uint64_t __init pdx_init_mask(uint64_t base_addr)
{
    return fill_mask(max(base_addr,
                         (uint64_t)1 << (MAX_ORDER + PAGE_SHIFT)) - 1);
}
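
/*
 * Example, assuming 4KiB pages (PAGE_SHIFT = 12) and Xen's default
 * MAX_ORDER of 20: 1 << (20 + 12) is 4GiB, so pdx_init_mask(0) returns
 * fill_mask(4GiB - 1), keeping all address bits below 4GiB out of
 * compression.
 */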

/*
 * Returns a mask covering the address bits which may vary between
 * addresses inside the region [base, base + len), i.e. the low-order
 * bits unusable for compressing this region.
 */
u64 __init pdx_region_mask(u64 base, u64 len)
{
    return fill_mask(base ^ (base + len - 1));
}
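
/*
 * Example (hypothetical region): for base = 0x100000000 (4GiB) and
 * len = 0x80000000 (2GiB), base ^ (base + len - 1) = 0x7fffffff, which
 * fill_mask() leaves unchanged: bits 0-30 vary within the region.
 */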

/* Mark the PDX groups covering MFNs [smfn, emfn) as holding valid frames. */
void set_pdx_range(unsigned long smfn, unsigned long emfn)
{
    unsigned long idx, eidx;

    idx = pfn_to_pdx(smfn) / PDX_GROUP_COUNT;
    eidx = (pfn_to_pdx(emfn - 1) + PDX_GROUP_COUNT) / PDX_GROUP_COUNT;

    for ( ; idx < eidx; ++idx )
        __set_bit(idx, pdx_group_valid);
}
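
/*
 * Example (hypothetical values, with compression still an identity
 * mapping): if PDX_GROUP_COUNT were 0x200, set_pdx_range(0x1000, 0x1400)
 * would mark groups 8 and 9 valid.
 */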

/*
 * Find the widest run of zero bits in @mask at or above bit MAX_ORDER and
 * set up the parameters to compress that run (the "hole") out of
 * PFN <-> PDX conversions.
 */
void __init pfn_pdx_hole_setup(unsigned long mask)
{
    unsigned int i, j, bottom_shift = 0, hole_shift = 0;

    /*
     * We skip the first MAX_ORDER bits, as we never want to compress them.
     * This guarantees that page-pointer arithmetic remains valid within
     * contiguous aligned ranges of 2^MAX_ORDER pages.  Among others, our
     * buddy allocator relies on this assumption.
     *
     * If the logic changes here, we might have to update the ARM specific
     * init_pdx too.
     */
    for ( j = MAX_ORDER - 1; ; )
    {
        i = find_next_zero_bit(&mask, BITS_PER_LONG, j + 1);
        if ( i >= BITS_PER_LONG )
            break;
        j = find_next_bit(&mask, BITS_PER_LONG, i + 1);
        if ( j >= BITS_PER_LONG )
            break;
        /* Remember the widest zero run seen so far. */
        if ( j - i > hole_shift )
        {
            hole_shift = j - i;
            bottom_shift = i;
        }
    }
    if ( !hole_shift )
        return;

    printk(KERN_INFO "PFN compression on bits %u...%u\n",
           bottom_shift, bottom_shift + hole_shift - 1);

    pfn_pdx_hole_shift  = hole_shift;
    pfn_pdx_bottom_mask = (1UL << bottom_shift) - 1;
    ma_va_bottom_mask   = (PAGE_SIZE << bottom_shift) - 1;
    pfn_hole_mask       = ((1UL << hole_shift) - 1) << bottom_shift;
    pfn_top_mask        = ~(pfn_pdx_bottom_mask | pfn_hole_mask);
    ma_top_mask         = pfn_top_mask << PAGE_SHIFT;
}
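
/*
 * Worked example (hypothetical mask): with ones below bit 28 and above
 * bit 35 but zeroes in bits 28-35, the loop finds hole_shift = 8 and
 * bottom_shift = 28, giving
 *
 *   pfn_pdx_bottom_mask = (1UL << 28) - 1
 *   pfn_hole_mask       = 0xffUL << 28
 *   pfn_top_mask        = ~((1UL << 36) - 1)
 *
 * so bits 36 and up of a PFN are shifted down by 8 when converting to a
 * PDX.
 */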
/*
* Local variables:
* mode: C
* c-file-style: "BSD"
* c-basic-offset: 4
* indent-tabs-mode: nil
* End:
*/