author    Jan Beulich <jbeulich@suse.com>            2023-03-21 11:59:44 +0000
committer Andrew Cooper <andrew.cooper3@citrix.com>  2023-03-21 12:07:43 +0000
commit    b0d6684ee58f7252940f5a62e4b85bdc56307eef (patch)
tree      e91b9f74aa848bdd54c1b1402f7367f489ce9820
parent    84dfe7a56f04a7412fa4869b3e756c49e1cfbe75 (diff)
download  xen-b0d6684ee58f7252940f5a62e4b85bdc56307eef.tar.gz
x86/shadow: account for log-dirty mode when pre-allocating
Pre-allocation is intended to ensure that, in the course of constructing
or updating shadows, there is no risk of just-made shadows, or shadows
being acted upon, disappearing under our feet. The number of pages
pre-allocated, however, needs to account for all possible subsequent
allocations. While the use in sh_page_fault() accounts for all shadows
which may need making, so far it didn't account for allocations coming
from log-dirty tracking (which piggybacks onto the P2M allocation
functions).

Since shadow_prealloc() takes a count of shadows (or other data
structures) rather than a count of pages, putting the adjustment at the
call site of this function won't work very well: we simply can't express
the correct count that way in all cases. Instead take care of this in
the function itself, by "snooping" for L1 type requests. (While not
applicable right now, future new request sites of L1 tables would then
also be covered right away.)

It is relevant to note here that pre-allocations like the one done from
shadow_alloc_p2m_page() are benign when they fall within the "scope" of
an earlier pre-alloc which already included that count: the inner call
will simply find enough pages available and bail right away.

This is CVE-2022-42332 / XSA-427.

Signed-off-by: Jan Beulich <jbeulich@suse.com>
Reviewed-by: Tim Deegan <tim@xen.org>
(cherry picked from commit 91767a71061035ae42be93de495cd976f863a41a)
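To make the arithmetic concrete, here is a minimal standalone sketch (not Xen
code; it assumes the x86-64 values PADDR_BITS = 52, PAGE_SHIFT = 12 and an
8-byte mfn_t) showing why the new paging_logdirty_levels() helper evaluates
to 4, i.e. why at most four extra pages per L1-type request cover the
log-dirty tree allocations:

/* Standalone illustration only -- the constants below are assumptions
 * matching a typical x86-64 build, not taken from Xen headers. */
#include <assert.h>
#include <stdio.h>

#define PADDR_BITS   52   /* assumed physical address width */
#define PAGE_SHIFT   12   /* 4k pages */
#define ILOG2_MFN_T   3   /* log2(sizeof(mfn_t)) for an 8-byte mfn_t */
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

/*
 * A leaf bitmap page covers 2^(PAGE_SHIFT + 3) pfns (one bit each);
 * every inner node holds 2^(PAGE_SHIFT - ILOG2_MFN_T) = 512 children.
 */
#define paging_logdirty_levels() \
    (DIV_ROUND_UP(PADDR_BITS - PAGE_SHIFT - (PAGE_SHIFT + 3), \
                  PAGE_SHIFT - ILOG2_MFN_T) + 1)

int main(void)
{
    /* (52 - 12 - 15) = 25 index bits, 9 bits per inner level:
     * ceil(25 / 9) = 3 inner levels, plus the leaf bitmap = 4. */
    printf("log-dirty levels: %d\n", paging_logdirty_levels());
    assert(paging_logdirty_levels() == 4);
    return 0;
}

Marking a single pfn dirty can allocate at most one page per level of that
tree, which is why shadow_prealloc() only needs to add
paging_logdirty_levels() pages on top of the shadow count.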
-rw-r--r--  xen/arch/x86/mm/paging.c         |  1
-rw-r--r--  xen/arch/x86/mm/shadow/common.c  | 12
-rw-r--r--  xen/include/asm-x86/paging.h     |  4
3 files changed, 16 insertions(+), 1 deletion(-)
diff --git a/xen/arch/x86/mm/paging.c b/xen/arch/x86/mm/paging.c
index 97ac9ccf59..9fb66e65cd 100644
--- a/xen/arch/x86/mm/paging.c
+++ b/xen/arch/x86/mm/paging.c
@@ -280,6 +280,7 @@ void paging_mark_pfn_dirty(struct domain *d, pfn_t pfn)
     if ( unlikely(!VALID_M2P(pfn_x(pfn))) )
         return;
 
+    BUILD_BUG_ON(paging_logdirty_levels() != 4);
     i1 = L1_LOGDIRTY_IDX(pfn);
     i2 = L2_LOGDIRTY_IDX(pfn);
     i3 = L3_LOGDIRTY_IDX(pfn);
diff --git a/xen/arch/x86/mm/shadow/common.c b/xen/arch/x86/mm/shadow/common.c
index 1de0139742..c14a269935 100644
--- a/xen/arch/x86/mm/shadow/common.c
+++ b/xen/arch/x86/mm/shadow/common.c
@@ -1015,7 +1015,17 @@ bool shadow_prealloc(struct domain *d, unsigned int type, unsigned int count)
     if ( unlikely(d->is_dying) )
         return false;
 
-    ret = _shadow_prealloc(d, shadow_size(type) * count);
+    count *= shadow_size(type);
+    /*
+     * Log-dirty handling may result in allocations when populating its
+     * tracking structures. Tie this to the caller requesting space for L1
+     * shadows.
+     */
+    if ( paging_mode_log_dirty(d) &&
+         ((SHF_L1_ANY | SHF_FL1_ANY) & (1u << type)) )
+        count += paging_logdirty_levels();
+
+    ret = _shadow_prealloc(d, count);
     if ( !ret && (!d->is_shutting_down || d->shutdown_code != SHUTDOWN_crash) )
         /*
          * Failing to allocate memory required for shadow usage can only result in
diff --git a/xen/include/asm-x86/paging.h b/xen/include/asm-x86/paging.h
index 27890791d8..c6b429c691 100644
--- a/xen/include/asm-x86/paging.h
+++ b/xen/include/asm-x86/paging.h
@@ -192,6 +192,10 @@ int paging_mfn_is_dirty(struct domain *d, mfn_t gmfn);
 #define L4_LOGDIRTY_IDX(pfn) ((pfn_x(pfn) >> (PAGE_SHIFT + 3 + PAGETABLE_ORDER * 2)) & \
                               (LOGDIRTY_NODE_ENTRIES-1))
 
+#define paging_logdirty_levels() \
+    (DIV_ROUND_UP(PADDR_BITS - PAGE_SHIFT - (PAGE_SHIFT + 3), \
+                  PAGE_SHIFT - ilog2(sizeof(mfn_t))) + 1)
+
 #ifdef CONFIG_HVM
 /* VRAM dirty tracking support */
 struct sh_dirty_vram {