summary | refs | log | tree | commit | diff
diff options
context:
space:
mode:
author    Alejandro Vallejo <alejandro.vallejo@cloud.com>  2023-05-16 17:18:31 +0200
committer Jan Beulich <jbeulich@suse.com>                  2023-05-16 17:18:31 +0200
commit    1240932a8d3174849a144f395eb858c755a6a297 (patch)
tree      010c3cc7b53f96045fb59bfd6bc2c2cbba155cc8
parent    eda98ea870803ea204a1928519b3f21ec6a679b6 (diff)
download  xen-1240932a8d3174849a144f395eb858c755a6a297.tar.gz
x86: Refactor conditional guard in probe_cpuid_faulting()
Move vendor-specific checks to the vendor-specific callers. While at it move the synth cap setters to the callers too, as it's needed for a later patch and it's not a functional change either. No functional change. Signed-off-by: Alejandro Vallejo <alejandro.vallejo@cloud.com> Reviewed-by: Jan Beulich <jbeulich@suse.com>
-rw-r--r--  xen/arch/x86/cpu/amd.c     | 13
-rw-r--r--  xen/arch/x86/cpu/common.c  | 13
-rw-r--r--  xen/arch/x86/cpu/intel.c   | 12
3 files changed, 23 insertions(+), 15 deletions(-)
diff --git a/xen/arch/x86/cpu/amd.c b/xen/arch/x86/cpu/amd.c
index 9a1a3858ed..98fb80ee88 100644
--- a/xen/arch/x86/cpu/amd.c
+++ b/xen/arch/x86/cpu/amd.c
@@ -271,8 +271,19 @@ static void __init noinline amd_init_levelling(void)
{
const struct cpuidmask *m = NULL;
- if (probe_cpuid_faulting())
+ /*
+ * If there's support for CpuidUserDis or CPUID faulting then
+ * we can skip levelling because CPUID accesses are trapped anyway.
+ *
+ * CPUID faulting is an Intel feature analogous to CpuidUserDis, so
+ * that can only be present when Xen is itself virtualized (because
+ * it can be emulated)
+ */
+ if (cpu_has_hypervisor && probe_cpuid_faulting()) {
+ expected_levelling_cap |= LCAP_faulting;
+ levelling_caps |= LCAP_faulting;
return;
+ }
probe_masking_msrs();
diff --git a/xen/arch/x86/cpu/common.c b/xen/arch/x86/cpu/common.c
index edc4db1335..52646f7dfb 100644
--- a/xen/arch/x86/cpu/common.c
+++ b/xen/arch/x86/cpu/common.c
@@ -131,17 +131,6 @@ bool __init probe_cpuid_faulting(void)
uint64_t val;
int rc;
- /*
- * Don't bother looking for CPUID faulting if we aren't virtualised on
- * AMD or Hygon hardware - it won't be present. Likewise for Fam0F
- * Intel hardware.
- */
- if (((boot_cpu_data.x86_vendor & (X86_VENDOR_AMD | X86_VENDOR_HYGON)) ||
- ((boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) &&
- boot_cpu_data.x86 == 0xf)) &&
- !cpu_has_hypervisor)
- return false;
-
if ((rc = rdmsr_safe(MSR_INTEL_PLATFORM_INFO, val)) == 0)
raw_cpu_policy.platform_info.cpuid_faulting =
val & MSR_PLATFORM_INFO_CPUID_FAULTING;
@@ -155,8 +144,6 @@ bool __init probe_cpuid_faulting(void)
return false;
}
- expected_levelling_cap |= LCAP_faulting;
- levelling_caps |= LCAP_faulting;
setup_force_cpu_cap(X86_FEATURE_CPUID_FAULTING);
return true;
diff --git a/xen/arch/x86/cpu/intel.c b/xen/arch/x86/cpu/intel.c
index 71fc1a1e18..168cd58f36 100644
--- a/xen/arch/x86/cpu/intel.c
+++ b/xen/arch/x86/cpu/intel.c
@@ -226,8 +226,18 @@ static void cf_check intel_ctxt_switch_masking(const struct vcpu *next)
*/
static void __init noinline intel_init_levelling(void)
{
- if (probe_cpuid_faulting())
+ /*
+ * Intel Fam0f is old enough that probing for CPUID faulting support
+ * introduces spurious #GP(0) when the appropriate MSRs are read,
+ * so skip it altogether. In the case where Xen is virtualized these
+ * MSRs may be emulated though, so we allow it in that case.
+ */
+ if ((boot_cpu_data.x86 != 0xf || cpu_has_hypervisor) &&
+ probe_cpuid_faulting()) {
+ expected_levelling_cap |= LCAP_faulting;
+ levelling_caps |= LCAP_faulting;
return;
+ }
probe_masking_msrs();