[xen master] x86/msr: Drop {MISC_ENABLES, PLATFORM_INFO}.available

classic Classic list List threaded Threaded
1 message Options
Reply | Threaded
Open this post in threaded view
|

[xen master] x86/msr: Drop {MISC_ENABLES, PLATFORM_INFO}.available

patchbot
commit 2df1d2ba132ff5e5d997af264f458c9182f08a26
Author:     Andrew Cooper <[hidden email]>
AuthorDate: Wed Jun 27 11:34:47 2018 +0000
Commit:     Andrew Cooper <[hidden email]>
CommitDate: Mon Jul 2 18:04:21 2018 +0100

    x86/msr: Drop {MISC_ENABLES,PLATFORM_INFO}.available
   
    These MSRs are non-architectural and the available booleans were used in lieu
    of an architectural signal of availability.
   
    However, in hindsight, the additional booleans make toolstack MSR interactions
    more complicated.  The MSRs are unconditionally available to HVM guests, but
    currently for PV guests, are hidden when CPUID faulting is unavailable.
    Instead, switch them to being unconditionally readable, even for PV guests.
   
    The new behaviour is:
      * PLATFORM_INFO is unconditionally readable even for PV guests and will
        indicate the presence or absence of CPUID Faulting in bit 31.
      * MISC_FEATURES_ENABLES is unconditionally readable, and bit 0 may be set
        iff PLATFORM_INFO reports that CPUID Faulting is available.
   
    As a minor bugfix, CPUID Faulting for HVM guests is not restricted to
    Intel/AMD hardware.  In particular, VIA have a VT-x implementation conforming
    to the Intel specification.
   
    Signed-off-by: Andrew Cooper <[hidden email]>
    Reviewed-by: Sergey Dyasli <[hidden email]>
    Reviewed-by: Jan Beulich <[hidden email]>
    Reviewed-by: Roger Pau Monné <[hidden email]>
---
 xen/arch/x86/cpu/common.c |  9 ++-------
 xen/arch/x86/msr.c        | 30 +-----------------------------
 xen/include/asm-x86/msr.h | 19 +++++++++++++++----
 3 files changed, 18 insertions(+), 40 deletions(-)

diff --git a/xen/arch/x86/cpu/common.c b/xen/arch/x86/cpu/common.c
index aa8a21e5b8..bdd45c30fb 100644
--- a/xen/arch/x86/cpu/common.c
+++ b/xen/arch/x86/cpu/common.c
@@ -115,13 +115,8 @@ bool __init probe_cpuid_faulting(void)
  int rc;
 
  if ((rc = rdmsr_safe(MSR_INTEL_PLATFORM_INFO, val)) == 0)
- {
- struct msr_domain_policy *dp = &raw_msr_domain_policy;
-
- dp->plaform_info.available = true;
- if (val & MSR_PLATFORM_INFO_CPUID_FAULTING)
- dp->plaform_info.cpuid_faulting = true;
- }
+ raw_msr_domain_policy.plaform_info.cpuid_faulting =
+ val & MSR_PLATFORM_INFO_CPUID_FAULTING;
 
  if (rc ||
     !(val & MSR_PLATFORM_INFO_CPUID_FAULTING) ||
diff --git a/xen/arch/x86/msr.c b/xen/arch/x86/msr.c
index 1e12ccb729..6599f10d32 100644
--- a/xen/arch/x86/msr.c
+++ b/xen/arch/x86/msr.c
@@ -54,35 +54,21 @@ static void __init calculate_host_policy(void)
 static void __init calculate_hvm_max_policy(void)
 {
     struct msr_domain_policy *dp = &hvm_max_msr_domain_policy;
-    struct msr_vcpu_policy *vp = &hvm_max_msr_vcpu_policy;
 
     if ( !hvm_enabled )
         return;
 
     *dp = host_msr_domain_policy;
 
-    /* 0x000000ce  MSR_INTEL_PLATFORM_INFO */
     /* It's always possible to emulate CPUID faulting for HVM guests */
-    if ( boot_cpu_data.x86_vendor == X86_VENDOR_INTEL ||
-         boot_cpu_data.x86_vendor == X86_VENDOR_AMD )
-    {
-        dp->plaform_info.available = true;
-        dp->plaform_info.cpuid_faulting = true;
-    }
-
-    /* 0x00000140  MSR_INTEL_MISC_FEATURES_ENABLES */
-    vp->misc_features_enables.available = dp->plaform_info.cpuid_faulting;
+    dp->plaform_info.cpuid_faulting = true;
 }
 
 static void __init calculate_pv_max_policy(void)
 {
     struct msr_domain_policy *dp = &pv_max_msr_domain_policy;
-    struct msr_vcpu_policy *vp = &pv_max_msr_vcpu_policy;
 
     *dp = host_msr_domain_policy;
-
-    /* 0x00000140  MSR_INTEL_MISC_FEATURES_ENABLES */
-    vp->misc_features_enables.available = dp->plaform_info.cpuid_faulting;
 }
 
 void __init init_guest_msr_policy(void)
@@ -107,10 +93,7 @@ int init_domain_msr_policy(struct domain *d)
 
     /* See comment in intel_ctxt_switch_levelling() */
     if ( is_control_domain(d) )
-    {
-        dp->plaform_info.available = false;
         dp->plaform_info.cpuid_faulting = false;
-    }
 
     d->arch.msr = dp;
 
@@ -130,10 +113,6 @@ int init_vcpu_msr_policy(struct vcpu *v)
     *vp = is_pv_domain(d) ? pv_max_msr_vcpu_policy :
                             hvm_max_msr_vcpu_policy;
 
-    /* See comment in intel_ctxt_switch_levelling() */
-    if ( is_control_domain(d) )
-        vp->misc_features_enables.available = false;
-
     v->arch.msr = vp;
 
     return 0;
@@ -160,8 +139,6 @@ int guest_rdmsr(const struct vcpu *v, uint32_t msr, uint64_t *val)
         break;
 
     case MSR_INTEL_PLATFORM_INFO:
-        if ( !dp->plaform_info.available )
-            goto gp_fault;
         *val = (uint64_t)dp->plaform_info.cpuid_faulting <<
                _MSR_PLATFORM_INFO_CPUID_FAULTING;
         break;
@@ -171,8 +148,6 @@ int guest_rdmsr(const struct vcpu *v, uint32_t msr, uint64_t *val)
         goto gp_fault;
 
     case MSR_INTEL_MISC_FEATURES_ENABLES:
-        if ( !vp->misc_features_enables.available )
-            goto gp_fault;
         *val = (uint64_t)vp->misc_features_enables.cpuid_faulting <<
                _MSR_MISC_FEATURES_CPUID_FAULTING;
         break;
@@ -258,9 +233,6 @@ int guest_wrmsr(struct vcpu *v, uint32_t msr, uint64_t val)
     {
         bool old_cpuid_faulting = vp->misc_features_enables.cpuid_faulting;
 
-        if ( !vp->misc_features_enables.available )
-            goto gp_fault;
-
         rsvd = ~0ull;
         if ( dp->plaform_info.cpuid_faulting )
             rsvd &= ~MSR_MISC_FEATURES_CPUID_FAULTING;
diff --git a/xen/include/asm-x86/msr.h b/xen/include/asm-x86/msr.h
index f14f265aa5..627b7ced93 100644
--- a/xen/include/asm-x86/msr.h
+++ b/xen/include/asm-x86/msr.h
@@ -260,9 +260,15 @@ static inline void wrmsr_tsc_aux(uint32_t val)
 /* MSR policy object for shared per-domain MSRs */
 struct msr_domain_policy
 {
-    /* 0x000000ce  MSR_INTEL_PLATFORM_INFO */
+    /*
+     * 0x000000ce - MSR_INTEL_PLATFORM_INFO
+     *
+     * This MSR is non-architectural, but for simplicy we allow it to be read
+     * unconditionally.  CPUID Faulting support can be fully emulated for HVM
+     * guests so can be offered unconditionally, while support for PV guests
+     * is dependent on real hardware support.
+     */
     struct {
-        bool available; /* This MSR is non-architectural */
         bool cpuid_faulting;
     } plaform_info;
 };
@@ -288,9 +294,14 @@ struct msr_vcpu_policy
         uint32_t raw;
     } spec_ctrl;
 
-    /* 0x00000140  MSR_INTEL_MISC_FEATURES_ENABLES */
+    /*
+     * 0x00000140 - MSR_INTEL_MISC_FEATURES_ENABLES
+     *
+     * This MSR is non-architectural, but for simplicy we allow it to be read
+     * unconditionally.  The CPUID Faulting bit is the only writeable bit, and
+     * only if enumerated by MSR_PLATFORM_INFO.
+     */
     struct {
-        bool available; /* This MSR is non-architectural */
         bool cpuid_faulting;
     } misc_features_enables;
 };
--
generated by git-patchbot for /home/xen/git/xen.git#master


_______________________________________________
Xen-changelog mailing list
[hidden email]
https://lists.xenproject.org/xen-changelog