[xen staging] x86/HVM: move vendor independent CPU save/restore logic to shared code

patchbot
commit 27225bbb6d148f6d373a4a89b0dc3f3f5bf4d193
Author:     Jan Beulich <[hidden email]>
AuthorDate: Tue Oct 9 16:25:35 2018 +0200
Commit:     Jan Beulich <[hidden email]>
CommitDate: Tue Oct 9 16:25:35 2018 +0200

    x86/HVM: move vendor independent CPU save/restore logic to shared code
   
    A few pieces of the handling here are (no longer?) vendor specific, and
    hence there's no point in replicating the code. Zero the full structure
    before calling the save hook, eliminating the need for the hook
    functions to zero individual fields.
   
    Signed-off-by: Jan Beulich <[hidden email]>
    Acked-by: Razvan Cojocaru <[hidden email]>
    Reviewed-by: Andrew Cooper <[hidden email]>
    Reviewed-by: Boris Ostrovsky <[hidden email]>
    Reviewed-by: Kevin Tian <[hidden email]>
---
 xen/arch/x86/hvm/hvm.c     |  8 ++++++++
 xen/arch/x86/hvm/svm/svm.c | 11 -----------
 xen/arch/x86/hvm/vmx/vmx.c | 12 ------------
 xen/arch/x86/vm_event.c    | 12 ++++++------
 4 files changed, 14 insertions(+), 29 deletions(-)
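
For readers skimming the diff, the sketch below illustrates the pattern the commit message describes: the shared caller zero-initializes the whole context structure and fills in the vendor-independent state (control registers, EFER), so the per-vendor save hook no longer needs to clear or copy those fields itself. All names used here (hw_cpu_ctxt, vcpu_state, save_cpu_ctxt, vendor_save) are simplified stand-ins invented for illustration, not the real Xen types or hooks; the actual shared code is hvm_save_cpu_ctxt() in the hunks below.

#include <stdint.h>
#include <string.h>

/* Simplified stand-in for the per-CPU save record (hypothetical). */
struct hw_cpu_ctxt {
    uint64_t cr0, cr2, cr3, cr4;
    uint64_t msr_efer;
    uint64_t sysenter_cs, sysenter_esp, sysenter_eip;
    uint64_t pending_event, error_code;
};

/* Simplified stand-in for the per-vCPU architectural state (hypothetical). */
struct vcpu_state {
    uint64_t guest_cr[5];
    uint64_t guest_efer;
    uint64_t sysenter_cs, sysenter_esp, sysenter_eip;
};

/* Per-vendor hook: fills only the state the vendor code owns. */
typedef void (*save_hook_t)(const struct vcpu_state *v, struct hw_cpu_ctxt *c);

static void vendor_save(const struct vcpu_state *v, struct hw_cpu_ctxt *c)
{
    /* No field-by-field zeroing (pending_event, error_code) needed any more. */
    c->sysenter_cs  = v->sysenter_cs;
    c->sysenter_esp = v->sysenter_esp;
    c->sysenter_eip = v->sysenter_eip;
}

/* Shared path: zero everything, fill common state, then call the hook. */
static void save_cpu_ctxt(const struct vcpu_state *v, save_hook_t hook,
                          struct hw_cpu_ctxt *c)
{
    memset(c, 0, sizeof(*c));      /* replaces the hooks' per-field zeroing */

    c->cr0 = v->guest_cr[0];
    c->cr2 = v->guest_cr[2];
    c->cr3 = v->guest_cr[3];
    c->cr4 = v->guest_cr[4];
    c->msr_efer = v->guest_efer;

    hook(v, c);
}

int main(void)
{
    struct vcpu_state v = {
        .guest_cr = { 0x80000011u, 0, 0x1000, 0x20 },
        .guest_efer = 0x500,
    };
    struct hw_cpu_ctxt c;

    save_cpu_ctxt(&v, vendor_save, &c);
    return (c.cr0 == 0x80000011u && c.pending_event == 0) ? 0 : 1;
}

(In the real patch the zeroing falls out of C initializer rules rather than an explicit memset: hvm_save_cpu_ctxt() builds the context with a designated initializer and vm_event_fill_regs() uses "= {}", so every field not listed explicitly starts out as zero.)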

diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 6c1301df42..9c105ff056 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -787,12 +787,17 @@ static int hvm_save_cpu_ctxt(struct vcpu *v, hvm_domain_context_t *h)
         .r13 = v->arch.user_regs.r13,
         .r14 = v->arch.user_regs.r14,
         .r15 = v->arch.user_regs.r15,
+        .cr0 = v->arch.hvm.guest_cr[0],
+        .cr2 = v->arch.hvm.guest_cr[2],
+        .cr3 = v->arch.hvm.guest_cr[3],
+        .cr4 = v->arch.hvm.guest_cr[4],
         .dr0 = v->arch.debugreg[0],
         .dr1 = v->arch.debugreg[1],
         .dr2 = v->arch.debugreg[2],
         .dr3 = v->arch.debugreg[3],
         .dr6 = v->arch.debugreg[6],
         .dr7 = v->arch.debugreg[7],
+        .msr_efer = v->arch.hvm.guest_efer,
     };
 
     /*
@@ -1023,6 +1028,9 @@ static int hvm_load_cpu_ctxt(struct domain *d, hvm_domain_context_t *h)
     if ( hvm_funcs.load_cpu_ctxt(v, &ctxt) < 0 )
         return -EINVAL;
 
+    v->arch.hvm.guest_cr[2] = ctxt.cr2;
+    hvm_update_guest_cr(v, 2);
+
     if ( hvm_funcs.tsc_scaling.setup )
         hvm_funcs.tsc_scaling.setup(v);
 
diff --git a/xen/arch/x86/hvm/svm/svm.c b/xen/arch/x86/hvm/svm/svm.c
index c98cfc2c13..fa18cc07fd 100644
--- a/xen/arch/x86/hvm/svm/svm.c
+++ b/xen/arch/x86/hvm/svm/svm.c
@@ -272,17 +272,10 @@ static int svm_vmcb_save(struct vcpu *v, struct hvm_hw_cpu *c)
 {
     struct vmcb_struct *vmcb = v->arch.hvm.svm.vmcb;
 
-    c->cr0 = v->arch.hvm.guest_cr[0];
-    c->cr2 = v->arch.hvm.guest_cr[2];
-    c->cr3 = v->arch.hvm.guest_cr[3];
-    c->cr4 = v->arch.hvm.guest_cr[4];
-
     c->sysenter_cs = v->arch.hvm.svm.guest_sysenter_cs;
     c->sysenter_esp = v->arch.hvm.svm.guest_sysenter_esp;
     c->sysenter_eip = v->arch.hvm.svm.guest_sysenter_eip;
 
-    c->pending_event = 0;
-    c->error_code = 0;
     if ( vmcb->eventinj.fields.v &&
          hvm_event_needs_reinjection(vmcb->eventinj.fields.type,
                                      vmcb->eventinj.fields.vector) )
@@ -341,11 +334,9 @@ static int svm_vmcb_restore(struct vcpu *v, struct hvm_hw_cpu *c)
     }
 
     v->arch.hvm.guest_cr[0] = c->cr0 | X86_CR0_ET;
-    v->arch.hvm.guest_cr[2] = c->cr2;
     v->arch.hvm.guest_cr[3] = c->cr3;
     v->arch.hvm.guest_cr[4] = c->cr4;
     svm_update_guest_cr(v, 0, 0);
-    svm_update_guest_cr(v, 2, 0);
     svm_update_guest_cr(v, 4, 0);
 
     /* Load sysenter MSRs into both VMCB save area and VCPU fields. */
@@ -387,8 +378,6 @@ static void svm_save_cpu_state(struct vcpu *v, struct hvm_hw_cpu *data)
     data->msr_star         = vmcb->star;
     data->msr_cstar        = vmcb->cstar;
     data->msr_syscall_mask = vmcb->sfmask;
-    data->msr_efer         = v->arch.hvm.guest_efer;
-    data->msr_flags        = 0;
 }
 
 
diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
index bf90e22a9a..c85aa62ce7 100644
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -646,19 +646,10 @@ static void vmx_vmcs_save(struct vcpu *v, struct hvm_hw_cpu *c)
 
     vmx_vmcs_enter(v);
 
-    c->cr0 = v->arch.hvm.guest_cr[0];
-    c->cr2 = v->arch.hvm.guest_cr[2];
-    c->cr3 = v->arch.hvm.guest_cr[3];
-    c->cr4 = v->arch.hvm.guest_cr[4];
-
-    c->msr_efer = v->arch.hvm.guest_efer;
-
     __vmread(GUEST_SYSENTER_CS, &c->sysenter_cs);
     __vmread(GUEST_SYSENTER_ESP, &c->sysenter_esp);
     __vmread(GUEST_SYSENTER_EIP, &c->sysenter_eip);
 
-    c->pending_event = 0;
-    c->error_code = 0;
     __vmread(VM_ENTRY_INTR_INFO, &ev);
     if ( (ev & INTR_INFO_VALID_MASK) &&
          hvm_event_needs_reinjection(MASK_EXTR(ev, INTR_INFO_INTR_TYPE_MASK),
@@ -732,10 +723,8 @@ static int vmx_vmcs_restore(struct vcpu *v, struct hvm_hw_cpu *c)
 
     vmx_vmcs_enter(v);
 
-    v->arch.hvm.guest_cr[2] = c->cr2;
     v->arch.hvm.guest_cr[4] = c->cr4;
     vmx_update_guest_cr(v, 0, 0);
-    vmx_update_guest_cr(v, 2, 0);
     vmx_update_guest_cr(v, 4, 0);
 
     v->arch.hvm.guest_efer = c->msr_efer;
@@ -770,7 +759,6 @@ static int vmx_vmcs_restore(struct vcpu *v, struct hvm_hw_cpu *c)
 static void vmx_save_cpu_state(struct vcpu *v, struct hvm_hw_cpu *data)
 {
     data->shadow_gs        = v->arch.hvm.vmx.shadow_gs;
-    data->msr_flags        = 0;
     data->msr_lstar        = v->arch.hvm.vmx.lstar;
     data->msr_star         = v->arch.hvm.vmx.star;
     data->msr_cstar        = v->arch.hvm.vmx.cstar;
diff --git a/xen/arch/x86/vm_event.c b/xen/arch/x86/vm_event.c
index a2e470a65b..15de43c3e6 100644
--- a/xen/arch/x86/vm_event.c
+++ b/xen/arch/x86/vm_event.c
@@ -127,7 +127,7 @@ void vm_event_fill_regs(vm_event_request_t *req)
 #ifdef CONFIG_HVM
     const struct cpu_user_regs *regs = guest_cpu_user_regs();
     struct segment_register seg;
-    struct hvm_hw_cpu ctxt;
+    struct hvm_hw_cpu ctxt = {};
     struct vcpu *curr = current;
 
     ASSERT(is_hvm_vcpu(curr));
@@ -157,16 +157,16 @@ void vm_event_fill_regs(vm_event_request_t *req)
     req->data.regs.x86.rip    = regs->rip;
 
     req->data.regs.x86.dr7 = curr->arch.debugreg[7];
-    req->data.regs.x86.cr0 = ctxt.cr0;
-    req->data.regs.x86.cr2 = ctxt.cr2;
-    req->data.regs.x86.cr3 = ctxt.cr3;
-    req->data.regs.x86.cr4 = ctxt.cr4;
+    req->data.regs.x86.cr0 = curr->arch.hvm.guest_cr[0];
+    req->data.regs.x86.cr2 = curr->arch.hvm.guest_cr[2];
+    req->data.regs.x86.cr3 = curr->arch.hvm.guest_cr[3];
+    req->data.regs.x86.cr4 = curr->arch.hvm.guest_cr[4];
 
     req->data.regs.x86.sysenter_cs = ctxt.sysenter_cs;
     req->data.regs.x86.sysenter_esp = ctxt.sysenter_esp;
     req->data.regs.x86.sysenter_eip = ctxt.sysenter_eip;
 
-    req->data.regs.x86.msr_efer = ctxt.msr_efer;
+    req->data.regs.x86.msr_efer = curr->arch.hvm.guest_efer;
     req->data.regs.x86.msr_star = ctxt.msr_star;
     req->data.regs.x86.msr_lstar = ctxt.msr_lstar;
 
--
generated by git-patchbot for /home/xen/git/xen.git#staging
