
patch for event-driven lsapic interrupt check and injection


Dong, Eddie
Dan:
        As agreed in the previous discussion, event-driven interrupt
check and injection is better than polling-based. The following patch
fixes the situation in the VT case; I am planning to cover the non-VTI
case with a separate variable to indicate the pending interrupt, if you
want to see it happen.
        Thanks, eddie
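
(For reference, the pattern the patch adopts can be sketched in a few
lines of C. This is an illustration with simplified names, not the
patch code itself:)

    /* Event-driven pending-interrupt delivery, minimal sketch.
     * Producer: whoever pends an interrupt also raises a flag.
     * Consumer: the exit-to-guest path runs the expensive check
     * and injection only when the flag is set, instead of polling
     * on every exit. */
    struct irq_state {
        unsigned long irr[4];       /* 256-bit pending bitmap */
        char          new_pending;  /* set whenever irr changes */
    };

    static void pend_irq(struct irq_state *s, int vector)
    {
        s->irr[vector >> 6] |= 1UL << (vector & 63);
        s->new_pending = 1;             /* event: re-check needed */
    }

    static void exit_to_guest(struct irq_state *s)
    {
        if (s->new_pending) {           /* only when flagged */
            s->new_pending = 0;
            /* check and inject highest pending vector here */
        }
    }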


Index: include/asm-ia64/domain.h
===================================================================
--- include/asm-ia64/domain.h (revision 1111)
+++ include/asm-ia64/domain.h (working copy)
@@ -80,6 +80,8 @@
     void (*schedule_tail) (struct exec_domain *);
     struct trap_bounce trap_bounce;
     thash_cb_t *vtlb;
+    char irq_new_pending;
+    char irq_new_condition; // vpsr.i/vtpr change, check for pending VHPI
     //for physical emulation
     unsigned long old_rsc;
     int mode_flags;
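
(The intended roles of the two new flags, summarized; see the
vlsapic.c and vmx_virt.c changes below:)

    /* irq_new_pending:   a new bit was set in the virtual IRR;
     *                    consumed once by leave_hypervisor_tail().
     * irq_new_condition: vpsr.i or vtpr changed during emulation;
     *                    consumed once by post_emulation_action(),
     *                    which re-runs the VHPI check. */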
Index: include/asm-ia64/vmx_vcpu.h
===================================================================
--- include/asm-ia64/vmx_vcpu.h (revision 1111)
+++ include/asm-ia64/vmx_vcpu.h (working copy)
@@ -401,16 +401,10 @@
     VPD_CR(vcpu,lid)=val;
     return IA64_NO_FAULT;
 }
+extern IA64FAULT vmx_vcpu_set_tpr(VCPU *vcpu, u64 val);
+
 static inline
 IA64FAULT
-vmx_vcpu_set_tpr(VCPU *vcpu, u64 val)
-{
-    VPD_CR(vcpu,tpr)=val;
-    //TODO
-    return IA64_NO_FAULT;
-}
-static inline
-IA64FAULT
 vmx_vcpu_set_eoi(VCPU *vcpu, u64 val)
 {
     guest_write_eoi(vcpu);
Index: include/asm-ia64/xenprocessor.h
===================================================================
--- include/asm-ia64/xenprocessor.h (revision 1111)
+++ include/asm-ia64/xenprocessor.h (working copy)
@@ -166,6 +166,16 @@
     };
 } ipi_d_t;
 
+typedef union {
+    __u64 val;
+    struct {
+        __u64 ig0 : 4;
+        __u64 mic : 4;
+        __u64 rsv : 8;
+        __u64 mmi : 1;
+        __u64 ig1 : 47;
+    };
+} tpr_t;
 
 #define IA64_ISR_CODE_MASK0     0xf
 #define IA64_UNIMPL_DADDR_FAULT     0x30
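
(The tpr_t layout above mirrors the architectural TPR format: mic in
bits 4-7 and mmi in bit 16. A minimal sketch of decoding a raw value
through the union; the value is invented for illustration:)

    /* With val = 0x10040: mic = (0x10040 >> 4) & 0xf = 4 and
     * mmi = (0x10040 >> 16) & 1 = 1. mmi = 1 masks all maskable
     * external interrupts; with mmi = 0, mic = 4 would mask only
     * vectors whose class (vec >> 4) is <= 4, i.e. below 0x50. */
    static void tpr_example(void)
    {
        tpr_t t;
        t.val = 0x10040UL;
        /* here t.mic == 4 and t.mmi == 1 */
    }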
Index: arch/ia64/vmx_process.c
===================================================================
--- arch/ia64/vmx_process.c (revision 1111)
+++ arch/ia64/vmx_process.c (working copy)
@@ -188,26 +188,20 @@
 // ONLY gets called from ia64_leave_kernel
 // ONLY call with interrupts disabled?? (else might miss one?)
 // NEVER successful if already reflecting a trap/fault because psr.i==0
-void vmx_deliver_pending_interrupt(struct pt_regs *regs)
+void leave_hypervisor_tail(struct pt_regs *regs)
 {
  struct domain *d = current->domain;
  struct exec_domain *ed = current;
  // FIXME: Will this work properly if doing an RFI???
  if (!is_idle_task(d) ) { // always comes from guest
- //vcpu_poke_timer(ed);
- //if (vcpu_deliverable_interrupts(ed)) {
- // unsigned long isr = regs->cr_ipsr & IA64_PSR_RI;
- // foodpi();
- // reflect_interruption(0,isr,0,regs,IA64_EXTINT_VECTOR);
- //}
-        extern void vmx_dorfirfi(void);
  struct pt_regs *user_regs = vcpu_regs(current);
 
  if (user_regs != regs)
 printk("WARNING: checking pending interrupt in nested interrupt!!!\n");
- if (regs->cr_iip == *(unsigned long *)vmx_dorfirfi)
- return;
- vmx_check_pending_irq(ed);
+ if ( ed->arch.irq_new_pending ) {
+ ed->arch.irq_new_pending = 0;
+ vmx_check_pending_irq(ed);
+ }
  }
 }
 
Index: arch/ia64/asm-offsets.c
===================================================================
--- arch/ia64/asm-offsets.c (revision 1111)
+++ arch/ia64/asm-offsets.c (working copy)
@@ -187,6 +187,7 @@
 
 #ifdef  CONFIG_VTI
 DEFINE(IA64_VPD_BASE_OFFSET, offsetof (struct exec_domain, arch.arch_vmx.vpd));
+ DEFINE(IA64_VLSAPIC_INSVC_BASE_OFFSET, offsetof (struct exec_domain, arch.arch_vmx.in_service[0]));
  DEFINE(IA64_VPD_CR_VPTA_OFFSET, offsetof (cr_t, pta));
  DEFINE(XXX_THASH_SIZE, sizeof (thash_data_t));
 
Index: arch/ia64/vlsapic.c
===================================================================
--- arch/ia64/vlsapic.c (revision 1111)
+++ arch/ia64/vlsapic.c (working copy)
@@ -92,7 +92,7 @@
 {
     vtime_t *vtm;
     VCPU    *vcpu = (VCPU*)data;
-    u64    cur_itc,vitm;
+    u64     cur_itc,vitm;
 
     UINT64  vec;
     
@@ -182,12 +182,12 @@
 /* Interrupt must be disabled at this point */
 
 extern u64 tick_to_ns(u64 tick);
-#define TIMER_SLOP (50*1000) /* ns */ /* copy from ac_timer.c */
+#define TIMER_SLOP (50*1000) /* ns */  /* copy from ac_timer.c */
 void vtm_interruption_update(VCPU *vcpu, vtime_t* vtm)
 {
     uint64_t    cur_itc,vitm,vitv;
     uint64_t    expires;
-    long     diff_now, diff_last;
+    long        diff_now, diff_last;
     uint64_t    spsr;
     
     vitv = VPD_CR(vcpu, itv);
@@ -217,7 +217,7 @@
     else if ( vtm->timer_hooked ) {
         expires = NOW() + tick_to_ns(0-diff_now) + TIMER_SLOP;
         mod_ac_timer (&(vtm->vtm_timer), expires);
- printf("mod vtm_timer\n");
+        DPRINTK("mod vtm_timer\n");
 //fire_itc = cur_itc;
 //fire_itm = vitm;
     }
@@ -262,30 +262,38 @@
     vtm_interruption_update(vcpu, vtm);
 }
 
-
-
 /*
  * Next for vLSapic
  */
 
 #define  NMI_VECTOR         2
 #define  ExtINT_VECTOR      0
-
+#define  NULL_VECTOR        -1
 #define  VLSAPIC_INSVC(vcpu, i) ((vcpu)->arch.arch_vmx.in_service[i])
-/*
- * LID-CR64: Keep in vpd.
- * IVR-CR65: (RO) see guest_read_ivr().
- * TPR-CR66: Keep in vpd, acceleration enabled.
- * EOI-CR67: see guest_write_eoi().
- * IRR0-3 - CR68-71: (RO) Keep in vpd irq_pending[]
- *          can move to vpd for optimization.
- * ITV: in time virtualization.
- * PMV: Keep in vpd initialized as 0x10000.
- * CMCV: Keep in vpd initialized as 0x10000.
- * LRR0-1: Keep in vpd, initialized as 0x10000.
- *
- */
 
+static void update_vhpi(VCPU *vcpu, int vec)
+{
+    u64     vhpi;
+    if ( vec == NULL_VECTOR ) {
+        vhpi = 0;
+    }
+    else if ( vec == NMI_VECTOR ) { // NMI
+        vhpi = 32;
+    } else if (vec == ExtINT_VECTOR) { //ExtINT
+        vhpi = 16;
+    }
+    else {
+        vhpi = vec / 16;
+    }
+
+    VMX_VPD(vcpu,vhpi) = vhpi;
+    // TODO: Add support for XENO
+    if ( VMX_VPD(vcpu,vac).a_int ) {
+        ia64_call_vsa ( PAL_VPS_SET_PENDING_INTERRUPT,
+                (uint64_t) &(vcpu->arch.arch_vmx.vpd), 0, 0,0,0,0,0);
+    }
+}
+
 void vlsapic_reset(VCPU *vcpu)
 {
     int     i;
@@ -301,9 +309,11 @@
     VPD_CR(vcpu, cmcv) = 0x10000;
     VPD_CR(vcpu, lrr0) = 0x10000;   // default reset value?
     VPD_CR(vcpu, lrr1) = 0x10000;   // default reset value?
+    update_vhpi(vcpu, NULL_VECTOR);
     for ( i=0; i<4; i++) {
         VLSAPIC_INSVC(vcpu,i) = 0;
     }
+    DPRINTK("VLSAPIC inservice base=%p\n", &VLSAPIC_INSVC(vcpu,0) );
 }
 
 /*
@@ -314,7 +324,7 @@
  */
 static __inline__ int highest_bits(uint64_t *dat)
 {
-    uint64_t  bits, bitnum=-1;
+    uint64_t  bits, bitnum;
     int i;
     
     /* loop for all 256 bits */
@@ -325,12 +335,12 @@
             return i*64+bitnum;
         }
     }
-   return -1;
+   return NULL_VECTOR;
 }
 
 /*
  * Return 0-255 for pending irq.
- *        -1 when no pending.
+ *        NULL_VECTOR: when no pending.
  */
 static int highest_pending_irq(VCPU *vcpu)
 {
@@ -353,7 +363,7 @@
 static int is_higher_irq(int pending, int inservice)
 {
     return ( (pending >> 4) > (inservice>>4) ||
-                ((pending != -1) && (inservice == -1)) );
+                ((pending != NULL_VECTOR) && (inservice == NULL_VECTOR)) );
 }
 
 static int is_higher_class(int pending, int mic)
@@ -366,43 +376,99 @@
     return (vec == 1 || ((vec <= 14 && vec >= 3)));
 }
 
+#define   IRQ_NO_MASKED         0
+#define   IRQ_MASKED_BY_VTPR    1
+#define   IRQ_MASKED_BY_INSVC   2   // masked by inservice IRQ
+
 /* See Table 5-8 in SDM vol2 for the definition */
 static int
-irq_masked(VCPU *vcpu, int h_pending, int h_inservice)
+_xirq_masked(VCPU *vcpu, int h_pending, int h_inservice)
 {
-    uint64_t    vtpr;
+    tpr_t    vtpr;
+    uint64_t    mmi;
     
-    vtpr = VPD_CR(vcpu, tpr);
+    vtpr.val = VPD_CR(vcpu, tpr);
 
-    if ( h_pending == NMI_VECTOR && h_inservice != NMI_VECTOR )
+    if ( h_inservice == NMI_VECTOR ) {
+        return IRQ_MASKED_BY_INSVC;
+    }
+    if ( h_pending == NMI_VECTOR ) {
         // Non Maskable Interrupt
-        return 0;
+        return IRQ_NO_MASKED;
+    }
+    if ( h_inservice == ExtINT_VECTOR ) {
+        return IRQ_MASKED_BY_INSVC;
+    }
+    mmi = vtpr.mmi;
+    if ( h_pending == ExtINT_VECTOR ) {
+        if ( mmi ) {
+            // mask all external IRQ
+            return IRQ_MASKED_BY_VTPR;
+        }
+        else {
+            return IRQ_NO_MASKED;
+        }
+    }
 
-    if ( h_pending == ExtINT_VECTOR && h_inservice >= 16)
-        return (vtpr>>16)&1;    // vtpr.mmi
+    if ( is_higher_irq(h_pending, h_inservice) ) {
+        if ( !mmi && is_higher_class(h_pending, vtpr.mic) ) {
+            return IRQ_NO_MASKED;
+        }
+        else {
+            return IRQ_MASKED_BY_VTPR;
+        }
+    }
+    else {
+        return IRQ_MASKED_BY_INSVC;
+    }
+}
 
-    if ( !(vtpr&(1UL<<16)) &&
-          is_higher_irq(h_pending, h_inservice) &&
-          is_higher_class(h_pending, (vtpr>>4)&0xf) )
-        return 0;
-
-    return 1;
+static int irq_masked(VCPU *vcpu, int h_pending, int h_inservice)
+{
+    int mask;
+    
+    mask = _xirq_masked(vcpu, h_pending, h_inservice);
+    return mask;
 }
 
+
+/*
+ * May come from virtualization fault or
+ * nested host interrupt.
+ */
 void vmx_vcpu_pend_interrupt(VCPU *vcpu, UINT64 vector)
 {
     uint64_t    spsr;
 
     if (vector & ~0xff) {
-        printf("vmx_vcpu_pend_interrupt: bad vector\n");
+        DPRINTK("vmx_vcpu_pend_interrupt: bad vector\n");
         return;
     }
     local_irq_save(spsr);
     VPD_CR(vcpu,irr[vector>>6]) |= 1UL<<(vector&63);
     local_irq_restore(spsr);
+    vcpu->arch.irq_new_pending = 1;
 }
 
 /*
+ * Add a batch of pending interrupts.
+ * The interrupt sources are contained in pend_irr[0-3], with
+ * each bit standing for one interrupt.
+ */
+void vmx_vcpu_pend_batch_interrupt(VCPU *vcpu, UINT64 *pend_irr)
+{
+    uint64_t    spsr;
+    int     i;
+
+    local_irq_save(spsr);
+    for (i=0 ; i<4; i++ ) {
+        VPD_CR(vcpu,irr[i]) |= pend_irr[i];
+    }
+    local_irq_restore(spsr);
+    vcpu->arch.irq_new_pending = 1;
+}
+
+/*
 * If the new pending interrupt is enabled and not masked, we directly inject
 * it into the guest. Otherwise, we set the VHPI if vac.a_int=1 so that when
 * the interrupt becomes unmasked, it gets injected.
@@ -416,7 +482,7 @@
  */
 int vmx_check_pending_irq(VCPU *vcpu)
 {
-    uint64_t  spsr;
+    uint64_t  spsr, mask;
     int     h_pending, h_inservice;
     int injected=0;
     uint64_t    isr;
@@ -424,40 +490,47 @@
 
     local_irq_save(spsr);
     h_pending = highest_pending_irq(vcpu);
-    if ( h_pending == -1 ) goto chk_irq_exit;
+    if ( h_pending == NULL_VECTOR ) goto chk_irq_exit;
     h_inservice = highest_inservice_irq(vcpu);
 
     vpsr.val = vmx_vcpu_get_psr(vcpu);
-    if (  vpsr.i &&
-        !irq_masked(vcpu, h_pending, h_inservice) ) {
-        //inject_guest_irq(v);
+    mask = irq_masked(vcpu, h_pending, h_inservice);
+    if (  vpsr.i && IRQ_NO_MASKED == mask ) {
         isr = vpsr.val & IA64_PSR_RI;
         if ( !vpsr.ic )
             panic("Interrupt when IC=0\n");
         vmx_reflect_interruption(0,isr,0, 12 ); // EXT IRQ
         injected = 1;
     }
-    else if ( VMX_VPD(vcpu,vac).a_int &&
-            is_higher_irq(h_pending,h_inservice) ) {
-        vmx_inject_vhpi(vcpu,h_pending);
+    else if ( mask == IRQ_MASKED_BY_INSVC ) {
+        // can't inject VHPI
+//        DPRINTK("IRQ masked by higher inservice\n");
     }
+    else {
+        // masked by vpsr.i or vtpr.
+        update_vhpi(vcpu,h_pending);
+    }
 
 chk_irq_exit:
     local_irq_restore(spsr);
     return injected;
 }
 
+/*
+ * Called only from the virtualization fault path.
+ */
 void guest_write_eoi(VCPU *vcpu)
 {
     int vec;
     uint64_t  spsr;
 
     vec = highest_inservice_irq(vcpu);
-    if ( vec < 0 ) panic("Wrong vector to EOI\n");
+    if ( vec == NULL_VECTOR ) panic("Wrong vector to EOI\n");
     local_irq_save(spsr);
     VLSAPIC_INSVC(vcpu,vec>>6) &= ~(1UL <<(vec&63));
     local_irq_restore(spsr);
     VPD_CR(vcpu, eoi)=0;    // overwrite the data
+    vmx_check_pending_irq(vcpu);
 }
 
 uint64_t guest_read_vivr(VCPU *vcpu)
@@ -468,37 +541,54 @@
     local_irq_save(spsr);
     vec = highest_pending_irq(vcpu);
     h_inservice = highest_inservice_irq(vcpu);
-    if ( vec < 0 || irq_masked(vcpu, vec, h_inservice) ) {
+    if ( vec == NULL_VECTOR ||
+        irq_masked(vcpu, vec, h_inservice) != IRQ_NO_MASKED ) {
         local_irq_restore(spsr);
         return IA64_SPURIOUS_INT_VECTOR;
     }
 
     VLSAPIC_INSVC(vcpu,vec>>6) |= (1UL <<(vec&63));
     VPD_CR(vcpu, irr[vec>>6]) &= ~(1UL <<(vec&63));
-
-    h_inservice = highest_inservice_irq(vcpu);
-    next = highest_pending_irq(vcpu);
-    if ( VMX_VPD(vcpu,vac).a_int &&
-        (is_higher_irq(next, h_inservice) || (next == -1)) )
-        vmx_inject_vhpi(vcpu, next);
+    update_vhpi(vcpu, NULL_VECTOR);     // clear VHPI till EOI or IRR write
     local_irq_restore(spsr);
     return (uint64_t)vec;
 }
 
-void vmx_inject_vhpi(VCPU *vcpu, u8 vec)
+static void generate_exirq(VCPU *vcpu)
 {
-        VMX_VPD(vcpu,vhpi) = vec / 16;
+    IA64_PSR    vpsr;
+    uint64_t    isr;
+    
+    vpsr.val = vmx_vcpu_get_psr(vcpu);
+    update_vhpi(vcpu, NULL_VECTOR);
+    isr = vpsr.val & IA64_PSR_RI;
+    if ( !vpsr.ic )
+        panic("Interrupt when IC=0\n");
+    vmx_reflect_interruption(0,isr,0, 12 ); // EXT IRQ
+}
 
+void vhpi_detection(VCPU *vcpu)
+{
+    uint64_t    threshold,vhpi;
+    tpr_t       vtpr;
+    IA64_PSR    vpsr;
+    
+    vpsr.val = vmx_vcpu_get_psr(vcpu);
+    vtpr.val = VPD_CR(vcpu, tpr);
 
-        // non-maskable
-        if ( vec == NMI_VECTOR ) // NMI
-                VMX_VPD(vcpu,vhpi) = 32;
-        else if (vec == ExtINT_VECTOR) //ExtINT
-                VMX_VPD(vcpu,vhpi) = 16;
-        else if (vec == -1)
-                VMX_VPD(vcpu,vhpi) = 0; /* Nothing pending */
+    threshold = ((!vpsr.i) << 5) | (vtpr.mmi << 4) | vtpr.mic;
+    vhpi = VMX_VPD(vcpu,vhpi);
+    if ( vhpi > threshold ) {
+        // interrupt activated
+        generate_exirq (vcpu);
+    }
+}
 
-        ia64_call_vsa ( PAL_VPS_SET_PENDING_INTERRUPT,
-            (uint64_t) &(vcpu->arch.arch_vmx.vpd), 0, 0,0,0,0,0);
+void vmx_vexirq(VCPU *vcpu)
+{
+    static  uint64_t  vexirq_count=0;
+
+    vexirq_count ++;
+    printk("Virtual ex-irq %ld\n", vexirq_count);
+    generate_exirq (vcpu);
 }
-
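
(To make the new three-way masking result concrete, here is a
self-contained restatement of the decision in _xirq_masked() above,
omitting the NMI and ExtINT special cases; a sketch, not the patch
code:)

    #define SK_NO_MASKED        0
    #define SK_MASKED_BY_VTPR   1
    #define SK_MASKED_BY_INSVC  2

    static int sketch_masked(int pending, int inservice, int mmi, int mic)
    {
        if ((pending >> 4) <= (inservice >> 4))
            return SK_MASKED_BY_INSVC;  /* wait for EOI, no VHPI */
        if (!mmi && (pending >> 4) > mic)
            return SK_NO_MASKED;        /* beats vtpr: inject now */
        return SK_MASKED_BY_VTPR;       /* defer via VHPI */
    }

    /* sketch_masked(0x59, 0x42, 0, 4) == SK_NO_MASKED
     * sketch_masked(0x59, 0x42, 0, 5) == SK_MASKED_BY_VTPR
     * sketch_masked(0x43, 0x42, 0, 0) == SK_MASKED_BY_INSVC */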
Index: arch/ia64/vmx_vcpu.c
===================================================================
--- arch/ia64/vmx_vcpu.c (revision 1111)
+++ arch/ia64/vmx_vcpu.c (working copy)
@@ -23,8 +23,6 @@
  *  Xuefei Xu (Anthony Xu) ([hidden email])
  */
 
-
-
 #include <linux/sched.h>
 #include <public/arch-ia64.h>
 #include <asm/ia64_int.h>
@@ -71,8 +69,8 @@
 
 //unsigned long last_guest_rsm = 0x0;
 struct guest_psr_bundle{
- unsigned long ip;
- unsigned long psr;
+    unsigned long ip;
+    unsigned long psr;
 };
 
 struct guest_psr_bundle guest_psr_buf[100];
@@ -107,20 +105,24 @@
                 IA64_PSR_SS | IA64_PSR_ED | IA64_PSR_IA
             ));
 
+    if ( !old_psr.i && (value & IA64_PSR_I) ) {
+        // vpsr.i 0->1
+        vcpu->arch.irq_new_condition = 1;
+    }
     new_psr.val=vmx_vcpu_get_psr(vcpu);
     {
- struct xen_regs *regs = vcpu_regs(vcpu);
- guest_psr_buf[guest_psr_index].ip = regs->cr_iip;
- guest_psr_buf[guest_psr_index].psr = new_psr.val;
- if (++guest_psr_index >= 100)
-    guest_psr_index = 0;
+    struct xen_regs *regs = vcpu_regs(vcpu);
+    guest_psr_buf[guest_psr_index].ip = regs->cr_iip;
+    guest_psr_buf[guest_psr_index].psr = new_psr.val;
+    if (++guest_psr_index >= 100)
+        guest_psr_index = 0;
     }
 #if 0
     if (old_psr.i != new_psr.i) {
- if (old_psr.i)
- last_guest_rsm = vcpu_regs(vcpu)->cr_iip;
- else
- last_guest_rsm = 0;
+    if (old_psr.i)
+        last_guest_rsm = vcpu_regs(vcpu)->cr_iip;
+    else
+        last_guest_rsm = 0;
     }
 #endif
 
@@ -270,8 +272,8 @@
 {
      va &= ~ (PSIZE(ps)-1);
      if ( va == 0x2000000002908000UL ||
-  va == 0x600000000000C000UL ) {
- stop();
+      va == 0x600000000000C000UL ) {
+    stop();
      }
      if (tlb_debug) printf("%s at %lx %lx\n", str, va, 1UL<<ps);
 }
@@ -433,4 +435,11 @@
     return IA64_NO_FAULT;
 }
 
+IA64FAULT
+vmx_vcpu_set_tpr(VCPU *vcpu, u64 val)
+{
+    VPD_CR(vcpu,tpr)=val;
+    vcpu->arch.irq_new_condition = 1;
+    return IA64_NO_FAULT;
+}
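
(A TPR write or a vpsr.i 0->1 transition now merely sets
irq_new_condition; the actual check happens later in vhpi_detection(),
which compares the recorded VHPI against a threshold. A worked sketch
with invented numbers:)

    /* threshold = ((!vpsr.i) << 5) | (vtpr.mmi << 4) | vtpr.mic;
     * inject when vhpi > threshold. */
    static int sketch_vhpi_fires(int psr_i, int mmi, int mic, unsigned vhpi)
    {
        unsigned threshold = ((!psr_i) << 5) | (mmi << 4) | mic;
        return vhpi > threshold;    /* 1: generate_exirq() would run */
    }

    /* Pending vector 0x59 is recorded as vhpi = 0x59/16 = 5:
     * sketch_vhpi_fires(1, 0, 4, 5)  == 1   class 5 beats mic 4
     * sketch_vhpi_fires(1, 0, 5, 5)  == 0   TPR raised to mic 5
     * sketch_vhpi_fires(0, 0, 0, 32) == 0   psr.i clear masks even
     *                                       NMI (vhpi 32 is not > 32) */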
 
Index: arch/ia64/vmx_ivt.S
===================================================================
--- arch/ia64/vmx_ivt.S (revision 1111)
+++ arch/ia64/vmx_ivt.S (working copy)
@@ -503,10 +503,13 @@
  .org vmx_ia64_ivt+0x3400
 
/////////////////////////////////////////////////////////////////////////////////////////
 // 0x3400 Entry 13 (size 64 bundles) Reserved
+ENTRY(vmx_virtual_exirq)
  VMX_DBG_FAULT(13)
- VMX_FAULT(13)
+        mov r31=pr
+        mov r19=13
+        br.sptk vmx_dispatch_vexirq
+END(vmx_virtual_exirq)
 
-
  .org vmx_ia64_ivt+0x3800
 
/////////////////////////////////////////////////////////////////////////////////////////
 // 0x3800 Entry 14 (size 64 bundles) Reserved
@@ -905,7 +908,25 @@
 END(vmx_dispatch_virtualization_fault)
 
 
+ENTRY(vmx_dispatch_vexirq)
+    VMX_SAVE_MIN_WITH_COVER_R19
+    alloc r14=ar.pfs,0,0,1,0
+    mov out0=r13
 
+    ssm psr.ic
+    ;;
+    srlz.i                  // guarantee that interruption collection is on
+    ;;
+    ssm psr.i               // restore psr.i
+    adds r3=16,r2                // set up second base pointer
+    ;;
+    VMX_SAVE_REST
+    movl r14=ia64_leave_hypervisor
+    ;;
+    mov rp=r14
+    br.call.sptk.many b6=vmx_vexirq
+END(vmx_dispatch_vexirq)
+
 ENTRY(vmx_dispatch_tlb_miss)
     VMX_SAVE_MIN_WITH_COVER_R19
     alloc r14=ar.pfs,0,0,3,0
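
(The new entry-13 path follows the existing dispatch-stub pattern; in
outline, what vmx_dispatch_vexirq above does:)

    /* 1. VMX_SAVE_MIN_WITH_COVER_R19  - save minimal state
     * 2. ssm psr.ic ; srlz.i          - re-enable interruption collection
     * 3. ssm psr.i                    - re-enable interrupts
     * 4. VMX_SAVE_REST                - save the remaining registers
     * 5. set rp = ia64_leave_hypervisor, then br.call vmx_vexirq,
     *    so the C handler returns straight into the exit path that
     *    re-runs the pending-interrupt checks. */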
Index: arch/ia64/vmx_virt.c
===================================================================
--- arch/ia64/vmx_virt.c (revision 1111)
+++ arch/ia64/vmx_virt.c (working copy)
@@ -1276,8 +1276,14 @@
 }
 
 
+static void post_emulation_action(VCPU *vcpu)
+{
+    if ( vcpu->arch.irq_new_condition ) {
+        vcpu->arch.irq_new_condition = 0;
+        vhpi_detection(vcpu);
+    }
+}
 
-
 //#define  BYPASS_VMAL_OPCODE
 extern IA64_SLOT_TYPE  slot_types[0x20][3];
 IA64_BUNDLE __vmx_get_domain_bundle(u64 iip)
@@ -1494,6 +1500,7 @@
     }
 
     recover_if_physical_mode(vcpu);
+    post_emulation_action (vcpu);
 //TODO    set_irq_check(v);
     return;
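
(Putting the pieces together, the end-to-end event flow after this
patch, as sketched from the hunks above:)

    /* vmx_vcpu_pend_interrupt()          sets irq_new_pending
     *   -> leave_hypervisor_tail()       consumes the flag and calls
     *      vmx_check_pending_irq()       inject, set VHPI, or wait
     *
     * vmx_vcpu_set_tpr() / vpsr.i 0->1   set irq_new_condition
     *   -> post_emulation_action()       consumes the flag and calls
     *      vhpi_detection()              inject if VHPI > threshold */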
 
Index: arch/ia64/vmx_entry.S
===================================================================
--- arch/ia64/vmx_entry.S (revision 1111)
+++ arch/ia64/vmx_entry.S (working copy)
@@ -217,7 +217,7 @@
     alloc loc0=ar.pfs,0,1,1,0
     adds out0=16,r12
     ;;
-    br.call.sptk.many b0=vmx_deliver_pending_interrupt
+    br.call.sptk.many b0=leave_hypervisor_tail
     mov ar.pfs=loc0
     adds r8=IA64_VPD_BASE_OFFSET,r13
     ;;

_______________________________________________
Xen-ia64-devel mailing list
[hidden email]
http://lists.xensource.com/xen-ia64-devel