PERFORCE change 89314 for review

Kip Macy kmacy at FreeBSD.org
Fri Jan 6 17:56:38 PST 2006


http://perforce.freebsd.org/chv.cgi?CH=89314

Change 89314 by kmacy at kmacy:freebsd7_xen3 on 2006/01/07 01:56:08

	update to event channel management to handle physical irqs
	this is another small piece for dom0 support

Affected files ...

.. //depot/projects/xen3/src/sys/i386-xen/i386-xen/evtchn.c#2 edit

Differences ...

==== //depot/projects/xen3/src/sys/i386-xen/i386-xen/evtchn.c#2 (text+ko) ====

@@ -3,7 +3,8 @@
  * 
  * Communication via Xen event channels.
  * 
- * Copyright (c) 2002-2004, K A Fraser
+ * Copyright (c) 2002-2005, K A Fraser
+ * Copyright (c) 2005-2006 Kip Macy
  */
 #include <sys/param.h>
 #include <sys/systm.h>
@@ -24,6 +25,47 @@
 
 
 
+/* linux helper functions that got sucked in 
+ * rename and move XXX
+ */
+
+
+#define BITS_PER_LONG 32
+#define NR_CPUS      MAX_VIRT_CPUS
+
+#define BITS_TO_LONGS(bits) \
+	(((bits)+BITS_PER_LONG-1)/BITS_PER_LONG)
+#define DECLARE_BITMAP(name,bits) \
+	unsigned long name[BITS_TO_LONGS(bits)]
+typedef struct { DECLARE_BITMAP(bits, NR_CPUS); } xen_cpumask_t;
+static inline int find_first_bit(const unsigned long *addr, unsigned size)
+{
+	int d0, d1;
+	int res;
+
+	/* This looks at memory. Mark it volatile to tell gcc not to move it around */
+	__asm__ __volatile__(
+		"xorl %%eax,%%eax\n\t"
+		"repe; scasl\n\t"
+		"jz 1f\n\t"
+		"leal -4(%%edi),%%edi\n\t"
+		"bsfl (%%edi),%%eax\n"
+		"1:\tsubl %%ebx,%%edi\n\t"
+		"shll $3,%%edi\n\t"
+		"addl %%edi,%%eax"
+		:"=a" (res), "=&c" (d0), "=&D" (d1)
+		:"1" ((size + 31) >> 5), "2" (addr), "b" (addr) : "memory");
+	return res;
+}
+
+#define min_t(type,x,y) \
+	({ type __x = (x); type __y = (y); __x < __y ? __x: __y; })
+#define first_cpu(src) __first_cpu(&(src), NR_CPUS)
+static inline int __first_cpu(const xen_cpumask_t *srcp, int nbits)
+{
+	return min_t(int, nbits, find_first_bit(srcp->bits, nbits));
+}
+
 static inline unsigned long __ffs(unsigned long word)
 {
         __asm__("bsfl %1,%0"
@@ -33,7 +75,24 @@
 }
 
 static struct mtx irq_mapping_update_lock;
+static struct xenpic *xp;
+struct xenpic_intsrc {
+	struct intsrc     xp_intsrc;
+	uint8_t           xp_vector;
+	boolean_t	  xp_masked;
+};
+
+struct xenpic { 
+	struct pic           *xp_dynirq_pic; 
+	struct pic           *xp_pirq_pic;   
+	uint16_t             xp_numintr; 
+	struct xenpic_intsrc xp_pins[0]; 
+}; 
+
+
 
+
+
 #define TODO            printf("%s: not implemented!\n", __func__) 
 
 /* IRQ <-> event-channel mappings. */
@@ -62,12 +121,13 @@
 #endif
 DEFINE_PER_CPU(int, ipi_to_irq[NR_IPIS]);
 
+/* Bitmap indicating which PIRQs require Xen to be notified on unmask. */
+static unsigned long pirq_needs_unmask_notify[NR_PIRQS/sizeof(unsigned long)];
+
 /* Reference counts for bindings to IRQs. */
 static int irq_bindcount[NR_IRQS];
 
 #define VALID_EVTCHN(_chn) ((_chn) != 0)
-#define BITS_PER_LONG 32
-#define NR_CPUS      MAX_VIRT_CPUS
 
 #ifdef CONFIG_SMP
 
@@ -179,7 +239,7 @@
 {
 	int irq;
 
-	mtx_lock(&irq_mapping_update_lock);
+	mtx_lock_spin(&irq_mapping_update_lock);
 	
 	if ((irq = evtchn_to_irq[evtchn]) == -1) {
 		irq = find_unbound_irq();
@@ -188,9 +248,10 @@
 	}
 
 	irq_bindcount[irq]++;
+	intr_register_source(&xp->xp_pins[irq].xp_intsrc);
+
+	mtx_unlock_spin(&irq_mapping_update_lock);
     
-	mtx_unlock(&irq_mapping_update_lock);
-    
 	return irq;
 }
 
@@ -200,7 +261,7 @@
 	evtchn_op_t op;
 	int evtchn, irq;
 
-	mtx_lock(&irq_mapping_update_lock);
+	mtx_lock_spin(&irq_mapping_update_lock);
 
 	if ((irq = per_cpu(virq_to_irq, cpu)[virq]) == -1) {
 		op.cmd              = EVTCHNOP_bind_virq;
@@ -221,7 +282,7 @@
 
 	irq_bindcount[irq]++;
 
-	mtx_unlock(&irq_mapping_update_lock);
+	mtx_unlock_spin(&irq_mapping_update_lock);
 
 	return irq;
 }
@@ -232,7 +293,7 @@
 	evtchn_op_t op;
 	int evtchn, irq;
 	
-	mtx_lock(&irq_mapping_update_lock);
+	mtx_lock_spin(&irq_mapping_update_lock);
 	
 	if ((irq = per_cpu(ipi_to_irq, cpu)[ipi]) == -1) {
 		op.u.bind_ipi.vcpu = cpu;
@@ -251,7 +312,7 @@
 
 	irq_bindcount[irq]++;
 
-	mtx_unlock(&irq_mapping_update_lock);
+	mtx_unlock_spin(&irq_mapping_update_lock);
 
 	return irq;
 }
@@ -263,7 +324,7 @@
 	evtchn_op_t op = { .cmd = EVTCHNOP_close };
 	int evtchn = evtchn_from_irq(irq);
 
-	mtx_lock(&irq_mapping_update_lock);
+	mtx_lock_spin(&irq_mapping_update_lock);
 
 	if ((--irq_bindcount[irq] == 0) && VALID_EVTCHN(evtchn)) {
 		op.u.close.port = evtchn;
@@ -287,12 +348,9 @@
 		irq_info[irq] = IRQ_UNBOUND;
 	}
 
-	mtx_unlock(&irq_mapping_update_lock);
+	mtx_unlock_spin(&irq_mapping_update_lock);
 }
 
-
-
-
 int 
 bind_evtchn_to_irqhandler(unsigned int evtchn,
 			  const char *devname,
@@ -304,6 +362,7 @@
 	int retval;
 
 	irq = bind_evtchn_to_irq(evtchn);
+	intr_register_source(&xp->xp_pins[irq].xp_intsrc);
 	retval = intr_add_handler(devname, irq, handler, arg, irqflags, NULL);
 	if (retval != 0) {
 		unbind_from_irq(irq);
@@ -324,6 +383,7 @@
 	int retval;
 
 	irq = bind_virq_to_irq(virq, cpu);
+	intr_register_source(&xp->xp_pins[irq].xp_intsrc);
 	retval = intr_add_handler(devname, irq, handler, NULL, irqflags, NULL);
 	if (retval != 0) {
 		unbind_from_irq(irq);
@@ -353,9 +413,6 @@
 	return irq;
 }
 
-
-
-
 void unbind_from_irqhandler(unsigned int irq, void *dev_id)
 {
 #ifdef notyet
@@ -371,11 +428,11 @@
 	evtchn_op_t op = { .cmd = EVTCHNOP_bind_vcpu };
 	int evtchn;
 
-	mtx_lock(&irq_mapping_update_lock);
+	mtx_lock_spin(&irq_mapping_update_lock);
 
 	evtchn = evtchn_from_irq(irq);
 	if (!VALID_EVTCHN(evtchn)) {
-		mtx_unlock(&irq_mapping_update_lock);
+		mtx_unlock_spin(&irq_mapping_update_lock);
 		return;
 	}
 
@@ -391,30 +448,17 @@
 	if (HYPERVISOR_event_channel_op(&op) >= 0)
 		bind_evtchn_to_cpu(evtchn, tcpu);
 
-	mtx_unlock(&irq_mapping_update_lock);
+	mtx_unlock_spin(&irq_mapping_update_lock);
 
-	/*
-	 * Now send the new target processor a NOP IPI. When this returns, it
-	 * will check for any pending interrupts, and so service any that got 
-	 * delivered to the wrong processor by mistake.
-	 * 
-	 * XXX: The only time this is called with interrupts disabled is from
-	 * the hotplug/hotunplug path. In that case, all cpus are stopped with 
-	 * interrupts disabled, and the missed interrupts will be picked up
-	 * when they start again. This is kind of a hack.
-	 */
-#if 0
-	if (!irqs_disabled())
-		smp_call_function(do_nothing_function, NULL, 0, 0);
-#endif
 }
 
-static void set_affinity_irq(unsigned irq, cpumask_t dest)
+static void set_affinity_irq(unsigned irq, xen_cpumask_t dest)
 {
 	unsigned tcpu = first_cpu(dest);
 	rebind_irq_to_cpu(irq, tcpu);
 }
 #endif
+
 /*
  * Interface to generic handling in intr_machdep.c
  */
@@ -423,114 +467,129 @@
 /*------------ interrupt handling --------------------------------------*/
 #define TODO            printf("%s: not implemented!\n", __func__) 
 
- struct mtx xenpic_lock;
+
+static void     xenpic_dynirq_enable_source(struct intsrc *isrc); 
+static void     xenpic_dynirq_disable_source(struct intsrc *isrc, int); 
+static void     xenpic_dynirq_eoi_source(struct intsrc *isrc); 
+static void     xenpic_dynirq_enable_intr(struct intsrc *isrc); 
 
-struct xenpic_intsrc {
-    struct intsrc xp_intsrc;
-    uint8_t       xp_vector;
-    boolean_t	  xp_masked;
-};
+static void     xenpic_pirq_enable_source(struct intsrc *isrc); 
+static void     xenpic_pirq_disable_source(struct intsrc *isrc, int); 
+static void     xenpic_pirq_eoi_source(struct intsrc *isrc); 
+static void     xenpic_pirq_enable_intr(struct intsrc *isrc); 
 
-struct xenpic { 
-    struct pic xp_pic; /* this MUST be first */
-    uint16_t xp_numintr; 
-    struct xenpic_intsrc xp_pins[0]; 
-}; 
 
-static void     xenpic_enable_dynirq_source(struct intsrc *isrc); 
-static void     xenpic_disable_dynirq_source(struct intsrc *isrc, int); 
-static void     xenpic_eoi_source(struct intsrc *isrc); 
-static void     xenpic_enable_dynirq_intr(struct intsrc *isrc); 
 static int      xenpic_vector(struct intsrc *isrc); 
 static int      xenpic_source_pending(struct intsrc *isrc); 
 static void     xenpic_suspend(struct intsrc *isrc); 
 static void     xenpic_resume(struct intsrc *isrc); 
 
 
-struct pic xenpic_template  =  { 
-    xenpic_enable_dynirq_source, 
-    xenpic_disable_dynirq_source,
-    xenpic_eoi_source, 
-    xenpic_enable_dynirq_intr, 
-    xenpic_vector, 
-    xenpic_source_pending,
-    xenpic_suspend, 
-    xenpic_resume 
+struct pic xenpic_dynirq_template  =  { 
+	xenpic_dynirq_enable_source, 
+	xenpic_dynirq_disable_source,
+	xenpic_dynirq_eoi_source, 
+	xenpic_dynirq_enable_intr, 
+	xenpic_vector, 
+	xenpic_source_pending,
+	xenpic_suspend, 
+	xenpic_resume 
+};
+
+struct pic xenpic_pirq_template  =  { 
+	xenpic_pirq_enable_source, 
+	xenpic_pirq_disable_source,
+	xenpic_pirq_eoi_source, 
+	xenpic_pirq_enable_intr, 
+	xenpic_vector, 
+	xenpic_source_pending,
+	xenpic_suspend, 
+	xenpic_resume 
 };
 
 
 void 
-xenpic_enable_dynirq_source(struct intsrc *isrc)
+xenpic_dynirq_enable_source(struct intsrc *isrc)
 {
 	unsigned int irq;
 	struct xenpic_intsrc *xp;
 
 	xp = (struct xenpic_intsrc *)isrc;
-
+	
+	mtx_lock_spin(&irq_mapping_update_lock);
 	if (xp->xp_masked) {
 		irq = xenpic_vector(isrc);
 		unmask_evtchn(evtchn_from_irq(irq));
 		xp->xp_masked = FALSE;
 	}
+	mtx_unlock_spin(&irq_mapping_update_lock);
 }
 
 static void 
-xenpic_disable_dynirq_source(struct intsrc *isrc, int foo)
+xenpic_dynirq_disable_source(struct intsrc *isrc, int foo)
 {
 	unsigned int irq;
 	struct xenpic_intsrc *xp;
 	
 	xp = (struct xenpic_intsrc *)isrc;
 	
+	mtx_lock_spin(&irq_mapping_update_lock);
 	if (!xp->xp_masked) {
 		irq = xenpic_vector(isrc);
 		mask_evtchn(evtchn_from_irq(irq));
-		clear_evtchn(evtchn_from_irq(irq));
 		xp->xp_masked = TRUE;
-	}
-	
+	}	
+	mtx_unlock_spin(&irq_mapping_update_lock);
 }
 
 static void 
-xenpic_enable_dynirq_intr(struct intsrc *isrc)
+xenpic_dynirq_enable_intr(struct intsrc *isrc)
 {
 	unsigned int irq;
-	
+	mtx_lock_spin(&irq_mapping_update_lock);
 	irq = xenpic_vector(isrc);
 	unmask_evtchn(evtchn_from_irq(irq));
+	mtx_unlock_spin(&irq_mapping_update_lock);
 }
 
 static void 
-xenpic_eoi_source(struct intsrc *isrc)
+xenpic_dynirq_eoi_source(struct intsrc *isrc)
 {
-	unsigned int irq = xenpic_vector(isrc);
+	unsigned int irq;
+
+	mtx_lock_spin(&irq_mapping_update_lock);
+	irq = xenpic_vector(isrc);
 	unmask_evtchn(evtchn_from_irq(irq));
+	mtx_unlock_spin(&irq_mapping_update_lock);
 }
 
 static int
 xenpic_vector(struct intsrc *isrc)
 {
-    struct xenpic_intsrc *pin = (struct xenpic_intsrc *)isrc;
+    struct xenpic_intsrc *pin;
+
+    pin = (struct xenpic_intsrc *)isrc;
+
     return (pin->xp_vector);
 }
 
 static int
 xenpic_source_pending(struct intsrc *isrc)
 {
-    TODO;
-    return 0;
+	TODO;
+	return 0;
 }
 
 static void 
 xenpic_suspend(struct intsrc *isrc) 
 { 
-    TODO; 
+	TODO; 
 } 
  
 static void 
 xenpic_resume(struct intsrc *isrc) 
 { 
-    TODO; 
+	TODO; 
 } 
 
 
@@ -541,143 +600,160 @@
 	if (VALID_EVTCHN(evtchn))
 		notify_remote_via_evtchn(evtchn);
 }
-#ifdef CONFIG_PHYSDEV
+
 /* required for support of physical devices */
 static inline void 
 pirq_unmask_notify(int pirq)
 {
-    physdev_op_t op;
-    if ( unlikely(test_bit(pirq, &pirq_needs_unmask_notify[0])) )
-    {
-        op.cmd = PHYSDEVOP_IRQ_UNMASK_NOTIFY;
-        (void)HYPERVISOR_physdev_op(&op);
-    }
+	physdev_op_t op;
+	if (unlikely(test_bit(pirq, &pirq_needs_unmask_notify[0]))) {
+		op.cmd = PHYSDEVOP_IRQ_UNMASK_NOTIFY;
+		(void)HYPERVISOR_physdev_op(&op);
+	}
 }
 
 static inline void 
 pirq_query_unmask(int pirq)
 {
-    physdev_op_t op;
-    op.cmd = PHYSDEVOP_IRQ_STATUS_QUERY;
-    op.u.irq_status_query.irq = pirq;
-    (void)HYPERVISOR_physdev_op(&op);
-    clear_bit(pirq, &pirq_needs_unmask_notify[0]);
-    if ( op.u.irq_status_query.flags & PHYSDEVOP_IRQ_NEEDS_UNMASK_NOTIFY )
-        set_bit(pirq, &pirq_needs_unmask_notify[0]);
+	physdev_op_t op;
+	op.cmd = PHYSDEVOP_IRQ_STATUS_QUERY;
+	op.u.irq_status_query.irq = pirq;
+	(void)HYPERVISOR_physdev_op(&op);
+	clear_bit(pirq, &pirq_needs_unmask_notify[0]);
+	if ( op.u.irq_status_query.flags & PHYSDEVOP_IRQ_NEEDS_UNMASK_NOTIFY )
+		set_bit(pirq, &pirq_needs_unmask_notify[0]);
 }
 
 /*
  * On startup, if there is no action associated with the IRQ then we are
  * probing. In this case we should not share with others as it will confuse us.
  */
-#define probing_irq(_irq) (irq_desc[(_irq)].action == NULL)
+#define probing_irq(_irq) (intr_lookup_source(irq) == NULL)
 
-static unsigned int startup_pirq(unsigned int irq)
+static void 
+xenpic_pirq_enable_intr(struct intsrc *isrc)
 {
-    evtchn_op_t op;
-    int evtchn;
+	evtchn_op_t op;
+	int evtchn;
+	unsigned int irq;
+	
+	mtx_lock_spin(&irq_mapping_update_lock);
+	irq = xenpic_vector(isrc);
+	evtchn = evtchn_from_irq(irq);
 
-    op.cmd               = EVTCHNOP_bind_pirq;
-    op.u.bind_pirq.pirq  = irq;
-    /* NB. We are happy to share unless we are probing. */
-    op.u.bind_pirq.flags = probing_irq(irq) ? 0 : BIND_PIRQ__WILL_SHARE;
-    if ( HYPERVISOR_event_channel_op(&op) != 0 )
-    {
-        if ( !probing_irq(irq) ) /* Some failures are expected when probing. */
-            printk(KERN_INFO "Failed to obtain physical IRQ %d\n", irq);
-        return 0;
-    }
-    evtchn = op.u.bind_pirq.port;
+	if (VALID_EVTCHN(evtchn))
+		goto out;
 
-    pirq_query_unmask(irq_to_pirq(irq));
+	op.cmd               = EVTCHNOP_bind_pirq;
+	op.u.bind_pirq.pirq  = irq;
+	/* NB. We are happy to share unless we are probing. */
+	op.u.bind_pirq.flags = probing_irq(irq) ? 0 : BIND_PIRQ__WILL_SHARE;
+	
+	if (HYPERVISOR_event_channel_op(&op) != 0) {
+		if (!probing_irq(irq)) /* Some failures are expected when probing. */
+			printf("Failed to obtain physical IRQ %d\n", irq);
+		mtx_unlock_spin(&irq_mapping_update_lock);
+		return;
+	}
+	evtchn = op.u.bind_pirq.port;
 
-    evtchn_to_irq[evtchn] = irq;
-    evtchn_from_irq(irq)    = evtchn;
+	pirq_query_unmask(irq_to_pirq(irq));
 
-    unmask_evtchn(evtchn);
-    pirq_unmask_notify(irq_to_pirq(irq));
+	bind_evtchn_to_cpu(evtchn, 0);
+	evtchn_to_irq[evtchn] = irq;
+	irq_info[irq] = mk_irq_info(IRQT_PIRQ, irq, evtchn);
 
-    return 0;
+ out:
+	unmask_evtchn(evtchn);
+	pirq_unmask_notify(irq_to_pirq(irq));
+	mtx_unlock_spin(&irq_mapping_update_lock);
 }
 
-static void shutdown_pirq(unsigned int irq)
+#if 0
+static void 
+xenpic_pirq_disable_intr(struct intsrc *isrc)
 {
-    evtchn_op_t op;
-    int evtchn = evtchn_from_irq(irq);
+	evtchn_op_t op;
+	unsigned int irq;
+	int evtchn;
+	
+	mtx_lock_spin(&irq_mapping_update_lock);
+	irq = xenpic_vector(isrc);
+	evtchn = evtchn_from_irq(irq);
 
-    if ( !VALID_EVTCHN(evtchn) )
-        return;
+	if (!VALID_EVTCHN(evtchn)) 
+		goto done;
+	
+	mask_evtchn(evtchn);
 
-    mask_evtchn(evtchn);
+	op.cmd          = EVTCHNOP_close;
+	op.u.close.port = evtchn;
+	PANIC_IF(HYPERVISOR_event_channel_op(&op) != 0);
 
-    op.cmd          = EVTCHNOP_close;
-    op.u.close.dom  = DOMID_SELF;
-    op.u.close.port = evtchn;
-    if ( HYPERVISOR_event_channel_op(&op) != 0 )
-        panic("Failed to unbind physical IRQ %d\n", irq);
-
-    evtchn_to_irq[evtchn] = -1;
-    evtchn_from_irq(irq)    = -1;
+	bind_evtchn_to_cpu(evtchn, 0);
+	evtchn_to_irq[evtchn] = -1;
+	irq_info[irq] = IRQ_UNBOUND;
+ done:
+	mtx_unlock_spin(&irq_mapping_update_lock);
 }
+#endif
 
-static void enable_pirq(unsigned int irq)
+static void 
+xenpic_pirq_enable_source(struct intsrc *isrc)
 {
-    int evtchn = evtchn_from_irq(irq);
-    if ( !VALID_EVTCHN(evtchn) )
-        return;
-    unmask_evtchn(evtchn);
-    pirq_unmask_notify(irq_to_pirq(irq));
+	int evtchn;
+	unsigned int irq;
+
+	mtx_lock_spin(&irq_mapping_update_lock);
+	irq = xenpic_vector(isrc);
+	evtchn = evtchn_from_irq(irq);
+
+	if (!VALID_EVTCHN(evtchn))
+		goto done;
+
+	unmask_evtchn(evtchn);
+	pirq_unmask_notify(irq_to_pirq(irq));
+ done:
+	mtx_unlock_spin(&irq_mapping_update_lock);
 }
 
-static void disable_pirq(unsigned int irq)
+static void 
+xenpic_pirq_disable_source(struct intsrc *isrc, int eoi)
 {
-    int evtchn = evtchn_from_irq(irq);
-    if ( !VALID_EVTCHN(evtchn) )
-        return;
-    mask_evtchn(evtchn);
-}
+	int evtchn;
+	unsigned int irq;
+
+	mtx_lock_spin(&irq_mapping_update_lock);
+	irq = xenpic_vector(isrc);
+	evtchn = evtchn_from_irq(irq);
 
-static void ack_pirq(unsigned int irq)
-{
-    int evtchn = evtchn_from_irq(irq);
-    if ( !VALID_EVTCHN(evtchn) )
-        return;
-    mask_evtchn(evtchn);
-    clear_evtchn(evtchn);
-}
+	if (!VALID_EVTCHN(evtchn))
+		goto done;
 
-static void end_pirq(unsigned int irq)
-{
-    int evtchn = evtchn_from_irq(irq);
-    if ( !VALID_EVTCHN(evtchn) )
-        return;
-    if ( !(irq_desc[irq].status & IRQ_DISABLED) )
-    {
-        unmask_evtchn(evtchn);
-        pirq_unmask_notify(irq_to_pirq(irq));
-    }
+	mask_evtchn(evtchn);
+ done:
+	mtx_unlock_spin(&irq_mapping_update_lock);
 }
 
-static struct hw_interrupt_type pirq_type = {
-    "Phys-irq",
-    startup_pirq,
-    shutdown_pirq,
-    enable_pirq,
-    disable_pirq,
-    ack_pirq,
-    end_pirq,
-    NULL
-};
-#endif
 
-#if 0
 static void 
-misdirect_interrupt(void *sc)
+xenpic_pirq_eoi_source(struct intsrc *isrc)
 {
-}
+	int evtchn;
+	unsigned int irq;
+
+	mtx_lock_spin(&irq_mapping_update_lock);
+	irq = xenpic_vector(isrc);
+	evtchn = evtchn_from_irq(irq);
 
+	if (!VALID_EVTCHN(evtchn))
+		goto done;
 
-#endif
+	unmask_evtchn(evtchn);
+	pirq_unmask_notify(irq_to_pirq(irq));
+ done:
+	mtx_unlock_spin(&irq_mapping_update_lock);
+}
 
 void 
 mask_evtchn(int port)
@@ -717,8 +793,6 @@
 	}
 }
 
-
-
 void irq_resume(void)
 {
 	evtchn_op_t op;
@@ -798,8 +872,7 @@
 evtchn_init(void *dummy __unused)
 {
 	int i, cpu;
-	struct xenpic *xp;
-	struct xenpic_intsrc *pin;
+	struct xenpic_intsrc *pin, *tpin;
 	
 	/* No VIRQ or IPI bindings. */
 	for (cpu = 0; cpu < NR_CPUS; cpu++) {
@@ -819,27 +892,27 @@
 	for (i = 0; i < NR_IRQS; i++)
 		irq_info[i] = IRQ_UNBOUND;
 	
-	xp = malloc(sizeof(struct xenpic) + NR_DYNIRQS*sizeof(struct xenpic_intsrc), M_DEVBUF, M_WAITOK);
-	xp->xp_pic = xenpic_template;
-	xp->xp_numintr = NR_DYNIRQS;
-	bzero(xp->xp_pins, sizeof(struct xenpic_intsrc) * NR_DYNIRQS);
+	xp = malloc(sizeof(struct xenpic) + NR_IRQS*sizeof(struct xenpic_intsrc), 
+		    M_DEVBUF, M_WAITOK);
+
+	xp->xp_dynirq_pic = &xenpic_dynirq_template;
+	xp->xp_pirq_pic = &xenpic_pirq_template;
+	xp->xp_numintr = NR_IRQS;
+	bzero(xp->xp_pins, sizeof(struct xenpic_intsrc) * NR_IRQS);
 
-	for (i = 0, pin = xp->xp_pins; i < NR_DYNIRQS; i++, pin++) {
+	for (i = 0,pin = xp->xp_pins; i < NR_DYNIRQS; i++) {
 		/* Dynamic IRQ space is currently unbound. Zero the refcnts. */
 		irq_bindcount[dynirq_to_irq(i)] = 0;
-		
-		pin->xp_intsrc.is_pic = (struct pic *)xp;
-		pin->xp_vector = i;
-		intr_register_source(&pin->xp_intsrc);
+
+		tpin = &pin[dynirq_to_irq(i)];		
+		tpin->xp_intsrc.is_pic = xp->xp_dynirq_pic;
+		tpin->xp_vector = dynirq_to_irq(i);
 	}
-	/* We don't currently have any support for physical devices in XenoFreeBSD 
-	 * so leaving this out for the moment for the sake of expediency.
-	 */
-#ifdef notyet
-	/* Phys IRQ space is statically bound (1:1 mapping). Nail refcnts. */
-	for (i = 0; i < NR_PIRQS; i++)
-	{
-		irq_bindcount[pirq_to_irq(i)] = 1;
+
+
+	for (i = 0, pin = xp->xp_pins; i < NR_PIRQS; i++) {
+		/* Dynamic IRQ space is currently unbound. Zero the refcnts. */
+		irq_bindcount[pirq_to_irq(i)] = 0;
 
 #ifdef RTC_IRQ
 		/* If not domain 0, force our RTC driver to fail its probe. */
@@ -847,21 +920,18 @@
 		    !(xen_start_info->flags & SIF_INITDOMAIN))
 			continue;
 #endif
-
-		irq_desc[pirq_to_irq(i)].status  = IRQ_DISABLED;
-		irq_desc[pirq_to_irq(i)].action  = NULL;
-		irq_desc[pirq_to_irq(i)].depth   = 1;
-		irq_desc[pirq_to_irq(i)].handler = &pirq_type;
+		tpin = &pin[pirq_to_irq(i)];		
+		tpin->xp_intsrc.is_pic = xp->xp_pirq_pic;
+		tpin->xp_vector = pirq_to_irq(i);
 	}
-#endif
 }
 
 SYSINIT(evtchn_init, SI_SUB_INTR, SI_ORDER_ANY, evtchn_init, NULL);
     /*
-     * xenpic_lock: in order to allow an interrupt to occur in a critical
+     * irq_mapping_update_lock: in order to allow an interrupt to occur in a critical
      * 	        section, to set pcpu->ipending (etc...) properly, we
      *	        must be able to get the icu lock, so it can't be
      *	        under witness.
      */
 
-MTX_SYSINIT(irq_mapping_update_lock, &irq_mapping_update_lock, "xp", MTX_DEF|MTX_NOWITNESS);
+MTX_SYSINIT(irq_mapping_update_lock, &irq_mapping_update_lock, "xp", MTX_SPIN);


More information about the p4-projects mailing list