/*
 * Xen event channels
 *
 * Xen models interrupts with abstract event channels.  Because each
 * domain gets 1024 event channels, but NR_IRQS is not that large, we
 * must dynamically map irqs<->event channels.  The event channels
 * interface with the rest of the kernel by defining a xen interrupt
 * chip.  When an event is received, it is mapped to an irq and sent
 * through the normal interrupt processing path.
 *
 * There are four kinds of events which can be mapped to an event
 * channel:
 *
 * 1. Inter-domain notifications.  This includes all the virtual
 *    device events, since they're driven by front-ends in another domain
 *    (typically dom0).
 * 2. VIRQs, typically used for timers.  These are per-cpu events.
 * 3. IPIs.
 * 4. Hardware interrupts.  Not supported at present.
 *
 * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007
 */
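
/*
 * In outline (summarising the code below): when Xen marks a port
 * pending and raises the upcall, xen_evtchn_do_upcall() maps the port
 * to its irq via evtchn_to_irq[] and feeds it into do_IRQ(), after
 * which it is handled like any other interrupt.
 */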
#include <linux/linkage.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/string.h>

#include <asm/ptrace.h>
#include <asm/irq.h>
#include <asm/sync_bitops.h>
#include <asm/xen/hypercall.h>

#include <xen/events.h>
#include <xen/interface/xen.h>
#include <xen/interface/event_channel.h>

#include "xen-ops.h"
/*
 * This lock protects updates to the following mapping and reference-count
 * arrays. The lock does not need to be acquired to read the mapping tables.
 */
static DEFINE_SPINLOCK(irq_mapping_update_lock);
/* IRQ <-> VIRQ mapping. */
static DEFINE_PER_CPU(int, virq_to_irq[NR_VIRQS]) = {[0 ... NR_VIRQS-1] = -1};
/* Packed IRQ information: binding type, sub-type index, and event channel. */
struct packed_irq
{
	unsigned short evtchn;
	unsigned char index;
	unsigned char type;
};

static struct packed_irq irq_info[NR_IRQS];
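
/*
 * Illustrative example (hypothetical values): an irq bound to
 * VIRQ_TIMER delivered on port 3 would be recorded as
 * mk_irq_info(IRQT_VIRQ, VIRQ_TIMER, 3), and the accessors further
 * down recover the evtchn, index and type fields from irq_info[].
 */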
/* Binding types. */
enum { IRQT_UNBOUND, IRQT_PIRQ, IRQT_VIRQ, IRQT_IPI, IRQT_EVTCHN };
/* Convenient shorthand for packed representation of an unbound IRQ. */
#define IRQ_UNBOUND	mk_irq_info(IRQT_UNBOUND, 0, 0)
static int evtchn_to_irq[NR_EVENT_CHANNELS] = {
	[0 ... NR_EVENT_CHANNELS-1] = -1
};
static unsigned long cpu_evtchn_mask[NR_CPUS][NR_EVENT_CHANNELS/BITS_PER_LONG];
static u8 cpu_evtchn[NR_EVENT_CHANNELS];
/* Reference counts for bindings to IRQs. */
static int irq_bindcount[NR_IRQS];

/* Xen will never allocate port zero for any purpose. */
#define VALID_EVTCHN(chn)	((chn) != 0)
/*
 * Force a proper event-channel callback from Xen after clearing the
 * callback mask. We do this in a very simple manner, by making a call
 * down into Xen. The pending flag will be checked by Xen on return.
 */
void force_evtchn_callback(void)
{
	(void)HYPERVISOR_xen_version(0, NULL);
}
EXPORT_SYMBOL_GPL(force_evtchn_callback);
static struct irq_chip xen_dynamic_chip;
/* Constructor for packed IRQ information. */
static inline struct packed_irq mk_irq_info(u32 type, u32 index, u32 evtchn)
{
	return (struct packed_irq) { evtchn, index, type };
}
/*
 * Accessors for packed IRQ information.
 */
static inline unsigned int evtchn_from_irq(int irq)
{
	return irq_info[irq].evtchn;
}

static inline unsigned int index_from_irq(int irq)
{
	return irq_info[irq].index;
}

static inline unsigned int type_from_irq(int irq)
{
	return irq_info[irq].type;
}
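
/*
 * An event is worth delivering on this cpu only if it is pending in
 * the shared-info page, routed to this cpu, and not globally masked;
 * active_evtchns() below computes exactly that three-way AND.
 */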
static inline unsigned long active_evtchns(unsigned int cpu,
					   struct shared_info *sh,
					   unsigned int idx)
{
	return (sh->evtchn_pending[idx] &
		cpu_evtchn_mask[cpu][idx] &
		~sh->evtchn_mask[idx]);
}
static void bind_evtchn_to_cpu(unsigned int chn, unsigned int cpu)
{
	int irq = evtchn_to_irq[chn];

	BUG_ON(irq == -1);
	irq_desc[irq].affinity = cpumask_of_cpu(cpu);

	__clear_bit(chn, cpu_evtchn_mask[cpu_evtchn[chn]]);
	__set_bit(chn, cpu_evtchn_mask[cpu]);

	cpu_evtchn[chn] = cpu;
}
static void init_evtchn_cpu_bindings(void)
{
	int i;

	/* By default all event channels notify CPU#0. */
	for (i = 0; i < NR_IRQS; i++)
		irq_desc[i].affinity = cpumask_of_cpu(0);

	memset(cpu_evtchn, 0, sizeof(cpu_evtchn));
	memset(cpu_evtchn_mask[0], ~0, sizeof(cpu_evtchn_mask[0]));
}
static inline unsigned int cpu_from_evtchn(unsigned int evtchn)
{
	return cpu_evtchn[evtchn];
}

static inline void clear_evtchn(int port)
{
	struct shared_info *s = HYPERVISOR_shared_info;
	sync_clear_bit(port, &s->evtchn_pending[0]);
}

static inline void set_evtchn(int port)
{
	struct shared_info *s = HYPERVISOR_shared_info;
	sync_set_bit(port, &s->evtchn_pending[0]);
}
/**
 * notify_remote_via_irq - send event to remote end of event channel via irq
 * @irq: irq of event channel to send event to
 *
 * Unlike notify_remote_via_evtchn(), this is safe to use across
 * save/restore. Notifications on a broken connection are silently
 * dropped.
 */
void notify_remote_via_irq(int irq)
{
	int evtchn = evtchn_from_irq(irq);

	if (VALID_EVTCHN(evtchn))
		notify_remote_via_evtchn(evtchn);
}
EXPORT_SYMBOL_GPL(notify_remote_via_irq);
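
/*
 * Illustrative use (a sketch; `info' is a hypothetical driver
 * context): a front-end that has queued requests on a shared ring
 * typically kicks the other end like this, where info->irq came from
 * bind_evtchn_to_irqhandler():
 *
 *	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&info->ring, notify);
 *	if (notify)
 *		notify_remote_via_irq(info->irq);
 */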
static void mask_evtchn(int port)
{
	struct shared_info *s = HYPERVISOR_shared_info;
	sync_set_bit(port, &s->evtchn_mask[0]);
}
static void unmask_evtchn(int port)
{
	struct shared_info *s = HYPERVISOR_shared_info;
	unsigned int cpu = get_cpu();

	BUG_ON(!irqs_disabled());

	/* Slow path (hypercall) if this is a non-local port. */
	if (unlikely(cpu != cpu_from_evtchn(port))) {
		struct evtchn_unmask unmask = { .port = port };
		(void)HYPERVISOR_event_channel_op(EVTCHNOP_unmask, &unmask);
	} else {
		struct vcpu_info *vcpu_info = __get_cpu_var(xen_vcpu);

		sync_clear_bit(port, &s->evtchn_mask[0]);

		/*
		 * The following is basically the equivalent of
		 * 'hw_resend_irq'. Just like a real IO-APIC we 'lose
		 * the interrupt edge' if the channel is masked.
		 */
		if (sync_test_bit(port, &s->evtchn_pending[0]) &&
		    !sync_test_and_set_bit(port / BITS_PER_LONG,
					   &vcpu_info->evtchn_pending_sel))
			vcpu_info->evtchn_upcall_pending = 1;
	}

	put_cpu();
}
static int find_unbound_irq(void)
{
	int irq;

	/* Only allocate from dynirq range */
	for (irq = 0; irq < NR_IRQS; irq++)
		if (irq_bindcount[irq] == 0)
			break;
	if (irq == NR_IRQS)
		panic("No available IRQ to bind to: increase NR_IRQS!\n");
	return irq;
}
static int bind_evtchn_to_irq(unsigned int evtchn)
{
	int irq;

	spin_lock(&irq_mapping_update_lock);

	irq = evtchn_to_irq[evtchn];
	if (irq == -1) {
		irq = find_unbound_irq();

		dynamic_irq_init(irq);
		set_irq_chip_and_handler_name(irq, &xen_dynamic_chip,
					      handle_level_irq, "event");

		evtchn_to_irq[evtchn] = irq;
		irq_info[irq] = mk_irq_info(IRQT_EVTCHN, 0, evtchn);
	}

	irq_bindcount[irq]++;
	spin_unlock(&irq_mapping_update_lock);

	return irq;
}
static int bind_virq_to_irq(unsigned int virq, unsigned int cpu)
{
	struct evtchn_bind_virq bind_virq;
	int evtchn, irq;

	spin_lock(&irq_mapping_update_lock);

	irq = per_cpu(virq_to_irq, cpu)[virq];
	if (irq == -1) {
		bind_virq.virq = virq;
		bind_virq.vcpu = cpu;
		if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq,
						&bind_virq) != 0)
			BUG();
		evtchn = bind_virq.port;

		irq = find_unbound_irq();

		dynamic_irq_init(irq);
		set_irq_chip_and_handler_name(irq, &xen_dynamic_chip,
					      handle_level_irq, "virq");

		evtchn_to_irq[evtchn] = irq;
		irq_info[irq] = mk_irq_info(IRQT_VIRQ, virq, evtchn);

		per_cpu(virq_to_irq, cpu)[virq] = irq;

		bind_evtchn_to_cpu(evtchn, cpu);
	}

	irq_bindcount[irq]++;
	spin_unlock(&irq_mapping_update_lock);

	return irq;
}
static void unbind_from_irq(unsigned int irq)
{
	struct evtchn_close close;
	int evtchn = evtchn_from_irq(irq);

	spin_lock(&irq_mapping_update_lock);

	if (VALID_EVTCHN(evtchn) && (--irq_bindcount[irq] == 0)) {
		close.port = evtchn;
		if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close) != 0)
			BUG();

		switch (type_from_irq(irq)) {
		case IRQT_VIRQ:
			per_cpu(virq_to_irq, cpu_from_evtchn(evtchn))
				[index_from_irq(irq)] = -1;
			break;
		default:
			break;
		}

		/* Closed ports are implicitly re-bound to VCPU0. */
		bind_evtchn_to_cpu(evtchn, 0);

		evtchn_to_irq[evtchn] = -1;
		irq_info[irq] = IRQ_UNBOUND;

		dynamic_irq_init(irq);
	}

	spin_unlock(&irq_mapping_update_lock);
}
int bind_evtchn_to_irqhandler(unsigned int evtchn,
			      irqreturn_t (*handler)(int, void *),
			      unsigned long irqflags,
			      const char *devname, void *dev_id)
{
	unsigned int irq;
	int retval;

	irq = bind_evtchn_to_irq(evtchn);
	retval = request_irq(irq, handler, irqflags, devname, dev_id);
	if (retval != 0) {
		unbind_from_irq(irq);
		return retval;
	}

	return irq;
}
EXPORT_SYMBOL_GPL(bind_evtchn_to_irqhandler);
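
/*
 * Illustrative use (a sketch; `blkif_interrupt' and `info' are
 * placeholders for a real driver's handler and context): a front-end
 * that learned its event channel from xenstore binds it like this,
 * keeping the returned irq for notify/unbind:
 *
 *	err = bind_evtchn_to_irqhandler(evtchn, blkif_interrupt,
 *					0, "blkif", info);
 *	if (err < 0)
 *		goto fail;
 *	info->irq = err;
 */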
int bind_virq_to_irqhandler(unsigned int virq, unsigned int cpu,
			    irqreturn_t (*handler)(int, void *),
			    unsigned long irqflags, const char *devname, void *dev_id)
{
	unsigned int irq;
	int retval;

	irq = bind_virq_to_irq(virq, cpu);
	retval = request_irq(irq, handler, irqflags, devname, dev_id);
	if (retval != 0) {
		unbind_from_irq(irq);
		return retval;
	}

	return irq;
}
EXPORT_SYMBOL_GPL(bind_virq_to_irqhandler);
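
/*
 * Illustrative use (a sketch; `xen_debug_interrupt' is a placeholder
 * handler name): binding a per-cpu virq on the current cpu:
 *
 *	irq = bind_virq_to_irqhandler(VIRQ_DEBUG, smp_processor_id(),
 *				      xen_debug_interrupt, IRQF_DISABLED,
 *				      "debug", NULL);
 */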
void unbind_from_irqhandler(unsigned int irq, void *dev_id)
{
	free_irq(irq, dev_id);
	unbind_from_irq(irq);
}
EXPORT_SYMBOL_GPL(unbind_from_irqhandler);
/*
 * Search the CPU's pending events bitmasks.  For each one found, map
 * the event number to an irq, and feed it into do_IRQ() for
 * handling.
 *
 * Xen uses a two-level bitmap to speed searching.  The first level is
 * a bitset of words which contain pending event bits.  The second
 * level is a bitset of pending events themselves.
 */
fastcall void xen_evtchn_do_upcall(struct pt_regs *regs)
{
	int cpu = get_cpu();
	struct shared_info *s = HYPERVISOR_shared_info;
	struct vcpu_info *vcpu_info = __get_cpu_var(xen_vcpu);
	unsigned long pending_words;

	vcpu_info->evtchn_upcall_pending = 0;

	/* NB. No need for a barrier here -- XCHG is a barrier on x86. */
	pending_words = xchg(&vcpu_info->evtchn_pending_sel, 0);
	while (pending_words != 0) {
		unsigned long pending_bits;
		int word_idx = __ffs(pending_words);
		pending_words &= ~(1UL << word_idx);

		while ((pending_bits = active_evtchns(cpu, s, word_idx)) != 0) {
			int bit_idx = __ffs(pending_bits);
			int port = (word_idx * BITS_PER_LONG) + bit_idx;
			int irq = evtchn_to_irq[port];

			if (irq != -1) {
				regs->orig_eax = ~irq;
				do_IRQ(regs);
			}
		}
	}

	put_cpu();
}
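
/*
 * Worked example of the two-level decode above: with BITS_PER_LONG ==
 * 32, a pending port 70 appears as bit 2 of evtchn_pending_sel
 * (word_idx == 2) and bit 6 of evtchn_pending[2] (bit_idx == 6),
 * since 70 == 2 * 32 + 6.
 */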
/* Rebind an evtchn so that it gets delivered to a specific cpu */
static void rebind_irq_to_cpu(unsigned irq, unsigned tcpu)
{
	struct evtchn_bind_vcpu bind_vcpu;
	int evtchn = evtchn_from_irq(irq);

	if (!VALID_EVTCHN(evtchn))
		return;

	/* Send future instances of this interrupt to other vcpu. */
	bind_vcpu.port = evtchn;
	bind_vcpu.vcpu = tcpu;

	/*
	 * If this fails, it usually just indicates that we're dealing with a
	 * virq or IPI channel, which don't actually need to be rebound. Ignore
	 * it, but don't do the xenlinux-level rebind in that case.
	 */
	if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_vcpu, &bind_vcpu) >= 0)
		bind_evtchn_to_cpu(evtchn, tcpu);
}
static void set_affinity_irq(unsigned irq, cpumask_t dest)
{
	unsigned tcpu = first_cpu(dest);
	rebind_irq_to_cpu(irq, tcpu);
}
static void enable_dynirq(unsigned int irq)
{
	int evtchn = evtchn_from_irq(irq);

	if (VALID_EVTCHN(evtchn))
		unmask_evtchn(evtchn);
}

static void disable_dynirq(unsigned int irq)
{
	int evtchn = evtchn_from_irq(irq);

	if (VALID_EVTCHN(evtchn))
		mask_evtchn(evtchn);
}

static void ack_dynirq(unsigned int irq)
{
	int evtchn = evtchn_from_irq(irq);

	move_native_irq(irq);

	if (VALID_EVTCHN(evtchn))
		clear_evtchn(evtchn);
}
static int retrigger_dynirq(unsigned int irq)
{
	int evtchn = evtchn_from_irq(irq);
	int ret = 0;

	if (VALID_EVTCHN(evtchn)) {
		set_evtchn(evtchn);
		ret = 1;
	}
	return ret;
}
static struct irq_chip xen_dynamic_chip __read_mostly = {
	.name		= "xen-dyn",
	.mask		= disable_dynirq,
	.unmask		= enable_dynirq,
	.ack		= ack_dynirq,
	.set_affinity	= set_affinity_irq,
	.retrigger	= retrigger_dynirq,
};
void __init xen_init_IRQ(void)
{
	int i;

	init_evtchn_cpu_bindings();

	/* No event channels are 'live' right now. */
	for (i = 0; i < NR_EVENT_CHANNELS; i++)
		mask_evtchn(i);

	/* Dynamic IRQ space is currently unbound. Zero the refcnts. */
	for (i = 0; i < NR_IRQS; i++)
		irq_bindcount[i] = 0;

	irq_ctx_init(smp_processor_id());
}