/*
 * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
 * Licensed under the GPL
 * Derived (i.e. mostly copied) from arch/i386/kernel/irq.c:
 *	Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar
 */
#include "linux/cpumask.h"
#include "linux/hardirq.h"
#include "linux/interrupt.h"
#include "linux/kernel_stat.h"
#include "linux/module.h"
#include "linux/seq_file.h"
#include "as-layout.h"
#include "kern_util.h"
#include "os.h"
/*
 * Generic, controller-independent functions:
 */
int show_interrupts(struct seq_file *p, void *v)
{
	int i = *(loff_t *) v, j;
	struct irqaction *action;
	unsigned long flags;

	if (i == 0) {
		seq_printf(p, "           ");
		for_each_online_cpu(j)
			seq_printf(p, "CPU%d       ", j);
		seq_putc(p, '\n');
	}

	if (i < NR_IRQS) {
		spin_lock_irqsave(&irq_desc[i].lock, flags);
		action = irq_desc[i].action;
		if (!action)
			goto skip;
		seq_printf(p, "%3d: ", i);
#ifndef CONFIG_SMP
		seq_printf(p, "%10u ", kstat_irqs(i));
#else
		for_each_online_cpu(j)
			seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
#endif
		seq_printf(p, " %14s", irq_desc[i].chip->typename);
		seq_printf(p, "  %s", action->name);

		for (action = action->next; action; action = action->next)
			seq_printf(p, ", %s", action->name);

		seq_putc(p, '\n');
skip:
		spin_unlock_irqrestore(&irq_desc[i].lock, flags);
	} else if (i == NR_IRQS)
		seq_putc(p, '\n');

	return 0;
}
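/*
 * For illustration only - with the formats above, a populated line of
 * /proc/interrupts would look roughly like this (the count and device
 * name are made up):
 *
 *	  3:       1342          SIGIO  mconsole
 */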
/*
 * This list is accessed under irq_lock, except in sigio_handler,
 * where it is safe from being modified.  IRQ handlers won't change it -
 * if an IRQ source has vanished, it will be freed by free_irqs just
 * before returning from sigio_handler.  That will process a separate
 * list of irqs to free, with its own locking, coming back here to
 * remove list elements, taking the irq_lock to do so.
 */
static struct irq_fd *active_fds = NULL;
static struct irq_fd **last_irq_ptr = &active_fds;
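/*
 * A sketch of the fields this file relies on - the real definition of
 * struct irq_fd lives in the shared UML headers and may differ in
 * detail or contain more members:
 *
 *	struct irq_fd {
 *		struct irq_fd *next;
 *		void *id;
 *		int fd;
 *		int type;
 *		int irq;
 *		int events;
 *		int current_events;
 *	};
 */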
extern void free_irqs(void);
void sigio_handler(int sig, struct uml_pt_regs *regs)
{
	struct irq_fd *irq_fd;
	int n;

	if (smp_sigio_handler())
		return;

	while (1) {
		n = os_waiting_for_events(active_fds);
		if (n <= 0) {
			if (n == -EINTR)
				continue;
			else break;
		}

		for (irq_fd = active_fds; irq_fd != NULL;
		     irq_fd = irq_fd->next) {
			if (irq_fd->current_events != 0) {
				irq_fd->current_events = 0;
				do_IRQ(irq_fd->irq, regs);
			}
		}
	}

	free_irqs();
}
static DEFINE_SPINLOCK(irq_lock);
int activate_fd(int irq, int fd, int type, void *dev_id)
{
	struct pollfd *tmp_pfd;
	struct irq_fd *new_fd, *irq_fd;
	unsigned long flags;
	int events, err, n;

	err = os_set_fd_async(fd);
	if (err < 0)
		goto out;

	err = -ENOMEM;
	new_fd = kmalloc(sizeof(struct irq_fd), GFP_KERNEL);
	if (new_fd == NULL)
		goto out;

	if (type == IRQ_READ)
		events = UM_POLLIN | UM_POLLPRI;
	else events = UM_POLLOUT;
	*new_fd = ((struct irq_fd) { .next		= NULL,
				     .id		= dev_id,
				     .fd		= fd,
				     .type		= type,
				     .irq		= irq,
				     .events		= events,
				     .current_events	= 0 } );

	err = -EBUSY;
	spin_lock_irqsave(&irq_lock, flags);
	for (irq_fd = active_fds; irq_fd != NULL; irq_fd = irq_fd->next) {
		if ((irq_fd->fd == fd) && (irq_fd->type == type)) {
			printk(KERN_ERR "Registering fd %d twice\n", fd);
			printk(KERN_ERR "Irqs : %d, %d\n", irq_fd->irq, irq);
			printk(KERN_ERR "Ids : 0x%p, 0x%p\n", irq_fd->id,
			       dev_id);
			goto out_unlock;
		}
	}

	if (type == IRQ_WRITE)
		fd = -1;

	tmp_pfd = NULL;
	n = 0;

	while (1) {
		n = os_create_pollfd(fd, events, tmp_pfd, n);
		if (n == 0)
			break;

		/*
		 * n > 0 means we couldn't add the new pollfd to the
		 * current pollfds array, and tmp_pfd is NULL or too
		 * small for the new array; at least n bytes are needed.
		 *
		 * We have to drop the lock here in order to call
		 * kmalloc, which might sleep.  If, meanwhile, something
		 * else changed the pollfds array so that the new pollfd
		 * still doesn't fit, we free tmp_pfd and try again.
		 */
		spin_unlock_irqrestore(&irq_lock, flags);
		kfree(tmp_pfd);

		tmp_pfd = kmalloc(n, GFP_KERNEL);
		if (tmp_pfd == NULL)
			goto out_kfree;

		spin_lock_irqsave(&irq_lock, flags);
	}

	*last_irq_ptr = new_fd;
	last_irq_ptr = &new_fd->next;

	spin_unlock_irqrestore(&irq_lock, flags);

	/*
	 * This calls activate_fd, so it has to be outside the critical
	 * section.
	 */
	maybe_sigio_broken(fd, (type == IRQ_READ));

	return 0;

 out_unlock:
	spin_unlock_irqrestore(&irq_lock, flags);
 out_kfree:
	kfree(new_fd);
 out:
	return err;
}
static void free_irq_by_cb(int (*test)(struct irq_fd *, void *), void *arg)
{
	unsigned long flags;

	spin_lock_irqsave(&irq_lock, flags);
	os_free_irq_by_cb(test, arg, active_fds, &last_irq_ptr);
	spin_unlock_irqrestore(&irq_lock, flags);
}

struct irq_and_dev {
	int irq;
	void *dev;
};
static int same_irq_and_dev(struct irq_fd *irq, void *d)
{
	struct irq_and_dev *data = d;

	return ((irq->irq == data->irq) && (irq->id == data->dev));
}
void free_irq_by_irq_and_dev(unsigned int irq, void *dev)
{
	struct irq_and_dev data = ((struct irq_and_dev) { .irq = irq,
							  .dev = dev });

	free_irq_by_cb(same_irq_and_dev, &data);
}
static int same_fd(struct irq_fd *irq, void *fd)
{
	return (irq->fd == *((int *)fd));
}
void free_irq_by_fd(int fd)
{
	free_irq_by_cb(same_fd, &fd);
}
/* Must be called with irq_lock held */
static struct irq_fd *find_irq_by_fd(int fd, int irqnum, int *index_out)
{
	struct irq_fd *irq;
	int i = 0;
	int fdi;

	for (irq = active_fds; irq != NULL; irq = irq->next) {
		if ((irq->fd == fd) && (irq->irq == irqnum))
			break;
		i++;
	}
	if (irq == NULL) {
		printk(KERN_ERR "find_irq_by_fd doesn't have descriptor %d\n",
		       fd);
		goto out;
	}
	fdi = os_get_pollfd(i);
	if ((fdi != -1) && (fdi != fd)) {
		printk(KERN_ERR "find_irq_by_fd - mismatch between active_fds "
		       "and pollfds, fd %d vs %d, need %d\n", irq->fd,
		       fdi, fd);
		irq = NULL;
		goto out;
	}
	*index_out = i;
 out:
	return irq;
}
void reactivate_fd(int fd, int irqnum)
{
	struct irq_fd *irq;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&irq_lock, flags);
	irq = find_irq_by_fd(fd, irqnum, &i);
	if (irq == NULL) {
		spin_unlock_irqrestore(&irq_lock, flags);
		return;
	}
	os_set_pollfd(i, irq->fd);
	spin_unlock_irqrestore(&irq_lock, flags);

	add_sigio_fd(fd);
}
void deactivate_fd(int fd, int irqnum)
{
	struct irq_fd *irq;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&irq_lock, flags);
	irq = find_irq_by_fd(fd, irqnum, &i);
	if (irq == NULL) {
		spin_unlock_irqrestore(&irq_lock, flags);
		return;
	}

	os_set_pollfd(i, -1);
	spin_unlock_irqrestore(&irq_lock, flags);

	ignore_sigio_fd(fd);
}
/*
 * Called just before shutdown in order to provide a clean exec
 * environment in case the system is rebooting.  No locking because
 * that would cause a pointless shutdown hang if something hadn't
 * already died.
 */
int deactivate_all_fds(void)
{
	struct irq_fd *irq;
	int err;

	for (irq = active_fds; irq != NULL; irq = irq->next) {
		err = os_clear_fd_async(irq->fd);
		if (err)
			return err;
	}
	/* If there is a signal already queued, after unblocking ignore it */
	os_set_ioignore();

	return 0;
}
/*
 * do_IRQ handles all normal device IRQs (the special SMP cross-CPU
 * interrupts have their own specific handlers).
 */
unsigned int do_IRQ(int irq, struct uml_pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs((struct pt_regs *)regs);
	irq_enter();
	__do_IRQ(irq);
	irq_exit();
	set_irq_regs(old_regs);
	return 1;
}
int um_request_irq(unsigned int irq, int fd, int type,
		   irq_handler_t handler,
		   unsigned long irqflags, const char *devname,
		   void *dev_id)
{
	int err;

	if (fd != -1) {
		err = activate_fd(irq, fd, type, dev_id);
		if (err)
			return err;
	}

	return request_irq(irq, handler, irqflags, devname, dev_id);
}

EXPORT_SYMBOL(um_request_irq);
EXPORT_SYMBOL(reactivate_fd);
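/*
 * Typical use, as a sketch - MY_IRQ, my_handler, and dev are
 * hypothetical names, not defined here.  A driver holding an open
 * host file descriptor fd would do:
 *
 *	err = um_request_irq(MY_IRQ, fd, IRQ_READ, my_handler,
 *			     IRQF_DISABLED, "my_device", dev);
 *
 * which registers fd for SIGIO-driven polling via activate_fd and
 * then installs my_handler through the generic request_irq.
 */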
/*
 * hw_interrupt_type must define (startup || enable) &&
 * (shutdown || disable) && end
 */
static void dummy(unsigned int irq)
{
}
/* This is used for everything other than the timer. */
static struct hw_interrupt_type normal_irq_type = {
	.typename = "SIGIO",
	.release = free_irq_by_irq_and_dev,
	.disable = dummy,
	.enable = dummy,
	.ack = dummy,
	.end = dummy
};

static struct hw_interrupt_type SIGVTALRM_irq_type = {
	.typename = "SIGVTALRM",
	.release = free_irq_by_irq_and_dev,
	.shutdown = dummy, /* never called */
	.disable = dummy,
	.enable = dummy,
	.ack = dummy,
	.end = dummy
};
void __init init_IRQ(void)
{
	int i;

	irq_desc[TIMER_IRQ].status = IRQ_DISABLED;
	irq_desc[TIMER_IRQ].action = NULL;
	irq_desc[TIMER_IRQ].depth = 1;
	irq_desc[TIMER_IRQ].chip = &SIGVTALRM_irq_type;
	enable_irq(TIMER_IRQ);
	for (i = 1; i < NR_IRQS; i++) {
		irq_desc[i].status = IRQ_DISABLED;
		irq_desc[i].action = NULL;
		irq_desc[i].depth = 1;
		irq_desc[i].chip = &normal_irq_type;
		enable_irq(i);
	}
}
int init_aio_irq(int irq, char *name, irq_handler_t handler)
{
	int fds[2], err;

	err = os_pipe(fds, 1, 1);
	if (err) {
		printk(KERN_ERR "init_aio_irq - os_pipe failed, err = %d\n",
		       -err);
		goto out;
	}

	err = um_request_irq(irq, fds[0], IRQ_READ, handler,
			     IRQF_DISABLED | IRQF_SAMPLE_RANDOM, name,
			     (void *) (long) fds[0]);
	if (err) {
		printk(KERN_ERR "init_aio_irq - um_request_irq failed, "
		       "err = %d\n", err);
		goto out_close;
	}

	err = fds[1];
	goto out;

 out_close:
	os_close_file(fds[0]);
	os_close_file(fds[1]);
 out:
	return err;
}
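/*
 * Note on use: on success, the return value is the write side of the
 * pipe (fds[1]).  The expectation is that the caller writes to that
 * descriptor to make fds[0] readable and so raise irq - this is how
 * the aio code is assumed to drive it.
 */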
/*
 * IRQ stack entry and exit:
 *
 * Unlike i386, which receives IRQs on the normal kernel stack and
 * switches over to the IRQ stack after some preparation, UML uses
 * sigaltstack to receive signals on a separate stack from the start.
 * These two functions make sure the rest of the kernel won't be too
 * upset by being on a different stack.  The IRQ stack has a
 * thread_info structure at the bottom so that current et al continue
 * to work.
 *
 * to_irq_stack copies the current task's thread_info to the IRQ stack
 * thread_info and sets the task's stack to point to the IRQ stack.
 *
 * from_irq_stack copies the thread_info struct back (flags may have
 * been modified) and resets the task's stack pointer.
 */
/*
 * What happens when two signals race each other?  UML doesn't block
 * signals with sigprocmask or sa_mask, and uses SA_NODEFER, so a
 * second signal could arrive while a previous one is still setting up
 * the thread_info.
 *
 * There are three cases -
 *	The first interrupt on the stack - sets up the thread_info and
 * handles the interrupt
 *	A nested interrupt interrupting the copying of the thread_info -
 * can't handle the interrupt, as the stack is in an unknown state
 *	A nested interrupt not interrupting the copying of the
 * thread_info - doesn't do any setup, just handles the interrupt
 *
 * The first job is to figure out whether we interrupted stack setup.
 * This is done by xchging the signal mask with pending_mask below.
 * If the value that comes back is zero, then there is no setup in
 * progress, and the interrupt can be handled.  If the value is
 * non-zero, then there is stack setup in progress.  In order to have
 * the interrupt handled, we leave our signal in the mask, and it will
 * be handled by the upper handler after it has set up the stack.
 *
 * Next is to figure out whether we are the outer handler or a nested
 * one.  As part of setting up the stack, thread_info->real_thread is
 * set to non-NULL (and is reset to NULL on exit).  This is the
 * nesting indicator.  If it is non-NULL, then the stack is already
 * set up and the handler can run.
 */
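/*
 * A worked example with made-up bit values: the outer handler xchgs
 * 0x1 into pending_mask and gets back 0, so it proceeds with stack
 * setup.  A nested signal then xchgs in 0x2 and gets back 0x1 -
 * non-zero, so setup is in progress - and loops, xchging in 0x3 until
 * xchg returns 0x3, leaving both bits behind.  The outer handler's
 * final xchg of 0 into pending_mask returns 0x3, so the nested signal
 * is handled once the stack is ready.
 */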
static unsigned long pending_mask;
unsigned long to_irq_stack(unsigned long *mask_out)
{
	struct thread_info *ti;
	unsigned long mask, old;
	int nested;

	mask = xchg(&pending_mask, *mask_out);
	if (mask != 0) {
		/*
		 * If any interrupts come in at this point, we want to
		 * make sure that their bits aren't lost by our
		 * putting our bit in.  So, this loop accumulates bits
		 * until xchg returns the same value that we put in.
		 * When that happens, there were no new interrupts,
		 * and pending_mask contains a bit for each interrupt
		 * that came in.
		 */
		old = *mask_out;
		do {
			old |= mask;
			mask = xchg(&pending_mask, old);
		} while (mask != old);
		return 1;
	}

	ti = current_thread_info();
	nested = (ti->real_thread != NULL);
	if (!nested) {
		struct task_struct *task;
		struct thread_info *tti;

		task = cpu_tasks[ti->cpu].task;
		tti = task_thread_info(task);

		*ti = *tti;
		ti->real_thread = tti;
		task->stack = ti;
	}

	mask = xchg(&pending_mask, 0);
	*mask_out |= mask | nested;
	return 0;
}
unsigned long from_irq_stack(int nested)
{
	struct thread_info *ti, *to;
	unsigned long mask;

	ti = current_thread_info();

	pending_mask = 1;

	to = ti->real_thread;
	current->stack = to;
	ti->real_thread = NULL;
	*to = *ti;

	mask = xchg(&pending_mask, 0);
	return mask & ~1;
}
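/*
 * A sketch of how the arch signal code is assumed to drive this pair -
 * illustrative pseudo-code, not the actual caller:
 *
 *	mask = 1UL << sig;
 *	if (to_irq_stack(&mask) == 0) {
 *		nested = mask & 1;
 *		... run the handler for each signal bit set in mask ...
 *		if (!nested)
 *			mask = from_irq_stack(nested);
 *	}
 */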