/* Support for MMIO probes.
 * Benefits a lot from code in kprobes.
 * (C) 2002 Louis Zhuang <louis.zhuang@intel.com>.
 *     2007 Alexander Eichner
 *     2008 Pekka Paalanen <pq@iki.fi>
 */

#include <linux/list.h>
#include <linux/rculist.h>
#include <linux/spinlock.h>
#include <linux/hash.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/uaccess.h>
#include <linux/ptrace.h>
#include <linux/preempt.h>
#include <linux/percpu.h>
#include <linux/kdebug.h>
#include <linux/mutex.h>
#include <linux/errno.h>
#include <linux/mmiotrace.h>

#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/debugreg.h>

#define KMMIO_PAGE_HASH_BITS 4
#define KMMIO_PAGE_TABLE_SIZE (1 << KMMIO_PAGE_HASH_BITS)

struct kmmio_fault_page {
	struct list_head list;
	struct kmmio_fault_page *release_next;
	unsigned long page; /* location of the fault page */
	bool old_presence; /* page presence prior to arming */
	bool armed;

	/*
	 * Number of times this page has been registered as a part
	 * of a probe. If zero, page is disarmed and this may be freed.
	 * Used only by writers (RCU).
	 */
	int count;
};

struct kmmio_delayed_release {
	struct rcu_head rcu;
	struct kmmio_fault_page *release_list;
};

struct kmmio_context {
	struct kmmio_fault_page *fpage;
	struct kmmio_probe *probe;
	unsigned long saved_flags;
	unsigned long addr;
	int active;
};

static DEFINE_SPINLOCK(kmmio_lock);

/* Protected by kmmio_lock */
unsigned int kmmio_count;

/* Read-protected by RCU, write-protected by kmmio_lock. */
static struct list_head kmmio_page_table[KMMIO_PAGE_TABLE_SIZE];
static LIST_HEAD(kmmio_probes);

static struct list_head *kmmio_page_list(unsigned long page)
{
	return &kmmio_page_table[hash_long(page, KMMIO_PAGE_HASH_BITS)];
}

/* Accessed per-cpu */
static DEFINE_PER_CPU(struct kmmio_context, kmmio_ctx);

/*
 * Probe lookup is basically a dynamic stabbing problem: given a point,
 * find all intervals overlapping it. The current implementation is a
 * linear scan; the existing prio tree code could be used instead.
 * Possibly better alternatives:
 * - "The Interval Skip List: A Data Structure for Finding All Intervals
 *   That Overlap a Point" (might be simple)
 * - "Space Efficient Dynamic Stabbing with Fast Queries", Mikkel Thorup
 */
/* Get the kmmio at this addr (if any). You must be holding RCU read lock. */
static struct kmmio_probe *get_kmmio_probe(unsigned long addr)
{
	struct kmmio_probe *p;
	list_for_each_entry_rcu(p, &kmmio_probes, list) {
		if (addr >= p->addr && addr < (p->addr + p->len))
			return p;
	}
	return NULL;
}

/* You must be holding RCU read lock. */
static struct kmmio_fault_page *get_kmmio_fault_page(unsigned long page)
{
	struct list_head *head;
	struct kmmio_fault_page *p;

	page &= PAGE_MASK;
	head = kmmio_page_list(page);
	list_for_each_entry_rcu(p, head, list) {
		if (p->page == page)
			return p;
	}
	return NULL;
}

static void set_pmd_presence(pmd_t *pmd, bool present, bool *old)
{
	pmdval_t v = pmd_val(*pmd);
	*old = !!(v & _PAGE_PRESENT);
	v &= ~_PAGE_PRESENT;
	if (present)
		v |= _PAGE_PRESENT;
	set_pmd(pmd, __pmd(v));
}

static void set_pte_presence(pte_t *pte, bool present, bool *old)
{
	pteval_t v = pte_val(*pte);
	*old = !!(v & _PAGE_PRESENT);
	v &= ~_PAGE_PRESENT;
	if (present)
		v |= _PAGE_PRESENT;
	set_pte_atomic(pte, __pte(v));
}

static int set_page_presence(unsigned long addr, bool present, bool *old)
{
	unsigned int level;
	pte_t *pte = lookup_address(addr, &level);

	if (!pte) {
		pr_err("kmmio: no pte for page 0x%08lx\n", addr);
		return -1;
	}

	switch (level) {
	case PG_LEVEL_2M:
		set_pmd_presence((pmd_t *)pte, present, old);
		break;
	case PG_LEVEL_4K:
		set_pte_presence(pte, present, old);
		break;
	default:
		pr_err("kmmio: unexpected page level 0x%x.\n", level);
		return -1;
	}

	__flush_tlb_one(addr);
	return 0;
}

/*
 * Mark the given page as not present. Access to it will trigger a fault.
 *
 * Struct kmmio_fault_page is protected by RCU and kmmio_lock, but the
 * protection is ignored here. The RCU read lock is assumed held, so the
 * struct will not disappear unexpectedly. Furthermore, the caller must
 * guarantee that double arming the same virtual address (page) cannot
 * occur.
 *
 * Double disarming, on the other hand, is allowed, and may occur when a
 * fault and an mmiotrace shutdown happen simultaneously.
 */
static int arm_kmmio_fault_page(struct kmmio_fault_page *f)
{
	int ret;
	WARN_ONCE(f->armed, KERN_ERR "kmmio page already armed.\n");
	if (f->armed) {
		pr_warning("kmmio double-arm: page 0x%08lx, ref %d, old %d\n",
					f->page, f->count, f->old_presence);
	}
	ret = set_page_presence(f->page, false, &f->old_presence);
	WARN_ONCE(ret < 0, KERN_ERR "kmmio arming 0x%08lx failed.\n", f->page);
	f->armed = true;
	return ret;
}

/** Restore the given page to saved presence state. */
static void disarm_kmmio_fault_page(struct kmmio_fault_page *f)
{
	bool tmp;
	int ret = set_page_presence(f->page, f->old_presence, &tmp);
	WARN_ONCE(ret < 0,
			KERN_ERR "kmmio disarming 0x%08lx failed.\n", f->page);
	f->armed = false;
}

/*
 * This is being called from do_page_fault().
 *
 * We may be in an interrupt or a critical section. Also prefetching may
 * trigger a page fault. We may be in the middle of a process switch.
 * We cannot take any locks, because we could be executing especially
 * within a kmmio critical section.
 *
 * Local interrupts are disabled, so preemption cannot happen.
 * Do not enable interrupts, do not sleep, and watch out for other CPUs.
 */
/*
 * Interrupts are disabled on entry as trap 14 (the page fault) uses an
 * interrupt gate, and they remain disabled throughout this function.
 */
int kmmio_handler(struct pt_regs *regs, unsigned long addr)
{
	struct kmmio_context *ctx;
	struct kmmio_fault_page *faultpage;
	int ret = 0; /* default to fault not handled */

	/*
	 * Preemption is now disabled to prevent process switch during
	 * single stepping. We can only handle one active kmmio trace
	 * per cpu, so ensure that we finish it before something else
	 * gets to run. We also hold the RCU read lock over single
	 * stepping to avoid looking up the probe and kmmio_fault_page
	 * again.
	 */
	preempt_disable();
	rcu_read_lock();

	faultpage = get_kmmio_fault_page(addr);
	if (!faultpage) {
		/*
		 * Either this page fault is not caused by kmmio, or
		 * another CPU just pulled the kmmio probe from under
		 * our feet. The latter case should not be possible.
		 */
		goto no_kmmio;
	}

	ctx = &get_cpu_var(kmmio_ctx);
	if (ctx->active) {
		disarm_kmmio_fault_page(faultpage);
		if (addr == ctx->addr) {
			/*
			 * On SMP we sometimes get recursive probe hits on the
			 * same address. Context is already saved, fall out.
			 */
			pr_debug("kmmio: duplicate probe hit on CPU %d, for "
					"address 0x%08lx.\n",
					smp_processor_id(), addr);
			ret = 1;
			goto no_kmmio_ctx;
		}
		/*
		 * Prevent overwriting already in-flight context.
		 * This should not happen, let's hope disarming at least
		 * prevents a panic.
		 */
		pr_emerg("kmmio: recursive probe hit on CPU %d, "
				"for address 0x%08lx. Ignoring.\n",
				smp_processor_id(), addr);
		pr_emerg("kmmio: previous hit was at 0x%08lx.\n",
				ctx->addr);
		goto no_kmmio_ctx;
	}
	ctx->active++;

	ctx->fpage = faultpage;
	ctx->probe = get_kmmio_probe(addr);
	ctx->saved_flags = (regs->flags & (X86_EFLAGS_TF | X86_EFLAGS_IF));
	ctx->addr = addr;

	if (ctx->probe && ctx->probe->pre_handler)
		ctx->probe->pre_handler(ctx->probe, regs, addr);

	/*
	 * Enable single-stepping and disable interrupts for the faulting
	 * context. Local interrupts must not get enabled during stepping.
	 */
	regs->flags |= X86_EFLAGS_TF;
	regs->flags &= ~X86_EFLAGS_IF;

	/* Now we set present bit in PTE and single step. */
	disarm_kmmio_fault_page(ctx->fpage);

	/*
	 * If another cpu accesses the same page while we are stepping,
	 * the access will not be caught. It will simply succeed and the
	 * only downside is we lose the event. If this becomes a problem,
	 * the user should drop to single cpu before tracing.
	 */

	put_cpu_var(kmmio_ctx);
	return 1; /* fault handled */

no_kmmio_ctx:
	put_cpu_var(kmmio_ctx);
no_kmmio:
	rcu_read_unlock();
	preempt_enable_no_resched();
	return ret;
}

/*
 * Interrupts are disabled on entry as trap 1 (the debug trap) uses an
 * interrupt gate, and they remain disabled throughout this function.
 * This must always get called as the pair to kmmio_handler().
 */
static int post_kmmio_handler(unsigned long condition, struct pt_regs *regs)
{
	int ret = 0;
	struct kmmio_context *ctx = &get_cpu_var(kmmio_ctx);

	if (!ctx->active) {
		pr_debug("kmmio: spurious debug trap on CPU %d.\n",
				smp_processor_id());
		goto out;
	}

	if (ctx->probe && ctx->probe->post_handler)
		ctx->probe->post_handler(ctx->probe, condition, regs);

	arm_kmmio_fault_page(ctx->fpage);

	regs->flags &= ~X86_EFLAGS_TF;
	regs->flags |= ctx->saved_flags;

	/* These were acquired in kmmio_handler(). */
	ctx->active--;
	BUG_ON(ctx->active);
	rcu_read_unlock();
	preempt_enable_no_resched();

	/*
	 * If somebody else is single-stepping across a probe point, flags
	 * will have TF set, in which case, continue the remaining processing
	 * of do_debug, as if this is not a probe hit.
	 */
	if (!(regs->flags & X86_EFLAGS_TF))
		ret = 1;
out:
	put_cpu_var(kmmio_ctx);
	return ret;
}

/* You must be holding kmmio_lock. */
static int add_kmmio_fault_page(unsigned long page)
{
	struct kmmio_fault_page *f;

	page &= PAGE_MASK;
	f = get_kmmio_fault_page(page);
	if (f) {
		if (!f->count)
			arm_kmmio_fault_page(f);
		f->count++;
		return 0;
	}

	f = kzalloc(sizeof(*f), GFP_ATOMIC);
	if (!f)
		return -1;

	f->count = 1;
	f->page = page;

	if (arm_kmmio_fault_page(f)) {
		kfree(f);
		return -1;
	}

	list_add_rcu(&f->list, kmmio_page_list(f->page));

	return 0;
}

/* You must be holding kmmio_lock. */
static void release_kmmio_fault_page(unsigned long page,
				struct kmmio_fault_page **release_list)
{
	struct kmmio_fault_page *f;

	page &= PAGE_MASK;
	f = get_kmmio_fault_page(page);
	if (!f)
		return;

	f->count--;
	BUG_ON(f->count < 0);
	if (!f->count) {
		disarm_kmmio_fault_page(f);
		f->release_next = *release_list;
		*release_list = f;
	}
}

/*
 * With page-unaligned ioremaps, one or two armed pages may contain
 * addresses from outside the intended mapping. Events for these addresses
 * are currently silently dropped. The events may result only from programming
 * mistakes by accessing addresses before the beginning or past the end of a
 * mapping.
 */
int register_kmmio_probe(struct kmmio_probe *p)
{
	unsigned long flags;
	int ret = 0;
	unsigned long size = 0;
	const unsigned long size_lim = p->len + (p->addr & ~PAGE_MASK);

	spin_lock_irqsave(&kmmio_lock, flags);
	if (get_kmmio_probe(p->addr)) {
		ret = -EEXIST;
		goto out;
	}
	kmmio_count++;
	list_add_rcu(&p->list, &kmmio_probes);
	while (size < size_lim) {
		if (add_kmmio_fault_page(p->addr + size))
			pr_err("kmmio: Unable to set page fault.\n");
		size += PAGE_SIZE;
	}
out:
	spin_unlock_irqrestore(&kmmio_lock, flags);
	/*
	 * XXX: What should I do here?
	 * Here was a call to global_flush_tlb(), but it does not exist
	 * anymore. It seems it's not needed after all.
	 */
	return ret;
}
EXPORT_SYMBOL(register_kmmio_probe);
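
/*
 * Illustrative sketch (not part of the original file): how a client such
 * as mmiotrace might use the API above. The handler signatures follow the
 * calls made from kmmio_handler() and post_kmmio_handler(); the names
 * example_pre, example_post and example_probe are hypothetical.
 */
#if 0
static void example_pre(struct kmmio_probe *p, struct pt_regs *regs,
						unsigned long addr)
{
	/* Runs before the faulting access is single-stepped. */
}

static void example_post(struct kmmio_probe *p, unsigned long condition,
						struct pt_regs *regs)
{
	/* Runs after the access has completed, from the debug trap. */
}

static struct kmmio_probe example_probe = {
	.addr = 0,	/* fill in with an ioremap()'d virtual address */
	.len = 8,	/* page-unaligned regions arm whole page(s) */
	.pre_handler = example_pre,
	.post_handler = example_post,
};

/* register_kmmio_probe(&example_probe); */
#endif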

static void rcu_free_kmmio_fault_pages(struct rcu_head *head)
{
	struct kmmio_delayed_release *dr = container_of(
						head,
						struct kmmio_delayed_release,
						rcu);
	struct kmmio_fault_page *p = dr->release_list;
	while (p) {
		struct kmmio_fault_page *next = p->release_next;
		BUG_ON(p->count);
		kfree(p);
		p = next;
	}
	kfree(dr);
}

static void remove_kmmio_fault_pages(struct rcu_head *head)
{
	struct kmmio_delayed_release *dr = container_of(
						head,
						struct kmmio_delayed_release,
						rcu);
	struct kmmio_fault_page *p = dr->release_list;
	struct kmmio_fault_page **prevp = &dr->release_list;
	unsigned long flags;

	spin_lock_irqsave(&kmmio_lock, flags);
	while (p) {
		if (!p->count) {
			list_del_rcu(&p->list);
			prevp = &p->release_next;
		} else {
			/*
			 * A page that was re-registered meanwhile must not
			 * be freed; unlink it from release_list.
			 */
			*prevp = p->release_next;
		}
		p = p->release_next;
	}
	spin_unlock_irqrestore(&kmmio_lock, flags);

	/* This is the real RCU destroy call. */
	call_rcu(&dr->rcu, rcu_free_kmmio_fault_pages);
}

/*
 * Remove a kmmio probe. You have to synchronize_rcu() before you can be
 * sure that the callbacks will not be called anymore. Only after that
 * you may actually release your struct kmmio_probe.
 *
 * Unregistering a kmmio fault page has three steps:
 * 1. release_kmmio_fault_page()
 *    Disarm the page, wait a grace period to let all faults finish.
 * 2. remove_kmmio_fault_pages()
 *    Remove the pages from kmmio_page_table.
 * 3. rcu_free_kmmio_fault_pages()
 *    Actually free the kmmio_fault_page structs, after another RCU grace
 *    period.
 */
void unregister_kmmio_probe(struct kmmio_probe *p)
{
	unsigned long flags;
	unsigned long size = 0;
	const unsigned long size_lim = p->len + (p->addr & ~PAGE_MASK);
	struct kmmio_fault_page *release_list = NULL;
	struct kmmio_delayed_release *drelease;

	spin_lock_irqsave(&kmmio_lock, flags);
	while (size < size_lim) {
		release_kmmio_fault_page(p->addr + size, &release_list);
		size += PAGE_SIZE;
	}
	list_del_rcu(&p->list);
	kmmio_count--;
	spin_unlock_irqrestore(&kmmio_lock, flags);

	drelease = kmalloc(sizeof(*drelease), GFP_ATOMIC);
	if (!drelease) {
		pr_crit("kmmio: leaking kmmio_fault_page objects.\n");
		return;
	}
	drelease->release_list = release_list;

	/*
	 * This is not really RCU here. We have just disarmed a set of
	 * pages so that they cannot trigger page faults anymore. However,
	 * we cannot remove the pages from kmmio_page_table,
	 * because a probe hit might be in flight on another CPU. The
	 * pages are collected into a list, and they will be removed from
	 * kmmio_page_table when it is certain that no probe hit related to
	 * these pages can be in flight. An RCU grace period sounds like a
	 * good choice.
	 *
	 * If we removed the pages too early, the kmmio page fault handler
	 * might not find the respective kmmio_fault_page and would determine
	 * it's not a kmmio fault, when it actually is. This would lead to
	 * madness.
	 */
	call_rcu(&drelease->rcu, remove_kmmio_fault_pages);
}
EXPORT_SYMBOL(unregister_kmmio_probe);
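
/*
 * Illustrative sketch (not part of the original file): safe teardown for
 * the hypothetical example_probe above. Per the comment on
 * unregister_kmmio_probe(), the caller must wait a grace period before
 * reusing or freeing the struct kmmio_probe, because handlers may still
 * be running on other CPUs.
 */
#if 0
	unregister_kmmio_probe(&example_probe);
	synchronize_rcu();	/* no callback can reference the probe now */
	/* kfree() would be safe here for a dynamically allocated probe */
#endif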

static int kmmio_die_notifier(struct notifier_block *nb, unsigned long val,
								void *args)
{
	struct die_args *arg = args;

	if (val == DIE_DEBUG && (arg->err & DR_STEP))
		if (post_kmmio_handler(arg->err, arg->regs) == 1)
			return NOTIFY_STOP;

	return NOTIFY_DONE;
}

static struct notifier_block nb_die = {
	.notifier_call = kmmio_die_notifier
};

static int __init init_kmmio(void)
{
	int i;

	for (i = 0; i < KMMIO_PAGE_TABLE_SIZE; i++)
		INIT_LIST_HEAD(&kmmio_page_table[i]);
	return register_die_notifier(&nb_die);
}
fs_initcall(init_kmmio); /* should be before device_initcall() */