/*
 * Infrastructure for profiling code inserted by 'gcc -pg'.
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2004-2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally ported from the -rt patch by:
 *   Copyright (C) 2007 Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code in the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 William Lee Irwin III
 */
#include <linux/stop_machine.h>
#include <linux/clocksource.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/suspend.h>
#include <linux/debugfs.h>
#include <linux/hardirq.h>
#include <linux/kthread.h>
#include <linux/uaccess.h>
#include <linux/kprobes.h>
#include <linux/ftrace.h>
#include <linux/sysctl.h>
#include <linux/ctype.h>
#include <linux/list.h>
#include <linux/hash.h>

#include <asm/ftrace.h>
#define FTRACE_WARN_ON(cond)			\
	do {					\
		if (WARN_ON(cond))		\
			ftrace_kill();		\
	} while (0)

#define FTRACE_WARN_ON_ONCE(cond)		\
	do {					\
		if (WARN_ON_ONCE(cond))		\
			ftrace_kill();		\
	} while (0)

/* hash bits for specific function selection */
#define FTRACE_HASH_BITS 7
#define FTRACE_FUNC_HASHSIZE (1 << FTRACE_HASH_BITS)
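
/*
 * Seven hash bits give 1 << 7 = 128 buckets; an instruction pointer is
 * mapped to a bucket with key = hash_long(ip, FTRACE_HASH_BITS), as in
 * function_trace_probe_call() below.
 */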
/* ftrace_enabled is a switch to turn ftrace on or off */
int ftrace_enabled __read_mostly;
static int last_ftrace_enabled;

/* Quick disabling of function tracer. */
int function_trace_stop;

/*
 * ftrace_disabled is set when an anomaly is discovered.
 * ftrace_disabled is much stronger than ftrace_enabled.
 */
static int ftrace_disabled __read_mostly;

static DEFINE_MUTEX(ftrace_lock);

static struct ftrace_ops ftrace_list_end __read_mostly =
{
	.func = ftrace_stub,
};

static struct ftrace_ops *ftrace_list __read_mostly = &ftrace_list_end;
ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
ftrace_func_t __ftrace_trace_function __read_mostly = ftrace_stub;
ftrace_func_t ftrace_pid_function __read_mostly = ftrace_stub;
static void ftrace_list_func(unsigned long ip, unsigned long parent_ip)
{
	struct ftrace_ops *op = ftrace_list;

	/* in case someone actually ports this to alpha! */
	read_barrier_depends();

	while (op != &ftrace_list_end) {
		read_barrier_depends();
		op->func(ip, parent_ip);
		op = op->next;
	}
}

static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip)
{
	if (!test_tsk_trace_trace(current))
		return;

	ftrace_pid_function(ip, parent_ip);
}

static void set_ftrace_pid_function(ftrace_func_t func)
{
	/* do not set ftrace_pid_function to itself! */
	if (func != ftrace_pid_func)
		ftrace_pid_function = func;
}

/**
 * clear_ftrace_function - reset the ftrace function
 *
 * This NULLs the ftrace function and in essence stops
 * tracing. There may be lag before the change is seen
 * on all CPUs.
 */
void clear_ftrace_function(void)
{
	ftrace_trace_function = ftrace_stub;
	__ftrace_trace_function = ftrace_stub;
	ftrace_pid_function = ftrace_stub;
}

#ifndef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
/*
 * For those archs that do not test function_trace_stop in their
 * mcount call site, we need to do it from C.
 */
static void ftrace_test_stop_func(unsigned long ip, unsigned long parent_ip)
{
	if (function_trace_stop)
		return;

	__ftrace_trace_function(ip, parent_ip);
}
#endif
static int __register_ftrace_function(struct ftrace_ops *ops)
{
	ops->next = ftrace_list;
	/*
	 * We are entering ops into the ftrace_list but another
	 * CPU might be walking that list. We need to make sure
	 * the ops->next pointer is valid before another CPU sees
	 * the ops pointer included into the ftrace_list.
	 */
	smp_wmb();
	ftrace_list = ops;

	if (ftrace_enabled) {
		ftrace_func_t func;

		if (ops->next == &ftrace_list_end)
			func = ops->func;
		else
			func = ftrace_list_func;

		if (ftrace_pid_trace) {
			set_ftrace_pid_function(func);
			func = ftrace_pid_func;
		}

		/*
		 * For one func, simply call it directly.
		 * For more than one func, call the chain.
		 */
#ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
		ftrace_trace_function = func;
#else
		__ftrace_trace_function = func;
		ftrace_trace_function = ftrace_test_stop_func;
#endif
	}

	return 0;
}

static int __unregister_ftrace_function(struct ftrace_ops *ops)
{
	struct ftrace_ops **p;

	/*
	 * If we are removing the last function, then simply point
	 * to the ftrace_stub.
	 */
	if (ftrace_list == ops && ops->next == &ftrace_list_end) {
		ftrace_trace_function = ftrace_stub;
		ftrace_list = &ftrace_list_end;
		return 0;
	}

	for (p = &ftrace_list; *p != &ftrace_list_end; p = &(*p)->next)
		if (*p == ops)
			break;

	if (*p != ops)
		return -1;

	*p = (*p)->next;

	if (ftrace_enabled) {
		/* If we only have one func left, then call that directly */
		if (ftrace_list->next == &ftrace_list_end) {
			ftrace_func_t func = ftrace_list->func;

			if (ftrace_pid_trace) {
				set_ftrace_pid_function(func);
				func = ftrace_pid_func;
			}
#ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
			ftrace_trace_function = func;
#else
			__ftrace_trace_function = func;
#endif
		}
	}

	return 0;
}
static void ftrace_update_pid_func(void)
{
	ftrace_func_t func;

	if (ftrace_trace_function == ftrace_stub)
		return;

	func = ftrace_trace_function;

	if (ftrace_pid_trace) {
		set_ftrace_pid_function(func);
		func = ftrace_pid_func;
	} else {
		if (func == ftrace_pid_func)
			func = ftrace_pid_function;
	}

#ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
	ftrace_trace_function = func;
#else
	__ftrace_trace_function = func;
#endif
}

/* set when tracing only a pid */
struct pid *ftrace_pid_trace;
static struct pid * const ftrace_swapper_pid = &init_struct_pid;
#ifdef CONFIG_DYNAMIC_FTRACE

#ifndef CONFIG_FTRACE_MCOUNT_RECORD
# error Dynamic ftrace depends on MCOUNT_RECORD
#endif

static struct hlist_head ftrace_func_hash[FTRACE_FUNC_HASHSIZE] __read_mostly;

struct ftrace_func_probe {
	struct hlist_node	node;
	struct ftrace_probe_ops	*ops;
	unsigned long		flags;
	unsigned long		ip;
	void			*data;
	struct rcu_head		rcu;
};

enum {
	FTRACE_ENABLE_CALLS		= (1 << 0),
	FTRACE_DISABLE_CALLS		= (1 << 1),
	FTRACE_UPDATE_TRACE_FUNC	= (1 << 2),
	FTRACE_ENABLE_MCOUNT		= (1 << 3),
	FTRACE_DISABLE_MCOUNT		= (1 << 4),
	FTRACE_START_FUNC_RET		= (1 << 5),
	FTRACE_STOP_FUNC_RET		= (1 << 6),
};

static int ftrace_filtered;

static struct dyn_ftrace *ftrace_new_addrs;

static DEFINE_MUTEX(ftrace_regex_lock);

struct ftrace_page {
	struct ftrace_page	*next;
	int			index;
	struct dyn_ftrace	records[];
};

#define ENTRIES_PER_PAGE \
	((PAGE_SIZE - sizeof(struct ftrace_page)) / sizeof(struct dyn_ftrace))
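
/*
 * For example (sizes vary by arch and config): with 4K pages and a
 * 16-byte dyn_ftrace record on 64-bit, this works out to roughly
 * (4096 - sizeof(struct ftrace_page)) / 16, i.e. about 250 records
 * per page.
 */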
/* estimate from running different kernels */
#define NR_TO_INIT		10000

static struct ftrace_page	*ftrace_pages_start;
static struct ftrace_page	*ftrace_pages;

static struct dyn_ftrace *ftrace_free_records;

/*
 * These macros expand to a nested (double) for loop. Do not use
 * 'break' to break out of the loop: you must use a goto.
 */
#define do_for_each_ftrace_rec(pg, rec)					\
	for (pg = ftrace_pages_start; pg; pg = pg->next) {		\
		int _____i;						\
		for (_____i = 0; _____i < pg->index; _____i++) {	\
			rec = &pg->records[_____i];

#define while_for_each_ftrace_rec()		\
		}				\
	}
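
/*
 * Illustrative sketch (hypothetical helper, not part of the ftrace
 * code): walking all records with the macros above. Note the goto --
 * a 'break' would only leave the inner loop of the expanded double for.
 */
static unsigned long __maybe_unused ftrace_example_count_recs(unsigned long limit)
{
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;
	unsigned long cnt = 0;

	mutex_lock(&ftrace_lock);
	do_for_each_ftrace_rec(pg, rec) {
		if (!(rec->flags & FTRACE_FL_FREE))
			cnt++;
		if (cnt >= limit)
			goto out;	/* exits both loops */
	} while_for_each_ftrace_rec();
 out:
	mutex_unlock(&ftrace_lock);
	return cnt;
}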
#ifdef CONFIG_KPROBES

static int frozen_record_count;

static inline void freeze_record(struct dyn_ftrace *rec)
{
	if (!(rec->flags & FTRACE_FL_FROZEN)) {
		rec->flags |= FTRACE_FL_FROZEN;
		frozen_record_count++;
	}
}

static inline void unfreeze_record(struct dyn_ftrace *rec)
{
	if (rec->flags & FTRACE_FL_FROZEN) {
		rec->flags &= ~FTRACE_FL_FROZEN;
		frozen_record_count--;
	}
}

static inline int record_frozen(struct dyn_ftrace *rec)
{
	return rec->flags & FTRACE_FL_FROZEN;
}
#else
# define freeze_record(rec)	({ 0; })
# define unfreeze_record(rec)	({ 0; })
# define record_frozen(rec)	({ 0; })
#endif /* CONFIG_KPROBES */

static void ftrace_free_rec(struct dyn_ftrace *rec)
{
	rec->ip = (unsigned long)ftrace_free_records;
	ftrace_free_records = rec;
	rec->flags |= FTRACE_FL_FREE;
}

void ftrace_release(void *start, unsigned long size)
{
	struct dyn_ftrace *rec;
	struct ftrace_page *pg;
	unsigned long s = (unsigned long)start;
	unsigned long e = s + size;

	if (ftrace_disabled || !start)
		return;

	mutex_lock(&ftrace_lock);
	do_for_each_ftrace_rec(pg, rec) {
		if ((rec->ip >= s) && (rec->ip < e) &&
		    !(rec->flags & FTRACE_FL_FREE))
			ftrace_free_rec(rec);
	} while_for_each_ftrace_rec();
	mutex_unlock(&ftrace_lock);
}
static struct dyn_ftrace *ftrace_alloc_dyn_node(unsigned long ip)
{
	struct dyn_ftrace *rec;

	/* First check for freed records */
	if (ftrace_free_records) {
		rec = ftrace_free_records;

		if (unlikely(!(rec->flags & FTRACE_FL_FREE))) {
			FTRACE_WARN_ON_ONCE(1);
			ftrace_free_records = NULL;
			return NULL;
		}

		ftrace_free_records = (void *)rec->ip;
		memset(rec, 0, sizeof(*rec));
		return rec;
	}

	if (ftrace_pages->index == ENTRIES_PER_PAGE) {
		if (!ftrace_pages->next) {
			/* allocate another page */
			ftrace_pages->next =
				(void *)get_zeroed_page(GFP_KERNEL);
			if (!ftrace_pages->next)
				return NULL;
		}
		ftrace_pages = ftrace_pages->next;
	}

	return &ftrace_pages->records[ftrace_pages->index++];
}

static struct dyn_ftrace *
ftrace_record_ip(unsigned long ip)
{
	struct dyn_ftrace *rec;

	if (ftrace_disabled)
		return NULL;

	rec = ftrace_alloc_dyn_node(ip);
	if (!rec)
		return NULL;

	rec->ip = ip;
	rec->flags = (unsigned long)ftrace_new_addrs;
	ftrace_new_addrs = rec;

	return rec;
}

static void print_ip_ins(const char *fmt, unsigned char *p)
{
	int i;

	printk(KERN_CONT "%s", fmt);

	for (i = 0; i < MCOUNT_INSN_SIZE; i++)
		printk(KERN_CONT "%s%02x", i ? ":" : "", p[i]);
}
static void ftrace_bug(int failed, unsigned long ip)
{
	switch (failed) {
	case -EFAULT:
		FTRACE_WARN_ON_ONCE(1);
		pr_info("ftrace faulted on modifying ");
		print_ip_sym(ip);
		break;
	case -EINVAL:
		FTRACE_WARN_ON_ONCE(1);
		pr_info("ftrace failed to modify ");
		print_ip_sym(ip);
		print_ip_ins(" actual: ", (unsigned char *)ip);
		printk(KERN_CONT "\n");
		break;
	case -EPERM:
		FTRACE_WARN_ON_ONCE(1);
		pr_info("ftrace faulted on writing ");
		print_ip_sym(ip);
		break;
	default:
		FTRACE_WARN_ON_ONCE(1);
		pr_info("ftrace faulted on unknown error ");
		print_ip_sym(ip);
	}
}
static int
__ftrace_replace_code(struct dyn_ftrace *rec, int enable)
{
	unsigned long ftrace_addr;
	unsigned long ip, fl;

	ftrace_addr = (unsigned long)FTRACE_ADDR;

	ip = rec->ip;

	/*
	 * If this record is not to be traced and
	 * it is not enabled then do nothing.
	 *
	 * If this record is not to be traced and
	 * it is enabled then disable it.
	 */
	if (rec->flags & FTRACE_FL_NOTRACE) {
		if (rec->flags & FTRACE_FL_ENABLED)
			rec->flags &= ~FTRACE_FL_ENABLED;
		else
			return 0;

	} else if (ftrace_filtered && enable) {
		/*
		 * Filtering is on:
		 */
		fl = rec->flags & (FTRACE_FL_FILTER | FTRACE_FL_ENABLED);

		/* Record is filtered and enabled, do nothing */
		if (fl == (FTRACE_FL_FILTER | FTRACE_FL_ENABLED))
			return 0;

		/* Record is not filtered or enabled, do nothing */
		if (!fl)
			return 0;

		/* Record is not filtered but enabled, disable it */
		if (fl == FTRACE_FL_ENABLED)
			rec->flags &= ~FTRACE_FL_ENABLED;
		else
		/* Otherwise record is filtered but not enabled, enable it */
			rec->flags |= FTRACE_FL_ENABLED;
	} else {
		/* Disable or not filtered */

		if (enable) {
			/* if record is enabled, do nothing */
			if (rec->flags & FTRACE_FL_ENABLED)
				return 0;

			rec->flags |= FTRACE_FL_ENABLED;
		} else {
			/* if record is not enabled, do nothing */
			if (!(rec->flags & FTRACE_FL_ENABLED))
				return 0;

			rec->flags &= ~FTRACE_FL_ENABLED;
		}
	}

	if (rec->flags & FTRACE_FL_ENABLED)
		return ftrace_make_call(rec, ftrace_addr);
	else
		return ftrace_make_nop(NULL, rec, ftrace_addr);
}
static void ftrace_replace_code(int enable)
{
	int failed;
	struct dyn_ftrace *rec;
	struct ftrace_page *pg;

	do_for_each_ftrace_rec(pg, rec) {
		/*
		 * Skip over free records, records that have
		 * failed and records that have not been converted.
		 */
		if (rec->flags & FTRACE_FL_FREE ||
		    rec->flags & FTRACE_FL_FAILED ||
		    !(rec->flags & FTRACE_FL_CONVERTED))
			continue;

		/* ignore updates to this record's mcount site */
		if (get_kprobe((void *)rec->ip)) {
			freeze_record(rec);
			continue;
		} else {
			unfreeze_record(rec);
		}

		failed = __ftrace_replace_code(rec, enable);
		if (failed) {
			rec->flags |= FTRACE_FL_FAILED;
			if ((system_state == SYSTEM_BOOTING) ||
			    !core_kernel_text(rec->ip)) {
				ftrace_free_rec(rec);
			} else {
				ftrace_bug(failed, rec->ip);
				/* Stop processing */
				return;
			}
		}
	} while_for_each_ftrace_rec();
}
static int
ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
{
	int ret;

	ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
	if (ret) {
		ftrace_bug(ret, rec->ip);
		rec->flags |= FTRACE_FL_FAILED;
		return 0;
	}
	return 1;
}

/*
 * archs can override this function if they must do something
 * before the modifying code is performed.
 */
int __weak ftrace_arch_code_modify_prepare(void)
{
	return 0;
}

/*
 * archs can override this function if they must do something
 * after the modifying code is performed.
 */
int __weak ftrace_arch_code_modify_post_process(void)
{
	return 0;
}

static int __ftrace_modify_code(void *data)
{
	int *command = data;

	if (*command & FTRACE_ENABLE_CALLS)
		ftrace_replace_code(1);
	else if (*command & FTRACE_DISABLE_CALLS)
		ftrace_replace_code(0);

	if (*command & FTRACE_UPDATE_TRACE_FUNC)
		ftrace_update_ftrace_func(ftrace_trace_function);

	if (*command & FTRACE_START_FUNC_RET)
		ftrace_enable_ftrace_graph_caller();
	else if (*command & FTRACE_STOP_FUNC_RET)
		ftrace_disable_ftrace_graph_caller();

	return 0;
}

static void ftrace_run_update_code(int command)
{
	int ret;

	ret = ftrace_arch_code_modify_prepare();
	FTRACE_WARN_ON(ret);
	if (ret)
		return;

	stop_machine(__ftrace_modify_code, &command, NULL);

	ret = ftrace_arch_code_modify_post_process();
	FTRACE_WARN_ON(ret);
}
static ftrace_func_t saved_ftrace_func;
static int ftrace_start_up;

static void ftrace_startup_enable(int command)
{
	if (saved_ftrace_func != ftrace_trace_function) {
		saved_ftrace_func = ftrace_trace_function;
		command |= FTRACE_UPDATE_TRACE_FUNC;
	}

	if (!command || !ftrace_enabled)
		return;

	ftrace_run_update_code(command);
}

static void ftrace_startup(int command)
{
	if (unlikely(ftrace_disabled))
		return;

	ftrace_start_up++;
	command |= FTRACE_ENABLE_CALLS;

	ftrace_startup_enable(command);
}

static void ftrace_shutdown(int command)
{
	if (unlikely(ftrace_disabled))
		return;

	ftrace_start_up--;
	if (!ftrace_start_up)
		command |= FTRACE_DISABLE_CALLS;

	if (saved_ftrace_func != ftrace_trace_function) {
		saved_ftrace_func = ftrace_trace_function;
		command |= FTRACE_UPDATE_TRACE_FUNC;
	}

	if (!command || !ftrace_enabled)
		return;

	ftrace_run_update_code(command);
}

static void ftrace_startup_sysctl(void)
{
	int command = FTRACE_ENABLE_MCOUNT;

	if (unlikely(ftrace_disabled))
		return;

	/* Force update next time */
	saved_ftrace_func = NULL;
	/* ftrace_start_up is true if we want ftrace running */
	if (ftrace_start_up)
		command |= FTRACE_ENABLE_CALLS;

	ftrace_run_update_code(command);
}

static void ftrace_shutdown_sysctl(void)
{
	int command = FTRACE_DISABLE_MCOUNT;

	if (unlikely(ftrace_disabled))
		return;

	/* ftrace_start_up is true if ftrace is running */
	if (ftrace_start_up)
		command |= FTRACE_DISABLE_CALLS;

	ftrace_run_update_code(command);
}
static cycle_t		ftrace_update_time;
static unsigned long	ftrace_update_cnt;
unsigned long		ftrace_update_tot_cnt;

static int ftrace_update_code(struct module *mod)
{
	struct dyn_ftrace *p;
	cycle_t start, stop;

	start = ftrace_now(raw_smp_processor_id());
	ftrace_update_cnt = 0;

	while (ftrace_new_addrs) {

		/* If something went wrong, bail without enabling anything */
		if (unlikely(ftrace_disabled))
			return -1;

		p = ftrace_new_addrs;
		ftrace_new_addrs = (struct dyn_ftrace *)p->flags;
		p->flags = 0L;

		/* convert record (i.e, patch mcount-call with NOP) */
		if (ftrace_code_disable(mod, p)) {
			p->flags |= FTRACE_FL_CONVERTED;
			ftrace_update_cnt++;
		} else
			ftrace_free_rec(p);
	}

	stop = ftrace_now(raw_smp_processor_id());
	ftrace_update_time = stop - start;
	ftrace_update_tot_cnt += ftrace_update_cnt;

	return 0;
}
static int __init ftrace_dyn_table_alloc(unsigned long num_to_init)
{
	struct ftrace_page *pg;
	int cnt;
	int i;

	/* allocate a few pages */
	ftrace_pages_start = (void *)get_zeroed_page(GFP_KERNEL);
	if (!ftrace_pages_start)
		return -1;

	/*
	 * Allocate a few more pages.
	 *
	 * TODO: have some parser search vmlinux before
	 *   final linking to find all calls to ftrace.
	 *   Then we can:
	 *    a) know how many pages to allocate.
	 *     and
	 *    b) set up the table then.
	 *
	 *  The dynamic code is still necessary for
	 *  modules.
	 */

	pg = ftrace_pages = ftrace_pages_start;

	cnt = num_to_init / ENTRIES_PER_PAGE;
	pr_info("ftrace: allocating %ld entries in %d pages\n",
		num_to_init, cnt + 1);

	for (i = 0; i < cnt; i++) {
		pg->next = (void *)get_zeroed_page(GFP_KERNEL);

		/* If we fail, we'll try later anyway */
		if (!pg->next)
			break;

		pg = pg->next;
	}

	return 0;
}
enum {
	FTRACE_ITER_FILTER	= (1 << 0),
	FTRACE_ITER_CONT	= (1 << 1),
	FTRACE_ITER_NOTRACE	= (1 << 2),
	FTRACE_ITER_FAILURES	= (1 << 3),
	FTRACE_ITER_PRINTALL	= (1 << 4),
	FTRACE_ITER_HASH	= (1 << 5),
};

#define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */

struct ftrace_iterator {
	struct ftrace_page	*pg;
	int			hidx;
	int			idx;
	unsigned		flags;
	unsigned char		buffer[FTRACE_BUFF_MAX+1];
	unsigned		buffer_idx;
};
static void *
t_hash_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct ftrace_iterator *iter = m->private;
	struct hlist_node *hnd = v;
	struct hlist_head *hhd;

	WARN_ON(!(iter->flags & FTRACE_ITER_HASH));

	(*pos)++;

 retry:
	if (iter->hidx >= FTRACE_FUNC_HASHSIZE)
		return NULL;

	hhd = &ftrace_func_hash[iter->hidx];

	if (hlist_empty(hhd)) {
		iter->hidx++;
		hnd = NULL;
		goto retry;
	}

	if (!hnd)
		hnd = hhd->first;
	else {
		hnd = hnd->next;
		if (!hnd) {
			iter->hidx++;
			goto retry;
		}
	}

	return hnd;
}

static void *t_hash_start(struct seq_file *m, loff_t *pos)
{
	struct ftrace_iterator *iter = m->private;
	void *p = NULL;

	iter->flags |= FTRACE_ITER_HASH;

	return t_hash_next(m, p, pos);
}

static int t_hash_show(struct seq_file *m, void *v)
{
	struct ftrace_func_probe *rec;
	struct hlist_node *hnd = v;
	char str[KSYM_SYMBOL_LEN];

	rec = hlist_entry(hnd, struct ftrace_func_probe, node);

	if (rec->ops->print)
		return rec->ops->print(m, rec->ip, rec->ops, rec->data);

	kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
	seq_printf(m, "%s:", str);

	kallsyms_lookup((unsigned long)rec->ops->func, NULL, NULL, NULL, str);
	seq_printf(m, "%s", str);

	if (rec->data)
		seq_printf(m, ":%p", rec->data);
	seq_putc(m, '\n');

	return 0;
}

static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct ftrace_iterator *iter = m->private;
	struct dyn_ftrace *rec = NULL;

	if (iter->flags & FTRACE_ITER_HASH)
		return t_hash_next(m, v, pos);

	(*pos)++;

	if (iter->flags & FTRACE_ITER_PRINTALL)
		return NULL;

 retry:
	if (iter->idx >= iter->pg->index) {
		if (iter->pg->next) {
			iter->pg = iter->pg->next;
			iter->idx = 0;
			goto retry;
		}
	} else {
		rec = &iter->pg->records[iter->idx++];
		if ((rec->flags & FTRACE_FL_FREE) ||

		    (!(iter->flags & FTRACE_ITER_FAILURES) &&
		     (rec->flags & FTRACE_FL_FAILED)) ||

		    ((iter->flags & FTRACE_ITER_FAILURES) &&
		     !(rec->flags & FTRACE_FL_FAILED)) ||

		    ((iter->flags & FTRACE_ITER_FILTER) &&
		     !(rec->flags & FTRACE_FL_FILTER)) ||

		    ((iter->flags & FTRACE_ITER_NOTRACE) &&
		     !(rec->flags & FTRACE_FL_NOTRACE))) {
			rec = NULL;
			goto retry;
		}
	}

	return rec;
}
static void *t_start(struct seq_file *m, loff_t *pos)
{
	struct ftrace_iterator *iter = m->private;
	void *p = NULL;

	mutex_lock(&ftrace_lock);
	/*
	 * For set_ftrace_filter reading, if we have the filter
	 * off, we can short cut and just print out that all
	 * functions are enabled.
	 */
	if (iter->flags & FTRACE_ITER_FILTER && !ftrace_filtered) {
		if (*pos > 0)
			return t_hash_start(m, pos);
		iter->flags |= FTRACE_ITER_PRINTALL;
		(*pos)++;
		return iter;
	}

	if (iter->flags & FTRACE_ITER_HASH)
		return t_hash_start(m, pos);

	p = t_next(m, p, pos);

	if (!p)
		return t_hash_start(m, pos);

	return p;
}

static void t_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&ftrace_lock);
}

static int t_show(struct seq_file *m, void *v)
{
	struct ftrace_iterator *iter = m->private;
	struct dyn_ftrace *rec = v;
	char str[KSYM_SYMBOL_LEN];

	if (iter->flags & FTRACE_ITER_HASH)
		return t_hash_show(m, v);

	if (iter->flags & FTRACE_ITER_PRINTALL) {
		seq_printf(m, "#### all functions enabled ####\n");
		return 0;
	}

	if (!rec)
		return 0;

	kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);

	seq_printf(m, "%s\n", str);

	return 0;
}
static struct seq_operations show_ftrace_seq_ops = {
	.start = t_start,
	.next = t_next,
	.stop = t_stop,
	.show = t_show,
};

static int
ftrace_avail_open(struct inode *inode, struct file *file)
{
	struct ftrace_iterator *iter;
	int ret;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return -ENOMEM;

	iter->pg = ftrace_pages_start;

	ret = seq_open(file, &show_ftrace_seq_ops);
	if (!ret) {
		struct seq_file *m = file->private_data;

		m->private = iter;
	} else
		kfree(iter);

	return ret;
}

int ftrace_avail_release(struct inode *inode, struct file *file)
{
	struct seq_file *m = (struct seq_file *)file->private_data;
	struct ftrace_iterator *iter = m->private;

	seq_release(inode, file);
	kfree(iter);

	return 0;
}

static int
ftrace_failures_open(struct inode *inode, struct file *file)
{
	int ret;
	struct seq_file *m;
	struct ftrace_iterator *iter;

	ret = ftrace_avail_open(inode, file);
	if (!ret) {
		m = (struct seq_file *)file->private_data;
		iter = (struct ftrace_iterator *)m->private;
		iter->flags = FTRACE_ITER_FAILURES;
	}

	return ret;
}
static void ftrace_filter_reset(int enable)
{
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;
	unsigned long type = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;

	mutex_lock(&ftrace_lock);
	if (enable)
		ftrace_filtered = 0;
	do_for_each_ftrace_rec(pg, rec) {
		if (rec->flags & FTRACE_FL_FAILED)
			continue;
		rec->flags &= ~type;
	} while_for_each_ftrace_rec();
	mutex_unlock(&ftrace_lock);
}

static int
ftrace_regex_open(struct inode *inode, struct file *file, int enable)
{
	struct ftrace_iterator *iter;
	int ret = 0;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return -ENOMEM;

	mutex_lock(&ftrace_regex_lock);
	if ((file->f_mode & FMODE_WRITE) &&
	    !(file->f_flags & O_APPEND))
		ftrace_filter_reset(enable);

	if (file->f_mode & FMODE_READ) {
		iter->pg = ftrace_pages_start;
		iter->flags = enable ? FTRACE_ITER_FILTER :
			FTRACE_ITER_NOTRACE;

		ret = seq_open(file, &show_ftrace_seq_ops);
		if (!ret) {
			struct seq_file *m = file->private_data;
			m->private = iter;
		} else
			kfree(iter);
	} else
		file->private_data = iter;
	mutex_unlock(&ftrace_regex_lock);

	return ret;
}

static int
ftrace_filter_open(struct inode *inode, struct file *file)
{
	return ftrace_regex_open(inode, file, 1);
}

static int
ftrace_notrace_open(struct inode *inode, struct file *file)
{
	return ftrace_regex_open(inode, file, 0);
}

static loff_t
ftrace_regex_lseek(struct file *file, loff_t offset, int origin)
{
	loff_t ret;

	if (file->f_mode & FMODE_READ)
		ret = seq_lseek(file, offset, origin);
	else
		file->f_pos = ret = 1;

	return ret;
}
enum {
	MATCH_FULL,
	MATCH_FRONT_ONLY,
	MATCH_MIDDLE_ONLY,
	MATCH_END_ONLY,
};

/*
 * (static function - no need for kernel doc)
 *
 * Pass in a buffer containing a glob and this function will
 * set search to point to the search part of the buffer and
 * return the type of search it is (see enum above).
 * This does modify buff.
 *
 * Returns enum type.
 *  search returns the pointer to use for comparison.
 *  not returns 1 if buff started with a '!'
 *   0 otherwise.
 */
static int
ftrace_setup_glob(char *buff, int len, char **search, int *not)
{
	int type = MATCH_FULL;
	int i;

	if (buff[0] == '!') {
		*not = 1;
		buff++;
		len--;
	} else
		*not = 0;

	*search = buff;

	for (i = 0; i < len; i++) {
		if (buff[i] == '*') {
			if (!i) {
				*search = buff + 1;
				type = MATCH_END_ONLY;
			} else {
				if (type == MATCH_END_ONLY)
					type = MATCH_MIDDLE_ONLY;
				else
					type = MATCH_FRONT_ONLY;
				buff[i] = 0;
				break;
			}
		}
	}

	return type;
}
static int ftrace_match(char *str, char *regex, int len, int type)
{
	int matched = 0;
	char *ptr;

	switch (type) {
	case MATCH_FULL:
		if (strcmp(str, regex) == 0)
			matched = 1;
		break;
	case MATCH_FRONT_ONLY:
		if (strncmp(str, regex, len) == 0)
			matched = 1;
		break;
	case MATCH_MIDDLE_ONLY:
		if (strstr(str, regex))
			matched = 1;
		break;
	case MATCH_END_ONLY:
		ptr = strstr(str, regex);
		if (ptr && (ptr[len] == 0))
			matched = 1;
		break;
	}

	return matched;
}
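
/*
 * For example (illustrative globs):
 *	"schedule"	-> MATCH_FULL:        str must equal the glob
 *	"sched*"	-> MATCH_FRONT_ONLY:  str must start with "sched"
 *	"*timer"	-> MATCH_END_ONLY:    str must end with "timer"
 *	"*lock*"	-> MATCH_MIDDLE_ONLY: str must contain "lock"
 * A leading '!' (stripped by ftrace_setup_glob) inverts the sense.
 */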
static int
ftrace_match_record(struct dyn_ftrace *rec, char *regex, int len, int type)
{
	char str[KSYM_SYMBOL_LEN];

	kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
	return ftrace_match(str, regex, len, type);
}

static void ftrace_match_records(char *buff, int len, int enable)
{
	unsigned int search_len;
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;
	unsigned long flag;
	char *search;
	int type;
	int not;

	flag = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
	type = ftrace_setup_glob(buff, len, &search, &not);

	search_len = strlen(search);

	mutex_lock(&ftrace_lock);
	do_for_each_ftrace_rec(pg, rec) {

		if (rec->flags & FTRACE_FL_FAILED)
			continue;

		if (ftrace_match_record(rec, search, search_len, type)) {
			if (not)
				rec->flags &= ~flag;
			else
				rec->flags |= flag;
		}
		/*
		 * Only enable filtering if we have a function that
		 * is filtered on.
		 */
		if (enable && (rec->flags & FTRACE_FL_FILTER))
			ftrace_filtered = 1;
	} while_for_each_ftrace_rec();
	mutex_unlock(&ftrace_lock);
}
static int
ftrace_match_module_record(struct dyn_ftrace *rec, char *mod,
			   char *regex, int len, int type)
{
	char str[KSYM_SYMBOL_LEN];
	char *modname;

	kallsyms_lookup(rec->ip, NULL, NULL, &modname, str);

	if (!modname || strcmp(modname, mod))
		return 0;

	/* blank search means to match all funcs in the mod */
	if (len)
		return ftrace_match(str, regex, len, type);
	else
		return 1;
}

static void ftrace_match_module_records(char *buff, char *mod, int enable)
{
	unsigned search_len = 0;
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;
	int type = MATCH_FULL;
	char *search = buff;
	unsigned long flag;
	int not = 0;

	flag = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;

	/* blank or '*' mean the same */
	if (strcmp(buff, "*") == 0)
		buff[0] = 0;

	/* handle the case of 'dont filter this module' */
	if (strcmp(buff, "!") == 0 || strcmp(buff, "!*") == 0) {
		buff[0] = 0;
		not = 1;
	}

	if (strlen(buff)) {
		type = ftrace_setup_glob(buff, strlen(buff), &search, &not);
		search_len = strlen(search);
	}

	mutex_lock(&ftrace_lock);
	do_for_each_ftrace_rec(pg, rec) {

		if (rec->flags & FTRACE_FL_FAILED)
			continue;

		if (ftrace_match_module_record(rec, mod,
					       search, search_len, type)) {
			if (not)
				rec->flags &= ~flag;
			else
				rec->flags |= flag;
		}
		if (enable && (rec->flags & FTRACE_FL_FILTER))
			ftrace_filtered = 1;

	} while_for_each_ftrace_rec();
	mutex_unlock(&ftrace_lock);
}
/*
 * We register the module command as a template to show others how
 * to register a command as well.
 */
static int
ftrace_mod_callback(char *func, char *cmd, char *param, int enable)
{
	char *mod;

	/*
	 * cmd == 'mod' because we only registered this func
	 * for the 'mod' ftrace_func_command.
	 * But if you register one func with multiple commands,
	 * you can tell which command was used by the cmd
	 * parameter.
	 */

	/* we must have a module name */
	if (!param)
		return -EINVAL;

	mod = strsep(&param, ":");
	if (!strlen(mod))
		return -EINVAL;

	ftrace_match_module_records(func, mod, enable);
	return 0;
}

static struct ftrace_func_command ftrace_mod_cmd = {
	.name			= "mod",
	.func			= ftrace_mod_callback,
};

static int __init ftrace_mod_cmd_init(void)
{
	return register_ftrace_command(&ftrace_mod_cmd);
}

device_initcall(ftrace_mod_cmd_init);
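
/*
 * Illustrative usage from user space (debugfs mount point may vary):
 *
 *	echo '*:mod:ext3' > set_ftrace_filter
 *
 * filters the trace on every function in the ext3 module whose name
 * matches the glob before the first ':' (here, all of them).
 */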
static void
function_trace_probe_call(unsigned long ip, unsigned long parent_ip)
{
	struct ftrace_func_probe *entry;
	struct hlist_head *hhd;
	struct hlist_node *n;
	unsigned long key;
	int resched;

	key = hash_long(ip, FTRACE_HASH_BITS);

	hhd = &ftrace_func_hash[key];

	if (hlist_empty(hhd))
		return;

	/*
	 * Disable preemption for these calls to prevent an RCU grace
	 * period. This syncs the hash iteration and freeing of items
	 * on the hash. rcu_read_lock is too dangerous here.
	 */
	resched = ftrace_preempt_disable();
	hlist_for_each_entry_rcu(entry, n, hhd, node) {
		if (entry->ip == ip)
			entry->ops->func(ip, parent_ip, &entry->data);
	}
	ftrace_preempt_enable(resched);
}

static struct ftrace_ops trace_probe_ops __read_mostly =
{
	.func = function_trace_probe_call,
};

static int ftrace_probe_registered;

static void __enable_ftrace_function_probe(void)
{
	int i;

	if (ftrace_probe_registered)
		return;

	for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
		struct hlist_head *hhd = &ftrace_func_hash[i];
		if (hhd->first)
			break;
	}
	/* Nothing registered? */
	if (i == FTRACE_FUNC_HASHSIZE)
		return;

	__register_ftrace_function(&trace_probe_ops);
	ftrace_startup(0);
	ftrace_probe_registered = 1;
}

static void __disable_ftrace_function_probe(void)
{
	int i;

	if (!ftrace_probe_registered)
		return;

	for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
		struct hlist_head *hhd = &ftrace_func_hash[i];
		if (hhd->first)
			return;
	}

	/* no more funcs left */
	__unregister_ftrace_function(&trace_probe_ops);
	ftrace_shutdown(0);
	ftrace_probe_registered = 0;
}

static void ftrace_free_entry_rcu(struct rcu_head *rhp)
{
	struct ftrace_func_probe *entry =
		container_of(rhp, struct ftrace_func_probe, rcu);

	if (entry->ops->free)
		entry->ops->free(&entry->data);
	kfree(entry);
}
int
register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
			       void *data)
{
	struct ftrace_func_probe *entry;
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;
	int type, len, not;
	unsigned long key;
	int count = 0;
	char *search;

	type = ftrace_setup_glob(glob, strlen(glob), &search, &not);
	len = strlen(search);

	/* we do not support '!' for function probes */
	if (WARN_ON(not))
		return -EINVAL;

	mutex_lock(&ftrace_lock);
	do_for_each_ftrace_rec(pg, rec) {

		if (rec->flags & FTRACE_FL_FAILED)
			continue;

		if (!ftrace_match_record(rec, search, len, type))
			continue;

		entry = kmalloc(sizeof(*entry), GFP_KERNEL);
		if (!entry) {
			/* If we did not process any, then return error */
			if (!count)
				count = -ENOMEM;
			goto out_unlock;
		}

		count++;

		entry->data = data;

		/*
		 * The caller might want to do something special
		 * for each function we find. We call the callback
		 * to give the caller an opportunity to do so.
		 */
		if (ops->callback) {
			if (ops->callback(rec->ip, &entry->data) < 0) {
				/* caller does not like this func */
				kfree(entry);
				continue;
			}
		}

		entry->ops = ops;
		entry->ip = rec->ip;

		key = hash_long(entry->ip, FTRACE_HASH_BITS);
		hlist_add_head_rcu(&entry->node, &ftrace_func_hash[key]);

	} while_for_each_ftrace_rec();
	__enable_ftrace_function_probe();

 out_unlock:
	mutex_unlock(&ftrace_lock);

	return count;
}
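
/*
 * Illustrative sketch of a probe user (hypothetical names, not part of
 * the ftrace code). The callback runs from function_trace_probe_call()
 * whenever a matched function is hit.
 */
static void example_probe_handler(unsigned long ip, unsigned long parent_ip,
				  void **data)
{
	/* per-entry 'data' was seeded by the 'data' argument below */
}

static struct ftrace_probe_ops example_probe_ops __maybe_unused = {
	.func = example_probe_handler,
};

/* attach to every function matching the glob:
 *	register_ftrace_function_probe("sched*", &example_probe_ops, NULL);
 * and detach later with:
 *	unregister_ftrace_function_probe("sched*", &example_probe_ops, NULL);
 */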
enum {
	PROBE_TEST_FUNC		= 1,
	PROBE_TEST_DATA		= 2
};

static void
__unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
				   void *data, int flags)
{
	struct ftrace_func_probe *entry;
	struct hlist_node *n, *tmp;
	char str[KSYM_SYMBOL_LEN];
	int type = MATCH_FULL;
	int i, len = 0;
	char *search;

	if (glob && (strcmp(glob, "*") == 0 || !strlen(glob)))
		glob = NULL;
	else {
		int not;

		type = ftrace_setup_glob(glob, strlen(glob), &search, &not);
		len = strlen(search);

		/* we do not support '!' for function probes */
		if (WARN_ON(not))
			return;
	}

	mutex_lock(&ftrace_lock);
	for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
		struct hlist_head *hhd = &ftrace_func_hash[i];

		hlist_for_each_entry_safe(entry, n, tmp, hhd, node) {

			/* break up if statements for readability */
			if ((flags & PROBE_TEST_FUNC) && entry->ops != ops)
				continue;

			if ((flags & PROBE_TEST_DATA) && entry->data != data)
				continue;

			/* do this last, since it is the most expensive */
			if (glob) {
				kallsyms_lookup(entry->ip, NULL, NULL,
						NULL, str);
				if (!ftrace_match(str, glob, len, type))
					continue;
			}

			hlist_del(&entry->node);
			call_rcu(&entry->rcu, ftrace_free_entry_rcu);
		}
	}
	__disable_ftrace_function_probe();
	mutex_unlock(&ftrace_lock);
}
void
unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
				 void *data)
{
	__unregister_ftrace_function_probe(glob, ops, data,
					   PROBE_TEST_FUNC | PROBE_TEST_DATA);
}

void
unregister_ftrace_function_probe_func(char *glob, struct ftrace_probe_ops *ops)
{
	__unregister_ftrace_function_probe(glob, ops, NULL, PROBE_TEST_FUNC);
}

void unregister_ftrace_function_probe_all(char *glob)
{
	__unregister_ftrace_function_probe(glob, NULL, NULL, 0);
}

static LIST_HEAD(ftrace_commands);
static DEFINE_MUTEX(ftrace_cmd_mutex);

int register_ftrace_command(struct ftrace_func_command *cmd)
{
	struct ftrace_func_command *p;
	int ret = 0;

	mutex_lock(&ftrace_cmd_mutex);
	list_for_each_entry(p, &ftrace_commands, list) {
		if (strcmp(cmd->name, p->name) == 0) {
			ret = -EBUSY;
			goto out_unlock;
		}
	}
	list_add(&cmd->list, &ftrace_commands);
 out_unlock:
	mutex_unlock(&ftrace_cmd_mutex);

	return ret;
}

int unregister_ftrace_command(struct ftrace_func_command *cmd)
{
	struct ftrace_func_command *p, *n;
	int ret = -ENODEV;

	mutex_lock(&ftrace_cmd_mutex);
	list_for_each_entry_safe(p, n, &ftrace_commands, list) {
		if (strcmp(cmd->name, p->name) == 0) {
			ret = 0;
			list_del_init(&p->list);
			goto out_unlock;
		}
	}
 out_unlock:
	mutex_unlock(&ftrace_cmd_mutex);

	return ret;
}

static int ftrace_process_regex(char *buff, int len, int enable)
{
	char *func, *command, *next = buff;
	struct ftrace_func_command *p;
	int ret = -EINVAL;

	func = strsep(&next, ":");

	if (!next) {
		ftrace_match_records(func, len, enable);
		return 0;
	}

	/* command found */

	command = strsep(&next, ":");

	mutex_lock(&ftrace_cmd_mutex);
	list_for_each_entry(p, &ftrace_commands, list) {
		if (strcmp(p->name, command) == 0) {
			ret = p->func(func, command, next, enable);
			goto out_unlock;
		}
	}
 out_unlock:
	mutex_unlock(&ftrace_cmd_mutex);

	return ret;
}
static ssize_t
ftrace_regex_write(struct file *file, const char __user *ubuf,
		   size_t cnt, loff_t *ppos, int enable)
{
	struct ftrace_iterator *iter;
	char ch;
	size_t read = 0;
	ssize_t ret;

	if (!cnt)
		return 0;

	mutex_lock(&ftrace_regex_lock);

	if (file->f_mode & FMODE_READ) {
		struct seq_file *m = file->private_data;
		iter = m->private;
	} else
		iter = file->private_data;

	if (!*ppos) {
		iter->flags &= ~FTRACE_ITER_CONT;
		iter->buffer_idx = 0;
	}

	ret = get_user(ch, ubuf++);
	if (ret)
		goto out;
	read++;
	cnt--;

	if (!(iter->flags & ~FTRACE_ITER_CONT)) {
		/* skip white space */
		while (cnt && isspace(ch)) {
			ret = get_user(ch, ubuf++);
			if (ret)
				goto out;
			read++;
			cnt--;
		}

		if (isspace(ch)) {
			file->f_pos += read;
			ret = read;
			goto out;
		}

		iter->buffer_idx = 0;
	}

	while (cnt && !isspace(ch)) {
		if (iter->buffer_idx < FTRACE_BUFF_MAX)
			iter->buffer[iter->buffer_idx++] = ch;
		else {
			ret = -EINVAL;
			goto out;
		}
		ret = get_user(ch, ubuf++);
		if (ret)
			goto out;
		read++;
		cnt--;
	}

	if (isspace(ch)) {
		iter->buffer[iter->buffer_idx] = 0;
		ret = ftrace_process_regex(iter->buffer,
					   iter->buffer_idx, enable);
		if (ret)
			goto out;
		iter->buffer_idx = 0;
	} else
		iter->flags |= FTRACE_ITER_CONT;

	file->f_pos += read;

	ret = read;
 out:
	mutex_unlock(&ftrace_regex_lock);

	return ret;
}
static ssize_t
ftrace_filter_write(struct file *file, const char __user *ubuf,
		    size_t cnt, loff_t *ppos)
{
	return ftrace_regex_write(file, ubuf, cnt, ppos, 1);
}

static ssize_t
ftrace_notrace_write(struct file *file, const char __user *ubuf,
		     size_t cnt, loff_t *ppos)
{
	return ftrace_regex_write(file, ubuf, cnt, ppos, 0);
}

static void
ftrace_set_regex(unsigned char *buf, int len, int reset, int enable)
{
	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftrace_regex_lock);
	if (reset)
		ftrace_filter_reset(enable);
	if (buf)
		ftrace_match_records(buf, len, enable);
	mutex_unlock(&ftrace_regex_lock);
}

/**
 * ftrace_set_filter - set a function to filter on in ftrace
 * @buf - the string that holds the function filter text.
 * @len - the length of the string.
 * @reset - non zero to reset all filters before applying this filter.
 *
 * Filters denote which functions should be enabled when tracing is enabled.
 * If @buf is NULL and reset is set, all functions will be enabled for tracing.
 */
void ftrace_set_filter(unsigned char *buf, int len, int reset)
{
	ftrace_set_regex(buf, len, reset, 1);
}

/**
 * ftrace_set_notrace - set a function to not trace in ftrace
 * @buf - the string that holds the function notrace text.
 * @len - the length of the string.
 * @reset - non zero to reset all filters before applying this filter.
 *
 * Notrace Filters denote which functions should not be enabled when tracing
 * is enabled. If @buf is NULL and reset is set, all functions will be enabled
 * for tracing.
 */
void ftrace_set_notrace(unsigned char *buf, int len, int reset)
{
	ftrace_set_regex(buf, len, reset, 0);
}
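
/*
 * Illustrative in-kernel usage:
 *
 *	ftrace_set_filter("sched*", strlen("sched*"), 1);
 *	ftrace_set_notrace("schedule_tail", strlen("schedule_tail"), 0);
 *
 * The first call resets the old filters and traces only functions
 * starting with "sched"; the second additionally excludes
 * schedule_tail without resetting the notrace list.
 */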
static int
ftrace_regex_release(struct inode *inode, struct file *file, int enable)
{
	struct seq_file *m = (struct seq_file *)file->private_data;
	struct ftrace_iterator *iter;

	mutex_lock(&ftrace_regex_lock);
	if (file->f_mode & FMODE_READ) {
		iter = m->private;

		seq_release(inode, file);
	} else
		iter = file->private_data;

	if (iter->buffer_idx) {
		iter->buffer[iter->buffer_idx] = 0;
		ftrace_match_records(iter->buffer, iter->buffer_idx, enable);
	}

	mutex_lock(&ftrace_lock);
	if (ftrace_start_up && ftrace_enabled)
		ftrace_run_update_code(FTRACE_ENABLE_CALLS);
	mutex_unlock(&ftrace_lock);

	kfree(iter);
	mutex_unlock(&ftrace_regex_lock);

	return 0;
}

static int
ftrace_filter_release(struct inode *inode, struct file *file)
{
	return ftrace_regex_release(inode, file, 1);
}

static int
ftrace_notrace_release(struct inode *inode, struct file *file)
{
	return ftrace_regex_release(inode, file, 0);
}
static const struct file_operations ftrace_avail_fops = {
	.open = ftrace_avail_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = ftrace_avail_release,
};

static const struct file_operations ftrace_failures_fops = {
	.open = ftrace_failures_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = ftrace_avail_release,
};

static const struct file_operations ftrace_filter_fops = {
	.open = ftrace_filter_open,
	.read = seq_read,
	.write = ftrace_filter_write,
	.llseek = ftrace_regex_lseek,
	.release = ftrace_filter_release,
};

static const struct file_operations ftrace_notrace_fops = {
	.open = ftrace_notrace_open,
	.read = seq_read,
	.write = ftrace_notrace_write,
	.llseek = ftrace_regex_lseek,
	.release = ftrace_notrace_release,
};
#ifdef CONFIG_FUNCTION_GRAPH_TRACER

static DEFINE_MUTEX(graph_lock);

int ftrace_graph_count;
unsigned long ftrace_graph_funcs[FTRACE_GRAPH_MAX_FUNCS] __read_mostly;

static void *
g_next(struct seq_file *m, void *v, loff_t *pos)
{
	unsigned long *array = m->private;
	int index = *pos;

	(*pos)++;

	if (index >= ftrace_graph_count)
		return NULL;

	return &array[index];
}

static void *g_start(struct seq_file *m, loff_t *pos)
{
	void *p = NULL;

	mutex_lock(&graph_lock);

	/* Nothing, tell g_show to print all functions are enabled */
	if (!ftrace_graph_count && !*pos)
		return (void *)1;

	p = g_next(m, p, pos);

	return p;
}

static void g_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&graph_lock);
}

static int g_show(struct seq_file *m, void *v)
{
	unsigned long *ptr = v;
	char str[KSYM_SYMBOL_LEN];

	if (!ptr)
		return 0;

	if (ptr == (unsigned long *)1) {
		seq_printf(m, "#### all functions enabled ####\n");
		return 0;
	}

	kallsyms_lookup(*ptr, NULL, NULL, NULL, str);

	seq_printf(m, "%s\n", str);

	return 0;
}

static struct seq_operations ftrace_graph_seq_ops = {
	.start = g_start,
	.next = g_next,
	.stop = g_stop,
	.show = g_show,
};
static int
ftrace_graph_open(struct inode *inode, struct file *file)
{
	int ret = 0;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	mutex_lock(&graph_lock);
	if ((file->f_mode & FMODE_WRITE) &&
	    !(file->f_flags & O_APPEND)) {
		ftrace_graph_count = 0;
		memset(ftrace_graph_funcs, 0, sizeof(ftrace_graph_funcs));
	}

	if (file->f_mode & FMODE_READ) {
		ret = seq_open(file, &ftrace_graph_seq_ops);
		if (!ret) {
			struct seq_file *m = file->private_data;
			m->private = ftrace_graph_funcs;
		}
	} else
		file->private_data = ftrace_graph_funcs;
	mutex_unlock(&graph_lock);

	return ret;
}
static int
ftrace_set_func(unsigned long *array, int *idx, char *buffer)
{
	struct dyn_ftrace *rec;
	struct ftrace_page *pg;
	int search_len;
	int found = 0;
	int type, not;
	char *search;
	bool exists;
	int i;

	if (ftrace_disabled)
		return -ENODEV;

	/* decode the glob */
	type = ftrace_setup_glob(buffer, strlen(buffer), &search, &not);

	search_len = strlen(search);

	mutex_lock(&ftrace_lock);
	do_for_each_ftrace_rec(pg, rec) {

		if (*idx >= FTRACE_GRAPH_MAX_FUNCS)
			break;

		if (rec->flags & (FTRACE_FL_FAILED | FTRACE_FL_FREE))
			continue;

		if (ftrace_match_record(rec, search, search_len, type)) {
			/* ensure it is not already in the array */
			exists = false;
			for (i = 0; i < *idx; i++)
				if (array[i] == rec->ip) {
					exists = true;
					break;
				}
			if (!exists) {
				array[(*idx)++] = rec->ip;
				found = 1;
			}
		}
	} while_for_each_ftrace_rec();

	mutex_unlock(&ftrace_lock);

	return found ? 0 : -EINVAL;
}
static ssize_t
ftrace_graph_write(struct file *file, const char __user *ubuf,
		   size_t cnt, loff_t *ppos)
{
	unsigned char buffer[FTRACE_BUFF_MAX+1];
	unsigned long *array;
	size_t read = 0;
	ssize_t ret;
	int index = 0;
	char ch;

	if (!cnt)
		return 0;

	mutex_lock(&graph_lock);

	if (ftrace_graph_count >= FTRACE_GRAPH_MAX_FUNCS) {
		ret = -EBUSY;
		goto out;
	}

	if (file->f_mode & FMODE_READ) {
		struct seq_file *m = file->private_data;
		array = m->private;
	} else
		array = file->private_data;

	ret = get_user(ch, ubuf++);
	if (ret)
		goto out;
	read++;
	cnt--;

	/* skip white space */
	while (cnt && isspace(ch)) {
		ret = get_user(ch, ubuf++);
		if (ret)
			goto out;
		read++;
		cnt--;
	}

	while (cnt && !isspace(ch)) {
		if (index < FTRACE_BUFF_MAX)
			buffer[index++] = ch;
		else {
			ret = -EINVAL;
			goto out;
		}
		ret = get_user(ch, ubuf++);
		if (ret)
			goto out;
		read++;
		cnt--;
	}
	buffer[index] = 0;

	/* we allow only one expression at a time */
	ret = ftrace_set_func(array, &ftrace_graph_count, buffer);
	if (ret)
		goto out;

	file->f_pos += read;

	ret = read;
 out:
	mutex_unlock(&graph_lock);

	return ret;
}

static const struct file_operations ftrace_graph_fops = {
	.open = ftrace_graph_open,
	.read = seq_read,
	.write = ftrace_graph_write,
};
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
static __init int ftrace_init_dyn_debugfs(struct dentry *d_tracer)
{
	struct dentry *entry;

	entry = debugfs_create_file("available_filter_functions", 0444,
				    d_tracer, NULL, &ftrace_avail_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'available_filter_functions' entry\n");

	entry = debugfs_create_file("failures", 0444,
				    d_tracer, NULL, &ftrace_failures_fops);
	if (!entry)
		pr_warning("Could not create debugfs 'failures' entry\n");

	entry = debugfs_create_file("set_ftrace_filter", 0644, d_tracer,
				    NULL, &ftrace_filter_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'set_ftrace_filter' entry\n");

	entry = debugfs_create_file("set_ftrace_notrace", 0644, d_tracer,
				    NULL, &ftrace_notrace_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'set_ftrace_notrace' entry\n");

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	entry = debugfs_create_file("set_graph_function", 0444, d_tracer,
				    NULL,
				    &ftrace_graph_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'set_graph_function' entry\n");
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

	return 0;
}
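
/*
 * The resulting files are used from user space, for example (assuming
 * debugfs is mounted at /sys/kernel/debug):
 *
 *	cat /sys/kernel/debug/tracing/available_filter_functions
 *	echo 'sched*' > /sys/kernel/debug/tracing/set_ftrace_filter
 *	echo 'schedule_tail' > /sys/kernel/debug/tracing/set_ftrace_notrace
 */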
static int ftrace_convert_nops(struct module *mod,
			       unsigned long *start,
			       unsigned long *end)
{
	unsigned long *p;
	unsigned long addr;
	unsigned long flags;

	mutex_lock(&ftrace_lock);
	p = start;
	while (p < end) {
		addr = ftrace_call_adjust(*p++);
		/*
		 * Some architecture linkers will pad between
		 * the different mcount_loc sections of different
		 * object files to satisfy alignments.
		 * Skip any NULL pointers.
		 */
		if (!addr)
			continue;
		ftrace_record_ip(addr);
	}

	/* disable interrupts to prevent kstop machine */
	local_irq_save(flags);
	ftrace_update_code(mod);
	local_irq_restore(flags);
	mutex_unlock(&ftrace_lock);

	return 0;
}

void ftrace_init_module(struct module *mod,
			unsigned long *start, unsigned long *end)
{
	if (ftrace_disabled || start == end)
		return;
	ftrace_convert_nops(mod, start, end);
}

extern unsigned long __start_mcount_loc[];
extern unsigned long __stop_mcount_loc[];
void __init ftrace_init(void)
{
	unsigned long count, addr, flags;
	int ret;

	/* Keep the ftrace pointer to the stub */
	addr = (unsigned long)ftrace_stub;

	local_irq_save(flags);
	ftrace_dyn_arch_init(&addr);
	local_irq_restore(flags);

	/* ftrace_dyn_arch_init places the return code in addr */
	if (addr)
		goto failed;

	count = __stop_mcount_loc - __start_mcount_loc;

	ret = ftrace_dyn_table_alloc(count);
	if (ret)
		goto failed;

	last_ftrace_enabled = ftrace_enabled = 1;

	ret = ftrace_convert_nops(NULL,
				  __start_mcount_loc,
				  __stop_mcount_loc);

	return;
 failed:
	ftrace_disabled = 1;
}
#else

static int __init ftrace_nodyn_init(void)
{
	ftrace_enabled = 1;
	return 0;
}
device_initcall(ftrace_nodyn_init);

static inline int ftrace_init_dyn_debugfs(struct dentry *d_tracer) { return 0; }
static inline void ftrace_startup_enable(int command) { }
/* Keep as macros so we do not need to define the commands */
# define ftrace_startup(command)	do { } while (0)
# define ftrace_shutdown(command)	do { } while (0)
# define ftrace_startup_sysctl()	do { } while (0)
# define ftrace_shutdown_sysctl()	do { } while (0)
#endif /* CONFIG_DYNAMIC_FTRACE */
static ssize_t
ftrace_pid_read(struct file *file, char __user *ubuf,
		size_t cnt, loff_t *ppos)
{
	char buf[64];
	int r;

	if (ftrace_pid_trace == ftrace_swapper_pid)
		r = sprintf(buf, "swapper tasks\n");
	else if (ftrace_pid_trace)
		r = sprintf(buf, "%u\n", pid_nr(ftrace_pid_trace));
	else
		r = sprintf(buf, "no pid\n");

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}
static void clear_ftrace_swapper(void)
{
	struct task_struct *p;
	int cpu;

	get_online_cpus();
	for_each_online_cpu(cpu) {
		p = idle_task(cpu);
		clear_tsk_trace_trace(p);
	}
	put_online_cpus();
}

static void set_ftrace_swapper(void)
{
	struct task_struct *p;
	int cpu;

	get_online_cpus();
	for_each_online_cpu(cpu) {
		p = idle_task(cpu);
		set_tsk_trace_trace(p);
	}
	put_online_cpus();
}

static void clear_ftrace_pid(struct pid *pid)
{
	struct task_struct *p;

	rcu_read_lock();
	do_each_pid_task(pid, PIDTYPE_PID, p) {
		clear_tsk_trace_trace(p);
	} while_each_pid_task(pid, PIDTYPE_PID, p);
	rcu_read_unlock();

	put_pid(pid);
}

static void set_ftrace_pid(struct pid *pid)
{
	struct task_struct *p;

	rcu_read_lock();
	do_each_pid_task(pid, PIDTYPE_PID, p) {
		set_tsk_trace_trace(p);
	} while_each_pid_task(pid, PIDTYPE_PID, p);
	rcu_read_unlock();
}

static void clear_ftrace_pid_task(struct pid **pid)
{
	if (*pid == ftrace_swapper_pid)
		clear_ftrace_swapper();
	else
		clear_ftrace_pid(*pid);

	*pid = NULL;
}

static void set_ftrace_pid_task(struct pid *pid)
{
	if (pid == ftrace_swapper_pid)
		set_ftrace_swapper();
	else
		set_ftrace_pid(pid);
}
static ssize_t
ftrace_pid_write(struct file *filp, const char __user *ubuf,
		 size_t cnt, loff_t *ppos)
{
	struct pid *pid;
	char buf[64];
	long val;
	int ret;

	if (cnt >= sizeof(buf))
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	ret = strict_strtol(buf, 10, &val);
	if (ret < 0)
		return ret;

	mutex_lock(&ftrace_lock);
	if (val < 0) {
		/* disable pid tracing */
		if (!ftrace_pid_trace)
			goto out;

		clear_ftrace_pid_task(&ftrace_pid_trace);

	} else {
		/* swapper task is special */
		if (!val) {
			pid = ftrace_swapper_pid;
			if (pid == ftrace_pid_trace)
				goto out;
		} else {
			pid = find_get_pid(val);

			if (pid == ftrace_pid_trace) {
				put_pid(pid);
				goto out;
			}
		}

		if (ftrace_pid_trace)
			clear_ftrace_pid_task(&ftrace_pid_trace);

		if (!pid)
			goto out;

		ftrace_pid_trace = pid;

		set_ftrace_pid_task(ftrace_pid_trace);
	}

	/* update the function call */
	ftrace_update_pid_func();
	ftrace_startup_enable(0);

 out:
	mutex_unlock(&ftrace_lock);

	return cnt;
}
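
/*
 * Illustrative usage (assuming debugfs at /sys/kernel/debug):
 *
 *	echo 123 > /sys/kernel/debug/tracing/set_ftrace_pid   # trace pid 123
 *	echo 0   > /sys/kernel/debug/tracing/set_ftrace_pid   # trace swapper
 *	echo -1  > /sys/kernel/debug/tracing/set_ftrace_pid   # disable
 */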
static const struct file_operations ftrace_pid_fops = {
	.read = ftrace_pid_read,
	.write = ftrace_pid_write,
};

static __init int ftrace_init_debugfs(void)
{
	struct dentry *d_tracer;
	struct dentry *entry;

	d_tracer = tracing_init_dentry();
	if (!d_tracer)
		return 0;

	ftrace_init_dyn_debugfs(d_tracer);

	entry = debugfs_create_file("set_ftrace_pid", 0644, d_tracer,
				    NULL, &ftrace_pid_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'set_ftrace_pid' entry\n");

	return 0;
}
fs_initcall(ftrace_init_debugfs);
/**
 * ftrace_kill - kill ftrace
 *
 * This function should be used by panic code. It stops ftrace
 * but in a not so nice way; use it only when ftrace must be
 * stopped immediately.
 */
void ftrace_kill(void)
{
	ftrace_disabled = 1;
	ftrace_enabled = 0;
	clear_ftrace_function();
}
/**
 * register_ftrace_function - register a function for profiling
 * @ops - ops structure that holds the function for profiling.
 *
 * Register a function to be called by all functions in the
 * kernel.
 *
 * Note: @ops->func and all the functions it calls must be labeled
 *       with "notrace", otherwise it will go into a
 *       recursive loop.
 */
int register_ftrace_function(struct ftrace_ops *ops)
{
	int ret;

	if (unlikely(ftrace_disabled))
		return -1;

	mutex_lock(&ftrace_lock);

	ret = __register_ftrace_function(ops);
	ftrace_startup(0);

	mutex_unlock(&ftrace_lock);
	return ret;
}

/**
 * unregister_ftrace_function - unregister a function for profiling.
 * @ops - ops structure that holds the function to unregister
 *
 * Unregister a function that was added to be called by ftrace profiling.
 */
int unregister_ftrace_function(struct ftrace_ops *ops)
{
	int ret;

	mutex_lock(&ftrace_lock);
	ret = __unregister_ftrace_function(ops);
	ftrace_shutdown(0);
	mutex_unlock(&ftrace_lock);

	return ret;
}
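
/*
 * Illustrative sketch of a tracer (hypothetical names, not part of the
 * ftrace code). As the kernel-doc above warns, the callback itself must
 * be notrace:
 */
static void notrace example_trace_callback(unsigned long ip,
					   unsigned long parent_ip)
{
	/* called on every traced function entry */
}

static struct ftrace_ops example_trace_ops __maybe_unused = {
	.func = example_trace_callback,
};

/* enable:  register_ftrace_function(&example_trace_ops);
 * disable: unregister_ftrace_function(&example_trace_ops);
 */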
int
ftrace_enable_sysctl(struct ctl_table *table, int write,
		     struct file *file, void __user *buffer, size_t *lenp,
		     loff_t *ppos)
{
	int ret;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	mutex_lock(&ftrace_lock);

	ret = proc_dointvec(table, write, file, buffer, lenp, ppos);

	if (ret || !write || (last_ftrace_enabled == ftrace_enabled))
		goto out;

	last_ftrace_enabled = ftrace_enabled;

	if (ftrace_enabled) {

		ftrace_startup_sysctl();

		/* we are starting ftrace again */
		if (ftrace_list != &ftrace_list_end) {
			if (ftrace_list->next == &ftrace_list_end)
				ftrace_trace_function = ftrace_list->func;
			else
				ftrace_trace_function = ftrace_list_func;
		}

	} else {
		/* stopping ftrace calls (just send to ftrace_stub) */
		ftrace_trace_function = ftrace_stub;

		ftrace_shutdown_sysctl();
	}

 out:
	mutex_unlock(&ftrace_lock);
	return ret;
}
#ifdef CONFIG_FUNCTION_GRAPH_TRACER

static atomic_t ftrace_graph_active;
static struct notifier_block ftrace_suspend_notifier;

int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace)
{
	return 0;
}

/* The callbacks that hook a function */
trace_func_graph_ret_t ftrace_graph_return =
			(trace_func_graph_ret_t)ftrace_stub;
trace_func_graph_ent_t ftrace_graph_entry = ftrace_graph_entry_stub;

/* Try to assign a return stack array on FTRACE_RETSTACK_ALLOC_SIZE tasks. */
static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list)
{
	int i;
	int ret = 0;
	unsigned long flags;
	int start = 0, end = FTRACE_RETSTACK_ALLOC_SIZE;
	struct task_struct *g, *t;
	for (i = 0; i < FTRACE_RETSTACK_ALLOC_SIZE; i++) {
		ret_stack_list[i] = kmalloc(FTRACE_RETFUNC_DEPTH
					    * sizeof(struct ftrace_ret_stack),
					    GFP_KERNEL);
		if (!ret_stack_list[i]) {
			start = 0;
			end = i;
			ret = -ENOMEM;
			goto free;
		}
	}

	read_lock_irqsave(&tasklist_lock, flags);
	do_each_thread(g, t) {
		if (start == end) {
			ret = -EAGAIN;
			goto unlock;
		}

		if (t->ret_stack == NULL) {
			t->curr_ret_stack = -1;
			/* Make sure IRQs see the -1 first: */
			barrier();
			t->ret_stack = ret_stack_list[start++];
			atomic_set(&t->tracing_graph_pause, 0);
			atomic_set(&t->trace_overrun, 0);
		}
	} while_each_thread(g, t);

 unlock:
	read_unlock_irqrestore(&tasklist_lock, flags);
 free:
	for (i = start; i < end; i++)
		kfree(ret_stack_list[i]);
	return ret;
}
/* Allocate a return stack for each task */
static int start_graph_tracing(void)
{
	struct ftrace_ret_stack **ret_stack_list;
	int ret, cpu;

	ret_stack_list = kmalloc(FTRACE_RETSTACK_ALLOC_SIZE *
				 sizeof(struct ftrace_ret_stack *),
				 GFP_KERNEL);

	if (!ret_stack_list)
		return -ENOMEM;

	/* The cpu_boot init_task->ret_stack will never be freed */
	for_each_online_cpu(cpu)
		ftrace_graph_init_task(idle_task(cpu));

	do {
		ret = alloc_retstack_tasklist(ret_stack_list);
	} while (ret == -EAGAIN);

	kfree(ret_stack_list);
	return ret;
}

/*
 * Hibernation protection.
 * The state of the current task is too unstable during
 * suspend/restore to disk. We want to protect against that.
 */
static int
ftrace_suspend_notifier_call(struct notifier_block *bl, unsigned long state,
			     void *unused)
{
	switch (state) {
	case PM_HIBERNATION_PREPARE:
		pause_graph_tracing();
		break;

	case PM_POST_HIBERNATION:
		unpause_graph_tracing();
		break;
	}
	return NOTIFY_DONE;
}
int register_ftrace_graph(trace_func_graph_ret_t retfunc,
			  trace_func_graph_ent_t entryfunc)
{
	int ret = 0;

	mutex_lock(&ftrace_lock);

	ftrace_suspend_notifier.notifier_call = ftrace_suspend_notifier_call;
	register_pm_notifier(&ftrace_suspend_notifier);

	atomic_inc(&ftrace_graph_active);
	ret = start_graph_tracing();
	if (ret) {
		atomic_dec(&ftrace_graph_active);
		goto out;
	}

	ftrace_graph_return = retfunc;
	ftrace_graph_entry = entryfunc;

	ftrace_startup(FTRACE_START_FUNC_RET);

 out:
	mutex_unlock(&ftrace_lock);
	return ret;
}

void unregister_ftrace_graph(void)
{
	mutex_lock(&ftrace_lock);

	atomic_dec(&ftrace_graph_active);
	ftrace_graph_return = (trace_func_graph_ret_t)ftrace_stub;
	ftrace_graph_entry = ftrace_graph_entry_stub;
	ftrace_shutdown(FTRACE_STOP_FUNC_RET);
	unregister_pm_notifier(&ftrace_suspend_notifier);

	mutex_unlock(&ftrace_lock);
}
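
/*
 * Illustrative sketch of a graph-tracer user (hypothetical names, not
 * part of the ftrace code):
 */
static int __maybe_unused example_graph_entry(struct ftrace_graph_ent *trace)
{
	return 1;	/* nonzero: trace this function */
}

static void __maybe_unused example_graph_return(struct ftrace_graph_ret *trace)
{
}

/* start: register_ftrace_graph(example_graph_return, example_graph_entry);
 * stop:  unregister_ftrace_graph();
 */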
/* Allocate a return stack for newly created task */
void ftrace_graph_init_task(struct task_struct *t)
{
	if (atomic_read(&ftrace_graph_active)) {
		t->ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH
				       * sizeof(struct ftrace_ret_stack),
				       GFP_KERNEL);
		if (!t->ret_stack)
			return;
		t->curr_ret_stack = -1;
		atomic_set(&t->tracing_graph_pause, 0);
		atomic_set(&t->trace_overrun, 0);
	} else
		t->ret_stack = NULL;
}

void ftrace_graph_exit_task(struct task_struct *t)
{
	struct ftrace_ret_stack *ret_stack = t->ret_stack;

	t->ret_stack = NULL;
	/* NULL must become visible to IRQs before we free it: */
	barrier();

	kfree(ret_stack);
}

void ftrace_graph_stop(void)
{
	ftrace_stop();
}