/*
 * Infrastructure for profiling code inserted by 'gcc -pg'.
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2004-2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally ported from the -rt patch by:
 *   Copyright (C) 2007 Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code in the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 William Lee Irwin III
 */
#include <linux/stop_machine.h>
#include <linux/clocksource.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/hardirq.h>
#include <linux/kthread.h>
#include <linux/uaccess.h>
#include <linux/kprobes.h>
#include <linux/ftrace.h>
#include <linux/sysctl.h>
#include <linux/ctype.h>
#include <linux/list.h>

#include <asm/ftrace.h>

#include "trace.h"
#define FTRACE_WARN_ON(cond)			\
	do {					\
		if (WARN_ON(cond))		\
			ftrace_kill();		\
	} while (0)

#define FTRACE_WARN_ON_ONCE(cond)		\
	do {					\
		if (WARN_ON_ONCE(cond))		\
			ftrace_kill();		\
	} while (0)

/* ftrace_enabled is the switch that turns ftrace on or off */
int ftrace_enabled __read_mostly;
static int last_ftrace_enabled;

/* When ftrace_pid_trace >= 0, only the thread with this pid is traced */
static int ftrace_pid_trace = -1;

/* Quick disabling of the function tracer. */
int function_trace_stop;

/*
 * ftrace_disabled is set when an anomaly is discovered.
 * ftrace_disabled is much stronger than ftrace_enabled.
 */
static int ftrace_disabled __read_mostly;

static DEFINE_SPINLOCK(ftrace_lock);
static DEFINE_MUTEX(ftrace_sysctl_lock);
static DEFINE_MUTEX(ftrace_start_lock);

static struct ftrace_ops ftrace_list_end __read_mostly =
{
	.func = ftrace_stub,
};

static struct ftrace_ops *ftrace_list __read_mostly = &ftrace_list_end;
ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
ftrace_func_t __ftrace_trace_function __read_mostly = ftrace_stub;
ftrace_func_t ftrace_pid_function __read_mostly = ftrace_stub;
static void ftrace_list_func(unsigned long ip, unsigned long parent_ip)
{
	struct ftrace_ops *op = ftrace_list;

	/* in case someone actually ports this to alpha! */
	read_barrier_depends();

	while (op != &ftrace_list_end) {
		/* silly alpha */
		read_barrier_depends();
		op->func(ip, parent_ip);
		op = op->next;
	}
}

static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip)
{
	if (current->pid != ftrace_pid_trace)
		return;

	ftrace_pid_function(ip, parent_ip);
}

static void set_ftrace_pid_function(ftrace_func_t func)
{
	/* do not set ftrace_pid_function to itself! */
	if (func != ftrace_pid_func)
		ftrace_pid_function = func;
}

/**
 * clear_ftrace_function - reset the ftrace function
 *
 * This NULLs the ftrace function and in essence stops
 * tracing. There may be lag between this call and the moment
 * every CPU stops calling the previous function.
 */
void clear_ftrace_function(void)
{
	ftrace_trace_function = ftrace_stub;
	__ftrace_trace_function = ftrace_stub;
	ftrace_pid_function = ftrace_stub;
}
#ifndef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
/*
 * For those archs that do not test function_trace_stop in their
 * mcount call site, we need to do it from C.
 */
static void ftrace_test_stop_func(unsigned long ip, unsigned long parent_ip)
{
	if (function_trace_stop)
		return;

	__ftrace_trace_function(ip, parent_ip);
}
#endif
static int __register_ftrace_function(struct ftrace_ops *ops)
{
	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);

	ops->next = ftrace_list;
	/*
	 * We are entering ops into the ftrace_list but another
	 * CPU might be walking that list. We need to make sure
	 * the ops->next pointer is valid before another CPU sees
	 * the ops pointer included into the ftrace_list.
	 */
	smp_wmb();
	ftrace_list = ops;

	if (ftrace_enabled) {
		ftrace_func_t func;

		if (ops->next == &ftrace_list_end)
			func = ops->func;
		else
			func = ftrace_list_func;

		if (ftrace_pid_trace >= 0) {
			set_ftrace_pid_function(func);
			func = ftrace_pid_func;
		}

		/*
		 * For one func, simply call it directly.
		 * For more than one func, call the chain.
		 */
#ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
		ftrace_trace_function = func;
#else
		__ftrace_trace_function = func;
		ftrace_trace_function = ftrace_test_stop_func;
#endif
	}

	spin_unlock(&ftrace_lock);

	return 0;
}
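
/*
 * The smp_wmb() in __register_ftrace_function() pairs with the
 * read_barrier_depends() calls in ftrace_list_func(): a CPU that
 * observes the new ftrace_list head is guaranteed to also observe
 * the ops->next pointer written before it, which is what makes the
 * lockless list walk safe.
 */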
static int __unregister_ftrace_function(struct ftrace_ops *ops)
{
	struct ftrace_ops **p;
	int ret = 0;

	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);

	/*
	 * If we are removing the last function, then simply point
	 * to the ftrace_stub.
	 */
	if (ftrace_list == ops && ops->next == &ftrace_list_end) {
		ftrace_trace_function = ftrace_stub;
		ftrace_list = &ftrace_list_end;
		goto out;
	}

	for (p = &ftrace_list; *p != &ftrace_list_end; p = &(*p)->next)
		if (*p == ops)
			break;

	if (*p != ops) {
		ret = -1;
		goto out;
	}

	*p = (*p)->next;

	if (ftrace_enabled) {
		/* If we only have one func left, then call that directly */
		if (ftrace_list->next == &ftrace_list_end) {
			ftrace_func_t func = ftrace_list->func;

			if (ftrace_pid_trace >= 0) {
				set_ftrace_pid_function(func);
				func = ftrace_pid_func;
			}
#ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
			ftrace_trace_function = func;
#else
			__ftrace_trace_function = func;
#endif
		}
	}

 out:
	spin_unlock(&ftrace_lock);

	return ret;
}
static void ftrace_update_pid_func(void)
{
	ftrace_func_t func;

	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);

	if (ftrace_trace_function == ftrace_stub)
		goto out;

	func = ftrace_trace_function;

	if (ftrace_pid_trace >= 0) {
		/* save the current function and interpose the pid filter */
		set_ftrace_pid_function(func);
		func = ftrace_pid_func;
	} else {
		if (func != ftrace_pid_func)
			goto out;

		/* pid filtering disabled: restore the saved function */
		func = ftrace_pid_function;
	}

#ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
	ftrace_trace_function = func;
#else
	__ftrace_trace_function = func;
#endif

 out:
	spin_unlock(&ftrace_lock);
}
#ifdef CONFIG_DYNAMIC_FTRACE
#ifndef CONFIG_FTRACE_MCOUNT_RECORD
# error Dynamic ftrace depends on MCOUNT_RECORD
#endif

/*
 * Since MCOUNT_ADDR may point to mcount itself, we do not want
 * to get it confused by reading a reference in the code as we
 * parse the objcopy output of the text section. Use a variable
 * for it instead.
 */
static unsigned long mcount_addr = MCOUNT_ADDR;

enum {
	FTRACE_ENABLE_CALLS		= (1 << 0),
	FTRACE_DISABLE_CALLS		= (1 << 1),
	FTRACE_UPDATE_TRACE_FUNC	= (1 << 2),
	FTRACE_ENABLE_MCOUNT		= (1 << 3),
	FTRACE_DISABLE_MCOUNT		= (1 << 4),
	FTRACE_START_FUNC_RET		= (1 << 5),
	FTRACE_STOP_FUNC_RET		= (1 << 6),
};

static int ftrace_filtered;

static LIST_HEAD(ftrace_new_addrs);

static DEFINE_MUTEX(ftrace_regex_lock);

struct ftrace_page {
	struct ftrace_page	*next;
	unsigned long		index;
	struct dyn_ftrace	records[];
};

#define ENTRIES_PER_PAGE \
	((PAGE_SIZE - sizeof(struct ftrace_page)) / sizeof(struct dyn_ftrace))

/* estimated from running different kernels */
#define NR_TO_INIT		10000

static struct ftrace_page	*ftrace_pages_start;
static struct ftrace_page	*ftrace_pages;

static struct dyn_ftrace *ftrace_free_records;
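
/*
 * Worked example (illustrative only; the real sizes are arch- and
 * config-dependent): with 4096-byte pages, a 16-byte struct ftrace_page
 * header and a 32-byte struct dyn_ftrace on a 64-bit build,
 * ENTRIES_PER_PAGE is (4096 - 16) / 32 = 127 records, so covering
 * NR_TO_INIT (10000) call sites takes on the order of 80 pages.
 */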
#ifdef CONFIG_KPROBES

static int frozen_record_count;

static inline void freeze_record(struct dyn_ftrace *rec)
{
	if (!(rec->flags & FTRACE_FL_FROZEN)) {
		rec->flags |= FTRACE_FL_FROZEN;
		frozen_record_count++;
	}
}

static inline void unfreeze_record(struct dyn_ftrace *rec)
{
	if (rec->flags & FTRACE_FL_FROZEN) {
		rec->flags &= ~FTRACE_FL_FROZEN;
		frozen_record_count--;
	}
}

static inline int record_frozen(struct dyn_ftrace *rec)
{
	return rec->flags & FTRACE_FL_FROZEN;
}
#else
# define freeze_record(rec)	({ 0; })
# define unfreeze_record(rec)	({ 0; })
# define record_frozen(rec)	({ 0; })
#endif /* CONFIG_KPROBES */

static void ftrace_free_rec(struct dyn_ftrace *rec)
{
	rec->ip = (unsigned long)ftrace_free_records;
	ftrace_free_records = rec;
	rec->flags |= FTRACE_FL_FREE;
}
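
/*
 * Freed records form a singly linked free list: ftrace_free_records
 * points at the first free record, and each free record reuses its own
 * ip field to hold the address of the next one. FTRACE_FL_FREE marks
 * such records so that a corrupted free list can be detected in
 * ftrace_alloc_dyn_node().
 */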
void ftrace_release(void *start, unsigned long size)
{
	struct dyn_ftrace *rec;
	struct ftrace_page *pg;
	unsigned long s = (unsigned long)start;
	unsigned long e = s + size;
	int i;

	if (ftrace_disabled || !start)
		return;

	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);

	for (pg = ftrace_pages_start; pg; pg = pg->next) {
		for (i = 0; i < pg->index; i++) {
			rec = &pg->records[i];

			if ((rec->ip >= s) && (rec->ip < e))
				ftrace_free_rec(rec);
		}
	}
	spin_unlock(&ftrace_lock);
}

static struct dyn_ftrace *ftrace_alloc_dyn_node(unsigned long ip)
{
	struct dyn_ftrace *rec;

	/* First check for freed records */
	if (ftrace_free_records) {
		rec = ftrace_free_records;

		if (unlikely(!(rec->flags & FTRACE_FL_FREE))) {
			FTRACE_WARN_ON_ONCE(1);
			ftrace_free_records = NULL;
			return NULL;
		}

		ftrace_free_records = (void *)rec->ip;
		memset(rec, 0, sizeof(*rec));
		return rec;
	}

	if (ftrace_pages->index == ENTRIES_PER_PAGE) {
		if (!ftrace_pages->next) {
			/* allocate another page */
			ftrace_pages->next =
				(void *)get_zeroed_page(GFP_KERNEL);
			if (!ftrace_pages->next)
				return NULL;
		}
		ftrace_pages = ftrace_pages->next;
	}

	return &ftrace_pages->records[ftrace_pages->index++];
}
static struct dyn_ftrace *
ftrace_record_ip(unsigned long ip)
{
	struct dyn_ftrace *rec;

	if (ftrace_disabled)
		return NULL;

	rec = ftrace_alloc_dyn_node(ip);
	if (!rec)
		return NULL;

	rec->ip = ip;

	list_add(&rec->list, &ftrace_new_addrs);

	return rec;
}

static void print_ip_ins(const char *fmt, unsigned char *p)
{
	int i;

	printk(KERN_CONT "%s", fmt);

	for (i = 0; i < MCOUNT_INSN_SIZE; i++)
		printk(KERN_CONT "%s%02x", i ? ":" : "", p[i]);
}

static void ftrace_bug(int failed, unsigned long ip)
{
	switch (failed) {
	case -EFAULT:
		FTRACE_WARN_ON_ONCE(1);
		pr_info("ftrace faulted on modifying ");
		print_ip_sym(ip);
		break;
	case -EINVAL:
		FTRACE_WARN_ON_ONCE(1);
		pr_info("ftrace failed to modify ");
		print_ip_sym(ip);
		print_ip_ins(" actual: ", (unsigned char *)ip);
		printk(KERN_CONT "\n");
		break;
	case -EPERM:
		FTRACE_WARN_ON_ONCE(1);
		pr_info("ftrace faulted on writing ");
		print_ip_sym(ip);
		break;
	default:
		FTRACE_WARN_ON_ONCE(1);
		pr_info("ftrace faulted on unknown error ");
		print_ip_sym(ip);
	}
}
static int
__ftrace_replace_code(struct dyn_ftrace *rec, int enable)
{
	unsigned long fl;
	unsigned long ftrace_addr;

	ftrace_addr = (unsigned long)ftrace_caller;

	/*
	 * If this record is not to be traced and
	 * it is not enabled then do nothing.
	 *
	 * If this record is not to be traced and
	 * it is enabled then disable it.
	 */
	if (rec->flags & FTRACE_FL_NOTRACE) {
		if (rec->flags & FTRACE_FL_ENABLED)
			rec->flags &= ~FTRACE_FL_ENABLED;
		else
			return 0;

	} else if (ftrace_filtered && enable) {
		/* Filtering is on */
		fl = rec->flags & (FTRACE_FL_FILTER | FTRACE_FL_ENABLED);

		/* Record is filtered and enabled, do nothing */
		if (fl == (FTRACE_FL_FILTER | FTRACE_FL_ENABLED))
			return 0;

		/* Record is not filtered and is not enabled, do nothing */
		if (!fl)
			return 0;

		/* Record is not filtered but enabled, disable it */
		if (fl == FTRACE_FL_ENABLED)
			rec->flags &= ~FTRACE_FL_ENABLED;
		else
		/* Otherwise record is filtered but not enabled, enable it */
			rec->flags |= FTRACE_FL_ENABLED;
	} else {
		/* Disable or not filtered */
		if (enable) {
			/* if record is enabled, do nothing */
			if (rec->flags & FTRACE_FL_ENABLED)
				return 0;

			rec->flags |= FTRACE_FL_ENABLED;
		} else {
			/* if record is not enabled, do nothing */
			if (!(rec->flags & FTRACE_FL_ENABLED))
				return 0;

			rec->flags &= ~FTRACE_FL_ENABLED;
		}
	}

	if (rec->flags & FTRACE_FL_ENABLED)
		return ftrace_make_call(rec, ftrace_addr);
	else
		return ftrace_make_nop(NULL, rec, ftrace_addr);
}
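
/*
 * Summary of the filtered-and-enabling branch above, with fl the
 * record's (FILTER | ENABLED) bits:
 *
 *	FILTER | ENABLED  - already patched in, do nothing
 *	neither bit       - not wanted and not patched, do nothing
 *	ENABLED only      - no longer filtered, patch back to a nop
 *	FILTER only       - newly filtered, patch in the call
 */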
static void ftrace_replace_code(int enable)
{
	int i, failed;
	struct dyn_ftrace *rec;
	struct ftrace_page *pg;

	for (pg = ftrace_pages_start; pg; pg = pg->next) {
		for (i = 0; i < pg->index; i++) {
			rec = &pg->records[i];

			/*
			 * Skip over free records and records that have
			 * failed.
			 */
			if (rec->flags & FTRACE_FL_FREE ||
			    rec->flags & FTRACE_FL_FAILED)
				continue;

			/* ignore updates to this record's mcount site */
			if (get_kprobe((void *)rec->ip)) {
				freeze_record(rec);
				continue;
			} else {
				unfreeze_record(rec);
			}

			failed = __ftrace_replace_code(rec, enable);
			if (failed && (rec->flags & FTRACE_FL_CONVERTED)) {
				rec->flags |= FTRACE_FL_FAILED;
				if ((system_state == SYSTEM_BOOTING) ||
				    !core_kernel_text(rec->ip)) {
					ftrace_free_rec(rec);
				} else
					ftrace_bug(failed, rec->ip);
			}
		}
	}
}

static int
ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
{
	int ret;

	ret = ftrace_make_nop(mod, rec, mcount_addr);
	if (ret) {
		ftrace_bug(ret, rec->ip);
		rec->flags |= FTRACE_FL_FAILED;
		return 0;
	}
	return 1;
}
static int __ftrace_modify_code(void *data)
{
	int *command = data;

	if (*command & FTRACE_ENABLE_CALLS)
		ftrace_replace_code(1);
	else if (*command & FTRACE_DISABLE_CALLS)
		ftrace_replace_code(0);

	if (*command & FTRACE_UPDATE_TRACE_FUNC)
		ftrace_update_ftrace_func(ftrace_trace_function);

	if (*command & FTRACE_START_FUNC_RET)
		ftrace_enable_ftrace_graph_caller();
	else if (*command & FTRACE_STOP_FUNC_RET)
		ftrace_disable_ftrace_graph_caller();

	return 0;
}

static void ftrace_run_update_code(int command)
{
	stop_machine(__ftrace_modify_code, &command, NULL);
}
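
/*
 * stop_machine() runs __ftrace_modify_code() while all other CPUs are
 * quiesced, so no CPU can be executing an mcount call site while its
 * instruction bytes are being rewritten.
 */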
static ftrace_func_t saved_ftrace_func;
static int ftrace_start_up;

static void ftrace_startup_enable(int command)
{
	if (saved_ftrace_func != ftrace_trace_function) {
		saved_ftrace_func = ftrace_trace_function;
		command |= FTRACE_UPDATE_TRACE_FUNC;
	}

	if (!command || !ftrace_enabled)
		return;

	ftrace_run_update_code(command);
}

static void ftrace_startup(int command)
{
	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftrace_start_lock);
	ftrace_start_up++;
	command |= FTRACE_ENABLE_CALLS;

	ftrace_startup_enable(command);

	mutex_unlock(&ftrace_start_lock);
}

static void ftrace_shutdown(int command)
{
	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftrace_start_lock);
	ftrace_start_up--;
	if (!ftrace_start_up)
		command |= FTRACE_DISABLE_CALLS;

	if (saved_ftrace_func != ftrace_trace_function) {
		saved_ftrace_func = ftrace_trace_function;
		command |= FTRACE_UPDATE_TRACE_FUNC;
	}

	if (!command || !ftrace_enabled)
		goto out;

	ftrace_run_update_code(command);
 out:
	mutex_unlock(&ftrace_start_lock);
}
static void ftrace_startup_sysctl(void)
{
	int command = FTRACE_ENABLE_MCOUNT;

	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftrace_start_lock);
	/* Force update next time */
	saved_ftrace_func = NULL;
	/* ftrace_start_up is true if we want ftrace running */
	if (ftrace_start_up)
		command |= FTRACE_ENABLE_CALLS;

	ftrace_run_update_code(command);
	mutex_unlock(&ftrace_start_lock);
}

static void ftrace_shutdown_sysctl(void)
{
	int command = FTRACE_DISABLE_MCOUNT;

	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftrace_start_lock);
	/* ftrace_start_up is true if ftrace is running */
	if (ftrace_start_up)
		command |= FTRACE_DISABLE_CALLS;

	ftrace_run_update_code(command);
	mutex_unlock(&ftrace_start_lock);
}
static cycle_t		ftrace_update_time;
static unsigned long	ftrace_update_cnt;
unsigned long		ftrace_update_tot_cnt;

static int ftrace_update_code(struct module *mod)
{
	struct dyn_ftrace *p, *t;
	cycle_t start, stop;

	start = ftrace_now(raw_smp_processor_id());
	ftrace_update_cnt = 0;

	list_for_each_entry_safe(p, t, &ftrace_new_addrs, list) {

		/* If something went wrong, bail without enabling anything */
		if (unlikely(ftrace_disabled))
			return -1;

		list_del_init(&p->list);

		/* convert record (i.e. patch the mcount call with a NOP) */
		if (ftrace_code_disable(mod, p)) {
			p->flags |= FTRACE_FL_CONVERTED;
			ftrace_update_cnt++;
		} else
			ftrace_free_rec(p);
	}

	stop = ftrace_now(raw_smp_processor_id());
	ftrace_update_time = stop - start;
	ftrace_update_tot_cnt += ftrace_update_cnt;

	return 0;
}
static int __init ftrace_dyn_table_alloc(unsigned long num_to_init)
{
	struct ftrace_page *pg;
	int cnt;
	int i;

	/* allocate a few pages */
	ftrace_pages_start = (void *)get_zeroed_page(GFP_KERNEL);
	if (!ftrace_pages_start)
		return -1;

	/*
	 * Allocate a few more pages.
	 *
	 * TODO: have some parser search vmlinux before
	 *   final linking to find all calls to ftrace.
	 *   Then we could:
	 *    a) know how many pages to allocate.
	 *    b) set up the table at build time.
	 *
	 *  The dynamic code would still be necessary for
	 *  modules.
	 */
	pg = ftrace_pages = ftrace_pages_start;

	cnt = num_to_init / ENTRIES_PER_PAGE;
	pr_info("ftrace: allocating %lu entries in %d pages\n",
		num_to_init, cnt + 1);

	for (i = 0; i < cnt; i++) {
		pg->next = (void *)get_zeroed_page(GFP_KERNEL);

		/* If we fail, we'll try later anyway */
		if (!pg->next)
			break;

		pg = pg->next;
	}

	return 0;
}
enum {
	FTRACE_ITER_FILTER	= (1 << 0),
	FTRACE_ITER_CONT	= (1 << 1),
	FTRACE_ITER_NOTRACE	= (1 << 2),
	FTRACE_ITER_FAILURES	= (1 << 3),
};

#define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */

struct ftrace_iterator {
	loff_t			pos;
	struct ftrace_page	*pg;
	int			idx;
	unsigned		flags;
	unsigned char		buffer[FTRACE_BUFF_MAX+1];
	unsigned		buffer_idx;
	unsigned		filtered;
};
static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct ftrace_iterator *iter = m->private;
	struct dyn_ftrace *rec = NULL;

	(*pos)++;

	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);
 retry:
	if (iter->idx >= iter->pg->index) {
		if (iter->pg->next) {
			iter->pg = iter->pg->next;
			iter->idx = 0;
			goto retry;
		} else {
			iter->idx = -1;
		}
	} else {
		rec = &iter->pg->records[iter->idx++];
		if ((rec->flags & FTRACE_FL_FREE) ||

		    (!(iter->flags & FTRACE_ITER_FAILURES) &&
		     (rec->flags & FTRACE_FL_FAILED)) ||

		    ((iter->flags & FTRACE_ITER_FAILURES) &&
		     !(rec->flags & FTRACE_FL_FAILED)) ||

		    ((iter->flags & FTRACE_ITER_FILTER) &&
		     !(rec->flags & FTRACE_FL_FILTER)) ||

		    ((iter->flags & FTRACE_ITER_NOTRACE) &&
		     !(rec->flags & FTRACE_FL_NOTRACE))) {
			rec = NULL;
			goto retry;
		}
	}
	spin_unlock(&ftrace_lock);

	iter->pos = *pos;

	return rec;
}

static void *t_start(struct seq_file *m, loff_t *pos)
{
	struct ftrace_iterator *iter = m->private;
	void *p = NULL;
	loff_t l = -1;

	if (*pos > iter->pos)
		*pos = iter->pos;

	l = *pos;
	p = t_next(m, p, &l);

	return p;
}

static void t_stop(struct seq_file *m, void *p)
{
}

static int t_show(struct seq_file *m, void *v)
{
	struct ftrace_iterator *iter = m->private;
	struct dyn_ftrace *rec = v;
	char str[KSYM_SYMBOL_LEN];
	int ret;

	if (!rec)
		return 0;

	kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);

	ret = seq_printf(m, "%s\n", str);
	if (ret < 0) {
		iter->pos--;
		iter->idx--;
	}

	return 0;
}
static struct seq_operations show_ftrace_seq_ops = {
	.start = t_start,
	.next = t_next,
	.stop = t_stop,
	.show = t_show,
};

static int
ftrace_avail_open(struct inode *inode, struct file *file)
{
	struct ftrace_iterator *iter;
	int ret;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return -ENOMEM;

	iter->pg = ftrace_pages_start;
	iter->pos = -1;

	ret = seq_open(file, &show_ftrace_seq_ops);
	if (!ret) {
		struct seq_file *m = file->private_data;

		m->private = iter;
	} else {
		kfree(iter);
	}

	return ret;
}

int ftrace_avail_release(struct inode *inode, struct file *file)
{
	struct seq_file *m = (struct seq_file *)file->private_data;
	struct ftrace_iterator *iter = m->private;

	seq_release(inode, file);
	kfree(iter);

	return 0;
}

static int
ftrace_failures_open(struct inode *inode, struct file *file)
{
	int ret;
	struct seq_file *m;
	struct ftrace_iterator *iter;

	ret = ftrace_avail_open(inode, file);
	if (!ret) {
		m = (struct seq_file *)file->private_data;
		iter = (struct ftrace_iterator *)m->private;
		iter->flags = FTRACE_ITER_FAILURES;
	}

	return ret;
}
static void ftrace_filter_reset(int enable)
{
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;
	unsigned long type = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
	unsigned i;

	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);
	if (enable)
		ftrace_filtered = 0;
	pg = ftrace_pages_start;
	while (pg) {
		for (i = 0; i < pg->index; i++) {
			rec = &pg->records[i];
			if (rec->flags & FTRACE_FL_FAILED)
				continue;
			rec->flags &= ~type;
		}
		pg = pg->next;
	}
	spin_unlock(&ftrace_lock);
}
static int
ftrace_regex_open(struct inode *inode, struct file *file, int enable)
{
	struct ftrace_iterator *iter;
	int ret = 0;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return -ENOMEM;

	mutex_lock(&ftrace_regex_lock);
	if ((file->f_mode & FMODE_WRITE) &&
	    !(file->f_flags & O_APPEND))
		ftrace_filter_reset(enable);

	if (file->f_mode & FMODE_READ) {
		iter->pg = ftrace_pages_start;
		iter->pos = -1;
		iter->flags = enable ? FTRACE_ITER_FILTER :
			FTRACE_ITER_NOTRACE;

		ret = seq_open(file, &show_ftrace_seq_ops);
		if (!ret) {
			struct seq_file *m = file->private_data;
			m->private = iter;
		} else
			kfree(iter);
	} else
		file->private_data = iter;
	mutex_unlock(&ftrace_regex_lock);

	return ret;
}

static int
ftrace_filter_open(struct inode *inode, struct file *file)
{
	return ftrace_regex_open(inode, file, 1);
}

static int
ftrace_notrace_open(struct inode *inode, struct file *file)
{
	return ftrace_regex_open(inode, file, 0);
}

static ssize_t
ftrace_regex_read(struct file *file, char __user *ubuf,
		  size_t cnt, loff_t *ppos)
{
	if (file->f_mode & FMODE_READ)
		return seq_read(file, ubuf, cnt, ppos);
	else
		return -EPERM;
}

static loff_t
ftrace_regex_lseek(struct file *file, loff_t offset, int origin)
{
	loff_t ret;

	if (file->f_mode & FMODE_READ)
		ret = seq_lseek(file, offset, origin);
	else
		file->f_pos = ret = 1;

	return ret;
}
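
/*
 * The filter text accepts simple glob patterns; for example:
 *
 *	"sched_switch"	- MATCH_FULL: symbol must match exactly
 *	"sched_*"	- MATCH_FRONT_ONLY: match the prefix
 *	"*_lock"	- MATCH_END_ONLY: match the suffix
 *	"*idle*"	- MATCH_MIDDLE_ONLY: match a substring
 */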
enum {
	MATCH_FULL,
	MATCH_FRONT_ONLY,
	MATCH_MIDDLE_ONLY,
	MATCH_END_ONLY,
};

static void
ftrace_match(unsigned char *buff, int len, int enable)
{
	char str[KSYM_SYMBOL_LEN];
	char *search = NULL;
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;
	int type = MATCH_FULL;
	unsigned long flag = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
	unsigned i, match = 0, search_len = 0;

	for (i = 0; i < len; i++) {
		if (buff[i] == '*') {
			if (!i) {
				search = buff + i + 1;
				type = MATCH_END_ONLY;
				search_len = len - (i + 1);
			} else {
				if (type == MATCH_END_ONLY) {
					type = MATCH_MIDDLE_ONLY;
				} else {
					match = i;
					type = MATCH_FRONT_ONLY;
					buff[i] = 0;
					break;
				}
			}
		}
	}

	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);
	if (enable)
		ftrace_filtered = 1;
	pg = ftrace_pages_start;
	while (pg) {
		for (i = 0; i < pg->index; i++) {
			int matched = 0;
			char *ptr;

			rec = &pg->records[i];
			if (rec->flags & FTRACE_FL_FAILED)
				continue;
			kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
			switch (type) {
			case MATCH_FULL:
				if (strcmp(str, buff) == 0)
					matched = 1;
				break;
			case MATCH_FRONT_ONLY:
				if (memcmp(str, buff, match) == 0)
					matched = 1;
				break;
			case MATCH_MIDDLE_ONLY:
				if (strstr(str, search))
					matched = 1;
				break;
			case MATCH_END_ONLY:
				ptr = strstr(str, search);
				if (ptr && (ptr[search_len] == 0))
					matched = 1;
				break;
			}
			if (matched)
				rec->flags |= flag;
		}
		pg = pg->next;
	}
	spin_unlock(&ftrace_lock);
}
static ssize_t
ftrace_regex_write(struct file *file, const char __user *ubuf,
		   size_t cnt, loff_t *ppos, int enable)
{
	struct ftrace_iterator *iter;
	char ch;
	size_t read = 0;
	ssize_t ret;

	if (!cnt)	/* cnt is unsigned, so it cannot be negative */
		return 0;

	mutex_lock(&ftrace_regex_lock);

	if (file->f_mode & FMODE_READ) {
		struct seq_file *m = file->private_data;
		iter = m->private;
	} else
		iter = file->private_data;

	if (!*ppos) {
		iter->flags &= ~FTRACE_ITER_CONT;
		iter->buffer_idx = 0;
	}

	ret = get_user(ch, ubuf++);
	if (ret)
		goto out;
	read++;
	cnt--;

	if (!(iter->flags & ~FTRACE_ITER_CONT)) {
		/* skip white space */
		while (cnt && isspace(ch)) {
			ret = get_user(ch, ubuf++);
			if (ret)
				goto out;
			read++;
			cnt--;
		}

		/* only spaces were written */
		if (isspace(ch)) {
			file->f_pos += read;
			ret = read;
			goto out;
		}

		iter->buffer_idx = 0;
	}

	while (cnt && !isspace(ch)) {
		if (iter->buffer_idx < FTRACE_BUFF_MAX)
			iter->buffer[iter->buffer_idx++] = ch;
		else {
			ret = -EINVAL;
			goto out;
		}
		ret = get_user(ch, ubuf++);
		if (ret)
			goto out;
		read++;
		cnt--;
	}

	if (isspace(ch)) {
		iter->filtered++;
		iter->buffer[iter->buffer_idx] = 0;
		ftrace_match(iter->buffer, iter->buffer_idx, enable);
		iter->buffer_idx = 0;
	} else
		iter->flags |= FTRACE_ITER_CONT;

	file->f_pos += read;

	ret = read;
 out:
	mutex_unlock(&ftrace_regex_lock);

	return ret;
}

static ssize_t
ftrace_filter_write(struct file *file, const char __user *ubuf,
		    size_t cnt, loff_t *ppos)
{
	return ftrace_regex_write(file, ubuf, cnt, ppos, 1);
}

static ssize_t
ftrace_notrace_write(struct file *file, const char __user *ubuf,
		     size_t cnt, loff_t *ppos)
{
	return ftrace_regex_write(file, ubuf, cnt, ppos, 0);
}
static void
ftrace_set_regex(unsigned char *buf, int len, int reset, int enable)
{
	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftrace_regex_lock);
	if (reset)
		ftrace_filter_reset(enable);
	if (buf)
		ftrace_match(buf, len, enable);
	mutex_unlock(&ftrace_regex_lock);
}

/**
 * ftrace_set_filter - set a function to filter on in ftrace
 * @buf - the string that holds the function filter text.
 * @len - the length of the string.
 * @reset - non-zero to reset all filters before applying this filter.
 *
 * Filters denote which functions should be enabled when tracing is enabled.
 * If @buf is NULL and reset is set, all functions will be enabled for tracing.
 */
void ftrace_set_filter(unsigned char *buf, int len, int reset)
{
	ftrace_set_regex(buf, len, reset, 1);
}

/**
 * ftrace_set_notrace - set a function to not trace in ftrace
 * @buf - the string that holds the function notrace text.
 * @len - the length of the string.
 * @reset - non-zero to reset all notrace filters before applying this one.
 *
 * Notrace filters denote which functions should not be enabled when tracing
 * is enabled. If @buf is NULL and reset is set, all functions will be enabled
 * for tracing.
 */
void ftrace_set_notrace(unsigned char *buf, int len, int reset)
{
	ftrace_set_regex(buf, len, reset, 0);
}
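
/*
 * Illustrative in-kernel usage (the patterns here are examples only):
 *
 *	ftrace_set_filter("sched_*", strlen("sched_*"), 1);
 *	ftrace_set_notrace("*spin_lock*", strlen("*spin_lock*"), 0);
 */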
static int
ftrace_regex_release(struct inode *inode, struct file *file, int enable)
{
	struct seq_file *m = (struct seq_file *)file->private_data;
	struct ftrace_iterator *iter;

	mutex_lock(&ftrace_regex_lock);
	if (file->f_mode & FMODE_READ) {
		iter = m->private;

		seq_release(inode, file);
	} else
		iter = file->private_data;

	if (iter->buffer_idx) {
		iter->filtered++;
		iter->buffer[iter->buffer_idx] = 0;
		ftrace_match(iter->buffer, iter->buffer_idx, enable);
	}

	mutex_lock(&ftrace_sysctl_lock);
	mutex_lock(&ftrace_start_lock);
	if (ftrace_start_up && ftrace_enabled)
		ftrace_run_update_code(FTRACE_ENABLE_CALLS);
	mutex_unlock(&ftrace_start_lock);
	mutex_unlock(&ftrace_sysctl_lock);

	kfree(iter);
	mutex_unlock(&ftrace_regex_lock);
	return 0;
}

static int
ftrace_filter_release(struct inode *inode, struct file *file)
{
	return ftrace_regex_release(inode, file, 1);
}

static int
ftrace_notrace_release(struct inode *inode, struct file *file)
{
	return ftrace_regex_release(inode, file, 0);
}
static struct file_operations ftrace_avail_fops = {
	.open = ftrace_avail_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = ftrace_avail_release,
};

static struct file_operations ftrace_failures_fops = {
	.open = ftrace_failures_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = ftrace_avail_release,
};

static struct file_operations ftrace_filter_fops = {
	.open = ftrace_filter_open,
	.read = ftrace_regex_read,
	.write = ftrace_filter_write,
	.llseek = ftrace_regex_lseek,
	.release = ftrace_filter_release,
};

static struct file_operations ftrace_notrace_fops = {
	.open = ftrace_notrace_open,
	.read = ftrace_regex_read,
	.write = ftrace_notrace_write,
	.llseek = ftrace_regex_lseek,
	.release = ftrace_notrace_release,
};
static __init int ftrace_init_dyn_debugfs(struct dentry *d_tracer)
{
	struct dentry *entry;

	entry = debugfs_create_file("available_filter_functions", 0444,
				    d_tracer, NULL, &ftrace_avail_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'available_filter_functions' entry\n");

	entry = debugfs_create_file("failures", 0444,
				    d_tracer, NULL, &ftrace_failures_fops);
	if (!entry)
		pr_warning("Could not create debugfs 'failures' entry\n");

	entry = debugfs_create_file("set_ftrace_filter", 0644, d_tracer,
				    NULL, &ftrace_filter_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'set_ftrace_filter' entry\n");

	entry = debugfs_create_file("set_ftrace_notrace", 0644, d_tracer,
				    NULL, &ftrace_notrace_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'set_ftrace_notrace' entry\n");

	return 0;
}
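
/*
 * From user space these files appear under the tracing debugfs
 * directory (the path depends on where debugfs is mounted), e.g.:
 *
 *	echo 'sched_*' > /sys/kernel/debug/tracing/set_ftrace_filter
 *	echo '*_lock'  > /sys/kernel/debug/tracing/set_ftrace_notrace
 *	cat /sys/kernel/debug/tracing/available_filter_functions
 */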
static int ftrace_convert_nops(struct module *mod,
			       unsigned long *start,
			       unsigned long *end)
{
	unsigned long *p;
	unsigned long addr;
	unsigned long flags;

	mutex_lock(&ftrace_start_lock);
	p = start;
	while (p < end) {
		addr = ftrace_call_adjust(*p++);
		/*
		 * Some architecture linkers will pad between
		 * the different mcount_loc sections of different
		 * object files to satisfy alignments.
		 * Skip any NULL pointers.
		 */
		if (!addr)
			continue;
		ftrace_record_ip(addr);
	}

	/* disable interrupts to prevent kstop machine */
	local_irq_save(flags);
	ftrace_update_code(mod);
	local_irq_restore(flags);
	mutex_unlock(&ftrace_start_lock);

	return 0;
}

void ftrace_init_module(struct module *mod,
			unsigned long *start, unsigned long *end)
{
	if (ftrace_disabled || start == end)
		return;
	ftrace_convert_nops(mod, start, end);
}
extern unsigned long __start_mcount_loc[];
extern unsigned long __stop_mcount_loc[];

void __init ftrace_init(void)
{
	unsigned long count, addr, flags;
	int ret;

	/* Keep the ftrace pointer to the stub */
	addr = (unsigned long)ftrace_stub;

	local_irq_save(flags);
	ftrace_dyn_arch_init(&addr);
	local_irq_restore(flags);

	/* ftrace_dyn_arch_init places the return code in addr */
	if (addr)
		goto failed;

	count = __stop_mcount_loc - __start_mcount_loc;

	ret = ftrace_dyn_table_alloc(count);
	if (ret)
		goto failed;

	last_ftrace_enabled = ftrace_enabled = 1;

	ret = ftrace_convert_nops(NULL,
				  __start_mcount_loc,
				  __stop_mcount_loc);

	return;
 failed:
	ftrace_disabled = 1;
}
#else /* !CONFIG_DYNAMIC_FTRACE */

static int __init ftrace_nodyn_init(void)
{
	ftrace_enabled = 1;
	return 0;
}
device_initcall(ftrace_nodyn_init);

static inline int ftrace_init_dyn_debugfs(struct dentry *d_tracer) { return 0; }
static inline void ftrace_startup_enable(int command) { }
/* Keep as macros so we do not need to define the commands */
# define ftrace_startup(command)	do { } while (0)
# define ftrace_shutdown(command)	do { } while (0)
# define ftrace_startup_sysctl()	do { } while (0)
# define ftrace_shutdown_sysctl()	do { } while (0)
#endif /* CONFIG_DYNAMIC_FTRACE */
static ssize_t
ftrace_pid_read(struct file *file, char __user *ubuf,
		size_t cnt, loff_t *ppos)
{
	char buf[64];
	int r;

	if (ftrace_pid_trace >= 0)
		r = sprintf(buf, "%d\n", ftrace_pid_trace);
	else
		r = sprintf(buf, "no pid\n");

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

static ssize_t
ftrace_pid_write(struct file *filp, const char __user *ubuf,
		 size_t cnt, loff_t *ppos)
{
	char buf[64];
	long val;
	int ret;

	if (cnt >= sizeof(buf))
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	ret = strict_strtol(buf, 10, &val);
	if (ret < 0)
		return ret;

	mutex_lock(&ftrace_start_lock);
	if (val < 0) {
		/* disable pid tracing */
		if (ftrace_pid_trace < 0)
			goto out;
		ftrace_pid_trace = -1;
	} else {
		if (ftrace_pid_trace == val)
			goto out;

		ftrace_pid_trace = val;
	}

	/* update the function call */
	ftrace_update_pid_func();
	ftrace_startup_enable(0);

 out:
	mutex_unlock(&ftrace_start_lock);

	return cnt;
}
static struct file_operations ftrace_pid_fops = {
	.read = ftrace_pid_read,
	.write = ftrace_pid_write,
};

static __init int ftrace_init_debugfs(void)
{
	struct dentry *d_tracer;
	struct dentry *entry;

	d_tracer = tracing_init_dentry();
	if (!d_tracer)
		return 0;

	ftrace_init_dyn_debugfs(d_tracer);

	entry = debugfs_create_file("set_ftrace_pid", 0644, d_tracer,
				    NULL, &ftrace_pid_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'set_ftrace_pid' entry\n");
	return 0;
}
fs_initcall(ftrace_init_debugfs);
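
/*
 * Example from user space (path depends on the debugfs mount point):
 *
 *	echo 1234 > /sys/kernel/debug/tracing/set_ftrace_pid	# trace pid 1234 only
 *	echo -1 > /sys/kernel/debug/tracing/set_ftrace_pid	# trace all tasks again
 */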
/**
 * ftrace_kill - kill ftrace
 *
 * This function should be used by panic code. It stops ftrace
 * but in a not-so-nice way. Because it is meant for the panic
 * path, it does no locking and may be called from atomic context.
 */
void ftrace_kill(void)
{
	ftrace_disabled = 1;
	ftrace_enabled = 0;
	clear_ftrace_function();
}
/**
 * register_ftrace_function - register a function for profiling
 * @ops - ops structure that holds the function for profiling.
 *
 * Register a function to be called by all functions in the
 * kernel.
 *
 * Note: @ops->func and all the functions it calls must be labeled
 *       with "notrace", otherwise it will go into a
 *       recursive loop.
 */
int register_ftrace_function(struct ftrace_ops *ops)
{
	int ret;

	if (unlikely(ftrace_disabled))
		return -1;

	mutex_lock(&ftrace_sysctl_lock);

	ret = __register_ftrace_function(ops);
	ftrace_startup(0);

	mutex_unlock(&ftrace_sysctl_lock);
	return ret;
}

/**
 * unregister_ftrace_function - unregister a function for profiling.
 * @ops - ops structure that holds the function to unregister
 *
 * Unregister a function that was added to be called by ftrace profiling.
 */
int unregister_ftrace_function(struct ftrace_ops *ops)
{
	int ret;

	mutex_lock(&ftrace_sysctl_lock);
	ret = __unregister_ftrace_function(ops);
	ftrace_shutdown(0);
	mutex_unlock(&ftrace_sysctl_lock);

	return ret;
}
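
/*
 * Minimal registration sketch. The callback and ops below are
 * illustrative placeholders, not part of this file; they only show the
 * expected shape of a client.
 */
#if 0
static void notrace example_trace_func(unsigned long ip,
				       unsigned long parent_ip)
{
	/* ip is the traced function, parent_ip its call site */
}

static struct ftrace_ops example_ops __read_mostly = {
	.func = example_trace_func,
};

/* register_ftrace_function(&example_ops); */
/* ... */
/* unregister_ftrace_function(&example_ops); */
#endif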
int
ftrace_enable_sysctl(struct ctl_table *table, int write,
		     struct file *file, void __user *buffer, size_t *lenp,
		     loff_t *ppos)
{
	int ret;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	mutex_lock(&ftrace_sysctl_lock);

	ret = proc_dointvec(table, write, file, buffer, lenp, ppos);

	if (ret || !write || (last_ftrace_enabled == ftrace_enabled))
		goto out;

	last_ftrace_enabled = ftrace_enabled;

	if (ftrace_enabled) {
		ftrace_startup_sysctl();

		/* we are starting ftrace again */
		if (ftrace_list != &ftrace_list_end) {
			if (ftrace_list->next == &ftrace_list_end)
				ftrace_trace_function = ftrace_list->func;
			else
				ftrace_trace_function = ftrace_list_func;
		}

	} else {
		/* stopping ftrace calls (just send to ftrace_stub) */
		ftrace_trace_function = ftrace_stub;

		ftrace_shutdown_sysctl();
	}

 out:
	mutex_unlock(&ftrace_sysctl_lock);
	return ret;
}
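
/*
 * This handler backs the kernel.ftrace_enabled sysctl:
 *
 *	echo 0 > /proc/sys/kernel/ftrace_enabled	# suspend all callbacks
 *	echo 1 > /proc/sys/kernel/ftrace_enabled	# resume them
 */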
#ifdef CONFIG_FUNCTION_GRAPH_TRACER

static atomic_t ftrace_graph_active;

/* The callbacks that hook a function */
trace_func_graph_ret_t ftrace_graph_return =
			(trace_func_graph_ret_t)ftrace_stub;
trace_func_graph_ent_t ftrace_graph_entry =
			(trace_func_graph_ent_t)ftrace_stub;
/*
 * Try to assign a return stack to every task; each pass handles at most
 * FTRACE_RETSTACK_ALLOC_SIZE tasks.
 */
static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list)
{
	int i;
	int ret = 0;
	unsigned long flags;
	int start = 0, end = FTRACE_RETSTACK_ALLOC_SIZE;
	struct task_struct *g, *t;

	for (i = 0; i < FTRACE_RETSTACK_ALLOC_SIZE; i++) {
		ret_stack_list[i] = kmalloc(FTRACE_RETFUNC_DEPTH
					    * sizeof(struct ftrace_ret_stack),
					    GFP_KERNEL);
		if (!ret_stack_list[i]) {
			start = 0;
			end = i;
			ret = -ENOMEM;
			goto free;
		}
	}

	read_lock_irqsave(&tasklist_lock, flags);
	do_each_thread(g, t) {
		if (start == end) {
			ret = -EAGAIN;
			goto unlock;
		}

		if (t->ret_stack == NULL) {
			t->ret_stack = ret_stack_list[start++];
			t->curr_ret_stack = -1;
			atomic_set(&t->trace_overrun, 0);
		}
	} while_each_thread(g, t);

 unlock:
	read_unlock_irqrestore(&tasklist_lock, flags);
 free:
	for (i = start; i < end; i++)
		kfree(ret_stack_list[i]);
	return ret;
}

/* Allocate a return stack for each task; retry while new tasks appear */
static int start_graph_tracing(void)
{
	struct ftrace_ret_stack **ret_stack_list;
	int ret;

	ret_stack_list = kmalloc(FTRACE_RETSTACK_ALLOC_SIZE *
				 sizeof(struct ftrace_ret_stack *),
				 GFP_KERNEL);

	if (!ret_stack_list)
		return -ENOMEM;

	do {
		ret = alloc_retstack_tasklist(ret_stack_list);
	} while (ret == -EAGAIN);

	kfree(ret_stack_list);
	return ret;
}
int register_ftrace_graph(trace_func_graph_ret_t retfunc,
			  trace_func_graph_ent_t entryfunc)
{
	int ret = 0;

	mutex_lock(&ftrace_sysctl_lock);

	atomic_inc(&ftrace_graph_active);
	ret = start_graph_tracing();
	if (ret) {
		atomic_dec(&ftrace_graph_active);
		goto out;
	}

	ftrace_graph_return = retfunc;
	ftrace_graph_entry = entryfunc;

	ftrace_startup(FTRACE_START_FUNC_RET);

 out:
	mutex_unlock(&ftrace_sysctl_lock);
	return ret;
}

void unregister_ftrace_graph(void)
{
	mutex_lock(&ftrace_sysctl_lock);

	atomic_dec(&ftrace_graph_active);
	ftrace_graph_return = (trace_func_graph_ret_t)ftrace_stub;
	ftrace_graph_entry = (trace_func_graph_ent_t)ftrace_stub;
	ftrace_shutdown(FTRACE_STOP_FUNC_RET);

	mutex_unlock(&ftrace_sysctl_lock);
}
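
/*
 * Sketch of a graph-tracer client (handler names are illustrative; see
 * trace_func_graph_ent_t and trace_func_graph_ret_t in linux/ftrace.h
 * for the exact callback signatures):
 *
 *	register_ftrace_graph(example_return_handler, example_entry_handler);
 *	...
 *	unregister_ftrace_graph();
 */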
/* Allocate a return stack for a newly created task */
void ftrace_graph_init_task(struct task_struct *t)
{
	if (atomic_read(&ftrace_graph_active)) {
		t->ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH
				       * sizeof(struct ftrace_ret_stack),
				       GFP_KERNEL);
		if (!t->ret_stack)
			return;
		t->curr_ret_stack = -1;
		atomic_set(&t->trace_overrun, 0);
	} else
		t->ret_stack = NULL;
}

void ftrace_graph_exit_task(struct task_struct *t)
{
	struct ftrace_ret_stack *ret_stack = t->ret_stack;

	t->ret_stack = NULL;
	/* NULL must become visible to IRQs before we free it: */
	barrier();

	kfree(ret_stack);
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */