/*
 * ring buffer based function tracer
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally taken from the RT patch by:
 *    Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code from the latency_tracer, that is:
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 William Lee Irwin III
 */
#include <linux/utsrelease.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/hardirq.h>
#include <linux/linkage.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/gfp.h>
unsigned long __read_mostly tracing_max_latency = (cycle_t)ULONG_MAX;
unsigned long __read_mostly tracing_thresh;

static long notrace
ns2usecs(cycle_t nsec)
{
        nsec += 500;
        do_div(nsec, 1000);
        return nsec;
}
static atomic_t tracer_counter;
static struct trace_array global_trace;

static DEFINE_PER_CPU(struct trace_array_cpu, global_trace_cpu);

static struct trace_array max_tr;

static DEFINE_PER_CPU(struct trace_array_cpu, max_data);

static int tracer_enabled;
static unsigned long trace_nr_entries = 4096UL;

static struct tracer *trace_types __read_mostly;
static struct tracer *current_trace __read_mostly;
static int max_tracer_type_len;

static DEFINE_MUTEX(trace_types_lock);
static int __init set_nr_entries(char *str)
{
        if (!str)
                return 0;
        trace_nr_entries = simple_strtoul(str, &str, 0);
        return 1;
}
__setup("trace_entries=", set_nr_entries);
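
/*
 * Usage sketch (the value is illustrative): booting with
 * "trace_entries=65536" on the kernel command line sizes each
 * per-CPU buffer for 65536 entries instead of the 4096 default.
 */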

enum trace_type {
        __TRACE_FIRST_TYPE = 0,

        TRACE_FN,
        TRACE_CTX,

        __TRACE_LAST_TYPE
};

enum trace_flag_type {
        TRACE_FLAG_IRQS_OFF     = 0x01,
        TRACE_FLAG_NEED_RESCHED = 0x02,
        TRACE_FLAG_HARDIRQ      = 0x04,
        TRACE_FLAG_SOFTIRQ      = 0x08,
};

enum trace_iterator_flags {
        TRACE_ITER_PRINT_PARENT = 0x01,
        TRACE_ITER_SYM_OFFSET   = 0x02,
        TRACE_ITER_SYM_ADDR     = 0x04,
        TRACE_ITER_VERBOSE      = 0x08,
};

#define TRACE_ITER_SYM_MASK \
        (TRACE_ITER_PRINT_PARENT|TRACE_ITER_SYM_OFFSET|TRACE_ITER_SYM_ADDR)

/* These must match the bit positions above */
static const char *trace_options[] = {
        "print-parent",
        "sym-offset",
        "sym-addr",
        "verbose",
        NULL
};

static unsigned trace_flags;
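
/*
 * Illustrative usage (paths assume debugfs mounted at /debugfs): each
 * option toggles one trace_flags bit and can be flipped at runtime:
 *
 *      echo sym-addr  > /debugfs/tracing/iter_ctrl
 *      echo noverbose > /debugfs/tracing/iter_ctrl
 *
 * A "no" prefix clears the bit; see tracing_iter_ctrl_write() below.
 */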

/*
 * Copy the new maximum trace into the separate maximum-trace
 * structure. (this way the maximum trace is permanently saved,
 * for later retrieval via /debugfs/tracing/latency_trace)
 */
static void notrace
__update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
        struct trace_array_cpu *data = tr->data[cpu];

        max_tr.cpu = cpu;
        max_tr.time_start = data->preempt_timestamp;

        data = max_tr.data[cpu];
        data->saved_latency = tracing_max_latency;

        memcpy(data->comm, tsk->comm, TASK_COMM_LEN);
        data->pid = tsk->pid;
        data->uid = tsk->uid;
        data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
        data->policy = tsk->policy;
        data->rt_priority = tsk->rt_priority;

        /* record this task's comm */
        tracing_record_cmdline(current);
}

static void notrace
update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
        struct trace_array_cpu *data;
        void *save_trace;
        int i;

        /* clear out all the previous traces */
        for_each_possible_cpu(i) {
                data = tr->data[i];
                save_trace = max_tr.data[i]->trace;
                memcpy(max_tr.data[i], data, sizeof(*data));
                data->trace = save_trace;
        }

        __update_max_tr(tr, tsk, cpu);
}
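
/*
 * Note on the copy above: the memcpy() clones each CPU's bookkeeping
 * into max_tr, then the saved pointer swaps the underlying entry
 * buffers, so max_tr ends up owning the snapshot while the live trace
 * keeps writing into the buffer that previously backed max_tr. No
 * entry data is actually copied.
 */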

/**
 * update_max_tr_single - only copy one trace over, and reset the rest
 * @tr - tracer
 * @tsk - task with the latency
 * @cpu - the cpu of the buffer to copy.
 */
static void notrace
update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
        struct trace_array_cpu *data = tr->data[cpu];
        void *save_trace;
        int i;

        for_each_possible_cpu(i)
                tracing_reset(max_tr.data[i]);

        save_trace = max_tr.data[cpu]->trace;
        memcpy(max_tr.data[cpu], data, sizeof(*data));
        data->trace = save_trace;

        __update_max_tr(tr, tsk, cpu);
}

int register_tracer(struct tracer *type)
{
        struct tracer *t;
        int len;
        int ret = 0;

        if (!type->name) {
                pr_info("Tracer must have a name\n");
                return -1;
        }

        mutex_lock(&trace_types_lock);
        for (t = trace_types; t; t = t->next) {
                if (strcmp(type->name, t->name) == 0) {
                        /* already found */
                        pr_info("Trace %s already registered\n",
                                type->name);
                        ret = -1;
                        goto out;
                }
        }

        type->next = trace_types;
        trace_types = type;
        len = strlen(type->name);
        if (len > max_tracer_type_len)
                max_tracer_type_len = len;
 out:
        mutex_unlock(&trace_types_lock);

        return ret;
}
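
/*
 * Minimal registration sketch (mirrors the no_tracer stub at the
 * bottom of this file; the variable name is illustrative):
 *
 *      static struct tracer nop_tracer __read_mostly = {
 *              .name = "none",
 *      };
 *
 *      register_tracer(&nop_tracer);
 *
 * The tracer then shows up in /debugfs/tracing/available_tracers.
 */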

void unregister_tracer(struct tracer *type)
{
        struct tracer **t;
        int len;

        mutex_lock(&trace_types_lock);
        for (t = &trace_types; *t; t = &(*t)->next) {
                if (*t == type)
                        goto found;
        }
        pr_info("Trace %s not registered\n", type->name);
        goto out;

 found:
        *t = (*t)->next;
        if (strlen(type->name) != max_tracer_type_len)
                goto out;

        max_tracer_type_len = 0;
        for (t = &trace_types; *t; t = &(*t)->next) {
                len = strlen((*t)->name);
                if (len > max_tracer_type_len)
                        max_tracer_type_len = len;
        }
 out:
        mutex_unlock(&trace_types_lock);
}

void notrace tracing_reset(struct trace_array_cpu *data)
{
        data->trace_idx = 0;
        atomic_set(&data->underrun, 0);
}

static void notrace
function_trace_call(unsigned long ip, unsigned long parent_ip)
{
        struct trace_array *tr = &global_trace;
        struct trace_array_cpu *data;
        unsigned long flags;
        long disabled;
        int cpu;

        if (unlikely(!tracer_enabled))
                return;

        raw_local_irq_save(flags);
        cpu = raw_smp_processor_id();
        data = tr->data[cpu];
        disabled = atomic_inc_return(&data->disabled);

        if (likely(disabled == 1))
                ftrace(tr, data, ip, parent_ip, flags);

        atomic_dec(&data->disabled);
        raw_local_irq_restore(flags);
}

static struct ftrace_ops trace_ops __read_mostly =
{
        .func = function_trace_call,
};

notrace void tracing_start_function_trace(void)
{
        register_ftrace_function(&trace_ops);
}

notrace void tracing_stop_function_trace(void)
{
        unregister_ftrace_function(&trace_ops);
}

#define SAVED_CMDLINES 128
static unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
static unsigned map_cmdline_to_pid[SAVED_CMDLINES];
static char saved_cmdlines[SAVED_CMDLINES][TASK_COMM_LEN];
static int cmdline_idx;
static DEFINE_SPINLOCK(trace_cmdline_lock);
atomic_t trace_record_cmdline_disabled;
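
/*
 * The comm cache keeps both directions of a small map: pid ->
 * saved_cmdlines slot and slot -> pid. Only SAVED_CMDLINES (128)
 * comms are kept; slots are handed out round-robin via cmdline_idx,
 * and when a slot is recycled its previous owner's mapping is
 * invalidated (set to (unsigned)-1). Lookups that miss print "<...>"
 * instead of a task name.
 */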

static void trace_init_cmdlines(void)
{
        memset(&map_pid_to_cmdline, -1, sizeof(map_pid_to_cmdline));
        memset(&map_cmdline_to_pid, -1, sizeof(map_cmdline_to_pid));
        cmdline_idx = 0;
}

notrace void trace_stop_cmdline_recording(void);

static void notrace trace_save_cmdline(struct task_struct *tsk)
{
        unsigned map;
        unsigned idx;

        if (!tsk->pid || unlikely(tsk->pid > PID_MAX_DEFAULT))
                return;

        /*
         * It's not the end of the world if we don't get
         * the lock, but we also don't want to spin
         * nor do we want to disable interrupts,
         * so if we miss here, then better luck next time.
         */
        if (!spin_trylock(&trace_cmdline_lock))
                return;

        idx = map_pid_to_cmdline[tsk->pid];
        if (idx >= SAVED_CMDLINES) {
                idx = (cmdline_idx + 1) % SAVED_CMDLINES;

                map = map_cmdline_to_pid[idx];
                if (map <= PID_MAX_DEFAULT)
                        map_pid_to_cmdline[map] = (unsigned)-1;

                map_pid_to_cmdline[tsk->pid] = idx;
                map_cmdline_to_pid[idx] = tsk->pid;

                cmdline_idx = idx;
        }

        memcpy(&saved_cmdlines[idx], tsk->comm, TASK_COMM_LEN);

        spin_unlock(&trace_cmdline_lock);
}

static notrace char *trace_find_cmdline(int pid)
{
        char *cmdline = "<...>";
        unsigned map;

        if (!pid)
                return "<idle>";

        if (pid > PID_MAX_DEFAULT)
                goto out;

        map = map_pid_to_cmdline[pid];
        if (map >= SAVED_CMDLINES)
                goto out;

        cmdline = saved_cmdlines[map];

 out:
        return cmdline;
}

notrace void tracing_record_cmdline(struct task_struct *tsk)
{
        if (atomic_read(&trace_record_cmdline_disabled))
                return;

        trace_save_cmdline(tsk);
}

static inline notrace struct trace_entry *
tracing_get_trace_entry(struct trace_array *tr,
                        struct trace_array_cpu *data)
{
        unsigned long idx, idx_next;
        struct trace_entry *entry;

        idx = data->trace_idx;
        idx_next = idx + 1;

        if (unlikely(idx_next >= tr->entries)) {
                atomic_inc(&data->underrun);
                idx_next = 0;
        }

        data->trace_idx = idx_next;

        if (unlikely(idx_next != 0 && atomic_read(&data->underrun)))
                atomic_inc(&data->underrun);

        entry = data->trace + idx * TRACE_ENTRY_SIZE;

        return entry;
}
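
/*
 * Note: "underrun" above counts ring-buffer wraparounds. Once the
 * buffer has wrapped, the oldest entries are being overwritten, and
 * readers use the count to locate the logical start of the ring (see
 * trace_entry_idx() below).
 */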

static inline notrace void
tracing_generic_entry_update(struct trace_entry *entry,
                             unsigned long flags)
{
        struct task_struct *tsk = current;
        unsigned long pc;

        pc = preempt_count();

        entry->idx = atomic_inc_return(&tracer_counter);
        entry->preempt_count = pc & 0xff;
        entry->pid = tsk->pid;
        entry->t = now(raw_smp_processor_id());
        entry->flags = (irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) |
                ((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
                ((pc & SOFTIRQ_MASK) ? TRACE_FLAG_SOFTIRQ : 0) |
                (need_resched() ? TRACE_FLAG_NEED_RESCHED : 0);
}
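
/*
 * In the latency output these flags become the one-character columns
 * printed by lat_print_generic() below: 'd' for irqs-off, 'N' for
 * need-resched, and 'h'/'s' (or 'H' when nested) for hardirq/softirq
 * context. A '.' marks a clear bit.
 */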

static void notrace
ftrace(struct trace_array *tr, struct trace_array_cpu *data,
       unsigned long ip, unsigned long parent_ip,
       unsigned long flags)
{
        struct trace_entry *entry;

        entry = tracing_get_trace_entry(tr, data);
        tracing_generic_entry_update(entry, flags);
        entry->type = TRACE_FN;
        entry->fn.ip = ip;
        entry->fn.parent_ip = parent_ip;
}

notrace void
tracing_sched_switch_trace(struct trace_array *tr,
                           struct trace_array_cpu *data,
                           struct task_struct *prev, struct task_struct *next,
                           unsigned long flags)
{
        struct trace_entry *entry;

        entry = tracing_get_trace_entry(tr, data);
        tracing_generic_entry_update(entry, flags);
        entry->type = TRACE_CTX;
        entry->ctx.prev_pid = prev->pid;
        entry->ctx.prev_prio = prev->prio;
        entry->ctx.prev_state = prev->state;
        entry->ctx.next_pid = next->pid;
        entry->ctx.next_prio = next->prio;
}

enum trace_file_type {
        TRACE_FILE_LAT_FMT = 1,
};

static struct trace_entry *
trace_entry_idx(struct trace_array *tr, unsigned long idx, int cpu)
{
        struct trace_entry *array = tr->data[cpu]->trace;
        unsigned long underrun;

        if (idx >= tr->entries)
                return NULL;

        underrun = atomic_read(&tr->data[cpu]->underrun);
        if (underrun)
                idx = ((underrun - 1) + idx) % tr->entries;
        else if (idx >= tr->data[cpu]->trace_idx)
                return NULL;

        return &array[idx];
}

static notrace struct trace_entry *
find_next_entry(struct trace_iterator *iter, int *ent_cpu)
{
        struct trace_array *tr = iter->tr;
        struct trace_entry *ent, *next = NULL;
        int next_cpu = -1;
        int cpu;

        for_each_possible_cpu(cpu) {
                if (!tr->data[cpu]->trace)
                        continue;
                ent = trace_entry_idx(tr, iter->next_idx[cpu], cpu);
                if (ent &&
                    (!next || (long)(next->idx - ent->idx) > 0)) {
                        next = ent;
                        next_cpu = cpu;
                }
        }

        if (ent_cpu)
                *ent_cpu = next_cpu;

        return next;
}

static void *find_next_entry_inc(struct trace_iterator *iter)
{
        struct trace_entry *next;
        int next_cpu = -1;

        next = find_next_entry(iter, &next_cpu);
        if (next)
                iter->next_idx[next_cpu]++;

        iter->idx++;
        iter->ent = next;
        iter->cpu = next_cpu;

        return next ? iter : NULL;
}
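
/*
 * Iteration model: each CPU has its own ring, so the iterator keeps
 * one cursor per CPU (next_idx[]) and merge-sorts on the fly by
 * picking the candidate with the lowest global entry->idx sequence
 * number (the signed subtraction in find_next_entry() tolerates
 * counter wraparound).
 */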

static void notrace *
s_next(struct seq_file *m, void *v, loff_t *pos)
{
        struct trace_iterator *iter = m->private;
        void *ent;
        void *last_ent = iter->ent;
        int i = (int)*pos;

        (*pos)++;

        /* can't go backwards */
        if (iter->idx > i)
                return NULL;

        if (iter->idx < 0)
                ent = find_next_entry_inc(iter);
        else
                ent = iter;

        while (ent && iter->idx < i)
                ent = find_next_entry_inc(iter);

        iter->pos = *pos;

        if (last_ent && !ent)
                seq_puts(m, "\n\nvim:ft=help\n");

        return ent;
}

static void *s_start(struct seq_file *m, loff_t *pos)
{
        struct trace_iterator *iter = m->private;
        void *p = NULL;
        loff_t l = 0;
        int i;

        mutex_lock(&trace_types_lock);

        if (!current_trace || current_trace != iter->trace)
                return NULL;

        atomic_inc(&trace_record_cmdline_disabled);

        /* let the tracer grab locks here if needed */
        if (current_trace->start)
                current_trace->start(iter);

        if (*pos != iter->pos) {
                iter->ent = NULL;
                iter->cpu = 0;
                iter->idx = -1;

                for (i = 0; i < NR_CPUS; i++)
                        iter->next_idx[i] = 0;

                for (p = iter; p && l < *pos; p = s_next(m, p, &l))
                        ;

        } else {
                l = *pos - 1;
                p = s_next(m, p, &l);
        }

        return p;
}

static void s_stop(struct seq_file *m, void *p)
{
        struct trace_iterator *iter = m->private;

        atomic_dec(&trace_record_cmdline_disabled);

        /* let the tracer release locks here if needed */
        if (current_trace && current_trace == iter->trace && iter->trace->stop)
                iter->trace->stop(iter);

        mutex_unlock(&trace_types_lock);
}

static void notrace
seq_print_sym_short(struct seq_file *m, const char *fmt, unsigned long address)
{
#ifdef CONFIG_KALLSYMS
        char str[KSYM_SYMBOL_LEN];

        kallsyms_lookup(address, NULL, NULL, NULL, str);

        seq_printf(m, fmt, str);
#endif
}

static void notrace
seq_print_sym_offset(struct seq_file *m, const char *fmt, unsigned long address)
{
#ifdef CONFIG_KALLSYMS
        char str[KSYM_SYMBOL_LEN];

        sprint_symbol(str, address);
        seq_printf(m, fmt, str);
#endif
}

#ifndef CONFIG_64BIT
# define IP_FMT "%08lx"
#else
# define IP_FMT "%016lx"
#endif

static void notrace
seq_print_ip_sym(struct seq_file *m, unsigned long ip, unsigned long sym_flags)
{
        if (sym_flags & TRACE_ITER_SYM_OFFSET)
                seq_print_sym_offset(m, "%s", ip);
        else
                seq_print_sym_short(m, "%s", ip);

        if (sym_flags & TRACE_ITER_SYM_ADDR)
                seq_printf(m, " <" IP_FMT ">", ip);
}

static void notrace print_lat_help_header(struct seq_file *m)
{
        seq_puts(m, "#                _------=> CPU#            \n");
        seq_puts(m, "#               / _-----=> irqs-off        \n");
        seq_puts(m, "#              | / _----=> need-resched    \n");
        seq_puts(m, "#              || / _---=> hardirq/softirq \n");
        seq_puts(m, "#              ||| / _--=> preempt-depth   \n");
        seq_puts(m, "#              |||| /                      \n");
        seq_puts(m, "#              |||||     delay             \n");
        seq_puts(m, "#  cmd     pid ||||| time  |   caller      \n");
        seq_puts(m, "#     \\   /    |||||   \\   |   /           \n");
}

static void notrace print_func_help_header(struct seq_file *m)
{
        seq_puts(m, "#           TASK-PID   CPU#    TIMESTAMP  FUNCTION\n");
        seq_puts(m, "#              | |      |          |         |\n");
}
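
/*
 * Illustrative output line in this default format (the names and
 * numbers are made up):
 *
 *           bash-4251  [01]  1034.748350: ktime_get <-do_gettimeofday
 */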

static void notrace
print_trace_header(struct seq_file *m, struct trace_iterator *iter)
{
        unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
        struct trace_array *tr = iter->tr;
        struct trace_array_cpu *data = tr->data[tr->cpu];
        struct tracer *type = current_trace;
        unsigned long underruns = 0;
        unsigned long underrun;
        unsigned long entries = 0;
        int cpu;
        const char *name = "preemption";

        if (type)
                name = type->name;

        for_each_possible_cpu(cpu) {
                if (tr->data[cpu]->trace) {
                        underrun = atomic_read(&tr->data[cpu]->underrun);
                        if (underrun) {
                                underruns += underrun;
                                entries += tr->entries;
                        } else
                                entries += tr->data[cpu]->trace_idx;
                }
        }

        seq_printf(m, "%s latency trace v1.1.5 on %s\n",
                   name, UTS_RELEASE);
        seq_puts(m, "-----------------------------------"
                 "---------------------------------\n");
        seq_printf(m, " latency: %lu us, #%lu/%lu, CPU#%d |"
                   " (M:%s VP:%d, KP:%d, SP:%d HP:%d",
                   nsecs_to_usecs(data->saved_latency),
                   entries,
                   (entries + underruns),
                   tr->cpu,
#if defined(CONFIG_PREEMPT_NONE)
                   "server",
#elif defined(CONFIG_PREEMPT_VOLUNTARY)
                   "desktop",
#elif defined(CONFIG_PREEMPT_DESKTOP)
                   "preempt",
#else
                   "unknown",
#endif
                   /* These are reserved for later use */
                   0, 0, 0, 0);
#ifdef CONFIG_SMP
        seq_printf(m, " #P:%d)\n", num_online_cpus());
#else
        seq_puts(m, ")\n");
#endif
        seq_puts(m, "    -----------------\n");
        seq_printf(m, "    | task: %.16s-%d "
                   "(uid:%d nice:%ld policy:%ld rt_prio:%ld)\n",
                   data->comm, data->pid, data->uid, data->nice,
                   data->policy, data->rt_priority);
        seq_puts(m, "    -----------------\n");

        if (data->critical_start) {
                seq_puts(m, " => started at: ");
                seq_print_ip_sym(m, data->critical_start, sym_flags);
                seq_puts(m, "\n => ended at:   ");
                seq_print_ip_sym(m, data->critical_end, sym_flags);
                seq_puts(m, "\n");
        }

        seq_puts(m, "\n");
}

unsigned long nsecs_to_usecs(unsigned long nsecs)
{
        return nsecs / 1000;
}

static void notrace
lat_print_generic(struct seq_file *m, struct trace_entry *entry, int cpu)
{
        int hardirq, softirq;
        char *comm;

        comm = trace_find_cmdline(entry->pid);

        seq_printf(m, "%8.8s-%-5d ", comm, entry->pid);
        seq_printf(m, "%d", cpu);
        seq_printf(m, "%c%c",
                   (entry->flags & TRACE_FLAG_IRQS_OFF) ? 'd' : '.',
                   ((entry->flags & TRACE_FLAG_NEED_RESCHED) ? 'N' : '.'));

        hardirq = entry->flags & TRACE_FLAG_HARDIRQ;
        softirq = entry->flags & TRACE_FLAG_SOFTIRQ;
        if (hardirq && softirq)
                seq_putc(m, 'H');
        else {
                if (hardirq)
                        seq_putc(m, 'h');
                else {
                        if (softirq)
                                seq_putc(m, 's');
                        else
                                seq_putc(m, '.');
                }
        }

        if (entry->preempt_count)
                seq_printf(m, "%x", entry->preempt_count);
        else
                seq_puts(m, ".");
}

unsigned long preempt_mark_thresh = 100;

static void notrace
lat_print_timestamp(struct seq_file *m, unsigned long long abs_usecs,
                    unsigned long rel_usecs)
{
        seq_printf(m, " %4lldus", abs_usecs);
        if (rel_usecs > preempt_mark_thresh)
                seq_puts(m, "!: ");
        else if (rel_usecs > 1)
                seq_puts(m, "+: ");
        else
                seq_puts(m, " : ");
}
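
/*
 * The marker after the timestamp flags suspicious gaps: '!' when more
 * than preempt_mark_thresh (100) microseconds pass before the next
 * event, '+' for anything over 1us, and a blank otherwise.
 */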

static const char state_to_char[] = TASK_STATE_TO_CHAR_STR;

static void notrace
print_lat_fmt(struct seq_file *m, struct trace_iterator *iter,
              unsigned int trace_idx, int cpu)
{
        unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
        struct trace_entry *next_entry = find_next_entry(iter, NULL);
        unsigned long verbose = (trace_flags & TRACE_ITER_VERBOSE);
        struct trace_entry *entry = iter->ent;
        unsigned long abs_usecs;
        unsigned long rel_usecs;
        char *comm;
        int S;

        if (!next_entry)
                next_entry = entry;
        rel_usecs = ns2usecs(next_entry->t - entry->t);
        abs_usecs = ns2usecs(entry->t - iter->tr->time_start);

        if (verbose) {
                comm = trace_find_cmdline(entry->pid);
                seq_printf(m, "%16s %5d %d %d %08x %08x [%08lx]"
                           " %ld.%03ldms (+%ld.%03ldms): ",
                           comm,
                           entry->pid, cpu, entry->flags,
                           entry->preempt_count, trace_idx,
                           ns2usecs(entry->t),
                           abs_usecs/1000,
                           abs_usecs % 1000, rel_usecs/1000, rel_usecs % 1000);
        } else {
                lat_print_generic(m, entry, cpu);
                lat_print_timestamp(m, abs_usecs, rel_usecs);
        }
        switch (entry->type) {
        case TRACE_FN:
                seq_print_ip_sym(m, entry->fn.ip, sym_flags);
                seq_puts(m, " (");
                seq_print_ip_sym(m, entry->fn.parent_ip, sym_flags);
                seq_puts(m, ")\n");
                break;
        case TRACE_CTX:
                S = entry->ctx.prev_state < sizeof(state_to_char) ?
                        state_to_char[entry->ctx.prev_state] : 'X';
                comm = trace_find_cmdline(entry->ctx.next_pid);
                seq_printf(m, " %d:%d:%c --> %d:%d %s\n",
                           entry->ctx.prev_pid,
                           entry->ctx.prev_prio,
                           S,
                           entry->ctx.next_pid,
                           entry->ctx.next_prio,
                           comm);
                break;
        }
}

static void notrace
print_trace_fmt(struct seq_file *m, struct trace_iterator *iter)
{
        unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
        struct trace_entry *entry = iter->ent;
        unsigned long usec_rem;
        unsigned long long t;
        unsigned long secs;
        char *comm;
        int S;

        comm = trace_find_cmdline(iter->ent->pid);

        t = ns2usecs(entry->t);
        usec_rem = do_div(t, 1000000ULL);
        secs = (unsigned long)t;

        seq_printf(m, "%16s-%-5d ", comm, entry->pid);
        seq_printf(m, "[%02d] ", iter->cpu);
        seq_printf(m, "%5lu.%06lu: ", secs, usec_rem);

        switch (entry->type) {
        case TRACE_FN:
                seq_print_ip_sym(m, entry->fn.ip, sym_flags);
                if ((sym_flags & TRACE_ITER_PRINT_PARENT) &&
                    entry->fn.parent_ip) {
                        seq_printf(m, " <-");
                        seq_print_ip_sym(m, entry->fn.parent_ip, sym_flags);
                }
                seq_printf(m, "\n");
                break;
        case TRACE_CTX:
                S = entry->ctx.prev_state < sizeof(state_to_char) ?
                        state_to_char[entry->ctx.prev_state] : 'X';
                seq_printf(m, " %d:%d:%c ==> %d:%d\n",
                           entry->ctx.prev_pid,
                           entry->ctx.prev_prio,
                           S,
                           entry->ctx.next_pid,
                           entry->ctx.next_prio);
                break;
        }
}

static int trace_empty(struct trace_iterator *iter)
{
        struct trace_array_cpu *data;
        int cpu;

        for_each_possible_cpu(cpu) {
                data = iter->tr->data[cpu];

                if (data->trace &&
                    (data->trace_idx ||
                     atomic_read(&data->underrun)))
                        return 0;
        }
        return 1;
}

static int s_show(struct seq_file *m, void *v)
{
        struct trace_iterator *iter = v;

        if (iter->ent == NULL) {
                seq_printf(m, "# tracer: %s\n", iter->trace->name);
                seq_puts(m, "#\n");
                if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
                        /* print nothing if the buffers are empty */
                        if (trace_empty(iter))
                                return 0;
                        print_trace_header(m, iter);
                        if (!(trace_flags & TRACE_ITER_VERBOSE))
                                print_lat_help_header(m);
                } else {
                        if (!(trace_flags & TRACE_ITER_VERBOSE))
                                print_func_help_header(m);
                }
        } else {
                if (iter->iter_flags & TRACE_FILE_LAT_FMT)
                        print_lat_fmt(m, iter, iter->idx, iter->cpu);
                else
                        print_trace_fmt(m, iter);
        }

        return 0;
}

static struct seq_operations tracer_seq_ops = {
        .start  = s_start,
        .next   = s_next,
        .stop   = s_stop,
        .show   = s_show,
};

static struct trace_iterator notrace *
__tracing_open(struct inode *inode, struct file *file, int *ret)
{
        struct trace_iterator *iter;

        iter = kzalloc(sizeof(*iter), GFP_KERNEL);
        if (!iter) {
                *ret = -ENOMEM;
                goto out;
        }

        mutex_lock(&trace_types_lock);
        if (current_trace && current_trace->print_max)
                iter->tr = &max_tr;
        else
                iter->tr = inode->i_private;
        iter->trace = current_trace;
        iter->pos = -1;

        /* TODO stop tracer */
        *ret = seq_open(file, &tracer_seq_ops);
        if (!*ret) {
                struct seq_file *m = file->private_data;
                m->private = iter;

                /* stop the trace while dumping */
                if (iter->tr->ctrl)
                        tracer_enabled = 0;

                if (iter->trace && iter->trace->open)
                        iter->trace->open(iter);
        } else {
                kfree(iter);
                iter = NULL;
        }
        mutex_unlock(&trace_types_lock);

 out:
        return iter;
}

int tracing_open_generic(struct inode *inode, struct file *filp)
{
        filp->private_data = inode->i_private;
        return 0;
}

int tracing_release(struct inode *inode, struct file *file)
{
        struct seq_file *m = (struct seq_file *)file->private_data;
        struct trace_iterator *iter = m->private;

        mutex_lock(&trace_types_lock);
        if (iter->trace && iter->trace->close)
                iter->trace->close(iter);

        /* reenable tracing if it was previously enabled */
        if (iter->tr->ctrl)
                tracer_enabled = 1;
        mutex_unlock(&trace_types_lock);

        seq_release(inode, file);
        kfree(iter);
        return 0;
}

static int tracing_open(struct inode *inode, struct file *file)
{
        int ret;

        __tracing_open(inode, file, &ret);

        return ret;
}

static int tracing_lt_open(struct inode *inode, struct file *file)
{
        struct trace_iterator *iter;
        int ret;

        iter = __tracing_open(inode, file, &ret);

        if (!ret)
                iter->iter_flags |= TRACE_FILE_LAT_FMT;

        return ret;
}

static void notrace *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
        struct tracer *t = m->private;

        (*pos)++;

        if (t)
                t = t->next;

        m->private = t;

        return t;
}

static void *t_start(struct seq_file *m, loff_t *pos)
{
        struct tracer *t = m->private;
        loff_t l = 0;

        mutex_lock(&trace_types_lock);
        for (; t && l < *pos; t = t_next(m, t, &l))
                ;

        return t;
}

static void t_stop(struct seq_file *m, void *p)
{
        mutex_unlock(&trace_types_lock);
}

static int t_show(struct seq_file *m, void *v)
{
        struct tracer *t = v;

        if (!t)
                return 0;

        seq_printf(m, "%s", t->name);
        if (t->next)
                seq_putc(m, ' ');
        else
                seq_putc(m, '\n');

        return 0;
}

static struct seq_operations show_traces_seq_ops = {
        .start  = t_start,
        .next   = t_next,
        .stop   = t_stop,
        .show   = t_show,
};

static int show_traces_open(struct inode *inode, struct file *file)
{
        int ret;

        ret = seq_open(file, &show_traces_seq_ops);
        if (!ret) {
                struct seq_file *m = file->private_data;
                m->private = trace_types;
        }

        return ret;
}

static struct file_operations tracing_fops = {
        .open           = tracing_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = tracing_release,
};

static struct file_operations tracing_lt_fops = {
        .open           = tracing_lt_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = tracing_release,
};

static struct file_operations show_traces_fops = {
        .open           = show_traces_open,
        .read           = seq_read,
        .release        = seq_release,
};
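
/*
 * Summary of the debugfs files wired up below (all under
 * /debugfs/tracing/): "trace" and "latency_trace" read the buffers in
 * the two output formats, "available_tracers" lists registered
 * tracers, "current_tracer" selects one, "tracing_enabled"
 * starts/stops tracing, "iter_ctrl" toggles output options, and
 * "tracing_max_latency" / "tracing_thresh" expose the latency
 * bookkeeping in microseconds.
 */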

static ssize_t
tracing_iter_ctrl_read(struct file *filp, char __user *ubuf,
                       size_t cnt, loff_t *ppos)
{
        char *buf;
        int r = 0;
        int len = 0;
        int i;

        /* calculate max size */
        for (i = 0; trace_options[i]; i++) {
                len += strlen(trace_options[i]);
                len += 3; /* "no" and space */
        }

        /* +2 for \n and \0 */
        buf = kmalloc(len + 2, GFP_KERNEL);
        if (!buf)
                return -ENOMEM;

        for (i = 0; trace_options[i]; i++) {
                if (trace_flags & (1 << i))
                        r += sprintf(buf + r, "%s ", trace_options[i]);
                else
                        r += sprintf(buf + r, "no%s ", trace_options[i]);
        }

        r += sprintf(buf + r, "\n");
        WARN_ON(r >= len + 2);

        r = simple_read_from_buffer(ubuf, cnt, ppos,
                                    buf, r);

        kfree(buf);

        return r;
}

static ssize_t
tracing_iter_ctrl_write(struct file *filp, const char __user *ubuf,
                        size_t cnt, loff_t *ppos)
{
        char buf[64];
        char *cmp = buf;
        int neg = 0;
        int i;

        if (cnt > 63)
                cnt = 63;

        if (copy_from_user(&buf, ubuf, cnt))
                return -EFAULT;

        buf[cnt] = 0;

        if (strncmp(buf, "no", 2) == 0) {
                neg = 1;
                cmp += 2;
        }

        for (i = 0; trace_options[i]; i++) {
                int len = strlen(trace_options[i]);

                if (strncmp(cmp, trace_options[i], len) == 0) {
                        if (neg)
                                trace_flags &= ~(1 << i);
                        else
                                trace_flags |= (1 << i);
                        break;
                }
        }

        filp->f_pos += cnt;

        return cnt;
}

static struct file_operations tracing_iter_fops = {
        .open           = tracing_open_generic,
        .read           = tracing_iter_ctrl_read,
        .write          = tracing_iter_ctrl_write,
};

static ssize_t
tracing_ctrl_read(struct file *filp, char __user *ubuf,
                  size_t cnt, loff_t *ppos)
{
        struct trace_array *tr = filp->private_data;
        char buf[64];
        int r;

        r = sprintf(buf, "%ld\n", tr->ctrl);
        return simple_read_from_buffer(ubuf, cnt, ppos,
                                       buf, r);
}

static ssize_t
tracing_ctrl_write(struct file *filp, const char __user *ubuf,
                   size_t cnt, loff_t *ppos)
{
        struct trace_array *tr = filp->private_data;
        long val;
        char buf[64];

        if (cnt > 63)
                cnt = 63;

        if (copy_from_user(&buf, ubuf, cnt))
                return -EFAULT;

        buf[cnt] = 0;

        val = simple_strtoul(buf, NULL, 10);

        val = !!val;

        mutex_lock(&trace_types_lock);
        if (tr->ctrl ^ val) {
                if (val)
                        tracer_enabled = 1;
                else
                        tracer_enabled = 0;

                tr->ctrl = val;

                if (current_trace && current_trace->ctrl_update)
                        current_trace->ctrl_update(tr);
        }
        mutex_unlock(&trace_types_lock);

        filp->f_pos += cnt;

        return cnt;
}
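
/*
 * Runtime control example (illustrative): enable tracing with
 *
 *      echo 1 > /debugfs/tracing/tracing_enabled
 *
 * Any nonzero value is treated as 1, and the current tracer's
 * ctrl_update() hook is invoked so it can react to the state change.
 */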

static ssize_t
tracing_set_trace_read(struct file *filp, char __user *ubuf,
                       size_t cnt, loff_t *ppos)
{
        char buf[max_tracer_type_len+2];
        int r;

        mutex_lock(&trace_types_lock);
        if (current_trace)
                r = sprintf(buf, "%s\n", current_trace->name);
        else
                r = sprintf(buf, "\n");
        mutex_unlock(&trace_types_lock);

        return simple_read_from_buffer(ubuf, cnt, ppos,
                                       buf, r);
}

static ssize_t
tracing_set_trace_write(struct file *filp, const char __user *ubuf,
                        size_t cnt, loff_t *ppos)
{
        struct trace_array *tr = &global_trace;
        struct tracer *t;
        char buf[max_tracer_type_len+1];
        int i;

        if (cnt > max_tracer_type_len)
                cnt = max_tracer_type_len;

        if (copy_from_user(&buf, ubuf, cnt))
                return -EFAULT;

        buf[cnt] = 0;

        /* strip ending whitespace. */
        for (i = cnt - 1; i > 0 && isspace(buf[i]); i--)
                buf[i] = 0;

        mutex_lock(&trace_types_lock);
        for (t = trace_types; t; t = t->next) {
                if (strcmp(t->name, buf) == 0)
                        break;
        }
        if (!t || t == current_trace)
                goto out;

        if (current_trace && current_trace->reset)
                current_trace->reset(tr);

        current_trace = t;
        if (t->init)
                t->init(tr);

 out:
        mutex_unlock(&trace_types_lock);

        filp->f_pos += cnt;

        return cnt;
}
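
/*
 * Example (the tracer name is illustrative; see available_tracers for
 * the real list):
 *
 *      cat /debugfs/tracing/available_tracers
 *      echo none > /debugfs/tracing/current_tracer
 *
 * Switching tracers resets the old one via its reset() hook before
 * the new one's init() runs.
 */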

static ssize_t
tracing_max_lat_read(struct file *filp, char __user *ubuf,
                     size_t cnt, loff_t *ppos)
{
        unsigned long *ptr = filp->private_data;
        char buf[64];
        int r;

        r = snprintf(buf, 64, "%ld\n",
                     *ptr == (unsigned long)-1 ? -1 : nsecs_to_usecs(*ptr));
        if (r > 64)
                r = 64;
        return simple_read_from_buffer(ubuf, cnt, ppos,
                                       buf, r);
}

static ssize_t
tracing_max_lat_write(struct file *filp, const char __user *ubuf,
                      size_t cnt, loff_t *ppos)
{
        long *ptr = filp->private_data;
        long val;
        char buf[64];

        if (cnt > 63)
                cnt = 63;

        if (copy_from_user(&buf, ubuf, cnt))
                return -EFAULT;

        buf[cnt] = 0;

        val = simple_strtoul(buf, NULL, 10);

        *ptr = val * 1000;

        return cnt;
}

static struct file_operations tracing_max_lat_fops = {
        .open           = tracing_open_generic,
        .read           = tracing_max_lat_read,
        .write          = tracing_max_lat_write,
};

static struct file_operations tracing_ctrl_fops = {
        .open           = tracing_open_generic,
        .read           = tracing_ctrl_read,
        .write          = tracing_ctrl_write,
};

static struct file_operations set_tracer_fops = {
        .open           = tracing_open_generic,
        .read           = tracing_set_trace_read,
        .write          = tracing_set_trace_write,
};

#ifdef CONFIG_DYNAMIC_FTRACE

static ssize_t
tracing_read_long(struct file *filp, char __user *ubuf,
                  size_t cnt, loff_t *ppos)
{
        unsigned long *p = filp->private_data;
        char buf[64];
        int r;

        r = sprintf(buf, "%ld\n", *p);
        return simple_read_from_buffer(ubuf, cnt, ppos,
                                       buf, r);
}

static struct file_operations tracing_read_long_fops = {
        .open           = tracing_open_generic,
        .read           = tracing_read_long,
};

#endif

static struct dentry *d_tracer;

struct dentry *tracing_init_dentry(void)
{
        static int once;

        if (d_tracer)
                return d_tracer;

        d_tracer = debugfs_create_dir("tracing", NULL);

        if (!d_tracer && !once) {
                once = 1;
                pr_warning("Could not create debugfs directory 'tracing'\n");
                return NULL;
        }

        return d_tracer;
}

static __init void tracer_init_debugfs(void)
{
        struct dentry *d_tracer;
        struct dentry *entry;

        d_tracer = tracing_init_dentry();

        entry = debugfs_create_file("tracing_enabled", 0644, d_tracer,
                                    &global_trace, &tracing_ctrl_fops);
        if (!entry)
                pr_warning("Could not create debugfs 'tracing_enabled' entry\n");

        entry = debugfs_create_file("iter_ctrl", 0644, d_tracer,
                                    NULL, &tracing_iter_fops);
        if (!entry)
                pr_warning("Could not create debugfs 'iter_ctrl' entry\n");

        entry = debugfs_create_file("latency_trace", 0444, d_tracer,
                                    &global_trace, &tracing_lt_fops);
        if (!entry)
                pr_warning("Could not create debugfs 'latency_trace' entry\n");

        entry = debugfs_create_file("trace", 0444, d_tracer,
                                    &global_trace, &tracing_fops);
        if (!entry)
                pr_warning("Could not create debugfs 'trace' entry\n");

        entry = debugfs_create_file("available_tracers", 0444, d_tracer,
                                    &global_trace, &show_traces_fops);
        if (!entry)
                pr_warning("Could not create debugfs 'available_tracers' entry\n");

        entry = debugfs_create_file("current_tracer", 0444, d_tracer,
                                    &global_trace, &set_tracer_fops);
        if (!entry)
                pr_warning("Could not create debugfs 'current_tracer' entry\n");

        entry = debugfs_create_file("tracing_max_latency", 0644, d_tracer,
                                    &tracing_max_latency,
                                    &tracing_max_lat_fops);
        if (!entry)
                pr_warning("Could not create debugfs "
                           "'tracing_max_latency' entry\n");

        entry = debugfs_create_file("tracing_thresh", 0644, d_tracer,
                                    &tracing_thresh, &tracing_max_lat_fops);
        if (!entry)
                pr_warning("Could not create debugfs "
                           "'tracing_thresh' entry\n");

#ifdef CONFIG_DYNAMIC_FTRACE
        entry = debugfs_create_file("dyn_ftrace_total_info", 0444, d_tracer,
                                    &ftrace_update_tot_cnt,
                                    &tracing_read_long_fops);
        if (!entry)
                pr_warning("Could not create debugfs "
                           "'dyn_ftrace_total_info' entry\n");
#endif
}

/* dummy trace to disable tracing */
static struct tracer no_tracer __read_mostly =
{
        .name           = "none",
};

static inline notrace int page_order(const unsigned long size)
{
        const unsigned long nr_pages = DIV_ROUND_UP(size, PAGE_SIZE);
        return ilog2(roundup_pow_of_two(nr_pages));
}
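
/*
 * Worked example (the entry size is illustrative): with the default
 * 4096 entries and a 32-byte trace_entry, size = 131072 bytes = 32
 * 4K pages, so page_order() returns ilog2(32) = 5 and
 * __get_free_pages() hands back a 2^5-page buffer. Because the
 * allocation is rounded up to a power-of-two number of pages, the
 * usable entry count recomputed below can exceed trace_nr_entries.
 */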

__init static int tracer_alloc_buffers(void)
{
        const int order = page_order(trace_nr_entries * TRACE_ENTRY_SIZE);
        const unsigned long size = (1UL << order) << PAGE_SHIFT;
        struct trace_entry *array;
        int i;

        for_each_possible_cpu(i) {
                global_trace.data[i] = &per_cpu(global_trace_cpu, i);
                max_tr.data[i] = &per_cpu(max_data, i);

                array = (struct trace_entry *)
                          __get_free_pages(GFP_KERNEL, order);
                if (array == NULL) {
                        printk(KERN_ERR "tracer: failed to allocate"
                               " %ld bytes for trace buffer!\n", size);
                        goto free_buffers;
                }
                global_trace.data[i]->trace = array;

/* Only allocate if we are actually using the max trace */
#ifdef CONFIG_TRACER_MAX_TRACE
                array = (struct trace_entry *)
                          __get_free_pages(GFP_KERNEL, order);
                if (array == NULL) {
                        printk(KERN_ERR "wakeup tracer: failed to allocate"
                               " %ld bytes for trace buffer!\n", size);
                        goto free_buffers;
                }
                max_tr.data[i]->trace = array;
#endif
        }

        /*
         * Since we allocate by orders of pages, we may be able to
         * round up a bit.
         */
        global_trace.entries = size / TRACE_ENTRY_SIZE;
        max_tr.entries = global_trace.entries;

        pr_info("tracer: %ld bytes allocated for %ld",
                size, trace_nr_entries);
        pr_info(" entries of %ld bytes\n", (long)TRACE_ENTRY_SIZE);
        pr_info("   actual entries %ld\n", global_trace.entries);

        tracer_init_debugfs();

        trace_init_cmdlines();

        register_tracer(&no_tracer);
        current_trace = &no_tracer;

        return 0;

 free_buffers:
        for (i-- ; i >= 0; i--) {
                struct trace_array_cpu *data = global_trace.data[i];

                if (data && data->trace) {
                        free_pages((unsigned long)data->trace, order);
                        data->trace = NULL;
                }

#ifdef CONFIG_TRACER_MAX_TRACE
                data = max_tr.data[i];
                if (data && data->trace) {
                        free_pages((unsigned long)data->trace, order);
                        data->trace = NULL;
                }
#endif
        }
        return -ENOMEM;
}

device_initcall(tracer_alloc_buffers);