/*
 * ring buffer based function tracer
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally taken from the RT patch by:
 *    Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code from the latency_tracer, that is:
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 William Lee Irwin III
 */
#include <linux/utsrelease.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/pagemap.h>
#include <linux/hardirq.h>
#include <linux/linkage.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/gfp.h>
#include <linux/fs.h>

#include "trace.h"
unsigned long __read_mostly tracing_max_latency = (cycle_t)ULONG_MAX;
unsigned long __read_mostly tracing_thresh;

/* convert nanoseconds to microseconds, rounding to nearest */
static long notrace
ns2usecs(cycle_t nsec)
{
	nsec += 500;
	do_div(nsec, 1000);
	return nsec;
}

static atomic_t tracer_counter;
static struct trace_array global_trace;

static DEFINE_PER_CPU(struct trace_array_cpu, global_trace_cpu);

static struct trace_array max_tr;

static DEFINE_PER_CPU(struct trace_array_cpu, max_data);

static int tracer_enabled;
static unsigned long trace_nr_entries = 16384UL;

static struct tracer *trace_types __read_mostly;
static struct tracer *current_trace __read_mostly;
static int max_tracer_type_len;

static DEFINE_MUTEX(trace_types_lock);
#define ENTRIES_PER_PAGE (PAGE_SIZE / sizeof(struct trace_entry))
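/*
 * Sizing example (illustrative, assuming 4096-byte pages and a
 * hypothetical 64-byte struct trace_entry): ENTRIES_PER_PAGE would be
 * 64, so the default trace_nr_entries of 16384 costs 256 pages per CPU.
 */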
static int __init set_nr_entries(char *str)
{
	if (!str)
		return 0;
	trace_nr_entries = simple_strtoul(str, &str, 0);
	return 1;
}
__setup("trace_entries=", set_nr_entries);
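/*
 * Boot-time usage sketch: the buffer size can be requested on the
 * kernel command line, e.g.
 *
 *	trace_entries=65536
 *
 * and is then rounded up to whole pages in tracer_alloc_buffers().
 */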
enum trace_type {
	__TRACE_FIRST_TYPE = 0,

	TRACE_FN,
	TRACE_CTX,

	__TRACE_LAST_TYPE
};

enum trace_flag_type {
	TRACE_FLAG_IRQS_OFF = 0x01,
	TRACE_FLAG_NEED_RESCHED = 0x02,
	TRACE_FLAG_HARDIRQ = 0x04,
	TRACE_FLAG_SOFTIRQ = 0x08,
};

enum trace_iterator_flags {
	TRACE_ITER_PRINT_PARENT = 0x01,
	TRACE_ITER_SYM_OFFSET = 0x02,
	TRACE_ITER_SYM_ADDR = 0x04,
	TRACE_ITER_VERBOSE = 0x08,
};

#define TRACE_ITER_SYM_MASK \
	(TRACE_ITER_PRINT_PARENT|TRACE_ITER_SYM_OFFSET|TRACE_ITER_SYM_ADDR)

/* These must match the bit positions above */
static const char *trace_options[] = {
	"print-parent",
	"sym-offset",
	"sym-addr",
	"verbose",
	NULL
};

static unsigned trace_flags;

static DEFINE_SPINLOCK(ftrace_max_lock);
/*
 * Copy the new maximum trace into the separate maximum-trace
 * structure. (this way the maximum trace is permanently saved,
 * for later retrieval via /debugfs/tracing/latency_trace)
 */
static void notrace
__update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
	struct trace_array_cpu *data = tr->data[cpu];

	max_tr.cpu = cpu;
	max_tr.time_start = data->preempt_timestamp;

	data = max_tr.data[cpu];
	data->saved_latency = tracing_max_latency;

	memcpy(data->comm, tsk->comm, TASK_COMM_LEN);
	data->pid = tsk->pid;
	data->uid = tsk->uid;
	data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
	data->policy = tsk->policy;
	data->rt_priority = tsk->rt_priority;

	/* record this task's comm */
	tracing_record_cmdline(current);
}
notrace void
update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
	struct trace_array_cpu *data;
	void *save_trace;
	struct list_head save_pages;
	int i;

	WARN_ON_ONCE(!irqs_disabled());
	spin_lock(&ftrace_max_lock);
	/* clear out all the previous traces */
	for_each_possible_cpu(i) {
		data = tr->data[i];
		save_trace = max_tr.data[i]->trace;
		save_pages = max_tr.data[i]->trace_pages;
		memcpy(max_tr.data[i], data, sizeof(*data));
		data->trace = save_trace;
		data->trace_pages = save_pages;
	}

	__update_max_tr(tr, tsk, cpu);
	spin_unlock(&ftrace_max_lock);
}

/**
 * update_max_tr_single - only copy one trace over, and reset the rest
 * @tr - tracer
 * @tsk - task with the latency
 * @cpu - the cpu of the buffer to copy.
 */
notrace void
update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
	struct trace_array_cpu *data = tr->data[cpu];
	void *save_trace;
	struct list_head save_pages;
	int i;

	WARN_ON_ONCE(!irqs_disabled());
	spin_lock(&ftrace_max_lock);
	for_each_possible_cpu(i)
		tracing_reset(max_tr.data[i]);

	save_trace = max_tr.data[cpu]->trace;
	save_pages = max_tr.data[cpu]->trace_pages;
	memcpy(max_tr.data[cpu], data, sizeof(*data));
	data->trace = save_trace;
	data->trace_pages = save_pages;

	__update_max_tr(tr, tsk, cpu);
	spin_unlock(&ftrace_max_lock);
}
int register_tracer(struct tracer *type)
{
	struct tracer *t;
	int len;
	int ret = 0;

	if (!type->name) {
		pr_info("Tracer must have a name\n");
		return -1;
	}

	mutex_lock(&trace_types_lock);
	for (t = trace_types; t; t = t->next) {
		if (strcmp(type->name, t->name) == 0) {
			/* already found */
			pr_info("Trace %s already registered\n",
				type->name);
			ret = -1;
			goto out;
		}
	}

	type->next = trace_types;
	trace_types = type;
	len = strlen(type->name);
	if (len > max_tracer_type_len)
		max_tracer_type_len = len;

 out:
	mutex_unlock(&trace_types_lock);

	return ret;
}

void unregister_tracer(struct tracer *type)
{
	struct tracer **t;
	int len;

	mutex_lock(&trace_types_lock);
	for (t = &trace_types; *t; t = &(*t)->next) {
		if (*t == type)
			goto found;
	}
	pr_info("Trace %s not registered\n", type->name);
	goto out;

 found:
	*t = (*t)->next;
	if (strlen(type->name) != max_tracer_type_len)
		goto out;

	/* refresh max_tracer_type_len from the remaining tracers */
	max_tracer_type_len = 0;
	for (t = &trace_types; *t; t = &(*t)->next) {
		len = strlen((*t)->name);
		if (len > max_tracer_type_len)
			max_tracer_type_len = len;
	}

 out:
	mutex_unlock(&trace_types_lock);
}
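/*
 * Registration sketch (hedged): a tracer plugin fills in a struct tracer
 * and registers it at init time.  The names below (my_tracer, my_init,
 * my_reset) are illustrative, not part of this file:
 *
 *	static void my_init(struct trace_array *tr) { }
 *	static void my_reset(struct trace_array *tr) { }
 *
 *	static struct tracer my_tracer __read_mostly = {
 *		.name	= "mytracer",
 *		.init	= my_init,
 *		.reset	= my_reset,
 *	};
 *
 *	static __init int my_tracer_setup(void)
 *	{
 *		return register_tracer(&my_tracer);
 *	}
 *	device_initcall(my_tracer_setup);
 */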
void notrace tracing_reset(struct trace_array_cpu *data)
{
	data->trace_idx = 0;
	data->trace_current = data->trace;
	data->trace_current_idx = 0;
}

static void notrace
function_trace_call(unsigned long ip, unsigned long parent_ip)
{
	struct trace_array *tr = &global_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;

	if (unlikely(!tracer_enabled))
		return;

	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = tr->data[cpu];
	disabled = atomic_inc_return(&data->disabled);

	if (likely(disabled == 1))
		ftrace(tr, data, ip, parent_ip, flags);

	atomic_dec(&data->disabled);
	local_irq_restore(flags);
}

static struct ftrace_ops trace_ops __read_mostly =
{
	.func = function_trace_call,
};

notrace void tracing_start_function_trace(void)
{
	register_ftrace_function(&trace_ops);
}

notrace void tracing_stop_function_trace(void)
{
	unregister_ftrace_function(&trace_ops);
}
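/*
 * Usage sketch (hedged): a tracer that wants per-function events simply
 * brackets its active window with these helpers, e.g. from its
 * ctrl_update callback:
 *
 *	if (tr->ctrl)
 *		tracing_start_function_trace();
 *	else
 *		tracing_stop_function_trace();
 */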
#define SAVED_CMDLINES 128
static unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
static unsigned map_cmdline_to_pid[SAVED_CMDLINES];
static char saved_cmdlines[SAVED_CMDLINES][TASK_COMM_LEN];
static int cmdline_idx;
static DEFINE_SPINLOCK(trace_cmdline_lock);
atomic_t trace_record_cmdline_disabled;

static void trace_init_cmdlines(void)
{
	memset(&map_pid_to_cmdline, -1, sizeof(map_pid_to_cmdline));
	memset(&map_cmdline_to_pid, -1, sizeof(map_cmdline_to_pid));
	cmdline_idx = 0;
}

notrace void trace_stop_cmdline_recording(void);
static void notrace trace_save_cmdline(struct task_struct *tsk)
{
	unsigned map;
	unsigned idx;

	if (!tsk->pid || unlikely(tsk->pid > PID_MAX_DEFAULT))
		return;

	/*
	 * It's not the end of the world if we don't get
	 * the lock, but we also don't want to spin
	 * nor do we want to disable interrupts,
	 * so if we miss here, then better luck next time.
	 */
	if (!spin_trylock(&trace_cmdline_lock))
		return;

	idx = map_pid_to_cmdline[tsk->pid];
	if (idx >= SAVED_CMDLINES) {
		idx = (cmdline_idx + 1) % SAVED_CMDLINES;

		map = map_cmdline_to_pid[idx];
		if (map <= PID_MAX_DEFAULT)
			map_pid_to_cmdline[map] = (unsigned)-1;

		map_pid_to_cmdline[tsk->pid] = idx;

		cmdline_idx = idx;
	}

	memcpy(&saved_cmdlines[idx], tsk->comm, TASK_COMM_LEN);

	spin_unlock(&trace_cmdline_lock);
}

static notrace char *trace_find_cmdline(int pid)
{
	char *cmdline = "<...>";
	unsigned map;

	if (!pid)
		return "<idle>";

	if (pid > PID_MAX_DEFAULT)
		goto out;

	map = map_pid_to_cmdline[pid];
	if (map >= SAVED_CMDLINES)
		goto out;

	cmdline = saved_cmdlines[map];

 out:
	return cmdline;
}

notrace void tracing_record_cmdline(struct task_struct *tsk)
{
	if (atomic_read(&trace_record_cmdline_disabled))
		return;

	trace_save_cmdline(tsk);
}
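/*
 * Worked example (illustrative): recording pid 4251 ("bash") stores
 * "bash" in saved_cmdlines[idx] and sets map_pid_to_cmdline[4251] = idx.
 * When that slot is later recycled for another pid, the stale pid's map
 * entry is reset to (unsigned)-1, so trace_find_cmdline() falls back to
 * "<...>" instead of reporting the wrong comm.
 */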
static inline notrace struct trace_entry *
tracing_get_trace_entry(struct trace_array *tr,
			struct trace_array_cpu *data)
{
	unsigned long idx, idx_next;
	struct trace_entry *entry;
	struct page *page;
	struct list_head *next;

	data->trace_idx++;
	idx = data->trace_current_idx;
	idx_next = idx + 1;

	entry = data->trace_current + idx * TRACE_ENTRY_SIZE;

	/* wrap to the next page of the ring, or back to the first page */
	if (unlikely(idx_next >= ENTRIES_PER_PAGE)) {
		page = virt_to_page(data->trace_current);
		if (unlikely(&page->lru == data->trace_pages.prev))
			next = data->trace_pages.next;
		else
			next = page->lru.next;
		page = list_entry(next, struct page, lru);
		data->trace_current = page_address(page);
		idx_next = 0;
	}

	data->trace_current_idx = idx_next;

	return entry;
}

static inline notrace void
tracing_generic_entry_update(struct trace_entry *entry,
			     unsigned long flags)
{
	struct task_struct *tsk = current;
	unsigned long pc;

	pc = preempt_count();

	entry->idx = atomic_inc_return(&tracer_counter);
	entry->preempt_count = pc & 0xff;
	entry->pid = tsk->pid;
	entry->t = now(raw_smp_processor_id());
	entry->flags = (irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) |
		((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
		((pc & SOFTIRQ_MASK) ? TRACE_FLAG_SOFTIRQ : 0) |
		(need_resched() ? TRACE_FLAG_NEED_RESCHED : 0);
}
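/*
 * Example (illustrative): an entry logged from a hardirq handler that
 * interrupted an irqs-off region carries
 *
 *	entry->flags == (TRACE_FLAG_IRQS_OFF | TRACE_FLAG_HARDIRQ)
 *
 * i.e. 0x05, which the latency format below renders as 'd' and 'h'.
 */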
notrace void
ftrace(struct trace_array *tr, struct trace_array_cpu *data,
       unsigned long ip, unsigned long parent_ip,
       unsigned long flags)
{
	struct trace_entry *entry;

	entry = tracing_get_trace_entry(tr, data);
	tracing_generic_entry_update(entry, flags);
	entry->type = TRACE_FN;
	entry->fn.ip = ip;
	entry->fn.parent_ip = parent_ip;
}

notrace void
tracing_sched_switch_trace(struct trace_array *tr,
			   struct trace_array_cpu *data,
			   struct task_struct *prev, struct task_struct *next,
			   unsigned long flags)
{
	struct trace_entry *entry;

	entry = tracing_get_trace_entry(tr, data);
	tracing_generic_entry_update(entry, flags);
	entry->type = TRACE_CTX;
	entry->ctx.prev_pid = prev->pid;
	entry->ctx.prev_prio = prev->prio;
	entry->ctx.prev_state = prev->state;
	entry->ctx.next_pid = next->pid;
	entry->ctx.next_prio = next->prio;
}

enum trace_file_type {
	TRACE_FILE_LAT_FMT = 1,
};
static struct trace_entry *
trace_entry_idx(struct trace_array *tr, struct trace_array_cpu *data,
		struct trace_iterator *iter, int cpu)
{
	struct page *page;
	struct trace_entry *array;

	if (iter->next_idx[cpu] >= tr->entries ||
	    iter->next_idx[cpu] >= data->trace_idx)
		return NULL;

	if (!iter->next_page[cpu]) {
		/*
		 * Initialize. If the count of elements in
		 * this buffer is greater than the max entries
		 * we had an underrun. Which means we looped around.
		 * We can simply use the current pointer as our
		 * starting point.
		 */
		if (data->trace_idx >= tr->entries) {
			page = virt_to_page(data->trace_current);
			iter->next_page[cpu] = &page->lru;
			iter->next_page_idx[cpu] = data->trace_current_idx;
		} else {
			iter->next_page[cpu] = data->trace_pages.next;
			iter->next_page_idx[cpu] = 0;
		}
	}

	page = list_entry(iter->next_page[cpu], struct page, lru);
	array = page_address(page);

	return &array[iter->next_page_idx[cpu]];
}
static notrace struct trace_entry *
find_next_entry(struct trace_iterator *iter, int *ent_cpu)
{
	struct trace_array *tr = iter->tr;
	struct trace_entry *ent, *next = NULL;
	int next_cpu = -1;
	int cpu;

	/* pick the oldest pending entry across all per-cpu buffers */
	for_each_possible_cpu(cpu) {
		if (!tr->data[cpu]->trace)
			continue;
		ent = trace_entry_idx(tr, tr->data[cpu], iter, cpu);
		if (ent &&
		    (!next || (long)(next->idx - ent->idx) > 0)) {
			next = ent;
			next_cpu = cpu;
		}
	}

	if (ent_cpu)
		*ent_cpu = next_cpu;

	return next;
}

static void *find_next_entry_inc(struct trace_iterator *iter)
{
	struct trace_entry *next;
	int next_cpu = -1;

	next = find_next_entry(iter, &next_cpu);

	if (next) {
		iter->idx++;
		iter->next_idx[next_cpu]++;
		iter->next_page_idx[next_cpu]++;
		if (iter->next_page_idx[next_cpu] >= ENTRIES_PER_PAGE) {
			struct trace_array_cpu *data = iter->tr->data[next_cpu];

			iter->next_page_idx[next_cpu] = 0;
			iter->next_page[next_cpu] =
				iter->next_page[next_cpu]->next;
			if (iter->next_page[next_cpu] == &data->trace_pages)
				iter->next_page[next_cpu] =
					data->trace_pages.next;
		}
	}
	iter->ent = next;
	iter->cpu = next_cpu;

	return next ? iter : NULL;
}
static void notrace *
s_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct trace_iterator *iter = m->private;
	void *ent;
	void *last_ent = iter->ent;
	int i = (int)*pos;

	(*pos)++;

	/* can't go backwards */
	if (iter->idx > i)
		return NULL;

	if (iter->idx < 0)
		ent = find_next_entry_inc(iter);
	else
		ent = iter;

	while (ent && iter->idx < i)
		ent = find_next_entry_inc(iter);

	iter->pos = *pos;

	if (last_ent && !ent)
		seq_puts(m, "\n\nvim:ft=help\n");

	return ent;
}

static void *s_start(struct seq_file *m, loff_t *pos)
{
	struct trace_iterator *iter = m->private;
	void *p = NULL;
	loff_t l = 0;
	int i;

	mutex_lock(&trace_types_lock);

	if (!current_trace || current_trace != iter->trace)
		return NULL;

	atomic_inc(&trace_record_cmdline_disabled);

	/* let the tracer grab locks here if needed */
	if (current_trace->start)
		current_trace->start(iter);

	if (*pos != iter->pos) {
		iter->ent = NULL;
		iter->cpu = 0;
		iter->idx = -1;

		for_each_possible_cpu(i) {
			iter->next_idx[i] = 0;
			iter->next_page[i] = NULL;
		}

		for (p = iter; p && l < *pos; p = s_next(m, p, &l))
			;

	} else {
		l = *pos - 1;
		p = s_next(m, p, &l);
	}

	return p;
}

static void s_stop(struct seq_file *m, void *p)
{
	struct trace_iterator *iter = m->private;

	atomic_dec(&trace_record_cmdline_disabled);

	/* let the tracer release locks here if needed */
	if (current_trace && current_trace == iter->trace && iter->trace->stop)
		iter->trace->stop(iter);

	mutex_unlock(&trace_types_lock);
}
static notrace void
seq_print_sym_short(struct seq_file *m, const char *fmt, unsigned long address)
{
#ifdef CONFIG_KALLSYMS
	char str[KSYM_SYMBOL_LEN];

	kallsyms_lookup(address, NULL, NULL, NULL, str);

	seq_printf(m, fmt, str);
#endif
}

static notrace void
seq_print_sym_offset(struct seq_file *m, const char *fmt, unsigned long address)
{
#ifdef CONFIG_KALLSYMS
	char str[KSYM_SYMBOL_LEN];

	sprint_symbol(str, address);
	seq_printf(m, fmt, str);
#endif
}

#ifndef CONFIG_64BIT
# define IP_FMT "%08lx"
#else
# define IP_FMT "%016lx"
#endif

static notrace void
seq_print_ip_sym(struct seq_file *m, unsigned long ip, unsigned long sym_flags)
{
	if (!ip) {
		seq_printf(m, "0");
		return;
	}

	if (sym_flags & TRACE_ITER_SYM_OFFSET)
		seq_print_sym_offset(m, "%s", ip);
	else
		seq_print_sym_short(m, "%s", ip);

	if (sym_flags & TRACE_ITER_SYM_ADDR)
		seq_printf(m, " <" IP_FMT ">", ip);
}
static void notrace print_lat_help_header(struct seq_file *m)
{
	seq_puts(m, "#                _------=> CPU#            \n");
	seq_puts(m, "#               / _-----=> irqs-off        \n");
	seq_puts(m, "#              | / _----=> need-resched    \n");
	seq_puts(m, "#              || / _---=> hardirq/softirq \n");
	seq_puts(m, "#              ||| / _--=> preempt-depth   \n");
	seq_puts(m, "#              |||| /                      \n");
	seq_puts(m, "#              |||||     delay             \n");
	seq_puts(m, "#  cmd     pid ||||| time  |   caller      \n");
	seq_puts(m, "#     \\   /    |||||   \\   |   /           \n");
}

static void notrace print_func_help_header(struct seq_file *m)
{
	seq_puts(m, "#           TASK-PID   CPU#    TIMESTAMP  FUNCTION\n");
	seq_puts(m, "#              | |      |          |         |\n");
}
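/*
 * Illustrative output line for the format above (task, pid, cpu and
 * function names are hypothetical):
 *
 *	            bash-4251  [01]  1029.221300: do_sys_open <-sys_open
 */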
static void notrace
print_trace_header(struct seq_file *m, struct trace_iterator *iter)
{
	unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
	struct trace_array *tr = iter->tr;
	struct trace_array_cpu *data = tr->data[tr->cpu];
	struct tracer *type = current_trace;
	unsigned long total = 0;
	unsigned long entries = 0;
	int cpu;
	const char *name = "preemption";

	if (type)
		name = type->name;

	for_each_possible_cpu(cpu) {
		if (tr->data[cpu]->trace) {
			total += tr->data[cpu]->trace_idx;
			if (tr->data[cpu]->trace_idx > tr->entries)
				entries += tr->entries;
			else
				entries += tr->data[cpu]->trace_idx;
		}
	}

	seq_printf(m, "%s latency trace v1.1.5 on %s\n",
		   name, UTS_RELEASE);
	seq_puts(m, "-----------------------------------"
		 "---------------------------------\n");
	seq_printf(m, " latency: %lu us, #%lu/%lu, CPU#%d |"
		   " (M:%s VP:%d, KP:%d, SP:%d HP:%d",
		   nsecs_to_usecs(data->saved_latency),
		   entries,
		   total,
		   tr->cpu,
#if defined(CONFIG_PREEMPT_NONE)
		   "server",
#elif defined(CONFIG_PREEMPT_VOLUNTARY)
		   "desktop",
#elif defined(CONFIG_PREEMPT_DESKTOP)
		   "preempt",
#else
		   "unknown",
#endif
		   /* These are reserved for later use */
		   0, 0, 0, 0);
#ifdef CONFIG_SMP
	seq_printf(m, " #P:%d)\n", num_online_cpus());
#else
	seq_puts(m, ")\n");
#endif
	seq_puts(m, "    -----------------\n");
	seq_printf(m, "    | task: %.16s-%d "
		   "(uid:%d nice:%ld policy:%ld rt_prio:%ld)\n",
		   data->comm, data->pid, data->uid, data->nice,
		   data->policy, data->rt_priority);
	seq_puts(m, "    -----------------\n");

	if (data->critical_start) {
		seq_puts(m, " => started at: ");
		seq_print_ip_sym(m, data->critical_start, sym_flags);
		seq_puts(m, "\n => ended at:   ");
		seq_print_ip_sym(m, data->critical_end, sym_flags);
		seq_puts(m, "\n");
	}

	seq_puts(m, "\n");
}

unsigned long nsecs_to_usecs(unsigned long nsecs)
{
	return nsecs / 1000;
}
static void notrace
lat_print_generic(struct seq_file *m, struct trace_entry *entry, int cpu)
{
	int hardirq, softirq;
	char *comm;

	comm = trace_find_cmdline(entry->pid);

	seq_printf(m, "%8.8s-%-5d ", comm, entry->pid);
	seq_printf(m, "%d", cpu);
	seq_printf(m, "%c%c",
		   (entry->flags & TRACE_FLAG_IRQS_OFF) ? 'd' : '.',
		   ((entry->flags & TRACE_FLAG_NEED_RESCHED) ? 'N' : '.'));

	hardirq = entry->flags & TRACE_FLAG_HARDIRQ;
	softirq = entry->flags & TRACE_FLAG_SOFTIRQ;
	if (hardirq && softirq)
		seq_putc(m, 'H');
	else {
		if (hardirq)
			seq_putc(m, 'h');
		else {
			if (softirq)
				seq_putc(m, 's');
			else
				seq_putc(m, '.');
		}
	}

	if (entry->preempt_count)
		seq_printf(m, "%x", entry->preempt_count);
	else
		seq_puts(m, ".");
}

unsigned long preempt_mark_thresh = 100;

static notrace void
lat_print_timestamp(struct seq_file *m, unsigned long long abs_usecs,
		    unsigned long rel_usecs)
{
	seq_printf(m, " %4lldus", abs_usecs);
	if (rel_usecs > preempt_mark_thresh)
		seq_puts(m, "!: ");
	else if (rel_usecs > 1)
		seq_puts(m, "+: ");
	else
		seq_puts(m, " : ");
}
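/*
 * Example (illustrative): a timestamp printed as " 205us!:" means the
 * delay to the *next* entry exceeded preempt_mark_thresh (100us); '+'
 * marks delays above 1us, and ' ' anything smaller.
 */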
static const char state_to_char[] = TASK_STATE_TO_CHAR_STR;

static void notrace
print_lat_fmt(struct seq_file *m, struct trace_iterator *iter,
	      unsigned int trace_idx, int cpu)
{
	unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
	struct trace_entry *next_entry = find_next_entry(iter, NULL);
	unsigned long verbose = (trace_flags & TRACE_ITER_VERBOSE);
	struct trace_entry *entry = iter->ent;
	unsigned long abs_usecs;
	unsigned long rel_usecs;
	char *comm;
	int S;

	if (!next_entry)
		next_entry = entry;
	rel_usecs = ns2usecs(next_entry->t - entry->t);
	abs_usecs = ns2usecs(entry->t - iter->tr->time_start);

	if (verbose) {
		comm = trace_find_cmdline(entry->pid);
		seq_printf(m, "%16s %5d %d %d %08x %08x [%08lx]"
			   " %ld.%03ldms (+%ld.%03ldms): ",
			   comm,
			   entry->pid, cpu, entry->flags,
			   entry->preempt_count, trace_idx,
			   ns2usecs(entry->t),
			   abs_usecs/1000,
			   abs_usecs % 1000, rel_usecs/1000, rel_usecs % 1000);
	} else {
		lat_print_generic(m, entry, cpu);
		lat_print_timestamp(m, abs_usecs, rel_usecs);
	}
	switch (entry->type) {
	case TRACE_FN:
		seq_print_ip_sym(m, entry->fn.ip, sym_flags);
		seq_puts(m, " (");
		seq_print_ip_sym(m, entry->fn.parent_ip, sym_flags);
		seq_puts(m, ")\n");
		break;
	case TRACE_CTX:
		S = entry->ctx.prev_state < sizeof(state_to_char) ?
			state_to_char[entry->ctx.prev_state] : 'X';
		comm = trace_find_cmdline(entry->ctx.next_pid);
		seq_printf(m, " %d:%d:%c --> %d:%d %s\n",
			   entry->ctx.prev_pid,
			   entry->ctx.prev_prio,
			   S,
			   entry->ctx.next_pid,
			   entry->ctx.next_prio,
			   comm);
		break;
	}
}
static void notrace
print_trace_fmt(struct seq_file *m, struct trace_iterator *iter)
{
	unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
	struct trace_entry *entry = iter->ent;
	unsigned long usec_rem;
	unsigned long long t;
	unsigned long secs;
	char *comm;
	int S;

	comm = trace_find_cmdline(iter->ent->pid);

	t = ns2usecs(entry->t);
	usec_rem = do_div(t, 1000000ULL);
	secs = (unsigned long)t;

	seq_printf(m, "%16s-%-5d ", comm, entry->pid);
	seq_printf(m, "[%02d] ", iter->cpu);
	seq_printf(m, "%5lu.%06lu: ", secs, usec_rem);

	switch (entry->type) {
	case TRACE_FN:
		seq_print_ip_sym(m, entry->fn.ip, sym_flags);
		if ((sym_flags & TRACE_ITER_PRINT_PARENT) &&
						entry->fn.parent_ip) {
			seq_printf(m, " <-");
			seq_print_ip_sym(m, entry->fn.parent_ip, sym_flags);
		}
		seq_printf(m, "\n");
		break;
	case TRACE_CTX:
		S = entry->ctx.prev_state < sizeof(state_to_char) ?
			state_to_char[entry->ctx.prev_state] : 'X';
		seq_printf(m, " %d:%d:%c ==> %d:%d\n",
			   entry->ctx.prev_pid,
			   entry->ctx.prev_prio,
			   S,
			   entry->ctx.next_pid,
			   entry->ctx.next_prio);
		break;
	}
}
static int trace_empty(struct trace_iterator *iter)
{
	struct trace_array_cpu *data;
	int cpu;

	for_each_possible_cpu(cpu) {
		data = iter->tr->data[cpu];

		if (data->trace &&
		    data->trace_idx)
			return 0;
	}
	return 1;
}

static int s_show(struct seq_file *m, void *v)
{
	struct trace_iterator *iter = v;

	if (iter->ent == NULL) {
		if (iter->tr) {
			seq_printf(m, "# tracer: %s\n", iter->trace->name);
			seq_puts(m, "#\n");
		}
		if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
			/* print nothing if the buffers are empty */
			if (trace_empty(iter))
				return 0;
			print_trace_header(m, iter);
			if (!(trace_flags & TRACE_ITER_VERBOSE))
				print_lat_help_header(m);
		} else {
			if (!(trace_flags & TRACE_ITER_VERBOSE))
				print_func_help_header(m);
		}
	} else {
		if (iter->iter_flags & TRACE_FILE_LAT_FMT)
			print_lat_fmt(m, iter, iter->idx, iter->cpu);
		else
			print_trace_fmt(m, iter);
	}

	return 0;
}

static struct seq_operations tracer_seq_ops = {
	.start = s_start,
	.next = s_next,
	.stop = s_stop,
	.show = s_show,
};
static struct trace_iterator notrace *
__tracing_open(struct inode *inode, struct file *file, int *ret)
{
	struct trace_iterator *iter;

	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter) {
		*ret = -ENOMEM;
		goto out;
	}

	mutex_lock(&trace_types_lock);
	if (current_trace && current_trace->print_max)
		iter->tr = &max_tr;
	else
		iter->tr = inode->i_private;
	iter->trace = current_trace;
	iter->pos = -1;

	/* TODO stop tracer */
	*ret = seq_open(file, &tracer_seq_ops);
	if (!*ret) {
		struct seq_file *m = file->private_data;
		m->private = iter;

		/* stop the trace while dumping */
		if (iter->tr->ctrl)
			tracer_enabled = 0;

		if (iter->trace && iter->trace->open)
			iter->trace->open(iter);
	} else {
		kfree(iter);
		iter = NULL;
	}
	mutex_unlock(&trace_types_lock);

 out:
	return iter;
}

int tracing_open_generic(struct inode *inode, struct file *filp)
{
	filp->private_data = inode->i_private;
	return 0;
}

int tracing_release(struct inode *inode, struct file *file)
{
	struct seq_file *m = (struct seq_file *)file->private_data;
	struct trace_iterator *iter = m->private;

	mutex_lock(&trace_types_lock);
	if (iter->trace && iter->trace->close)
		iter->trace->close(iter);

	/* reenable tracing if it was previously enabled */
	if (iter->tr->ctrl)
		tracer_enabled = 1;
	mutex_unlock(&trace_types_lock);

	seq_release(inode, file);
	kfree(iter);
	return 0;
}

static int tracing_open(struct inode *inode, struct file *file)
{
	int ret;

	__tracing_open(inode, file, &ret);

	return ret;
}

static int tracing_lt_open(struct inode *inode, struct file *file)
{
	struct trace_iterator *iter;
	int ret;

	iter = __tracing_open(inode, file, &ret);

	if (!ret)
		iter->iter_flags |= TRACE_FILE_LAT_FMT;

	return ret;
}
static void notrace *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct tracer *t = m->private;

	(*pos)++;

	if (t)
		t = t->next;

	m->private = t;

	return t;
}

static void *t_start(struct seq_file *m, loff_t *pos)
{
	struct tracer *t = m->private;
	loff_t l = 0;

	mutex_lock(&trace_types_lock);
	for (; t && l < *pos; t = t_next(m, t, &l))
		;

	return t;
}

static void t_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&trace_types_lock);
}

static int t_show(struct seq_file *m, void *v)
{
	struct tracer *t = v;

	if (!t)
		return 0;

	seq_printf(m, "%s", t->name);
	if (t->next)
		seq_putc(m, ' ');
	else
		seq_putc(m, '\n');

	return 0;
}

static struct seq_operations show_traces_seq_ops = {
	.start = t_start,
	.next = t_next,
	.stop = t_stop,
	.show = t_show,
};

static int show_traces_open(struct inode *inode, struct file *file)
{
	int ret;

	ret = seq_open(file, &show_traces_seq_ops);
	if (!ret) {
		struct seq_file *m = file->private_data;
		m->private = trace_types;
	}

	return ret;
}

static struct file_operations tracing_fops = {
	.open = tracing_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = tracing_release,
};

static struct file_operations tracing_lt_fops = {
	.open = tracing_lt_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = tracing_release,
};

static struct file_operations show_traces_fops = {
	.open = show_traces_open,
	.read = seq_read,
	.release = seq_release,
};
static ssize_t
tracing_iter_ctrl_read(struct file *filp, char __user *ubuf,
		       size_t cnt, loff_t *ppos)
{
	char *buf;
	int r = 0;
	int len = 0;
	int i;

	/* calculate max size */
	for (i = 0; trace_options[i]; i++) {
		len += strlen(trace_options[i]);
		len += 3; /* "no" and space */
	}

	/* +2 for \n and \0 */
	buf = kmalloc(len + 2, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	for (i = 0; trace_options[i]; i++) {
		if (trace_flags & (1 << i))
			r += sprintf(buf + r, "%s ", trace_options[i]);
		else
			r += sprintf(buf + r, "no%s ", trace_options[i]);
	}

	r += sprintf(buf + r, "\n");
	WARN_ON(r >= len + 2);

	r = simple_read_from_buffer(ubuf, cnt, ppos,
				    buf, r);

	kfree(buf);

	return r;
}

static ssize_t
tracing_iter_ctrl_write(struct file *filp, const char __user *ubuf,
			size_t cnt, loff_t *ppos)
{
	char buf[64];
	char *cmp = buf;
	int neg = 0;
	int i;

	if (cnt > 63)
		cnt = 63;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	if (strncmp(buf, "no", 2) == 0) {
		neg = 1;
		cmp += 2;
	}

	for (i = 0; trace_options[i]; i++) {
		int len = strlen(trace_options[i]);

		if (strncmp(cmp, trace_options[i], len) == 0) {
			if (neg)
				trace_flags &= ~(1 << i);
			else
				trace_flags |= (1 << i);
			break;
		}
	}

	filp->f_pos += cnt;

	return cnt;
}

static struct file_operations tracing_iter_fops = {
	.open = tracing_open_generic,
	.read = tracing_iter_ctrl_read,
	.write = tracing_iter_ctrl_write,
};
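/*
 * Shell usage sketch (assuming debugfs is mounted at /debug):
 *
 *	# cat /debug/tracing/iter_ctrl
 *	noprint-parent nosym-offset nosym-addr noverbose
 *	# echo print-parent > /debug/tracing/iter_ctrl
 *	# echo nosym-addr > /debug/tracing/iter_ctrl
 */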
static ssize_t
tracing_ctrl_read(struct file *filp, char __user *ubuf,
		  size_t cnt, loff_t *ppos)
{
	struct trace_array *tr = filp->private_data;
	char buf[64];
	int r;

	r = sprintf(buf, "%ld\n", tr->ctrl);
	return simple_read_from_buffer(ubuf, cnt, ppos,
				       buf, r);
}

static ssize_t
tracing_ctrl_write(struct file *filp, const char __user *ubuf,
		   size_t cnt, loff_t *ppos)
{
	struct trace_array *tr = filp->private_data;
	long val;
	char buf[64];

	if (cnt > 63)
		cnt = 63;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	val = simple_strtoul(buf, NULL, 10);

	/* normalize to a 0/1 switch */
	val = !!val;

	mutex_lock(&trace_types_lock);
	if (tr->ctrl ^ val) {
		if (val)
			tracer_enabled = 1;
		else
			tracer_enabled = 0;

		tr->ctrl = val;

		if (current_trace && current_trace->ctrl_update)
			current_trace->ctrl_update(tr);
	}
	mutex_unlock(&trace_types_lock);

	filp->f_pos += cnt;

	return cnt;
}

static ssize_t
tracing_set_trace_read(struct file *filp, char __user *ubuf,
		       size_t cnt, loff_t *ppos)
{
	char buf[max_tracer_type_len+2];
	int r;

	mutex_lock(&trace_types_lock);
	if (current_trace)
		r = sprintf(buf, "%s\n", current_trace->name);
	else
		r = sprintf(buf, "\n");
	mutex_unlock(&trace_types_lock);

	return simple_read_from_buffer(ubuf, cnt, ppos,
				       buf, r);
}
static ssize_t
tracing_set_trace_write(struct file *filp, const char __user *ubuf,
			size_t cnt, loff_t *ppos)
{
	struct trace_array *tr = &global_trace;
	struct tracer *t;
	char buf[max_tracer_type_len+1];
	int i;

	if (cnt > max_tracer_type_len)
		cnt = max_tracer_type_len;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	/* strip ending whitespace. */
	for (i = cnt - 1; i > 0 && isspace(buf[i]); i--)
		buf[i] = 0;

	mutex_lock(&trace_types_lock);
	for (t = trace_types; t; t = t->next) {
		if (strcmp(t->name, buf) == 0)
			break;
	}
	if (!t || t == current_trace)
		goto out;

	if (current_trace && current_trace->reset)
		current_trace->reset(tr);

	current_trace = t;
	if (t->init)
		t->init(tr);

 out:
	mutex_unlock(&trace_types_lock);

	filp->f_pos += cnt;

	return cnt;
}

static ssize_t
tracing_max_lat_read(struct file *filp, char __user *ubuf,
		     size_t cnt, loff_t *ppos)
{
	unsigned long *ptr = filp->private_data;
	char buf[64];
	int r;

	r = snprintf(buf, 64, "%ld\n",
		     *ptr == (unsigned long)-1 ? -1 : nsecs_to_usecs(*ptr));
	if (r > 64)
		r = 64;
	return simple_read_from_buffer(ubuf, cnt, ppos,
				       buf, r);
}

static ssize_t
tracing_max_lat_write(struct file *filp, const char __user *ubuf,
		      size_t cnt, loff_t *ppos)
{
	long *ptr = filp->private_data;
	long val;
	char buf[64];

	if (cnt > 63)
		cnt = 63;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	val = simple_strtoul(buf, NULL, 10);

	/* input is in usecs; store nanoseconds */
	*ptr = val * 1000;

	return cnt;
}
static struct file_operations tracing_max_lat_fops = {
	.open = tracing_open_generic,
	.read = tracing_max_lat_read,
	.write = tracing_max_lat_write,
};

static struct file_operations tracing_ctrl_fops = {
	.open = tracing_open_generic,
	.read = tracing_ctrl_read,
	.write = tracing_ctrl_write,
};

static struct file_operations set_tracer_fops = {
	.open = tracing_open_generic,
	.read = tracing_set_trace_read,
	.write = tracing_set_trace_write,
};
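/*
 * Shell usage sketch (tracer names depend on what is registered;
 * "function" is illustrative, debugfs assumed mounted at /debug):
 *
 *	# cat /debug/tracing/available_tracers
 *	function none
 *	# echo function > /debug/tracing/current_tracer
 *	# echo 1 > /debug/tracing/tracing_enabled
 */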
#ifdef CONFIG_DYNAMIC_FTRACE

static ssize_t
tracing_read_long(struct file *filp, char __user *ubuf,
		  size_t cnt, loff_t *ppos)
{
	unsigned long *p = filp->private_data;
	char buf[64];
	int r;

	r = sprintf(buf, "%ld\n", *p);
	return simple_read_from_buffer(ubuf, cnt, ppos,
				       buf, r);
}

static struct file_operations tracing_read_long_fops = {
	.open = tracing_open_generic,
	.read = tracing_read_long,
};
#endif

static struct dentry *d_tracer;

struct dentry *tracing_init_dentry(void)
{
	static int once;

	if (d_tracer)
		return d_tracer;

	d_tracer = debugfs_create_dir("tracing", NULL);

	if (!d_tracer && !once) {
		once = 1;
		pr_warning("Could not create debugfs directory 'tracing'\n");
		return NULL;
	}

	return d_tracer;
}
static __init void tracer_init_debugfs(void)
{
	struct dentry *d_tracer;
	struct dentry *entry;

	d_tracer = tracing_init_dentry();

	entry = debugfs_create_file("tracing_enabled", 0644, d_tracer,
				    &global_trace, &tracing_ctrl_fops);
	if (!entry)
		pr_warning("Could not create debugfs 'tracing_enabled' entry\n");

	entry = debugfs_create_file("iter_ctrl", 0644, d_tracer,
				    NULL, &tracing_iter_fops);
	if (!entry)
		pr_warning("Could not create debugfs 'iter_ctrl' entry\n");

	entry = debugfs_create_file("latency_trace", 0444, d_tracer,
				    &global_trace, &tracing_lt_fops);
	if (!entry)
		pr_warning("Could not create debugfs 'latency_trace' entry\n");

	entry = debugfs_create_file("trace", 0444, d_tracer,
				    &global_trace, &tracing_fops);
	if (!entry)
		pr_warning("Could not create debugfs 'trace' entry\n");

	entry = debugfs_create_file("available_tracers", 0444, d_tracer,
				    &global_trace, &show_traces_fops);
	if (!entry)
		pr_warning("Could not create debugfs 'available_tracers' entry\n");

	entry = debugfs_create_file("current_tracer", 0444, d_tracer,
				    &global_trace, &set_tracer_fops);
	if (!entry)
		pr_warning("Could not create debugfs 'current_tracer' entry\n");

	entry = debugfs_create_file("tracing_max_latency", 0644, d_tracer,
				    &tracing_max_latency,
				    &tracing_max_lat_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'tracing_max_latency' entry\n");

	entry = debugfs_create_file("tracing_thresh", 0644, d_tracer,
				    &tracing_thresh, &tracing_max_lat_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'tracing_thresh' entry\n");

#ifdef CONFIG_DYNAMIC_FTRACE
	entry = debugfs_create_file("dyn_ftrace_total_info", 0444, d_tracer,
				    &ftrace_update_tot_cnt,
				    &tracing_read_long_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'dyn_ftrace_total_info' entry\n");
#endif
}
/* dummy trace to disable tracing */
static struct tracer no_tracer __read_mostly =
{
	.name = "none",
};

static int trace_alloc_page(void)
{
	struct trace_array_cpu *data;
	void *array;
	struct page *page, *tmp;
	LIST_HEAD(pages);
	int i;

	/* first allocate a page for each CPU */
	for_each_possible_cpu(i) {
		array = (void *)__get_free_page(GFP_KERNEL);
		if (array == NULL) {
			printk(KERN_ERR "tracer: failed to allocate page "
			       "for trace buffer!\n");
			goto free_pages;
		}

		page = virt_to_page(array);
		list_add(&page->lru, &pages);

		/* Only allocate if we are actually using the max trace */
#ifdef CONFIG_TRACER_MAX_TRACE
		array = (void *)__get_free_page(GFP_KERNEL);
		if (array == NULL) {
			printk(KERN_ERR "tracer: failed to allocate page "
			       "for trace buffer!\n");
			goto free_pages;
		}
		page = virt_to_page(array);
		list_add(&page->lru, &pages);
#endif
	}

	/* Now that we successfully allocate a page per CPU, add them */
	for_each_possible_cpu(i) {
		data = global_trace.data[i];
		page = list_entry(pages.next, struct page, lru);
		list_del(&page->lru);
		list_add_tail(&page->lru, &data->trace_pages);
		SetPageLRU(page);

#ifdef CONFIG_TRACER_MAX_TRACE
		data = max_tr.data[i];
		page = list_entry(pages.next, struct page, lru);
		list_del(&page->lru);
		list_add_tail(&page->lru, &data->trace_pages);
		ClearPageLRU(page);
#endif
	}
	global_trace.entries += ENTRIES_PER_PAGE;

	return 0;

 free_pages:
	list_for_each_entry_safe(page, tmp, &pages, lru) {
		list_del(&page->lru);
		__free_page(page);
	}
	return -ENOMEM;
}
__init static int tracer_alloc_buffers(void)
{
	struct trace_array_cpu *data;
	void *array;
	struct page *page;
	int pages = 0;
	int ret = -ENOMEM;
	int i;

	/* Allocate the first page for all buffers */
	for_each_possible_cpu(i) {
		data = global_trace.data[i] = &per_cpu(global_trace_cpu, i);
		max_tr.data[i] = &per_cpu(max_data, i);

		array = (void *)__get_free_page(GFP_KERNEL);
		if (array == NULL) {
			printk(KERN_ERR "tracer: failed to allocate page "
			       "for trace buffer!\n");
			goto free_buffers;
		}
		data->trace = array;

		/* set the array to the list */
		INIT_LIST_HEAD(&data->trace_pages);
		page = virt_to_page(array);
		list_add(&page->lru, &data->trace_pages);
		/* use the LRU flag to differentiate the two buffers */
		SetPageLRU(page);

		/* Only allocate if we are actually using the max trace */
#ifdef CONFIG_TRACER_MAX_TRACE
		array = (void *)__get_free_page(GFP_KERNEL);
		if (array == NULL) {
			printk(KERN_ERR "tracer: failed to allocate page "
			       "for trace buffer!\n");
			goto free_buffers;
		}
		max_tr.data[i]->trace = array;

		INIT_LIST_HEAD(&max_tr.data[i]->trace_pages);
		page = virt_to_page(array);
		list_add(&page->lru, &max_tr.data[i]->trace_pages);
		ClearPageLRU(page);
#endif
	}

	/*
	 * Since we allocate by orders of pages, we may be able to
	 * round up a bit.
	 */
	global_trace.entries = ENTRIES_PER_PAGE;
	max_tr.entries = global_trace.entries;
	pages++;

	while (global_trace.entries < trace_nr_entries) {
		if (trace_alloc_page())
			break;
		pages++;
	}

	pr_info("tracer: %d pages allocated for %ld",
		pages, trace_nr_entries);
	pr_info(" entries of %ld bytes\n", (long)TRACE_ENTRY_SIZE);
	pr_info("   actual entries %ld\n", global_trace.entries);

	tracer_init_debugfs();

	trace_init_cmdlines();

	register_tracer(&no_tracer);
	current_trace = &no_tracer;

	return 0;

 free_buffers:
	for (i-- ; i >= 0; i--) {
		struct page *page, *tmp;
		struct trace_array_cpu *data = global_trace.data[i];

		if (data && data->trace) {
			list_for_each_entry_safe(page, tmp,
						 &data->trace_pages, lru) {
				list_del(&page->lru);
				__free_page(page);
			}
			data->trace = NULL;
		}

#ifdef CONFIG_TRACER_MAX_TRACE
		data = max_tr.data[i];
		if (data && data->trace) {
			list_for_each_entry_safe(page, tmp,
						 &data->trace_pages, lru) {
				list_del(&page->lru);
				__free_page(page);
			}
			data->trace = NULL;
		}
#endif
	}
	return ret;
}

device_initcall(tracer_alloc_buffers);