Merge branch 'tip/tracing/ftrace' of git://git.kernel.org/pub/scm/linux/kernel/git...
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 4c97947650ae54b2fb2be4338453c9c82ebae33c..1ce6208fd727205f25850e2ed13aa63265da76f0 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
 unsigned long __read_mostly    tracing_max_latency;
 unsigned long __read_mostly    tracing_thresh;
 
+/*
+ * On boot up, the ring buffer is set to the minimum size, so that
+ * we do not waste memory on systems that are not using tracing.
+ */
+static int ring_buffer_expanded;
+
 /*
  * We need to change this state when a selftest is running.
  * A selftest will lurk into the ring-buffer to count the
@@ -128,6 +134,8 @@ static int __init set_ftrace(char *str)
 {
        strncpy(bootup_tracer_buf, str, BOOTUP_TRACER_SIZE);
        default_bootup_tracer = bootup_tracer_buf;
+       /* We are using ftrace early, expand it */
+       ring_buffer_expanded = 1;
        return 1;
 }
 __setup("ftrace=", set_ftrace);
@@ -762,30 +770,34 @@ static void trace_save_cmdline(struct task_struct *tsk)
        __raw_spin_unlock(&trace_cmdline_lock);
 }
 
-char *trace_find_cmdline(int pid)
+void trace_find_cmdline(int pid, char comm[])
 {
-       char *cmdline = "<...>";
        unsigned map;
 
-       if (!pid)
-               return "<idle>";
+       if (!pid) {
+               strcpy(comm, "<idle>");
+               return;
+       }
 
-       if (pid > PID_MAX_DEFAULT)
-               goto out;
+       if (pid > PID_MAX_DEFAULT) {
+               strcpy(comm, "<...>");
+               return;
+       }
 
+       __raw_spin_lock(&trace_cmdline_lock);
        map = map_pid_to_cmdline[pid];
        if (map >= SAVED_CMDLINES)
                goto out;
 
-       cmdline = saved_cmdlines[map];
+       strcpy(comm, saved_cmdlines[map]);
 
  out:
-       return cmdline;
+       __raw_spin_unlock(&trace_cmdline_lock);
 }
 
 void tracing_record_cmdline(struct task_struct *tsk)
 {
-       if (atomic_read(&trace_record_cmdline_disabled))
+       if (atomic_read(&trace_record_cmdline_disabled) || !tracing_is_on())
                return;
 
        trace_save_cmdline(tsk);
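
Returning a pointer into saved_cmdlines[], as the old trace_find_cmdline() did, was racy: trace_save_cmdline() can recycle a slot while the caller still holds the pointer. The new signature copies into a caller-supplied buffer while holding trace_cmdline_lock. A user-space sketch of the same copy-out-under-lock pattern (all names hypothetical):

#include <pthread.h>
#include <string.h>

#define SAVED_CMDLINES 128
#define TASK_COMM_LEN  16
#define PID_MAX        32768

static pthread_mutex_t cmdline_lock = PTHREAD_MUTEX_INITIALIZER;
/* pid -> slot index; values >= SAVED_CMDLINES mean "no entry".
 * Assume the map was filled with SAVED_CMDLINES at startup. */
static unsigned map_pid_to_slot[PID_MAX];
static char saved_comms[SAVED_CMDLINES][TASK_COMM_LEN];

/* Copy out under the lock so the slot cannot be recycled mid-read. */
static void find_comm(int pid, char comm[TASK_COMM_LEN])
{
	unsigned slot;

	strcpy(comm, "<...>");		/* default if nothing recorded */
	if (pid <= 0 || pid >= PID_MAX)
		return;

	pthread_mutex_lock(&cmdline_lock);
	slot = map_pid_to_slot[pid];
	if (slot < SAVED_CMDLINES)
		strcpy(comm, saved_comms[slot]);
	pthread_mutex_unlock(&cmdline_lock);
}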
@@ -1171,10 +1183,10 @@ void trace_graph_return(struct ftrace_graph_ret *trace)
 
 
 /**
- * trace_vprintk - write binary msg to tracing buffer
+ * trace_vbprintk - write binary msg to tracing buffer
  *
  */
-int trace_vprintk(unsigned long ip, int depth, const char *fmt, va_list args)
+int trace_vbprintk(unsigned long ip, int depth, const char *fmt, va_list args)
 {
        static raw_spinlock_t trace_buf_lock =
                (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
@@ -1183,7 +1195,7 @@ int trace_vprintk(unsigned long ip, int depth, const char *fmt, va_list args)
        struct ring_buffer_event *event;
        struct trace_array *tr = &global_trace;
        struct trace_array_cpu *data;
-       struct print_entry *entry;
+       struct bprint_entry *entry;
        unsigned long flags;
        int resched;
        int cpu, len = 0, size, pc;
@@ -1211,7 +1223,7 @@ int trace_vprintk(unsigned long ip, int depth, const char *fmt, va_list args)
                goto out_unlock;
 
        size = sizeof(*entry) + sizeof(u32) * len;
-       event = trace_buffer_lock_reserve(tr, TRACE_PRINT, size, flags, pc);
+       event = trace_buffer_lock_reserve(tr, TRACE_BPRINT, size, flags, pc);
        if (!event)
                goto out_unlock;
        entry = ring_buffer_event_data(event);
@@ -1232,6 +1244,60 @@ out:
 
        return len;
 }
+EXPORT_SYMBOL_GPL(trace_vbprintk);
+
+int trace_vprintk(unsigned long ip, int depth, const char *fmt, va_list args)
+{
+       static raw_spinlock_t trace_buf_lock = __RAW_SPIN_LOCK_UNLOCKED;
+       static char trace_buf[TRACE_BUF_SIZE];
+
+       struct ring_buffer_event *event;
+       struct trace_array *tr = &global_trace;
+       struct trace_array_cpu *data;
+       int cpu, len = 0, size, pc;
+       struct print_entry *entry;
+       unsigned long irq_flags;
+
+       if (tracing_disabled || tracing_selftest_running)
+               return 0;
+
+       pc = preempt_count();
+       preempt_disable_notrace();
+       cpu = raw_smp_processor_id();
+       data = tr->data[cpu];
+
+       if (unlikely(atomic_read(&data->disabled)))
+               goto out;
+
+       pause_graph_tracing();
+       raw_local_irq_save(irq_flags);
+       __raw_spin_lock(&trace_buf_lock);
+       len = vsnprintf(trace_buf, TRACE_BUF_SIZE, fmt, args);
+
+       len = min(len, TRACE_BUF_SIZE-1);
+       trace_buf[len] = 0;
+
+       size = sizeof(*entry) + len + 1;
+       event = trace_buffer_lock_reserve(tr, TRACE_PRINT, size, irq_flags, pc);
+       if (!event)
+               goto out_unlock;
+       entry = ring_buffer_event_data(event);
+       entry->ip                       = ip;
+       entry->depth                    = depth;
+
+       memcpy(&entry->buf, trace_buf, len);
+       entry->buf[len] = 0;
+       ring_buffer_unlock_commit(tr->buffer, event);
+
+ out_unlock:
+       __raw_spin_unlock(&trace_buf_lock);
+       raw_local_irq_restore(irq_flags);
+       unpause_graph_tracing();
+ out:
+       preempt_enable_notrace();
+
+       return len;
+}
 EXPORT_SYMBOL_GPL(trace_vprintk);
 
 enum trace_file_type {
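
The rename makes room for two distinct paths: trace_vbprintk() (binary) stores the format pointer plus the raw 32-bit argument words and defers formatting until the trace is read, while the re-added trace_vprintk() runs vsnprintf() at the call site and stores the finished string. The trade-off is trace-time cost versus record size, plus the binary path's requirement that the format string stay alive. A rough, illustrative comparison of the two payloads (not the actual kernel structures, which also embed a struct trace_entry header):

/* Illustrative payload layouts only -- see bprint_entry/print_entry
 * in kernel/trace/trace.h for the real definitions. */
struct bprint_payload {		/* TRACE_BPRINT: cheap to record     */
	unsigned long ip;	/* call-site address                 */
	const char *fmt;	/* format string, decoded at read    */
	unsigned int buf[];	/* raw varargs, sizeof(u32) * len    */
};

struct print_payload {		/* TRACE_PRINT: formatted up front   */
	unsigned long ip;
	int depth;
	char buf[];		/* NUL-terminated vsnprintf() output */
};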
@@ -1470,11 +1536,11 @@ print_trace_header(struct seq_file *m, struct trace_iterator *iter)
        total = entries +
                ring_buffer_overruns(iter->tr->buffer);
 
-       seq_printf(m, "%s latency trace v1.1.5 on %s\n",
+       seq_printf(m, "%s latency trace v1.1.5 on %s\n",
                   name, UTS_RELEASE);
-       seq_puts(m, "-----------------------------------"
+       seq_puts(m, "-----------------------------------"
                 "---------------------------------\n");
-       seq_printf(m, " latency: %lu us, #%lu/%lu, CPU#%d |"
+       seq_printf(m, "# latency: %lu us, #%lu/%lu, CPU#%d |"
                   " (M:%s VP:%d, KP:%d, SP:%d HP:%d",
                   nsecs_to_usecs(data->saved_latency),
                   entries,
@@ -1496,24 +1562,24 @@ print_trace_header(struct seq_file *m, struct trace_iterator *iter)
 #else
        seq_puts(m, ")\n");
 #endif
-       seq_puts(m, "    -----------------\n");
-       seq_printf(m, "    | task: %.16s-%d "
+       seq_puts(m, "#    -----------------\n");
+       seq_printf(m, "#    | task: %.16s-%d "
                   "(uid:%d nice:%ld policy:%ld rt_prio:%ld)\n",
                   data->comm, data->pid, data->uid, data->nice,
                   data->policy, data->rt_priority);
-       seq_puts(m, "    -----------------\n");
+       seq_puts(m, "#    -----------------\n");
 
        if (data->critical_start) {
-               seq_puts(m, " => started at: ");
+               seq_puts(m, " => started at: ");
                seq_print_ip_sym(&iter->seq, data->critical_start, sym_flags);
                trace_print_seq(m, &iter->seq);
-               seq_puts(m, "\n => ended at:   ");
+               seq_puts(m, "\n => ended at:   ");
                seq_print_ip_sym(&iter->seq, data->critical_end, sym_flags);
                trace_print_seq(m, &iter->seq);
-               seq_puts(m, "\n");
+               seq_puts(m, "#\n");
        }
 
-       seq_puts(m, "\n");
+       seq_puts(m, "#\n");
 }
 
 static void test_cpu_buff_start(struct trace_iterator *iter)
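
With every banner line now starting with '#', tools that parse the trace can skip the whole latency header as comments. After this change the header renders roughly like the following (all values illustrative):

# irqsoff latency trace v1.1.5 on 2.6.29-rc7
# --------------------------------------------------------------------
# latency: 100 us, #3/3, CPU#1 | (M:preempt VP:0, KP:0, SP:0 HP:0 #P:2)
#    -----------------
#    | task: swapper-0 (uid:0 nice:0 policy:0 rt_prio:0)
#    -----------------
#  => started at: _spin_lock_irqsave
#  => ended at:   _spin_unlock_irqrestore
#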
@@ -1620,6 +1686,22 @@ static enum print_line_t print_hex_fmt(struct trace_iterator *iter)
        return TRACE_TYPE_HANDLED;
 }
 
+static enum print_line_t print_bprintk_msg_only(struct trace_iterator *iter)
+{
+       struct trace_seq *s = &iter->seq;
+       struct trace_entry *entry = iter->ent;
+       struct bprint_entry *field;
+       int ret;
+
+       trace_assign_type(field, entry);
+
+       ret = trace_seq_bprintf(s, field->fmt, field->buf);
+       if (!ret)
+               return TRACE_TYPE_PARTIAL_LINE;
+
+       return TRACE_TYPE_HANDLED;
+}
+
 static enum print_line_t print_printk_msg_only(struct trace_iterator *iter)
 {
        struct trace_seq *s = &iter->seq;
@@ -1629,7 +1711,7 @@ static enum print_line_t print_printk_msg_only(struct trace_iterator *iter)
 
        trace_assign_type(field, entry);
 
-       ret = trace_seq_bprintf(s, field->fmt, field->buf);
+       ret = trace_seq_printf(s, "%s", field->buf);
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;
 
@@ -1658,6 +1740,19 @@ static int trace_empty(struct trace_iterator *iter)
 {
        int cpu;
 
+       /* If we are looking at one CPU buffer, only check that one */
+       if (iter->cpu_file != TRACE_PIPE_ALL_CPU) {
+               cpu = iter->cpu_file;
+               if (iter->buffer_iter[cpu]) {
+                       if (!ring_buffer_iter_empty(iter->buffer_iter[cpu]))
+                               return 0;
+               } else {
+                       if (!ring_buffer_empty_cpu(iter->tr->buffer, cpu))
+                               return 0;
+               }
+               return 1;
+       }
+
        for_each_tracing_cpu(cpu) {
                if (iter->buffer_iter[cpu]) {
                        if (!ring_buffer_iter_empty(iter->buffer_iter[cpu]))
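
The new early return means a reader of a per-CPU trace file no longer pays for a scan of every CPU's buffer. A self-contained model of the dispatch (hypothetical types, not kernel code):

#include <stdbool.h>

#define ALL_CPUS (-1)
#define NR_CPUS  4

struct buf_iter {
	int cpu_file;			/* ALL_CPUS or a specific CPU */
	int pending[NR_CPUS];		/* events waiting per CPU     */
};

/* Mirror of the new trace_empty() logic: check one CPU if a per-CPU
 * file is open, otherwise scan them all. */
static bool buffer_empty(const struct buf_iter *iter)
{
	int cpu;

	if (iter->cpu_file != ALL_CPUS)
		return iter->pending[iter->cpu_file] == 0;

	for (cpu = 0; cpu < NR_CPUS; cpu++)
		if (iter->pending[cpu])
			return false;
	return true;
}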
@@ -1681,6 +1776,11 @@ static enum print_line_t print_trace_line(struct trace_iterator *iter)
                        return ret;
        }
 
+       if (iter->ent->type == TRACE_BPRINT &&
+                       trace_flags & TRACE_ITER_PRINTK &&
+                       trace_flags & TRACE_ITER_PRINTK_MSGONLY)
+               return print_bprintk_msg_only(iter);
+
        if (iter->ent->type == TRACE_PRINT &&
                        trace_flags & TRACE_ITER_PRINTK &&
                        trace_flags & TRACE_ITER_PRINTK_MSGONLY)
@@ -1784,17 +1884,11 @@ __tracing_open(struct inode *inode, struct file *file)
 
                        iter->buffer_iter[cpu] =
                                ring_buffer_read_start(iter->tr->buffer, cpu);
-
-                       if (!iter->buffer_iter[cpu])
-                               goto fail_buffer;
                }
        } else {
                cpu = iter->cpu_file;
                iter->buffer_iter[cpu] =
                                ring_buffer_read_start(iter->tr->buffer, cpu);
-
-               if (!iter->buffer_iter[cpu])
-                       goto fail;
        }
 
        /* TODO stop tracer */
@@ -2315,6 +2409,75 @@ int tracer_init(struct tracer *t, struct trace_array *tr)
        return t->init(tr);
 }
 
+static int tracing_resize_ring_buffer(unsigned long size)
+{
+       int ret;
+
+       /*
+        * If kernel or user changes the size of the ring buffer
+        * we use the size that was given, and we can forget about
+        * expanding it later.
+        */
+       ring_buffer_expanded = 1;
+
+       ret = ring_buffer_resize(global_trace.buffer, size);
+       if (ret < 0)
+               return ret;
+
+       ret = ring_buffer_resize(max_tr.buffer, size);
+       if (ret < 0) {
+               int r;
+
+               r = ring_buffer_resize(global_trace.buffer,
+                                      global_trace.entries);
+               if (r < 0) {
+                       /*
+                        * AARGH! We are left with different
+                        * size max buffer!!!!
+                        * The max buffer is our "snapshot" buffer.
+                        * When a tracer needs a snapshot (one of the
+                        * latency tracers), it swaps the max buffer
+                        * with the saved snapshot. We succeeded in
+                        * updating the size of the main buffer, but failed to
+                        * update the size of the max buffer. But when we tried
+                        * to reset the main buffer to the original size, we
+                        * failed there too. This is very unlikely to
+                        * happen, but if it does, warn and kill all
+                        * tracing.
+                        */
+                       WARN_ON(1);
+                       tracing_disabled = 1;
+               }
+               return ret;
+       }
+
+       global_trace.entries = size;
+
+       return ret;
+}
+
+/**
+ * tracing_update_buffers - used by tracing facility to expand ring buffers
+ *
+ * To save memory when tracing is configured in but never used, the
+ * ring buffers are set to a minimum size. Once a user starts to use
+ * the tracing facility, they need to grow to their default size.
+ *
+ * This function is to be called when a tracer is about to be used.
+ */
+int tracing_update_buffers(void)
+{
+       int ret = 0;
+
+       mutex_lock(&trace_types_lock);
+       if (!ring_buffer_expanded)
+               ret = tracing_resize_ring_buffer(trace_buf_size);
+       mutex_unlock(&trace_types_lock);
+
+       return ret;
+}
+
 struct trace_option_dentry;
 
 static struct trace_option_dentry *
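
The rollback logic above is the delicate part: the main and max (snapshot) buffers must stay the same size or the latency tracers cannot swap them. A condensed user-space model of the two-phase resize with rollback (hypothetical helpers, with a test hook to force failures):

#include <stdio.h>

static unsigned long buf_size[2];	/* [0] = main, [1] = max/snapshot  */
static int fail_after = -1;		/* set >= 0 to make a resize fail  */

static int buf_resize(int which, unsigned long size)
{
	if (fail_after-- == 0)
		return -1;
	buf_size[which] = size;
	return 0;
}

/* Two-phase resize with rollback, as tracing_resize_ring_buffer() does. */
static int resize_both(unsigned long new_size, unsigned long old_size)
{
	if (buf_resize(0, new_size) < 0)
		return -1;			/* nothing changed yet */

	if (buf_resize(1, new_size) < 0) {
		if (buf_resize(0, old_size) < 0) {
			/* Sizes now disagree: warn and stop using the buffers. */
			fprintf(stderr, "buffer sizes diverged, disabling\n");
			return -2;
		}
		return -1;			/* rolled back cleanly */
	}
	return 0;
}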
@@ -2331,6 +2494,14 @@ static int tracing_set_tracer(const char *buf)
        int ret = 0;
 
        mutex_lock(&trace_types_lock);
+
+       if (!ring_buffer_expanded) {
+               ret = tracing_resize_ring_buffer(trace_buf_size);
+               if (ret < 0)
+                       goto out;
+               ret = 0;
+       }
+
        for (t = trace_types; t; t = t->next) {
                if (strcmp(t->name, buf) == 0)
                        break;
@@ -2856,10 +3027,18 @@ tracing_entries_read(struct file *filp, char __user *ubuf,
                     size_t cnt, loff_t *ppos)
 {
        struct trace_array *tr = filp->private_data;
-       char buf[64];
+       char buf[96];
        int r;
 
-       r = sprintf(buf, "%lu\n", tr->entries >> 10);
+       mutex_lock(&trace_types_lock);
+       if (!ring_buffer_expanded)
+               r = sprintf(buf, "%lu (expanded: %lu)\n",
+                           tr->entries >> 10,
+                           trace_buf_size >> 10);
+       else
+               r = sprintf(buf, "%lu\n", tr->entries >> 10);
+       mutex_unlock(&trace_types_lock);
+
        return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
 }
 
@@ -2903,28 +3082,11 @@ tracing_entries_write(struct file *filp, const char __user *ubuf,
        val <<= 10;
 
        if (val != global_trace.entries) {
-               ret = ring_buffer_resize(global_trace.buffer, val);
+               ret = tracing_resize_ring_buffer(val);
                if (ret < 0) {
                        cnt = ret;
                        goto out;
                }
-
-               ret = ring_buffer_resize(max_tr.buffer, val);
-               if (ret < 0) {
-                       int r;
-                       cnt = ret;
-                       r = ring_buffer_resize(global_trace.buffer,
-                                              global_trace.entries);
-                       if (r < 0) {
-                               /* AARGH! We are left with different
-                                * size max buffer!!!! */
-                               WARN_ON(1);
-                               tracing_disabled = 1;
-                       }
-                       goto out;
-               }
-
-               global_trace.entries = val;
        }
 
        filp->f_pos += cnt;
@@ -3385,6 +3547,11 @@ static void tracing_init_debugfs_percpu(long cpu)
                                (void *) cpu, &tracing_fops);
        if (!entry)
                pr_warning("Could not create debugfs 'trace' entry\n");
+
+       entry = debugfs_create_file("trace_pipe_raw", 0444, d_cpu,
+                                   (void *) cpu, &tracing_buffers_fops);
+       if (!entry)
+               pr_warning("Could not create debugfs 'trace_pipe_raw' entry\n");
 }
 
 #ifdef CONFIG_FTRACE_SELFTEST
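
Moving the raw reader to a per-CPU trace_pipe_raw file (the old top-level binary_buffers directory is deleted further down) keeps all per-CPU files in one place. The creation call follows the usual debugfs pattern of this era; a sketch with hypothetical names:

#include <linux/debugfs.h>
#include <linux/kernel.h>

static const struct file_operations my_raw_fops;	/* assumed defined elsewhere */

static void my_create_percpu_file(struct dentry *d_cpu, long cpu)
{
	struct dentry *entry;

	/* 0444: read-only; the CPU number travels in file->private_data */
	entry = debugfs_create_file("my_pipe_raw", 0444, d_cpu,
				    (void *)cpu, &my_raw_fops);
	if (!entry)
		pr_warning("Could not create debugfs 'my_pipe_raw' entry\n");
}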
@@ -3668,7 +3835,6 @@ static __init void create_trace_options_dir(void)
 static __init int tracer_init_debugfs(void)
 {
        struct dentry *d_tracer;
-       struct dentry *buffers;
        struct dentry *entry;
        int cpu;
 
@@ -3741,26 +3907,6 @@ static __init int tracer_init_debugfs(void)
                pr_warning("Could not create debugfs "
                           "'trace_marker' entry\n");
 
-       buffers = debugfs_create_dir("binary_buffers", d_tracer);
-
-       if (!buffers)
-               pr_warning("Could not create buffers directory\n");
-       else {
-               int cpu;
-               char buf[64];
-
-               for_each_tracing_cpu(cpu) {
-                       sprintf(buf, "%d", cpu);
-
-                       entry = debugfs_create_file(buf, 0444, buffers,
-                                                   (void *)(long)cpu,
-                                                   &tracing_buffers_fops);
-                       if (!entry)
-                               pr_warning("Could not create debugfs buffers "
-                                          "'%s' entry\n", buf);
-               }
-       }
-
 #ifdef CONFIG_DYNAMIC_FTRACE
        entry = debugfs_create_file("dyn_ftrace_total_info", 0444, d_tracer,
                                    &ftrace_update_tot_cnt,
@@ -3916,6 +4062,7 @@ void ftrace_dump(void)
 __init static int tracer_alloc_buffers(void)
 {
        struct trace_array_cpu *data;
+       int ring_buf_size;
        int i;
        int ret = -ENOMEM;
 
@@ -3928,12 +4075,18 @@ __init static int tracer_alloc_buffers(void)
        if (!alloc_cpumask_var(&tracing_reader_cpumask, GFP_KERNEL))
                goto out_free_tracing_cpumask;
 
+       /* To save memory, keep the ring buffer size to its minimum */
+       if (ring_buffer_expanded)
+               ring_buf_size = trace_buf_size;
+       else
+               ring_buf_size = 1;
+
        cpumask_copy(tracing_buffer_mask, cpu_possible_mask);
        cpumask_copy(tracing_cpumask, cpu_all_mask);
        cpumask_clear(tracing_reader_cpumask);
 
        /* TODO: make the number of buffers hot pluggable with CPUS */
-       global_trace.buffer = ring_buffer_alloc(trace_buf_size,
+       global_trace.buffer = ring_buffer_alloc(ring_buf_size,
                                                   TRACE_BUFFER_FLAGS);
        if (!global_trace.buffer) {
                printk(KERN_ERR "tracer: failed to allocate ring buffer!\n");
@@ -3944,7 +4097,7 @@ __init static int tracer_alloc_buffers(void)
 
 
 #ifdef CONFIG_TRACER_MAX_TRACE
-       max_tr.buffer = ring_buffer_alloc(trace_buf_size,
+       max_tr.buffer = ring_buffer_alloc(ring_buf_size,
                                             TRACE_BUFFER_FLAGS);
        if (!max_tr.buffer) {
                printk(KERN_ERR "tracer: failed to allocate max ring buffer!\n");
@@ -3976,7 +4129,8 @@ __init static int tracer_alloc_buffers(void)
                                       &trace_panic_notifier);
 
        register_die_notifier(&trace_die_notifier);
-       ret = 0;
+
+       return 0;
 
 out_free_cpumask:
        free_cpumask_var(tracing_reader_cpumask);
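
Taken together: tracer_alloc_buffers() allocates a one-unit buffer at boot unless `ftrace=` already set ring_buffer_expanded, and the first real use (setting a tracer, or anything that calls tracing_update_buffers()) grows it to trace_buf_size. A condensed user-space model of that lazy-grow lifecycle (all names hypothetical):

#include <stdio.h>

#define DEFAULT_SIZE (1408UL * 1024)	/* stand-in for trace_buf_size  */

static unsigned long buf_size;
static int expanded;			/* mirrors ring_buffer_expanded */

static void alloc_buffers(int boot_tracer_requested)
{
	if (boot_tracer_requested)	/* "ftrace=" seen on the cmdline */
		expanded = 1;
	buf_size = expanded ? DEFAULT_SIZE : 1;
	printf("boot: allocated %lu bytes\n", buf_size);
}

static void update_buffers(void)	/* called before a tracer is used */
{
	if (!expanded) {
		buf_size = DEFAULT_SIZE;
		expanded = 1;
		printf("first use: expanded to %lu bytes\n", buf_size);
	}
}

int main(void)
{
	alloc_buffers(0);	/* no boot tracer: stay minimal       */
	update_buffers();	/* user enables tracing: grow buffers */
	return 0;
}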