pilppa.org Git - linux-2.6-omap-h63xx.git/blobdiff - kernel/trace/trace.c
Merge branch 'x86-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git...
[linux-2.6-omap-h63xx.git] / kernel / trace / trace.c
index c1634068adfa4644724a90c14adbb6d1916c7226..9f3b478f91716bdd1cc48ac6872c2a3c1412dbb8 100644 (file)
@@ -34,6 +34,7 @@
 
 #include <linux/stacktrace.h>
 #include <linux/ring_buffer.h>
+#include <linux/irqflags.h>
 
 #include "trace.h"
 
@@ -335,28 +336,23 @@ trace_seq_putmem(struct trace_seq *s, void *mem, size_t len)
        return len;
 }
 
-#define HEX_CHARS 17
-static const char hex2asc[] = "0123456789abcdef";
+#define MAX_MEMHEX_BYTES       8
+#define HEX_CHARS              (MAX_MEMHEX_BYTES*2 + 1)
 
 static int
 trace_seq_putmem_hex(struct trace_seq *s, void *mem, size_t len)
 {
        unsigned char hex[HEX_CHARS];
        unsigned char *data = mem;
-       unsigned char byte;
        int i, j;
 
-       BUG_ON(len >= HEX_CHARS);
-
 #ifdef __BIG_ENDIAN
        for (i = 0, j = 0; i < len; i++) {
 #else
        for (i = len-1, j = 0; i >= 0; i--) {
 #endif
-               byte = data[i];
-
-               hex[j++] = hex2asc[byte & 0x0f];
-               hex[j++] = hex2asc[byte >> 4];
+               hex[j++] = hex_asc_hi(data[i]);
+               hex[j++] = hex_asc_lo(data[i]);
        }
        hex[j++] = ' ';
 
@@ -652,17 +648,19 @@ void tracing_record_cmdline(struct task_struct *tsk)
 }
 
 void
-tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags)
+tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags,
+                            int pc)
 {
        struct task_struct *tsk = current;
-       unsigned long pc;
-
-       pc = preempt_count();
 
        entry->preempt_count            = pc & 0xff;
        entry->pid                      = (tsk) ? tsk->pid : 0;
        entry->flags =
+#ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
                (irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) |
+#else
+               TRACE_FLAG_IRQS_NOSUPPORT |
+#endif
                ((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
                ((pc & SOFTIRQ_MASK) ? TRACE_FLAG_SOFTIRQ : 0) |
                (need_resched() ? TRACE_FLAG_NEED_RESCHED : 0);
@@ -670,7 +668,8 @@ tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags)
 
 void
 trace_function(struct trace_array *tr, struct trace_array_cpu *data,
-              unsigned long ip, unsigned long parent_ip, unsigned long flags)
+              unsigned long ip, unsigned long parent_ip, unsigned long flags,
+              int pc)
 {
        struct ring_buffer_event *event;
        struct ftrace_entry *entry;
@@ -685,7 +684,7 @@ trace_function(struct trace_array *tr, struct trace_array_cpu *data,
        if (!event)
                return;
        entry   = ring_buffer_event_data(event);
-       tracing_generic_entry_update(&entry->ent, flags);
+       tracing_generic_entry_update(&entry->ent, flags, pc);
        entry->ent.type                 = TRACE_FN;
        entry->ip                       = ip;
        entry->parent_ip                = parent_ip;
@@ -694,17 +693,19 @@ trace_function(struct trace_array *tr, struct trace_array_cpu *data,
 
 void
 ftrace(struct trace_array *tr, struct trace_array_cpu *data,
-       unsigned long ip, unsigned long parent_ip, unsigned long flags)
+       unsigned long ip, unsigned long parent_ip, unsigned long flags,
+       int pc)
 {
        if (likely(!atomic_read(&data->disabled)))
-               trace_function(tr, data, ip, parent_ip, flags);
+               trace_function(tr, data, ip, parent_ip, flags, pc);
 }
 
-void __trace_stack(struct trace_array *tr,
-                  struct trace_array_cpu *data,
-                  unsigned long flags,
-                  int skip)
+static void ftrace_trace_stack(struct trace_array *tr,
+                              struct trace_array_cpu *data,
+                              unsigned long flags,
+                              int skip, int pc)
 {
+#ifdef CONFIG_STACKTRACE
        struct ring_buffer_event *event;
        struct stack_entry *entry;
        struct stack_trace trace;
@@ -718,7 +719,7 @@ void __trace_stack(struct trace_array *tr,
        if (!event)
                return;
        entry   = ring_buffer_event_data(event);
-       tracing_generic_entry_update(&entry->ent, flags);
+       tracing_generic_entry_update(&entry->ent, flags, pc);
        entry->ent.type         = TRACE_STACK;
 
        memset(&entry->caller, 0, sizeof(entry->caller));
@@ -730,11 +731,21 @@ void __trace_stack(struct trace_array *tr,
 
        save_stack_trace(&trace);
        ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
+#endif
 }
 
-void
-__trace_special(void *__tr, void *__data,
-               unsigned long arg1, unsigned long arg2, unsigned long arg3)
+void __trace_stack(struct trace_array *tr,
+                  struct trace_array_cpu *data,
+                  unsigned long flags,
+                  int skip)
+{
+       ftrace_trace_stack(tr, data, flags, skip, preempt_count());
+}
+
+static void
+ftrace_trace_special(void *__tr, void *__data,
+                    unsigned long arg1, unsigned long arg2, unsigned long arg3,
+                    int pc)
 {
        struct ring_buffer_event *event;
        struct trace_array_cpu *data = __data;
@@ -747,23 +758,30 @@ __trace_special(void *__tr, void *__data,
        if (!event)
                return;
        entry   = ring_buffer_event_data(event);
-       tracing_generic_entry_update(&entry->ent, 0);
+       tracing_generic_entry_update(&entry->ent, 0, pc);
        entry->ent.type                 = TRACE_SPECIAL;
        entry->arg1                     = arg1;
        entry->arg2                     = arg2;
        entry->arg3                     = arg3;
        ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
-       __trace_stack(tr, data, irq_flags, 4);
+       ftrace_trace_stack(tr, data, irq_flags, 4, pc);
 
        trace_wake_up();
 }
 
+void
+__trace_special(void *__tr, void *__data,
+               unsigned long arg1, unsigned long arg2, unsigned long arg3)
+{
+       ftrace_trace_special(__tr, __data, arg1, arg2, arg3, preempt_count());
+}
+
 void
 tracing_sched_switch_trace(struct trace_array *tr,
                           struct trace_array_cpu *data,
                           struct task_struct *prev,
                           struct task_struct *next,
-                          unsigned long flags)
+                          unsigned long flags, int pc)
 {
        struct ring_buffer_event *event;
        struct ctx_switch_entry *entry;
@@ -774,7 +792,7 @@ tracing_sched_switch_trace(struct trace_array *tr,
        if (!event)
                return;
        entry   = ring_buffer_event_data(event);
-       tracing_generic_entry_update(&entry->ent, flags);
+       tracing_generic_entry_update(&entry->ent, flags, pc);
        entry->ent.type                 = TRACE_CTX;
        entry->prev_pid                 = prev->pid;
        entry->prev_prio                = prev->prio;
@@ -784,7 +802,7 @@ tracing_sched_switch_trace(struct trace_array *tr,
        entry->next_state               = next->state;
        entry->next_cpu = task_cpu(next);
        ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
-       __trace_stack(tr, data, flags, 5);
+       ftrace_trace_stack(tr, data, flags, 5, pc);
 }
 
 void
@@ -792,7 +810,7 @@ tracing_sched_wakeup_trace(struct trace_array *tr,
                           struct trace_array_cpu *data,
                           struct task_struct *wakee,
                           struct task_struct *curr,
-                          unsigned long flags)
+                          unsigned long flags, int pc)
 {
        struct ring_buffer_event *event;
        struct ctx_switch_entry *entry;
@@ -803,7 +821,7 @@ tracing_sched_wakeup_trace(struct trace_array *tr,
        if (!event)
                return;
        entry   = ring_buffer_event_data(event);
-       tracing_generic_entry_update(&entry->ent, flags);
+       tracing_generic_entry_update(&entry->ent, flags, pc);
        entry->ent.type                 = TRACE_WAKE;
        entry->prev_pid                 = curr->pid;
        entry->prev_prio                = curr->prio;
@@ -813,7 +831,7 @@ tracing_sched_wakeup_trace(struct trace_array *tr,
        entry->next_state               = wakee->state;
        entry->next_cpu                 = task_cpu(wakee);
        ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
-       __trace_stack(tr, data, flags, 6);
+       ftrace_trace_stack(tr, data, flags, 6, pc);
 
        trace_wake_up();
 }
@@ -823,26 +841,24 @@ ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3)
 {
        struct trace_array *tr = &global_trace;
        struct trace_array_cpu *data;
-       unsigned long flags;
-       long disabled;
        int cpu;
+       int pc;
 
        if (tracing_disabled || !tr->ctrl)
                return;
 
-       local_irq_save(flags);
+       pc = preempt_count();
+       preempt_disable_notrace();
        cpu = raw_smp_processor_id();
        data = tr->data[cpu];
-       disabled = atomic_inc_return(&data->disabled);
 
-       if (likely(disabled == 1))
-               __trace_special(tr, data, arg1, arg2, arg3);
+       if (likely(!atomic_read(&data->disabled)))
+               ftrace_trace_special(tr, data, arg1, arg2, arg3, pc);
 
-       atomic_dec(&data->disabled);
-       local_irq_restore(flags);
+       preempt_enable_notrace();
 }
 
-#ifdef CONFIG_FTRACE
+#ifdef CONFIG_FUNCTION_TRACER
 static void
 function_trace_call(unsigned long ip, unsigned long parent_ip)
 {
@@ -850,24 +866,28 @@ function_trace_call(unsigned long ip, unsigned long parent_ip)
        struct trace_array_cpu *data;
        unsigned long flags;
        long disabled;
-       int cpu;
+       int cpu, resched;
+       int pc;
 
        if (unlikely(!ftrace_function_enabled))
                return;
 
-       if (skip_trace(ip))
-               return;
-
-       local_irq_save(flags);
+       pc = preempt_count();
+       resched = need_resched();
+       preempt_disable_notrace();
+       local_save_flags(flags);
        cpu = raw_smp_processor_id();
        data = tr->data[cpu];
        disabled = atomic_inc_return(&data->disabled);
 
        if (likely(disabled == 1))
-               trace_function(tr, data, ip, parent_ip, flags);
+               trace_function(tr, data, ip, parent_ip, flags, pc);
 
        atomic_dec(&data->disabled);
-       local_irq_restore(flags);
+       if (resched)
+               preempt_enable_no_resched_notrace();
+       else
+               preempt_enable_notrace();
 }
 
 static struct ftrace_ops trace_ops __read_mostly =
@@ -1068,17 +1088,20 @@ static void s_stop(struct seq_file *m, void *p)
        mutex_unlock(&trace_types_lock);
 }
 
-#define KRETPROBE_MSG "[unknown/kretprobe'd]"
-
 #ifdef CONFIG_KRETPROBES
-static inline int kretprobed(unsigned long addr)
+static inline const char *kretprobed(const char *name)
 {
-       return addr == (unsigned long)kretprobe_trampoline;
+       static const char tramp_name[] = "kretprobe_trampoline";
+       int size = sizeof(tramp_name);
+
+       if (strncmp(tramp_name, name, size) == 0)
+               return "[unknown/kretprobe'd]";
+       return name;
 }
 #else
-static inline int kretprobed(unsigned long addr)
+static inline const char *kretprobed(const char *name)
 {
-       return 0;
+       return name;
 }
 #endif /* CONFIG_KRETPROBES */
 
@@ -1087,10 +1110,13 @@ seq_print_sym_short(struct trace_seq *s, const char *fmt, unsigned long address)
 {
 #ifdef CONFIG_KALLSYMS
        char str[KSYM_SYMBOL_LEN];
+       const char *name;
 
        kallsyms_lookup(address, NULL, NULL, NULL, str);
 
-       return trace_seq_printf(s, fmt, str);
+       name = kretprobed(str);
+
+       return trace_seq_printf(s, fmt, name);
 #endif
        return 1;
 }
@@ -1101,9 +1127,12 @@ seq_print_sym_offset(struct trace_seq *s, const char *fmt,
 {
 #ifdef CONFIG_KALLSYMS
        char str[KSYM_SYMBOL_LEN];
+       const char *name;
 
        sprint_symbol(str, address);
-       return trace_seq_printf(s, fmt, str);
+       name = kretprobed(str);
+
+       return trace_seq_printf(s, fmt, name);
 #endif
        return 1;
 }
@@ -1230,7 +1259,8 @@ lat_print_generic(struct trace_seq *s, struct trace_entry *entry, int cpu)
        trace_seq_printf(s, "%8.8s-%-5d ", comm, entry->pid);
        trace_seq_printf(s, "%3d", cpu);
        trace_seq_printf(s, "%c%c",
-                       (entry->flags & TRACE_FLAG_IRQS_OFF) ? 'd' : '.',
+                       (entry->flags & TRACE_FLAG_IRQS_OFF) ? 'd' :
+                        (entry->flags & TRACE_FLAG_IRQS_NOSUPPORT) ? 'X' : '.',
                        ((entry->flags & TRACE_FLAG_NEED_RESCHED) ? 'N' : '.'));
 
        hardirq = entry->flags & TRACE_FLAG_HARDIRQ;
@@ -1350,21 +1380,21 @@ print_lat_fmt(struct trace_iterator *iter, unsigned int trace_idx, int cpu)
        }
        switch (entry->type) {
        case TRACE_FN: {
-               struct ftrace_entry *field = (struct ftrace_entry *)entry;
+               struct ftrace_entry *field;
+
+               trace_assign_type(field, entry);
 
                seq_print_ip_sym(s, field->ip, sym_flags);
                trace_seq_puts(s, " (");
-               if (kretprobed(field->parent_ip))
-                       trace_seq_puts(s, KRETPROBE_MSG);
-               else
-                       seq_print_ip_sym(s, field->parent_ip, sym_flags);
+               seq_print_ip_sym(s, field->parent_ip, sym_flags);
                trace_seq_puts(s, ")\n");
                break;
        }
        case TRACE_CTX:
        case TRACE_WAKE: {
-               struct ctx_switch_entry *field =
-                       (struct ctx_switch_entry *)entry;
+               struct ctx_switch_entry *field;
+
+               trace_assign_type(field, entry);
 
                T = field->next_state < sizeof(state_to_char) ?
                        state_to_char[field->next_state] : 'X';
@@ -1384,7 +1414,9 @@ print_lat_fmt(struct trace_iterator *iter, unsigned int trace_idx, int cpu)
                break;
        }
        case TRACE_SPECIAL: {
-               struct special_entry *field = (struct special_entry *)entry;
+               struct special_entry *field;
+
+               trace_assign_type(field, entry);
 
                trace_seq_printf(s, "# %ld %ld %ld\n",
                                 field->arg1,
@@ -1393,7 +1425,9 @@ print_lat_fmt(struct trace_iterator *iter, unsigned int trace_idx, int cpu)
                break;
        }
        case TRACE_STACK: {
-               struct stack_entry *field = (struct stack_entry *)entry;
+               struct stack_entry *field;
+
+               trace_assign_type(field, entry);
 
                for (i = 0; i < FTRACE_STACK_ENTRIES; i++) {
                        if (i)
@@ -1404,7 +1438,9 @@ print_lat_fmt(struct trace_iterator *iter, unsigned int trace_idx, int cpu)
                break;
        }
        case TRACE_PRINT: {
-               struct print_entry *field = (struct print_entry *)entry;
+               struct print_entry *field;
+
+               trace_assign_type(field, entry);
 
                seq_print_ip_sym(s, field->ip, sym_flags);
                trace_seq_printf(s, ": %s", field->buf);
@@ -1454,7 +1490,9 @@ static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
 
        switch (entry->type) {
        case TRACE_FN: {
-               struct ftrace_entry *field = (struct ftrace_entry *)entry;
+               struct ftrace_entry *field;
+
+               trace_assign_type(field, entry);
 
                ret = seq_print_ip_sym(s, field->ip, sym_flags);
                if (!ret)
@@ -1464,12 +1502,9 @@ static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
                        ret = trace_seq_printf(s, " <-");
                        if (!ret)
                                return TRACE_TYPE_PARTIAL_LINE;
-                       if (kretprobed(field->parent_ip))
-                               ret = trace_seq_puts(s, KRETPROBE_MSG);
-                       else
-                               ret = seq_print_ip_sym(s,
-                                                      field->parent_ip,
-                                                      sym_flags);
+                       ret = seq_print_ip_sym(s,
+                                              field->parent_ip,
+                                              sym_flags);
                        if (!ret)
                                return TRACE_TYPE_PARTIAL_LINE;
                }
@@ -1480,8 +1515,9 @@ static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
        }
        case TRACE_CTX:
        case TRACE_WAKE: {
-               struct ctx_switch_entry *field =
-                       (struct ctx_switch_entry *)entry;
+               struct ctx_switch_entry *field;
+
+               trace_assign_type(field, entry);
 
                S = field->prev_state < sizeof(state_to_char) ?
                        state_to_char[field->prev_state] : 'X';
@@ -1501,7 +1537,9 @@ static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
                break;
        }
        case TRACE_SPECIAL: {
-               struct special_entry *field = (struct special_entry *)entry;
+               struct special_entry *field;
+
+               trace_assign_type(field, entry);
 
                ret = trace_seq_printf(s, "# %ld %ld %ld\n",
                                 field->arg1,
@@ -1512,7 +1550,9 @@ static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
                break;
        }
        case TRACE_STACK: {
-               struct stack_entry *field = (struct stack_entry *)entry;
+               struct stack_entry *field;
+
+               trace_assign_type(field, entry);
 
                for (i = 0; i < FTRACE_STACK_ENTRIES; i++) {
                        if (i) {
@@ -1531,7 +1571,9 @@ static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
                break;
        }
        case TRACE_PRINT: {
-               struct print_entry *field = (struct print_entry *)entry;
+               struct print_entry *field;
+
+               trace_assign_type(field, entry);
 
                seq_print_ip_sym(s, field->ip, sym_flags);
                trace_seq_printf(s, ": %s", field->buf);
@@ -1562,7 +1604,9 @@ static enum print_line_t print_raw_fmt(struct trace_iterator *iter)
 
        switch (entry->type) {
        case TRACE_FN: {
-               struct ftrace_entry *field = (struct ftrace_entry *)entry;
+               struct ftrace_entry *field;
+
+               trace_assign_type(field, entry);
 
                ret = trace_seq_printf(s, "%x %x\n",
                                        field->ip,
@@ -1573,8 +1617,9 @@ static enum print_line_t print_raw_fmt(struct trace_iterator *iter)
        }
        case TRACE_CTX:
        case TRACE_WAKE: {
-               struct ctx_switch_entry *field =
-                       (struct ctx_switch_entry *)entry;
+               struct ctx_switch_entry *field;
+
+               trace_assign_type(field, entry);
 
                S = field->prev_state < sizeof(state_to_char) ?
                        state_to_char[field->prev_state] : 'X';
@@ -1596,7 +1641,9 @@ static enum print_line_t print_raw_fmt(struct trace_iterator *iter)
        }
        case TRACE_SPECIAL:
        case TRACE_STACK: {
-               struct special_entry *field = (struct special_entry *)entry;
+               struct special_entry *field;
+
+               trace_assign_type(field, entry);
 
                ret = trace_seq_printf(s, "# %ld %ld %ld\n",
                                 field->arg1,
@@ -1607,7 +1654,9 @@ static enum print_line_t print_raw_fmt(struct trace_iterator *iter)
                break;
        }
        case TRACE_PRINT: {
-               struct print_entry *field = (struct print_entry *)entry;
+               struct print_entry *field;
+
+               trace_assign_type(field, entry);
 
                trace_seq_printf(s, "# %lx %s", field->ip, field->buf);
                if (entry->flags & TRACE_FLAG_CONT)
@@ -1626,6 +1675,7 @@ do {                                                      \
 
 #define SEQ_PUT_HEX_FIELD_RET(s, x)                    \
 do {                                                   \
+       BUILD_BUG_ON(sizeof(x) > MAX_MEMHEX_BYTES);     \
        if (!trace_seq_putmem_hex(s, &(x), sizeof(x)))  \
                return 0;                               \
 } while (0)
@@ -1648,7 +1698,9 @@ static enum print_line_t print_hex_fmt(struct trace_iterator *iter)
 
        switch (entry->type) {
        case TRACE_FN: {
-               struct ftrace_entry *field = (struct ftrace_entry *)entry;
+               struct ftrace_entry *field;
+
+               trace_assign_type(field, entry);
 
                SEQ_PUT_HEX_FIELD_RET(s, field->ip);
                SEQ_PUT_HEX_FIELD_RET(s, field->parent_ip);
@@ -1656,8 +1708,9 @@ static enum print_line_t print_hex_fmt(struct trace_iterator *iter)
        }
        case TRACE_CTX:
        case TRACE_WAKE: {
-               struct ctx_switch_entry *field =
-                       (struct ctx_switch_entry *)entry;
+               struct ctx_switch_entry *field;
+
+               trace_assign_type(field, entry);
 
                S = field->prev_state < sizeof(state_to_char) ?
                        state_to_char[field->prev_state] : 'X';
@@ -1676,7 +1729,9 @@ static enum print_line_t print_hex_fmt(struct trace_iterator *iter)
        }
        case TRACE_SPECIAL:
        case TRACE_STACK: {
-               struct special_entry *field = (struct special_entry *)entry;
+               struct special_entry *field;
+
+               trace_assign_type(field, entry);
 
                SEQ_PUT_HEX_FIELD_RET(s, field->arg1);
                SEQ_PUT_HEX_FIELD_RET(s, field->arg2);
@@ -1705,15 +1760,18 @@ static enum print_line_t print_bin_fmt(struct trace_iterator *iter)
 
        switch (entry->type) {
        case TRACE_FN: {
-               struct ftrace_entry *field = (struct ftrace_entry *)entry;
+               struct ftrace_entry *field;
+
+               trace_assign_type(field, entry);
 
                SEQ_PUT_FIELD_RET(s, field->ip);
                SEQ_PUT_FIELD_RET(s, field->parent_ip);
                break;
        }
        case TRACE_CTX: {
-               struct ctx_switch_entry *field =
-                       (struct ctx_switch_entry *)entry;
+               struct ctx_switch_entry *field;
+
+               trace_assign_type(field, entry);
 
                SEQ_PUT_FIELD_RET(s, field->prev_pid);
                SEQ_PUT_FIELD_RET(s, field->prev_prio);
@@ -1725,7 +1783,9 @@ static enum print_line_t print_bin_fmt(struct trace_iterator *iter)
        }
        case TRACE_SPECIAL:
        case TRACE_STACK: {
-               struct special_entry *field = (struct special_entry *)entry;
+               struct special_entry *field;
+
+               trace_assign_type(field, entry);
 
                SEQ_PUT_FIELD_RET(s, field->arg1);
                SEQ_PUT_FIELD_RET(s, field->arg2);
@@ -2325,6 +2385,9 @@ tracing_set_trace_write(struct file *filp, const char __user *ubuf,
        struct tracer *t;
        char buf[max_tracer_type_len+1];
        int i;
+       size_t ret;
+
+       ret = cnt;
 
        if (cnt > max_tracer_type_len)
                cnt = max_tracer_type_len;
@@ -2343,7 +2406,11 @@ tracing_set_trace_write(struct file *filp, const char __user *ubuf,
                if (strcmp(t->name, buf) == 0)
                        break;
        }
-       if (!t || t == current_trace)
+       if (!t) {
+               ret = -EINVAL;
+               goto out;
+       }
+       if (t == current_trace)
                goto out;
 
        if (current_trace && current_trace->reset)
@@ -2356,9 +2423,10 @@ tracing_set_trace_write(struct file *filp, const char __user *ubuf,
  out:
        mutex_unlock(&trace_types_lock);
 
-       filp->f_pos += cnt;
+       if (ret > 0)
+               filp->f_pos += ret;
 
-       return cnt;
+       return ret;
 }
 
 static ssize_t
@@ -2473,9 +2541,6 @@ tracing_read_pipe(struct file *filp, char __user *ubuf,
                  size_t cnt, loff_t *ppos)
 {
        struct trace_iterator *iter = filp->private_data;
-#ifdef CONFIG_FTRACE
-       int ftrace_save;
-#endif
        ssize_t sret;
 
        /* return any leftover data */
@@ -2558,20 +2623,6 @@ waitagain:
               offsetof(struct trace_iterator, seq));
        iter->pos = -1;
 
-       /*
-        * We need to stop all tracing on all CPUS to read the
-        * the next buffer. This is a bit expensive, but is
-        * not done often. We fill all what we can read,
-        * and then release the locks again.
-        */
-
-       local_irq_disable();
-#ifdef CONFIG_FTRACE
-       ftrace_save = ftrace_enabled;
-       ftrace_enabled = 0;
-#endif
-       smp_wmb();
-
        while (find_next_entry_inc(iter) != NULL) {
                enum print_line_t ret;
                int len = iter->seq.len;
@@ -2589,11 +2640,6 @@ waitagain:
                        break;
        }
 
-#ifdef CONFIG_FTRACE
-       ftrace_enabled = ftrace_save;
-#endif
-       local_irq_enable();
-
        /* Now copy what we have to the user */
        sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
        if (iter->seq.readpos >= iter->seq.len)
@@ -2924,21 +2970,20 @@ int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
        struct trace_array_cpu *data;
        struct print_entry *entry;
        unsigned long flags, irq_flags;
-       long disabled;
-       int cpu, len = 0, size;
+       int cpu, len = 0, size, pc;
 
        if (!tr->ctrl || tracing_disabled)
                return 0;
 
-       local_irq_save(flags);
+       pc = preempt_count();
+       preempt_disable_notrace();
        cpu = raw_smp_processor_id();
        data = tr->data[cpu];
-       disabled = atomic_inc_return(&data->disabled);
 
-       if (unlikely(disabled != 1))
+       if (unlikely(atomic_read(&data->disabled)))
                goto out;
 
-       spin_lock(&trace_buf_lock);
+       spin_lock_irqsave(&trace_buf_lock, flags);
        len = vsnprintf(trace_buf, TRACE_BUF_SIZE, fmt, args);
 
        len = min(len, TRACE_BUF_SIZE-1);
@@ -2949,7 +2994,7 @@ int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
        if (!event)
                goto out_unlock;
        entry = ring_buffer_event_data(event);
-       tracing_generic_entry_update(&entry->ent, flags);
+       tracing_generic_entry_update(&entry->ent, flags, pc);
        entry->ent.type                 = TRACE_PRINT;
        entry->ip                       = ip;
 
@@ -2958,11 +3003,10 @@ int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
        ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
 
  out_unlock:
-       spin_unlock(&trace_buf_lock);
+       spin_unlock_irqrestore(&trace_buf_lock, flags);
 
  out:
-       atomic_dec(&data->disabled);
-       local_irq_restore(flags);
+       preempt_enable_notrace();
 
        return len;
 }
@@ -3062,7 +3106,7 @@ void ftrace_dump(void)
        dump_ran = 1;
 
        /* No turning back! */
-       ftrace_kill_atomic();
+       ftrace_kill();
 
        for_each_tracing_cpu(cpu) {
                atomic_inc(&global_trace.data[cpu]->disabled);