ftrace: modulize the number of CPU buffers
kernel/trace/trace.c
index 0e4b7119e263e094b55c0ffbc36d05ada915e8d7..5da391c5fb0dc25a3e0ccf75460a22046d0b0c62 100644
 unsigned long __read_mostly    tracing_max_latency = (cycle_t)ULONG_MAX;
 unsigned long __read_mostly    tracing_thresh;
 
+static unsigned long __read_mostly     tracing_nr_buffers;
+static cpumask_t __read_mostly         tracing_buffer_mask;
+
+#define for_each_tracing_cpu(cpu)      \
+       for_each_cpu_mask(cpu, tracing_buffer_mask)
+
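
The new for_each_tracing_cpu() iterator walks only the CPUs whose bit is set in tracing_buffer_mask, instead of every possible CPU. A minimal userspace sketch of the same set-bit iteration, assuming a plain unsigned long in place of the kernel's cpumask_t (all names here are illustrative, not kernel API):

    #include <stdio.h>

    /* iterate over every set bit in a word-sized mask (illustrative only) */
    #define for_each_set_cpu(cpu, mask)                             \
            for ((cpu) = 0; (cpu) < 8 * (int)sizeof(mask); (cpu)++) \
                    if ((mask) & (1UL << (cpu)))

    int main(void)
    {
            unsigned long buffer_mask = 0x5;        /* CPUs 0 and 2 */
            int cpu;

            for_each_set_cpu(cpu, buffer_mask)
                    printf("tracing on cpu %d\n", cpu);
            return 0;
    }
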
+/* dummy trace to disable tracing */
+static struct tracer no_tracer __read_mostly = {
+       .name           = "none",
+};
+
+static int trace_alloc_page(void);
+static int trace_free_page(void);
+
 static int tracing_disabled = 1;
 
-static long
+long
 ns2usecs(cycle_t nsec)
 {
        nsec += 500;
@@ -66,15 +80,34 @@ static struct tracer                *current_trace __read_mostly;
 static int                     max_tracer_type_len;
 
 static DEFINE_MUTEX(trace_types_lock);
-static DECLARE_WAIT_QUEUE_HEAD (trace_wait);
+static DECLARE_WAIT_QUEUE_HEAD(trace_wait);
+
+unsigned long trace_flags = TRACE_ITER_PRINT_PARENT;
+
+void trace_wake_up(void)
+{
+       /*
+        * The runqueue_is_locked() can fail, but this is the best we
+        * have for now:
+        */
+       if (!(trace_flags & TRACE_ITER_BLOCK) && !runqueue_is_locked())
+               wake_up(&trace_wait);
+}
 
 #define ENTRIES_PER_PAGE (PAGE_SIZE / sizeof(struct trace_entry))
 
 static int __init set_nr_entries(char *str)
 {
+       unsigned long nr_entries;
+       int ret;
+
        if (!str)
                return 0;
-       trace_nr_entries = simple_strtoul(str, &str, 0);
+       ret = strict_strtoul(str, 0, &nr_entries);
+       /* nr_entries cannot be zero */
+       if (ret < 0 || nr_entries == 0)
+               return 0;
+       trace_nr_entries = nr_entries;
        return 1;
 }
 __setup("trace_entries=", set_nr_entries);
@@ -84,18 +117,6 @@ unsigned long nsecs_to_usecs(unsigned long nsecs)
        return nsecs / 1000;
 }
 
-enum trace_type {
-       __TRACE_FIRST_TYPE = 0,
-
-       TRACE_FN,
-       TRACE_CTX,
-       TRACE_WAKE,
-       TRACE_STACK,
-       TRACE_SPECIAL,
-
-       __TRACE_LAST_TYPE
-};
-
 enum trace_flag_type {
        TRACE_FLAG_IRQS_OFF             = 0x01,
        TRACE_FLAG_NEED_RESCHED         = 0x02,
@@ -103,18 +124,6 @@ enum trace_flag_type {
        TRACE_FLAG_SOFTIRQ              = 0x08,
 };
 
-enum trace_iterator_flags {
-       TRACE_ITER_PRINT_PARENT         = 0x01,
-       TRACE_ITER_SYM_OFFSET           = 0x02,
-       TRACE_ITER_SYM_ADDR             = 0x04,
-       TRACE_ITER_VERBOSE              = 0x08,
-       TRACE_ITER_RAW                  = 0x10,
-       TRACE_ITER_HEX                  = 0x20,
-       TRACE_ITER_BIN                  = 0x40,
-       TRACE_ITER_BLOCK                = 0x80,
-       TRACE_ITER_STACKTRACE           = 0x100,
-};
-
 #define TRACE_ITER_SYM_MASK \
        (TRACE_ITER_PRINT_PARENT|TRACE_ITER_SYM_OFFSET|TRACE_ITER_SYM_ADDR)
 
@@ -129,12 +138,12 @@ static const char *trace_options[] = {
        "bin",
        "block",
        "stacktrace",
+       "sched-tree",
        NULL
 };
 
-static unsigned trace_flags = TRACE_ITER_PRINT_PARENT;
-
-static DEFINE_SPINLOCK(ftrace_max_lock);
+static raw_spinlock_t ftrace_max_lock =
+       (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
 
 /*
  * Copy the new maximum trace into the separate maximum-trace
@@ -190,7 +199,7 @@ void *head_page(struct trace_array_cpu *data)
        return page_address(page);
 }
 
-static int
+int
 trace_seq_printf(struct trace_seq *s, const char *fmt, ...)
 {
        int len = (PAGE_SIZE - 1) - s->len;
@@ -205,7 +214,7 @@ trace_seq_printf(struct trace_seq *s, const char *fmt, ...)
        va_end(ap);
 
        /* If we can't write it all, don't bother writing anything */
-       if (ret > len)
+       if (ret >= len)
                return 0;
 
        s->len += ret;
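
The `>` to `>=` change matters because vsnprintf() returns the length the formatted string would have had, excluding the terminating NUL; a return value equal to the available room therefore already means truncation. A self-contained sketch of the same all-or-nothing append (struct seq_buf here is illustrative, not the kernel's trace_seq):

    #include <stdarg.h>
    #include <stdio.h>

    #define SEQ_SIZE 4096

    struct seq_buf {
            char buf[SEQ_SIZE];
            int len;
    };

    /* append formatted text, or nothing at all if it would not fit */
    static int seq_printf_all_or_nothing(struct seq_buf *s, const char *fmt, ...)
    {
            int room = (SEQ_SIZE - 1) - s->len;
            va_list ap;
            int ret;

            if (room <= 0)
                    return 0;

            va_start(ap, fmt);
            ret = vsnprintf(s->buf + s->len, room, fmt, ap);
            va_end(ap);

            /* ret == room already implies the output was cut off */
            if (ret >= room)
                    return 0;

            s->len += ret;
            return 1;
    }
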
@@ -251,19 +260,18 @@ trace_seq_putmem(struct trace_seq *s, void *mem, size_t len)
 }
 
 #define HEX_CHARS 17
+static const char hex2asc[] = "0123456789abcdef";
 
 static int
 trace_seq_putmem_hex(struct trace_seq *s, void *mem, size_t len)
 {
        unsigned char hex[HEX_CHARS];
-       unsigned char *data;
+       unsigned char *data = mem;
        unsigned char byte;
        int i, j;
 
        BUG_ON(len >= HEX_CHARS);
 
-       data = mem;
-
 #ifdef __BIG_ENDIAN
        for (i = 0, j = 0; i < len; i++) {
 #else
@@ -271,22 +279,10 @@ trace_seq_putmem_hex(struct trace_seq *s, void *mem, size_t len)
 #endif
                byte = data[i];
 
-               hex[j]   = byte & 0x0f;
-               if (hex[j] >= 10)
-                       hex[j] += 'a' - 10;
-               else
-                       hex[j] += '0';
-               j++;
-
-               hex[j] = byte >> 4;
-               if (hex[j] >= 10)
-                       hex[j] += 'a' - 10;
-               else
-                       hex[j] += '0';
-               j++;
+               hex[j++] = hex2asc[byte & 0x0f];
+               hex[j++] = hex2asc[byte >> 4];
        }
-       hex[j] = ' ';
-       j++;
+       hex[j++] = ' ';
 
        return trace_seq_putmem(s, hex, j);
 }
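
A sketch of the table-driven conversion in isolation; note it emits the low nibble first, matching the open-coded arithmetic it replaces:

    #include <stdio.h>

    static const char hex2asc[] = "0123456789abcdef";

    /* render one byte as two hex digits, low nibble first (as above) */
    static void byte_to_hex(unsigned char byte, char out[2])
    {
            out[0] = hex2asc[byte & 0x0f];
            out[1] = hex2asc[byte >> 4];
    }

    int main(void)
    {
            char d[2];

            byte_to_hex(0xa5, d);
            printf("%c%c\n", d[0], d[1]);   /* prints "5a" */
            return 0;
    }
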
@@ -336,16 +332,16 @@ update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
        int i;
 
        WARN_ON_ONCE(!irqs_disabled());
-       spin_lock(&ftrace_max_lock);
+       __raw_spin_lock(&ftrace_max_lock);
        /* clear out all the previous traces */
-       for_each_possible_cpu(i) {
+       for_each_tracing_cpu(i) {
                data = tr->data[i];
                flip_trace(max_tr.data[i], data);
                tracing_reset(data);
        }
 
        __update_max_tr(tr, tsk, cpu);
-       spin_unlock(&ftrace_max_lock);
+       __raw_spin_unlock(&ftrace_max_lock);
 }
 
 /**
@@ -361,15 +357,15 @@ update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
        int i;
 
        WARN_ON_ONCE(!irqs_disabled());
-       spin_lock(&ftrace_max_lock);
-       for_each_possible_cpu(i)
+       __raw_spin_lock(&ftrace_max_lock);
+       for_each_tracing_cpu(i)
                tracing_reset(max_tr.data[i]);
 
        flip_trace(max_tr.data[cpu], data);
        tracing_reset(data);
 
        __update_max_tr(tr, tsk, cpu);
-       spin_unlock(&ftrace_max_lock);
+       __raw_spin_unlock(&ftrace_max_lock);
 }
 
 int register_tracer(struct tracer *type)
@@ -408,7 +404,7 @@ int register_tracer(struct tracer *type)
                 * internal tracing to verify that everything is in order.
                 * If we fail, we do not register this tracer.
                 */
-               for_each_possible_cpu(i) {
+               for_each_tracing_cpu(i) {
                        data = tr->data[i];
                        if (!head_page(data))
                                continue;
@@ -427,7 +423,7 @@ int register_tracer(struct tracer *type)
                        goto out;
                }
                /* Only reset on passing, to avoid touching corrupted buffers */
-               for_each_possible_cpu(i) {
+               for_each_tracing_cpu(i) {
                        data = tr->data[i];
                        if (!head_page(data))
                                continue;
@@ -638,7 +634,7 @@ tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags)
        pc = preempt_count();
 
        entry->preempt_count    = pc & 0xff;
-       entry->pid              = tsk->pid;
+       entry->pid              = (tsk) ? tsk->pid : 0;
        entry->t                = ftrace_now(raw_smp_processor_id());
        entry->flags = (irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) |
                ((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
@@ -653,16 +649,15 @@ trace_function(struct trace_array *tr, struct trace_array_cpu *data,
        struct trace_entry *entry;
        unsigned long irq_flags;
 
-       spin_lock_irqsave(&data->lock, irq_flags);
+       raw_local_irq_save(irq_flags);
+       __raw_spin_lock(&data->lock);
        entry                   = tracing_get_trace_entry(tr, data);
        tracing_generic_entry_update(entry, flags);
        entry->type             = TRACE_FN;
        entry->fn.ip            = ip;
        entry->fn.parent_ip     = parent_ip;
-       spin_unlock_irqrestore(&data->lock, irq_flags);
-
-       if (!(trace_flags & TRACE_ITER_BLOCK))
-               wake_up(&trace_wait);
+       __raw_spin_unlock(&data->lock);
+       raw_local_irq_restore(irq_flags);
 }
 
 void
@@ -674,23 +669,26 @@ ftrace(struct trace_array *tr, struct trace_array_cpu *data,
 }
 
 void
-trace_special(struct trace_array *tr, struct trace_array_cpu *data,
-             unsigned long arg1, unsigned long arg2, unsigned long arg3)
+__trace_special(void *__tr, void *__data,
+               unsigned long arg1, unsigned long arg2, unsigned long arg3)
 {
+       struct trace_array_cpu *data = __data;
+       struct trace_array *tr = __tr;
        struct trace_entry *entry;
        unsigned long irq_flags;
 
-       spin_lock_irqsave(&data->lock, irq_flags);
+       raw_local_irq_save(irq_flags);
+       __raw_spin_lock(&data->lock);
        entry                   = tracing_get_trace_entry(tr, data);
        tracing_generic_entry_update(entry, 0);
        entry->type             = TRACE_SPECIAL;
        entry->special.arg1     = arg1;
        entry->special.arg2     = arg2;
        entry->special.arg3     = arg3;
-       spin_unlock_irqrestore(&data->lock, irq_flags);
+       __raw_spin_unlock(&data->lock);
+       raw_local_irq_restore(irq_flags);
 
-       if (!(trace_flags & TRACE_ITER_BLOCK))
-               wake_up(&trace_wait);
+       trace_wake_up();
 }
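
The pattern throughout these hunks: disable interrupts by hand, take the per-CPU buffer lock raw (bypassing lockdep), write the entry, unlock, and only then call trace_wake_up(), so no wakeup ever runs with the buffer lock held. A pthread analogue of that ordering (a sketch of the pattern only, not the kernel primitives):

    #include <pthread.h>

    static pthread_mutex_t buf_lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t  buf_wait = PTHREAD_COND_INITIALIZER;
    static int buf_entries;

    /* write an entry under the lock, notify readers only after unlocking */
    static void record_and_wake(void)
    {
            pthread_mutex_lock(&buf_lock);
            buf_entries++;                          /* the "trace entry" */
            pthread_mutex_unlock(&buf_lock);

            pthread_cond_broadcast(&buf_wait);      /* analogue of trace_wake_up() */
    }

    int main(void)
    {
            record_and_wake();
            return 0;
    }
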
 
 void __trace_stack(struct trace_array *tr,
@@ -728,7 +726,8 @@ tracing_sched_switch_trace(struct trace_array *tr,
        struct trace_entry *entry;
        unsigned long irq_flags;
 
-       spin_lock_irqsave(&data->lock, irq_flags);
+       raw_local_irq_save(irq_flags);
+       __raw_spin_lock(&data->lock);
        entry                   = tracing_get_trace_entry(tr, data);
        tracing_generic_entry_update(entry, flags);
        entry->type             = TRACE_CTX;
@@ -737,11 +736,10 @@ tracing_sched_switch_trace(struct trace_array *tr,
        entry->ctx.prev_state   = prev->state;
        entry->ctx.next_pid     = next->pid;
        entry->ctx.next_prio    = next->prio;
+       entry->ctx.next_state   = next->state;
        __trace_stack(tr, data, flags, 4);
-       spin_unlock_irqrestore(&data->lock, irq_flags);
-
-       if (!(trace_flags & TRACE_ITER_BLOCK))
-               wake_up(&trace_wait);
+       __raw_spin_unlock(&data->lock);
+       raw_local_irq_restore(irq_flags);
 }
 
 void
@@ -754,7 +752,8 @@ tracing_sched_wakeup_trace(struct trace_array *tr,
        struct trace_entry *entry;
        unsigned long irq_flags;
 
-       spin_lock_irqsave(&data->lock, irq_flags);
+       raw_local_irq_save(irq_flags);
+       __raw_spin_lock(&data->lock);
        entry                   = tracing_get_trace_entry(tr, data);
        tracing_generic_entry_update(entry, flags);
        entry->type             = TRACE_WAKE;
@@ -763,11 +762,12 @@ tracing_sched_wakeup_trace(struct trace_array *tr,
        entry->ctx.prev_state   = curr->state;
        entry->ctx.next_pid     = wakee->pid;
        entry->ctx.next_prio    = wakee->prio;
+       entry->ctx.next_state   = wakee->state;
        __trace_stack(tr, data, flags, 5);
-       spin_unlock_irqrestore(&data->lock, irq_flags);
+       __raw_spin_unlock(&data->lock);
+       raw_local_irq_restore(irq_flags);
 
-       if (!(trace_flags & TRACE_ITER_BLOCK))
-               wake_up(&trace_wait);
+       trace_wake_up();
 }
 
 #ifdef CONFIG_FTRACE
@@ -853,7 +853,7 @@ find_next_entry(struct trace_iterator *iter, int *ent_cpu)
        int next_cpu = -1;
        int cpu;
 
-       for_each_possible_cpu(cpu) {
+       for_each_tracing_cpu(cpu) {
                if (!head_page(tr->data[cpu]))
                        continue;
                ent = trace_entry_idx(tr, tr->data[cpu], iter, cpu);
@@ -960,8 +960,10 @@ static void *s_start(struct seq_file *m, loff_t *pos)
 
        mutex_lock(&trace_types_lock);
 
-       if (!current_trace || current_trace != iter->trace)
+       if (!current_trace || current_trace != iter->trace) {
+               mutex_unlock(&trace_types_lock);
                return NULL;
+       }
 
        atomic_inc(&trace_record_cmdline_disabled);
 
@@ -976,7 +978,7 @@ static void *s_start(struct seq_file *m, loff_t *pos)
                iter->prev_ent = NULL;
                iter->prev_cpu = -1;
 
-               for_each_possible_cpu(i) {
+               for_each_tracing_cpu(i) {
                        iter->next_idx[i] = 0;
                        iter->next_page[i] = NULL;
                }
@@ -1093,7 +1095,7 @@ print_trace_header(struct seq_file *m, struct trace_iterator *iter)
        if (type)
                name = type->name;
 
-       for_each_possible_cpu(cpu) {
+       for_each_tracing_cpu(cpu) {
                if (head_page(tr->data[cpu])) {
                        total += tr->data[cpu]->trace_idx;
                        if (tr->data[cpu]->trace_idx > tr->entries)
@@ -1165,12 +1167,12 @@ lat_print_generic(struct trace_seq *s, struct trace_entry *entry, int cpu)
 
        hardirq = entry->flags & TRACE_FLAG_HARDIRQ;
        softirq = entry->flags & TRACE_FLAG_SOFTIRQ;
-       if (hardirq && softirq)
+       if (hardirq && softirq) {
                trace_seq_putc(s, 'H');
-       else {
-               if (hardirq)
+       } else {
+               if (hardirq) {
                        trace_seq_putc(s, 'h');
-               else {
+               } else {
                        if (softirq)
                                trace_seq_putc(s, 's');
                        else
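
For reference, the character the (now correctly braced) cascade computes, as a standalone sketch:

    #include <stdio.h>

    /* pick the latency-format context character, as in lat_print_generic() */
    static char irq_context_char(int hardirq, int softirq)
    {
            if (hardirq && softirq)
                    return 'H';     /* hardirq preempted a softirq */
            if (hardirq)
                    return 'h';
            if (softirq)
                    return 's';
            return '.';             /* plain process context */
    }

    int main(void)
    {
            printf("%c\n", irq_context_char(1, 1));  /* H */
            return 0;
    }
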
@@ -1212,8 +1214,9 @@ print_lat_fmt(struct trace_iterator *iter, unsigned int trace_idx, int cpu)
        unsigned long abs_usecs;
        unsigned long rel_usecs;
        char *comm;
-       int S;
+       int S, T;
        int i;
+       unsigned state;
 
        if (!next_entry)
                next_entry = entry;
@@ -1232,10 +1235,8 @@ print_lat_fmt(struct trace_iterator *iter, unsigned int trace_idx, int cpu)
                                 abs_usecs % 1000, rel_usecs/1000,
                                 rel_usecs % 1000);
        } else {
-               if (entry->type != TRACE_STACK) {
-                       lat_print_generic(s, entry, cpu);
-                       lat_print_timestamp(s, abs_usecs, rel_usecs);
-               }
+               lat_print_generic(s, entry, cpu);
+               lat_print_timestamp(s, abs_usecs, rel_usecs);
        }
        switch (entry->type) {
        case TRACE_FN:
@@ -1246,19 +1247,22 @@ print_lat_fmt(struct trace_iterator *iter, unsigned int trace_idx, int cpu)
                break;
        case TRACE_CTX:
        case TRACE_WAKE:
-               S = entry->ctx.prev_state < sizeof(state_to_char) ?
-                       state_to_char[entry->ctx.prev_state] : 'X';
+               T = entry->ctx.next_state < sizeof(state_to_char) ?
+                       state_to_char[entry->ctx.next_state] : 'X';
+
+               state = entry->ctx.prev_state ? __ffs(entry->ctx.prev_state) + 1 : 0;
+               S = state < sizeof(state_to_char) - 1 ? state_to_char[state] : 'X';
                comm = trace_find_cmdline(entry->ctx.next_pid);
-               trace_seq_printf(s, " %5d:%3d:%c %s %5d:%3d %s\n",
+               trace_seq_printf(s, " %5d:%3d:%c %s %5d:%3d:%c %s\n",
                                 entry->ctx.prev_pid,
                                 entry->ctx.prev_prio,
                                 S, entry->type == TRACE_CTX ? "==>" : "  +",
                                 entry->ctx.next_pid,
                                 entry->ctx.next_prio,
-                                comm);
+                                T, comm);
                break;
        case TRACE_SPECIAL:
-               trace_seq_printf(s, " %lx %lx %lx\n",
+               trace_seq_printf(s, "# %ld %ld %ld\n",
                                 entry->special.arg1,
                                 entry->special.arg2,
                                 entry->special.arg3);
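
prev_state is a bitmask of TASK_* bits, so it is first collapsed to an index with __ffs() before looking up state_to_char[]; an out-of-range value prints as 'X'. A userspace sketch using libc's 1-based ffs(), which equals __ffs()+1 for nonzero input (the string contents are assumed to mirror the kernel's TASK_STATE_TO_CHAR_STR of this era):

    #include <stdio.h>
    #include <strings.h>    /* ffs() */

    static const char state_to_char[] = "RSDTtZX";

    /* collapse a one-bit task state mask to its letter, 'X' if unknown */
    static char task_state_char(unsigned long state)
    {
            unsigned idx = state ? (unsigned)ffs((int)state) : 0;

            return idx < sizeof(state_to_char) - 1 ? state_to_char[idx] : 'X';
    }

    int main(void)
    {
            printf("%c %c %c\n",
                   task_state_char(0),      /* R: running */
                   task_state_char(1),      /* S: interruptible sleep */
                   task_state_char(2));     /* D: uninterruptible sleep */
            return 0;
    }
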
@@ -1287,7 +1291,7 @@ static int print_trace_fmt(struct trace_iterator *iter)
        unsigned long secs;
        char *comm;
        int ret;
-       int S;
+       int S, T;
        int i;
 
        entry = iter->ent;
@@ -1298,17 +1302,15 @@ static int print_trace_fmt(struct trace_iterator *iter)
        usec_rem = do_div(t, 1000000ULL);
        secs = (unsigned long)t;
 
-       if (entry->type != TRACE_STACK) {
-               ret = trace_seq_printf(s, "%16s-%-5d ", comm, entry->pid);
-               if (!ret)
-                       return 0;
-               ret = trace_seq_printf(s, "[%02d] ", iter->cpu);
-               if (!ret)
-                       return 0;
-               ret = trace_seq_printf(s, "%5lu.%06lu: ", secs, usec_rem);
-               if (!ret)
-                       return 0;
-       }
+       ret = trace_seq_printf(s, "%16s-%-5d ", comm, entry->pid);
+       if (!ret)
+               return 0;
+       ret = trace_seq_printf(s, "[%02d] ", iter->cpu);
+       if (!ret)
+               return 0;
+       ret = trace_seq_printf(s, "%5lu.%06lu: ", secs, usec_rem);
+       if (!ret)
+               return 0;
 
        switch (entry->type) {
        case TRACE_FN:
@@ -1333,18 +1335,21 @@ static int print_trace_fmt(struct trace_iterator *iter)
        case TRACE_WAKE:
                S = entry->ctx.prev_state < sizeof(state_to_char) ?
                        state_to_char[entry->ctx.prev_state] : 'X';
-               ret = trace_seq_printf(s, " %5d:%3d:%c %s %5d:%3d\n",
+               T = entry->ctx.next_state < sizeof(state_to_char) ?
+                       state_to_char[entry->ctx.next_state] : 'X';
+               ret = trace_seq_printf(s, " %5d:%3d:%c %s %5d:%3d:%c\n",
                                       entry->ctx.prev_pid,
                                       entry->ctx.prev_prio,
                                       S,
                                       entry->type == TRACE_CTX ? "==>" : "  +",
                                       entry->ctx.next_pid,
-                                      entry->ctx.next_prio);
+                                      entry->ctx.next_prio,
+                                      T);
                if (!ret)
                        return 0;
                break;
        case TRACE_SPECIAL:
-               ret = trace_seq_printf(s, " %lx %lx %lx\n",
+               ret = trace_seq_printf(s, "# %ld %ld %ld\n",
                                 entry->special.arg1,
                                 entry->special.arg2,
                                 entry->special.arg3);
@@ -1376,7 +1381,7 @@ static int print_raw_fmt(struct trace_iterator *iter)
        struct trace_seq *s = &iter->seq;
        struct trace_entry *entry;
        int ret;
-       int S;
+       int S, T;
 
        entry = iter->ent;
 
@@ -1396,20 +1401,23 @@ static int print_raw_fmt(struct trace_iterator *iter)
        case TRACE_WAKE:
                S = entry->ctx.prev_state < sizeof(state_to_char) ?
                        state_to_char[entry->ctx.prev_state] : 'X';
+               T = entry->ctx.next_state < sizeof(state_to_char) ?
+                       state_to_char[entry->ctx.next_state] : 'X';
                if (entry->type == TRACE_WAKE)
                        S = '+';
-               ret = trace_seq_printf(s, "%d %d %c %d %d\n",
+               ret = trace_seq_printf(s, "%d %d %c %d %d %c\n",
                                       entry->ctx.prev_pid,
                                       entry->ctx.prev_prio,
                                       S,
                                       entry->ctx.next_pid,
-                                      entry->ctx.next_prio);
+                                      entry->ctx.next_prio,
+                                      T);
                if (!ret)
                        return 0;
                break;
        case TRACE_SPECIAL:
        case TRACE_STACK:
-               ret = trace_seq_printf(s, " %lx %lx %lx\n",
+               ret = trace_seq_printf(s, "# %ld %ld %ld\n",
                                 entry->special.arg1,
                                 entry->special.arg2,
                                 entry->special.arg3);
@@ -1437,7 +1445,7 @@ static int print_hex_fmt(struct trace_iterator *iter)
        struct trace_seq *s = &iter->seq;
        unsigned char newline = '\n';
        struct trace_entry *entry;
-       int S;
+       int S, T;
 
        entry = iter->ent;
 
@@ -1454,6 +1462,8 @@ static int print_hex_fmt(struct trace_iterator *iter)
        case TRACE_WAKE:
                S = entry->ctx.prev_state < sizeof(state_to_char) ?
                        state_to_char[entry->ctx.prev_state] : 'X';
+               T = entry->ctx.next_state < sizeof(state_to_char) ?
+                       state_to_char[entry->ctx.next_state] : 'X';
                if (entry->type == TRACE_WAKE)
                        S = '+';
                SEQ_PUT_HEX_FIELD_RET(s, entry->ctx.prev_pid);
@@ -1462,6 +1472,7 @@ static int print_hex_fmt(struct trace_iterator *iter)
                SEQ_PUT_HEX_FIELD_RET(s, entry->ctx.next_pid);
                SEQ_PUT_HEX_FIELD_RET(s, entry->ctx.next_prio);
                SEQ_PUT_HEX_FIELD_RET(s, entry->fn.parent_ip);
+               SEQ_PUT_HEX_FIELD_RET(s, T);
                break;
        case TRACE_SPECIAL:
        case TRACE_STACK:
@@ -1497,6 +1508,7 @@ static int print_bin_fmt(struct trace_iterator *iter)
                SEQ_PUT_FIELD_RET(s, entry->ctx.prev_state);
                SEQ_PUT_FIELD_RET(s, entry->ctx.next_pid);
                SEQ_PUT_FIELD_RET(s, entry->ctx.next_prio);
+               SEQ_PUT_FIELD_RET(s, entry->ctx.next_state);
                break;
        case TRACE_SPECIAL:
        case TRACE_STACK:
@@ -1513,7 +1525,7 @@ static int trace_empty(struct trace_iterator *iter)
        struct trace_array_cpu *data;
        int cpu;
 
-       for_each_possible_cpu(cpu) {
+       for_each_tracing_cpu(cpu) {
                data = iter->tr->data[cpu];
 
                if (head_page(data) && data->trace_idx &&
@@ -1526,6 +1538,9 @@ static int trace_empty(struct trace_iterator *iter)
 
 static int print_trace_line(struct trace_iterator *iter)
 {
+       if (iter->trace && iter->trace->print_line)
+               return iter->trace->print_line(iter);
+
        if (trace_flags & TRACE_ITER_BIN)
                return print_bin_fmt(iter);
 
@@ -1759,9 +1774,102 @@ static struct file_operations tracing_lt_fops = {
 };
 
 static struct file_operations show_traces_fops = {
-       .open = show_traces_open,
-       .read = seq_read,
-       .release = seq_release,
+       .open           = show_traces_open,
+       .read           = seq_read,
+       .release        = seq_release,
+};
+
+/*
+ * Only trace on a CPU if the bitmask is set:
+ */
+static cpumask_t tracing_cpumask = CPU_MASK_ALL;
+
+/*
+ * When tracing/tracing_cpu_mask is modified then this holds
+ * the new bitmask we are about to install:
+ */
+static cpumask_t tracing_cpumask_new;
+
+/*
+ * The tracer itself will not take this lock, but still we want
+ * to provide a consistent cpumask to user-space:
+ */
+static DEFINE_MUTEX(tracing_cpumask_update_lock);
+
+/*
+ * Temporary storage for the character representation of the
+ * CPU bitmask (and one more byte for the newline):
+ */
+static char mask_str[NR_CPUS + 1];
+
+static ssize_t
+tracing_cpumask_read(struct file *filp, char __user *ubuf,
+                    size_t count, loff_t *ppos)
+{
+       int len;
+
+       mutex_lock(&tracing_cpumask_update_lock);
+
+       len = cpumask_scnprintf(mask_str, count, tracing_cpumask);
+       if (count - len < 2) {
+               count = -EINVAL;
+               goto out_err;
+       }
+       len += sprintf(mask_str + len, "\n");
+       count = simple_read_from_buffer(ubuf, count, ppos, mask_str, NR_CPUS+1);
+
+out_err:
+       mutex_unlock(&tracing_cpumask_update_lock);
+
+       return count;
+}
+
+static ssize_t
+tracing_cpumask_write(struct file *filp, const char __user *ubuf,
+                     size_t count, loff_t *ppos)
+{
+       int err, cpu;
+
+       mutex_lock(&tracing_cpumask_update_lock);
+       err = cpumask_parse_user(ubuf, count, tracing_cpumask_new);
+       if (err)
+               goto err_unlock;
+
+       raw_local_irq_disable();
+       __raw_spin_lock(&ftrace_max_lock);
+       for_each_tracing_cpu(cpu) {
+               /*
+                * Increase/decrease the disabled counter if we are
+                * about to flip a bit in the cpumask:
+                */
+               if (cpu_isset(cpu, tracing_cpumask) &&
+                               !cpu_isset(cpu, tracing_cpumask_new)) {
+                       atomic_inc(&global_trace.data[cpu]->disabled);
+               }
+               if (!cpu_isset(cpu, tracing_cpumask) &&
+                               cpu_isset(cpu, tracing_cpumask_new)) {
+                       atomic_dec(&global_trace.data[cpu]->disabled);
+               }
+       }
+       __raw_spin_unlock(&ftrace_max_lock);
+       raw_local_irq_enable();
+
+       tracing_cpumask = tracing_cpumask_new;
+
+       mutex_unlock(&tracing_cpumask_update_lock);
+
+       return count;
+
+err_unlock:
+       mutex_unlock(&tracing_cpumask_update_lock);
+
+       return err;
+}
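
Each bit flipped in the cpumask adjusts that CPU's disabled counter: clearing a bit increments it, setting one decrements it, and the counters stay balanced because the whole update runs under ftrace_max_lock with IRQs off. The same delta logic on plain bitmasks (sketch, fixed CPU count assumed):

    #include <stdio.h>

    #define NCPUS 8

    static int disabled[NCPUS];

    /* apply the old-mask/new-mask delta to per-CPU disable counters */
    static void apply_cpumask(unsigned long old_mask, unsigned long new_mask)
    {
            int cpu;

            for (cpu = 0; cpu < NCPUS; cpu++) {
                    unsigned long bit = 1UL << cpu;

                    if ((old_mask & bit) && !(new_mask & bit))
                            disabled[cpu]++;        /* tracing switched off */
                    if (!(old_mask & bit) && (new_mask & bit))
                            disabled[cpu]--;        /* tracing switched back on */
            }
    }

    int main(void)
    {
            apply_cpumask(0xff, 0x0f);      /* disable CPUs 4-7 */
            printf("cpu7 disabled count: %d\n", disabled[7]);  /* 1 */
            return 0;
    }
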
+
+static struct file_operations tracing_cpumask_fops = {
+       .open           = tracing_open_generic,
+       .read           = tracing_cpumask_read,
+       .write          = tracing_cpumask_write,
 };
 
 static ssize_t
@@ -1794,8 +1902,7 @@ tracing_iter_ctrl_read(struct file *filp, char __user *ubuf,
        r += sprintf(buf + r, "\n");
        WARN_ON(r >= len + 2);
 
-       r = simple_read_from_buffer(ubuf, cnt, ppos,
-                                   buf, r);
+       r = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
 
        kfree(buf);
 
@@ -1811,8 +1918,8 @@ tracing_iter_ctrl_write(struct file *filp, const char __user *ubuf,
        int neg = 0;
        int i;
 
-       if (cnt > 63)
-               cnt = 63;
+       if (cnt >= sizeof(buf))
+               return -EINVAL;
 
        if (copy_from_user(&buf, ubuf, cnt))
                return -EFAULT;
@@ -1835,6 +1942,11 @@ tracing_iter_ctrl_write(struct file *filp, const char __user *ubuf,
                        break;
                }
        }
+       /*
+        * If no option could be set, return an error:
+        */
+       if (!trace_options[i])
+               return -EINVAL;
 
        filp->f_pos += cnt;
 
@@ -1842,9 +1954,9 @@ tracing_iter_ctrl_write(struct file *filp, const char __user *ubuf,
 }
 
 static struct file_operations tracing_iter_fops = {
-       .open = tracing_open_generic,
-       .read = tracing_iter_ctrl_read,
-       .write = tracing_iter_ctrl_write,
+       .open           = tracing_open_generic,
+       .read           = tracing_iter_ctrl_read,
+       .write          = tracing_iter_ctrl_write,
 };
 
 static const char readme_msg[] =
@@ -1875,8 +1987,8 @@ tracing_readme_read(struct file *filp, char __user *ubuf,
 }
 
 static struct file_operations tracing_readme_fops = {
-       .open = tracing_open_generic,
-       .read = tracing_readme_read,
+       .open           = tracing_open_generic,
+       .read           = tracing_readme_read,
 };
 
 static ssize_t
@@ -1896,18 +2008,21 @@ tracing_ctrl_write(struct file *filp, const char __user *ubuf,
                   size_t cnt, loff_t *ppos)
 {
        struct trace_array *tr = filp->private_data;
-       long val;
        char buf[64];
+       long val;
+       int ret;
 
-       if (cnt > 63)
-               cnt = 63;
+       if (cnt >= sizeof(buf))
+               return -EINVAL;
 
        if (copy_from_user(&buf, ubuf, cnt))
                return -EFAULT;
 
        buf[cnt] = 0;
 
-       val = simple_strtoul(buf, NULL, 10);
+       ret = strict_strtoul(buf, 10, &val);
+       if (ret < 0)
+               return ret;
 
        val = !!val;
 
@@ -1999,10 +2114,10 @@ tracing_max_lat_read(struct file *filp, char __user *ubuf,
        char buf[64];
        int r;
 
-       r = snprintf(buf, 64, "%ld\n",
+       r = snprintf(buf, sizeof(buf), "%ld\n",
                     *ptr == (unsigned long)-1 ? -1 : nsecs_to_usecs(*ptr));
-       if (r > 64)
-               r = 64;
+       if (r > sizeof(buf))
+               r = sizeof(buf);
        return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
 }
 
@@ -2011,18 +2126,21 @@ tracing_max_lat_write(struct file *filp, const char __user *ubuf,
                      size_t cnt, loff_t *ppos)
 {
        long *ptr = filp->private_data;
-       long val;
        char buf[64];
+       long val;
+       int ret;
 
-       if (cnt > 63)
-               cnt = 63;
+       if (cnt >= sizeof(buf))
+               return -EINVAL;
 
        if (copy_from_user(&buf, ubuf, cnt))
                return -EFAULT;
 
        buf[cnt] = 0;
 
-       val = simple_strtoul(buf, NULL, 10);
+       ret = strict_strtoul(buf, 10, &val);
+       if (ret < 0)
+               return ret;
 
        *ptr = val * 1000;
 
@@ -2050,6 +2168,7 @@ static int tracing_open_pipe(struct inode *inode, struct file *filp)
                return -ENOMEM;
 
        iter->tr = &global_trace;
+       iter->trace = current_trace;
 
        filp->private_data = iter;
 
@@ -2076,8 +2195,7 @@ tracing_poll_pipe(struct file *filp, poll_table *poll_table)
                 * Always select as readable when in blocking mode
                 */
                return POLLIN | POLLRDNORM;
-       }
-       else {
+       } else {
                if (!trace_empty(iter))
                        return POLLIN | POLLRDNORM;
                poll_wait(filp, &trace_wait, poll_table);
@@ -2097,6 +2215,8 @@ tracing_read_pipe(struct file *filp, char __user *ubuf,
 {
        struct trace_iterator *iter = filp->private_data;
        struct trace_array_cpu *data;
+       struct trace_array *tr = iter->tr;
+       struct tracer *tracer = iter->trace;
        static cpumask_t mask;
        static int start;
        unsigned long flags;
@@ -2126,8 +2246,10 @@ tracing_read_pipe(struct file *filp, char __user *ubuf,
        start = 0;
 
        while (trace_empty(iter)) {
-               if (!(trace_flags & TRACE_ITER_BLOCK))
-                       return -EWOULDBLOCK;
+
+               if ((filp->f_flags & O_NONBLOCK))
+                       return -EAGAIN;
+
                /*
                 * This is a make-shift waitqueue. The reason we don't use
                 * an actual wait queue is because:
@@ -2148,6 +2270,9 @@ tracing_read_pipe(struct file *filp, char __user *ubuf,
                if (signal_pending(current))
                        return -EINTR;
 
+               if (iter->trace != current_trace)
+                       return 0;
+
                /*
                 * We block until we read something and tracing is disabled.
                 * We still block if tracing is disabled, but we have never
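
Readers now get standard pipe semantics: an empty buffer returns -EAGAIN only when the file was opened O_NONBLOCK, rather than keying off the global TRACE_ITER_BLOCK flag. A condensed sketch of that read-path decision (hypothetical function, not the kernel path):

    #include <errno.h>
    #include <fcntl.h>
    #include <stdio.h>

    /* decide what an empty-buffer read should do, per the hunk above */
    static int empty_read_disposition(int f_flags)
    {
            if (f_flags & O_NONBLOCK)
                    return -EAGAIN;         /* opened non-blocking: do not sleep */
            return 0;                       /* otherwise: sleep and retry */
    }

    int main(void)
    {
            printf("%d\n", empty_read_disposition(O_NONBLOCK));  /* -EAGAIN */
            return 0;
    }
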
@@ -2171,7 +2296,8 @@ tracing_read_pipe(struct file *filp, char __user *ubuf,
                cnt = PAGE_SIZE - 1;
 
        memset(iter, 0, sizeof(*iter));
-       iter->tr = &global_trace;
+       iter->tr = tr;
+       iter->trace = tracer;
        iter->pos = -1;
 
        /*
@@ -2188,7 +2314,7 @@ tracing_read_pipe(struct file *filp, char __user *ubuf,
        ftrace_enabled = 0;
 #endif
        smp_wmb();
-       for_each_possible_cpu(cpu) {
+       for_each_tracing_cpu(cpu) {
                data = iter->tr->data[cpu];
 
                if (!head_page(data) || !data->trace_idx)
@@ -2200,7 +2326,7 @@ tracing_read_pipe(struct file *filp, char __user *ubuf,
 
        for_each_cpu_mask(cpu, mask) {
                data = iter->tr->data[cpu];
-               spin_lock(&data->lock);
+               __raw_spin_lock(&data->lock);
        }
 
        while (find_next_entry_inc(iter) != NULL) {
@@ -2221,7 +2347,7 @@ tracing_read_pipe(struct file *filp, char __user *ubuf,
 
        for_each_cpu_mask(cpu, mask) {
                data = iter->tr->data[cpu];
-               spin_unlock(&data->lock);
+               __raw_spin_unlock(&data->lock);
        }
 
        for_each_cpu_mask(cpu, mask) {
@@ -2251,6 +2377,73 @@ tracing_read_pipe(struct file *filp, char __user *ubuf,
        return read;
 }
 
+static ssize_t
+tracing_entries_read(struct file *filp, char __user *ubuf,
+                    size_t cnt, loff_t *ppos)
+{
+       struct trace_array *tr = filp->private_data;
+       char buf[64];
+       int r;
+
+       r = sprintf(buf, "%lu\n", tr->entries);
+       return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
+}
+
+static ssize_t
+tracing_entries_write(struct file *filp, const char __user *ubuf,
+                     size_t cnt, loff_t *ppos)
+{
+       unsigned long val;
+       char buf[64];
+       int ret;
+
+       if (cnt >= sizeof(buf))
+               return -EINVAL;
+
+       if (copy_from_user(&buf, ubuf, cnt))
+               return -EFAULT;
+
+       buf[cnt] = 0;
+
+       ret = strict_strtoul(buf, 10, &val);
+       if (ret < 0)
+               return ret;
+
+       /* must have at least 1 entry */
+       if (!val)
+               return -EINVAL;
+
+       mutex_lock(&trace_types_lock);
+
+       if (current_trace != &no_tracer) {
+               cnt = -EBUSY;
+               pr_info("ftrace: set current_tracer to none"
+                       " before modifying buffer size\n");
+               goto out;
+       }
+
+       if (val > global_trace.entries) {
+               while (global_trace.entries < val) {
+                       if (trace_alloc_page()) {
+                               cnt = -ENOMEM;
+                               goto out;
+                       }
+               }
+       } else {
+               /* shrink by whole pages, but keep at least val entries */
+               while (global_trace.entries > val + (ENTRIES_PER_PAGE - 1))
+                       trace_free_page();
+       }
+
+       filp->f_pos += cnt;
+
+ out:
+       max_tr.entries = global_trace.entries;
+       mutex_unlock(&trace_types_lock);
+
+       return cnt;
+}
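
Resizing is page-granular: growth calls trace_alloc_page() until the buffer holds at least val entries, and shrinking calls trace_free_page() while the buffer exceeds val by a full page's worth. A sketch of the resulting rounding, with an assumed entry size (the real ENTRIES_PER_PAGE depends on sizeof(struct trace_entry)):

    #include <stdio.h>

    #define PAGE_SIZE        4096
    #define ENTRY_SIZE       64                     /* assumed entry size */
    #define ENTRIES_PER_PAGE (PAGE_SIZE / ENTRY_SIZE)

    /* round a requested entry count to what page-granular allocation yields */
    static unsigned long resize_entries(unsigned long cur, unsigned long val)
    {
            while (cur < val)
                    cur += ENTRIES_PER_PAGE;        /* trace_alloc_page() */
            while (cur > val + (ENTRIES_PER_PAGE - 1))
                    cur -= ENTRIES_PER_PAGE;        /* trace_free_page() */
            return cur;
    }

    int main(void)
    {
            /* asking for 100 entries with 64 per page yields 128 */
            printf("%lu\n", resize_entries(0, 100));
            return 0;
    }
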
+
 static struct file_operations tracing_max_lat_fops = {
        .open           = tracing_open_generic,
        .read           = tracing_max_lat_read,
@@ -2276,6 +2469,12 @@ static struct file_operations tracing_pipe_fops = {
        .release        = tracing_release_pipe,
 };
 
+static struct file_operations tracing_entries_fops = {
+       .open           = tracing_open_generic,
+       .read           = tracing_entries_read,
+       .write          = tracing_entries_write,
+};
+
 #ifdef CONFIG_DYNAMIC_FTRACE
 
 static ssize_t
@@ -2339,6 +2538,11 @@ static __init void tracer_init_debugfs(void)
        if (!entry)
                pr_warning("Could not create debugfs 'iter_ctrl' entry\n");
 
+       entry = debugfs_create_file("tracing_cpumask", 0644, d_tracer,
+                                   NULL, &tracing_cpumask_fops);
+       if (!entry)
+               pr_warning("Could not create debugfs 'tracing_cpumask' entry\n");
+
        entry = debugfs_create_file("latency_trace", 0444, d_tracer,
                                    &global_trace, &tracing_lt_fops);
        if (!entry)
@@ -2382,6 +2586,12 @@ static __init void tracer_init_debugfs(void)
                pr_warning("Could not create debugfs "
                           "'tracing_threash' entry\n");
 
+       entry = debugfs_create_file("trace_entries", 0644, d_tracer,
+                                   &global_trace, &tracing_entries_fops);
+       if (!entry)
+               pr_warning("Could not create debugfs "
+                          "'tracing_threash' entry\n");
+
 #ifdef CONFIG_DYNAMIC_FTRACE
        entry = debugfs_create_file("dyn_ftrace_total_info", 0444, d_tracer,
                                    &ftrace_update_tot_cnt,
@@ -2392,12 +2602,6 @@ static __init void tracer_init_debugfs(void)
 #endif
 }
 
-/* dummy trace to disable tracing */
-static struct tracer no_tracer __read_mostly =
-{
-       .name           = "none",
-};
-
 static int trace_alloc_page(void)
 {
        struct trace_array_cpu *data;
@@ -2407,7 +2611,7 @@ static int trace_alloc_page(void)
        int i;
 
        /* first allocate a page for each CPU */
-       for_each_possible_cpu(i) {
+       for_each_tracing_cpu(i) {
                array = (void *)__get_free_page(GFP_KERNEL);
                if (array == NULL) {
                        printk(KERN_ERR "tracer: failed to allocate page"
@@ -2432,10 +2636,8 @@ static int trace_alloc_page(void)
        }
 
        /* Now that we successfully allocate a page per CPU, add them */
-       for_each_possible_cpu(i) {
+       for_each_tracing_cpu(i) {
                data = global_trace.data[i];
-               spin_lock_init(&data->lock);
-               lockdep_set_class(&data->lock, &data->lock_key);
                page = list_entry(pages.next, struct page, lru);
                list_del_init(&page->lru);
                list_add_tail(&page->lru, &data->trace_pages);
@@ -2443,8 +2645,6 @@ static int trace_alloc_page(void)
 
 #ifdef CONFIG_TRACER_MAX_TRACE
                data = max_tr.data[i];
-               spin_lock_init(&data->lock);
-               lockdep_set_class(&data->lock, &data->lock_key);
                page = list_entry(pages.next, struct page, lru);
                list_del_init(&page->lru);
                list_add_tail(&page->lru, &data->trace_pages);
@@ -2463,6 +2663,55 @@ static int trace_alloc_page(void)
        return -ENOMEM;
 }
 
+static int trace_free_page(void)
+{
+       struct trace_array_cpu *data;
+       struct page *page;
+       struct list_head *p;
+       int i;
+       int ret = 0;
+
+       /* free one page from each buffer */
+       for_each_tracing_cpu(i) {
+               data = global_trace.data[i];
+               p = data->trace_pages.next;
+               if (p == &data->trace_pages) {
+                       /* should never happen */
+                       WARN_ON(1);
+                       tracing_disabled = 1;
+                       ret = -1;
+                       break;
+               }
+               page = list_entry(p, struct page, lru);
+               ClearPageLRU(page);
+               list_del(&page->lru);
+               __free_page(page);
+
+               tracing_reset(data);
+
+#ifdef CONFIG_TRACER_MAX_TRACE
+               data = max_tr.data[i];
+               p = data->trace_pages.next;
+               if (p == &data->trace_pages) {
+                       /* should never happen */
+                       WARN_ON(1);
+                       tracing_disabled = 1;
+                       ret = -1;
+                       break;
+               }
+               page = list_entry(p, struct page, lru);
+               ClearPageLRU(page);
+               list_del(&page->lru);
+               __free_page(page);
+
+               tracing_reset(data);
+#endif
+       }
+       global_trace.entries -= ENTRIES_PER_PAGE;
+
+       return ret;
+}
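
trace_free_page() pops the oldest page off each per-CPU trace_pages list, treating an empty list as a hard error. A minimal userspace version of that list-head pop (modeled on, but not identical to, the kernel's list_head API):

    #include <stdio.h>

    /* minimal circular doubly-linked list, after the kernel's list_head */
    struct list_head {
            struct list_head *next, *prev;
    };

    static void list_del(struct list_head *e)
    {
            e->prev->next = e->next;
            e->next->prev = e->prev;
    }

    /* pop the first node, or NULL if the head points back at itself */
    static struct list_head *list_pop(struct list_head *head)
    {
            struct list_head *p = head->next;

            if (p == head)
                    return NULL;    /* empty: "should never happen" above */
            list_del(p);
            return p;
    }

    int main(void)
    {
            struct list_head head = { &head, &head };

            printf("%s\n", list_pop(&head) ? "got a page" : "empty");
            return 0;
    }
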
+
 __init static int tracer_alloc_buffers(void)
 {
        struct trace_array_cpu *data;
@@ -2474,8 +2723,12 @@ __init static int tracer_alloc_buffers(void)
 
        global_trace.ctrl = tracer_enabled;
 
+       /* TODO: make the number of buffers hot-pluggable with CPUs */
+       tracing_nr_buffers = num_possible_cpus();
+       tracing_buffer_mask = cpu_possible_map;
+
        /* Allocate the first page for all buffers */
-       for_each_possible_cpu(i) {
+       for_each_tracing_cpu(i) {
                data = global_trace.data[i] = &per_cpu(global_trace_cpu, i);
                max_tr.data[i] = &per_cpu(max_data, i);
 
@@ -2493,6 +2746,9 @@ __init static int tracer_alloc_buffers(void)
                /* use the LRU flag to differentiate the two buffers */
                ClearPageLRU(page);
 
+               data->lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
+               max_tr.data[i]->lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
+
 /* Only allocate if we are actually using the max trace */
 #ifdef CONFIG_TRACER_MAX_TRACE
                array = (void *)__get_free_page(GFP_KERNEL);