unsigned long __read_mostly tracing_max_latency = (cycle_t)ULONG_MAX;
unsigned long __read_mostly tracing_thresh;
+/* number of per-CPU trace buffers, and the mask of CPUs that own one */
+static unsigned long __read_mostly tracing_nr_buffers;
+static cpumask_t __read_mostly tracing_buffer_mask;
+
+/* iterate only over CPUs that actually have a trace buffer allocated */
+#define for_each_tracing_cpu(cpu) \
+ for_each_cpu_mask(cpu, tracing_buffer_mask)
+
+/* dummy trace to disable tracing */
+static struct tracer no_tracer __read_mostly = {
+ .name = "none",
+};
+
+/* forward declarations: buffer-resizing helpers defined later in this file */
+static int trace_alloc_page(void);
+static int trace_free_page(void);
+
static int tracing_disabled = 1;
-static long
+long
ns2usecs(cycle_t nsec)
{
nsec += 500;
+/*
+ * Parse the boot-time "trace_entries=N" parameter, which overrides the
+ * default number of entries per trace buffer.  Malformed and zero
+ * values are rejected (returning 0 tells the setup code the parameter
+ * was not consumed).
+ */
static int __init set_nr_entries(char *str)
{
+ unsigned long nr_entries;
+ int ret;
+
if (!str)
return 0;
- trace_nr_entries = simple_strtoul(str, &str, 0);
+ ret = strict_strtoul(str, 0, &nr_entries);
+ /* nr_entries can not be zero */
+ if (ret < 0 || nr_entries == 0)
+ return 0;
+ trace_nr_entries = nr_entries;
return 1;
}
__setup("trace_entries=", set_nr_entries);
return nsecs / 1000;
}
-enum trace_type {
- __TRACE_FIRST_TYPE = 0,
-
- TRACE_FN,
- TRACE_CTX,
- TRACE_WAKE,
- TRACE_STACK,
- TRACE_SPECIAL,
-
- __TRACE_LAST_TYPE
-};
-
enum trace_flag_type {
TRACE_FLAG_IRQS_OFF = 0x01,
TRACE_FLAG_NEED_RESCHED = 0x02,
return page_address(page);
}
-static int
+int
trace_seq_printf(struct trace_seq *s, const char *fmt, ...)
{
int len = (PAGE_SIZE - 1) - s->len;
va_end(ap);
/* If we can't write it all, don't bother writing anything */
- if (ret > len)
+ if (ret >= len)
return 0;
s->len += ret;
}
#define HEX_CHARS 17
+static const char hex2asc[] = "0123456789abcdef";
static int
trace_seq_putmem_hex(struct trace_seq *s, void *mem, size_t len)
{
unsigned char hex[HEX_CHARS];
- unsigned char *data;
+ unsigned char *data = mem;
unsigned char byte;
int i, j;
BUG_ON(len >= HEX_CHARS);
- data = mem;
-
#ifdef __BIG_ENDIAN
for (i = 0, j = 0; i < len; i++) {
#else
#endif
byte = data[i];
- hex[j] = byte & 0x0f;
- if (hex[j] >= 10)
- hex[j] += 'a' - 10;
- else
- hex[j] += '0';
- j++;
-
- hex[j] = byte >> 4;
- if (hex[j] >= 10)
- hex[j] += 'a' - 10;
- else
- hex[j] += '0';
- j++;
+ hex[j++] = hex2asc[byte & 0x0f];
+ hex[j++] = hex2asc[byte >> 4];
}
- hex[j] = ' ';
- j++;
+ hex[j++] = ' ';
return trace_seq_putmem(s, hex, j);
}
WARN_ON_ONCE(!irqs_disabled());
__raw_spin_lock(&ftrace_max_lock);
/* clear out all the previous traces */
- for_each_possible_cpu(i) {
+ for_each_tracing_cpu(i) {
data = tr->data[i];
flip_trace(max_tr.data[i], data);
tracing_reset(data);
WARN_ON_ONCE(!irqs_disabled());
__raw_spin_lock(&ftrace_max_lock);
- for_each_possible_cpu(i)
+ for_each_tracing_cpu(i)
tracing_reset(max_tr.data[i]);
flip_trace(max_tr.data[cpu], data);
* internal tracing to verify that everything is in order.
* If we fail, we do not register this tracer.
*/
- for_each_possible_cpu(i) {
+ for_each_tracing_cpu(i) {
data = tr->data[i];
if (!head_page(data))
continue;
goto out;
}
/* Only reset on passing, to avoid touching corrupted buffers */
- for_each_possible_cpu(i) {
+ for_each_tracing_cpu(i) {
data = tr->data[i];
if (!head_page(data))
continue;
pc = preempt_count();
entry->preempt_count = pc & 0xff;
- entry->pid = tsk->pid;
+ entry->pid = (tsk) ? tsk->pid : 0;
entry->t = ftrace_now(raw_smp_processor_id());
entry->flags = (irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) |
((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
int next_cpu = -1;
int cpu;
- for_each_possible_cpu(cpu) {
+ for_each_tracing_cpu(cpu) {
if (!head_page(tr->data[cpu]))
continue;
ent = trace_entry_idx(tr, tr->data[cpu], iter, cpu);
iter->prev_ent = NULL;
iter->prev_cpu = -1;
- for_each_possible_cpu(i) {
+ for_each_tracing_cpu(i) {
iter->next_idx[i] = 0;
iter->next_page[i] = NULL;
}
if (type)
name = type->name;
- for_each_possible_cpu(cpu) {
+ for_each_tracing_cpu(cpu) {
if (head_page(tr->data[cpu])) {
total += tr->data[cpu]->trace_idx;
if (tr->data[cpu]->trace_idx > tr->entries)
hardirq = entry->flags & TRACE_FLAG_HARDIRQ;
softirq = entry->flags & TRACE_FLAG_SOFTIRQ;
- if (hardirq && softirq)
+ if (hardirq && softirq) {
trace_seq_putc(s, 'H');
- else {
- if (hardirq)
+ } else {
+ if (hardirq) {
trace_seq_putc(s, 'h');
- else {
+ } else {
if (softirq)
trace_seq_putc(s, 's');
else
char *comm;
int S, T;
int i;
+ unsigned state;
if (!next_entry)
next_entry = entry;
break;
case TRACE_CTX:
case TRACE_WAKE:
- S = entry->ctx.prev_state < sizeof(state_to_char) ?
- state_to_char[entry->ctx.prev_state] : 'X';
T = entry->ctx.next_state < sizeof(state_to_char) ?
state_to_char[entry->ctx.next_state] : 'X';
+ state = entry->ctx.prev_state ? __ffs(entry->ctx.prev_state) + 1 : 0;
+ S = state < sizeof(state_to_char) - 1 ? state_to_char[state] : 'X';
comm = trace_find_cmdline(entry->ctx.next_pid);
trace_seq_printf(s, " %5d:%3d:%c %s %5d:%3d:%c %s\n",
entry->ctx.prev_pid,
struct trace_array_cpu *data;
int cpu;
- for_each_possible_cpu(cpu) {
+ for_each_tracing_cpu(cpu) {
data = iter->tr->data[cpu];
if (head_page(data) && data->trace_idx &&
static int print_trace_line(struct trace_iterator *iter)
{
+ if (iter->trace && iter->trace->print_line)
+ return iter->trace->print_line(iter);
+
if (trace_flags & TRACE_ITER_BIN)
return print_bin_fmt(iter);
raw_local_irq_disable();
__raw_spin_lock(&ftrace_max_lock);
- for_each_possible_cpu(cpu) {
+ for_each_tracing_cpu(cpu) {
/*
* Increase/decrease the disabled counter if we are
* about to flip a bit in the cpumask:
int neg = 0;
int i;
- if (cnt > 63)
- cnt = 63;
+ if (cnt >= sizeof(buf))
+ return -EINVAL;
if (copy_from_user(&buf, ubuf, cnt))
return -EFAULT;
size_t cnt, loff_t *ppos)
{
struct trace_array *tr = filp->private_data;
- long val;
char buf[64];
+ long val;
+ int ret;
- if (cnt > 63)
- cnt = 63;
+ if (cnt >= sizeof(buf))
+ return -EINVAL;
if (copy_from_user(&buf, ubuf, cnt))
return -EFAULT;
buf[cnt] = 0;
- val = simple_strtoul(buf, NULL, 10);
+ ret = strict_strtoul(buf, 10, &val);
+ if (ret < 0)
+ return ret;
val = !!val;
char buf[64];
int r;
- r = snprintf(buf, 64, "%ld\n",
+ r = snprintf(buf, sizeof(buf), "%ld\n",
*ptr == (unsigned long)-1 ? -1 : nsecs_to_usecs(*ptr));
- if (r > 64)
- r = 64;
+ if (r > sizeof(buf))
+ r = sizeof(buf);
return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}
size_t cnt, loff_t *ppos)
{
long *ptr = filp->private_data;
- long val;
char buf[64];
+ long val;
+ int ret;
- if (cnt > 63)
- cnt = 63;
+ if (cnt >= sizeof(buf))
+ return -EINVAL;
if (copy_from_user(&buf, ubuf, cnt))
return -EFAULT;
buf[cnt] = 0;
- val = simple_strtoul(buf, NULL, 10);
+ ret = strict_strtoul(buf, 10, &val);
+ if (ret < 0)
+ return ret;
*ptr = val * 1000;
return -ENOMEM;
iter->tr = &global_trace;
+ iter->trace = current_trace;
filp->private_data = iter;
* Always select as readable when in blocking mode
*/
return POLLIN | POLLRDNORM;
- }
- else {
+ } else {
if (!trace_empty(iter))
return POLLIN | POLLRDNORM;
poll_wait(filp, &trace_wait, poll_table);
{
struct trace_iterator *iter = filp->private_data;
struct trace_array_cpu *data;
+ struct trace_array *tr = iter->tr;
+ struct tracer *tracer = iter->trace;
static cpumask_t mask;
static int start;
unsigned long flags;
start = 0;
while (trace_empty(iter)) {
- if (!(trace_flags & TRACE_ITER_BLOCK))
- return -EWOULDBLOCK;
+
+ if ((filp->f_flags & O_NONBLOCK))
+ return -EAGAIN;
+
/*
* This is a make-shift waitqueue. The reason we don't use
* an actual wait queue is because:
if (signal_pending(current))
return -EINTR;
+ if (iter->trace != current_trace)
+ return 0;
+
/*
* We block until we read something and tracing is disabled.
* We still block if tracing is disabled, but we have never
cnt = PAGE_SIZE - 1;
memset(iter, 0, sizeof(*iter));
- iter->tr = &global_trace;
+ iter->tr = tr;
+ iter->trace = tracer;
iter->pos = -1;
/*
ftrace_enabled = 0;
#endif
smp_wmb();
- for_each_possible_cpu(cpu) {
+ for_each_tracing_cpu(cpu) {
data = iter->tr->data[cpu];
if (!head_page(data) || !data->trace_idx)
return read;
}
+/*
+ * debugfs "trace_entries" read handler: report the current number of
+ * entries per CPU trace buffer as a decimal string.
+ */
+static ssize_t
+tracing_entries_read(struct file *filp, char __user *ubuf,
+ size_t cnt, loff_t *ppos)
+{
+ struct trace_array *tr = filp->private_data;
+ char buf[64];
+ int r;
+
+ /* "%lu\n" of an unsigned long is at most ~21 bytes; buf[64] is ample */
+ r = sprintf(buf, "%lu\n", tr->entries);
+ return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
+}
+
+/*
+ * debugfs "trace_entries" write handler: resize every per-CPU trace
+ * buffer to hold (at least) the requested number of entries.  The
+ * current tracer must be "none" while resizing, because pages are
+ * added to / removed from the buffers while nothing may be tracing.
+ * Returns the number of bytes consumed, or a negative errno.
+ */
+static ssize_t
+tracing_entries_write(struct file *filp, const char __user *ubuf,
+ size_t cnt, loff_t *ppos)
+{
+ unsigned long val;
+ char buf[64];
+ ssize_t ret;
+
+ if (cnt >= sizeof(buf))
+ return -EINVAL;
+
+ if (copy_from_user(&buf, ubuf, cnt))
+ return -EFAULT;
+
+ buf[cnt] = 0;
+
+ ret = strict_strtoul(buf, 10, &val);
+ if (ret < 0)
+ return ret;
+
+ /* must have at least 1 entry */
+ if (!val)
+ return -EINVAL;
+
+ mutex_lock(&trace_types_lock);
+
+ if (current_trace != &no_tracer) {
+ ret = -EBUSY;
+ pr_info("ftrace: set current_tracer to none"
+ " before modifying buffer size\n");
+ goto out;
+ }
+
+ if (val > global_trace.entries) {
+ /* grow: add whole pages until the buffers are big enough */
+ while (global_trace.entries < val) {
+ if (trace_alloc_page()) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ }
+ } else {
+ /* shrink: include the number of entries in val (inc of page entries) */
+ while (global_trace.entries > val + (ENTRIES_PER_PAGE - 1))
+ trace_free_page();
+ }
+
+ /*
+ * Advance the offset through the *ppos the VFS passed us, rather
+ * than poking filp->f_pos directly (wrong for pwrite, and racy).
+ */
+ *ppos += cnt;
+ ret = cnt;
+
+ out:
+ max_tr.entries = global_trace.entries;
+ mutex_unlock(&trace_types_lock);
+
+ return ret;
+}
+
static struct file_operations tracing_max_lat_fops = {
.open = tracing_open_generic,
.read = tracing_max_lat_read,
.release = tracing_release_pipe,
};
+/* file operations backing the debugfs "trace_entries" control file */
+static struct file_operations tracing_entries_fops = {
+ .open = tracing_open_generic,
+ .read = tracing_entries_read,
+ .write = tracing_entries_write,
+};
+
#ifdef CONFIG_DYNAMIC_FTRACE
static ssize_t
pr_warning("Could not create debugfs "
"'tracing_threash' entry\n");
+ entry = debugfs_create_file("trace_entries", 0644, d_tracer,
+ &global_trace, &tracing_entries_fops);
+ /* warn with the correct file name (was copy-pasted as 'tracing_threash') */
+ if (!entry)
+ pr_warning("Could not create debugfs "
+ "'trace_entries' entry\n");
+
#ifdef CONFIG_DYNAMIC_FTRACE
entry = debugfs_create_file("dyn_ftrace_total_info", 0444, d_tracer,
&ftrace_update_tot_cnt,
#endif
}
-/* dummy trace to disable tracing */
-static struct tracer no_tracer __read_mostly =
-{
- .name = "none",
-};
-
static int trace_alloc_page(void)
{
struct trace_array_cpu *data;
int i;
/* first allocate a page for each CPU */
- for_each_possible_cpu(i) {
+ for_each_tracing_cpu(i) {
array = (void *)__get_free_page(GFP_KERNEL);
if (array == NULL) {
printk(KERN_ERR "tracer: failed to allocate page"
}
/* Now that we successfully allocate a page per CPU, add them */
- for_each_possible_cpu(i) {
+ for_each_tracing_cpu(i) {
data = global_trace.data[i];
- data->lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
page = list_entry(pages.next, struct page, lru);
list_del_init(&page->lru);
list_add_tail(&page->lru, &data->trace_pages);
#ifdef CONFIG_TRACER_MAX_TRACE
data = max_tr.data[i];
- data->lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
page = list_entry(pages.next, struct page, lru);
list_del_init(&page->lru);
list_add_tail(&page->lru, &data->trace_pages);
return -ENOMEM;
}
+/*
+ * Shrink every per-CPU trace buffer (and its max_tr shadow, when
+ * CONFIG_TRACER_MAX_TRACE is set) by one page.  Called from
+ * tracing_entries_write() with trace_types_lock held and the tracer
+ * set to "none".  Returns 0 on success, or -1 if a buffer was
+ * unexpectedly empty — in which case tracing is disabled outright.
+ */
+static int trace_free_page(void)
+{
+ struct trace_array_cpu *data;
+ struct page *page;
+ struct list_head *p;
+ int i;
+ int ret = 0;
+
+ /* free one page from each buffer */
+ for_each_tracing_cpu(i) {
+ data = global_trace.data[i];
+ p = data->trace_pages.next;
+ if (p == &data->trace_pages) {
+ /* should never happen */
+ WARN_ON(1);
+ tracing_disabled = 1;
+ ret = -1;
+ break;
+ }
+ page = list_entry(p, struct page, lru);
+ /* the LRU flag differentiates global_trace pages from max_tr pages */
+ ClearPageLRU(page);
+ list_del(&page->lru);
+ __free_page(page);
+
+ /* buffer geometry changed: restart this CPU's buffer from scratch */
+ tracing_reset(data);
+
+#ifdef CONFIG_TRACER_MAX_TRACE
+ data = max_tr.data[i];
+ p = data->trace_pages.next;
+ if (p == &data->trace_pages) {
+ /* should never happen */
+ WARN_ON(1);
+ tracing_disabled = 1;
+ ret = -1;
+ break;
+ }
+ page = list_entry(p, struct page, lru);
+ ClearPageLRU(page);
+ list_del(&page->lru);
+ __free_page(page);
+
+ tracing_reset(data);
+#endif
+ }
+ /*
+ * NOTE(review): entries is decremented even when the loop broke out
+ * on error; harmless in practice since tracing_disabled was set, but
+ * worth confirming this accounting is intentional.
+ */
+ global_trace.entries -= ENTRIES_PER_PAGE;
+
+ return ret;
+}
+
__init static int tracer_alloc_buffers(void)
{
struct trace_array_cpu *data;
global_trace.ctrl = tracer_enabled;
+ /* TODO: make the number of buffers hot pluggable with CPUS */
+ tracing_nr_buffers = num_possible_cpus();
+ tracing_buffer_mask = cpu_possible_map;
+
/* Allocate the first page for all buffers */
- for_each_possible_cpu(i) {
+ for_each_tracing_cpu(i) {
data = global_trace.data[i] = &per_cpu(global_trace_cpu, i);
max_tr.data[i] = &per_cpu(max_data, i);
/* use the LRU flag to differentiate the two buffers */
ClearPageLRU(page);
+ data->lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
+ max_tr.data[i]->lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
+
/* Only allocate if we are actually using the max trace */
#ifdef CONFIG_TRACER_MAX_TRACE
array = (void *)__get_free_page(GFP_KERNEL);