#include <linux/init.h>
#include <linux/hash.h>
#include <linux/list.h>
+#include <linux/cpu.h>
#include <linux/fs.h>
#include "trace.h"
struct mutex mutex;
struct ring_buffer_per_cpu **buffers;
+
+#ifdef CONFIG_HOTPLUG_CPU
+ struct notifier_block cpu_notify;
+#endif
};
struct ring_buffer_iter {
*/
extern int ring_buffer_page_too_big(void);
+#ifdef CONFIG_HOTPLUG_CPU
+static int __cpuinit rb_cpu_notify(struct notifier_block *self,
+ unsigned long action, void *hcpu);
+#endif
+
/**
* ring_buffer_alloc - allocate a new ring_buffer
* @size: the size in bytes per cpu that is needed.
if (buffer->pages == 1)
buffer->pages++;
- cpumask_copy(buffer->cpumask, cpu_possible_mask);
+ get_online_cpus();
+ cpumask_copy(buffer->cpumask, cpu_online_mask);
buffer->cpus = nr_cpu_ids;
bsize = sizeof(void *) * nr_cpu_ids;
goto fail_free_buffers;
}
+#ifdef CONFIG_HOTPLUG_CPU
+ buffer->cpu_notify.notifier_call = rb_cpu_notify;
+ buffer->cpu_notify.priority = 0;
+ register_cpu_notifier(&buffer->cpu_notify);
+#endif
+
+ put_online_cpus();
mutex_init(&buffer->mutex);
return buffer;
fail_free_cpumask:
free_cpumask_var(buffer->cpumask);
+ put_online_cpus();
fail_free_buffer:
kfree(buffer);
{
int cpu;
+ get_online_cpus();
+
+#ifdef CONFIG_HOTPLUG_CPU
+ unregister_cpu_notifier(&buffer->cpu_notify);
+#endif
+
for_each_buffer_cpu(buffer, cpu)
rb_free_cpu_buffer(buffer->buffers[cpu]);
+ put_online_cpus();
+
free_cpumask_var(buffer->cpumask);
kfree(buffer);
return size;
mutex_lock(&buffer->mutex);
+ get_online_cpus();
nr_pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
if (size < buffer_size) {
/* easy case, just free pages */
- if (RB_WARN_ON(buffer, nr_pages >= buffer->pages)) {
- mutex_unlock(&buffer->mutex);
- return -1;
- }
+ if (RB_WARN_ON(buffer, nr_pages >= buffer->pages))
+ goto out_fail;
rm_pages = buffer->pages - nr_pages;
* add these pages to the cpu_buffers. Otherwise we just free
* them all and return -ENOMEM;
*/
- if (RB_WARN_ON(buffer, nr_pages <= buffer->pages)) {
- mutex_unlock(&buffer->mutex);
- return -1;
- }
+ if (RB_WARN_ON(buffer, nr_pages <= buffer->pages))
+ goto out_fail;
new_pages = nr_pages - buffer->pages;
rb_insert_pages(cpu_buffer, &pages, new_pages);
}
- if (RB_WARN_ON(buffer, !list_empty(&pages))) {
- mutex_unlock(&buffer->mutex);
- return -1;
- }
+ if (RB_WARN_ON(buffer, !list_empty(&pages)))
+ goto out_fail;
out:
buffer->pages = nr_pages;
+ put_online_cpus();
mutex_unlock(&buffer->mutex);
return size;
list_del_init(&bpage->list);
free_buffer_page(bpage);
}
+ put_online_cpus();
mutex_unlock(&buffer->mutex);
return -ENOMEM;
+
+ /*
+ * Something went totally wrong, and we are too paranoid
+ * to even clean up the mess.
+ */
+ out_fail:
+ put_online_cpus();
+ mutex_unlock(&buffer->mutex);
+ return -1;
}
EXPORT_SYMBOL_GPL(ring_buffer_resize);
{
struct ring_buffer_per_cpu *cpu_buffer;
+ get_online_cpus();
+
if (!cpumask_test_cpu(cpu, buffer->cpumask))
- return;
+ goto out;
cpu_buffer = buffer->buffers[cpu];
atomic_inc(&cpu_buffer->record_disabled);
+ out:
+ put_online_cpus();
}
EXPORT_SYMBOL_GPL(ring_buffer_record_disable_cpu);
{
struct ring_buffer_per_cpu *cpu_buffer;
+ get_online_cpus();
+
if (!cpumask_test_cpu(cpu, buffer->cpumask))
- return;
+ goto out;
cpu_buffer = buffer->buffers[cpu];
atomic_dec(&cpu_buffer->record_disabled);
+ out:
+ put_online_cpus();
}
EXPORT_SYMBOL_GPL(ring_buffer_record_enable_cpu);
unsigned long ring_buffer_entries_cpu(struct ring_buffer *buffer, int cpu)
{
struct ring_buffer_per_cpu *cpu_buffer;
+ unsigned long ret = 0;
+
+ get_online_cpus();
if (!cpumask_test_cpu(cpu, buffer->cpumask))
- return 0;
+ goto out;
cpu_buffer = buffer->buffers[cpu];
- return cpu_buffer->entries;
+ ret = cpu_buffer->entries;
+ out:
+ put_online_cpus();
+
+ return ret;
}
EXPORT_SYMBOL_GPL(ring_buffer_entries_cpu);
unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu)
{
struct ring_buffer_per_cpu *cpu_buffer;
+ unsigned long ret = 0;
+
+ get_online_cpus();
if (!cpumask_test_cpu(cpu, buffer->cpumask))
- return 0;
+ goto out;
cpu_buffer = buffer->buffers[cpu];
- return cpu_buffer->overrun;
+ ret = cpu_buffer->overrun;
+ out:
+ put_online_cpus();
+
+ return ret;
}
EXPORT_SYMBOL_GPL(ring_buffer_overrun_cpu);
unsigned long entries = 0;
int cpu;
+ get_online_cpus();
+
/* if you care about this being correct, lock the buffer */
for_each_buffer_cpu(buffer, cpu) {
cpu_buffer = buffer->buffers[cpu];
entries += cpu_buffer->entries;
}
+ put_online_cpus();
+
return entries;
}
EXPORT_SYMBOL_GPL(ring_buffer_entries);
unsigned long overruns = 0;
int cpu;
+ get_online_cpus();
+
/* if you care about this being correct, lock the buffer */
for_each_buffer_cpu(buffer, cpu) {
cpu_buffer = buffer->buffers[cpu];
overruns += cpu_buffer->overrun;
}
+ put_online_cpus();
+
return overruns;
}
EXPORT_SYMBOL_GPL(ring_buffer_overruns);
*/
void ring_buffer_iter_reset(struct ring_buffer_iter *iter)
{
- struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
+ struct ring_buffer_per_cpu *cpu_buffer;
unsigned long flags;
+ if (!iter)
+ return;
+
+ cpu_buffer = iter->cpu_buffer;
+
spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
rb_iter_reset(iter);
spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
struct buffer_page *reader;
int nr_loops = 0;
- if (!cpumask_test_cpu(cpu, buffer->cpumask))
- return NULL;
-
cpu_buffer = buffer->buffers[cpu];
again:
ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts)
{
struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
- struct ring_buffer_event *event;
+ struct ring_buffer_event *event = NULL;
unsigned long flags;
+ get_online_cpus();
+
+ if (!cpumask_test_cpu(cpu, buffer->cpumask))
+ goto out;
+
spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
event = rb_buffer_peek(buffer, cpu, ts);
spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
+ out:
+ put_online_cpus();
+
return event;
}
struct ring_buffer_event *
ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts)
{
- struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
- struct ring_buffer_event *event;
+ struct ring_buffer_per_cpu *cpu_buffer;
+ struct ring_buffer_event *event = NULL;
unsigned long flags;
+ /* might be called in atomic */
+ preempt_disable();
+
if (!cpumask_test_cpu(cpu, buffer->cpumask))
- return NULL;
+ goto out;
+ cpu_buffer = buffer->buffers[cpu];
spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
event = rb_buffer_peek(buffer, cpu, ts);
if (!event)
- goto out;
+ goto out_unlock;
rb_advance_reader(cpu_buffer);
- out:
+ out_unlock:
spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
+ out:
+ preempt_enable();
+
return event;
}
EXPORT_SYMBOL_GPL(ring_buffer_consume);
ring_buffer_read_start(struct ring_buffer *buffer, int cpu)
{
struct ring_buffer_per_cpu *cpu_buffer;
- struct ring_buffer_iter *iter;
+ struct ring_buffer_iter *iter = NULL;
unsigned long flags;
+ get_online_cpus();
+
if (!cpumask_test_cpu(cpu, buffer->cpumask))
- return NULL;
+ goto out;
iter = kmalloc(sizeof(*iter), GFP_KERNEL);
if (!iter)
- return NULL;
+ goto out;
cpu_buffer = buffer->buffers[cpu];
__raw_spin_unlock(&cpu_buffer->lock);
spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
+ out:
+ put_online_cpus();
+
return iter;
}
EXPORT_SYMBOL_GPL(ring_buffer_read_start);
{
struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
unsigned long flags;
+ int resched;
+
+ /* Can't use get_online_cpus because this can be in atomic */
+ resched = ftrace_preempt_disable();
if (!cpumask_test_cpu(cpu, buffer->cpumask))
- return;
+ goto out;
spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
__raw_spin_unlock(&cpu_buffer->lock);
spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
+ out:
+ ftrace_preempt_enable(resched);
}
EXPORT_SYMBOL_GPL(ring_buffer_reset_cpu);
*/
void ring_buffer_reset(struct ring_buffer *buffer)
{
+ int resched;
int cpu;
+ /* Can't use get_online_cpus because this can be in atomic */
+ resched = ftrace_preempt_disable();
+
for_each_buffer_cpu(buffer, cpu)
ring_buffer_reset_cpu(buffer, cpu);
+
+ ftrace_preempt_enable(resched);
}
EXPORT_SYMBOL_GPL(ring_buffer_reset);
struct ring_buffer_per_cpu *cpu_buffer;
int cpu;
+ int ret = 1;
+
+ get_online_cpus();
+
/* yes this is racy, but if you don't like the race, lock the buffer */
for_each_buffer_cpu(buffer, cpu) {
cpu_buffer = buffer->buffers[cpu];
- if (!rb_per_cpu_empty(cpu_buffer))
- return 0;
+ if (!rb_per_cpu_empty(cpu_buffer)) {
+ ret = 0;
+ break;
+ }
}
+
+ put_online_cpus();
+
- return 1;
+ return ret;
}
EXPORT_SYMBOL_GPL(ring_buffer_empty);
int ring_buffer_empty_cpu(struct ring_buffer *buffer, int cpu)
{
struct ring_buffer_per_cpu *cpu_buffer;
+ int ret = 1;
+
+ get_online_cpus();
if (!cpumask_test_cpu(cpu, buffer->cpumask))
- return 1;
+ goto out;
cpu_buffer = buffer->buffers[cpu];
- return rb_per_cpu_empty(cpu_buffer);
+ ret = rb_per_cpu_empty(cpu_buffer);
+
+ out:
+ put_online_cpus();
+
+ return ret;
}
EXPORT_SYMBOL_GPL(ring_buffer_empty_cpu);
{
struct ring_buffer_per_cpu *cpu_buffer_a;
struct ring_buffer_per_cpu *cpu_buffer_b;
+ int ret = -EINVAL;
+
+ get_online_cpus();
if (!cpumask_test_cpu(cpu, buffer_a->cpumask) ||
!cpumask_test_cpu(cpu, buffer_b->cpumask))
- return -EINVAL;
+ goto out;
/* At least make sure the two buffers are somewhat the same */
if (buffer_a->pages != buffer_b->pages)
- return -EINVAL;
+ goto out;
+
+ ret = -EAGAIN;
if (ring_buffer_flags != RB_BUFFERS_ON)
- return -EAGAIN;
+ goto out;
if (atomic_read(&buffer_a->record_disabled))
- return -EAGAIN;
+ goto out;
if (atomic_read(&buffer_b->record_disabled))
- return -EAGAIN;
+ goto out;
cpu_buffer_a = buffer_a->buffers[cpu];
cpu_buffer_b = buffer_b->buffers[cpu];
if (atomic_read(&cpu_buffer_a->record_disabled))
- return -EAGAIN;
+ goto out;
if (atomic_read(&cpu_buffer_b->record_disabled))
- return -EAGAIN;
+ goto out;
/*
* We can't do a synchronize_sched here because this
atomic_dec(&cpu_buffer_a->record_disabled);
atomic_dec(&cpu_buffer_b->record_disabled);
- return 0;
+ ret = 0;
+ out:
+ put_online_cpus();
+
+ return ret;
}
EXPORT_SYMBOL_GPL(ring_buffer_swap_cpu);
u64 save_timestamp;
int ret = -1;
+ get_online_cpus();
+
+ if (!cpumask_test_cpu(cpu, buffer->cpumask))
+ goto out;
+
/*
* If len is not big enough to hold the page header, then
* we can not copy anything.
*/
if (len <= BUF_PAGE_HDR_SIZE)
- return -1;
+ goto out;
len -= BUF_PAGE_HDR_SIZE;
if (!data_page)
- return -1;
+ goto out;
bpage = *data_page;
if (!bpage)
- return -1;
+ goto out;
spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
reader = rb_get_reader_page(cpu_buffer);
if (!reader)
- goto out;
+ goto out_unlock;
event = rb_reader_event(cpu_buffer);
unsigned int size;
if (full)
- goto out;
+ goto out_unlock;
if (len > (commit - read))
len = (commit - read);
size = rb_event_length(event);
if (len < size)
- goto out;
+ goto out_unlock;
/* save the current timestamp, since the user will need it */
save_timestamp = cpu_buffer->read_stamp;
}
ret = read;
- out:
+ out_unlock:
spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
+ out:
+ put_online_cpus();
+
return ret;
}
}
fs_initcall(rb_init_debugfs);
+
+#ifdef CONFIG_HOTPLUG_CPU
+static int __cpuinit rb_cpu_notify(struct notifier_block *self,
+ unsigned long action, void *hcpu)
+{
+ struct ring_buffer *buffer =
+ container_of(self, struct ring_buffer, cpu_notify);
+ long cpu = (long)hcpu;
+
+ switch (action) {
+ case CPU_UP_PREPARE:
+ case CPU_UP_PREPARE_FROZEN:
+ if (cpumask_test_cpu(cpu, buffer->cpumask))
+ return NOTIFY_OK;
+
+ buffer->buffers[cpu] =
+ rb_allocate_cpu_buffer(buffer, cpu);
+ if (!buffer->buffers[cpu]) {
+ WARN(1, "failed to allocate ring buffer on CPU %ld\n",
+ cpu);
+ return NOTIFY_OK;
+ }
+ smp_wmb();
+ cpumask_set_cpu(cpu, buffer->cpumask);
+ break;
+ case CPU_DOWN_PREPARE:
+ case CPU_DOWN_PREPARE_FROZEN:
+ /*
+ * Do nothing.
+ * If we were to free the buffer, then the user would
+ * lose any trace that was in the buffer.
+ */
+ break;
+ default:
+ break;
+ }
+ return NOTIFY_OK;
+}
+#endif
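
For reference, and not part of the patch itself: below is a minimal, self-contained sketch of the CPU-hotplug notifier pattern that rb_cpu_notify() follows, i.e. allocate per-CPU state lazily in CPU_UP_PREPARE and deliberately keep it across CPU_DOWN_PREPARE so nothing recorded on that CPU is lost. All names here (my_percpu_data, my_cpu_notify, my_cpu_nb, my_setup) and the PAGE_SIZE allocation size are hypothetical placeholders, not ring-buffer code.

#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/slab.h>
#include <linux/init.h>

static void *my_percpu_data[NR_CPUS];		/* hypothetical per-CPU state */

static int __cpuinit my_cpu_notify(struct notifier_block *self,
				   unsigned long action, void *hcpu)
{
	long cpu = (long)hcpu;

	switch (action) {
	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		if (my_percpu_data[cpu])
			return NOTIFY_OK;	/* kept from an earlier online */
		my_percpu_data[cpu] = kzalloc(PAGE_SIZE, GFP_KERNEL);
		if (!my_percpu_data[cpu])
			return NOTIFY_BAD;	/* veto bringing this CPU up */
		break;
	case CPU_DOWN_PREPARE:
	case CPU_DOWN_PREPARE_FROZEN:
		/* keep the buffer; freeing it would throw away its contents */
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block my_cpu_nb __cpuinitdata = {
	.notifier_call	= my_cpu_notify,
};

/* Called once at init time; how it is wired up is left out here. */
static int __init my_setup(void)
{
	int cpu;

	get_online_cpus();		/* hold off hotplug while we scan */
	for_each_online_cpu(cpu)
		my_percpu_data[cpu] = kzalloc(PAGE_SIZE, GFP_KERNEL);
	register_cpu_notifier(&my_cpu_nb);
	put_online_cpus();

	return 0;
}

The ordering mirrors ring_buffer_alloc(): get_online_cpus() is held so no CPU can come or go between the initial scan of online CPUs and the registration of the notifier.
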
unsigned long __read_mostly tracing_max_latency;
unsigned long __read_mostly tracing_thresh;
+/*
+ * On boot up, the ring buffer is set to the minimum size, so that
+ * we do not waste memory on systems that are not using tracing.
+ */
+static int ring_buffer_expanded;
+
/*
* We need to change this state when a selftest is running.
* A selftest will lurk into the ring-buffer to count the
{
strncpy(bootup_tracer_buf, str, BOOTUP_TRACER_SIZE);
default_bootup_tracer = bootup_tracer_buf;
+ /* We are using ftrace early, expand it */
+ ring_buffer_expanded = 1;
return 1;
}
__setup("ftrace=", set_ftrace);
{
int cpu;
+ /* If we are looking at one CPU buffer, only check that one */
+ if (iter->cpu_file != TRACE_PIPE_ALL_CPU) {
+ cpu = iter->cpu_file;
+ if (iter->buffer_iter[cpu]) {
+ if (!ring_buffer_iter_empty(iter->buffer_iter[cpu]))
+ return 0;
+ } else {
+ if (!ring_buffer_empty_cpu(iter->tr->buffer, cpu))
+ return 0;
+ }
+ return 1;
+ }
+
for_each_tracing_cpu(cpu) {
if (iter->buffer_iter[cpu]) {
if (!ring_buffer_iter_empty(iter->buffer_iter[cpu]))
iter->buffer_iter[cpu] =
ring_buffer_read_start(iter->tr->buffer, cpu);
-
- if (!iter->buffer_iter[cpu])
- goto fail_buffer;
}
} else {
cpu = iter->cpu_file;
iter->buffer_iter[cpu] =
ring_buffer_read_start(iter->tr->buffer, cpu);
-
- if (!iter->buffer_iter[cpu])
- goto fail;
}
/* TODO stop tracer */
return t->init(tr);
}
+static int tracing_resize_ring_buffer(unsigned long size)
+{
+ int ret;
+
+ /*
+ * If the kernel or a user changes the size of the ring buffer,
+ * we use the size that was given, and we can forget about expanding
+ * it later.
+ */
+ ring_buffer_expanded = 1;
+
+ ret = ring_buffer_resize(global_trace.buffer, size);
+ if (ret < 0)
+ return ret;
+
+ ret = ring_buffer_resize(max_tr.buffer, size);
+ if (ret < 0) {
+ int r;
+
+ r = ring_buffer_resize(global_trace.buffer,
+ global_trace.entries);
+ if (r < 0) {
+ /* AARGH! We are left with different
+ * size max buffer!!!! */
+ WARN_ON(1);
+ tracing_disabled = 1;
+ }
+ return ret;
+ }
+
+ global_trace.entries = size;
+
+ return ret;
+}
+
+/**
+ * tracing_update_buffers - used by tracing facility to expand ring buffers
+ *
+ * To save memory when tracing is configured in but never used, the
+ * ring buffers are set to a minimum size. Once a user starts to use
+ * the tracing facility, the buffers need to grow to their default size.
+ *
+ * This function is to be called when a tracer is about to be used.
+ */
+int tracing_update_buffers(void)
+{
+ int ret = 0;
+
+ if (!ring_buffer_expanded)
+ ret = tracing_resize_ring_buffer(trace_buf_size);
+
+ return ret;
+}
+
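
A hedged usage sketch, not taken from this patch: tracing_update_buffers() is meant to be called on any path that is about to start tracing, before the tracer is initialized, so the buffers grow from their boot-time minimum to the default size exactly once. The caller below (my_enable_tracer) and its error handling are hypothetical; the real call sites are wired up elsewhere in trace.c.

/* Hypothetical caller: grow the ring buffers before starting a tracer. */
static int my_enable_tracer(struct trace_array *tr, struct tracer *t)
{
	int ret;

	ret = tracing_update_buffers();	/* no-op once already expanded */
	if (ret < 0)
		return ret;		/* could not grow to the default size */

	return t->init(tr);		/* buffers are full sized, start tracing */
}

Funnelling every grow/shrink through tracing_resize_ring_buffer() keeps global_trace and max_tr in step, which is why the open-coded resize in the buffer-size write handler below is replaced by a single call.
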
struct trace_option_dentry;
static struct trace_option_dentry *
struct tracer *t;
int ret = 0;
+ if (!ring_buffer_expanded) {
+ ret = tracing_resize_ring_buffer(trace_buf_size);
+ if (ret < 0)
+ return ret;
+ ret = 0;
+ }
+
mutex_lock(&trace_types_lock);
for (t = trace_types; t; t = t->next) {
if (strcmp(t->name, buf) == 0)
val <<= 10;
if (val != global_trace.entries) {
- ret = ring_buffer_resize(global_trace.buffer, val);
- if (ret < 0) {
- cnt = ret;
- goto out;
- }
-
- ret = ring_buffer_resize(max_tr.buffer, val);
+ ret = tracing_resize_ring_buffer(val);
if (ret < 0) {
- int r;
cnt = ret;
- r = ring_buffer_resize(global_trace.buffer,
- global_trace.entries);
- if (r < 0) {
- /* AARGH! We are left with different
- * size max buffer!!!! */
- WARN_ON(1);
- tracing_disabled = 1;
- }
goto out;
}
-
- global_trace.entries = val;
}
filp->f_pos += cnt;
__init static int tracer_alloc_buffers(void)
{
struct trace_array_cpu *data;
+ int ring_buf_size;
int i;
int ret = -ENOMEM;
if (!alloc_cpumask_var(&tracing_reader_cpumask, GFP_KERNEL))
goto out_free_tracing_cpumask;
+ /* To save memory, keep the ring buffer at its minimum size until expanded */
+ if (ring_buffer_expanded)
+ ring_buf_size = trace_buf_size;
+ else
+ ring_buf_size = 1;
+
cpumask_copy(tracing_buffer_mask, cpu_possible_mask);
cpumask_copy(tracing_cpumask, cpu_all_mask);
cpumask_clear(tracing_reader_cpumask);
/* TODO: make the number of buffers hot pluggable with CPUS */
- global_trace.buffer = ring_buffer_alloc(trace_buf_size,
+ global_trace.buffer = ring_buffer_alloc(ring_buf_size,
TRACE_BUFFER_FLAGS);
if (!global_trace.buffer) {
printk(KERN_ERR "tracer: failed to allocate ring buffer!\n");
#ifdef CONFIG_TRACER_MAX_TRACE
- max_tr.buffer = ring_buffer_alloc(trace_buf_size,
+ max_tr.buffer = ring_buffer_alloc(ring_buf_size,
TRACE_BUFFER_FLAGS);
if (!max_tr.buffer) {
printk(KERN_ERR "tracer: failed to allocate max ring buffer!\n");