diff --git a/drivers/oprofile/cpu_buffer.c b/drivers/oprofile/cpu_buffer.c
index 01d38e78cde..9e66c384e01 100644
--- a/drivers/oprofile/cpu_buffer.c
+++ b/drivers/oprofile/cpu_buffer.c
@@ -28,6 +28,25 @@
 #include "buffer_sync.h"
 #include "oprof.h"
 
+#define OP_BUFFER_FLAGS	0
+
+/*
+ * Read and write access uses spin locking. Thus, a write to the
+ * buffer by the NMI handler (x86) can also occur during critical
+ * sections when reading the buffer. To avoid this, there are 2
+ * buffers for independent read and write access. Read access is in
+ * process context only, write access only in the NMI handler. If the
+ * read buffer runs empty, both buffers are swapped atomically. There
+ * is potentially a small window during swapping where the buffers are
+ * disabled and samples could be lost.
+ *
+ * Using 2 buffers adds a little overhead, but the solution is clear
+ * and does not require changes in the ring buffer implementation. It
+ * can be changed to a single-buffer solution when the ring buffer
+ * access is implemented as non-locking atomic code.
+ */
+static struct ring_buffer *op_ring_buffer_read;
+static struct ring_buffer *op_ring_buffer_write;
 DEFINE_PER_CPU(struct oprofile_cpu_buffer, cpu_buffer);
 
 static void wq_sync_buffer(struct work_struct *work);
@@ -35,19 +54,9 @@ static void wq_sync_buffer(struct work_struct *work);
 #define DEFAULT_TIMER_EXPIRE (HZ / 10)
 static int work_enabled;
 
-void free_cpu_buffers(void)
-{
-	int i;
-
-	for_each_possible_cpu(i) {
-		vfree(per_cpu(cpu_buffer, i).buffer);
-		per_cpu(cpu_buffer, i).buffer = NULL;
-	}
-}
-
 unsigned long oprofile_get_cpu_buffer_size(void)
 {
-	return fs_cpu_buffer_size;
+	return oprofile_cpu_buffer_size;
 }
 
 void oprofile_cpu_buffer_inc_smpl_lost(void)
@@ -58,20 +67,32 @@ void oprofile_cpu_buffer_inc_smpl_lost(void)
 	cpu_buf->sample_lost_overflow++;
 }
 
+void free_cpu_buffers(void)
+{
+	if (op_ring_buffer_read)
+		ring_buffer_free(op_ring_buffer_read);
+	op_ring_buffer_read = NULL;
+	if (op_ring_buffer_write)
+		ring_buffer_free(op_ring_buffer_write);
+	op_ring_buffer_write = NULL;
+}
+
 int alloc_cpu_buffers(void)
 {
 	int i;
 
-	unsigned long buffer_size = fs_cpu_buffer_size;
+	unsigned long buffer_size = oprofile_cpu_buffer_size;
+
+	op_ring_buffer_read = ring_buffer_alloc(buffer_size, OP_BUFFER_FLAGS);
+	if (!op_ring_buffer_read)
+		goto fail;
+	op_ring_buffer_write = ring_buffer_alloc(buffer_size, OP_BUFFER_FLAGS);
+	if (!op_ring_buffer_write)
+		goto fail;
 
 	for_each_possible_cpu(i) {
 		struct oprofile_cpu_buffer *b = &per_cpu(cpu_buffer, i);
 
-		b->buffer = vmalloc_node(sizeof(struct op_sample) * buffer_size,
-			cpu_to_node(i));
-		if (!b->buffer)
-			goto fail;
-
 		b->last_task = NULL;
 		b->last_is_kernel = -1;
 		b->tracing = 0;
@@ -124,57 +145,75 @@ void end_cpu_work(void)
 	flush_scheduled_work();
 }
 
-/* Resets the cpu buffer to a sane state. */
-void cpu_buffer_reset(struct oprofile_cpu_buffer *cpu_buf)
+int op_cpu_buffer_write_entry(struct op_entry *entry)
 {
-	/* reset these to invalid values; the next sample
-	 * collected will populate the buffer with proper
-	 * values to initialize the buffer
-	 */
-	cpu_buf->last_is_kernel = -1;
-	cpu_buf->last_task = NULL;
-}
-
-/* compute number of available slots in cpu_buffer queue */
-static unsigned long nr_available_slots(struct oprofile_cpu_buffer const *b)
-{
-	unsigned long head = b->head_pos;
-	unsigned long tail = b->tail_pos;
+	entry->event = ring_buffer_lock_reserve(op_ring_buffer_write,
+						sizeof(struct op_sample),
+						&entry->irq_flags);
+	if (entry->event)
+		entry->sample = ring_buffer_event_data(entry->event);
+	else
+		entry->sample = NULL;
 
-	if (tail > head)
-		return (tail - head) - 1;
+	if (!entry->sample)
+		return -ENOMEM;
 
-	return tail + (b->buffer_size - head) - 1;
+	return 0;
 }
 
-static void increment_head(struct oprofile_cpu_buffer *b)
+int op_cpu_buffer_write_commit(struct op_entry *entry)
 {
-	unsigned long new_head = b->head_pos + 1;
+	return ring_buffer_unlock_commit(op_ring_buffer_write, entry->event,
+					 entry->irq_flags);
+}
 
-	/* Ensure anything written to the slot before we
-	 * increment is visible */
-	wmb();
+struct op_sample *op_cpu_buffer_read_entry(int cpu)
+{
+	struct ring_buffer_event *e;
+	e = ring_buffer_consume(op_ring_buffer_read, cpu, NULL);
+	if (e)
+		return ring_buffer_event_data(e);
+	if (ring_buffer_swap_cpu(op_ring_buffer_read,
+				 op_ring_buffer_write,
+				 cpu))
+		return NULL;
+	e = ring_buffer_consume(op_ring_buffer_read, cpu, NULL);
+	if (e)
+		return ring_buffer_event_data(e);
+	return NULL;
+}
 
-	if (new_head < b->buffer_size)
-		b->head_pos = new_head;
-	else
-		b->head_pos = 0;
+unsigned long op_cpu_buffer_entries(int cpu)
+{
+	return ring_buffer_entries_cpu(op_ring_buffer_read, cpu) +
+		ring_buffer_entries_cpu(op_ring_buffer_write, cpu);
 }
 
-static inline void
+static inline int
 add_sample(struct oprofile_cpu_buffer *cpu_buf,
 	   unsigned long pc, unsigned long event)
 {
-	struct op_sample *entry = &cpu_buf->buffer[cpu_buf->head_pos];
-	entry->eip = pc;
-	entry->event = event;
-	increment_head(cpu_buf);
+	struct op_entry entry;
+	int ret;
+
+	ret = op_cpu_buffer_write_entry(&entry);
+	if (ret)
+		return ret;
+
+	entry.sample->eip = pc;
+	entry.sample->event = event;
+
+	ret = op_cpu_buffer_write_commit(&entry);
+	if (ret)
+		return ret;
+
+	return 0;
 }
 
-static inline void
+static inline int
 add_code(struct oprofile_cpu_buffer *buffer, unsigned long value)
 {
-	add_sample(buffer, ESCAPE_CODE, value);
+	return add_sample(buffer, ESCAPE_CODE, value);
 }
 
 /* This must be safe from any context. It's safe writing here
@@ -198,11 +237,6 @@ static int log_sample(struct oprofile_cpu_buffer *cpu_buf, unsigned long pc,
 		return 0;
 	}
 
-	if (nr_available_slots(cpu_buf) < 3) {
-		cpu_buf->sample_lost_overflow++;
-		return 0;
-	}
-
 	is_kernel = !!is_kernel;
 
 	task = current;
@@ -210,26 +244,29 @@ static int log_sample(struct oprofile_cpu_buffer *cpu_buf, unsigned long pc,
 	/* notice a switch from user->kernel or vice versa */
 	if (cpu_buf->last_is_kernel != is_kernel) {
 		cpu_buf->last_is_kernel = is_kernel;
-		add_code(cpu_buf, is_kernel);
+		if (add_code(cpu_buf, is_kernel))
+			goto fail;
 	}
 
 	/* notice a task switch */
 	if (cpu_buf->last_task != task) {
 		cpu_buf->last_task = task;
-		add_code(cpu_buf, (unsigned long)task);
+		if (add_code(cpu_buf, (unsigned long)task))
+			goto fail;
 	}
 
-	add_sample(cpu_buf, pc, event);
+	if (add_sample(cpu_buf, pc, event))
+		goto fail;
+
 	return 1;
+
+fail:
+	cpu_buf->sample_lost_overflow++;
+	return 0;
 }
 
 static int oprofile_begin_trace(struct oprofile_cpu_buffer *cpu_buf)
 {
-	if (nr_available_slots(cpu_buf) < 4) {
-		cpu_buf->sample_lost_overflow++;
-		return 0;
-	}
-
 	add_code(cpu_buf, CPU_TRACE_BEGIN);
 	cpu_buf->tracing = 1;
 	return 1;
@@ -240,12 +277,13 @@ static void oprofile_end_trace(struct oprofile_cpu_buffer *cpu_buf)
 	cpu_buf->tracing = 0;
 }
 
-void oprofile_add_ext_sample(unsigned long pc, struct pt_regs * const regs,
-				unsigned long event, int is_kernel)
+static inline void
+__oprofile_add_ext_sample(unsigned long pc, struct pt_regs * const regs,
+			  unsigned long event, int is_kernel)
 {
 	struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(cpu_buffer);
 
-	if (!backtrace_depth) {
+	if (!oprofile_backtrace_depth) {
 		log_sample(cpu_buf, pc, is_kernel, event);
 		return;
 	}
@@ -253,68 +291,82 @@ void oprofile_add_ext_sample(unsigned long pc, struct pt_regs * const regs,
 	if (!oprofile_begin_trace(cpu_buf))
 		return;
 
-	/* if log_sample() fail we can't backtrace since we lost the source
-	 * of this event */
+	/*
+	 * if log_sample() fails we can't backtrace since we lost the
+	 * source of this event
+	 */
 	if (log_sample(cpu_buf, pc, is_kernel, event))
-		oprofile_ops.backtrace(regs, backtrace_depth);
+		oprofile_ops.backtrace(regs, oprofile_backtrace_depth);
 	oprofile_end_trace(cpu_buf);
 }
 
+void oprofile_add_ext_sample(unsigned long pc, struct pt_regs * const regs,
+			     unsigned long event, int is_kernel)
+{
+	__oprofile_add_ext_sample(pc, regs, event, is_kernel);
+}
+
 void oprofile_add_sample(struct pt_regs * const regs, unsigned long event)
 {
 	int is_kernel = !user_mode(regs);
 	unsigned long pc = profile_pc(regs);
 
-	oprofile_add_ext_sample(pc, regs, event, is_kernel);
+	__oprofile_add_ext_sample(pc, regs, event, is_kernel);
 }
 
 #ifdef CONFIG_OPROFILE_IBS
 
 #define MAX_IBS_SAMPLE_SIZE 14
 
-void oprofile_add_ibs_sample(struct pt_regs *const regs,
-			     unsigned int *const ibs_sample, int ibs_code)
+void oprofile_add_ibs_sample(struct pt_regs * const regs,
+			     unsigned int * const ibs_sample, int ibs_code)
 {
 	int is_kernel = !user_mode(regs);
 	struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(cpu_buffer);
 	struct task_struct *task;
+	int fail = 0;
 
 	cpu_buf->sample_received++;
 
-	if (nr_available_slots(cpu_buf) < MAX_IBS_SAMPLE_SIZE) {
-		/* we can't backtrace since we lost the source of this event */
-		cpu_buf->sample_lost_overflow++;
-		return;
-	}
-
 	/* notice a switch from user->kernel or vice versa */
 	if (cpu_buf->last_is_kernel != is_kernel) {
+		if (add_code(cpu_buf, is_kernel))
+			goto fail;
 		cpu_buf->last_is_kernel = is_kernel;
-		add_code(cpu_buf, is_kernel);
 	}
 
 	/* notice a task switch */
 	if (!is_kernel) {
 		task = current;
 		if (cpu_buf->last_task != task) {
+			if (add_code(cpu_buf, (unsigned long)task))
+				goto fail;
 			cpu_buf->last_task = task;
-			add_code(cpu_buf, (unsigned long)task);
 		}
 	}
 
-	add_code(cpu_buf, ibs_code);
-	add_sample(cpu_buf, ibs_sample[0], ibs_sample[1]);
-	add_sample(cpu_buf, ibs_sample[2], ibs_sample[3]);
-	add_sample(cpu_buf, ibs_sample[4], ibs_sample[5]);
+	fail = fail || add_code(cpu_buf, ibs_code);
+	fail = fail || add_sample(cpu_buf, ibs_sample[0], ibs_sample[1]);
+	fail = fail || add_sample(cpu_buf, ibs_sample[2], ibs_sample[3]);
+	fail = fail || add_sample(cpu_buf, ibs_sample[4], ibs_sample[5]);
 
 	if (ibs_code == IBS_OP_BEGIN) {
-		add_sample(cpu_buf, ibs_sample[6], ibs_sample[7]);
-		add_sample(cpu_buf, ibs_sample[8], ibs_sample[9]);
-		add_sample(cpu_buf, ibs_sample[10], ibs_sample[11]);
+		fail = fail || add_sample(cpu_buf, ibs_sample[6], ibs_sample[7]);
+		fail = fail || add_sample(cpu_buf, ibs_sample[8], ibs_sample[9]);
+		fail = fail || add_sample(cpu_buf, ibs_sample[10], ibs_sample[11]);
 	}
 
-	if (backtrace_depth)
-		oprofile_ops.backtrace(regs, backtrace_depth);
+	if (fail)
+		goto fail;
+
+	if (oprofile_backtrace_depth)
+		oprofile_ops.backtrace(regs, oprofile_backtrace_depth);
+
+	return;
+
+fail:
+	cpu_buf->sample_lost_overflow++;
+	return;
 }
 
 #endif
@@ -332,21 +384,21 @@ void oprofile_add_trace(unsigned long pc)
 	if (!cpu_buf->tracing)
 		return;
 
-	if (nr_available_slots(cpu_buf) < 1) {
-		cpu_buf->tracing = 0;
-		cpu_buf->sample_lost_overflow++;
-		return;
-	}
+	/*
+	 * a broken frame can give an eip with the same value as an
+	 * escape code; abort the trace if we get it
+	 */
+	if (pc == ESCAPE_CODE)
+		goto fail;
 
-	/* broken frame can give an eip with the same value as an escape code,
-	 * abort the trace if we get it */
-	if (pc == ESCAPE_CODE) {
-		cpu_buf->tracing = 0;
-		cpu_buf->backtrace_aborted++;
-		return;
-	}
+	if (add_sample(cpu_buf, pc, 0))
+		goto fail;
 
-	add_sample(cpu_buf, pc, 0);
+	return;
+fail:
+	cpu_buf->tracing = 0;
+	cpu_buf->backtrace_aborted++;
+	return;
 }
 
 /*
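The conversion above reduces to two patterns: reserve/fill/commit on the write side (op_cpu_buffer_write_entry() / op_cpu_buffer_write_commit()) and consume-then-swap on the read side (op_cpu_buffer_read_entry()). The sketch below condenses these out of the hunks; it is illustrative only, written against the ring_buffer signatures current at the time of this patch, and the rb_* and demo_* names are hypothetical, not part of the patch.

#include <linux/ring_buffer.h>
#include <linux/errno.h>

/*
 * Sketch (not part of the patch). Two buffers, as described in the new
 * header comment: the NMI handler only ever writes to rb_write, process
 * context only ever reads from rb_read. Both would be allocated with
 * ring_buffer_alloc(), as in alloc_cpu_buffers() above.
 */
static struct ring_buffer *rb_read;
static struct ring_buffer *rb_write;

struct demo_sample {			/* stand-in for struct op_sample */
	unsigned long eip;
	unsigned long event;
};

/* Write side (NMI context): reserve a slot, fill it, commit it. */
static int demo_write(unsigned long eip, unsigned long event)
{
	struct ring_buffer_event *e;
	struct demo_sample *s;
	unsigned long flags;

	e = ring_buffer_lock_reserve(rb_write, sizeof(*s), &flags);
	if (!e)
		return -ENOMEM;	/* buffer full; caller counts a lost sample */
	s = ring_buffer_event_data(e);
	s->eip = eip;
	s->event = event;
	return ring_buffer_unlock_commit(rb_write, e, flags);
}

/*
 * Read side (process context): consume from the read buffer; when it
 * runs empty, swap the two per-cpu buffers atomically and retry once.
 */
static struct demo_sample *demo_read(int cpu)
{
	struct ring_buffer_event *e;

	e = ring_buffer_consume(rb_read, cpu, NULL);
	if (!e) {
		if (ring_buffer_swap_cpu(rb_read, rb_write, cpu))
			return NULL;
		e = ring_buffer_consume(rb_read, cpu, NULL);
	}
	return e ? ring_buffer_event_data(e) : NULL;
}

The swap step is the small window called out in the new header comment: while ring_buffer_swap_cpu() runs, the NMI writer may find no usable buffer, which is why every add_code()/add_sample() call site in the patch gained a fail path that increments sample_lost_overflow rather than dropping samples silently.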