diff --git a/drivers/oprofile/buffer_sync.c b/drivers/oprofile/buffer_sync.c
index 8134c7e198a..ed982273fb8 100644
--- a/drivers/oprofile/buffer_sync.c
+++ b/drivers/oprofile/buffer_sync.c
@@ -5,6 +5,7 @@
  * @remark Read the file COPYING
  *
  * @author John Levon
+ * @author Barry Kasindorf
  *
  * This is the core of the buffer management. Each
  * CPU buffer is processed and entered into the
@@ -33,7 +34,7 @@
 #include "event_buffer.h"
 #include "cpu_buffer.h"
 #include "buffer_sync.h"
- 
+
 static LIST_HEAD(dying_tasks);
 static LIST_HEAD(dead_tasks);
 static cpumask_t marked_cpus = CPU_MASK_NONE;
@@ -48,10 +49,11 @@ static void process_task_mortuary(void);
  * Can be invoked from softirq via RCU callback due to
  * call_rcu() of the task struct, hence the _irqsave.
  */
-static int task_free_notify(struct notifier_block * self, unsigned long val, void * data)
+static int
+task_free_notify(struct notifier_block *self, unsigned long val, void *data)
 {
 	unsigned long flags;
-	struct task_struct * task = data;
+	struct task_struct *task = data;
 	spin_lock_irqsave(&task_mortuary, flags);
 	list_add(&task->tasks, &dying_tasks);
 	spin_unlock_irqrestore(&task_mortuary, flags);
@@ -62,13 +64,14 @@ static int task_free_notify(struct notifier_block * self, unsigned long val, voi
 /* The task is on its way out. A sync of the buffer means we can catch
  * any remaining samples for this task.
  */
-static int task_exit_notify(struct notifier_block * self, unsigned long val, void * data)
+static int
+task_exit_notify(struct notifier_block *self, unsigned long val, void *data)
 {
 	/* To avoid latency problems, we only process the current CPU,
 	 * hoping that most samples for the task are on this CPU
 	 */
 	sync_buffer(raw_smp_processor_id());
-  	return 0;
+	return 0;
 }
 
 
@@ -77,11 +80,12 @@ static int task_exit_notify(struct notifier_block * self, unsigned long val, voi
  * we don't lose any. This does not have to be exact, it's a QoI issue
  * only.
  */
-static int munmap_notify(struct notifier_block * self, unsigned long val, void * data)
+static int
+munmap_notify(struct notifier_block *self, unsigned long val, void *data)
 {
 	unsigned long addr = (unsigned long)data;
-	struct mm_struct * mm = current->mm;
-	struct vm_area_struct * mpnt;
+	struct mm_struct *mm = current->mm;
+	struct vm_area_struct *mpnt;
 
 	down_read(&mm->mmap_sem);
 
@@ -99,11 +103,12 @@ static int munmap_notify(struct notifier_block * self, unsigned long val, void *
 	return 0;
 }
 
- 
+
 /* We need to be told about new modules so we don't attribute to a previously
  * loaded module, or drop the samples on the floor.
  */
-static int module_load_notify(struct notifier_block * self, unsigned long val, void * data)
+static int
+module_load_notify(struct notifier_block *self, unsigned long val, void *data)
 {
 #ifdef CONFIG_MODULES
 	if (val != MODULE_STATE_COMING)
@@ -118,7 +123,7 @@ static int module_load_notify(struct notifier_block * self, unsigned long val, v
 	return 0;
 }
 
- 
+
 static struct notifier_block task_free_nb = {
 	.notifier_call	= task_free_notify,
 };
@@ -135,7 +140,7 @@ static struct notifier_block module_load_nb = {
 	.notifier_call = module_load_notify,
 };
 
- 
+
 static void end_sync(void)
 {
 	end_cpu_work();
@@ -187,43 +192,41 @@ void sync_stop(void)
 	end_sync();
 }
 
- 
+
 /* Optimisation. We can manage without taking the dcookie sem
  * because we cannot reach this code without at least one
  * dcookie user still being registered (namely, the reader
  * of the event buffer).
  */
-static inline unsigned long fast_get_dcookie(struct dentry * dentry,
-	struct vfsmount * vfsmnt)
+static inline unsigned long fast_get_dcookie(struct path *path)
 {
 	unsigned long cookie;
- 
- 	if (dentry->d_cookie)
- 		return (unsigned long)dentry;
- 	get_dcookie(dentry, vfsmnt, &cookie);
+
+	if (path->dentry->d_cookie)
+		return (unsigned long)path->dentry;
+	get_dcookie(path, &cookie);
 	return cookie;
 }
 
- 
+
 /* Look up the dcookie for the task's first VM_EXECUTABLE mapping,
  * which corresponds loosely to "application name". This is
  * not strictly necessary but allows oprofile to associate
  * shared-library samples with particular applications
  */
-static unsigned long get_exec_dcookie(struct mm_struct * mm)
+static unsigned long get_exec_dcookie(struct mm_struct *mm)
 {
 	unsigned long cookie = NO_COOKIE;
-	struct vm_area_struct * vma;
- 
+	struct vm_area_struct *vma;
+
 	if (!mm)
 		goto out;
- 
+
 	for (vma = mm->mmap; vma; vma = vma->vm_next) {
 		if (!vma->vm_file)
 			continue;
 		if (!(vma->vm_flags & VM_EXECUTABLE))
 			continue;
-		cookie = fast_get_dcookie(vma->vm_file->f_path.dentry,
-			vma->vm_file->f_path.mnt);
+		cookie = fast_get_dcookie(&vma->vm_file->f_path);
 		break;
 	}
 
@@ -237,19 +240,19 @@ out:
  * sure to do this lookup before a mm->mmap modification happens so
  * we don't lose track.
  */
-static unsigned long lookup_dcookie(struct mm_struct * mm, unsigned long addr, off_t * offset)
+static unsigned long
+lookup_dcookie(struct mm_struct *mm, unsigned long addr, off_t *offset)
 {
 	unsigned long cookie = NO_COOKIE;
-	struct vm_area_struct * vma;
+	struct vm_area_struct *vma;
 
 	for (vma = find_vma(mm, addr); vma; vma = vma->vm_next) {
- 
+
 		if (addr < vma->vm_start || addr >= vma->vm_end)
 			continue;
 
 		if (vma->vm_file) {
-			cookie = fast_get_dcookie(vma->vm_file->f_path.dentry,
-				vma->vm_file->f_path.mnt);
+			cookie = fast_get_dcookie(&vma->vm_file->f_path);
 			*offset = (vma->vm_pgoff << PAGE_SHIFT) + addr -
 				vma->vm_start;
 		} else {
@@ -266,9 +269,20 @@ static unsigned long lookup_dcookie(struct mm_struct * mm, unsigned long addr, o
 	return cookie;
 }
 
+static void increment_tail(struct oprofile_cpu_buffer *b)
+{
+	unsigned long new_tail = b->tail_pos + 1;
+
+	rmb();	/* be sure fifo pointers are synchronized */
+
+	if (new_tail < b->buffer_size)
+		b->tail_pos = new_tail;
+	else
+		b->tail_pos = 0;
+}
 
 static unsigned long last_cookie = INVALID_COOKIE;
- 
+
 static void add_cpu_switch(int i)
 {
 	add_event_entry(ESCAPE_CODE);
@@ -281,16 +295,16 @@ static void add_kernel_ctx_switch(unsigned int in_kernel)
 {
 	add_event_entry(ESCAPE_CODE);
 	if (in_kernel)
-		add_event_entry(KERNEL_ENTER_SWITCH_CODE); 
+		add_event_entry(KERNEL_ENTER_SWITCH_CODE);
 	else
-		add_event_entry(KERNEL_EXIT_SWITCH_CODE); 
+		add_event_entry(KERNEL_EXIT_SWITCH_CODE);
 }
 
- 
+
 static void
-add_user_ctx_switch(struct task_struct const * task, unsigned long cookie)
+add_user_ctx_switch(struct task_struct const *task, unsigned long cookie)
 {
 	add_event_entry(ESCAPE_CODE);
-	add_event_entry(CTX_SWITCH_CODE); 
+	add_event_entry(CTX_SWITCH_CODE);
 	add_event_entry(task->pid);
 	add_event_entry(cookie);
 	/* Another code for daemon back-compat */
@@ -299,7 +313,7 @@ add_user_ctx_switch(struct task_struct const * task, unsigned long cookie)
 	add_event_entry(task->tgid);
 }
 
- 
+
 static void add_cookie_switch(unsigned long cookie)
 {
 	add_event_entry(ESCAPE_CODE);
@@ -307,13 +321,78 @@ static void add_cookie_switch(unsigned long cookie)
 	add_event_entry(cookie);
 }
 
- 
+
 static void add_trace_begin(void)
 {
 	add_event_entry(ESCAPE_CODE);
 	add_event_entry(TRACE_BEGIN_CODE);
 }
 
+#ifdef CONFIG_OPROFILE_IBS
+
+#define IBS_FETCH_CODE_SIZE	2
+#define IBS_OP_CODE_SIZE	5
+#define IBS_EIP(offset)				\
+	(((struct op_sample *)&cpu_buf->buffer[(offset)])->eip)
+#define IBS_EVENT(offset)			\
+	(((struct op_sample *)&cpu_buf->buffer[(offset)])->event)
+
+/*
+ * Add IBS fetch and op entries to event buffer
+ */
+static void add_ibs_begin(struct oprofile_cpu_buffer *cpu_buf, int code,
+			  int in_kernel, struct mm_struct *mm)
+{
+	unsigned long rip;
+	int i, count;
+	unsigned long ibs_cookie = 0;
+	off_t offset;
+
+	increment_tail(cpu_buf);	/* move to RIP entry */
+
+	rip = IBS_EIP(cpu_buf->tail_pos);
+
+#ifdef __LP64__
+	rip += IBS_EVENT(cpu_buf->tail_pos) << 32;
+#endif
+
+	if (mm) {
+		ibs_cookie = lookup_dcookie(mm, rip, &offset);
+
+		if (ibs_cookie == NO_COOKIE)
+			offset = rip;
+		if (ibs_cookie == INVALID_COOKIE) {
+			atomic_inc(&oprofile_stats.sample_lost_no_mapping);
+			offset = rip;
+		}
+		if (ibs_cookie != last_cookie) {
+			add_cookie_switch(ibs_cookie);
+			last_cookie = ibs_cookie;
+		}
+	} else
+		offset = rip;
+
+	add_event_entry(ESCAPE_CODE);
+	add_event_entry(code);
+	add_event_entry(offset);	/* offset from dcookie */
+
+	/* we send the dcookie offset, but also send the raw linear address */
+	add_event_entry(IBS_EIP(cpu_buf->tail_pos));
+	add_event_entry(IBS_EVENT(cpu_buf->tail_pos));
+
+	if (code == IBS_FETCH_CODE)
+		count = IBS_FETCH_CODE_SIZE;	/* IBS fetch is 2 int64s */
+	else
+		count = IBS_OP_CODE_SIZE;	/* IBS op is 5 int64s */
+
+	for (i = 0; i < count; i++) {
+		increment_tail(cpu_buf);
+		add_event_entry(IBS_EIP(cpu_buf->tail_pos));
+		add_event_entry(IBS_EVENT(cpu_buf->tail_pos));
+	}
+}
+
+#endif
 
 static void add_sample_entry(unsigned long offset, unsigned long event)
 {
@@ -322,13 +401,13 @@ static void add_sample_entry(unsigned long offset, unsigned long event)
 }
 
 
-static int add_us_sample(struct mm_struct * mm, struct op_sample * s)
+static int add_us_sample(struct mm_struct *mm, struct op_sample *s)
 {
 	unsigned long cookie;
 	off_t offset;
- 
- 	cookie = lookup_dcookie(mm, s->eip, &offset);
- 
+
+	cookie = lookup_dcookie(mm, s->eip, &offset);
+
 	if (cookie == INVALID_COOKIE) {
 		atomic_inc(&oprofile_stats.sample_lost_no_mapping);
 		return 0;
@@ -344,13 +423,13 @@ static int add_us_sample(struct mm_struct * mm, struct op_sample * s)
 	return 1;
 }
 
- 
+
 /* Add a sample to the global event buffer. If possible the
  * sample is converted into a persistent dentry/offset pair
  * for later lookup from userspace.
  */
 static int
-add_sample(struct mm_struct * mm, struct op_sample * s, int in_kernel)
+add_sample(struct mm_struct *mm, struct op_sample *s, int in_kernel)
 {
 	if (in_kernel) {
 		add_sample_entry(s->eip, s->event);
@@ -362,9 +441,9 @@ add_sample(struct mm_struct * mm, struct op_sample * s, int in_kernel)
 	}
 	return 0;
 }
- 
 
-static void release_mm(struct mm_struct * mm)
+
+static void release_mm(struct mm_struct *mm)
 {
 	if (!mm)
 		return;
@@ -373,9 +452,9 @@ static void release_mm(struct mm_struct * mm)
 }
 
 
-static struct mm_struct * take_tasks_mm(struct task_struct * task)
+static struct mm_struct *take_tasks_mm(struct task_struct *task)
 {
-	struct mm_struct * mm = get_task_mm(task);
+	struct mm_struct *mm = get_task_mm(task);
 	if (mm)
 		down_read(&mm->mmap_sem);
 	return mm;
@@ -386,10 +465,10 @@ static inline int is_code(unsigned long val)
 {
 	return val == ESCAPE_CODE;
 }
- 
+
 
 /* "acquire" as many cpu buffer slots as we can */
-static unsigned long get_slots(struct oprofile_cpu_buffer * b)
+static unsigned long get_slots(struct oprofile_cpu_buffer *b)
 {
 	unsigned long head = b->head_pos;
 	unsigned long tail = b->tail_pos;
@@ -415,19 +494,6 @@ static unsigned long get_slots(struct oprofile_cpu_buffer * b)
 }
 
 
-static void increment_tail(struct oprofile_cpu_buffer * b)
-{
-	unsigned long new_tail = b->tail_pos + 1;
-
-	rmb();
-
-	if (new_tail < b->buffer_size)
-		b->tail_pos = new_tail;
-	else
-		b->tail_pos = 0;
-}
-
-
 /* Move tasks along towards death. Any tasks on dead_tasks
  * will definitely have no remaining references in any
  * CPU buffers at this point, because we use two lists,
@@ -438,8 +504,8 @@ static void process_task_mortuary(void)
 {
 	unsigned long flags;
 	LIST_HEAD(local_dead_tasks);
-	struct task_struct * task;
-	struct task_struct * ttask;
+	struct task_struct *task;
+	struct task_struct *ttask;
 
 	spin_lock_irqsave(&task_mortuary, flags);
 
@@ -494,9 +560,9 @@ typedef enum {
  */
 void sync_buffer(int cpu)
 {
-	struct oprofile_cpu_buffer * cpu_buf = &cpu_buffer[cpu];
+	struct oprofile_cpu_buffer *cpu_buf = &per_cpu(cpu_buffer, cpu);
 	struct mm_struct *mm = NULL;
-	struct task_struct * new;
+	struct task_struct *new;
 	unsigned long cookie = 0;
 	int in_kernel = 1;
 	unsigned int i;
@@ -504,7 +570,7 @@ void sync_buffer(int cpu)
 	unsigned long available;
 
 	mutex_lock(&buffer_mutex);
- 
+
 	add_cpu_switch(cpu);
 
 	/* Remember, only we can modify tail_pos */
@@ -512,8 +578,8 @@ void sync_buffer(int cpu)
 	available = get_slots(cpu_buf);
 
 	for (i = 0; i < available; ++i) {
-		struct op_sample * s = &cpu_buf->buffer[cpu_buf->tail_pos];
- 
+		struct op_sample *s = &cpu_buf->buffer[cpu_buf->tail_pos];
+
 		if (is_code(s->eip)) {
 			if (s->event <= CPU_IS_KERNEL) {
 				/* kernel/userspace switch */
@@ -524,8 +590,18 @@ void sync_buffer(int cpu)
 			} else if (s->event == CPU_TRACE_BEGIN) {
 				state = sb_bt_start;
 				add_trace_begin();
+#ifdef CONFIG_OPROFILE_IBS
+			} else if (s->event == IBS_FETCH_BEGIN) {
+				state = sb_bt_start;
+				add_ibs_begin(cpu_buf,
+					IBS_FETCH_CODE, in_kernel, mm);
+			} else if (s->event == IBS_OP_BEGIN) {
+				state = sb_bt_start;
+				add_ibs_begin(cpu_buf,
+					IBS_OP_CODE, in_kernel, mm);
+#endif
 			} else {
-				struct mm_struct * oldmm = mm;
+				struct mm_struct *oldmm = mm;
 
 				/* userspace context switch */
 				new = (struct task_struct *)s->event;
@@ -536,13 +612,11 @@ void sync_buffer(int cpu)
 				cookie = get_exec_dcookie(mm);
 				add_user_ctx_switch(new, cookie);
 			}
-		} else {
-			if (state >= sb_bt_start &&
-			    !add_sample(mm, s, in_kernel)) {
-				if (state == sb_bt_start) {
-					state = sb_bt_ignore;
-					atomic_inc(&oprofile_stats.bt_lost_no_mapping);
-				}
+		} else if (state >= sb_bt_start &&
+			   !add_sample(mm, s, in_kernel)) {
+			if (state == sb_bt_start) {
+				state = sb_bt_ignore;
+				atomic_inc(&oprofile_stats.bt_lost_no_mapping);
 			}
 		}
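
Note on the IBS record layout consumed by add_ibs_begin() above: each record
occupies consecutive slots of the per-CPU ring buffer. After the escape/code
entry comes one RIP entry (the low half in eip; on 64-bit the high half sits
in event, hence the #ifdef __LP64__ branch), followed by IBS_FETCH_CODE_SIZE
(2) or IBS_OP_CODE_SIZE (5) raw register pairs, with the tail wrapping at
buffer_size. The standalone C sketch below walks the same layout in userspace.
It is illustrative only: struct sample, next_tail() and read_ibs_record() are
names invented for this example, not part of the patch or of any kernel API.

#include <stdio.h>
#include <stdint.h>

/* Mirrors the patch's struct op_sample: one (eip, event) pair per slot. */
struct sample {
	uint64_t eip;
	uint64_t event;
};

#define BUF_SIZE		16	/* stands in for b->buffer_size */
#define IBS_FETCH_CODE_SIZE	2	/* IBS fetch payload: 2 int64 pairs */
#define IBS_OP_CODE_SIZE	5	/* IBS op payload: 5 int64 pairs */

/* Same wrap-around rule as the patch's increment_tail(). */
static unsigned long next_tail(unsigned long tail)
{
	unsigned long new_tail = tail + 1;

	return new_tail < BUF_SIZE ? new_tail : 0;
}

/* Consume one IBS record: the RIP entry, then the raw register pairs. */
static void read_ibs_record(const struct sample *buf, unsigned long *tail,
			    int is_fetch)
{
	int count = is_fetch ? IBS_FETCH_CODE_SIZE : IBS_OP_CODE_SIZE;
	uint64_t rip;
	int i;

	*tail = next_tail(*tail);		/* move to RIP entry */
	rip = buf[*tail].eip + (buf[*tail].event << 32);
	printf("rip = %#llx\n", (unsigned long long)rip);

	for (i = 0; i < count; i++) {
		*tail = next_tail(*tail);	/* one raw register pair */
		printf("  %#llx %#llx\n",
		       (unsigned long long)buf[*tail].eip,
		       (unsigned long long)buf[*tail].event);
	}
}

int main(void)
{
	/* Slot 0 would hold the escape/code entry; slots 1-3 hold a
	 * fetch record: the RIP entry plus two placeholder register
	 * pairs (made-up values, not real IBS data). */
	struct sample buf[BUF_SIZE] = {
		[1] = { 0x400123, 0 },
		[2] = { 0x11, 0x22 },
		[3] = { 0x33, 0x44 },
	};
	unsigned long tail = 0;		/* tail sits on the escape entry */

	read_ibs_record(buf, &tail, 1);
	return 0;
}

Unlike the kernel's increment_tail(), the sketch needs no rmb(): there is no
concurrent producer advancing head_pos here, so the consumer's reads need no
ordering against another CPU's writes.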