/*
* trace stack traces
*
+ * Copyright (C) 2004-2008, Soeren Sandmann
* Copyright (C) 2007 Steven Rostedt <srostedt@redhat.com>
* Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
- * Copyright (C) 2004, 2005, Soeren Sandmann
*/
#include <linux/kallsyms.h>
#include <linux/debugfs.h>
#include <linux/irq.h>
#include <linux/fs.h>
+
+#include <asm/stacktrace.h>
+
#include "trace.h"
static struct trace_array *sysprof_trace;
return ret;
}
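+/*
+ * Per-sample state threaded through the stacktrace_ops callbacks
+ * below: the trace arrays to record into and the current depth.
+ */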
+struct backtrace_info {
+ struct trace_array_cpu *data;
+ struct trace_array *tr;
+ int pos;
+};
+
+static void
+backtrace_warning_symbol(void *data, char *msg, unsigned long symbol)
+{
+ /* Ignore warnings */
+}
+
+static void backtrace_warning(void *data, char *msg)
+{
+ /* Ignore warnings */
+}
+
+static int backtrace_stack(void *data, char *name)
+{
+ /* Don't bother with IRQ stacks for now */
+ return -1;
+}
+
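+/* Record one trace entry per reliable return address, up to sample_max_depth */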
+static void backtrace_address(void *data, unsigned long addr, int reliable)
+{
+ struct backtrace_info *info = data;
+
+ if (info->pos < sample_max_depth && reliable) {
+ __trace_special(info->tr, info->data, 1, addr, 0);
+
+ info->pos++;
+ }
+}
+
+static const struct stacktrace_ops backtrace_ops = {
+ .warning = backtrace_warning,
+ .warning_symbol = backtrace_warning_symbol,
+ .stack = backtrace_stack,
+ .address = backtrace_address,
+};
+
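+/*
+ * Trace the current kernel stack: record the sampled instruction
+ * pointer, then let dump_trace() walk the remaining frames.  Returns
+ * the number of entries recorded.
+ */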
+static int
+trace_kernel(struct pt_regs *regs, struct trace_array *tr,
+ struct trace_array_cpu *data)
+{
+ struct backtrace_info info;
+ unsigned long bp;
+ char *stack;
+
+ info.tr = tr;
+ info.data = data;
+ info.pos = 1;
+
+ __trace_special(info.tr, info.data, 1, regs->ip, 0);
+
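+	/* the walkable stack starts just above the saved registers */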
+ stack = ((char *)regs + sizeof(struct pt_regs));
+#ifdef CONFIG_FRAME_POINTER
+ bp = regs->bp;
+#else
+ bp = 0;
+#endif
+
+ dump_trace(NULL, regs, (void *)stack, bp, &backtrace_ops, &info);
+
+ return info.pos;
+}
+
static void timer_notify(struct pt_regs *regs, int cpu)
{
struct trace_array_cpu *data;
if (is_user && current->state != TASK_RUNNING)
return;
-	if (!is_user) {
-		/* kernel */
-		ftrace(tr, data, current->pid, 1, 0);
-		return;
-	}
+	__trace_special(tr, data, 0, 0, current->pid);

-	__trace_special(tr, data, 0, current->pid, regs->ip);
+	if (!is_user)
+		i = trace_kernel(regs, tr, data);
+	else
+		i = 0;

+ /*
+ * Trace user stack if we are not a kernel thread
+ */
+ if (current->mm && i < sample_max_depth) {
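+		/* the user-mode registers are saved at the top of the kernel stack */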
+ regs = (struct pt_regs *)current->thread.sp0 - 1;
-	fp = (void __user *)regs->bp;

-	for (i = 0; i < sample_max_depth; i++) {
-		frame.next_fp = 0;
-		frame.return_address = 0;
-		if (!copy_stack_frame(fp, &frame))
-			break;
-		if ((unsigned long)fp < regs->sp)
-			break;
+		fp = (void __user *)regs->bp;

-		__trace_special(tr, data, 1, frame.return_address,
-				(unsigned long)fp);
-		fp = frame.next_fp;
-	}
+		__trace_special(tr, data, 2, regs->ip, 0);

-	__trace_special(tr, data, 2, current->pid, i);
+		while (i < sample_max_depth) {
+			frame.next_fp = 0;
+			frame.return_address = 0;
+			if (!copy_stack_frame(fp, &frame))
+				break;
+			if ((unsigned long)fp < regs->sp)
+				break;
+
+			__trace_special(tr, data, 2, frame.return_address,
+					(unsigned long)fp);
+			fp = frame.next_fp;
+
+			i++;
+		}
+
+	}
/*
* Special trace entry if we overflow the max depth:
*/
if (i == sample_max_depth)
__trace_special(tr, data, -1, -1, -1);
+
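+	/* close the sample: record the pid and the stack depth we reached */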
+ __trace_special(tr, data, 3, current->pid, i);
}
static enum hrtimer_restart stack_trace_timer_fn(struct hrtimer *hrtimer)