/* Add a function return address to the trace stack on thread info.*/
 static int push_return_trace(unsigned long ret, unsigned long long time,
-                               unsigned long func)
+                               unsigned long func, int *depth)
 {
        int index;
 
        current->ret_stack[index].ret = ret;
        current->ret_stack[index].func = func;
        current->ret_stack[index].calltime = time;
+       *depth = index;
 
        return 0;
 }
 
 /* Retrieve a function return address to the trace stack on thread info.*/
-static void pop_return_trace(unsigned long *ret, unsigned long long *time,
-                               unsigned long *func, unsigned long *overrun)
+static void pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret)
 {
        int index;
 
        index = current->curr_ret_stack;
        *ret = current->ret_stack[index].ret;
-       *func = current->ret_stack[index].func;
-       *time = current->ret_stack[index].calltime;
-       *overrun = atomic_read(&current->trace_overrun);
+       trace->func = current->ret_stack[index].func;
+       trace->calltime = current->ret_stack[index].calltime;
+       trace->overrun = atomic_read(&current->trace_overrun);
+       trace->depth = index;
        current->curr_ret_stack--;
 }
 
 unsigned long ftrace_return_to_handler(void)
 {
        struct ftrace_graph_ret trace;
-       pop_return_trace(&trace.ret, &trace.calltime, &trace.func,
-                       &trace.overrun);
+       unsigned long ret;
+
+       pop_return_trace(&trace, &ret);
        trace.rettime = cpu_clock(raw_smp_processor_id());
-       ftrace_graph_function(&trace);
+       ftrace_graph_return(&trace);
 
-       return trace.ret;
+       return ret;
 }
 
 /*
        unsigned long old;
        unsigned long long calltime;
        int faulted;
+       struct ftrace_graph_ent trace;
        unsigned long return_hooker = (unsigned long)
                                &return_to_handler;
 
 
        calltime = cpu_clock(raw_smp_processor_id());
 
-       if (push_return_trace(old, calltime, self_addr) == -EBUSY)
+       if (push_return_trace(old, calltime,
+                               self_addr, &trace.depth) == -EBUSY) {
                *parent = old;
+               return;
+       }
+
+       trace.func = self_addr;
+       ftrace_graph_entry(&trace);
+
 }
 
 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
 
 #endif
 
 
+/*
+ * Structure that defines an entry function trace.
+ */
+struct ftrace_graph_ent {
+       unsigned long func; /* Current function */
+       int depth;
+};
+
 /*
  * Structure that defines a return function trace.
  */
 struct ftrace_graph_ret {
-       unsigned long ret; /* Return address */
        unsigned long func; /* Current function */
        unsigned long long calltime;
        unsigned long long rettime;
        /* Number of functions that overran the depth limit for current task */
        unsigned long overrun;
+       int depth;
 };
 
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 #define FTRACE_RETFUNC_DEPTH 50
 #define FTRACE_RETSTACK_ALLOC_SIZE 32
-/* Type of a callback handler of tracing return function */
-typedef void (*trace_function_graph_t)(struct ftrace_graph_ret *);
+/* Type of the callback handlers for tracing function graph*/
+typedef void (*trace_func_graph_ret_t)(struct ftrace_graph_ret *); /* return */
+typedef void (*trace_func_graph_ent_t)(struct ftrace_graph_ent *); /* entry */
+
+extern int register_ftrace_graph(trace_func_graph_ret_t retfunc,
+                               trace_func_graph_ent_t entryfunc);
+
+/* The current handlers in use */
+extern trace_func_graph_ret_t ftrace_graph_return;
+extern trace_func_graph_ent_t ftrace_graph_entry;
 
-extern int register_ftrace_graph(trace_function_graph_t func);
-/* The current handler in use */
-extern trace_function_graph_t ftrace_graph_function;
 extern void unregister_ftrace_graph(void);
 
 extern void ftrace_graph_init_task(struct task_struct *t);
 
 
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 
-static atomic_t ftrace_retfunc_active;
-
-/* The callback that hooks the return of a function */
-trace_function_graph_t ftrace_graph_function =
-                       (trace_function_graph_t)ftrace_stub;
+static atomic_t ftrace_graph_active;
 
+/* The callbacks that hook a function */
+trace_func_graph_ret_t ftrace_graph_return =
+                       (trace_func_graph_ret_t)ftrace_stub;
+trace_func_graph_ent_t ftrace_graph_entry =
+                       (trace_func_graph_ent_t)ftrace_stub;
 
 /* Try to assign a return stack array on FTRACE_RETSTACK_ALLOC_SIZE tasks. */
 static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list)
        return ret;
 }
 
-int register_ftrace_graph(trace_function_graph_t func)
+int register_ftrace_graph(trace_func_graph_ret_t retfunc,
+                       trace_func_graph_ent_t entryfunc)
 {
        int ret = 0;
 
                ret = -EBUSY;
                goto out;
        }
-       atomic_inc(&ftrace_retfunc_active);
+       atomic_inc(&ftrace_graph_active);
        ret = start_graph_tracing();
        if (ret) {
-               atomic_dec(&ftrace_retfunc_active);
+               atomic_dec(&ftrace_graph_active);
                goto out;
        }
        ftrace_tracing_type = FTRACE_TYPE_RETURN;
-       ftrace_graph_function = func;
+       ftrace_graph_return = retfunc;
+       ftrace_graph_entry = entryfunc;
        ftrace_startup();
 
 out:
 {
        mutex_lock(&ftrace_sysctl_lock);
 
-       atomic_dec(&ftrace_retfunc_active);
-       ftrace_graph_function = (trace_function_graph_t)ftrace_stub;
+       atomic_dec(&ftrace_graph_active);
+       ftrace_graph_return = (trace_func_graph_ret_t)ftrace_stub;
+       ftrace_graph_entry = (trace_func_graph_ent_t)ftrace_stub;
        ftrace_shutdown();
        /* Restore normal tracing type */
        ftrace_tracing_type = FTRACE_TYPE_ENTER;
 /* Allocate a return stack for newly created task */
 void ftrace_graph_init_task(struct task_struct *t)
 {
-       if (atomic_read(&ftrace_retfunc_active)) {
+       if (atomic_read(&ftrace_graph_active)) {
                t->ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH
                                * sizeof(struct ftrace_ret_stack),
                                GFP_KERNEL);
 }
 #endif
 
-
-
 
 }
 
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
-static void __trace_function_graph(struct trace_array *tr,
+static void __trace_graph_entry(struct trace_array *tr,
+                               struct trace_array_cpu *data,
+                               struct ftrace_graph_ent *trace,
+                               unsigned long flags,
+                               int pc)
+{
+       struct ring_buffer_event *event;
+       struct ftrace_graph_ent_entry *entry;
+       unsigned long irq_flags;
+
+       if (unlikely(local_read(&__get_cpu_var(ftrace_cpu_disabled))))
+               return;
+
+       event = ring_buffer_lock_reserve(global_trace.buffer, sizeof(*entry),
+                                        &irq_flags);
+       if (!event)
+               return;
+       entry   = ring_buffer_event_data(event);
+       tracing_generic_entry_update(&entry->ent, flags, pc);
+       entry->ent.type                 = TRACE_GRAPH_ENT;
+       entry->graph_ent                        = *trace;
+       ring_buffer_unlock_commit(global_trace.buffer, event, irq_flags);
+}
+
+static void __trace_graph_return(struct trace_array *tr,
                                struct trace_array_cpu *data,
                                struct ftrace_graph_ret *trace,
                                unsigned long flags,
                                int pc)
 {
        struct ring_buffer_event *event;
-       struct ftrace_graph_entry *entry;
+       struct ftrace_graph_ret_entry *entry;
        unsigned long irq_flags;
 
        if (unlikely(local_read(&__get_cpu_var(ftrace_cpu_disabled))))
                return;
        entry   = ring_buffer_event_data(event);
        tracing_generic_entry_update(&entry->ent, flags, pc);
-       entry->ent.type                 = TRACE_FN_RET;
-       entry->ip                       = trace->func;
-       entry->parent_ip        = trace->ret;
-       entry->rettime          = trace->rettime;
-       entry->calltime         = trace->calltime;
-       entry->overrun          = trace->overrun;
+       entry->ent.type                 = TRACE_GRAPH_RET;
+       entry->ret                              = *trace;
        ring_buffer_unlock_commit(global_trace.buffer, event, irq_flags);
 }
 #endif
 }
 
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
-void trace_function_graph(struct ftrace_graph_ret *trace)
+void trace_graph_entry(struct ftrace_graph_ent *trace)
 {
        struct trace_array *tr = &global_trace;
        struct trace_array_cpu *data;
        disabled = atomic_inc_return(&data->disabled);
        if (likely(disabled == 1)) {
                pc = preempt_count();
-               __trace_function_graph(tr, data, trace, flags, pc);
+               __trace_graph_entry(tr, data, trace, flags, pc);
+       }
+       atomic_dec(&data->disabled);
+       raw_local_irq_restore(flags);
+}
+
+void trace_graph_return(struct ftrace_graph_ret *trace)
+{
+       struct trace_array *tr = &global_trace;
+       struct trace_array_cpu *data;
+       unsigned long flags;
+       long disabled;
+       int cpu;
+       int pc;
+
+       raw_local_irq_save(flags);
+       cpu = raw_smp_processor_id();
+       data = tr->data[cpu];
+       disabled = atomic_inc_return(&data->disabled);
+       if (likely(disabled == 1)) {
+               pc = preempt_count();
+               __trace_graph_return(tr, data, trace, flags, pc);
        }
        atomic_dec(&data->disabled);
        raw_local_irq_restore(flags);
                        trace_seq_print_cont(s, iter);
                break;
        }
-       case TRACE_FN_RET: {
+       case TRACE_GRAPH_RET: {
+               return print_graph_function(iter);
+       }
+       case TRACE_GRAPH_ENT: {
                return print_graph_function(iter);
-               break;
        }
        case TRACE_BRANCH: {
                struct trace_branch *field;
 
        TRACE_BRANCH,
        TRACE_BOOT_CALL,
        TRACE_BOOT_RET,
-       TRACE_FN_RET,
+       TRACE_GRAPH_RET,
+       TRACE_GRAPH_ENT,
        TRACE_USER_STACK,
        TRACE_BTS,
 
        unsigned long           parent_ip;
 };
 
+/* Function call entry */
+struct ftrace_graph_ent_entry {
+       struct trace_entry                      ent;
+       struct ftrace_graph_ent         graph_ent;
+};
+
 /* Function return entry */
-struct ftrace_graph_entry {
-       struct trace_entry      ent;
-       unsigned long           ip;
-       unsigned long           parent_ip;
-       unsigned long long      calltime;
-       unsigned long long      rettime;
-       unsigned long           overrun;
+struct ftrace_graph_ret_entry {
+       struct trace_entry                      ent;
+       struct ftrace_graph_ret         ret;
 };
 extern struct tracer boot_tracer;
 
                IF_ASSIGN(var, ent, struct trace_boot_call, TRACE_BOOT_CALL);\
                IF_ASSIGN(var, ent, struct trace_boot_ret, TRACE_BOOT_RET);\
                IF_ASSIGN(var, ent, struct trace_branch, TRACE_BRANCH); \
-               IF_ASSIGN(var, ent, struct ftrace_graph_entry, TRACE_FN_RET);\
+               IF_ASSIGN(var, ent, struct ftrace_graph_ent_entry,      \
+                         TRACE_GRAPH_ENT);             \
+               IF_ASSIGN(var, ent, struct ftrace_graph_ret_entry,      \
+                         TRACE_GRAPH_RET);             \
                IF_ASSIGN(var, ent, struct bts_entry, TRACE_BTS);\
                __ftrace_bad_type();                                    \
        } while (0)
                    unsigned long ip,
                    unsigned long parent_ip,
                    unsigned long flags, int pc);
-void
-trace_function_graph(struct ftrace_graph_ret *trace);
 
+void trace_graph_return(struct ftrace_graph_ret *trace);
+void trace_graph_entry(struct ftrace_graph_ent *trace);
 void trace_bts(struct trace_array *tr,
               unsigned long from,
               unsigned long to);
 
 
 #include "trace.h"
 
+#define TRACE_GRAPH_INDENT     2
 
 #define TRACE_GRAPH_PRINT_OVERRUN      0x1
 static struct tracer_opt trace_opts[] = {
        .opts = trace_opts
 };
 
+/* pid on the last trace processed */
+static pid_t last_pid = -1;
 
 static int graph_trace_init(struct trace_array *tr)
 {
        for_each_online_cpu(cpu)
                tracing_reset(tr, cpu);
 
-       return register_ftrace_graph(&trace_function_graph);
+       return register_ftrace_graph(&trace_graph_return,
+                                       &trace_graph_entry);
 }
 
 static void graph_trace_reset(struct trace_array *tr)
                unregister_ftrace_graph();
 }
 
+/* If the pid changed since the last trace, output this event */
+static int verif_pid(struct trace_seq *s, pid_t pid)
+{
+       if (last_pid != -1 && last_pid == pid)
+               return 1;
 
-enum print_line_t
-print_graph_function(struct trace_iterator *iter)
+       last_pid = pid;
+       return trace_seq_printf(s, "\n------------8<---------- thread %d"
+                                   " ------------8<----------\n\n",
+                                 pid);
+}
+
+static enum print_line_t
+print_graph_entry(struct ftrace_graph_ent *call, struct trace_seq *s,
+               struct trace_entry *ent)
 {
-       struct trace_seq *s = &iter->seq;
-       struct trace_entry *entry = iter->ent;
-       struct ftrace_graph_entry *field;
+       int i;
        int ret;
 
-       if (entry->type == TRACE_FN_RET) {
-               trace_assign_type(field, entry);
-               ret = trace_seq_printf(s, "%pF -> ", (void *)field->parent_ip);
-               if (!ret)
-                       return TRACE_TYPE_PARTIAL_LINE;
+       if (!verif_pid(s, ent->pid))
+               return TRACE_TYPE_PARTIAL_LINE;
 
-               ret = seq_print_ip_sym(s, field->ip,
-                                       trace_flags & TRACE_ITER_SYM_MASK);
+       for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++) {
+               ret = trace_seq_printf(s, " ");
                if (!ret)
                        return TRACE_TYPE_PARTIAL_LINE;
+       }
+
+       ret = seq_print_ip_sym(s, call->func, 0);
+       if (!ret)
+               return TRACE_TYPE_PARTIAL_LINE;
+
+       ret = trace_seq_printf(s, "() {\n");
+       if (!ret)
+               return TRACE_TYPE_PARTIAL_LINE;
+       return TRACE_TYPE_HANDLED;
+}
+
+static enum print_line_t
+print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s,
+                  struct trace_entry *ent)
+{
+       int i;
+       int ret;
+
+       if (!verif_pid(s, ent->pid))
+               return TRACE_TYPE_PARTIAL_LINE;
 
-               ret = trace_seq_printf(s, " (%llu ns)",
-                                       field->rettime - field->calltime);
+       for (i = 0; i < trace->depth * TRACE_GRAPH_INDENT; i++) {
+               ret = trace_seq_printf(s, " ");
                if (!ret)
                        return TRACE_TYPE_PARTIAL_LINE;
+       }
+
+       ret = trace_seq_printf(s, "} ");
+       if (!ret)
+               return TRACE_TYPE_PARTIAL_LINE;
 
-               if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERRUN) {
-                       ret = trace_seq_printf(s, " (Overruns: %lu)",
-                                               field->overrun);
-                       if (!ret)
-                               return TRACE_TYPE_PARTIAL_LINE;
-               }
+       ret = trace_seq_printf(s, "%llu\n", trace->rettime - trace->calltime);
+       if (!ret)
+               return TRACE_TYPE_PARTIAL_LINE;
 
-               ret = trace_seq_printf(s, "\n");
+       if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERRUN) {
+               ret = trace_seq_printf(s, " (Overruns: %lu)\n",
+                                       trace->overrun);
                if (!ret)
                        return TRACE_TYPE_PARTIAL_LINE;
+       }
+       return TRACE_TYPE_HANDLED;
+}
+
+enum print_line_t
+print_graph_function(struct trace_iterator *iter)
+{
+       struct trace_seq *s = &iter->seq;
+       struct trace_entry *entry = iter->ent;
 
-               return TRACE_TYPE_HANDLED;
+       switch (entry->type) {
+       case TRACE_GRAPH_ENT: {
+               struct ftrace_graph_ent_entry *field;
+               trace_assign_type(field, entry);
+               return print_graph_entry(&field->graph_ent, s, entry);
+       }
+       case TRACE_GRAPH_RET: {
+               struct ftrace_graph_ret_entry *field;
+               trace_assign_type(field, entry);
+               return print_graph_return(&field->ret, s, entry);
+       }
+       default:
+               return TRACE_TYPE_UNHANDLED;
        }
-       return TRACE_TYPE_UNHANDLED;
 }
 
 static struct tracer graph_trace __read_mostly = {
 
+++ /dev/null
-/*
- *
- * Function return tracer.
- * Copyright (c) 2008 Frederic Weisbecker <fweisbec@gmail.com>
- * Mostly borrowed from function tracer which
- * is Copyright (c) Steven Rostedt <srostedt@redhat.com>
- *
- */
-#include <linux/debugfs.h>
-#include <linux/uaccess.h>
-#include <linux/ftrace.h>
-#include <linux/fs.h>
-
-#include "trace.h"
-
-
-#define TRACE_RETURN_PRINT_OVERRUN     0x1
-static struct tracer_opt trace_opts[] = {
-       /* Display overruns or not */
-       { TRACER_OPT(overrun, TRACE_RETURN_PRINT_OVERRUN) },
-       { } /* Empty entry */
-};
-
-static struct tracer_flags tracer_flags = {
-       .val = 0, /* Don't display overruns by default */
-       .opts = trace_opts
-};
-
-
-static int return_trace_init(struct trace_array *tr)
-{
-       int cpu;
-       for_each_online_cpu(cpu)
-               tracing_reset(tr, cpu);
-
-       return register_ftrace_return(&trace_function_return);
-}
-
-static void return_trace_reset(struct trace_array *tr)
-{
-               unregister_ftrace_return();
-}
-
-
-enum print_line_t
-print_return_function(struct trace_iterator *iter)
-{
-       struct trace_seq *s = &iter->seq;
-       struct trace_entry *entry = iter->ent;
-       struct ftrace_ret_entry *field;
-       int ret;
-
-       if (entry->type == TRACE_FN_RET) {
-               trace_assign_type(field, entry);
-               ret = trace_seq_printf(s, "%pF -> ", (void *)field->parent_ip);
-               if (!ret)
-                       return TRACE_TYPE_PARTIAL_LINE;
-
-               ret = seq_print_ip_sym(s, field->ip,
-                                       trace_flags & TRACE_ITER_SYM_MASK);
-               if (!ret)
-                       return TRACE_TYPE_PARTIAL_LINE;
-
-               ret = trace_seq_printf(s, " (%llu ns)",
-                                       field->rettime - field->calltime);
-               if (!ret)
-                       return TRACE_TYPE_PARTIAL_LINE;
-
-               if (tracer_flags.val & TRACE_RETURN_PRINT_OVERRUN) {
-                       ret = trace_seq_printf(s, " (Overruns: %lu)",
-                                               field->overrun);
-                       if (!ret)
-                               return TRACE_TYPE_PARTIAL_LINE;
-               }
-
-               ret = trace_seq_printf(s, "\n");
-               if (!ret)
-                       return TRACE_TYPE_PARTIAL_LINE;
-
-               return TRACE_TYPE_HANDLED;
-       }
-       return TRACE_TYPE_UNHANDLED;
-}
-
-static struct tracer return_trace __read_mostly = {
-       .name        = "return",
-       .init        = return_trace_init,
-       .reset       = return_trace_reset,
-       .print_line = print_return_function,
-       .flags          = &tracer_flags,
-};
-
-static __init int init_return_trace(void)
-{
-       return register_tracer(&return_trace);
-}
-
-device_initcall(init_return_trace);