trace_seq_init(s);
 }
 
-void ftrace_dump(void)
+static void __ftrace_dump(bool disable_tracing)
 {
        static DEFINE_SPINLOCK(ftrace_dump_lock);
        /* use static because iter can be a bit big for the stack */
        static struct trace_iterator iter;
+       unsigned int old_userobj;
        static int dump_ran;
        unsigned long flags;
        int cnt = 0, cpu;
 
        dump_ran = 1;
 
-       /* No turning back! */
        tracing_off();
-       ftrace_kill();
+
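+       /* Permanently disable ftrace only when the caller requests it */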
+       if (disable_tracing)
+               ftrace_kill();
 
        for_each_tracing_cpu(cpu) {
                atomic_inc(&global_trace.data[cpu]->disabled);
        }
 
+       old_userobj = trace_flags & TRACE_ITER_SYM_USEROBJ;
+
        /* don't look at user memory in panic mode */
        trace_flags &= ~TRACE_ITER_SYM_USEROBJ;
 
        else
                printk(KERN_TRACE "---------------------------------\n");
 
+       /* Re-enable tracing if requested */
+       if (!disable_tracing) {
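+               /* Restore the user-space symbol flag saved above */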
+               trace_flags |= old_userobj;
+
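+               /* Let the per-cpu trace buffers record entries again */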
+               for_each_tracing_cpu(cpu) {
+                       atomic_dec(&global_trace.data[cpu]->disabled);
+               }
+               tracing_on();
+       }
+
  out:
        spin_unlock_irqrestore(&ftrace_dump_lock, flags);
 }
 
+/* By default: disable tracing after the dump */
+void ftrace_dump(void)
+{
+       __ftrace_dump(true);
+}
+
 __init static int tracer_alloc_buffers(void)
 {
        struct trace_array_cpu *data;
 
--- a/kernel/trace/trace_selftest.c
+++ b/kernel/trace/trace_selftest.c
 
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
+
+/* Maximum number of functions to trace before diagnosing a hang */
+#define GRAPH_MAX_FUNC_TEST    100000000
+
+static void __ftrace_dump(bool disable_tracing);
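+/* Counts traced entries; crossing GRAPH_MAX_FUNC_TEST signals a likely hang */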
+static unsigned int graph_hang_thresh;
+
+/* Wrap the real function entry probe to avoid possible hanging */
+static int trace_graph_entry_watchdog(struct ftrace_graph_ent *trace)
+{
+       /* This is harmlessly racy; we only need to detect a hang approximately */
+       if (unlikely(++graph_hang_thresh > GRAPH_MAX_FUNC_TEST)) {
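+               /* Stop all graph tracing before reporting the hang */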
+               ftrace_graph_stop();
+               printk(KERN_WARNING "BUG: Function graph tracer hang!\n");
+               if (ftrace_dump_on_oops)
+                       __ftrace_dump(false);
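+               /* Returning 0 tells the graph tracer not to trace this call */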
+               return 0;
+       }
+
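+       /* No hang detected: hand over to the real graph entry probe */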
+       return trace_graph_entry(trace);
+}
+
 /*
  * Pretty much the same as the function tracer from which the selftest
  * has been borrowed.
        int ret;
        unsigned long count;
 
-       ret = tracer_init(trace, tr);
+       /*
+        * Simulate the init() callback, but attach a watchdog callback
+        * to detect and recover from possible hangs.
+        */
+       tracing_reset_online_cpus(tr);
+       ret = register_ftrace_graph(&trace_graph_return,
+                                   &trace_graph_entry_watchdog);
        if (ret) {
                warn_failed_init_tracer(trace, ret);
                goto out;
        }
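+       /* Record task command lines while the self-test runs */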
+       tracing_start_cmdline_record();
 
        /* Sleep for 1/10 of a second */
        msleep(100);
 
+       /* Have we just recovered from a hang? */
+       if (graph_hang_thresh > GRAPH_MAX_FUNC_TEST) {
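+               /* The watchdog already stopped the tracer: clean up and fail */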
+               trace->reset(tr);
+               ret = -1;
+               goto out;
+       }
+
        tracing_stop();
 
        /* check the trace buffer */