int ret;
        va_list args;
        va_start(args, fmt);
-       ret = trace_vprintk(0, fmt, args);
+       ret = trace_vprintk(0, -1, fmt, args);
        va_end(args);
        return ret;
 }
        return 0;
 }
 
-int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
+int trace_vprintk(unsigned long ip, int depth, const char *fmt, va_list args)
 {
-       static DEFINE_SPINLOCK(trace_buf_lock);
+       /*
+        * Raw Spinlock because a normal spinlock would be traced here
+        * and append an irrelevant couple spin_lock_irqsave/
+        * spin_unlock_irqrestore traced by ftrace around this
+        * TRACE_PRINTK trace.
+        */
+       static raw_spinlock_t trace_buf_lock =
+                               (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
        static char trace_buf[TRACE_BUF_SIZE];
 
        struct ring_buffer_event *event;
        if (unlikely(atomic_read(&data->disabled)))
                goto out;
 
-       spin_lock_irqsave(&trace_buf_lock, flags);
+       local_irq_save(flags);
+       __raw_spin_lock(&trace_buf_lock);
        len = vsnprintf(trace_buf, TRACE_BUF_SIZE, fmt, args);
 
        len = min(len, TRACE_BUF_SIZE-1);
        tracing_generic_entry_update(&entry->ent, flags, pc);
        entry->ent.type                 = TRACE_PRINT;
        entry->ip                       = ip;
+       entry->depth                    = depth;
 
        memcpy(&entry->buf, trace_buf, len);
        entry->buf[len] = 0;
        ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
 
  out_unlock:
-       spin_unlock_irqrestore(&trace_buf_lock, flags);
+       __raw_spin_unlock(&trace_buf_lock);
+       local_irq_restore(flags);
 
  out:
        preempt_enable_notrace();
                return 0;
 
        va_start(ap, fmt);
-       ret = trace_vprintk(ip, fmt, ap);
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+       ret = trace_vprintk(ip, current->curr_ret_stack, fmt, ap);
+#else
+       ret = trace_vprintk(ip, -1, fmt, ap);
+#endif
+
        va_end(ap);
        return ret;
 }
 
 struct print_entry {
        struct trace_entry      ent;
        unsigned long           ip;
+       /* function-graph call depth at the print site; -1 when unavailable */
+       int                     depth;
        char                    buf[];
 };
 
 extern ssize_t trace_seq_to_user(struct trace_seq *s, char __user *ubuf,
                                 size_t cnt);
 extern long ns2usecs(cycle_t nsec);
-extern int trace_vprintk(unsigned long ip, const char *fmt, va_list args);
+extern int
+trace_vprintk(unsigned long ip, int depth, const char *fmt, va_list args);
 
 extern unsigned long trace_flags;
 
 
 
  */
        ret = trace_seq_printf(s,
-               "\n ------------------------------------------\n |");
+               " ------------------------------------------\n");
        if (!ret)
                TRACE_TYPE_PARTIAL_LINE;
 
        return TRACE_TYPE_HANDLED;
 }
 
+/*
+ * Render a TRACE_PRINT entry inside the function-graph output as a
+ * C-style comment line: the pid/cpu/proc columns are emitted according
+ * to tracer_flags, the overhead and time columns are left blank, the
+ * text is indented to the recorded call depth, and the printk buffer is
+ * wrapped in comment delimiters.  Returns TRACE_TYPE_PARTIAL_LINE as
+ * soon as the seq buffer fills up, TRACE_TYPE_HANDLED otherwise.
+ */
+static enum print_line_t
+print_graph_comment(struct print_entry *trace, struct trace_seq *s,
+                  struct trace_entry *ent, struct trace_iterator *iter)
+{
+       int i;
+       int ret;
+
+       /* Pid */
+       if (verif_pid(s, ent->pid, iter->cpu) == TRACE_TYPE_PARTIAL_LINE)
+               return TRACE_TYPE_PARTIAL_LINE;
+
+       /* Cpu */
+       if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU) {
+               ret = print_graph_cpu(s, iter->cpu);
+               if (ret == TRACE_TYPE_PARTIAL_LINE)
+                       return TRACE_TYPE_PARTIAL_LINE;
+       }
+
+       /* Proc */
+       if (tracer_flags.val & TRACE_GRAPH_PRINT_PROC) {
+               ret = print_graph_proc(s, ent->pid);
+               if (ret == TRACE_TYPE_PARTIAL_LINE)
+                       return TRACE_TYPE_PARTIAL_LINE;
+
+               ret = trace_seq_printf(s, " | ");
+               if (!ret)
+                       return TRACE_TYPE_PARTIAL_LINE;
+       }
+
+       /* No overhead */
+       if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERHEAD) {
+               ret = trace_seq_printf(s, "  ");
+               if (!ret)
+                       return TRACE_TYPE_PARTIAL_LINE;
+       }
+
+       /* No time */
+       ret = trace_seq_printf(s, "            |  ");
+       if (!ret)
+               return TRACE_TYPE_PARTIAL_LINE;
+
+       /* Indentation: (depth + 1) levels; skipped when depth <= 0 */
+       if (trace->depth > 0)
+               for (i = 0; i < (trace->depth + 1) * TRACE_GRAPH_INDENT; i++) {
+                       ret = trace_seq_printf(s, " ");
+                       if (!ret)
+                               return TRACE_TYPE_PARTIAL_LINE;
+               }
+
+       /* The comment */
+       ret = trace_seq_printf(s, "/* %s", trace->buf);
+       if (!ret)
+               return TRACE_TYPE_PARTIAL_LINE;
+
+       /* Append continuation data, if any; return value ignored here */
+       if (ent->flags & TRACE_FLAG_CONT)
+               trace_seq_print_cont(s, iter);
+
+       ret = trace_seq_printf(s, " */\n");
+       if (!ret)
+               return TRACE_TYPE_PARTIAL_LINE;
+
+       return TRACE_TYPE_HANDLED;
+}
+
+
 enum print_line_t
 print_graph_function(struct trace_iterator *iter)
 {
                trace_assign_type(field, entry);
                return print_graph_return(&field->ret, s, entry, iter->cpu);
        }
+       case TRACE_PRINT: {
+               struct print_entry *field;
+               trace_assign_type(field, entry);
+               return print_graph_comment(field, s, entry, iter);
+       }
        default:
                return TRACE_TYPE_UNHANDLED;
        }
 
 
 int mmio_trace_printk(const char *fmt, va_list args)
 {
-       return trace_vprintk(0, fmt, args);
+       /* ip 0 and depth -1: no caller ip or graph depth for mmio prints */
+       return trace_vprintk(0, -1, fmt, args);
 }