#include <asm/nmi.h>
 
 
-
-#ifdef CONFIG_FUNCTION_RET_TRACER
-
-/*
- * These functions are picked from those used on
- * this page for dynamic ftrace. They have been
- * simplified to ignore all traces in NMI context.
- */
-static atomic_t in_nmi;
-
-void ftrace_nmi_enter(void)
-{
-       atomic_inc(&in_nmi);
-}
-
-void ftrace_nmi_exit(void)
-{
-       atomic_dec(&in_nmi);
-}
-
-/* Add a function return address to the trace stack on thread info.*/
-static int push_return_trace(unsigned long ret, unsigned long long time,
-                               unsigned long func)
-{
-       int index;
-       struct thread_info *ti = current_thread_info();
-
-       /* The return trace stack is full */
-       if (ti->curr_ret_stack == FTRACE_RET_STACK_SIZE - 1)
-               return -EBUSY;
-
-       index = ++ti->curr_ret_stack;
-       barrier();
-       ti->ret_stack[index].ret = ret;
-       ti->ret_stack[index].func = func;
-       ti->ret_stack[index].calltime = time;
-
-       return 0;
-}
-
-/* Retrieve a function return address to the trace stack on thread info.*/
-static void pop_return_trace(unsigned long *ret, unsigned long long *time,
-                               unsigned long *func)
-{
-       int index;
-
-       struct thread_info *ti = current_thread_info();
-       index = ti->curr_ret_stack;
-       *ret = ti->ret_stack[index].ret;
-       *func = ti->ret_stack[index].func;
-       *time = ti->ret_stack[index].calltime;
-       ti->curr_ret_stack--;
-}
-
-/*
- * Send the trace to the ring-buffer.
- * @return the original return address.
- */
-unsigned long ftrace_return_to_handler(void)
-{
-       struct ftrace_retfunc trace;
-       pop_return_trace(&trace.ret, &trace.calltime, &trace.func);
-       trace.rettime = cpu_clock(raw_smp_processor_id());
-       ftrace_function_return(&trace);
-
-       return trace.ret;
-}
-
-/*
- * Hook the return address and push it in the stack of return addrs
- * in current thread info.
- */
-void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr)
-{
-       unsigned long old;
-       unsigned long long calltime;
-       int faulted;
-       unsigned long return_hooker = (unsigned long)
-                               &return_to_handler;
-
-       /* Nmi's are currently unsupported */
-       if (atomic_read(&in_nmi))
-               return;
-
-       /*
-        * Protect against fault, even if it shouldn't
-        * happen. This tool is too much intrusive to
-        * ignore such a protection.
-        */
-       asm volatile(
-               "1: movl (%[parent_old]), %[old]\n"
-               "2: movl %[return_hooker], (%[parent_replaced])\n"
-               "   movl $0, %[faulted]\n"
-
-               ".section .fixup, \"ax\"\n"
-               "3: movl $1, %[faulted]\n"
-               ".previous\n"
-
-               ".section __ex_table, \"a\"\n"
-               "   .long 1b, 3b\n"
-               "   .long 2b, 3b\n"
-               ".previous\n"
-
-               : [parent_replaced] "=r" (parent), [old] "=r" (old),
-                 [faulted] "=r" (faulted)
-               : [parent_old] "0" (parent), [return_hooker] "r" (return_hooker)
-               : "memory"
-       );
-
-       if (WARN_ON(faulted)) {
-               unregister_ftrace_return();
-               return;
-       }
-
-       if (WARN_ON(!__kernel_text_address(old))) {
-               unregister_ftrace_return();
-               *parent = old;
-               return;
-       }
-
-       calltime = cpu_clock(raw_smp_processor_id());
-
-       if (push_return_trace(old, calltime, self_addr) == -EBUSY)
-               *parent = old;
-}
-
-#endif
-
 #ifdef CONFIG_DYNAMIC_FTRACE
 
 union ftrace_code_union {
        return 0;
 }
 #endif
+
+#ifdef CONFIG_FUNCTION_RET_TRACER
+
+#ifndef CONFIG_DYNAMIC_FTRACE
+
+/*
+ * These functions are picked from those used in this
+ * file for dynamic ftrace. They have been simplified
+ * to ignore all traces in NMI context.
+ */
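+/*
+ * A single global counter is enough: while any NMI is running
+ * anywhere, prepare_ftrace_return() simply bails out.
+ */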
+static atomic_t in_nmi;
+
+void ftrace_nmi_enter(void)
+{
+       atomic_inc(&in_nmi);
+}
+
+void ftrace_nmi_exit(void)
+{
+       atomic_dec(&in_nmi);
+}
+#endif /* !CONFIG_DYNAMIC_FTRACE */
+
+/* Add a function return address to the trace stack in thread_info. */
+static int push_return_trace(unsigned long ret, unsigned long long time,
+                               unsigned long func)
+{
+       int index;
+       struct thread_info *ti = current_thread_info();
+
+       /* The return trace stack is full */
+       if (ti->curr_ret_stack == FTRACE_RET_STACK_SIZE - 1)
+               return -EBUSY;
+
+       index = ++ti->curr_ret_stack;
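+       /*
+        * barrier() keeps the compiler from reordering the stores
+        * into the new slot before the index update.
+        */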
+       barrier();
+       ti->ret_stack[index].ret = ret;
+       ti->ret_stack[index].func = func;
+       ti->ret_stack[index].calltime = time;
+
+       return 0;
+}
+
+/* Retrieve a function return address from the trace stack in thread_info. */
+static void pop_return_trace(unsigned long *ret, unsigned long long *time,
+                               unsigned long *func)
+{
+       int index;
+       struct thread_info *ti = current_thread_info();
+
+       index = ti->curr_ret_stack;
+       *ret = ti->ret_stack[index].ret;
+       *func = ti->ret_stack[index].func;
+       *time = ti->ret_stack[index].calltime;
+       ti->curr_ret_stack--;
+}
+
+/*
+ * Send the trace to the ring-buffer and
+ * return the original return address.
+ */
+unsigned long ftrace_return_to_handler(void)
+{
+       struct ftrace_retfunc trace;
+       pop_return_trace(&trace.ret, &trace.calltime, &trace.func);
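+       /* calltime was recorded by prepare_ftrace_return(); add rettime now */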
+       trace.rettime = cpu_clock(raw_smp_processor_id());
+       ftrace_function_return(&trace);
+
+       return trace.ret;
+}
+
+/*
+ * Hook the return address and push it onto the stack of
+ * return addresses in the current thread_info.
+ */
+void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr)
+{
+       unsigned long old;
+       unsigned long long calltime;
+       int faulted;
+       unsigned long return_hooker = (unsigned long)
+                               &return_to_handler;
+
+       /* NMIs are currently unsupported */
+       if (atomic_read(&in_nmi))
+               return;
+
+       /*
+        * Protect against a fault, even if it shouldn't
+        * happen. This tool is intrusive enough that we
+        * cannot afford to skip such a protection.
+        */
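+       /*
+        * Both the load at 1: and the store at 2: have fixup
+        * entries in __ex_table: a fault at either jumps to 4:,
+        * which flags the failure and resumes at 3:.
+        */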
+       asm volatile(
+               "1: movl (%[parent_old]), %[old]\n"
+               "2: movl %[return_hooker], (%[parent_replaced])\n"
+               "   movl $0, %[faulted]\n"
+               "3:\n"
+
+               ".section .fixup, \"ax\"\n"
+               "4: movl $1, %[faulted]\n"
+               "   jmp 3b\n"
+               ".previous\n"
+
+               ".section __ex_table, \"a\"\n"
+               "   .long 1b, 4b\n"
+               "   .long 2b, 4b\n"
+               ".previous\n"
+
+               : [parent_replaced] "=r" (parent), [old] "=r" (old),
+                 [faulted] "=r" (faulted)
+               : [parent_old] "0" (parent), [return_hooker] "r" (return_hooker)
+               : "memory"
+       );
+
+       if (WARN_ON(faulted)) {
+               unregister_ftrace_return();
+               return;
+       }
+
+       if (WARN_ON(!__kernel_text_address(old))) {
+               unregister_ftrace_return();
+               *parent = old;
+               return;
+       }
+
+       calltime = cpu_clock(raw_smp_processor_id());
+
+       if (push_return_trace(old, calltime, self_addr) == -EBUSY)
+               *parent = old;
+}
+
+#endif /* CONFIG_FUNCTION_RET_TRACER */
 
 /* Quick disabling of function tracer. */
 int function_trace_stop;
 
+/* By default, the tracing type is normal (function entry) tracing. */
+enum ftrace_tracing_type_t ftrace_tracing_type = FTRACE_TYPE_ENTER;
+
 /*
  * ftrace_disabled is set when an anomaly is discovered.
  * ftrace_disabled is much stronger than ftrace_enabled.
        }
 }
 
-#define FTRACE_ADDR ((long)(ftrace_caller))
 
 static int
 __ftrace_replace_code(struct dyn_ftrace *rec, int enable)
 {
        unsigned long ip, fl;
+       unsigned long ftrace_addr;
+
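+       /*
+        * Pick the trampoline to patch in: the return tracer uses
+        * ftrace_return_caller instead of the usual ftrace_caller.
+        */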
+#ifdef CONFIG_FUNCTION_RET_TRACER
+       if (ftrace_tracing_type == FTRACE_TYPE_ENTER)
+               ftrace_addr = (unsigned long)ftrace_caller;
+       else
+               ftrace_addr = (unsigned long)ftrace_return_caller;
+#else
+       ftrace_addr = (unsigned long)ftrace_caller;
+#endif
 
        ip = rec->ip;
 
        }
 
        if (rec->flags & FTRACE_FL_ENABLED)
-               return ftrace_make_call(rec, FTRACE_ADDR);
+               return ftrace_make_call(rec, ftrace_addr);
        else
-               return ftrace_make_nop(NULL, rec, FTRACE_ADDR);
+               return ftrace_make_nop(NULL, rec, ftrace_addr);
 }
 
 static void ftrace_replace_code(int enable)
                return -1;
 
        mutex_lock(&ftrace_sysctl_lock);
+
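+       /*
+        * The entry tracer and the return tracer patch the same
+        * call sites, so only one of them can be active at a time.
+        */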
+       if (ftrace_tracing_type == FTRACE_TYPE_RETURN) {
+               ret = -EBUSY;
+               goto out;
+       }
+
        ret = __register_ftrace_function(ops);
        ftrace_startup();
-       mutex_unlock(&ftrace_sysctl_lock);
 
+out:
+       mutex_unlock(&ftrace_sysctl_lock);
        return ret;
 }
 
 }
 
 #ifdef CONFIG_FUNCTION_RET_TRACER
+
+/* The callback that hooks the return of a function */
 trace_function_return_t ftrace_function_return =
                        (trace_function_return_t)ftrace_stub;
-void register_ftrace_return(trace_function_return_t func)
+
+int register_ftrace_return(trace_function_return_t func)
 {
+       int ret = 0;
+
+       mutex_lock(&ftrace_sysctl_lock);
+
+       /*
+        * Don't launch return tracing if normal function
+        * tracing is already running.
+        */
+       if (ftrace_trace_function != ftrace_stub) {
+               ret = -EBUSY;
+               goto out;
+       }
+
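+       /*
+        * Set the tracing type before ftrace_startup() so that
+        * __ftrace_replace_code() patches in the return trampoline.
+        */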
+       ftrace_tracing_type = FTRACE_TYPE_RETURN;
        ftrace_function_return = func;
+       ftrace_startup();
+
+out:
+       mutex_unlock(&ftrace_sysctl_lock);
+       return ret;
 }
 
 void unregister_ftrace_return(void)
 {
+       mutex_lock(&ftrace_sysctl_lock);
+
        ftrace_function_return = (trace_function_return_t)ftrace_stub;
+       ftrace_shutdown();
+       /* Restore normal tracing type */
+       ftrace_tracing_type = FTRACE_TYPE_ENTER;
+
+       mutex_unlock(&ftrace_sysctl_lock);
 }
 #endif