return ftrace_mod_jmp(ip, old_offset, new_offset);
  }
  
 -#else /* CONFIG_DYNAMIC_FTRACE */
 -
 -/*
 - * These functions are picked from those used on
 - * this page for dynamic ftrace. They have been
 - * simplified to ignore all traces in NMI context.
 - */
 -static atomic_t in_nmi;
 -
 -void ftrace_nmi_enter(void)
 -{
 -      atomic_inc(&in_nmi);
 -}
 -
 -void ftrace_nmi_exit(void)
 -{
 -      atomic_dec(&in_nmi);
 -}
 -
  #endif /* !CONFIG_DYNAMIC_FTRACE */
  
- /* Add a function return address to the trace stack on thread info.*/
- static int push_return_trace(unsigned long ret, unsigned long long time,
-                               unsigned long func, int *depth)
- {
-       int index;
- 
-       if (!current->ret_stack)
-               return -EBUSY;
- 
-       /* The return trace stack is full */
-       if (current->curr_ret_stack == FTRACE_RETFUNC_DEPTH - 1) {
-               atomic_inc(&current->trace_overrun);
-               return -EBUSY;
-       }
- 
-       index = ++current->curr_ret_stack;
-       barrier();
-       current->ret_stack[index].ret = ret;
-       current->ret_stack[index].func = func;
-       current->ret_stack[index].calltime = time;
-       *depth = index;
- 
-       return 0;
- }
- 
- /* Retrieve a function return address to the trace stack on thread info.*/
- static void pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret)
- {
-       int index;
- 
-       index = current->curr_ret_stack;
- 
-       if (unlikely(index < 0)) {
-               ftrace_graph_stop();
-               WARN_ON(1);
-               /* Might as well panic, otherwise we have no where to go */
-               *ret = (unsigned long)panic;
-               return;
-       }
- 
-       *ret = current->ret_stack[index].ret;
-       trace->func = current->ret_stack[index].func;
-       trace->calltime = current->ret_stack[index].calltime;
-       trace->overrun = atomic_read(&current->trace_overrun);
-       trace->depth = index;
-       barrier();
-       current->curr_ret_stack--;
- 
- }
- 
- /*
-  * Send the trace to the ring-buffer.
-  * @return the original return address.
-  */
- unsigned long ftrace_return_to_handler(void)
- {
-       struct ftrace_graph_ret trace;
-       unsigned long ret;
- 
-       pop_return_trace(&trace, &ret);
-       trace.rettime = cpu_clock(raw_smp_processor_id());
-       ftrace_graph_return(&trace);
- 
-       if (unlikely(!ret)) {
-               ftrace_graph_stop();
-               WARN_ON(1);
-               /* Might as well panic. What else to do? */
-               ret = (unsigned long)panic;
-       }
- 
-       return ret;
- }
- 
  /*
   * Hook the return address and push it in the stack of return addrs
   * in current thread info.
                return;
        }
  
 -      if (unlikely(!__kernel_text_address(old))) {
 -              ftrace_graph_stop();
 -              *parent = old;
 -              WARN_ON(1);
 -              return;
 -      }
 -
        calltime = cpu_clock(raw_smp_processor_id());
  
-       if (push_return_trace(old, calltime,
+       if (ftrace_push_return_trace(old, calltime,
                                self_addr, &trace.depth) == -EBUSY) {
                *parent = old;
                return;
 
  };
  
  /* pid on the last trace processed */
 -static pid_t last_pid[NR_CPUS] = { [0 ... NR_CPUS-1] = -1 };
 +
  
+ /* Add a function return address to the trace stack on thread info.*/
+ int
+ ftrace_push_return_trace(unsigned long ret, unsigned long long time,
+                        unsigned long func, int *depth)
+ {
+       int index;
+ 
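+       /* the per-task ret_stack is only allocated while the graph tracer is active; nothing to record without it */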
+       if (!current->ret_stack)
+               return -EBUSY;
+ 
+       /* The return trace stack is full */
+       if (current->curr_ret_stack == FTRACE_RETFUNC_DEPTH - 1) {
+               atomic_inc(&current->trace_overrun);
+               return -EBUSY;
+       }
+ 
+       index = ++current->curr_ret_stack;
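+       /* barrier(): commit the curr_ret_stack update before filling the entry, so an interrupt that pushes in between uses the next slot instead of clobbering this one */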
+       barrier();
+       current->ret_stack[index].ret = ret;
+       current->ret_stack[index].func = func;
+       current->ret_stack[index].calltime = time;
+       *depth = index;
+ 
+       return 0;
+ }
+ 
+ /* Retrieve a function return address from the trace stack on thread info. */
+ void
+ ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret)
+ {
+       int index;
+ 
+       index = current->curr_ret_stack;
+ 
+       if (unlikely(index < 0)) {
+               ftrace_graph_stop();
+               WARN_ON(1);
+               /* Might as well panic, otherwise we have nowhere to go */
+               *ret = (unsigned long)panic;
+               return;
+       }
+ 
+       *ret = current->ret_stack[index].ret;
+       trace->func = current->ret_stack[index].func;
+       trace->calltime = current->ret_stack[index].calltime;
+       trace->overrun = atomic_read(&current->trace_overrun);
+       trace->depth = index;
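+       /* barrier(): the entry must be fully read before curr_ret_stack is lowered; once lowered, an interrupt may reuse this slot */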
+       barrier();
+       current->curr_ret_stack--;
+ 
+ }
+ 
+ /*
+  * Send the trace to the ring-buffer.
+  * @return the original return address.
+  */
+ unsigned long ftrace_return_to_handler(void)
+ {
+       struct ftrace_graph_ret trace;
+       unsigned long ret;
+ 
+       ftrace_pop_return_trace(&trace, &ret);
+       trace.rettime = cpu_clock(raw_smp_processor_id());
+       ftrace_graph_return(&trace);
+ 
+       if (unlikely(!ret)) {
+               ftrace_graph_stop();
+               WARN_ON(1);
+               /* Might as well panic. What else to do? */
+               ret = (unsigned long)panic;
+       }
+ 
+       return ret;
+ }
+ 
  static int graph_trace_init(struct trace_array *tr)
  {
 -      int cpu, ret;
 -
 -      for_each_online_cpu(cpu)
 -              tracing_reset(tr, cpu);
 -
 -      ret = register_ftrace_graph(&trace_graph_return,
 +      int ret = register_ftrace_graph(&trace_graph_return,
                                        &trace_graph_entry);
        if (ret)
                return ret;