 #ifndef __ASM_SH_FTRACE_H
 #define __ASM_SH_FTRACE_H
 
+#ifdef CONFIG_FUNCTION_TRACER
+
+#define MCOUNT_INSN_SIZE       4 /* size of the mcount call's memory table entry */
+
 #ifndef __ASSEMBLY__
 extern void mcount(void);
+
+#define MCOUNT_ADDR            ((long)(mcount))
+
+#ifdef CONFIG_DYNAMIC_FTRACE
+/* Both defined in arch/sh/lib/mcount.S */
+extern void ftrace_caller(void);
+extern void ftrace_stub(void);
+
+#define CALLER_ADDR            ((long)(ftrace_caller))
+#define STUB_ADDR              ((long)(ftrace_stub))
+
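+/*
+ * Displacement from the ftrace_call site to its memory table entry;
+ * this depends on the fixed layout of ftrace_caller and ftrace_stub
+ * in arch/sh/lib/mcount.S (see the NOTE there).
+ */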
+#define MCOUNT_INSN_OFFSET     ((STUB_ADDR - CALLER_ADDR) >> 1)
+#endif /* CONFIG_DYNAMIC_FTRACE */
+
+static inline unsigned long ftrace_call_adjust(unsigned long addr)
+{
+       /* 'addr' is the memory table address. */
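+       /*
+        * On SH the call to mcount loads its target from a constant
+        * pool slot (the "memory table"); that slot, rather than the
+        * call instruction itself, is what gets patched, so the
+        * recorded address can be used as-is.
+        */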
+       return addr;
+}
 #endif
 
+#endif /* CONFIG_FUNCTION_TRACER */
+
 #endif /* __ASM_SH_FTRACE_H */
 
--- /dev/null
+++ b/arch/sh/kernel/ftrace.c
+/*
+ * Copyright (C) 2008 Matt Fleming <mjf@gentoo.org>
+ *
+ * Code for replacing ftrace calls with jumps.
+ *
+ * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
+ *
+ * Thanks go to Ingo Molnar for suggesting the idea,
+ * Mathieu Desnoyers for suggesting postponing the modifications,
+ * and Arjan van de Ven for keeping me straight and explaining
+ * the dangers of modifying code at runtime.
+ */
+#include <linux/uaccess.h>
+#include <linux/ftrace.h>
+#include <linux/string.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <asm/ftrace.h>
+#include <asm/cacheflush.h>
+
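+/*
+ * Two SH "nop" opcodes (0x0009, stored little-endian here): the
+ * 4-byte pattern used as the marker for a disabled call site.
+ */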
+static unsigned char ftrace_nop[] = {
+       0x09, 0x00,             /* nop */
+       0x09, 0x00,             /* nop */
+};
+
+static unsigned char ftrace_replaced_code[MCOUNT_INSN_SIZE];
+
+unsigned char *ftrace_nop_replace(void)
+{
+       return ftrace_nop;
+}
+
+static int is_sh_nop(unsigned char *ip)
+{
+       /* memcmp, not strncmp: the nop pattern contains 0x00 bytes. */
+       return memcmp(ip, ftrace_nop, sizeof(ftrace_nop));
+}
+
+unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr)
+{
+       /* Place the address in the memory table. */
+       if (addr == CALLER_ADDR)
+               __raw_writel(addr + MCOUNT_INSN_OFFSET, ftrace_replaced_code);
+       else
+               __raw_writel(addr, ftrace_replaced_code);
+
+       /*
+        * No locking needed; this must be called via kstop_machine,
+        * which in essence is like running on a uniprocessor machine.
+        */
+       return ftrace_replaced_code;
+}
+
+int ftrace_modify_code(unsigned long ip, unsigned char *old_code,
+                      unsigned char *new_code)
+{
+       unsigned char replaced[MCOUNT_INSN_SIZE];
+
+       /*
+        * Note: due to modules and __init, code can disappear and
+        * change; we need to protect against faulting as well as code
+        * changing. We do this by using the probe_kernel_* functions.
+        *
+        * No real locking needed; this code is run through
+        * kstop_machine, or before SMP starts.
+        */
+
+       /*
+        * If we're trying to nop out a call to a function, we instead
+        * place a call to the address after the memory table.
+        */
+       if (is_sh_nop(new_code) == 0)
+               __raw_writel(ip + MCOUNT_INSN_SIZE, new_code);
+
+       /* read the text we want to modify */
+       if (probe_kernel_read(replaced, (void *)ip, MCOUNT_INSN_SIZE))
+               return -EFAULT;
+
+       /* Make sure it is what we expect it to be */
+       if (memcmp(replaced, old_code, MCOUNT_INSN_SIZE) != 0)
+               return -EINVAL;
+
+       /* replace the text with the new text */
+       if (probe_kernel_write((void *)ip, new_code, MCOUNT_INSN_SIZE))
+               return -EPERM;
+
+       flush_icache_range(ip, ip + MCOUNT_INSN_SIZE);
+
+       return 0;
+}
+
+int ftrace_update_ftrace_func(ftrace_func_t func)
+{
+       unsigned long ip = (unsigned long)(&ftrace_call);
+       unsigned char old[MCOUNT_INSN_SIZE], *new;
+
+       memcpy(old, (unsigned char *)(ip + MCOUNT_INSN_OFFSET),
+              MCOUNT_INSN_SIZE);
+       new = ftrace_call_replace(ip, (unsigned long)func);
+
+       return ftrace_modify_code(ip + MCOUNT_INSN_OFFSET, old, new);
+}
+
+int __init ftrace_dyn_arch_init(void *data)
+{
+       /* The return code is returned via data */
+       __raw_writel(0, data);
+
+       return 0;
+}
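
For illustration, this is roughly how the generic dynamic ftrace core of
this era (kernel/trace/ftrace.c) is expected to drive the three hooks
above when it disables one call site. The function name here is
hypothetical and the sequence is an assumption modelled on the generic
code, not part of this patch:

        /*
         * Sketch only: 'ip' is the call site's memory table address, as
         * recorded at build time and passed through ftrace_call_adjust().
         * Like the hooks themselves, this would run under kstop_machine.
         */
        static int example_nop_call_site(unsigned long ip)
        {
                unsigned char *old, *new;

                /* What the table entry holds while tracing: ftrace_caller. */
                old = ftrace_call_replace(ip, CALLER_ADDR);

                /* The nop pattern; ftrace_modify_code() recognises it and
                 * writes a redirect past the memory table instead. */
                new = ftrace_nop_replace();

                return ftrace_modify_code(ip, old, new);
        }

Re-enabling a site is the mirror image: old = ftrace_nop_replace(),
new = ftrace_call_replace(ip, CALLER_ADDR).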
 
--- /dev/null
+++ b/arch/sh/lib/mcount.S
+/*
+ * arch/sh/lib/mcount.S
+ *
+ *  Copyright (C) 2008  Paul Mundt
+ *  Copyright (C) 2008  Matt Fleming
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <asm/ftrace.h>
+
+#define MCOUNT_ENTER()         \
+       mov.l   r4, @-r15;      \
+       mov.l   r5, @-r15;      \
+       mov.l   r6, @-r15;      \
+       mov.l   r7, @-r15;      \
+       sts.l   pr, @-r15;      \
+                               \
+       mov.l   @(20,r15),r4;   \
+       sts     pr, r5
+
+#define MCOUNT_LEAVE()         \
+       lds.l   @r15+, pr;      \
+       mov.l   @r15+, r7;      \
+       mov.l   @r15+, r6;      \
+       mov.l   @r15+, r5;      \
+       rts;                    \
+        mov.l  @r15+, r4
+
+       .align 2
+       .globl  _mcount
+       .type   _mcount,@function
+       .globl  mcount
+       .type   mcount,@function
+_mcount:
+mcount:
+       MCOUNT_ENTER()
+
+#ifdef CONFIG_DYNAMIC_FTRACE
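+       ! Call the tracer through the memory table entry at
+       ! .Lftrace_stub (initially ftrace_stub itself).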
+       .globl  mcount_call
+mcount_call:
+       mov.l   .Lftrace_stub, r6
+#else
+       mov.l   .Lftrace_trace_function, r6
+       mov.l   @r6, r6                 ! current tracer
+       mov.l   .Lftrace_stub, r7       ! &ftrace_stub
+       cmp/eq  r6, r7
+       bt      skip_trace
+#endif
+
+       jsr     @r6
+        nop
+
+skip_trace:
+       MCOUNT_LEAVE()
+
+       .align 2
+.Lftrace_trace_function:
+       .long   ftrace_trace_function
+
+#ifdef CONFIG_DYNAMIC_FTRACE
+       .globl ftrace_caller
+ftrace_caller:
+       MCOUNT_ENTER()
+
+       .globl ftrace_call
+ftrace_call:
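+       ! The word at .Lftrace_stub is this site's memory table entry;
+       ! ftrace_update_ftrace_func() rewrites it to point at the new
+       ! tracer, using MCOUNT_INSN_OFFSET to locate it.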
+       mov.l   .Lftrace_stub, r6
+       jsr     @r6
+        nop
+
+       MCOUNT_LEAVE()
+#endif /* CONFIG_DYNAMIC_FTRACE */
+
+/*
+ * NOTE: From here on the locations of the .Lftrace_stub label and
+ * ftrace_stub itself are fixed. Adding additional data here will skew
+ * the displacement for the memory table and break the block replacement.
+ * Place new labels either after the ftrace_stub body, or before
+ * ftrace_caller. You have been warned.
+ */
+       .align 2
+.Lftrace_stub:
+       .long   ftrace_stub
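+       ! .Lftrace_stub above is the memory table word loaded by
+       ! mcount_call and ftrace_call; it is initialised to ftrace_stub
+       ! and rewritten at runtime.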
+
+       .globl  ftrace_stub
+ftrace_stub:
+       rts
+        nop