 /* PTRACE_SYSCALL is 24 */
 #define PTRACE_GETCRUNCHREGS   25
 #define PTRACE_SETCRUNCHREGS   26
+#define PTRACE_GETVFPREGS      27
+#define PTRACE_SETVFPREGS      28
 
 /*
  * PSR bits
 
 extern void iwmmxt_task_release(struct thread_info *);
 extern void iwmmxt_task_switch(struct thread_info *);
 
+extern void vfp_sync_state(struct thread_info *thread);
+
 #endif
 
 /*
 
 #define HOST_TEXT_START_ADDR (u.start_code)
 #define HOST_STACK_END_ADDR (u.start_stack + u.u_ssize * NBPG)
 
+/*
+ * User specific VFP registers. If only VFPv2 is present, registers 16 to 31
+ * are ignored by the ptrace system call.
+ */
+struct user_vfp {
+       unsigned long long fpregs[32];
+       unsigned long fpscr;
+};
+
 #endif /* _ARM_USER_H */
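
For reference, a tracer consumes this layout directly: the pointer to a struct user_vfp is passed in the ptrace() data argument, and fpregs[] holds the 32 double-precision registers (only the first 16 are meaningful with VFPv2, per the comment above). A minimal user-space sketch, assuming a libc whose headers do not yet carry these definitions, so the request number and structure are mirrored locally:

#include <stdio.h>
#include <sys/ptrace.h>
#include <sys/types.h>

#ifndef PTRACE_GETVFPREGS
#define PTRACE_GETVFPREGS	27	/* matches the ptrace.h hunk above */
#endif

/* Local mirror of the kernel's struct user_vfp. */
struct user_vfp {
	unsigned long long fpregs[32];
	unsigned long fpscr;
};

/* Dump the VFP state of a tracee that is already attached and stopped. */
static int dump_vfp(pid_t pid)
{
	struct user_vfp vfp;
	int i;

	if (ptrace(PTRACE_GETVFPREGS, pid, NULL, &vfp) == -1) {
		perror("PTRACE_GETVFPREGS");
		return -1;
	}

	for (i = 0; i < 32; i++)
		printf("d%-2d   = 0x%016llx\n", i, vfp.fpregs[i]);
	printf("fpscr = 0x%08lx\n", vfp.fpscr);
	return 0;
}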
 
 }
 #endif
 
+#ifdef CONFIG_VFP
+/*
+ * Get the child VFP state.
+ */
+static int ptrace_getvfpregs(struct task_struct *tsk, void __user *data)
+{
+       struct thread_info *thread = task_thread_info(tsk);
+       union vfp_state *vfp = &thread->vfpstate;
+       struct user_vfp __user *ufp = data;
+
+       vfp_sync_state(thread);
+
+       /* copy the floating point registers */
+       if (copy_to_user(&ufp->fpregs, &vfp->hard.fpregs,
+                        sizeof(vfp->hard.fpregs)))
+               return -EFAULT;
+
+       /* copy the status and control register */
+       if (put_user(vfp->hard.fpscr, &ufp->fpscr))
+               return -EFAULT;
+
+       return 0;
+}
+
+/*
+ * Set the child VFP state.
+ */
+static int ptrace_setvfpregs(struct task_struct *tsk, void __user *data)
+{
+       struct thread_info *thread = task_thread_info(tsk);
+       union vfp_state *vfp = &thread->vfpstate;
+       struct user_vfp __user *ufp = data;
+
+       vfp_sync_state(thread);
+
+       /* copy the floating point registers */
+       if (copy_from_user(&vfp->hard.fpregs, &ufp->fpregs,
+                          sizeof(vfp->hard.fpregs)))
+               return -EFAULT;
+
+       /* copy the status and control register */
+       if (get_user(vfp->hard.fpscr, &ufp->fpscr))
+               return -EFAULT;
+
+       return 0;
+}
+#endif
+
 long arch_ptrace(struct task_struct *child, long request, long addr, long data)
 {
        int ret;
                        break;
 #endif
 
+#ifdef CONFIG_VFP
+               case PTRACE_GETVFPREGS:
+                       ret = ptrace_getvfpregs(child, (void __user *)data);
+                       break;
+
+               case PTRACE_SETVFPREGS:
+                       ret = ptrace_setvfpregs(child, (void __user *)data);
+                       break;
+#endif
+
                default:
                        ret = ptrace_request(child, request, addr, data);
                        break;
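
The write direction is symmetric: since ptrace_setvfpregs() copies the whole register file plus FPSCR, a debugger does a read-modify-write of struct user_vfp rather than poking individual registers. A sketch reusing the local PTRACE_GETVFPREGS and struct user_vfp definitions from the snippet above, with PTRACE_SETVFPREGS taken from the ptrace.h hunk and poke_d0() a hypothetical helper name:

#ifndef PTRACE_SETVFPREGS
#define PTRACE_SETVFPREGS	28
#endif

/* Overwrite d0 in a stopped tracee; the other registers are preserved. */
static int poke_d0(pid_t pid, unsigned long long value)
{
	struct user_vfp vfp;

	/* Fetch the complete state first so the write-back changes only d0. */
	if (ptrace(PTRACE_GETVFPREGS, pid, NULL, &vfp) == -1)
		return -1;

	vfp.fpregs[0] = value;

	return ptrace(PTRACE_SETVFPREGS, pid, NULL, &vfp) == -1 ? -1 : 0;
}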
 
        u32 flags;
 };
 
-#if defined(CONFIG_SMP) || defined(CONFIG_PM)
 extern void vfp_save_state(void *location, u32 fpexc);
-#endif
 
                                        @ retry the faulted instruction
 ENDPROC(vfp_support_entry)
 
-#if defined(CONFIG_SMP) || defined(CONFIG_PM)
 ENTRY(vfp_save_state)
        @ Save the current VFP state
        @ r0 - save location
        stmia   r0, {r1, r2, r3, r12}   @ save FPEXC, FPSCR, FPINST, FPINST2
        mov     pc, lr
 ENDPROC(vfp_save_state)
-#endif
 
 last_VFP_context_address:
        .word   last_VFP_context
 
 static inline void vfp_pm_init(void) { }
 #endif /* CONFIG_PM */
 
+/*
+ * Synchronise the hardware VFP state of a thread other than current with the
+ * saved one. This function is used by the ptrace mechanism.
+ */
+#ifdef CONFIG_SMP
+void vfp_sync_state(struct thread_info *thread)
+{
+       /*
+        * On SMP systems, the VFP state is automatically saved at every
+        * context switch. We mark the thread VFP state as belonging to a
+        * non-existent CPU so that the saved one will be reloaded when
+        * needed.
+        */
+       thread->vfpstate.hard.cpu = NR_CPUS;
+}
+#else
+void vfp_sync_state(struct thread_info *thread)
+{
+       unsigned int cpu = get_cpu();
+       u32 fpexc = fmrx(FPEXC);
+
+       /*
+        * If VFP is enabled, the previous state was already saved and
+        * last_VFP_context updated.
+        */
+       if (fpexc & FPEXC_EN)
+               goto out;
+
+       if (!last_VFP_context[cpu])
+               goto out;
+
+       /*
+        * Save the last VFP state on this CPU.
+        */
+       fmxr(FPEXC, fpexc | FPEXC_EN);
+       vfp_save_state(last_VFP_context[cpu], fpexc);
+       fmxr(FPEXC, fpexc);
+
+       /*
+        * Set the context to NULL to force a reload the next time the thread
+        * uses the VFP.
+        */
+       last_VFP_context[cpu] = NULL;
+
+out:
+       put_cpu();
+}
+#endif
+
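
To see why poisoning hard.cpu is enough on SMP: the VFP fast path only skips a reload when the hardware still holds this thread's registers, and that test involves both the per-CPU last_VFP_context pointer and the CPU number recorded in the thread's state. The sketch below is a conceptual C paraphrase of that check (an assumption about the vfp_support_entry fast path, not a literal translation of the assembly); with hard.cpu forced to NR_CPUS the test can never succeed, so the state saved at context switch is reloaded:

/*
 * Conceptual sketch only - assumes the SMP fast path is equivalent to this
 * test. The real check lives in assembly in vfp_support_entry.
 */
static int vfp_hw_state_valid(struct thread_info *thread, unsigned int cpu)
{
	union vfp_state *vfp = &thread->vfpstate;

	/*
	 * Hardware contents are reusable only if this CPU last saved or
	 * loaded this very thread's state. vfp_sync_state() sets hard.cpu
	 * to NR_CPUS, which no CPU can match, so a reload is forced.
	 */
	return last_VFP_context[cpu] == vfp && vfp->hard.cpu == cpu;
}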
 #include <linux/smp.h>
 
 /*