1 /*
2  * Infrastructure for profiling code inserted by 'gcc -pg'.
3  *
4  * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
5  * Copyright (C) 2004-2008 Ingo Molnar <mingo@redhat.com>
6  *
7  * Originally ported from the -rt patch by:
8  *   Copyright (C) 2007 Arnaldo Carvalho de Melo <acme@redhat.com>
9  *
10  * Based on code in the latency_tracer, that is:
11  *
12  *  Copyright (C) 2004-2006 Ingo Molnar
13  *  Copyright (C) 2004 William Lee Irwin III
14  */
15
16 #include <linux/stop_machine.h>
17 #include <linux/clocksource.h>
18 #include <linux/kallsyms.h>
19 #include <linux/seq_file.h>
20 #include <linux/debugfs.h>
21 #include <linux/hardirq.h>
22 #include <linux/kthread.h>
23 #include <linux/uaccess.h>
24 #include <linux/ftrace.h>
25 #include <linux/sysctl.h>
26 #include <linux/ctype.h>
27 #include <linux/hash.h>
28 #include <linux/list.h>
29
30 #include <asm/ftrace.h>
31
32 #include "trace.h"
33
34 /* ftrace_enabled is the switch that turns ftrace on or off */
35 int ftrace_enabled __read_mostly;
36 static int last_ftrace_enabled;
37
38 /*
39  * ftrace_disabled is set when an anomaly is discovered.
40  * ftrace_disabled is much stronger than ftrace_enabled.
41  */
42 static int ftrace_disabled __read_mostly;
43
44 static DEFINE_SPINLOCK(ftrace_lock);
45 static DEFINE_MUTEX(ftrace_sysctl_lock);
46
47 static struct ftrace_ops ftrace_list_end __read_mostly =
48 {
49         .func = ftrace_stub,
50 };
51
52 static struct ftrace_ops *ftrace_list __read_mostly = &ftrace_list_end;
53 ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
54
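/*
 * ftrace_list_func is installed as ftrace_trace_function whenever
 * more than one ftrace_ops is registered; it walks the chain and
 * calls every registered handler in turn.
 */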
55 static void ftrace_list_func(unsigned long ip, unsigned long parent_ip)
56 {
57         struct ftrace_ops *op = ftrace_list;
58
59         /* in case someone actually ports this to alpha! */
60         read_barrier_depends();
61
62         while (op != &ftrace_list_end) {
63                 /* silly alpha */
64                 read_barrier_depends();
65                 op->func(ip, parent_ip);
66                 op = op->next;
67         }
68 }
69
70 /**
71  * clear_ftrace_function - reset the ftrace function
72  *
73  * This NULLs the ftrace function and in essence stops
74  * tracing. There may be a lag before callers stop using the old function.
75  */
76 void clear_ftrace_function(void)
77 {
78         ftrace_trace_function = ftrace_stub;
79 }
80
81 static int __register_ftrace_function(struct ftrace_ops *ops)
82 {
83         /* Should never be called from interrupt context */
84         spin_lock(&ftrace_lock);
85
86         ops->next = ftrace_list;
87         /*
88          * We are entering ops into the ftrace_list but another
89          * CPU might be walking that list. We need to make sure
90          * the ops->next pointer is valid before another CPU sees
91          * the new ops linked into the ftrace_list.
92          */
93         smp_wmb();
94         ftrace_list = ops;
95
96         if (ftrace_enabled) {
97                 /*
98                  * For one func, simply call it directly.
99                  * For more than one func, call the chain.
100                  */
101                 if (ops->next == &ftrace_list_end)
102                         ftrace_trace_function = ops->func;
103                 else
104                         ftrace_trace_function = ftrace_list_func;
105         }
106
107         spin_unlock(&ftrace_lock);
108
109         return 0;
110 }
111
112 static int __unregister_ftrace_function(struct ftrace_ops *ops)
113 {
114         struct ftrace_ops **p;
115         int ret = 0;
116
117         spin_lock(&ftrace_lock);
118
119         /*
120          * If we are removing the last function, then simply point
121          * to the ftrace_stub.
122          */
123         if (ftrace_list == ops && ops->next == &ftrace_list_end) {
124                 ftrace_trace_function = ftrace_stub;
125                 ftrace_list = &ftrace_list_end;
126                 goto out;
127         }
128
129         for (p = &ftrace_list; *p != &ftrace_list_end; p = &(*p)->next)
130                 if (*p == ops)
131                         break;
132
133         if (*p != ops) {
134                 ret = -1;
135                 goto out;
136         }
137
138         *p = (*p)->next;
139
140         if (ftrace_enabled) {
141                 /* If we only have one func left, then call that directly */
142                 if (ftrace_list == &ftrace_list_end ||
143                     ftrace_list->next == &ftrace_list_end)
144                         ftrace_trace_function = ftrace_list->func;
145         }
146
147  out:
148         spin_unlock(&ftrace_lock);
149
150         return ret;
151 }
152
153 #ifdef CONFIG_DYNAMIC_FTRACE
154
155 static struct task_struct *ftraced_task;
156
157 enum {
158         FTRACE_ENABLE_CALLS             = (1 << 0),
159         FTRACE_DISABLE_CALLS            = (1 << 1),
160         FTRACE_UPDATE_TRACE_FUNC        = (1 << 2),
161         FTRACE_ENABLE_MCOUNT            = (1 << 3),
162         FTRACE_DISABLE_MCOUNT           = (1 << 4),
163 };
164
165 static int ftrace_filtered;
166
167 static struct hlist_head ftrace_hash[FTRACE_HASHSIZE];
168
169 static DEFINE_PER_CPU(int, ftrace_shutdown_disable_cpu);
170
171 static DEFINE_SPINLOCK(ftrace_shutdown_lock);
172 static DEFINE_MUTEX(ftraced_lock);
173 static DEFINE_MUTEX(ftrace_regex_lock);
174
175 struct ftrace_page {
176         struct ftrace_page      *next;
177         unsigned long           index;
178         struct dyn_ftrace       records[];
179 };
180
181 #define ENTRIES_PER_PAGE \
182   ((PAGE_SIZE - sizeof(struct ftrace_page)) / sizeof(struct dyn_ftrace))
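/*
 * A rough worked example (sizes are arch- and config-dependent, so
 * treat this as an estimate only): with 4096-byte pages, a 16-byte
 * struct ftrace_page header and a 32-byte struct dyn_ftrace,
 * (4096 - 16) / 32 = 127 records fit per page.
 */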
183
184 /* estimate from running different kernels */
185 #define NR_TO_INIT              10000
186
187 static struct ftrace_page       *ftrace_pages_start;
188 static struct ftrace_page       *ftrace_pages;
189
190 static int ftraced_trigger;
191 static int ftraced_suspend;
192 static int ftraced_stop;
193
194 static int ftrace_record_suspend;
195
196 static struct dyn_ftrace *ftrace_free_records;
197
198 static inline int
199 ftrace_ip_in_hash(unsigned long ip, unsigned long key)
200 {
201         struct dyn_ftrace *p;
202         struct hlist_node *t;
203         int found = 0;
204
205         hlist_for_each_entry_rcu(p, t, &ftrace_hash[key], node) {
206                 if (p->ip == ip) {
207                         found = 1;
208                         break;
209                 }
210         }
211
212         return found;
213 }
214
215 static inline void
216 ftrace_add_hash(struct dyn_ftrace *node, unsigned long key)
217 {
218         hlist_add_head_rcu(&node->node, &ftrace_hash[key]);
219 }
220
221 /* called from kstop_machine */
222 static inline void ftrace_del_hash(struct dyn_ftrace *node)
223 {
224         hlist_del(&node->node);
225 }
226
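/*
 * Freed records form a singly linked free list: a freed record no
 * longer needs its ip, so the ip field doubles as the "next"
 * pointer, and FTRACE_FL_FREE marks the record as sitting on the
 * free list.
 */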
227 static void ftrace_free_rec(struct dyn_ftrace *rec)
228 {
229         /* no locking, only called from kstop_machine */
230
231         rec->ip = (unsigned long)ftrace_free_records;
232         ftrace_free_records = rec;
233         rec->flags |= FTRACE_FL_FREE;
234 }
235
236 static struct dyn_ftrace *ftrace_alloc_dyn_node(unsigned long ip)
237 {
238         struct dyn_ftrace *rec;
239
240         /* First check for freed records */
241         if (ftrace_free_records) {
242                 rec = ftrace_free_records;
243
244                 if (unlikely(!(rec->flags & FTRACE_FL_FREE))) {
245                         WARN_ON_ONCE(1);
246                         ftrace_free_records = NULL;
247                         ftrace_disabled = 1;
248                         ftrace_enabled = 0;
249                         return NULL;
250                 }
251
252                 ftrace_free_records = (void *)rec->ip;
253                 memset(rec, 0, sizeof(*rec));
254                 return rec;
255         }
256
257         if (ftrace_pages->index == ENTRIES_PER_PAGE) {
258                 if (!ftrace_pages->next)
259                         return NULL;
260                 ftrace_pages = ftrace_pages->next;
261         }
262
263         return &ftrace_pages->records[ftrace_pages->index++];
264 }
265
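/*
 * ftrace_record_ip() is what the mcount hook calls while recording
 * is active: the call site is hashed and, if it has not been seen
 * before, a dyn_ftrace record is allocated for it and
 * ftraced_trigger is set so the ftraced daemon will convert the
 * site on its next pass.
 */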
266 static void
267 ftrace_record_ip(unsigned long ip)
268 {
269         struct dyn_ftrace *node;
270         unsigned long flags;
271         unsigned long key;
272         int resched;
273         int atomic;
274         int cpu;
275
276         if (!ftrace_enabled || ftrace_disabled)
277                 return;
278
279         resched = need_resched();
280         preempt_disable_notrace();
281
282         /*
283          * We simply need to protect against recursion.
284          * Use the raw version of smp_processor_id and not
285          * __get_cpu_var which can call debug hooks that can
286          * cause a recursive crash here.
287          */
288         cpu = raw_smp_processor_id();
289         per_cpu(ftrace_shutdown_disable_cpu, cpu)++;
290         if (per_cpu(ftrace_shutdown_disable_cpu, cpu) != 1)
291                 goto out;
292
293         if (unlikely(ftrace_record_suspend))
294                 goto out;
295
296         key = hash_long(ip, FTRACE_HASHBITS);
297
298         WARN_ON_ONCE(key >= FTRACE_HASHSIZE);
299
300         if (ftrace_ip_in_hash(ip, key))
301                 goto out;
302
303         atomic = irqs_disabled();
304
305         spin_lock_irqsave(&ftrace_shutdown_lock, flags);
306
307         /* This ip may have hit the hash before the lock */
308         if (ftrace_ip_in_hash(ip, key))
309                 goto out_unlock;
310
311         node = ftrace_alloc_dyn_node(ip);
312         if (!node)
313                 goto out_unlock;
314
315         node->ip = ip;
316
317         ftrace_add_hash(node, key);
318
319         ftraced_trigger = 1;
320
321  out_unlock:
322         spin_unlock_irqrestore(&ftrace_shutdown_lock, flags);
323  out:
324         per_cpu(ftrace_shutdown_disable_cpu, cpu)--;
325
326         /* prevent recursion with scheduler */
327         if (resched)
328                 preempt_enable_no_resched_notrace();
329         else
330                 preempt_enable_notrace();
331 }
332
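/*
 * Address of the arch-provided ftrace_caller trampoline. Enabled
 * call sites are patched to call this instead of mcount; it in
 * turn invokes ftrace_trace_function.
 */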
333 #define FTRACE_ADDR ((long)(ftrace_caller))
334
335 static int
336 __ftrace_replace_code(struct dyn_ftrace *rec,
337                       unsigned char *old, unsigned char *new, int enable)
338 {
339         unsigned long ip, fl;
340
341         ip = rec->ip;
342
343         if (ftrace_filtered && enable) {
344                 /*
345                  * If filtering is on, what happens to a record
346                  * depends on its FILTER, NOTRACE and ENABLED flags:
347                  *
348                  *  filter  notrace  enabled   action
349                  *    0        0        0      do nothing
350                  *    0        0        1      disable it
351                  *    0        1        0      do nothing
352                  *    0        1        1      disable it
353                  *    1        0        0      enable it
354                  *    1        0        1      do nothing (already on)
355                  *    1        1        0      do nothing
356                  *    1        1        1      disable it
357                  */
365
366                 fl = rec->flags & (FTRACE_FL_FILTER | FTRACE_FL_NOTRACE |
367                                    FTRACE_FL_ENABLED);
368
369                 if ((fl ==  (FTRACE_FL_FILTER | FTRACE_FL_ENABLED)) ||
370                     (fl ==  (FTRACE_FL_FILTER | FTRACE_FL_NOTRACE)) ||
371                     !fl || (fl == FTRACE_FL_NOTRACE))
372                         return 0;
373
374                 /*
375                  * If it is enabled disable it,
376                  * otherwise enable it!
377                  */
378                 if (fl & FTRACE_FL_ENABLED) {
379                         /* swap new and old */
380                         new = old;
381                         old = ftrace_call_replace(ip, FTRACE_ADDR);
382                         rec->flags &= ~FTRACE_FL_ENABLED;
383                 } else {
384                         new = ftrace_call_replace(ip, FTRACE_ADDR);
385                         rec->flags |= FTRACE_FL_ENABLED;
386                 }
387         } else {
388
389                 if (enable) {
390                         /*
391                          * If this record is set not to trace and is
392                          * not enabled, do nothing.
393                          */
394                         fl = rec->flags & (FTRACE_FL_NOTRACE | FTRACE_FL_ENABLED);
395                         if (fl == FTRACE_FL_NOTRACE)
396                                 return 0;
397
398                         new = ftrace_call_replace(ip, FTRACE_ADDR);
399                 } else
400                         old = ftrace_call_replace(ip, FTRACE_ADDR);
401
402                 if (enable) {
403                         if (rec->flags & FTRACE_FL_ENABLED)
404                                 return 0;
405                         rec->flags |= FTRACE_FL_ENABLED;
406                 } else {
407                         if (!(rec->flags & FTRACE_FL_ENABLED))
408                                 return 0;
409                         rec->flags &= ~FTRACE_FL_ENABLED;
410                 }
411         }
412
413         return ftrace_modify_code(ip, old, new);
414 }
415
416 static void ftrace_replace_code(int enable)
417 {
418         int i, failed;
419         unsigned char *new = NULL, *old = NULL;
420         struct dyn_ftrace *rec;
421         struct ftrace_page *pg;
422
423         if (enable)
424                 old = ftrace_nop_replace();
425         else
426                 new = ftrace_nop_replace();
427
428         for (pg = ftrace_pages_start; pg; pg = pg->next) {
429                 for (i = 0; i < pg->index; i++) {
430                         rec = &pg->records[i];
431
432                         /* don't modify code that has already faulted */
433                         if (rec->flags & FTRACE_FL_FAILED)
434                                 continue;
435
436                         failed = __ftrace_replace_code(rec, old, new, enable);
437                         if (failed && (rec->flags & FTRACE_FL_CONVERTED)) {
438                                 rec->flags |= FTRACE_FL_FAILED;
439                                 if ((system_state == SYSTEM_BOOTING) ||
440                                     !core_kernel_text(rec->ip)) {
441                                         ftrace_del_hash(rec);
442                                         ftrace_free_rec(rec);
443                                 }
444                         }
445                 }
446         }
447 }
448
449 static void ftrace_shutdown_replenish(void)
450 {
451         if (ftrace_pages->next)
452                 return;
453
454         /* allocate another page */
455         ftrace_pages->next = (void *)get_zeroed_page(GFP_KERNEL);
456 }
457
458 static int
459 ftrace_code_disable(struct dyn_ftrace *rec)
460 {
461         unsigned long ip;
462         unsigned char *nop, *call;
463         int failed;
464
465         ip = rec->ip;
466
467         nop = ftrace_nop_replace();
468         call = ftrace_call_replace(ip, MCOUNT_ADDR);
469
470         failed = ftrace_modify_code(ip, call, nop);
471         if (failed) {
472                 rec->flags |= FTRACE_FL_FAILED;
473                 return 0;
474         }
475         return 1;
476 }
477
478 static int __ftrace_update_code(void *ignore);
479
480 static int __ftrace_modify_code(void *data)
481 {
482         unsigned long addr;
483         int *command = data;
484
485         if (*command & FTRACE_ENABLE_CALLS) {
486                 /*
487                  * Update any recorded ips now that we have the
488                  * machine stopped
489                  */
490                 __ftrace_update_code(NULL);
491                 ftrace_replace_code(1);
492         } else if (*command & FTRACE_DISABLE_CALLS)
493                 ftrace_replace_code(0);
494
495         if (*command & FTRACE_UPDATE_TRACE_FUNC)
496                 ftrace_update_ftrace_func(ftrace_trace_function);
497
498         if (*command & FTRACE_ENABLE_MCOUNT) {
499                 addr = (unsigned long)ftrace_record_ip;
500                 ftrace_mcount_set(&addr);
501         } else if (*command & FTRACE_DISABLE_MCOUNT) {
502                 addr = (unsigned long)ftrace_stub;
503                 ftrace_mcount_set(&addr);
504         }
505
506         return 0;
507 }
508
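/*
 * All code patching is done under stop_machine_run(), so no other
 * CPU can be executing, or about to execute, the instructions
 * being rewritten.
 */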
509 static void ftrace_run_update_code(int command)
510 {
511         stop_machine_run(__ftrace_modify_code, &command, NR_CPUS);
512 }
513
514 void ftrace_disable_daemon(void)
515 {
516         /* Stop the daemon from calling kstop_machine */
517         mutex_lock(&ftraced_lock);
518         ftraced_stop = 1;
519         mutex_unlock(&ftraced_lock);
520
521         ftrace_force_update();
522 }
523
524 void ftrace_enable_daemon(void)
525 {
526         mutex_lock(&ftraced_lock);
527         ftraced_stop = 0;
528         mutex_unlock(&ftraced_lock);
529
530         ftrace_force_update();
531 }
532
533 static ftrace_func_t saved_ftrace_func;
534
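/*
 * ftraced_suspend counts the users that want call sites enabled:
 * the 0 -> 1 transition enables the calls, 1 -> 0 disables them.
 */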
535 static void ftrace_startup(void)
536 {
537         int command = 0;
538
539         if (unlikely(ftrace_disabled))
540                 return;
541
542         mutex_lock(&ftraced_lock);
543         ftraced_suspend++;
544         if (ftraced_suspend == 1)
545                 command |= FTRACE_ENABLE_CALLS;
546
547         if (saved_ftrace_func != ftrace_trace_function) {
548                 saved_ftrace_func = ftrace_trace_function;
549                 command |= FTRACE_UPDATE_TRACE_FUNC;
550         }
551
552         if (!command || !ftrace_enabled)
553                 goto out;
554
555         ftrace_run_update_code(command);
556  out:
557         mutex_unlock(&ftraced_lock);
558 }
559
560 static void ftrace_shutdown(void)
561 {
562         int command = 0;
563
564         if (unlikely(ftrace_disabled))
565                 return;
566
567         mutex_lock(&ftraced_lock);
568         ftraced_suspend--;
569         if (!ftraced_suspend)
570                 command |= FTRACE_DISABLE_CALLS;
571
572         if (saved_ftrace_func != ftrace_trace_function) {
573                 saved_ftrace_func = ftrace_trace_function;
574                 command |= FTRACE_UPDATE_TRACE_FUNC;
575         }
576
577         if (!command || !ftrace_enabled)
578                 goto out;
579
580         ftrace_run_update_code(command);
581  out:
582         mutex_unlock(&ftraced_lock);
583 }
584
585 static void ftrace_startup_sysctl(void)
586 {
587         int command = FTRACE_ENABLE_MCOUNT;
588
589         if (unlikely(ftrace_disabled))
590                 return;
591
592         mutex_lock(&ftraced_lock);
593         /* Force update next time */
594         saved_ftrace_func = NULL;
595         /* ftraced_suspend is true if we want ftrace running */
596         if (ftraced_suspend)
597                 command |= FTRACE_ENABLE_CALLS;
598
599         ftrace_run_update_code(command);
600         mutex_unlock(&ftraced_lock);
601 }
602
603 static void ftrace_shutdown_sysctl(void)
604 {
605         int command = FTRACE_DISABLE_MCOUNT;
606
607         if (unlikely(ftrace_disabled))
608                 return;
609
610         mutex_lock(&ftraced_lock);
611         /* ftraced_suspend is true if ftrace is running */
612         if (ftraced_suspend)
613                 command |= FTRACE_DISABLE_CALLS;
614
615         ftrace_run_update_code(command);
616         mutex_unlock(&ftraced_lock);
617 }
618
619 static cycle_t          ftrace_update_time;
620 static unsigned long    ftrace_update_cnt;
621 unsigned long           ftrace_update_tot_cnt;
622
623 static int __ftrace_update_code(void *ignore)
624 {
625         struct dyn_ftrace *p;
626         struct hlist_node *t, *n;
627         int save_ftrace_enabled;
628         cycle_t start, stop;
629         int i;
630
631         /* Don't be recording funcs now */
632         ftrace_record_suspend++;
633         save_ftrace_enabled = ftrace_enabled;
634         ftrace_enabled = 0;
635
636         start = ftrace_now(raw_smp_processor_id());
637         ftrace_update_cnt = 0;
638
639         /* No locks needed, the machine is stopped! */
640         for (i = 0; i < FTRACE_HASHSIZE; i++) {
641                 /* all CPUS are stopped, we are safe to modify code */
642                 hlist_for_each_entry_safe(p, t, n, &ftrace_hash[i], node) {
643                         /* Skip over failed records which have not been
644                          * freed. */
645                         if (p->flags & FTRACE_FL_FAILED)
646                                 continue;
647
648                         /* Unconverted records are always at the head of the
649                          * hash bucket. Once we encounter a converted record,
650                          * simply skip over to the next bucket. Saves ftraced
651  * some processor cycles (ftrace does its bit for
652                          * global warming :-p ). */
653                         if (p->flags & (FTRACE_FL_CONVERTED))
654                                 break;
655
656                         if (ftrace_code_disable(p)) {
657                                 p->flags |= FTRACE_FL_CONVERTED;
658                                 ftrace_update_cnt++;
659                         } else {
660                                 if ((system_state == SYSTEM_BOOTING) ||
661                                     !core_kernel_text(p->ip)) {
662                                         ftrace_del_hash(p);
663                                         ftrace_free_rec(p);
664                                 }
665                         }
666                 }
667         }
668
669         stop = ftrace_now(raw_smp_processor_id());
670         ftrace_update_time = stop - start;
671         ftrace_update_tot_cnt += ftrace_update_cnt;
672         ftraced_trigger = 0;
673
674         ftrace_enabled = save_ftrace_enabled;
675         ftrace_record_suspend--;
676
677         return 0;
678 }
679
680 static int ftrace_update_code(void)
681 {
682         if (unlikely(ftrace_disabled) ||
683             !ftrace_enabled || !ftraced_trigger)
684                 return 0;
685
686         stop_machine_run(__ftrace_update_code, NULL, NR_CPUS);
687
688         return 1;
689 }
690
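/*
 * ftraced is the daemon thread: once a second it checks whether new
 * call sites were recorded and, if so, converts them under
 * kstop_machine via ftrace_update_code().
 */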
691 static int ftraced(void *ignore)
692 {
693         unsigned long usecs;
694
695         while (!kthread_should_stop()) {
696
697                 set_current_state(TASK_INTERRUPTIBLE);
698
699                 /* check once a second */
700                 schedule_timeout(HZ);
701
702                 if (unlikely(ftrace_disabled))
703                         continue;
704
705                 mutex_lock(&ftrace_sysctl_lock);
706                 mutex_lock(&ftraced_lock);
707                 if (!ftraced_suspend && !ftraced_stop &&
708                     ftrace_update_code()) {
709                         usecs = nsecs_to_usecs(ftrace_update_time);
710                         if (ftrace_update_tot_cnt > 100000) {
711                                 ftrace_update_tot_cnt = 0;
712                                 pr_info("hm, dftrace overflow: %lu change%s"
713                                         " (%lu total) in %lu usec%s\n",
714                                         ftrace_update_cnt,
715                                         ftrace_update_cnt != 1 ? "s" : "",
716                                         ftrace_update_tot_cnt,
717                                         usecs, usecs != 1 ? "s" : "");
718                                 ftrace_disabled = 1;
719                                 WARN_ON_ONCE(1);
720                         }
721                 }
722                 mutex_unlock(&ftraced_lock);
723                 mutex_unlock(&ftrace_sysctl_lock);
724
725                 ftrace_shutdown_replenish();
726         }
727         __set_current_state(TASK_RUNNING);
728         return 0;
729 }
730
731 static int __init ftrace_dyn_table_alloc(void)
732 {
733         struct ftrace_page *pg;
734         int cnt;
735         int i;
736
737         /* allocate a few pages */
738         ftrace_pages_start = (void *)get_zeroed_page(GFP_KERNEL);
739         if (!ftrace_pages_start)
740                 return -1;
741
742         /*
743          * Allocate a few more pages.
744          *
745          * TODO: have some parser search vmlinux before
746          *   final linking to find all calls to ftrace.
747          *   Then we can:
748          *    a) know how many pages to allocate.
749          *     and/or
750          *    b) set up the table then.
751          *
752          *  The dynamic code is still necessary for
753          *  modules.
754          */
755
756         pg = ftrace_pages = ftrace_pages_start;
757
758         cnt = NR_TO_INIT / ENTRIES_PER_PAGE;
759
760         for (i = 0; i < cnt; i++) {
761                 pg->next = (void *)get_zeroed_page(GFP_KERNEL);
762
763                 /* If we fail, we'll try later anyway */
764                 if (!pg->next)
765                         break;
766
767                 pg = pg->next;
768         }
769
770         return 0;
771 }
772
773 enum {
774         FTRACE_ITER_FILTER      = (1 << 0),
775         FTRACE_ITER_CONT        = (1 << 1),
776         FTRACE_ITER_NOTRACE     = (1 << 2),
777         FTRACE_ITER_FAILURES    = (1 << 3),
778 };
779
780 #define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */
781
782 struct ftrace_iterator {
783         loff_t                  pos;
784         struct ftrace_page      *pg;
785         unsigned                idx;
786         unsigned                flags;
787         unsigned char           buffer[FTRACE_BUFF_MAX+1];
788         unsigned                buffer_idx;
789         unsigned                filtered;
790 };
791
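/*
 * seq_file iterator over every recorded function. Records that do
 * not match the iterator's flags (failures, filter, notrace) are
 * skipped in t_next().
 */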
792 static void *
793 t_next(struct seq_file *m, void *v, loff_t *pos)
794 {
795         struct ftrace_iterator *iter = m->private;
796         struct dyn_ftrace *rec = NULL;
797
798         (*pos)++;
799
800  retry:
801         if (iter->idx >= iter->pg->index) {
802                 if (iter->pg->next) {
803                         iter->pg = iter->pg->next;
804                         iter->idx = 0;
805                         goto retry;
806                 }
807         } else {
808                 rec = &iter->pg->records[iter->idx++];
809                 if ((!(iter->flags & FTRACE_ITER_FAILURES) &&
810                      (rec->flags & FTRACE_FL_FAILED)) ||
811
812                     ((iter->flags & FTRACE_ITER_FAILURES) &&
813                      (!(rec->flags & FTRACE_FL_FAILED) ||
814                       (rec->flags & FTRACE_FL_FREE))) ||
815
816                     ((iter->flags & FTRACE_ITER_FILTER) &&
817                      !(rec->flags & FTRACE_FL_FILTER)) ||
818
819                     ((iter->flags & FTRACE_ITER_NOTRACE) &&
820                      !(rec->flags & FTRACE_FL_NOTRACE))) {
821                         rec = NULL;
822                         goto retry;
823                 }
824         }
825
826         iter->pos = *pos;
827
828         return rec;
829 }
830
831 static void *t_start(struct seq_file *m, loff_t *pos)
832 {
833         struct ftrace_iterator *iter = m->private;
834         void *p = NULL;
835         loff_t l = -1;
836
837         if (*pos != iter->pos) {
838                 for (p = t_next(m, p, &l); p && l < *pos; p = t_next(m, p, &l))
839                         ;
840         } else {
841                 l = *pos;
842                 p = t_next(m, p, &l);
843         }
844
845         return p;
846 }
847
848 static void t_stop(struct seq_file *m, void *p)
849 {
850 }
851
852 static int t_show(struct seq_file *m, void *v)
853 {
854         struct dyn_ftrace *rec = v;
855         char str[KSYM_SYMBOL_LEN];
856
857         if (!rec)
858                 return 0;
859
860         kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
861
862         seq_printf(m, "%s\n", str);
863
864         return 0;
865 }
866
867 static struct seq_operations show_ftrace_seq_ops = {
868         .start = t_start,
869         .next = t_next,
870         .stop = t_stop,
871         .show = t_show,
872 };
873
874 static int
875 ftrace_avail_open(struct inode *inode, struct file *file)
876 {
877         struct ftrace_iterator *iter;
878         int ret;
879
880         if (unlikely(ftrace_disabled))
881                 return -ENODEV;
882
883         iter = kzalloc(sizeof(*iter), GFP_KERNEL);
884         if (!iter)
885                 return -ENOMEM;
886
887         iter->pg = ftrace_pages_start;
888         iter->pos = -1;
889
890         ret = seq_open(file, &show_ftrace_seq_ops);
891         if (!ret) {
892                 struct seq_file *m = file->private_data;
893
894                 m->private = iter;
895         } else {
896                 kfree(iter);
897         }
898
899         return ret;
900 }
901
902 int ftrace_avail_release(struct inode *inode, struct file *file)
903 {
904         struct seq_file *m = (struct seq_file *)file->private_data;
905         struct ftrace_iterator *iter = m->private;
906
907         seq_release(inode, file);
908         kfree(iter);
909
910         return 0;
911 }
912
913 static int
914 ftrace_failures_open(struct inode *inode, struct file *file)
915 {
916         int ret;
917         struct seq_file *m;
918         struct ftrace_iterator *iter;
919
920         ret = ftrace_avail_open(inode, file);
921         if (!ret) {
922                 m = (struct seq_file *)file->private_data;
923                 iter = (struct ftrace_iterator *)m->private;
924                 iter->flags = FTRACE_ITER_FAILURES;
925         }
926
927         return ret;
928 }
929
931 static void ftrace_filter_reset(int enable)
932 {
933         struct ftrace_page *pg;
934         struct dyn_ftrace *rec;
935         unsigned long type = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
936         unsigned i;
937
938         /* keep kstop machine from running */
939         preempt_disable();
940         if (enable)
941                 ftrace_filtered = 0;
942         pg = ftrace_pages_start;
943         while (pg) {
944                 for (i = 0; i < pg->index; i++) {
945                         rec = &pg->records[i];
946                         if (rec->flags & FTRACE_FL_FAILED)
947                                 continue;
948                         rec->flags &= ~type;
949                 }
950                 pg = pg->next;
951         }
952         preempt_enable();
953 }
954
955 static int
956 ftrace_regex_open(struct inode *inode, struct file *file, int enable)
957 {
958         struct ftrace_iterator *iter;
959         int ret = 0;
960
961         if (unlikely(ftrace_disabled))
962                 return -ENODEV;
963
964         iter = kzalloc(sizeof(*iter), GFP_KERNEL);
965         if (!iter)
966                 return -ENOMEM;
967
968         mutex_lock(&ftrace_regex_lock);
969         if ((file->f_mode & FMODE_WRITE) &&
970             !(file->f_flags & O_APPEND))
971                 ftrace_filter_reset(enable);
972
973         if (file->f_mode & FMODE_READ) {
974                 iter->pg = ftrace_pages_start;
975                 iter->pos = -1;
976                 iter->flags = enable ? FTRACE_ITER_FILTER :
977                         FTRACE_ITER_NOTRACE;
978
979                 ret = seq_open(file, &show_ftrace_seq_ops);
980                 if (!ret) {
981                         struct seq_file *m = file->private_data;
982                         m->private = iter;
983                 } else
984                         kfree(iter);
985         } else
986                 file->private_data = iter;
987         mutex_unlock(&ftrace_regex_lock);
988
989         return ret;
990 }
991
992 static int
993 ftrace_filter_open(struct inode *inode, struct file *file)
994 {
995         return ftrace_regex_open(inode, file, 1);
996 }
997
998 static int
999 ftrace_notrace_open(struct inode *inode, struct file *file)
1000 {
1001         return ftrace_regex_open(inode, file, 0);
1002 }
1003
1004 static ssize_t
1005 ftrace_regex_read(struct file *file, char __user *ubuf,
1006                        size_t cnt, loff_t *ppos)
1007 {
1008         if (file->f_mode & FMODE_READ)
1009                 return seq_read(file, ubuf, cnt, ppos);
1010         else
1011                 return -EPERM;
1012 }
1013
1014 static loff_t
1015 ftrace_regex_lseek(struct file *file, loff_t offset, int origin)
1016 {
1017         loff_t ret;
1018
1019         if (file->f_mode & FMODE_READ)
1020                 ret = seq_lseek(file, offset, origin);
1021         else
1022                 file->f_pos = ret = 1;
1023
1024         return ret;
1025 }
1026
1027 enum {
1028         MATCH_FULL,
1029         MATCH_FRONT_ONLY,
1030         MATCH_MIDDLE_ONLY,
1031         MATCH_END_ONLY,
1032 };
1033
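/*
 * ftrace_match() understands a single '*' wildcard. For example:
 *
 *	"sys_open"  - MATCH_FULL
 *	"sched*"    - MATCH_FRONT_ONLY
 *	"*lock"     - MATCH_END_ONLY
 *	"*rcu*"     - MATCH_MIDDLE_ONLY
 */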
1034 static void
1035 ftrace_match(unsigned char *buff, int len, int enable)
1036 {
1037         char str[KSYM_SYMBOL_LEN];
1038         char *search = NULL;
1039         struct ftrace_page *pg;
1040         struct dyn_ftrace *rec;
1041         int type = MATCH_FULL;
1042         unsigned long flag = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
1043         unsigned i, match = 0, search_len = 0;
1044
1045         for (i = 0; i < len; i++) {
1046                 if (buff[i] == '*') {
1047                         if (!i) {
1048                                 search = buff + i + 1;
1049                                 type = MATCH_END_ONLY;
1050                                 search_len = len - (i + 1);
1051                         } else {
1052                                 if (type == MATCH_END_ONLY) {
1053                                         type = MATCH_MIDDLE_ONLY;
1054                                 } else {
1055                                         match = i;
1056                                         type = MATCH_FRONT_ONLY;
1057                                 }
1058                                 buff[i] = 0;
1059                                 break;
1060                         }
1061                 }
1062         }
1063
1064         /* keep kstop machine from running */
1065         preempt_disable();
1066         if (enable)
1067                 ftrace_filtered = 1;
1068         pg = ftrace_pages_start;
1069         while (pg) {
1070                 for (i = 0; i < pg->index; i++) {
1071                         int matched = 0;
1072                         char *ptr;
1073
1074                         rec = &pg->records[i];
1075                         if (rec->flags & FTRACE_FL_FAILED)
1076                                 continue;
1077                         kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
1078                         switch (type) {
1079                         case MATCH_FULL:
1080                                 if (strcmp(str, buff) == 0)
1081                                         matched = 1;
1082                                 break;
1083                         case MATCH_FRONT_ONLY:
1084                                 if (memcmp(str, buff, match) == 0)
1085                                         matched = 1;
1086                                 break;
1087                         case MATCH_MIDDLE_ONLY:
1088                                 if (strstr(str, search))
1089                                         matched = 1;
1090                                 break;
1091                         case MATCH_END_ONLY:
1092                                 ptr = strstr(str, search);
1093                                 if (ptr && (ptr[search_len] == 0))
1094                                         matched = 1;
1095                                 break;
1096                         }
1097                         if (matched)
1098                                 rec->flags |= flag;
1099                 }
1100                 pg = pg->next;
1101         }
1102         preempt_enable();
1103 }
1104
1105 static ssize_t
1106 ftrace_regex_write(struct file *file, const char __user *ubuf,
1107                    size_t cnt, loff_t *ppos, int enable)
1108 {
1109         struct ftrace_iterator *iter;
1110         char ch;
1111         size_t read = 0;
1112         ssize_t ret;
1113
1114         if (!cnt)
1115                 return 0;
1116
1117         mutex_lock(&ftrace_regex_lock);
1118
1119         if (file->f_mode & FMODE_READ) {
1120                 struct seq_file *m = file->private_data;
1121                 iter = m->private;
1122         } else
1123                 iter = file->private_data;
1124
1125         if (!*ppos) {
1126                 iter->flags &= ~FTRACE_ITER_CONT;
1127                 iter->buffer_idx = 0;
1128         }
1129
1130         ret = get_user(ch, ubuf++);
1131         if (ret)
1132                 goto out;
1133         read++;
1134         cnt--;
1135
1136         if (!(iter->flags & ~FTRACE_ITER_CONT)) {
1137                 /* skip white space */
1138                 while (cnt && isspace(ch)) {
1139                         ret = get_user(ch, ubuf++);
1140                         if (ret)
1141                                 goto out;
1142                         read++;
1143                         cnt--;
1144                 }
1145
1146                 if (isspace(ch)) {
1147                         file->f_pos += read;
1148                         ret = read;
1149                         goto out;
1150                 }
1151
1152                 iter->buffer_idx = 0;
1153         }
1154
1155         while (cnt && !isspace(ch)) {
1156                 if (iter->buffer_idx < FTRACE_BUFF_MAX)
1157                         iter->buffer[iter->buffer_idx++] = ch;
1158                 else {
1159                         ret = -EINVAL;
1160                         goto out;
1161                 }
1162                 ret = get_user(ch, ubuf++);
1163                 if (ret)
1164                         goto out;
1165                 read++;
1166                 cnt--;
1167         }
1168
1169         if (isspace(ch)) {
1170                 iter->filtered++;
1171                 iter->buffer[iter->buffer_idx] = 0;
1172                 ftrace_match(iter->buffer, iter->buffer_idx, enable);
1173                 iter->buffer_idx = 0;
1174         } else
1175                 iter->flags |= FTRACE_ITER_CONT;
1176
1178         file->f_pos += read;
1179
1180         ret = read;
1181  out:
1182         mutex_unlock(&ftrace_regex_lock);
1183
1184         return ret;
1185 }
1186
1187 static ssize_t
1188 ftrace_filter_write(struct file *file, const char __user *ubuf,
1189                     size_t cnt, loff_t *ppos)
1190 {
1191         return ftrace_regex_write(file, ubuf, cnt, ppos, 1);
1192 }
1193
1194 static ssize_t
1195 ftrace_notrace_write(struct file *file, const char __user *ubuf,
1196                      size_t cnt, loff_t *ppos)
1197 {
1198         return ftrace_regex_write(file, ubuf, cnt, ppos, 0);
1199 }
1200
1201 static void
1202 ftrace_set_regex(unsigned char *buf, int len, int reset, int enable)
1203 {
1204         if (unlikely(ftrace_disabled))
1205                 return;
1206
1207         mutex_lock(&ftrace_regex_lock);
1208         if (reset)
1209                 ftrace_filter_reset(enable);
1210         if (buf)
1211                 ftrace_match(buf, len, enable);
1212         mutex_unlock(&ftrace_regex_lock);
1213 }
1214
1215 /**
1216  * ftrace_set_filter - set a function to filter on in ftrace
1217  * @buf - the string that holds the function filter text.
1218  * @len - the length of the string.
1219  * @reset - non zero to reset all filters before applying this filter.
1220  *
1221  * Filters denote which functions should be enabled when tracing is enabled.
1222  * If @buf is NULL and reset is set, all functions will be enabled for tracing.
1223  */
1224 void ftrace_set_filter(unsigned char *buf, int len, int reset)
1225 {
1226         ftrace_set_regex(buf, len, reset, 1);
1227 }
1228
1229 /**
1230  * ftrace_set_notrace - set a function to not trace in ftrace
1231  * @buf - the string that holds the function notrace text.
1232  * @len - the length of the string.
1233  * @reset - non zero to reset all filters before applying this filter.
1234  *
1235  * Notrace Filters denote which functions should not be enabled when tracing
1236  * is enabled. If @buf is NULL and reset is set, all functions will be enabled
1237  * for tracing.
1238  */
1239 void ftrace_set_notrace(unsigned char *buf, int len, int reset)
1240 {
1241         ftrace_set_regex(buf, len, reset, 0);
1242 }
1243
1244 static int
1245 ftrace_regex_release(struct inode *inode, struct file *file, int enable)
1246 {
1247         struct seq_file *m = (struct seq_file *)file->private_data;
1248         struct ftrace_iterator *iter;
1249
1250         mutex_lock(&ftrace_regex_lock);
1251         if (file->f_mode & FMODE_READ) {
1252                 iter = m->private;
1253
1254                 seq_release(inode, file);
1255         } else
1256                 iter = file->private_data;
1257
1258         if (iter->buffer_idx) {
1259                 iter->filtered++;
1260                 iter->buffer[iter->buffer_idx] = 0;
1261                 ftrace_match(iter->buffer, iter->buffer_idx, enable);
1262         }
1263
1264         mutex_lock(&ftrace_sysctl_lock);
1265         mutex_lock(&ftraced_lock);
1266         if (iter->filtered && ftraced_suspend && ftrace_enabled)
1267                 ftrace_run_update_code(FTRACE_ENABLE_CALLS);
1268         mutex_unlock(&ftraced_lock);
1269         mutex_unlock(&ftrace_sysctl_lock);
1270
1271         kfree(iter);
1272         mutex_unlock(&ftrace_regex_lock);
1273         return 0;
1274 }
1275
1276 static int
1277 ftrace_filter_release(struct inode *inode, struct file *file)
1278 {
1279         return ftrace_regex_release(inode, file, 1);
1280 }
1281
1282 static int
1283 ftrace_notrace_release(struct inode *inode, struct file *file)
1284 {
1285         return ftrace_regex_release(inode, file, 0);
1286 }
1287
1288 static ssize_t
1289 ftraced_read(struct file *filp, char __user *ubuf,
1290                      size_t cnt, loff_t *ppos)
1291 {
1292         /* don't worry about races */
1293         char *buf = ftraced_stop ? "disabled\n" : "enabled\n";
1294         int r = strlen(buf);
1295
1296         return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
1297 }
1298
1299 static ssize_t
1300 ftraced_write(struct file *filp, const char __user *ubuf,
1301                       size_t cnt, loff_t *ppos)
1302 {
1303         char buf[64];
1304         long val;
1305         int ret;
1306
1307         if (cnt >= sizeof(buf))
1308                 return -EINVAL;
1309
1310         if (copy_from_user(&buf, ubuf, cnt))
1311                 return -EFAULT;
1312
1313         if (strncmp(buf, "enable", 6) == 0)
1314                 val = 1;
1315         else if (strncmp(buf, "disable", 7) == 0)
1316                 val = 0;
1317         else {
1318                 buf[cnt] = 0;
1319
1320                 ret = strict_strtoul(buf, 10, &val);
1321                 if (ret < 0)
1322                         return ret;
1323
1324                 val = !!val;
1325         }
1326
1327         if (val)
1328                 ftrace_enable_daemon();
1329         else
1330                 ftrace_disable_daemon();
1331
1332         filp->f_pos += cnt;
1333
1334         return cnt;
1335 }
1336
1337 static struct file_operations ftrace_avail_fops = {
1338         .open = ftrace_avail_open,
1339         .read = seq_read,
1340         .llseek = seq_lseek,
1341         .release = ftrace_avail_release,
1342 };
1343
1344 static struct file_operations ftrace_failures_fops = {
1345         .open = ftrace_failures_open,
1346         .read = seq_read,
1347         .llseek = seq_lseek,
1348         .release = ftrace_avail_release,
1349 };
1350
1351 static struct file_operations ftrace_filter_fops = {
1352         .open = ftrace_filter_open,
1353         .read = ftrace_regex_read,
1354         .write = ftrace_filter_write,
1355         .llseek = ftrace_regex_lseek,
1356         .release = ftrace_filter_release,
1357 };
1358
1359 static struct file_operations ftrace_notrace_fops = {
1360         .open = ftrace_notrace_open,
1361         .read = ftrace_regex_read,
1362         .write = ftrace_notrace_write,
1363         .llseek = ftrace_regex_lseek,
1364         .release = ftrace_notrace_release,
1365 };
1366
1367 static struct file_operations ftraced_fops = {
1368         .open = tracing_open_generic,
1369         .read = ftraced_read,
1370         .write = ftraced_write,
1371 };
1372
1373 /**
1374  * ftrace_force_update - force an update to all recording ftrace functions
1375  */
1376 int ftrace_force_update(void)
1377 {
1378         int ret = 0;
1379
1380         if (unlikely(ftrace_disabled))
1381                 return -ENODEV;
1382
1383         mutex_lock(&ftrace_sysctl_lock);
1384         mutex_lock(&ftraced_lock);
1385
1386         /*
1387          * If ftraced_trigger is not set, then there is nothing
1388          * to update.
1389          */
1390         if (ftraced_trigger && !ftrace_update_code())
1391                 ret = -EBUSY;
1392
1393         mutex_unlock(&ftraced_lock);
1394         mutex_unlock(&ftrace_sysctl_lock);
1395
1396         return ret;
1397 }
1398
1399 static void ftrace_force_shutdown(void)
1400 {
1401         struct task_struct *task;
1402         int command = FTRACE_DISABLE_CALLS | FTRACE_UPDATE_TRACE_FUNC;
1403
1404         mutex_lock(&ftraced_lock);
1405         task = ftraced_task;
1406         ftraced_task = NULL;
1407         ftraced_suspend = -1;
1408         ftrace_run_update_code(command);
1409         mutex_unlock(&ftraced_lock);
1410
1411         if (task)
1412                 kthread_stop(task);
1413 }
1414
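/*
 * Example usage from user space (a sketch; the mount point of the
 * tracing debugfs directory varies between setups):
 *
 *	echo sys_open > /debug/tracing/set_ftrace_filter
 *	echo 'sched*' >> /debug/tracing/set_ftrace_filter
 *	echo 0 > /debug/tracing/ftraced_enabled
 */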
1415 static __init int ftrace_init_debugfs(void)
1416 {
1417         struct dentry *d_tracer;
1418         struct dentry *entry;
1419
1420         d_tracer = tracing_init_dentry();
1421
1422         entry = debugfs_create_file("available_filter_functions", 0444,
1423                                     d_tracer, NULL, &ftrace_avail_fops);
1424         if (!entry)
1425                 pr_warning("Could not create debugfs "
1426                            "'available_filter_functions' entry\n");
1427
1428         entry = debugfs_create_file("failures", 0444,
1429                                     d_tracer, NULL, &ftrace_failures_fops);
1430         if (!entry)
1431                 pr_warning("Could not create debugfs 'failures' entry\n");
1432
1433         entry = debugfs_create_file("set_ftrace_filter", 0644, d_tracer,
1434                                     NULL, &ftrace_filter_fops);
1435         if (!entry)
1436                 pr_warning("Could not create debugfs "
1437                            "'set_ftrace_filter' entry\n");
1438
1439         entry = debugfs_create_file("set_ftrace_notrace", 0644, d_tracer,
1440                                     NULL, &ftrace_notrace_fops);
1441         if (!entry)
1442                 pr_warning("Could not create debugfs "
1443                            "'set_ftrace_notrace' entry\n");
1444
1445         entry = debugfs_create_file("ftraced_enabled", 0644, d_tracer,
1446                                     NULL, &ftraced_fops);
1447         if (!entry)
1448                 pr_warning("Could not create debugfs "
1449                            "'ftraced_enabled' entry\n");
1450         return 0;
1451 }
1452
1453 fs_initcall(ftrace_init_debugfs);
1454
1455 static int __init ftrace_dynamic_init(void)
1456 {
1457         struct task_struct *p;
1458         unsigned long addr;
1459         int ret;
1460
1461         addr = (unsigned long)ftrace_record_ip;
1462
1463         stop_machine_run(ftrace_dyn_arch_init, &addr, NR_CPUS);
1464
1465         /* ftrace_dyn_arch_init places the return code in addr */
1466         if (addr) {
1467                 ret = (int)addr;
1468                 goto failed;
1469         }
1470
1471         ret = ftrace_dyn_table_alloc();
1472         if (ret)
1473                 goto failed;
1474
1475         p = kthread_run(ftraced, NULL, "ftraced");
1476         if (IS_ERR(p)) {
1477                 ret = -1;
1478                 goto failed;
1479         }
1480
1481         last_ftrace_enabled = ftrace_enabled = 1;
1482         ftraced_task = p;
1483
1484         return 0;
1485
1486  failed:
1487         ftrace_disabled = 1;
1488         return ret;
1489 }
1490
1491 core_initcall(ftrace_dynamic_init);
1492 #else
1493 # define ftrace_startup()               do { } while (0)
1494 # define ftrace_shutdown()              do { } while (0)
1495 # define ftrace_startup_sysctl()        do { } while (0)
1496 # define ftrace_shutdown_sysctl()       do { } while (0)
1497 # define ftrace_force_shutdown()        do { } while (0)
1498 #endif /* CONFIG_DYNAMIC_FTRACE */
1499
1500 /**
1501  * ftrace_kill - totally shutdown ftrace
1502  *
1503  * This is a safety measure. If something was detected that seems
1504  * wrong, calling this function will keep ftrace from doing
1505  * any more modifications or updates. It is used when
1506  * something has gone wrong and ftrace must stay disabled.
1507  */
1508 void ftrace_kill(void)
1509 {
1510         mutex_lock(&ftrace_sysctl_lock);
1511         ftrace_disabled = 1;
1512         ftrace_enabled = 0;
1513
1514         clear_ftrace_function();
1515         mutex_unlock(&ftrace_sysctl_lock);
1516
1517         /* Try to totally disable ftrace */
1518         ftrace_force_shutdown();
1519 }
1520
1521 /**
1522  * register_ftrace_function - register a function for profiling
1523  * @ops - ops structure that holds the function for profiling.
1524  *
1525  * Register a function to be called by all functions in the
1526  * kernel.
1527  *
1528  * Note: @ops->func and all the functions it calls must be labeled
1529  *       with "notrace", otherwise it will go into a
1530  *       recursive loop.
1531  */
1532 int register_ftrace_function(struct ftrace_ops *ops)
1533 {
1534         int ret;
1535
1536         if (unlikely(ftrace_disabled))
1537                 return -1;
1538
1539         mutex_lock(&ftrace_sysctl_lock);
1540         ret = __register_ftrace_function(ops);
1541         ftrace_startup();
1542         mutex_unlock(&ftrace_sysctl_lock);
1543
1544         return ret;
1545 }
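/*
 * Example (a minimal sketch; my_func and my_ops are hypothetical
 * names, not defined anywhere in the kernel):
 *
 *	static void notrace my_func(unsigned long ip,
 *				    unsigned long parent_ip)
 *	{
 *		... record the call from parent_ip to ip ...
 *	}
 *
 *	static struct ftrace_ops my_ops __read_mostly = {
 *		.func = my_func,
 *	};
 *
 *	register_ftrace_function(&my_ops);
 */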
1546
1547 /**
1548  * unregister_ftrace_function - unregister a function for profiling.
1549  * @ops - ops structure that holds the function to unregister
1550  *
1551  * Unregister a function that was added to be called by ftrace profiling.
1552  */
1553 int unregister_ftrace_function(struct ftrace_ops *ops)
1554 {
1555         int ret;
1556
1557         mutex_lock(&ftrace_sysctl_lock);
1558         ret = __unregister_ftrace_function(ops);
1559         ftrace_shutdown();
1560         mutex_unlock(&ftrace_sysctl_lock);
1561
1562         return ret;
1563 }
1564
1565 int
1566 ftrace_enable_sysctl(struct ctl_table *table, int write,
1567                      struct file *file, void __user *buffer, size_t *lenp,
1568                      loff_t *ppos)
1569 {
1570         int ret;
1571
1572         if (unlikely(ftrace_disabled))
1573                 return -ENODEV;
1574
1575         mutex_lock(&ftrace_sysctl_lock);
1576
1577         ret  = proc_dointvec(table, write, file, buffer, lenp, ppos);
1578
1579         if (ret || !write || (last_ftrace_enabled == ftrace_enabled))
1580                 goto out;
1581
1582         last_ftrace_enabled = ftrace_enabled;
1583
1584         if (ftrace_enabled) {
1585
1586                 ftrace_startup_sysctl();
1587
1588                 /* we are starting ftrace again */
1589                 if (ftrace_list != &ftrace_list_end) {
1590                         if (ftrace_list->next == &ftrace_list_end)
1591                                 ftrace_trace_function = ftrace_list->func;
1592                         else
1593                                 ftrace_trace_function = ftrace_list_func;
1594                 }
1595
1596         } else {
1597                 /* stopping ftrace calls (just send to ftrace_stub) */
1598                 ftrace_trace_function = ftrace_stub;
1599
1600                 ftrace_shutdown_sysctl();
1601         }
1602
1603  out:
1604         mutex_unlock(&ftrace_sysctl_lock);
1605         return ret;
1606 }