pilppa.org Git - linux-2.6-omap-h63xx.git/commitdiff
ftrace: insert in the ftrace_preempt_disable()/enable() functions
author Steven Rostedt <rostedt@goodmis.org>
Tue, 4 Nov 2008 04:15:56 +0000 (23:15 -0500)
committer Ingo Molnar <mingo@elte.hu>
Tue, 4 Nov 2008 09:09:49 +0000 (10:09 +0100)
Impact: use new, consolidated APIs in ftrace plugins

This patch replaces the open-coded, scheduler-safe preempt-disable sequences with
the ftrace_preempt_disable() and ftrace_preempt_enable() helpers.

Signed-off-by: Steven Rostedt <srostedt@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
kernel/trace/ring_buffer.c
kernel/trace/trace.c
kernel/trace/trace_sched_wakeup.c
kernel/trace/trace_stack.c
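
For reference, the two helpers being adopted here live in kernel/trace/trace.h (hence the #include "trace.h" added to ring_buffer.c in the first hunk). A minimal sketch of what they consolidate, reconstructed from the open-coded sequences this patch removes (see trace.h for the authoritative definitions):

/*
 * Sketch of the consolidated helpers, reconstructed from the open-coded
 * sequences removed below; the real definitions live in kernel/trace/trace.h.
 */
static inline int ftrace_preempt_disable(void)
{
	int resched;

	/* Sample NEED_RESCHED before disabling preemption, so the enable
	 * side knows whether a reschedule was already pending. */
	resched = need_resched();
	preempt_disable_notrace();

	return resched;
}

static inline void ftrace_preempt_enable(int resched)
{
	/*
	 * If a reschedule was already pending when preemption was disabled,
	 * we may have been called from inside the scheduler; re-enable
	 * without triggering preempt_schedule() to avoid recursion.
	 */
	if (resched)
		preempt_enable_no_resched_notrace();
	else
		preempt_enable_notrace();
}

Each converted call site then shrinks to resched = ftrace_preempt_disable(); ... ftrace_preempt_enable(resched);, as the hunks below show.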

kernel/trace/ring_buffer.c
index cedf4e2682855000f28c7f5ddbe3a8fabd2bc0d2..151f6a74867674a4ef48bb5c6320a7accfd3daeb 100644
@@ -16,6 +16,8 @@
 #include <linux/list.h>
 #include <linux/fs.h>
 
+#include "trace.h"
+
 /* Up this if you want to test the TIME_EXTENTS and normalization */
 #define DEBUG_SHIFT 0
 
@@ -1122,8 +1124,7 @@ ring_buffer_lock_reserve(struct ring_buffer *buffer,
                return NULL;
 
        /* If we are tracing schedule, we don't want to recurse */
-       resched = need_resched();
-       preempt_disable_notrace();
+       resched = ftrace_preempt_disable();
 
        cpu = raw_smp_processor_id();
 
@@ -1154,10 +1155,7 @@ ring_buffer_lock_reserve(struct ring_buffer *buffer,
        return event;
 
  out:
-       if (resched)
-               preempt_enable_notrace();
-       else
-               preempt_enable_notrace();
+       ftrace_preempt_enable(resched);
        return NULL;
 }
 
@@ -1199,12 +1197,9 @@ int ring_buffer_unlock_commit(struct ring_buffer *buffer,
        /*
         * Only the last preempt count needs to restore preemption.
         */
-       if (preempt_count() == 1) {
-               if (per_cpu(rb_need_resched, cpu))
-                       preempt_enable_no_resched_notrace();
-               else
-                       preempt_enable_notrace();
-       } else
+       if (preempt_count() == 1)
+               ftrace_preempt_enable(per_cpu(rb_need_resched, cpu));
+       else
                preempt_enable_no_resched_notrace();
 
        return 0;
@@ -1237,8 +1232,7 @@ int ring_buffer_write(struct ring_buffer *buffer,
        if (atomic_read(&buffer->record_disabled))
                return -EBUSY;
 
-       resched = need_resched();
-       preempt_disable_notrace();
+       resched = ftrace_preempt_disable();
 
        cpu = raw_smp_processor_id();
 
@@ -1264,10 +1258,7 @@ int ring_buffer_write(struct ring_buffer *buffer,
 
        ret = 0;
  out:
-       if (resched)
-               preempt_enable_no_resched_notrace();
-       else
-               preempt_enable_notrace();
+       ftrace_preempt_enable(resched);
 
        return ret;
 }
kernel/trace/trace.c
index e4c40c868d67fb0f3028d5bf2ef3f806b23ab62f..3e7bf5eb9007e7acf139a89107dd52fc6acbec7a 100644
@@ -904,8 +904,7 @@ function_trace_call(unsigned long ip, unsigned long parent_ip)
                return;
 
        pc = preempt_count();
-       resched = need_resched();
-       preempt_disable_notrace();
+       resched = ftrace_preempt_disable();
        local_save_flags(flags);
        cpu = raw_smp_processor_id();
        data = tr->data[cpu];
@@ -915,10 +914,7 @@ function_trace_call(unsigned long ip, unsigned long parent_ip)
                trace_function(tr, data, ip, parent_ip, flags, pc);
 
        atomic_dec(&data->disabled);
-       if (resched)
-               preempt_enable_no_resched_notrace();
-       else
-               preempt_enable_notrace();
+       ftrace_preempt_enable(resched);
 }
 
 static struct ftrace_ops trace_ops __read_mostly =
kernel/trace/trace_sched_wakeup.c
index 3ae93f16b565de131887da94ee10835c183b2d6a..7bc4abf6fca8217f1e393b5eb603c412fb503aab 100644
@@ -50,8 +50,7 @@ wakeup_tracer_call(unsigned long ip, unsigned long parent_ip)
                return;
 
        pc = preempt_count();
-       resched = need_resched();
-       preempt_disable_notrace();
+       resched = ftrace_preempt_disable();
 
        cpu = raw_smp_processor_id();
        data = tr->data[cpu];
@@ -81,15 +80,7 @@ wakeup_tracer_call(unsigned long ip, unsigned long parent_ip)
  out:
        atomic_dec(&data->disabled);
 
-       /*
-        * To prevent recursion from the scheduler, if the
-        * resched flag was set before we entered, then
-        * don't reschedule.
-        */
-       if (resched)
-               preempt_enable_no_resched_notrace();
-       else
-               preempt_enable_notrace();
+       ftrace_preempt_enable(resched);
 }
 
 static struct ftrace_ops trace_ops __read_mostly =
kernel/trace/trace_stack.c
index be682b62fe586285c77c36d7e7c6d6beb929541f..d39e8b7de6a299af3d1d35706f51af769ce7293d 100644
@@ -107,8 +107,7 @@ stack_trace_call(unsigned long ip, unsigned long parent_ip)
        if (unlikely(!ftrace_enabled || stack_trace_disabled))
                return;
 
-       resched = need_resched();
-       preempt_disable_notrace();
+       resched = ftrace_preempt_disable();
 
        cpu = raw_smp_processor_id();
        /* no atomic needed, we only modify this variable by this cpu */
@@ -120,10 +119,7 @@ stack_trace_call(unsigned long ip, unsigned long parent_ip)
  out:
        per_cpu(trace_active, cpu)--;
        /* prevent recursion in schedule */
-       if (resched)
-               preempt_enable_no_resched_notrace();
-       else
-               preempt_enable_notrace();
+       ftrace_preempt_enable(resched);
 }
 
 static struct ftrace_ops trace_ops __read_mostly =
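
For illustration, a converted callback ends up with the shape below (my_tracer_call() is a hypothetical name; the flow simply mirrors the call sites above). The branch hidden inside ftrace_preempt_enable() is what prevents recursion: if NEED_RESCHED was already set when the callback ran, a plain preempt_enable_notrace() would call into preempt_schedule() and recurse into the scheduler.

static void my_tracer_call(unsigned long ip, unsigned long parent_ip)
{
	int resched;

	/* Note whether a reschedule was already pending, then disable
	 * preemption without entering the function tracer again. */
	resched = ftrace_preempt_disable();

	/* ... record the event for raw_smp_processor_id() ... */

	/* Restore preemption; skip the reschedule check if one was already
	 * pending, so tracing schedule() cannot recurse into it. */
	ftrace_preempt_enable(resched);
}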