pilppa.org Git - linux-2.6-omap-h63xx.git/blobdiff - kernel/trace/ring_buffer.c
Merge branch 'for-linus' of git://git390.osdl.marist.edu/pub/scm/linux-2.6
index 7f69cfeaadf76a1bc002df343d2006ed20204a5e..76f34c0ef29c3aa9ea0123a3933e84c296a3caf9 100644
@@ -107,7 +107,7 @@ u64 ring_buffer_time_stamp(int cpu)
        preempt_disable_notrace();
        /* shift to debug/test normalization and TIME_EXTENTS */
        time = sched_clock() << DEBUG_SHIFT;
-       preempt_enable_notrace();
+       preempt_enable_no_resched_notrace();
 
        return time;
 }
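
Two things are going on in this hunk. The change itself swaps in preempt_enable_no_resched_notrace(), which re-enables preemption without checking for a pending reschedule, presumably so that taking a time stamp from inside the tracing path cannot itself call into the scheduler. The surrounding DEBUG_SHIFT comment is also worth unpacking: the raw sched_clock() value is shifted up only so the normalization and TIME_EXTENT handling get exercised, and the consumer shifts it back down by the same amount. A minimal user-space sketch of that round trip, with hypothetical names (demo_time_stamp, demo_normalize) standing in for the kernel functions:

/* Hypothetical user-space analogue of the DEBUG_SHIFT round trip. */
#include <stdint.h>
#include <stdio.h>
#include <time.h>

#define DEBUG_SHIFT 0   /* a non-zero shift is only used for debug/testing */

static uint64_t demo_time_stamp(void)
{
        struct timespec ts;

        clock_gettime(CLOCK_MONOTONIC, &ts);
        /* shift up to exercise the normalization/TIME_EXTENT paths */
        return ((uint64_t)ts.tv_sec * 1000000000ULL + ts.tv_nsec) << DEBUG_SHIFT;
}

static void demo_normalize(uint64_t *ts)
{
        /* the consumer shifts back down by the same amount */
        *ts >>= DEBUG_SHIFT;
}

int main(void)
{
        uint64_t t = demo_time_stamp();

        demo_normalize(&t);
        printf("%llu ns\n", (unsigned long long)t);
        return 0;
}

With the shift left at 0 both operations are no-ops, which matches the normal (non-debug) configuration.
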
@@ -258,7 +258,6 @@ struct ring_buffer_per_cpu {
 };
 
 struct ring_buffer {
-       unsigned long                   size;
        unsigned                        pages;
        unsigned                        flags;
        int                             cpus;
@@ -839,6 +838,7 @@ rb_set_commit_to_write(struct ring_buffer_per_cpu *cpu_buffer)
         * back to us). This allows us to do a simple loop to
         * assign the commit to the tail.
         */
+ again:
        while (cpu_buffer->commit_page != cpu_buffer->tail_page) {
                cpu_buffer->commit_page->page->commit =
                        cpu_buffer->commit_page->write;
@@ -854,6 +854,17 @@ rb_set_commit_to_write(struct ring_buffer_per_cpu *cpu_buffer)
                        cpu_buffer->commit_page->write;
                barrier();
        }
+
+       /* again, keep gcc from optimizing */
+       barrier();
+
+       /*
+        * If an interrupt came in just after the first while loop
+        * and pushed the tail page forward, we will be left with
+        * a dangling commit that will never go forward.
+        */
+       if (unlikely(cpu_buffer->commit_page != cpu_buffer->tail_page))
+               goto again;
 }
 
 static void rb_reset_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
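
The comment block added above describes the race this hunk closes: an interrupt that lands just after the loop can push the tail page forward again, leaving the commit dangling, so after a compiler barrier the condition is re-checked and the loop is retried. A hypothetical user-space sketch of the same barrier-and-recheck shape, with a signal handler standing in for the interrupt and plain counters standing in for the page pointers (none of these names come from the kernel):

/* Hypothetical sketch of the barrier-and-recheck pattern. */
#include <signal.h>
#include <stdio.h>

#define barrier() __asm__ __volatile__("" ::: "memory")

static volatile sig_atomic_t tail;      /* advanced by the "interrupt"      */
static volatile sig_atomic_t commit;    /* advanced only by the path below  */

static void fake_interrupt(int sig)
{
        (void)sig;
        tail++;                         /* push the tail forward             */
}

static void set_commit_to_tail(void)
{
 again:
        while (commit != tail) {
                commit++;               /* catch the commit up to the tail   */
                barrier();
        }

        /* keep the compiler from reusing a stale read of tail */
        barrier();

        /*
         * A signal that landed after the loop above may have pushed the
         * tail forward again; without this re-check the commit would be
         * left dangling, which is exactly the race the hunk closes.
         */
        if (commit != tail)
                goto again;
}

int main(void)
{
        signal(SIGALRM, fake_interrupt);
        raise(SIGALRM);                 /* deliver one "interrupt"           */
        set_commit_to_tail();
        printf("commit=%d tail=%d\n", (int)commit, (int)tail);
        return 0;
}

raise() only delivers a single synchronous "interrupt" before the loop runs, so the goto path is not actually exercised in this demo; in the kernel the interrupt can land on any instruction, which is why the re-check after the final barrier() is needed.
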
@@ -951,12 +962,15 @@ static struct ring_buffer_event *
 __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
                  unsigned type, unsigned long length, u64 *ts)
 {
-       struct buffer_page *tail_page, *head_page, *reader_page;
+       struct buffer_page *tail_page, *head_page, *reader_page, *commit_page;
        unsigned long tail, write;
        struct ring_buffer *buffer = cpu_buffer->buffer;
        struct ring_buffer_event *event;
        unsigned long flags;
 
+       commit_page = cpu_buffer->commit_page;
+       /* we just need to protect against interrupts */
+       barrier();
        tail_page = cpu_buffer->tail_page;
        write = local_add_return(length, &tail_page->write);
        tail = write - length;
@@ -982,7 +996,7 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
                 * it all the way around the buffer, bail, and warn
                 * about it.
                 */
-               if (unlikely(next_page == cpu_buffer->commit_page)) {
+               if (unlikely(next_page == commit_page)) {
                        WARN_ON_ONCE(1);
                        goto out_unlock;
                }
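
These two hunks are one fix: the commit page is sampled once into a local variable, a compiler barrier keeps that read from being deferred past the tail update, and the buffer-full test then compares against the snapshot instead of re-reading cpu_buffer->commit_page, which an interrupting (nested) writer may have moved in the meantime. A hypothetical user-space sketch of the snapshot-then-barrier shape, with counters in place of the page pointers and a signal handler in place of the interrupt:

/* Hypothetical sketch of the snapshot-then-barrier pattern. */
#include <signal.h>
#include <stdio.h>

#define barrier() __asm__ __volatile__("" ::: "memory")

static volatile sig_atomic_t commit_pos;        /* moved by the "interrupt" */
static volatile sig_atomic_t tail_pos;

static void fake_interrupt(int sig)
{
        (void)sig;
        commit_pos++;                   /* a nested writer commits           */
        tail_pos++;
}

static int reserve_next(void)
{
        sig_atomic_t commit_snap, next;

        commit_snap = commit_pos;       /* one consistent snapshot           */
        barrier();                      /* read it before touching the tail  */
        next = tail_pos + 1;

        /* compare against the snapshot, not a fresh (racy) re-read */
        if (next == commit_snap)
                return -1;              /* would wrap onto the commit        */

        tail_pos = next;
        return 0;
}

int main(void)
{
        signal(SIGALRM, fake_interrupt);
        raise(SIGALRM);
        printf("reserve_next() = %d\n", reserve_next());
        return 0;
}

The point of comparing against the snapshot is that the check stays consistent with the state the writer actually reserved against, rather than with whatever an interrupt left behind afterwards.
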
@@ -2210,8 +2224,7 @@ int ring_buffer_swap_cpu(struct ring_buffer *buffer_a,
                return -EINVAL;
 
        /* At least make sure the two buffers are somewhat the same */
-       if (buffer_a->size != buffer_b->size ||
-           buffer_a->pages != buffer_b->pages)
+       if (buffer_a->pages != buffer_b->pages)
                return -EINVAL;
 
        cpu_buffer_a = buffer_a->buffers[cpu];
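
With the size field dropped from struct ring_buffer in the earlier hunk, the page count is the only capacity measure left; since every buffer page holds the same fixed amount of data, equal page counts already imply equal capacity, so the removed size comparison was redundant. A small illustrative sketch of that equivalence (demo_buffer and the BUF_PAGE_SIZE value here are stand-ins, not the kernel definitions):

/* Hypothetical sketch: equal page counts imply equal capacity. */
#include <stdbool.h>
#include <stdio.h>

#define BUF_PAGE_SIZE 4096U     /* assumed fixed per-page payload */

struct demo_buffer {
        unsigned pages;
};

static bool same_capacity(const struct demo_buffer *a,
                          const struct demo_buffer *b)
{
        /*
         * a->pages * BUF_PAGE_SIZE == b->pages * BUF_PAGE_SIZE
         * reduces to a->pages == b->pages when the page size is fixed.
         */
        return a->pages == b->pages;
}

int main(void)
{
        struct demo_buffer a = { .pages = 8 }, b = { .pages = 8 };

        printf("same capacity: %d\n", same_capacity(&a, &b));
        return 0;
}
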