bbs.cooldavid.org Git - net-next-2.6.git/commitdiff
ring-buffer: Use sync sched protection on ring buffer resizing
author Steven Rostedt <srostedt@redhat.com>
Fri, 11 Dec 2009 03:54:27 +0000 (22:54 -0500)
committer Steven Rostedt <rostedt@goodmis.org>
Fri, 11 Dec 2009 03:54:27 +0000 (22:54 -0500)
There was a comment in the ring buffer code stating that the calling
layers should prevent tracing or reading of the ring buffer while it is
being resized. I have discovered that the tracers do not honor this
arrangement.

This patch moves the disabling and synchronizing of the ring buffer to
a higher layer during resizing. This guarantees that no writes occur
while the resize takes place.
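
To illustrate the ordering this establishes in ring_buffer_resize(), below is
a minimal userspace sketch of the same pattern: recording is disabled first,
the resizer waits for any writer already inside the buffer to finish (the role
synchronize_sched() plays in the kernel, where the write path runs with
preemption disabled), and only then is the buffer resized. All names here
(rb_write, rb_resize, writers_in_flight) are hypothetical, and the wait loop
is only a stand-in for synchronize_sched(), not kernel code.

    /*
     * Illustrative userspace analogue of the resize protection:
     * disable new writes, drain writers in flight, resize, re-enable.
     */
    #include <stdatomic.h>
    #include <stdlib.h>
    #include <sched.h>

    struct ring_buffer {
        atomic_int record_disabled;   /* writers bail out while this is set */
        atomic_int writers_in_flight; /* writers currently inside a write   */
        size_t size;
        char *data;
    };

    /* Writer side: announce the write, then honor record_disabled. */
    static int rb_write(struct ring_buffer *rb, char byte)
    {
        atomic_fetch_add(&rb->writers_in_flight, 1);
        if (atomic_load(&rb->record_disabled)) {
            atomic_fetch_sub(&rb->writers_in_flight, 1);
            return -1;
        }
        /* ... copy payload into rb->data ... */
        (void)byte;
        atomic_fetch_sub(&rb->writers_in_flight, 1);
        return 0;
    }

    /* Resizer side: mirrors the new ordering in ring_buffer_resize(). */
    static int rb_resize(struct ring_buffer *rb, size_t new_size)
    {
        char *tmp;

        atomic_fetch_add(&rb->record_disabled, 1);

        /* Stand-in for synchronize_sched(): wait out in-flight writers. */
        while (atomic_load(&rb->writers_in_flight))
            sched_yield();

        tmp = realloc(rb->data, new_size);
        if (!tmp) {
            atomic_fetch_sub(&rb->record_disabled, 1);
            return -1;
        }
        rb->data = tmp;
        rb->size = new_size;

        atomic_fetch_sub(&rb->record_disabled, 1);
        return 0;
    }

In the kernel the wait is synchronize_sched(), which returns only after every
CPU has passed through a scheduling point, so any writer that began before
record_disabled was set has already left the buffer by the time pages are
added or removed.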

Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
kernel/trace/ring_buffer.c

index a1ca4956ab5ec3673b04ddd839dd9edec26d1bf4..0d64c51ab4df0d89eaaaab6aef65d3fabe02126d 100644
@@ -1193,9 +1193,6 @@ rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned nr_pages)
        struct list_head *p;
        unsigned i;
 
-       atomic_inc(&cpu_buffer->record_disabled);
-       synchronize_sched();
-
        spin_lock_irq(&cpu_buffer->reader_lock);
        rb_head_page_deactivate(cpu_buffer);
 
@@ -1214,9 +1211,6 @@ rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned nr_pages)
        spin_unlock_irq(&cpu_buffer->reader_lock);
 
        rb_check_pages(cpu_buffer);
-
-       atomic_dec(&cpu_buffer->record_disabled);
-
 }
 
 static void
@@ -1227,9 +1221,6 @@ rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer,
        struct list_head *p;
        unsigned i;
 
-       atomic_inc(&cpu_buffer->record_disabled);
-       synchronize_sched();
-
        spin_lock_irq(&cpu_buffer->reader_lock);
        rb_head_page_deactivate(cpu_buffer);
 
@@ -1245,8 +1236,6 @@ rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer,
        spin_unlock_irq(&cpu_buffer->reader_lock);
 
        rb_check_pages(cpu_buffer);
-
-       atomic_dec(&cpu_buffer->record_disabled);
 }
 
 /**
@@ -1254,11 +1243,6 @@ rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer,
  * @buffer: the buffer to resize.
  * @size: the new size.
  *
- * The tracer is responsible for making sure that the buffer is
- * not being used while changing the size.
- * Note: We may be able to change the above requirement by using
- *  RCU synchronizations.
- *
  * Minimum size is 2 * BUF_PAGE_SIZE.
  *
  * Returns -1 on failure.
@@ -1290,6 +1274,11 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size)
        if (size == buffer_size)
                return size;
 
+       atomic_inc(&buffer->record_disabled);
+
+       /* Make sure all writers are done with this buffer. */
+       synchronize_sched();
+
        mutex_lock(&buffer->mutex);
        get_online_cpus();
 
@@ -1352,6 +1341,8 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size)
        put_online_cpus();
        mutex_unlock(&buffer->mutex);
 
+       atomic_dec(&buffer->record_disabled);
+
        return size;
 
  free_pages:
@@ -1361,6 +1352,7 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size)
        }
        put_online_cpus();
        mutex_unlock(&buffer->mutex);
+       atomic_dec(&buffer->record_disabled);
        return -ENOMEM;
 
        /*
@@ -1370,6 +1362,7 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size)
  out_fail:
        put_online_cpus();
        mutex_unlock(&buffer->mutex);
+       atomic_dec(&buffer->record_disabled);
        return -1;
 }
 EXPORT_SYMBOL_GPL(ring_buffer_resize);