bbs.cooldavid.org Git - net-next-2.6.git/commitdiff
ring-buffer: Move zeroing out excess in page to ring buffer code
author: Steven Rostedt <srostedt@redhat.com>
Fri, 21 May 2010 17:32:26 +0000 (13:32 -0400)
committer: Steven Rostedt <rostedt@goodmis.org>
Tue, 25 May 2010 15:57:26 +0000 (11:57 -0400)
Currently the trace splice code zeros out the excess bytes in the page before
sending it off to userspace.

This is to make sure userspace is not getting anything it should not be
when reading the pages, because the excess data was never initialized
to zero before writing (for performance reasons).

But the splice code has no business in doing this work, it should be
done by the ring buffer. With the latest changes for recording lost
events, the splice code gets it wrong anyway.

Move the zeroing out of excess bytes into the ring buffer code.

Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
kernel/trace/ring_buffer.c
kernel/trace/trace.c

index b0702ff782187a51eb57f146608c4e5a14cfe46e..1da7b6ea8b85d70dde15b50369202cb68acab0bf 100644 (file)
@@ -3902,12 +3902,12 @@ int ring_buffer_read_page(struct ring_buffer *buffer,
        ret = read;
 
        cpu_buffer->lost_events = 0;
+
+       commit = local_read(&bpage->commit);
        /*
         * Set a flag in the commit field if we lost events
         */
        if (missed_events) {
-               commit = local_read(&bpage->commit);
-
                /* If there is room at the end of the page to save the
                 * missed events, then record it there.
                 */
@@ -3915,10 +3915,17 @@ int ring_buffer_read_page(struct ring_buffer *buffer,
                        memcpy(&bpage->data[commit], &missed_events,
                               sizeof(missed_events));
                        local_add(RB_MISSED_STORED, &bpage->commit);
+                       commit += sizeof(missed_events);
                }
                local_add(RB_MISSED_EVENTS, &bpage->commit);
        }
 
+       /*
+        * This page may be off to user land. Zero it out here.
+        */
+       if (commit < BUF_PAGE_SIZE)
+               memset(&bpage->data[commit], 0, BUF_PAGE_SIZE - commit);
+
  out_unlock:
        spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
 
index ba0ec81158b268fc1d0d0fd1314a04d796d838e1..95d0b1a28f93848a0ff5beaf667faeb9e4892062 100644 (file)
@@ -3661,7 +3661,6 @@ tracing_buffers_read(struct file *filp, char __user *ubuf,
                     size_t count, loff_t *ppos)
 {
        struct ftrace_buffer_info *info = filp->private_data;
-       unsigned int pos;
        ssize_t ret;
        size_t size;
 
@@ -3688,11 +3687,6 @@ tracing_buffers_read(struct file *filp, char __user *ubuf,
        if (ret < 0)
                return 0;
 
-       pos = ring_buffer_page_len(info->spare);
-
-       if (pos < PAGE_SIZE)
-               memset(info->spare + pos, 0, PAGE_SIZE - pos);
-
 read:
        size = PAGE_SIZE - info->read;
        if (size > count)