ring-buffer: Fix typo of time extends per page

diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 1da7b6ea8b85d70dde15b50369202cb68acab0bf..bca96377fd4e8667df513dfd91ea56931e35578f 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -405,7 +405,7 @@ static inline int test_time_stamp(u64 delta)
 #define BUF_MAX_DATA_SIZE (BUF_PAGE_SIZE - (sizeof(u32) * 2))
 
 /* Max number of timestamps that can fit on a page */
-#define RB_TIMESTAMPS_PER_PAGE (BUF_PAGE_SIZE / RB_LEN_TIME_STAMP)
+#define RB_TIMESTAMPS_PER_PAGE (BUF_PAGE_SIZE / RB_LEN_TIME_EXTEND)
 
 int ring_buffer_print_page_header(struct trace_seq *s)
 {
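
The one-line fix above corrects a copy-and-paste slip: the divisor should be the
size of a time-extend event, not of a full time stamp. For reference, the two
event lengths are defined earlier in this same file:

    enum {
        RB_LEN_TIME_EXTEND = 8,     /* delta-only event, 8 bytes */
        RB_LEN_TIME_STAMP  = 16,    /* full time stamp, 16 bytes */
    };

A time extend is the smaller of the two, so dividing BUF_PAGE_SIZE by
RB_LEN_TIME_EXTEND gives the true worst-case number of timestamp events per
page; dividing by RB_LEN_TIME_STAMP undercounted it by half.
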
@@ -443,6 +443,7 @@ int ring_buffer_print_page_header(struct trace_seq *s)
  */
 struct ring_buffer_per_cpu {
        int                             cpu;
+       atomic_t                        record_disabled;
        struct ring_buffer              *buffer;
        spinlock_t                      reader_lock;    /* serialize readers */
        arch_spinlock_t                 lock;
@@ -462,7 +463,6 @@ struct ring_buffer_per_cpu {
        unsigned long                   read;
        u64                             write_stamp;
        u64                             read_stamp;
-       atomic_t                        record_disabled;
 };
 
 struct ring_buffer {
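
Moving record_disabled next to cpu is a data-layout tweak, not a behavioural
change: the flag is read on every event reservation, so grouping it with the
other fields the reserve fast path touches keeps that check cache-hot. A
simplified sketch of the fast-path reads in question (both the global and the
per-cpu flag are checked per event, as in ring_buffer_lock_reserve() below):

    /* Reserve fast path (simplified): both flags are read on every event. */
    if (atomic_read(&buffer->record_disabled))
        return NULL;
    if (atomic_read(&cpu_buffer->record_disabled))
        return NULL;
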
@@ -2242,8 +2242,6 @@ static void trace_recursive_unlock(void)
 
 #endif
 
-static DEFINE_PER_CPU(int, rb_need_resched);
-
 /**
  * ring_buffer_lock_reserve - reserve a part of the buffer
  * @buffer: the ring buffer to reserve from
@@ -2264,13 +2262,13 @@ ring_buffer_lock_reserve(struct ring_buffer *buffer, unsigned long length)
 {
        struct ring_buffer_per_cpu *cpu_buffer;
        struct ring_buffer_event *event;
-       int cpu, resched;
+       int cpu;
 
        if (ring_buffer_flags != RB_BUFFERS_ON)
                return NULL;
 
        /* If we are tracing schedule, we don't want to recurse */
-       resched = ftrace_preempt_disable();
+       preempt_disable_notrace();
 
        if (atomic_read(&buffer->record_disabled))
                goto out_nocheck;
@@ -2295,21 +2293,13 @@ ring_buffer_lock_reserve(struct ring_buffer *buffer, unsigned long length)
        if (!event)
                goto out;
 
-       /*
-        * Need to store resched state on this cpu.
-        * Only the first needs to.
-        */
-
-       if (preempt_count() == 1)
-               per_cpu(rb_need_resched, cpu) = resched;
-
        return event;
 
  out:
        trace_recursive_unlock();
 
  out_nocheck:
-       ftrace_preempt_enable(resched);
+       preempt_enable_notrace();
        return NULL;
 }
 EXPORT_SYMBOL_GPL(ring_buffer_lock_reserve);
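
The removed ftrace_preempt_disable()/ftrace_preempt_enable() pair existed to
avoid recursion: preempt_enable() can call into the scheduler, and a traced
scheduler would re-enter the ring buffer. Once preempt_schedule() was made
notrace, the plain notrace preemption helpers are safe here, and the
saved-resched bookkeeping (including the rb_need_resched per-cpu variable
removed above) becomes unnecessary. Roughly what the removed helpers did,
reconstructed as a sketch rather than quoted verbatim:

    static inline int ftrace_preempt_disable(void)
    {
        int resched = need_resched();

        preempt_disable_notrace();
        return resched;
    }

    static inline void ftrace_preempt_enable(int resched)
    {
        if (resched)
            /* NEED_RESCHED was already set; leave it to the outer context */
            preempt_enable_no_resched_notrace();
        else
            preempt_enable_notrace();
    }
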
@@ -2355,13 +2345,7 @@ int ring_buffer_unlock_commit(struct ring_buffer *buffer,
 
        trace_recursive_unlock();
 
-       /*
-        * Only the last preempt count needs to restore preemption.
-        */
-       if (preempt_count() == 1)
-               ftrace_preempt_enable(per_cpu(rb_need_resched, cpu));
-       else
-               preempt_enable_no_resched_notrace();
+       preempt_enable_notrace();
 
        return 0;
 }
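
With the simplification, a producer simply runs with preemption disabled from
reserve to commit. A minimal usage sketch (the payload and its copy are
hypothetical; the API calls are the ones exported here):

    struct ring_buffer_event *event;
    void *body;

    event = ring_buffer_lock_reserve(buffer, sizeof(my_data)); /* my_data: hypothetical */
    if (!event)
        return;                     /* recording disabled or buffer full */
    body = ring_buffer_event_data(event);
    memcpy(body, &my_data, sizeof(my_data));
    ring_buffer_unlock_commit(buffer, event);
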
@@ -2469,13 +2453,7 @@ void ring_buffer_discard_commit(struct ring_buffer *buffer,
 
        trace_recursive_unlock();
 
-       /*
-        * Only the last preempt count needs to restore preemption.
-        */
-       if (preempt_count() == 1)
-               ftrace_preempt_enable(per_cpu(rb_need_resched, cpu));
-       else
-               preempt_enable_no_resched_notrace();
+       preempt_enable_notrace();
 
 }
 EXPORT_SYMBOL_GPL(ring_buffer_discard_commit);
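
ring_buffer_discard_commit() is the abort side of the reserve/commit pair,
used for example when an event is filtered out after it has already been
written; it must balance the same preemption disable as a real commit, which
is why the identical cleanup applies here. A hedged usage sketch (fill_entry()
and event_matches() are hypothetical stand-ins for the caller's logic):

    event = ring_buffer_lock_reserve(buffer, len);
    if (!event)
        return;
    entry = ring_buffer_event_data(event);
    fill_entry(entry);                      /* hypothetical */
    if (event_matches(entry))               /* hypothetical filter */
        ring_buffer_unlock_commit(buffer, event);
    else
        ring_buffer_discard_commit(buffer, event);
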
@@ -2501,12 +2479,12 @@ int ring_buffer_write(struct ring_buffer *buffer,
        struct ring_buffer_event *event;
        void *body;
        int ret = -EBUSY;
-       int cpu, resched;
+       int cpu;
 
        if (ring_buffer_flags != RB_BUFFERS_ON)
                return -EBUSY;
 
-       resched = ftrace_preempt_disable();
+       preempt_disable_notrace();
 
        if (atomic_read(&buffer->record_disabled))
                goto out;
@@ -2536,7 +2514,7 @@ int ring_buffer_write(struct ring_buffer *buffer,
 
        ret = 0;
  out:
-       ftrace_preempt_enable(resched);
+       preempt_enable_notrace();
 
        return ret;
 }
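
ring_buffer_write() bundles reserve, copy, and commit into one call for
callers that already have the data assembled, so it carries the same
preemption pattern as the split API. A short sketch (the sample struct is
hypothetical; the function returns 0 on success and -EBUSY otherwise, as seen
above):

    struct sample s = { .value = 42 };      /* hypothetical payload */

    if (ring_buffer_write(buffer, sizeof(s), &s))
        pr_warn("ring buffer write failed\n");
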
@@ -3007,13 +2985,11 @@ static void rb_advance_reader(struct ring_buffer_per_cpu *cpu_buffer)
 
 static void rb_advance_iter(struct ring_buffer_iter *iter)
 {
-       struct ring_buffer *buffer;
        struct ring_buffer_per_cpu *cpu_buffer;
        struct ring_buffer_event *event;
        unsigned length;
 
        cpu_buffer = iter->cpu_buffer;
-       buffer = cpu_buffer->buffer;
 
        /*
         * Check if we are at the end of the buffer.
@@ -3868,6 +3844,9 @@ int ring_buffer_read_page(struct ring_buffer *buffer,
                        rpos = reader->read;
                        pos += size;
 
+                       if (rpos >= commit)
+                               break;
+
                        event = rb_reader_event(cpu_buffer);
                        size = rb_event_length(event);
                } while (len > size);
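
The added check stops the copy loop at the commit point: once the reader
position catches up to commit there are no further valid events, and without
the break the loop would read a stale event header past the end of the written
data. Simplified shape of the corrected loop ('to' and 'from' are illustrative
names for the destination and reader pages; the rest of the locals follow the
function above):

    /* Copy one event at a time into the caller's page. */
    do {
        memcpy(to + pos, from + rpos, size);
        len -= size;

        rb_advance_reader(cpu_buffer);
        rpos = reader->read;
        pos += size;

        if (rpos >= commit)     /* reader caught up: stop before a stale header */
            break;

        event = rb_reader_event(cpu_buffer);
        size = rb_event_length(event);
    } while (len > size);
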