bbs.cooldavid.org Git - net-next-2.6.git/commitdiff
tracing: Reduce latency and remove percpu trace_seq
Author: Lai Jiangshan <laijs@cn.fujitsu.com>
Thu, 3 Jun 2010 10:26:24 +0000 (18:26 +0800)
Committer: Steven Rostedt <rostedt@goodmis.org>
Wed, 21 Jul 2010 02:05:34 +0000 (22:05 -0400)
__print_flags() and __print_symbolic() use percpu trace_seq:

1) Its memory is allocated at compile time, so it wastes memory if we don't use tracing.
2) It is percpu data, so it wastes even more memory on multi-CPU systems.
3) It disables preemption when it executes its core routine
   "trace_seq_printf(s, "%s: ", #call);" and introduces latency.

So we move this trace_seq to struct trace_iterator.

Signed-off-by: Lai Jiangshan <laijs@cn.fujitsu.com>
LKML-Reference: <4C078350.7090106@cn.fujitsu.com>
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
include/linux/ftrace_event.h
include/trace/ftrace.h
kernel/trace/trace_output.c

index 2b7b1395b4d50864fb0246b01febb10f942ac772..02b8b24f8f51f0e37156731ba94da19ba2d19131 100644 (file)
@@ -11,8 +11,6 @@ struct trace_array;
 struct tracer;
 struct dentry;
 
-DECLARE_PER_CPU(struct trace_seq, ftrace_event_seq);
-
 struct trace_print_flags {
        unsigned long           mask;
        const char              *name;
@@ -58,6 +56,9 @@ struct trace_iterator {
        struct ring_buffer_iter *buffer_iter[NR_CPUS];
        unsigned long           iter_flags;
 
+       /* trace_seq for __print_flags() and __print_symbolic() etc. */
+       struct trace_seq        tmp_seq;
+
        /* The below is zeroed out in pipe_read */
        struct trace_seq        seq;
        struct trace_entry      *ent;
index 55c1fd1bbc3d7470af712db6a3cf772bcb899537..fb783d94fc545a6b6f3cd291d9f0a693d6497577 100644 (file)
  *     struct trace_seq *s = &iter->seq;
  *     struct ftrace_raw_<call> *field; <-- defined in stage 1
  *     struct trace_entry *entry;
- *     struct trace_seq *p;
+ *     struct trace_seq *p = &iter->tmp_seq;
  *     int ret;
  *
  *     entry = iter->ent;
  *
  *     field = (typeof(field))entry;
  *
- *     p = &get_cpu_var(ftrace_event_seq);
  *     trace_seq_init(p);
  *     ret = trace_seq_printf(s, "%s: ", <call>);
  *     if (ret)
  *             ret = trace_seq_printf(s, <TP_printk> "\n");
- *     put_cpu();
  *     if (!ret)
  *             return TRACE_TYPE_PARTIAL_LINE;
  *
@@ -216,7 +214,7 @@ ftrace_raw_output_##call(struct trace_iterator *iter, int flags,    \
        struct trace_seq *s = &iter->seq;                               \
        struct ftrace_raw_##call *field;                                \
        struct trace_entry *entry;                                      \
-       struct trace_seq *p;                                            \
+       struct trace_seq *p = &iter->tmp_seq;                           \
        int ret;                                                        \
                                                                        \
        event = container_of(trace_event, struct ftrace_event_call,     \
@@ -231,12 +229,10 @@ ftrace_raw_output_##call(struct trace_iterator *iter, int flags,  \
                                                                        \
        field = (typeof(field))entry;                                   \
                                                                        \
-       p = &get_cpu_var(ftrace_event_seq);                             \
        trace_seq_init(p);                                              \
        ret = trace_seq_printf(s, "%s: ", event->name);                 \
        if (ret)                                                        \
                ret = trace_seq_printf(s, print);                       \
-       put_cpu();                                                      \
        if (!ret)                                                       \
                return TRACE_TYPE_PARTIAL_LINE;                         \
                                                                        \
@@ -255,7 +251,7 @@ ftrace_raw_output_##call(struct trace_iterator *iter, int flags,    \
        struct trace_seq *s = &iter->seq;                               \
        struct ftrace_raw_##template *field;                            \
        struct trace_entry *entry;                                      \
-       struct trace_seq *p;                                            \
+       struct trace_seq *p = &iter->tmp_seq;                           \
        int ret;                                                        \
                                                                        \
        entry = iter->ent;                                              \
@@ -267,12 +263,10 @@ ftrace_raw_output_##call(struct trace_iterator *iter, int flags,  \
                                                                        \
        field = (typeof(field))entry;                                   \
                                                                        \
-       p = &get_cpu_var(ftrace_event_seq);                             \
        trace_seq_init(p);                                              \
        ret = trace_seq_printf(s, "%s: ", #call);                       \
        if (ret)                                                        \
                ret = trace_seq_printf(s, print);                       \
-       put_cpu();                                                      \
        if (!ret)                                                       \
                return TRACE_TYPE_PARTIAL_LINE;                         \
                                                                        \
index 57c1b45964703d8c0af75c1ffed5c3e31df66737..1ba64d3cc567b9702b13072565e5a2611c389f15 100644 (file)
@@ -16,9 +16,6 @@
 
 DECLARE_RWSEM(trace_event_mutex);
 
-DEFINE_PER_CPU(struct trace_seq, ftrace_event_seq);
-EXPORT_PER_CPU_SYMBOL(ftrace_event_seq);
-
 static struct hlist_head event_hash[EVENT_HASHSIZE] __read_mostly;
 
 static int next_event_type = __TRACE_LAST_TYPE + 1;