bbs.cooldavid.org Git - net-next-2.6.git/commitdiff
Merge branch 'perf/core' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux...
author: Steven Rostedt <srostedt@redhat.com>
Tue, 18 May 2010 02:26:53 +0000 (22:26 -0400)
committer: Steven Rostedt <rostedt@goodmis.org>
Tue, 18 May 2010 04:35:23 +0000 (00:35 -0400)
Conflicts:
include/trace/ftrace.h
kernel/trace/trace_kprobe.c

Acked-by: Masami Hiramatsu <mhiramat@redhat.com>
Acked-by: Frederic Weisbecker <fweisbec@gmail.com>
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
1  2 
include/linux/ftrace.h
include/linux/sched.h
include/trace/ftrace.h
kernel/sched.c
kernel/trace/trace.h
kernel/trace/trace_events_filter.c
kernel/trace/trace_kprobe.c
kernel/trace/trace_selftest.c

Simple merge
Simple merge
index 4866c109fa9a58230cec916d22acfc313eda458f,882c64832ffe07b774df0d8932eadc8b919ea51a..e0e8daa6767e052d1a95b1bf1c95016de8049e85
@@@ -690,18 -757,17 +690,20 @@@ __attribute__((section("_ftrace_events"
  #undef DECLARE_EVENT_CLASS
  #define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print)        \
  static notrace void                                                   \
 -perf_trace_templ_##call(struct ftrace_event_call *event_call,         \
 -                      struct pt_regs *__regs, proto)                  \
 +perf_trace_##call(void *__data, proto)                                        \
  {                                                                     \
 +      struct ftrace_event_call *event_call = __data;                  \
        struct ftrace_data_offsets_##call __maybe_unused __data_offsets;\
        struct ftrace_raw_##call *entry;                                \
++      struct pt_regs *__regs = &get_cpu_var(perf_trace_regs);         \
        u64 __addr = 0, __count = 1;                                    \
        unsigned long irq_flags;                                        \
-       struct pt_regs *__regs;                                         \
        int __entry_size;                                               \
        int __data_size;                                                \
        int rctx;                                                       \
                                                                        \
++      perf_fetch_caller_regs(__regs, 1);                              \
++                                                                      \
        __data_size = ftrace_get_offsets_##call(&__data_offsets, args); \
        __entry_size = ALIGN(__data_size + sizeof(*entry) + sizeof(u32),\
                             sizeof(u64));                              \
                                                                        \
        if (WARN_ONCE(__entry_size > PERF_MAX_TRACE_SIZE,               \
                      "profile buffer not large enough"))               \
--              return;                                                 \
++              goto out;                                               \
        entry = (struct ftrace_raw_##call *)perf_trace_buf_prepare(     \
 -              __entry_size, event_call->id, &rctx, &irq_flags);       \
 +              __entry_size, event_call->event.type, &rctx, &irq_flags); \
        if (!entry)                                                     \
--              return;                                                 \
++              goto out;                                               \
        tstruct                                                         \
                                                                        \
        { assign; }                                                     \
                                                                        \
-       __regs = &__get_cpu_var(perf_trace_regs);                       \
-       perf_fetch_caller_regs(__regs, 2);                              \
-                                                                       \
        perf_trace_buf_submit(entry, __entry_size, rctx, __addr,        \
                               __count, irq_flags, __regs);             \
++ out:                                                                 \
++      put_cpu_var(perf_trace_regs);                                   \
  }
  
 +/*
 + * This part is compiled out, it is only here as a build time check
 + * to make sure that if the tracepoint handling changes, the
 + * perf probe will fail to compile unless it too is updated.
 + */
  #undef DEFINE_EVENT
  #define DEFINE_EVENT(template, call, proto, args)                     \
 -static notrace void perf_trace_##call(proto)                          \
 +static inline void perf_test_probe_##call(void)                               \
  {                                                                     \
 -      struct ftrace_event_call *event_call = &event_##call;           \
 -      struct pt_regs *__regs = &get_cpu_var(perf_trace_regs);         \
 -                                                                      \
 -      perf_fetch_caller_regs(__regs, 1);                              \
 +      check_trace_callback_type_##call(perf_trace_##template);        \
                                                                        \
 -      perf_trace_templ_##template(event_call, __regs, args);          \
 -                                                                      \
 -      put_cpu_var(perf_trace_regs);                                   \
  }
  
 +
  #undef DEFINE_EVENT_PRINT
  #define DEFINE_EVENT_PRINT(template, name, proto, args, print)        \
        DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))
diff --cc kernel/sched.c
index b531d7934083b3216e50d4e4f6536ef7f08285ed,b11b80a3eed36335c5a9ae24ffd8e9e2540114b8..78554dd0d1a4181e585f11b329ed6cf85d3f88e7
@@@ -2054,52 -2076,17 +2064,9 @@@ static bool migrate_task(struct task_st
         * If the task is not on a runqueue (and not running), then
         * the next wake-up will properly place the task.
         */
 -      if (!p->se.on_rq && !task_running(rq, p))
 -              return 0;
 -
 -      init_completion(&req->done);
 -      req->task = p;
 -      req->dest_cpu = dest_cpu;
 -      list_add(&req->list, &rq->migration_queue);
 -
 -      return 1;
 +      return p->se.on_rq || task_running(rq, p);
  }
  
- /*
-  * wait_task_context_switch - wait for a thread to complete at least one
-  *                            context switch.
-  *
-  * @p must not be current.
-  */
- void wait_task_context_switch(struct task_struct *p)
- {
-       unsigned long nvcsw, nivcsw, flags;
-       int running;
-       struct rq *rq;
-       nvcsw   = p->nvcsw;
-       nivcsw  = p->nivcsw;
-       for (;;) {
-               /*
-                * The runqueue is assigned before the actual context
-                * switch. We need to take the runqueue lock.
-                *
-                * We could check initially without the lock but it is
-                * very likely that we need to take the lock in every
-                * iteration.
-                */
-               rq = task_rq_lock(p, &flags);
-               running = task_running(rq, p);
-               task_rq_unlock(rq, &flags);
-               if (likely(!running))
-                       break;
-               /*
-                * The switch count is incremented before the actual
-                * context switch. We thus wait for two switches to be
-                * sure at least one completed.
-                */
-               if ((p->nvcsw - nvcsw) > 1)
-                       break;
-               if ((p->nivcsw - nivcsw) > 1)
-                       break;
-               cpu_relax();
-       }
- }
  /*
   * wait_task_inactive - wait for a thread to unschedule.
   *
Simple merge
Simple merge
index 0e3ded64cdb72081bc52028b550eed76bb05575f,a7514326052b658b88e69029a7754e197b7c2f56..9a082bba95379d89c9aa04a140b6c0b0fd555fa8
@@@ -202,8 -324,9 +324,9 @@@ struct trace_probe 
        unsigned long           nhit;
        unsigned int            flags;  /* For TP_FLAG_* */
        const char              *symbol;        /* symbol name */
 +      struct ftrace_event_class       class;
        struct ftrace_event_call        call;
 -      struct trace_event              event;
+       ssize_t                 size;           /* trace entry size */
        unsigned int            nr_args;
        struct probe_arg        args[];
  };
@@@ -795,11 -901,10 +902,10 @@@ static void probes_seq_stop(struct seq_
  static int probes_seq_show(struct seq_file *m, void *v)
  {
        struct trace_probe *tp = v;
-       int i, ret;
-       char buf[MAX_ARGSTR_LEN + 1];
+       int i;
  
        seq_printf(m, "%c", probe_is_return(tp) ? 'r' : 'p');
 -      seq_printf(m, ":%s/%s", tp->call.system, tp->call.name);
 +      seq_printf(m, ":%s/%s", tp->call.class->system, tp->call.name);
  
        if (!tp->symbol)
                seq_printf(m, " 0x%p", tp->rp.kp.addr);
@@@ -958,10 -1059,10 +1060,10 @@@ static __kprobes void kprobe_trace_func
        local_save_flags(irq_flags);
        pc = preempt_count();
  
-       size = SIZEOF_KPROBE_TRACE_ENTRY(tp->nr_args);
+       size = sizeof(*entry) + tp->size;
  
 -      event = trace_current_buffer_lock_reserve(&buffer, call->id, size,
 -                                                irq_flags, pc);
 +      event = trace_current_buffer_lock_reserve(&buffer, call->event.type,
 +                                                size, irq_flags, pc);
        if (!event)
                return;
  
@@@ -990,10 -1092,10 +1093,10 @@@ static __kprobes void kretprobe_trace_f
        local_save_flags(irq_flags);
        pc = preempt_count();
  
-       size = SIZEOF_KRETPROBE_TRACE_ENTRY(tp->nr_args);
+       size = sizeof(*entry) + tp->size;
  
 -      event = trace_current_buffer_lock_reserve(&buffer, call->id, size,
 -                                                irq_flags, pc);
 +      event = trace_current_buffer_lock_reserve(&buffer, call->event.type,
 +                                                size, irq_flags, pc);
        if (!event)
                return;
  
  
  /* Event entry printers */
  enum print_line_t
 -print_kprobe_event(struct trace_iterator *iter, int flags)
 +print_kprobe_event(struct trace_iterator *iter, int flags,
 +                 struct trace_event *event)
  {
-       struct kprobe_trace_entry *field;
+       struct kprobe_trace_entry_head *field;
        struct trace_seq *s = &iter->seq;
 -      struct trace_event *event;
        struct trace_probe *tp;
+       u8 *data;
        int i;
  
-       field = (struct kprobe_trace_entry *)iter->ent;
+       field = (struct kprobe_trace_entry_head *)iter->ent;
 -      event = ftrace_find_event(field->ent.type);
 -      tp = container_of(event, struct trace_probe, event);
 +      tp = container_of(event, struct trace_probe, call.event);
  
        if (!trace_seq_printf(s, "%s: (", tp->call.name))
                goto partial;
@@@ -1044,16 -1149,18 +1149,17 @@@ partial
  }
  
  enum print_line_t
 -print_kretprobe_event(struct trace_iterator *iter, int flags)
 +print_kretprobe_event(struct trace_iterator *iter, int flags,
 +                    struct trace_event *event)
  {
-       struct kretprobe_trace_entry *field;
+       struct kretprobe_trace_entry_head *field;
        struct trace_seq *s = &iter->seq;
 -      struct trace_event *event;
        struct trace_probe *tp;
+       u8 *data;
        int i;
  
-       field = (struct kretprobe_trace_entry *)iter->ent;
+       field = (struct kretprobe_trace_entry_head *)iter->ent;
 -      event = ftrace_find_event(field->ent.type);
 -      tp = container_of(event, struct trace_probe, event);
 +      tp = container_of(event, struct trace_probe, call.event);
  
        if (!trace_seq_printf(s, "%s: (", tp->call.name))
                goto partial;
Simple merge