Merge branch 'linus' into perf/core
author Ingo Molnar <mingo@elte.hu>
Wed, 21 Jul 2010 19:43:03 +0000 (21:43 +0200)
committer Ingo Molnar <mingo@elte.hu>
Wed, 21 Jul 2010 19:43:06 +0000 (21:43 +0200)
Merge reason: Pick up the latest perf fixes.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
MAINTAINERS
Makefile
arch/powerpc/kernel/perf_event.c
arch/sparc/kernel/perf_event.c
arch/x86/kernel/kprobes.c
include/linux/syscalls.h
tools/perf/builtin-report.c
tools/perf/util/callchain.c
tools/perf/util/callchain.h

diff --combined MAINTAINERS
index 99e9b20e8f0e8545c4c9e91d36ba61c910102726,db3d0f5061f9c42e301e420878b0dc5cbe31e6e1..b7f6a88ce12902a787bf891908403f266fa0f781
@@@ -2121,7 -2121,9 +2121,9 @@@ M:      Mauro Carvalho Chehab <mchehab@redha
  L:    linux-edac@vger.kernel.org
  W:    bluesmoke.sourceforge.net
  S:    Maintained
- F:    drivers/edac/i7core_edac.c linux/edac_mce.h drivers/edac/edac_mce.c
+ F:    drivers/edac/i7core_edac.c
+ F:    drivers/edac/edac_mce.c
+ F:    include/linux/edac_mce.h
  
  EDAC-I82975X
  M:    Ranganathan Desikan <ravi@jetztechnologies.com>
@@@ -3378,6 -3380,13 +3380,6 @@@ F:     include/linux/kmemleak.
  F:    mm/kmemleak.c
  F:    mm/kmemleak-test.c
  
 -KMEMTRACE
 -M:    Eduard - Gabriel Munteanu <eduard.munteanu@linux360.ro>
 -S:    Maintained
 -F:    Documentation/trace/kmemtrace.txt
 -F:    include/linux/kmemtrace.h
 -F:    kernel/trace/kmemtrace.c
 -
  KPROBES
  M:    Ananth N Mavinakayanahalli <ananth@in.ibm.com>
  M:    Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
@@@ -5327,6 -5336,7 +5329,7 @@@ T:      git git://git.kernel.org/pub/scm/lin
  T:    git git://git.kernel.org/pub/scm/linux/kernel/git/davem/sparc-next-2.6.git
  S:    Maintained
  F:    arch/sparc/
+ F:    drivers/sbus
  
  SPARC SERIAL DRIVERS
  M:    "David S. Miller" <davem@davemloft.net>
@@@ -5644,7 -5654,7 +5647,7 @@@ TRACIN
  M:    Steven Rostedt <rostedt@goodmis.org>
  M:    Frederic Weisbecker <fweisbec@gmail.com>
  M:    Ingo Molnar <mingo@redhat.com>
 -T:    git git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip.git tracing/core
 +T:    git git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip.git perf/core
  S:    Maintained
  F:    Documentation/trace/ftrace.txt
  F:    arch/*/*/*/ftrace.h
diff --combined Makefile
index 0a20cd413b02cc6f4a0aee32076583178461b5b1,037ff4e62ca0ac605c2d63cdaa1cda79220f3cb4..f5787e2e150453a762774b62705e7185cfa7e24d
+++ b/Makefile
@@@ -1,7 -1,7 +1,7 @@@
  VERSION = 2
  PATCHLEVEL = 6
  SUBLEVEL = 35
- EXTRAVERSION = -rc4
+ EXTRAVERSION = -rc5
  NAME = Sheep on Meth
  
  # *DOCUMENTATION*
@@@ -414,7 -414,7 +414,7 @@@ endi
  no-dot-config-targets := clean mrproper distclean \
                         cscope TAGS tags help %docs check% \
                         include/linux/version.h headers_% \
 -                       kernelrelease kernelversion
 +                       kernelrelease kernelversion %src-pkg
  
  config-targets := 0
  mixed-targets  := 0
@@@ -886,7 -886,7 +886,7 @@@ $(vmlinux-dirs): prepare script
  # Store (new) KERNELRELASE string in include/config/kernel.release
  include/config/kernel.release: include/config/auto.conf FORCE
        $(Q)rm -f $@
-       $(Q)echo "$(KERNELVERSION)$$($(CONFIG_SHELL) scripts/setlocalversion $(srctree))" > $@
+       $(Q)echo "$(KERNELVERSION)$$($(CONFIG_SHELL) $(srctree)/scripts/setlocalversion $(srctree))" > $@
  
  
  # Things we need to do before we recursively start building the kernel
@@@ -1158,8 -1158,6 +1158,8 @@@ distclean: mrprope
  # rpm target kept for backward compatibility
  package-dir   := $(srctree)/scripts/package
  
 +%src-pkg: FORCE
 +      $(Q)$(MAKE) $(build)=$(package-dir) $@
  %pkg: include/config/kernel.release FORCE
        $(Q)$(MAKE) $(build)=$(package-dir) $@
  rpm: include/config/kernel.release FORCE
diff --combined arch/powerpc/kernel/perf_event.c
index af1d9a7c65d1f769689cf84330dfbd5f19e8e6ee,5c14ffe5125813d19e6793ebf69e132fa19dbffe..d301a30445e09a49cec4a3d4dcf2ea01529934b3
@@@ -410,15 -410,15 +410,15 @@@ static void power_pmu_read(struct perf_
         * Therefore we treat them like NMIs.
         */
        do {
 -              prev = atomic64_read(&event->hw.prev_count);
 +              prev = local64_read(&event->hw.prev_count);
                barrier();
                val = read_pmc(event->hw.idx);
 -      } while (atomic64_cmpxchg(&event->hw.prev_count, prev, val) != prev);
 +      } while (local64_cmpxchg(&event->hw.prev_count, prev, val) != prev);
  
        /* The counters are only 32 bits wide */
        delta = (val - prev) & 0xfffffffful;
 -      atomic64_add(delta, &event->count);
 -      atomic64_sub(delta, &event->hw.period_left);
 +      local64_add(delta, &event->count);
 +      local64_sub(delta, &event->hw.period_left);
  }
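
A side note on the pattern above: the atomic64_t -> local64_t conversion keeps perf's lock-free read/update protocol (snapshot prev_count, read the PMC, publish with cmpxchg, retry on interference) but switches to primitives that only have to be safe against interrupts on the local CPU, which is cheaper than full cross-CPU atomics. A minimal userspace sketch of the same protocol, with GCC __atomic builtins standing in for the kernel primitives and a fake read_pmc():

#include <stdint.h>
#include <stdio.h>

static uint64_t prev_count;     /* last PMC snapshot */
static uint64_t count;          /* accumulated event count */

static uint64_t read_pmc(void)  /* placeholder for the real PMU read */
{
        static uint64_t fake = 100;
        return fake += 7;
}

static void pmu_read(void)
{
        uint64_t prev, val, delta;

        do {
                prev = __atomic_load_n(&prev_count, __ATOMIC_RELAXED);
                val = read_pmc();
                /* publish the new snapshot; retry if an NMI-like path
                 * updated prev_count between the load and here */
        } while (!__atomic_compare_exchange_n(&prev_count, &prev, val, 0,
                                              __ATOMIC_RELAXED,
                                              __ATOMIC_RELAXED));

        delta = (val - prev) & 0xfffffffful;    /* counters are 32 bits wide */
        __atomic_fetch_add(&count, delta, __ATOMIC_RELAXED);
}

int main(void)
{
        pmu_read();
        pmu_read();
        printf("count = %llu\n", (unsigned long long)count);
        return 0;
}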
  
  /*
@@@ -444,10 -444,10 +444,10 @@@ static void freeze_limited_counters(str
                if (!event->hw.idx)
                        continue;
                val = (event->hw.idx == 5) ? pmc5 : pmc6;
 -              prev = atomic64_read(&event->hw.prev_count);
 +              prev = local64_read(&event->hw.prev_count);
                event->hw.idx = 0;
                delta = (val - prev) & 0xfffffffful;
 -              atomic64_add(delta, &event->count);
 +              local64_add(delta, &event->count);
        }
  }
  
@@@ -462,7 -462,7 +462,7 @@@ static void thaw_limited_counters(struc
                event = cpuhw->limited_counter[i];
                event->hw.idx = cpuhw->limited_hwidx[i];
                val = (event->hw.idx == 5) ? pmc5 : pmc6;
 -              atomic64_set(&event->hw.prev_count, val);
 +              local64_set(&event->hw.prev_count, val);
                perf_event_update_userpage(event);
        }
  }
@@@ -666,11 -666,11 +666,11 @@@ void hw_perf_enable(void
                }
                val = 0;
                if (event->hw.sample_period) {
 -                      left = atomic64_read(&event->hw.period_left);
 +                      left = local64_read(&event->hw.period_left);
                        if (left < 0x80000000L)
                                val = 0x80000000L - left;
                }
 -              atomic64_set(&event->hw.prev_count, val);
 +              local64_set(&event->hw.prev_count, val);
                event->hw.idx = idx;
                write_pmc(idx, val);
                perf_event_update_userpage(event);
@@@ -754,7 -754,7 +754,7 @@@ static int power_pmu_enable(struct perf
         * skip the schedulability test here, it will be peformed
         * at commit time(->commit_txn) as a whole
         */
 -      if (cpuhw->group_flag & PERF_EVENT_TXN_STARTED)
 +      if (cpuhw->group_flag & PERF_EVENT_TXN)
                goto nocheck;
  
        if (check_excludes(cpuhw->event, cpuhw->flags, n0, 1))
@@@ -791,8 -791,11 +791,11 @@@ static void power_pmu_disable(struct pe
        cpuhw = &__get_cpu_var(cpu_hw_events);
        for (i = 0; i < cpuhw->n_events; ++i) {
                if (event == cpuhw->event[i]) {
-                       while (++i < cpuhw->n_events)
+                       while (++i < cpuhw->n_events) {
                                cpuhw->event[i-1] = cpuhw->event[i];
+                               cpuhw->events[i-1] = cpuhw->events[i];
+                               cpuhw->flags[i-1] = cpuhw->flags[i];
+                       }
                        --cpuhw->n_events;
                        ppmu->disable_pmc(event->hw.idx - 1, cpuhw->mmcr);
                        if (event->hw.idx) {
@@@ -842,8 -845,8 +845,8 @@@ static void power_pmu_unthrottle(struc
        if (left < 0x80000000L)
                val = 0x80000000L - left;
        write_pmc(event->hw.idx, val);
 -      atomic64_set(&event->hw.prev_count, val);
 -      atomic64_set(&event->hw.period_left, left);
 +      local64_set(&event->hw.prev_count, val);
 +      local64_set(&event->hw.period_left, left);
        perf_event_update_userpage(event);
        perf_enable();
        local_irq_restore(flags);
@@@ -858,7 -861,7 +861,7 @@@ void power_pmu_start_txn(const struct p
  {
        struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
  
 -      cpuhw->group_flag |= PERF_EVENT_TXN_STARTED;
 +      cpuhw->group_flag |= PERF_EVENT_TXN;
        cpuhw->n_txn_start = cpuhw->n_events;
  }
  
@@@ -871,7 -874,7 +874,7 @@@ void power_pmu_cancel_txn(const struct 
  {
        struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
  
 -      cpuhw->group_flag &= ~PERF_EVENT_TXN_STARTED;
 +      cpuhw->group_flag &= ~PERF_EVENT_TXN;
  }
  
  /*
@@@ -897,7 -900,6 +900,7 @@@ int power_pmu_commit_txn(const struct p
        for (i = cpuhw->n_txn_start; i < n; ++i)
                cpuhw->event[i]->hw.config = cpuhw->events[i];
  
 +      cpuhw->group_flag &= ~PERF_EVENT_TXN;
        return 0;
  }
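
The one-line addition in power_pmu_commit_txn() (mirrored in the sparc code below) closes a flag leak: power_pmu_start_txn() sets PERF_EVENT_TXN so individual enables skip the per-event schedulability check, power_pmu_cancel_txn() clears it on failure, but a successful commit previously left the flag set and corrupted scheduling of the next group. A sketch of the protocol with illustrative types (not the kernel API):

#include <stdbool.h>
#include <stdio.h>

#define PERF_EVENT_TXN 0x1

struct cpu_hw {
        unsigned int group_flag;
        int n_events;
};

static void start_txn(struct cpu_hw *c)
{
        c->group_flag |= PERF_EVENT_TXN;
}

static void cancel_txn(struct cpu_hw *c)
{
        c->group_flag &= ~PERF_EVENT_TXN;
}

static bool schedulable(const struct cpu_hw *c)
{
        return c->n_events <= 6;        /* pretend we have 6 counters */
}

static int commit_txn(struct cpu_hw *c)
{
        if (!schedulable(c))
                return -1;              /* caller then invokes cancel_txn() */
        c->group_flag &= ~PERF_EVENT_TXN;       /* the step this merge adds */
        return 0;
}

int main(void)
{
        struct cpu_hw c = { .group_flag = 0, .n_events = 3 };

        start_txn(&c);
        if (commit_txn(&c))
                cancel_txn(&c);
        printf("group_flag after commit: %#x\n", c.group_flag);
        return 0;
}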
  
@@@ -1109,7 -1111,7 +1112,7 @@@ const struct pmu *hw_perf_event_init(st
        event->hw.config = events[n];
        event->hw.event_base = cflags[n];
        event->hw.last_period = event->hw.sample_period;
 -      atomic64_set(&event->hw.period_left, event->hw.last_period);
 +      local64_set(&event->hw.period_left, event->hw.last_period);
  
        /*
         * See if we need to reserve the PMU.
@@@ -1147,16 -1149,16 +1150,16 @@@ static void record_and_restart(struct p
        int record = 0;
  
        /* we don't have to worry about interrupts here */
 -      prev = atomic64_read(&event->hw.prev_count);
 +      prev = local64_read(&event->hw.prev_count);
        delta = (val - prev) & 0xfffffffful;
 -      atomic64_add(delta, &event->count);
 +      local64_add(delta, &event->count);
  
        /*
         * See if the total period for this event has expired,
         * and update for the next period.
         */
        val = 0;
 -      left = atomic64_read(&event->hw.period_left) - delta;
 +      left = local64_read(&event->hw.period_left) - delta;
        if (period) {
                if (left <= 0) {
                        left += period;
        }
  
        write_pmc(event->hw.idx, val);
 -      atomic64_set(&event->hw.prev_count, val);
 -      atomic64_set(&event->hw.period_left, left);
 +      local64_set(&event->hw.prev_count, val);
 +      local64_set(&event->hw.period_left, left);
        perf_event_update_userpage(event);
  }
  
diff --combined arch/sparc/kernel/perf_event.c
index 8a6660da8e080486b86d5e3056f36d5e65210160,44faabc3c02c920bba1b9f12272d1a907895161e..357ced3c33ffac87a992e01b6820a77084cfb8de
@@@ -572,18 -572,18 +572,18 @@@ static u64 sparc_perf_event_update(stru
        s64 delta;
  
  again:
 -      prev_raw_count = atomic64_read(&hwc->prev_count);
 +      prev_raw_count = local64_read(&hwc->prev_count);
        new_raw_count = read_pmc(idx);
  
 -      if (atomic64_cmpxchg(&hwc->prev_count, prev_raw_count,
 +      if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
                             new_raw_count) != prev_raw_count)
                goto again;
  
        delta = (new_raw_count << shift) - (prev_raw_count << shift);
        delta >>= shift;
  
 -      atomic64_add(delta, &event->count);
 -      atomic64_sub(delta, &hwc->period_left);
 +      local64_add(delta, &event->count);
 +      local64_sub(delta, &hwc->period_left);
  
        return new_raw_count;
  }
  static int sparc_perf_event_set_period(struct perf_event *event,
                                       struct hw_perf_event *hwc, int idx)
  {
 -      s64 left = atomic64_read(&hwc->period_left);
 +      s64 left = local64_read(&hwc->period_left);
        s64 period = hwc->sample_period;
        int ret = 0;
  
        if (unlikely(left <= -period)) {
                left = period;
 -              atomic64_set(&hwc->period_left, left);
 +              local64_set(&hwc->period_left, left);
                hwc->last_period = period;
                ret = 1;
        }
  
        if (unlikely(left <= 0)) {
                left += period;
 -              atomic64_set(&hwc->period_left, left);
 +              local64_set(&hwc->period_left, left);
                hwc->last_period = period;
                ret = 1;
        }
        if (left > MAX_PERIOD)
                left = MAX_PERIOD;
  
 -      atomic64_set(&hwc->prev_count, (u64)-left);
 +      local64_set(&hwc->prev_count, (u64)-left);
  
        write_pmc(idx, (u64)(-left) & 0xffffffff);
  
@@@ -657,6 -657,7 +657,7 @@@ static u64 maybe_change_configuration(s
                cpuc->current_idx[i] = idx;
  
                enc = perf_event_get_enc(cpuc->events[i]);
+               pcr &= ~mask_for_index(idx);
                pcr |= event_encoding(enc, idx);
        }
  out:
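
The added `pcr &= ~mask_for_index(idx)` matters when an event migrates to a different counter slot: without first clearing that slot's bits in the control register, the OR merges the stale encoding into the new one. A self-contained sketch of the read-modify-write (the 8-bits-per-slot layout is illustrative, not the real %pcr format):

#include <stdint.h>
#include <stdio.h>

static uint64_t mask_for_index(int idx)
{
        return 0xffull << (idx * 8);    /* illustrative: 8 bits per slot */
}

static uint64_t event_encoding(uint64_t enc, int idx)
{
        return enc << (idx * 8);
}

int main(void)
{
        uint64_t pcr = event_encoding(0x3c, 1); /* stale encoding in slot 1 */

        /* without the fix, OR-ing merges stale and new bits: */
        uint64_t buggy = pcr | event_encoding(0x21, 1);

        /* the fix clears the slot before installing the new encoding: */
        pcr &= ~mask_for_index(1);
        pcr |= event_encoding(0x21, 1);

        printf("buggy %#llx, fixed %#llx\n",
               (unsigned long long)buggy, (unsigned long long)pcr);
        return 0;
}
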
@@@ -1005,7 -1006,7 +1006,7 @@@ static int sparc_pmu_enable(struct perf
         * skip the schedulability test here, it will be peformed
         * at commit time(->commit_txn) as a whole
         */
 -      if (cpuc->group_flag & PERF_EVENT_TXN_STARTED)
 +      if (cpuc->group_flag & PERF_EVENT_TXN)
                goto nocheck;
  
        if (check_excludes(cpuc->event, n0, 1))
@@@ -1087,7 -1088,7 +1088,7 @@@ static int __hw_perf_event_init(struct 
        if (!hwc->sample_period) {
                hwc->sample_period = MAX_PERIOD;
                hwc->last_period = hwc->sample_period;
 -              atomic64_set(&hwc->period_left, hwc->sample_period);
 +              local64_set(&hwc->period_left, hwc->sample_period);
        }
  
        return 0;
@@@ -1102,7 -1103,7 +1103,7 @@@ static void sparc_pmu_start_txn(const s
  {
        struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
  
 -      cpuhw->group_flag |= PERF_EVENT_TXN_STARTED;
 +      cpuhw->group_flag |= PERF_EVENT_TXN;
  }
  
  /*
@@@ -1114,7 -1115,7 +1115,7 @@@ static void sparc_pmu_cancel_txn(const 
  {
        struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
  
 -      cpuhw->group_flag &= ~PERF_EVENT_TXN_STARTED;
 +      cpuhw->group_flag &= ~PERF_EVENT_TXN;
  }
  
  /*
@@@ -1137,7 -1138,6 +1138,7 @@@ static int sparc_pmu_commit_txn(const s
        if (sparc_check_constraints(cpuc->event, cpuc->events, n))
                return -EAGAIN;
  
 +      cpuc->group_flag &= ~PERF_EVENT_TXN;
        return 0;
  }
  
diff --combined arch/x86/kernel/kprobes.c
index 175f85ceace31a352c85162ed0146e2bcfc080fa,675879b65ce666c91b868c96972ea35f107810f4..1bfb6cf4dd55d67aeeebbbf89d0bb60283a94c94
@@@ -126,22 -126,16 +126,22 @@@ static void __kprobes synthesize_reljum
  }
  
  /*
 - * Check for the REX prefix which can only exist on X86_64
 - * X86_32 always returns 0
 + * Skip the prefixes of the instruction.
   */
 -static int __kprobes is_REX_prefix(kprobe_opcode_t *insn)
 +static kprobe_opcode_t *__kprobes skip_prefixes(kprobe_opcode_t *insn)
  {
 +      insn_attr_t attr;
 +
 +      attr = inat_get_opcode_attribute((insn_byte_t)*insn);
 +      while (inat_is_legacy_prefix(attr)) {
 +              insn++;
 +              attr = inat_get_opcode_attribute((insn_byte_t)*insn);
 +      }
  #ifdef CONFIG_X86_64
 -      if ((*insn & 0xf0) == 0x40)
 -              return 1;
 +      if (inat_is_rex_prefix(attr))
 +              insn++;
  #endif
 -      return 0;
 +      return insn;
  }
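
skip_prefixes() generalizes the old REX-only check: an x86 instruction may carry any number of legacy prefixes (lock/rep, segment overrides, operand/address size) before the optional REX byte, and the kernel now identifies them through its generated instruction-attribute (inat) tables. A simplified standalone version with an open-coded prefix list in place of those tables:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* stand-in for inat_is_legacy_prefix(): the classic x86 legacy prefixes */
static bool is_legacy_prefix(uint8_t b)
{
        switch (b) {
        case 0x26: case 0x2e: case 0x36: case 0x3e:     /* segment overrides */
        case 0x64: case 0x65:                           /* fs/gs overrides */
        case 0x66: case 0x67:                           /* operand/address size */
        case 0xf0: case 0xf2: case 0xf3:                /* lock, repne, rep */
                return true;
        default:
                return false;
        }
}

static const uint8_t *skip_prefixes(const uint8_t *insn)
{
        while (is_legacy_prefix(*insn))
                insn++;
#ifdef __x86_64__
        if ((*insn & 0xf0) == 0x40)     /* REX prefix, 64-bit only */
                insn++;
#endif
        return insn;
}

int main(void)
{
        /* f0 66 fa: lock and operand-size prefixes, then cli (0xfa) */
        const uint8_t insn[] = { 0xf0, 0x66, 0xfa };

        printf("opcode after prefixes: %#x\n", *skip_prefixes(insn));
        return 0;
}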
  
  /*
@@@ -278,9 -272,6 +278,9 @@@ static int __kprobes can_probe(unsigne
   */
  static int __kprobes is_IF_modifier(kprobe_opcode_t *insn)
  {
 +      /* Skip prefixes */
 +      insn = skip_prefixes(insn);
 +
        switch (*insn) {
        case 0xfa:              /* cli */
        case 0xfb:              /* sti */
                return 1;
        }
  
 -      /*
 -       * on X86_64, 0x40-0x4f are REX prefixes so we need to look
 -       * at the next byte instead.. but of course not recurse infinitely
 -       */
 -      if (is_REX_prefix(insn))
 -              return is_IF_modifier(++insn);
 -
        return 0;
  }
  
@@@ -642,8 -640,8 +642,8 @@@ static int __kprobes kprobe_handler(str
        /* Skip cs, ip, orig_ax and gs. */      \
        "       subl $16, %esp\n"       \
        "       pushl %fs\n"            \
-       "       pushl %ds\n"            \
        "       pushl %es\n"            \
+       "       pushl %ds\n"            \
        "       pushl %eax\n"           \
        "       pushl %ebp\n"           \
        "       pushl %edi\n"           \
@@@ -805,8 -803,9 +805,8 @@@ static void __kprobes resume_execution(
        unsigned long orig_ip = (unsigned long)p->addr;
        kprobe_opcode_t *insn = p->ainsn.insn;
  
 -      /*skip the REX prefix*/
 -      if (is_REX_prefix(insn))
 -              insn++;
 +      /* Skip prefixes */
 +      insn = skip_prefixes(insn);
  
        regs->flags &= ~X86_EFLAGS_TF;
        switch (*insn) {
diff --combined include/linux/syscalls.h
index 7994bd44eb56faf697f21467184b1df82b4fa999,13ebb5413a7982c5fdd1153432794f39a708f362..a6bfd1367d2a8493cbe7534cddbc1e2841e12fa6
@@@ -124,7 -124,8 +124,8 @@@ extern struct trace_event_functions ent
  extern struct trace_event_functions exit_syscall_print_funcs;
  
  #define SYSCALL_TRACE_ENTER_EVENT(sname)                              \
-       static struct syscall_metadata __syscall_meta_##sname;          \
+       static struct syscall_metadata                                  \
+       __attribute__((__aligned__(4))) __syscall_meta_##sname;         \
        static struct ftrace_event_call                                 \
        __attribute__((__aligned__(4))) event_enter_##sname;            \
        static struct ftrace_event_call __used                          \
        }
  
  #define SYSCALL_TRACE_EXIT_EVENT(sname)                                       \
-       static struct syscall_metadata __syscall_meta_##sname;          \
+       static struct syscall_metadata                                  \
+       __attribute__((__aligned__(4))) __syscall_meta_##sname;         \
        static struct ftrace_event_call                                 \
        __attribute__((__aligned__(4))) event_exit_##sname;             \
        static struct ftrace_event_call __used                          \
                .enter_event    = &event_enter_##sname,         \
                .exit_event     = &event_exit_##sname,          \
                .enter_fields   = LIST_HEAD_INIT(__syscall_meta_##sname.enter_fields), \
 -              .exit_fields    = LIST_HEAD_INIT(__syscall_meta_##sname.exit_fields), \
        };
  
  #define SYSCALL_DEFINE0(sname)                                        \
                .enter_event    = &event_enter__##sname,        \
                .exit_event     = &event_exit__##sname,         \
                .enter_fields   = LIST_HEAD_INIT(__syscall_meta__##sname.enter_fields), \
 -              .exit_fields    = LIST_HEAD_INIT(__syscall_meta__##sname.exit_fields), \
        };                                                      \
        asmlinkage long sys_##sname(void)
  #else
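
The forced __aligned__(4) on the metadata mirrors the attribute the ftrace_event_call definitions beside it already carry: both kinds of object are collected into dedicated linker sections and walked as dense arrays, so every entry must share one alignment or the iteration stride breaks on architectures whose natural struct alignment differs. A minimal sketch of the attribute itself (section placement needs linker-script support, so only the alignment is shown):

#include <stdio.h>

struct syscall_metadata_like {
        const char *name;
        int nb_args;
} __attribute__((__aligned__(4)));      /* every instance: 4-byte aligned */

static struct syscall_metadata_like meta_sample = { "sys_sample", 0 };

int main(void)
{
        printf("alignof=%zu addr=%p\n",
               __alignof__(meta_sample), (void *)&meta_sample);
        return 0;
}
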
diff --combined tools/perf/builtin-report.c
index 371a3c995806ade919deb62a95dcc0ffa5329a91,fd7407c7205c7be809ce4c3b048e68ab9b7b9487..ce42bbaa252d822be55aec11fa6392fd67793798
@@@ -107,7 -107,7 +107,7 @@@ static int perf_session__add_hist_entry
                goto out_free_syms;
        err = 0;
        if (symbol_conf.use_callchain) {
-               err = append_chain(he->callchain, data->callchain, syms);
+               err = append_chain(he->callchain, data->callchain, syms, data->period);
                if (err)
                        goto out_free_syms;
        }
@@@ -155,7 -155,30 +155,7 @@@ static int process_sample_event(event_
        struct addr_location al;
        struct perf_event_attr *attr;
  
 -      event__parse_sample(event, session->sample_type, &data);
 -
 -      dump_printf("(IP, %d): %d/%d: %#Lx period: %Ld\n", event->header.misc,
 -                  data.pid, data.tid, data.ip, data.period);
 -
 -      if (session->sample_type & PERF_SAMPLE_CALLCHAIN) {
 -              unsigned int i;
 -
 -              dump_printf("... chain: nr:%Lu\n", data.callchain->nr);
 -
 -              if (!ip_callchain__valid(data.callchain, event)) {
 -                      pr_debug("call-chain problem with event, "
 -                               "skipping it.\n");
 -                      return 0;
 -              }
 -
 -              if (dump_trace) {
 -                      for (i = 0; i < data.callchain->nr; i++)
 -                              dump_printf("..... %2d: %016Lx\n",
 -                                          i, data.callchain->ips[i]);
 -              }
 -      }
 -
 -      if (event__preprocess_sample(event, session, &al, NULL) < 0) {
 +      if (event__preprocess_sample(event, session, &al, &data, NULL) < 0) {
                fprintf(stderr, "problem processing %d event, skipping it.\n",
                        event->header.type);
                return -1;
diff --combined tools/perf/util/callchain.c
index e63c997d6c1b8ada3d338f386d35a45c5d658e5a,52c777e451ed8ebc049f39edfb651bfe848d66cc..f231f43424d27930a286cb52902c21cb4534a068
@@@ -18,7 -18,7 +18,7 @@@
  #include "util.h"
  #include "callchain.h"
  
 -bool ip_callchain__valid(struct ip_callchain *chain, event_t *event)
 +bool ip_callchain__valid(struct ip_callchain *chain, const event_t *event)
  {
        unsigned int chain_size = event->header.size;
        chain_size -= (unsigned long)&event->ip.__more_data - (unsigned long)event;
@@@ -230,7 -230,7 +230,7 @@@ fill_node(struct callchain_node *node, 
  
  static void
  add_child(struct callchain_node *parent, struct resolved_chain *chain,
-         int start)
+         int start, u64 period)
  {
        struct callchain_node *new;
  
        fill_node(new, chain, start);
  
        new->children_hit = 0;
-       new->hit = 1;
+       new->hit = period;
  }
  
  /*
   */
  static void
  split_add_child(struct callchain_node *parent, struct resolved_chain *chain,
-               struct callchain_list *to_split, int idx_parents, int idx_local)
+               struct callchain_list *to_split, int idx_parents, int idx_local,
+               u64 period)
  {
        struct callchain_node *new;
        struct list_head *old_tail;
        /* create a new child for the new branch if any */
        if (idx_total < chain->nr) {
                parent->hit = 0;
-               add_child(parent, chain, idx_total);
-               parent->children_hit++;
+               add_child(parent, chain, idx_total, period);
+               parent->children_hit += period;
        } else {
-               parent->hit = 1;
+               parent->hit = period;
        }
  }
  
  static int
  __append_chain(struct callchain_node *root, struct resolved_chain *chain,
-              unsigned int start);
+              unsigned int start, u64 period);
  
  static void
  __append_chain_children(struct callchain_node *root,
                        struct resolved_chain *chain,
-                       unsigned int start)
+                       unsigned int start, u64 period)
  {
        struct callchain_node *rnode;
  
        /* lookup in childrens */
        chain_for_each_child(rnode, root) {
-               unsigned int ret = __append_chain(rnode, chain, start);
+               unsigned int ret = __append_chain(rnode, chain, start, period);
  
                if (!ret)
                        goto inc_children_hit;
        }
        /* nothing in children, add to the current node */
-       add_child(root, chain, start);
+       add_child(root, chain, start, period);
  
  inc_children_hit:
-       root->children_hit++;
+       root->children_hit += period;
  }
  
  static int
  __append_chain(struct callchain_node *root, struct resolved_chain *chain,
-              unsigned int start)
+              unsigned int start, u64 period)
  {
        struct callchain_list *cnode;
        unsigned int i = start;
  
        /* we match only a part of the node. Split it and add the new chain */
        if (i - start < root->val_nr) {
-               split_add_child(root, chain, cnode, start, i - start);
+               split_add_child(root, chain, cnode, start, i - start, period);
                return 0;
        }
  
        /* we match 100% of the path, increment the hit */
        if (i - start == root->val_nr && i == chain->nr) {
-               root->hit++;
+               root->hit += period;
                return 0;
        }
  
        /* We match the node and still have a part remaining */
-       __append_chain_children(root, chain, i);
+       __append_chain_children(root, chain, i, period);
  
        return 0;
  }
@@@ -380,7 -381,7 +381,7 @@@ static void filter_context(struct ip_ca
  
  
  int append_chain(struct callchain_node *root, struct ip_callchain *chain,
-                struct map_symbol *syms)
+                struct map_symbol *syms, u64 period)
  {
        struct resolved_chain *filtered;
  
        if (!filtered->nr)
                goto end;
  
-       __append_chain_children(root, filtered, 0);
+       __append_chain_children(root, filtered, 0, period);
  end:
        free(filtered);
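
Threading `period` through append_chain() and accumulating `hit += period` instead of `hit++` makes callchain weights reflect event counts rather than raw sample counts, so a path that is sampled rarely but with a huge period is no longer under-reported. Illustrative numbers (not from the patch):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        struct { const char *path; uint64_t samples, period; } n[] = {
                { "hot_loop",   100, 10000 },   /* many cheap samples */
                { "rare_spike",   2, 900000 },  /* few expensive samples */
        };

        /* weighting by period flips the ordering of these two paths */
        for (int i = 0; i < 2; i++)
                printf("%-10s count=%llu weighted=%llu\n", n[i].path,
                       (unsigned long long)n[i].samples,
                       (unsigned long long)(n[i].samples * n[i].period));
        return 0;
}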
  
diff --combined tools/perf/util/callchain.h
index 809850fb75fbcf4d27f92fee8c9f8ea0ae5df320,f2e9ee164bd8ce7bcf2bb7c7af5901e6c78ebcc3..624a96c636fdbc36a472ecaadf5fcb72c226bf38
@@@ -49,6 -49,9 +49,9 @@@ static inline void callchain_init(struc
        INIT_LIST_HEAD(&node->brothers);
        INIT_LIST_HEAD(&node->children);
        INIT_LIST_HEAD(&node->val);
+       node->parent = NULL;
+       node->hit = 0;
  }
  
  static inline u64 cumul_hits(struct callchain_node *node)
@@@ -58,7 -61,7 +61,7 @@@
  
  int register_callchain_param(struct callchain_param *param);
  int append_chain(struct callchain_node *root, struct ip_callchain *chain,
-                struct map_symbol *syms);
+                struct map_symbol *syms, u64 period);
  
 -bool ip_callchain__valid(struct ip_callchain *chain, event_t *event);
 +bool ip_callchain__valid(struct ip_callchain *chain, const event_t *event);
  #endif        /* __PERF_CALLCHAIN_H */