Now that we rely on the number of handled overflows, ensure all
handle_irq implementations actually return the right number.
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Don Zickus <dzickus@redhat.com>
Cc: peterz@infradead.org
Cc: robert.richter@amd.com
Cc: gorcunov@gmail.com
Cc: fweisbec@gmail.com
Cc: ying.huang@intel.com
Cc: ming.m.lin@intel.com
Cc: eranian@google.com
LKML-Reference: <1283454469-1909-4-git-send-email-dzickus@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
struct cpu_hw_events *cpuc;
int bit, loops;
u64 status;
struct cpu_hw_events *cpuc;
int bit, loops;
u64 status;
perf_sample_data_init(&data, 0);
perf_sample_data_init(&data, 0);
/*
* PEBS overflow sets bit 62 in the global status register
*/
/*
* PEBS overflow sets bit 62 in the global status register
*/
- if (__test_and_clear_bit(62, (unsigned long *)&status))
+ if (__test_and_clear_bit(62, (unsigned long *)&status)) {
+ handled++;
x86_pmu.drain_pebs(regs);
x86_pmu.drain_pebs(regs);
for_each_set_bit(bit, (unsigned long *)&status, X86_PMC_IDX_MAX) {
struct perf_event *event = cpuc->events[bit];
for_each_set_bit(bit, (unsigned long *)&status, X86_PMC_IDX_MAX) {
struct perf_event *event = cpuc->events[bit];
if (!test_bit(bit, cpuc->active_mask))
continue;
if (!test_bit(bit, cpuc->active_mask))
continue;
done:
intel_pmu_enable_all(0);
done:
intel_pmu_enable_all(0);
}
static struct event_constraint *
}
static struct event_constraint *
inc_irq_stat(apic_perf_irqs);
}
inc_irq_stat(apic_perf_irqs);
}