net-next-2.6.git: arch/x86/kernel/cpu/perf_event.c
perf, x86: Add PEBS infrastructure
1 /*
2  * Performance events x86 architecture code
3  *
4  *  Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
5  *  Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
6  *  Copyright (C) 2009 Jaswinder Singh Rajput
7  *  Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter
8  *  Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
9  *  Copyright (C) 2009 Intel Corporation, <markus.t.metzger@intel.com>
10  *  Copyright (C) 2009 Google, Inc., Stephane Eranian
11  *
12  *  For licencing details see kernel-base/COPYING
13  */
14
15 #include <linux/perf_event.h>
16 #include <linux/capability.h>
17 #include <linux/notifier.h>
18 #include <linux/hardirq.h>
19 #include <linux/kprobes.h>
20 #include <linux/module.h>
21 #include <linux/kdebug.h>
22 #include <linux/sched.h>
23 #include <linux/uaccess.h>
24 #include <linux/highmem.h>
25 #include <linux/cpu.h>
26 #include <linux/bitops.h>
27
28 #include <asm/apic.h>
29 #include <asm/stacktrace.h>
30 #include <asm/nmi.h>
31
32 static u64 perf_event_mask __read_mostly;
33
34 struct event_constraint {
35         union {
36                 unsigned long   idxmsk[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
37                 u64             idxmsk64;
38         };
39         u64     code;
40         u64     cmask;
41         int     weight;
42 };
43
44 struct amd_nb {
45         int nb_id;  /* NorthBridge id */
46         int refcnt; /* reference count */
47         struct perf_event *owners[X86_PMC_IDX_MAX];
48         struct event_constraint event_constraints[X86_PMC_IDX_MAX];
49 };
50
51 struct cpu_hw_events {
52         /*
53          * Generic x86 PMC bits
54          */
55         struct perf_event       *events[X86_PMC_IDX_MAX]; /* in counter order */
56         unsigned long           active_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
57         unsigned long           interrupts;
58         int                     enabled;
59
60         int                     n_events;
61         int                     n_added;
62         int                     assign[X86_PMC_IDX_MAX]; /* event to counter assignment */
63         u64                     tags[X86_PMC_IDX_MAX];
64         struct perf_event       *event_list[X86_PMC_IDX_MAX]; /* in enabled order */
65
66         /*
67          * Intel DebugStore bits
68          */
69         struct debug_store      *ds;
70         u64                     pebs_enabled;
71
72         /*
73          * AMD specific bits
74          */
75         struct amd_nb           *amd_nb;
76 };
77
78 #define __EVENT_CONSTRAINT(c, n, m, w) {\
79         { .idxmsk64 = (n) },            \
80         .code = (c),                    \
81         .cmask = (m),                   \
82         .weight = (w),                  \
83 }
84
85 #define EVENT_CONSTRAINT(c, n, m)       \
86         __EVENT_CONSTRAINT(c, n, m, HWEIGHT(n))
87
88 /*
89  * Constraint on the Event code.
90  */
91 #define INTEL_EVENT_CONSTRAINT(c, n)    \
92         EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVTSEL_MASK)
93
94 /*
95  * Constraint on the Event code + UMask + fixed-mask
96  */
97 #define FIXED_EVENT_CONSTRAINT(c, n)    \
98         EVENT_CONSTRAINT(c, (1ULL << (32+n)), INTEL_ARCH_FIXED_MASK)
99
100 /*
101  * Constraint on the Event code + UMask
102  */
103 #define PEBS_EVENT_CONSTRAINT(c, n)     \
104         EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK)
105
106 #define EVENT_CONSTRAINT_END            \
107         EVENT_CONSTRAINT(0, 0, 0)
108
109 #define for_each_event_constraint(e, c) \
110         for ((e) = (c); (e)->cmask; (e)++)
111
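/*
 * Illustrative sketch, not part of this file: a model-specific
 * constraint table built from the macros above (the array name, the
 * event code 0xc0 and the counter mask 0x3 are example values only;
 * the real tables live in the per-vendor files included near the end
 * of this file):
 *
 *      static struct event_constraint sample_constraints[] = {
 *              INTEL_EVENT_CONSTRAINT(0xc0, 0x3),
 *              EVENT_CONSTRAINT_END
 *      };
 *
 * The 0x3 index mask restricts event code 0xc0 to counters 0 and 1,
 * giving it HWEIGHT(0x3) == 2 possible homes, and
 * for_each_event_constraint() walks such a table until it reaches the
 * all-zero terminator (->cmask == 0).
 */
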
112 /*
113  * struct x86_pmu - generic x86 pmu
114  */
115 struct x86_pmu {
116         /*
117          * Generic x86 PMC bits
118          */
119         const char      *name;
120         int             version;
121         int             (*handle_irq)(struct pt_regs *);
122         void            (*disable_all)(void);
123         void            (*enable_all)(void);
124         void            (*enable)(struct perf_event *);
125         void            (*disable)(struct perf_event *);
126         unsigned        eventsel;
127         unsigned        perfctr;
128         u64             (*event_map)(int);
129         u64             (*raw_event)(u64);
130         int             max_events;
131         int             num_events;
132         int             num_events_fixed;
133         int             event_bits;
134         u64             event_mask;
135         int             apic;
136         u64             max_period;
137         struct event_constraint *
138                         (*get_event_constraints)(struct cpu_hw_events *cpuc,
139                                                  struct perf_event *event);
140
141         void            (*put_event_constraints)(struct cpu_hw_events *cpuc,
142                                                  struct perf_event *event);
143         struct event_constraint *event_constraints;
144
145         void            (*cpu_prepare)(int cpu);
146         void            (*cpu_starting)(int cpu);
147         void            (*cpu_dying)(int cpu);
148         void            (*cpu_dead)(int cpu);
149
150         /*
151          * Intel Arch Perfmon v2+
152          */
153         u64             intel_ctrl;
154
155         /*
156          * Intel DebugStore bits
157          */
158         int             bts, pebs;
159         int             pebs_record_size;
160         void            (*drain_pebs)(struct pt_regs *regs);
161         struct event_constraint *pebs_constraints;
162 };
163
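/*
 * Illustrative sketch with hypothetical values, not taken from the
 * vendor code: each per-vendor file included at the bottom fills in
 * one of these descriptors, and the core code only ever calls through
 * the global x86_pmu below:
 *
 *      static __initconst struct x86_pmu sample_pmu = {
 *              .name                   = "sample",
 *              .handle_irq             = x86_pmu_handle_irq,
 *              .disable_all            = x86_pmu_disable_all,
 *              .enable_all             = x86_pmu_enable_all,
 *              .enable                 = x86_pmu_enable_event,
 *              .disable                = x86_pmu_disable_event,
 *              .eventsel               = MSR_ARCH_PERFMON_EVENTSEL0,
 *              .perfctr                = MSR_ARCH_PERFMON_PERFCTR0,
 *              .max_period             = (1ULL << 31) - 1,
 *              .get_event_constraints  = x86_get_event_constraints,
 *      };
 *
 * intel_pmu_init()/amd_pmu_init() copy their own, fully filled-in
 * descriptor into x86_pmu during init_hw_perf_events().
 */
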
164 static struct x86_pmu x86_pmu __read_mostly;
165
166 static DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = {
167         .enabled = 1,
168 };
169
170 static int x86_perf_event_set_period(struct perf_event *event);
171
172 /*
173  * Generalized hw caching related hw_event table, filled
174  * in on a per model basis. A value of 0 means
175  * 'not supported', -1 means 'hw_event makes no sense on
176  * this CPU', any other value means the raw hw_event
177  * ID.
178  */
179
180 #define C(x) PERF_COUNT_HW_CACHE_##x
181
182 static u64 __read_mostly hw_cache_event_ids
183                                 [PERF_COUNT_HW_CACHE_MAX]
184                                 [PERF_COUNT_HW_CACHE_OP_MAX]
185                                 [PERF_COUNT_HW_CACHE_RESULT_MAX];
186
187 /*
188  * Propagate event elapsed time into the generic event.
189  * Can only be executed on the CPU where the event is active.
190  * Returns the new raw count.
191  */
192 static u64
193 x86_perf_event_update(struct perf_event *event)
194 {
195         struct hw_perf_event *hwc = &event->hw;
196         int shift = 64 - x86_pmu.event_bits;
197         u64 prev_raw_count, new_raw_count;
198         int idx = hwc->idx;
199         s64 delta;
200
201         if (idx == X86_PMC_IDX_FIXED_BTS)
202                 return 0;
203
204         /*
205          * Careful: an NMI might modify the previous event value.
206          *
207          * Our tactic to handle this is to first atomically read and
208          * exchange a new raw count - then add that new-prev delta
209          * count to the generic event atomically:
210          */
211 again:
212         prev_raw_count = atomic64_read(&hwc->prev_count);
213         rdmsrl(hwc->event_base + idx, new_raw_count);
214
215         if (atomic64_cmpxchg(&hwc->prev_count, prev_raw_count,
216                                         new_raw_count) != prev_raw_count)
217                 goto again;
218
219         /*
220          * Now we have the new raw value and have updated the prev
221          * timestamp already. We can now calculate the elapsed delta
222          * (event-)time and add that to the generic event.
223          *
224          * Careful, not all hw sign-extends above the physical width
225          * of the count.
226          */
227         delta = (new_raw_count << shift) - (prev_raw_count << shift);
228         delta >>= shift;
229
230         atomic64_add(delta, &event->count);
231         atomic64_sub(delta, &hwc->period_left);
232
233         return new_raw_count;
234 }
235
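/*
 * Worked example for the shift pair above (the counter width and
 * values are illustrative): with 40-bit counters, event_bits = 40 and
 * shift = 24.  Suppose the counter wrapped between two reads:
 *
 *      prev_raw_count = 0xfffffffff0   (16 ticks below the 2^40 wrap)
 *      new_raw_count  = 0x000000000f   (15 ticks after the wrap)
 *
 * Shifting both up by 24 and subtracting yields 0x1f000000 as a
 * 64-bit value, and the arithmetic shift back down gives
 * delta = 0x1f = 31, the true number of events, even though the raw
 * 40-bit values appear to have gone backwards.
 */
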
236 static atomic_t active_events;
237 static DEFINE_MUTEX(pmc_reserve_mutex);
238
239 static bool reserve_pmc_hardware(void)
240 {
241 #ifdef CONFIG_X86_LOCAL_APIC
242         int i;
243
244         if (nmi_watchdog == NMI_LOCAL_APIC)
245                 disable_lapic_nmi_watchdog();
246
247         for (i = 0; i < x86_pmu.num_events; i++) {
248                 if (!reserve_perfctr_nmi(x86_pmu.perfctr + i))
249                         goto perfctr_fail;
250         }
251
252         for (i = 0; i < x86_pmu.num_events; i++) {
253                 if (!reserve_evntsel_nmi(x86_pmu.eventsel + i))
254                         goto eventsel_fail;
255         }
256 #endif
257
258         return true;
259
260 #ifdef CONFIG_X86_LOCAL_APIC
261 eventsel_fail:
262         for (i--; i >= 0; i--)
263                 release_evntsel_nmi(x86_pmu.eventsel + i);
264
265         i = x86_pmu.num_events;
266
267 perfctr_fail:
268         for (i--; i >= 0; i--)
269                 release_perfctr_nmi(x86_pmu.perfctr + i);
270
271         if (nmi_watchdog == NMI_LOCAL_APIC)
272                 enable_lapic_nmi_watchdog();
273
274         return false;
275 #endif
276 }
277
278 static void release_pmc_hardware(void)
279 {
280 #ifdef CONFIG_X86_LOCAL_APIC
281         int i;
282
283         for (i = 0; i < x86_pmu.num_events; i++) {
284                 release_perfctr_nmi(x86_pmu.perfctr + i);
285                 release_evntsel_nmi(x86_pmu.eventsel + i);
286         }
287
288         if (nmi_watchdog == NMI_LOCAL_APIC)
289                 enable_lapic_nmi_watchdog();
290 #endif
291 }
292
293 static int reserve_ds_buffers(void);
294 static void release_ds_buffers(void);
295
296 static void hw_perf_event_destroy(struct perf_event *event)
297 {
298         if (atomic_dec_and_mutex_lock(&active_events, &pmc_reserve_mutex)) {
299                 release_pmc_hardware();
300                 release_ds_buffers();
301                 mutex_unlock(&pmc_reserve_mutex);
302         }
303 }
304
305 static inline int x86_pmu_initialized(void)
306 {
307         return x86_pmu.handle_irq != NULL;
308 }
309
310 static inline int
311 set_ext_hw_attr(struct hw_perf_event *hwc, struct perf_event_attr *attr)
312 {
313         unsigned int cache_type, cache_op, cache_result;
314         u64 config, val;
315
316         config = attr->config;
317
318         cache_type = (config >>  0) & 0xff;
319         if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
320                 return -EINVAL;
321
322         cache_op = (config >>  8) & 0xff;
323         if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
324                 return -EINVAL;
325
326         cache_result = (config >> 16) & 0xff;
327         if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
328                 return -EINVAL;
329
330         val = hw_cache_event_ids[cache_type][cache_op][cache_result];
331
332         if (val == 0)
333                 return -ENOENT;
334
335         if (val == -1)
336                 return -EINVAL;
337
338         hwc->config |= val;
339
340         return 0;
341 }
342
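/*
 * Example of the attr->config encoding decoded above, using the
 * generic PERF_COUNT_HW_CACHE_* enum values: an L1D read-miss event
 * is requested as
 *
 *      config = (PERF_COUNT_HW_CACHE_RESULT_MISS << 16) |
 *               (PERF_COUNT_HW_CACHE_OP_READ     <<  8) |
 *                PERF_COUNT_HW_CACHE_L1D;
 *
 * i.e. 0x10000, which indexes hw_cache_event_ids[0][0][1] and is
 * translated into whatever model-specific raw event id the vendor
 * setup code stored there.
 */
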
343 /*
344  * Setup the hardware configuration for a given attr_type
345  */
346 static int __hw_perf_event_init(struct perf_event *event)
347 {
348         struct perf_event_attr *attr = &event->attr;
349         struct hw_perf_event *hwc = &event->hw;
350         u64 config;
351         int err;
352
353         if (!x86_pmu_initialized())
354                 return -ENODEV;
355
356         err = 0;
357         if (!atomic_inc_not_zero(&active_events)) {
358                 mutex_lock(&pmc_reserve_mutex);
359                 if (atomic_read(&active_events) == 0) {
360                         if (!reserve_pmc_hardware())
361                                 err = -EBUSY;
362                         else
363                                 err = reserve_ds_buffers();
364                 }
365                 if (!err)
366                         atomic_inc(&active_events);
367                 mutex_unlock(&pmc_reserve_mutex);
368         }
369         if (err)
370                 return err;
371
372         event->destroy = hw_perf_event_destroy;
373
374         /*
375          * Generate PMC IRQs:
376          * (keep 'enabled' bit clear for now)
377          */
378         hwc->config = ARCH_PERFMON_EVENTSEL_INT;
379
380         hwc->idx = -1;
381         hwc->last_cpu = -1;
382         hwc->last_tag = ~0ULL;
383
384         /*
385          * Count user and OS events unless requested not to.
386          */
387         if (!attr->exclude_user)
388                 hwc->config |= ARCH_PERFMON_EVENTSEL_USR;
389         if (!attr->exclude_kernel)
390                 hwc->config |= ARCH_PERFMON_EVENTSEL_OS;
391
392         if (!hwc->sample_period) {
393                 hwc->sample_period = x86_pmu.max_period;
394                 hwc->last_period = hwc->sample_period;
395                 atomic64_set(&hwc->period_left, hwc->sample_period);
396         } else {
397                 /*
398                  * If we have a PMU initialized but no APIC
399                  * interrupts, we cannot sample hardware
400                  * events (user-space has to fall back and
401                  * sample via a hrtimer based software event):
402                  */
403                 if (!x86_pmu.apic)
404                         return -EOPNOTSUPP;
405         }
406
407         /*
408          * Raw hw_event type provides the config in the hw_event structure
409          */
410         if (attr->type == PERF_TYPE_RAW) {
411                 hwc->config |= x86_pmu.raw_event(attr->config);
412                 if ((hwc->config & ARCH_PERFMON_EVENTSEL_ANY) &&
413                     perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN))
414                         return -EACCES;
415                 return 0;
416         }
417
418         if (attr->type == PERF_TYPE_HW_CACHE)
419                 return set_ext_hw_attr(hwc, attr);
420
421         if (attr->config >= x86_pmu.max_events)
422                 return -EINVAL;
423
424         /*
425          * The generic map:
426          */
427         config = x86_pmu.event_map(attr->config);
428
429         if (config == 0)
430                 return -ENOENT;
431
432         if (config == -1LL)
433                 return -EINVAL;
434
435         /*
436          * Branch tracing:
437          */
438         if ((attr->config == PERF_COUNT_HW_BRANCH_INSTRUCTIONS) &&
439             (hwc->sample_period == 1)) {
440                 /* BTS is not supported by this architecture. */
441                 if (!x86_pmu.bts)
442                         return -EOPNOTSUPP;
443
444                 /* BTS is currently only allowed for user-mode. */
445                 if (hwc->config & ARCH_PERFMON_EVENTSEL_OS)
446                         return -EOPNOTSUPP;
447         }
448
449         hwc->config |= config;
450
451         return 0;
452 }
453
454 static void x86_pmu_disable_all(void)
455 {
456         struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
457         int idx;
458
459         for (idx = 0; idx < x86_pmu.num_events; idx++) {
460                 u64 val;
461
462                 if (!test_bit(idx, cpuc->active_mask))
463                         continue;
464                 rdmsrl(x86_pmu.eventsel + idx, val);
465                 if (!(val & ARCH_PERFMON_EVENTSEL_ENABLE))
466                         continue;
467                 val &= ~ARCH_PERFMON_EVENTSEL_ENABLE;
468                 wrmsrl(x86_pmu.eventsel + idx, val);
469         }
470 }
471
472 void hw_perf_disable(void)
473 {
474         struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
475
476         if (!x86_pmu_initialized())
477                 return;
478
479         if (!cpuc->enabled)
480                 return;
481
482         cpuc->n_added = 0;
483         cpuc->enabled = 0;
484         barrier();
485
486         x86_pmu.disable_all();
487 }
488
489 static void x86_pmu_enable_all(void)
490 {
491         struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
492         int idx;
493
494         for (idx = 0; idx < x86_pmu.num_events; idx++) {
495                 struct perf_event *event = cpuc->events[idx];
496                 u64 val;
497
498                 if (!test_bit(idx, cpuc->active_mask))
499                         continue;
500
501                 val = event->hw.config;
502                 val |= ARCH_PERFMON_EVENTSEL_ENABLE;
503                 wrmsrl(x86_pmu.eventsel + idx, val);
504         }
505 }
506
507 static const struct pmu pmu;
508
509 static inline int is_x86_event(struct perf_event *event)
510 {
511         return event->pmu == &pmu;
512 }
513
514 static int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign)
515 {
516         struct event_constraint *c, *constraints[X86_PMC_IDX_MAX];
517         unsigned long used_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
518         int i, j, w, wmax, num = 0;
519         struct hw_perf_event *hwc;
520
521         bitmap_zero(used_mask, X86_PMC_IDX_MAX);
522
523         for (i = 0; i < n; i++) {
524                 c = x86_pmu.get_event_constraints(cpuc, cpuc->event_list[i]);
525                 constraints[i] = c;
526         }
527
528         /*
529          * fastpath, try to reuse previous register
530          */
531         for (i = 0; i < n; i++) {
532                 hwc = &cpuc->event_list[i]->hw;
533                 c = constraints[i];
534
535                 /* never assigned */
536                 if (hwc->idx == -1)
537                         break;
538
539                 /* constraint still honored */
540                 if (!test_bit(hwc->idx, c->idxmsk))
541                         break;
542
543                 /* not already used */
544                 if (test_bit(hwc->idx, used_mask))
545                         break;
546
547                 __set_bit(hwc->idx, used_mask);
548                 if (assign)
549                         assign[i] = hwc->idx;
550         }
551         if (i == n)
552                 goto done;
553
554         /*
555          * begin slow path
556          */
557
558         bitmap_zero(used_mask, X86_PMC_IDX_MAX);
559
560         /*
561          * weight = number of possible counters
562          *
563          * 1    = most constrained, only works on one counter
564          * wmax = least constrained, works on any counter
565          *
566          * assign events to counters starting with most
567          * constrained events.
568          */
569         wmax = x86_pmu.num_events;
570
571         /*
572          * when fixed event counters are present,
573          * wmax is incremented by 1 to account
574          * for one more choice
575          */
576         if (x86_pmu.num_events_fixed)
577                 wmax++;
578
579         for (w = 1, num = n; num && w <= wmax; w++) {
580                 /* for each event */
581                 for (i = 0; num && i < n; i++) {
582                         c = constraints[i];
583                         hwc = &cpuc->event_list[i]->hw;
584
585                         if (c->weight != w)
586                                 continue;
587
588                         for_each_set_bit(j, c->idxmsk, X86_PMC_IDX_MAX) {
589                                 if (!test_bit(j, used_mask))
590                                         break;
591                         }
592
593                         if (j == X86_PMC_IDX_MAX)
594                                 break;
595
596                         __set_bit(j, used_mask);
597
598                         if (assign)
599                                 assign[i] = j;
600                         num--;
601                 }
602         }
603 done:
604         /*
605          * scheduling failed or is just a simulation,
606          * free resources if necessary
607          */
608         if (!assign || num) {
609                 for (i = 0; i < n; i++) {
610                         if (x86_pmu.put_event_constraints)
611                                 x86_pmu.put_event_constraints(cpuc, cpuc->event_list[i]);
612                 }
613         }
614         return num ? -ENOSPC : 0;
615 }
616
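/*
 * Small scheduling example for the weight loop above (the counter
 * masks are made up): with two generic counters, event A constrained
 * to counter 0 only (idxmsk 0x1, weight 1) and event B allowed on
 * either counter (idxmsk 0x3, weight 2), the w == 1 pass places A on
 * counter 0 and the w == 2 pass then places B on counter 1.  Assigning
 * in the opposite order could park B on counter 0 and leave A
 * unschedulable, which is exactly what the most-constrained-first
 * ordering avoids.
 */
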
617 /*
618  * dogrp: true if we must also collect the leader's sibling events (i.e. a group)
619  * returns the total number of events collected, or a negative error code
620  */
621 static int collect_events(struct cpu_hw_events *cpuc, struct perf_event *leader, bool dogrp)
622 {
623         struct perf_event *event;
624         int n, max_count;
625
626         max_count = x86_pmu.num_events + x86_pmu.num_events_fixed;
627
628         /* current number of events already accepted */
629         n = cpuc->n_events;
630
631         if (is_x86_event(leader)) {
632                 if (n >= max_count)
633                         return -ENOSPC;
634                 cpuc->event_list[n] = leader;
635                 n++;
636         }
637         if (!dogrp)
638                 return n;
639
640         list_for_each_entry(event, &leader->sibling_list, group_entry) {
641                 if (!is_x86_event(event) ||
642                     event->state <= PERF_EVENT_STATE_OFF)
643                         continue;
644
645                 if (n >= max_count)
646                         return -ENOSPC;
647
648                 cpuc->event_list[n] = event;
649                 n++;
650         }
651         return n;
652 }
653
654 static inline void x86_assign_hw_event(struct perf_event *event,
655                                 struct cpu_hw_events *cpuc, int i)
656 {
657         struct hw_perf_event *hwc = &event->hw;
658
659         hwc->idx = cpuc->assign[i];
660         hwc->last_cpu = smp_processor_id();
661         hwc->last_tag = ++cpuc->tags[i];
662
663         if (hwc->idx == X86_PMC_IDX_FIXED_BTS) {
664                 hwc->config_base = 0;
665                 hwc->event_base = 0;
666         } else if (hwc->idx >= X86_PMC_IDX_FIXED) {
667                 hwc->config_base = MSR_ARCH_PERFMON_FIXED_CTR_CTRL;
668                 /*
669                  * We set it so that event_base + idx in wrmsr/rdmsr maps to
670                  * MSR_ARCH_PERFMON_FIXED_CTR0 ... CTR2:
671                  */
672                 hwc->event_base =
673                         MSR_ARCH_PERFMON_FIXED_CTR0 - X86_PMC_IDX_FIXED;
674         } else {
675                 hwc->config_base = x86_pmu.eventsel;
676                 hwc->event_base  = x86_pmu.perfctr;
677         }
678 }
679
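/*
 * Example of the fixed-counter base trick above: X86_PMC_IDX_FIXED is
 * 32, so for the second fixed counter (idx == 33) a later
 * rdmsrl(hwc->event_base + idx, ...) computes
 *
 *      (MSR_ARCH_PERFMON_FIXED_CTR0 - 32) + 33
 *              == MSR_ARCH_PERFMON_FIXED_CTR0 + 1
 *              == MSR_ARCH_PERFMON_FIXED_CTR1
 *
 * so the generic read/write paths need no special casing for fixed
 * counters.
 */
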
680 static inline int match_prev_assignment(struct hw_perf_event *hwc,
681                                         struct cpu_hw_events *cpuc,
682                                         int i)
683 {
684         return hwc->idx == cpuc->assign[i] &&
685                 hwc->last_cpu == smp_processor_id() &&
686                 hwc->last_tag == cpuc->tags[i];
687 }
688
689 static int x86_pmu_start(struct perf_event *event);
690 static void x86_pmu_stop(struct perf_event *event);
691
692 void hw_perf_enable(void)
693 {
694         struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
695         struct perf_event *event;
696         struct hw_perf_event *hwc;
697         int i;
698
699         if (!x86_pmu_initialized())
700                 return;
701
702         if (cpuc->enabled)
703                 return;
704
705         if (cpuc->n_added) {
706                 int n_running = cpuc->n_events - cpuc->n_added;
707                 /*
708                  * apply assignment obtained either from
709                  * hw_perf_group_sched_in() or x86_pmu_enable()
710                  *
711                  * step1: save events moving to new counters
712                  * step2: reprogram moved events into new counters
713                  */
714                 for (i = 0; i < n_running; i++) {
715
716                         event = cpuc->event_list[i];
717                         hwc = &event->hw;
718
719                         /*
720                          * we can avoid reprogramming counter if:
721                          * - assigned same counter as last time
722                          * - running on same CPU as last time
723                          * - no other event has used the counter since
724                          */
725                         if (hwc->idx == -1 ||
726                             match_prev_assignment(hwc, cpuc, i))
727                                 continue;
728
729                         x86_pmu_stop(event);
730
731                         hwc->idx = -1;
732                 }
733
734                 for (i = 0; i < cpuc->n_events; i++) {
735
736                         event = cpuc->event_list[i];
737                         hwc = &event->hw;
738
739                         if (i < n_running &&
740                             match_prev_assignment(hwc, cpuc, i))
741                                 continue;
742
743                         if (hwc->idx == -1)
744                                 x86_assign_hw_event(event, cpuc, i);
745
746                         x86_pmu_start(event);
747                 }
748                 cpuc->n_added = 0;
749                 perf_events_lapic_init();
750         }
751
752         cpuc->enabled = 1;
753         barrier();
754
755         x86_pmu.enable_all();
756 }
757
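/*
 * Example of the two-step reprogramming above (hypothetical layout):
 * suppose counters 0 and 1 are running events A and B, and adding
 * event C makes the scheduler move B to counter 2.  Step 1 stops B,
 * draining its count, because match_prev_assignment() fails for it;
 * step 2 then restarts B on counter 2 and starts C, while A, whose
 * assignment did not change, is never touched.
 */
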
758 static inline void __x86_pmu_enable_event(struct hw_perf_event *hwc)
759 {
760         (void)checking_wrmsrl(hwc->config_base + hwc->idx,
761                               hwc->config | ARCH_PERFMON_EVENTSEL_ENABLE);
762 }
763
764 static inline void x86_pmu_disable_event(struct perf_event *event)
765 {
766         struct hw_perf_event *hwc = &event->hw;
767         (void)checking_wrmsrl(hwc->config_base + hwc->idx, hwc->config);
768 }
769
770 static DEFINE_PER_CPU(u64 [X86_PMC_IDX_MAX], pmc_prev_left);
771
772 /*
773  * Set the next IRQ period, based on the hwc->period_left value.
774  * To be called with the event disabled in hw:
775  */
776 static int
777 x86_perf_event_set_period(struct perf_event *event)
778 {
779         struct hw_perf_event *hwc = &event->hw;
780         s64 left = atomic64_read(&hwc->period_left);
781         s64 period = hwc->sample_period;
782         int err, ret = 0, idx = hwc->idx;
783
784         if (idx == X86_PMC_IDX_FIXED_BTS)
785                 return 0;
786
787         /*
788          * If we are way outside a reasonable range then just skip forward:
789          */
790         if (unlikely(left <= -period)) {
791                 left = period;
792                 atomic64_set(&hwc->period_left, left);
793                 hwc->last_period = period;
794                 ret = 1;
795         }
796
797         if (unlikely(left <= 0)) {
798                 left += period;
799                 atomic64_set(&hwc->period_left, left);
800                 hwc->last_period = period;
801                 ret = 1;
802         }
803         /*
804          * Quirk: certain CPUs don't like it if just 1 hw_event is left:
805          */
806         if (unlikely(left < 2))
807                 left = 2;
808
809         if (left > x86_pmu.max_period)
810                 left = x86_pmu.max_period;
811
812         per_cpu(pmc_prev_left[idx], smp_processor_id()) = left;
813
814         /*
815          * The hw event starts counting from this event offset,
816          * mark it to be able to extract future deltas:
817          */
818         atomic64_set(&hwc->prev_count, (u64)-left);
819
820         err = checking_wrmsrl(hwc->event_base + idx,
821                              (u64)(-left) & x86_pmu.event_mask);
822
823         perf_event_update_userpage(event);
824
825         return ret;
826 }
827
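/*
 * Worked example of the (u64)-left programming above, assuming 40-bit
 * counters and an arbitrarily chosen period: with left = 100000 the
 * counter is written with
 *
 *      (u64)(-100000) & x86_pmu.event_mask == 0xfffffe7960
 *
 * so it counts up and overflows, raising the PMI, after exactly
 * 100000 further increments.
 */
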
828 static void x86_pmu_enable_event(struct perf_event *event)
829 {
830         struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
831         if (cpuc->enabled)
832                 __x86_pmu_enable_event(&event->hw);
833 }
834
835 /*
836  * activate a single event
837  *
838  * The event is added to the group of enabled events
839  * but only if it can be scheduled with existing events.
840  *
841  * Called with PMU disabled. If successful and return value 1,
842  * then guaranteed to call perf_enable() and hw_perf_enable()
843  */
844 static int x86_pmu_enable(struct perf_event *event)
845 {
846         struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
847         struct hw_perf_event *hwc;
848         int assign[X86_PMC_IDX_MAX];
849         int n, n0, ret;
850
851         hwc = &event->hw;
852
853         n0 = cpuc->n_events;
854         n = collect_events(cpuc, event, false);
855         if (n < 0)
856                 return n;
857
858         ret = x86_schedule_events(cpuc, n, assign);
859         if (ret)
860                 return ret;
861         /*
862          * copy the new assignment now that we know it is possible;
863          * it will be used by hw_perf_enable()
864          */
865         memcpy(cpuc->assign, assign, n*sizeof(int));
866
867         cpuc->n_events = n;
868         cpuc->n_added += n - n0;
869
870         return 0;
871 }
872
873 static int x86_pmu_start(struct perf_event *event)
874 {
875         struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
876         int idx = event->hw.idx;
877
878         if (idx == -1)
879                 return -EAGAIN;
880
881         x86_perf_event_set_period(event);
882         cpuc->events[idx] = event;
883         __set_bit(idx, cpuc->active_mask);
884         x86_pmu.enable(event);
885         perf_event_update_userpage(event);
886
887         return 0;
888 }
889
890 static void x86_pmu_unthrottle(struct perf_event *event)
891 {
892         int ret = x86_pmu_start(event);
893         WARN_ON_ONCE(ret);
894 }
895
896 void perf_event_print_debug(void)
897 {
898         u64 ctrl, status, overflow, pmc_ctrl, pmc_count, prev_left, fixed;
899         u64 pebs;
900         struct cpu_hw_events *cpuc;
901         unsigned long flags;
902         int cpu, idx;
903
904         if (!x86_pmu.num_events)
905                 return;
906
907         local_irq_save(flags);
908
909         cpu = smp_processor_id();
910         cpuc = &per_cpu(cpu_hw_events, cpu);
911
912         if (x86_pmu.version >= 2) {
913                 rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, ctrl);
914                 rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);
915                 rdmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, overflow);
916                 rdmsrl(MSR_ARCH_PERFMON_FIXED_CTR_CTRL, fixed);
917                 rdmsrl(MSR_IA32_PEBS_ENABLE, pebs);
918
919                 pr_info("\n");
920                 pr_info("CPU#%d: ctrl:       %016llx\n", cpu, ctrl);
921                 pr_info("CPU#%d: status:     %016llx\n", cpu, status);
922                 pr_info("CPU#%d: overflow:   %016llx\n", cpu, overflow);
923                 pr_info("CPU#%d: fixed:      %016llx\n", cpu, fixed);
924                 pr_info("CPU#%d: pebs:       %016llx\n", cpu, pebs);
925         }
926         pr_info("CPU#%d: active:       %016llx\n", cpu, *(u64 *)cpuc->active_mask);
927
928         for (idx = 0; idx < x86_pmu.num_events; idx++) {
929                 rdmsrl(x86_pmu.eventsel + idx, pmc_ctrl);
930                 rdmsrl(x86_pmu.perfctr  + idx, pmc_count);
931
932                 prev_left = per_cpu(pmc_prev_left[idx], cpu);
933
934                 pr_info("CPU#%d:   gen-PMC%d ctrl:  %016llx\n",
935                         cpu, idx, pmc_ctrl);
936                 pr_info("CPU#%d:   gen-PMC%d count: %016llx\n",
937                         cpu, idx, pmc_count);
938                 pr_info("CPU#%d:   gen-PMC%d left:  %016llx\n",
939                         cpu, idx, prev_left);
940         }
941         for (idx = 0; idx < x86_pmu.num_events_fixed; idx++) {
942                 rdmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, pmc_count);
943
944                 pr_info("CPU#%d: fixed-PMC%d count: %016llx\n",
945                         cpu, idx, pmc_count);
946         }
947         local_irq_restore(flags);
948 }
949
950 static void x86_pmu_stop(struct perf_event *event)
951 {
952         struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
953         struct hw_perf_event *hwc = &event->hw;
954         int idx = hwc->idx;
955
956         if (!__test_and_clear_bit(idx, cpuc->active_mask))
957                 return;
958
959         x86_pmu.disable(event);
960
961         /*
962          * Drain the remaining delta count out of an event
963          * that we are disabling:
964          */
965         x86_perf_event_update(event);
966
967         cpuc->events[idx] = NULL;
968 }
969
970 static void x86_pmu_disable(struct perf_event *event)
971 {
972         struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
973         int i;
974
975         x86_pmu_stop(event);
976
977         for (i = 0; i < cpuc->n_events; i++) {
978                 if (event == cpuc->event_list[i]) {
979
980                         if (x86_pmu.put_event_constraints)
981                                 x86_pmu.put_event_constraints(cpuc, event);
982
983                         while (++i < cpuc->n_events)
984                                 cpuc->event_list[i-1] = cpuc->event_list[i];
985
986                         --cpuc->n_events;
987                         break;
988                 }
989         }
990         perf_event_update_userpage(event);
991 }
992
993 static int x86_pmu_handle_irq(struct pt_regs *regs)
994 {
995         struct perf_sample_data data;
996         struct cpu_hw_events *cpuc;
997         struct perf_event *event;
998         struct hw_perf_event *hwc;
999         int idx, handled = 0;
1000         u64 val;
1001
1002         perf_sample_data_init(&data, 0);
1003
1004         cpuc = &__get_cpu_var(cpu_hw_events);
1005
1006         for (idx = 0; idx < x86_pmu.num_events; idx++) {
1007                 if (!test_bit(idx, cpuc->active_mask))
1008                         continue;
1009
1010                 event = cpuc->events[idx];
1011                 hwc = &event->hw;
1012
1013                 val = x86_perf_event_update(event);
1014                 if (val & (1ULL << (x86_pmu.event_bits - 1)))
1015                         continue;
1016
1017                 /*
1018                  * event overflow
1019                  */
1020                 handled         = 1;
1021                 data.period     = event->hw.last_period;
1022
1023                 if (!x86_perf_event_set_period(event))
1024                         continue;
1025
1026                 if (perf_event_overflow(event, 1, &data, regs))
1027                         x86_pmu_stop(event);
1028         }
1029
1030         if (handled)
1031                 inc_irq_stat(apic_perf_irqs);
1032
1033         return handled;
1034 }
1035
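/*
 * Note on the sign-bit test above: a counter armed with (u64)-left
 * starts with its top bit (bit event_bits - 1) set and keeps it set
 * until it wraps past zero.  If x86_perf_event_update() still sees
 * that bit, this counter has not overflowed and the PMI must have
 * been raised by another one; only counters whose value has wrapped
 * are treated as overflowed and re-armed via
 * x86_perf_event_set_period().
 */
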
1036 void smp_perf_pending_interrupt(struct pt_regs *regs)
1037 {
1038         irq_enter();
1039         ack_APIC_irq();
1040         inc_irq_stat(apic_pending_irqs);
1041         perf_event_do_pending();
1042         irq_exit();
1043 }
1044
1045 void set_perf_event_pending(void)
1046 {
1047 #ifdef CONFIG_X86_LOCAL_APIC
1048         if (!x86_pmu.apic || !x86_pmu_initialized())
1049                 return;
1050
1051         apic->send_IPI_self(LOCAL_PENDING_VECTOR);
1052 #endif
1053 }
1054
1055 void perf_events_lapic_init(void)
1056 {
1057 #ifdef CONFIG_X86_LOCAL_APIC
1058         if (!x86_pmu.apic || !x86_pmu_initialized())
1059                 return;
1060
1061         /*
1062          * Always use NMI for PMU
1063          */
1064         apic_write(APIC_LVTPC, APIC_DM_NMI);
1065 #endif
1066 }
1067
1068 static int __kprobes
1069 perf_event_nmi_handler(struct notifier_block *self,
1070                          unsigned long cmd, void *__args)
1071 {
1072         struct die_args *args = __args;
1073         struct pt_regs *regs;
1074
1075         if (!atomic_read(&active_events))
1076                 return NOTIFY_DONE;
1077
1078         switch (cmd) {
1079         case DIE_NMI:
1080         case DIE_NMI_IPI:
1081                 break;
1082
1083         default:
1084                 return NOTIFY_DONE;
1085         }
1086
1087         regs = args->regs;
1088
1089 #ifdef CONFIG_X86_LOCAL_APIC
1090         apic_write(APIC_LVTPC, APIC_DM_NMI);
1091 #endif
1092         /*
1093          * Can't rely on the handled return value to say it was our NMI, two
1094          * events could trigger 'simultaneously' raising two back-to-back NMIs.
1095          *
1096          * If the first NMI handles both, the latter will be empty and daze
1097          * the CPU.
1098          */
1099         x86_pmu.handle_irq(regs);
1100
1101         return NOTIFY_STOP;
1102 }
1103
1104 static __read_mostly struct notifier_block perf_event_nmi_notifier = {
1105         .notifier_call          = perf_event_nmi_handler,
1106         .next                   = NULL,
1107         .priority               = 1
1108 };
1109
1110 static struct event_constraint unconstrained;
1111 static struct event_constraint emptyconstraint;
1112
1113 static struct event_constraint *
1114 x86_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event)
1115 {
1116         struct event_constraint *c;
1117
1118         if (x86_pmu.event_constraints) {
1119                 for_each_event_constraint(c, x86_pmu.event_constraints) {
1120                         if ((event->hw.config & c->cmask) == c->code)
1121                                 return c;
1122                 }
1123         }
1124
1125         return &unconstrained;
1126 }
1127
1128 static int x86_event_sched_in(struct perf_event *event,
1129                           struct perf_cpu_context *cpuctx)
1130 {
1131         int ret = 0;
1132
1133         event->state = PERF_EVENT_STATE_ACTIVE;
1134         event->oncpu = smp_processor_id();
1135         event->tstamp_running += event->ctx->time - event->tstamp_stopped;
1136
1137         if (!is_x86_event(event))
1138                 ret = event->pmu->enable(event);
1139
1140         if (!ret && !is_software_event(event))
1141                 cpuctx->active_oncpu++;
1142
1143         if (!ret && event->attr.exclusive)
1144                 cpuctx->exclusive = 1;
1145
1146         return ret;
1147 }
1148
1149 static void x86_event_sched_out(struct perf_event *event,
1150                             struct perf_cpu_context *cpuctx)
1151 {
1152         event->state = PERF_EVENT_STATE_INACTIVE;
1153         event->oncpu = -1;
1154
1155         if (!is_x86_event(event))
1156                 event->pmu->disable(event);
1157
1158         event->tstamp_running -= event->ctx->time - event->tstamp_stopped;
1159
1160         if (!is_software_event(event))
1161                 cpuctx->active_oncpu--;
1162
1163         if (event->attr.exclusive || !cpuctx->active_oncpu)
1164                 cpuctx->exclusive = 0;
1165 }
1166
1167 /*
1168  * Called to enable a whole group of events.
1169  * Returns 1 if the group was enabled, or -EAGAIN if it could not be.
1170  * Assumes the caller has disabled interrupts and has
1171  * frozen the PMU with hw_perf_save_disable.
1172  *
1173  * called with PMU disabled. If successful and return value 1,
1174  * then guaranteed to call perf_enable() and hw_perf_enable()
1175  */
1176 int hw_perf_group_sched_in(struct perf_event *leader,
1177                struct perf_cpu_context *cpuctx,
1178                struct perf_event_context *ctx)
1179 {
1180         struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1181         struct perf_event *sub;
1182         int assign[X86_PMC_IDX_MAX];
1183         int n0, n1, ret;
1184
1185         /* n0 = total number of events */
1186         n0 = collect_events(cpuc, leader, true);
1187         if (n0 < 0)
1188                 return n0;
1189
1190         ret = x86_schedule_events(cpuc, n0, assign);
1191         if (ret)
1192                 return ret;
1193
1194         ret = x86_event_sched_in(leader, cpuctx);
1195         if (ret)
1196                 return ret;
1197
1198         n1 = 1;
1199         list_for_each_entry(sub, &leader->sibling_list, group_entry) {
1200                 if (sub->state > PERF_EVENT_STATE_OFF) {
1201                         ret = x86_event_sched_in(sub, cpuctx);
1202                         if (ret)
1203                                 goto undo;
1204                         ++n1;
1205                 }
1206         }
1207         /*
1208          * copy the new assignment now that we know it is possible;
1209          * it will be used by hw_perf_enable()
1210          */
1211         memcpy(cpuc->assign, assign, n0*sizeof(int));
1212
1213         cpuc->n_events  = n0;
1214         cpuc->n_added  += n1;
1215         ctx->nr_active += n1;
1216
1217         /*
1218          * 1 means successful and events are active
1219          * This is not quite true because we defer
1220          * actual activation until hw_perf_enable() but
1221          * this way we ensure the caller won't try to enable
1222          * individual events
1223          */
1224         return 1;
1225 undo:
1226         x86_event_sched_out(leader, cpuctx);
1227         n0  = 1;
1228         list_for_each_entry(sub, &leader->sibling_list, group_entry) {
1229                 if (sub->state == PERF_EVENT_STATE_ACTIVE) {
1230                         x86_event_sched_out(sub, cpuctx);
1231                         if (++n0 == n1)
1232                                 break;
1233                 }
1234         }
1235         return ret;
1236 }
1237
1238 #include "perf_event_amd.c"
1239 #include "perf_event_p6.c"
1240 #include "perf_event_intel_ds.c"
1241 #include "perf_event_intel.c"
1242
1243 static int __cpuinit
1244 x86_pmu_notifier(struct notifier_block *self, unsigned long action, void *hcpu)
1245 {
1246         unsigned int cpu = (long)hcpu;
1247
1248         switch (action & ~CPU_TASKS_FROZEN) {
1249         case CPU_UP_PREPARE:
1250                 if (x86_pmu.cpu_prepare)
1251                         x86_pmu.cpu_prepare(cpu);
1252                 break;
1253
1254         case CPU_STARTING:
1255                 if (x86_pmu.cpu_starting)
1256                         x86_pmu.cpu_starting(cpu);
1257                 break;
1258
1259         case CPU_DYING:
1260                 if (x86_pmu.cpu_dying)
1261                         x86_pmu.cpu_dying(cpu);
1262                 break;
1263
1264         case CPU_DEAD:
1265                 if (x86_pmu.cpu_dead)
1266                         x86_pmu.cpu_dead(cpu);
1267                 break;
1268
1269         default:
1270                 break;
1271         }
1272
1273         return NOTIFY_OK;
1274 }
1275
1276 static void __init pmu_check_apic(void)
1277 {
1278         if (cpu_has_apic)
1279                 return;
1280
1281         x86_pmu.apic = 0;
1282         pr_info("no APIC, boot with the \"lapic\" boot parameter to force-enable it.\n");
1283         pr_info("no hardware sampling interrupt available.\n");
1284 }
1285
1286 void __init init_hw_perf_events(void)
1287 {
1288         struct event_constraint *c;
1289         int err;
1290
1291         pr_info("Performance Events: ");
1292
1293         switch (boot_cpu_data.x86_vendor) {
1294         case X86_VENDOR_INTEL:
1295                 err = intel_pmu_init();
1296                 break;
1297         case X86_VENDOR_AMD:
1298                 err = amd_pmu_init();
1299                 break;
1300         default:
1301                 return;
1302         }
1303         if (err != 0) {
1304                 pr_cont("no PMU driver, software events only.\n");
1305                 return;
1306         }
1307
1308         pmu_check_apic();
1309
1310         pr_cont("%s PMU driver.\n", x86_pmu.name);
1311
1312         if (x86_pmu.num_events > X86_PMC_MAX_GENERIC) {
1313                 WARN(1, KERN_ERR "hw perf events %d > max(%d), clipping!",
1314                      x86_pmu.num_events, X86_PMC_MAX_GENERIC);
1315                 x86_pmu.num_events = X86_PMC_MAX_GENERIC;
1316         }
1317         perf_event_mask = (1 << x86_pmu.num_events) - 1;
1318         perf_max_events = x86_pmu.num_events;
1319
1320         if (x86_pmu.num_events_fixed > X86_PMC_MAX_FIXED) {
1321                 WARN(1, KERN_ERR "hw perf events fixed %d > max(%d), clipping!",
1322                      x86_pmu.num_events_fixed, X86_PMC_MAX_FIXED);
1323                 x86_pmu.num_events_fixed = X86_PMC_MAX_FIXED;
1324         }
1325
1326         perf_event_mask |=
1327                 ((1LL << x86_pmu.num_events_fixed)-1) << X86_PMC_IDX_FIXED;
1328         x86_pmu.intel_ctrl = perf_event_mask;
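        /*
         * Example layout of perf_event_mask (the counts are
         * illustrative): with 4 generic and 3 fixed counters the two
         * assignments above produce
         *
         *      (1 << 4) - 1                    = 0x000000000f
         *      ((1LL << 3) - 1) << 32          = 0x0700000000
         *      perf_event_mask                 = 0x070000000f
         *
         * i.e. one bit per usable counter, with the fixed counters
         * starting at bit X86_PMC_IDX_FIXED (32), matching the layout
         * of the GLOBAL_CTRL/GLOBAL_STATUS MSRs.
         */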
1329
1330         perf_events_lapic_init();
1331         register_die_notifier(&perf_event_nmi_notifier);
1332
1333         unconstrained = (struct event_constraint)
1334                 __EVENT_CONSTRAINT(0, (1ULL << x86_pmu.num_events) - 1,
1335                                    0, x86_pmu.num_events);
1336
1337         if (x86_pmu.event_constraints) {
1338                 for_each_event_constraint(c, x86_pmu.event_constraints) {
1339                         if (c->cmask != INTEL_ARCH_FIXED_MASK)
1340                                 continue;
1341
1342                         c->idxmsk64 |= (1ULL << x86_pmu.num_events) - 1;
1343                         c->weight += x86_pmu.num_events;
1344                 }
1345         }
1346
1347         pr_info("... version:                %d\n",     x86_pmu.version);
1348         pr_info("... bit width:              %d\n",     x86_pmu.event_bits);
1349         pr_info("... generic registers:      %d\n",     x86_pmu.num_events);
1350         pr_info("... value mask:             %016Lx\n", x86_pmu.event_mask);
1351         pr_info("... max period:             %016Lx\n", x86_pmu.max_period);
1352         pr_info("... fixed-purpose events:   %d\n",     x86_pmu.num_events_fixed);
1353         pr_info("... event mask:             %016Lx\n", perf_event_mask);
1354
1355         perf_cpu_notifier(x86_pmu_notifier);
1356 }
1357
1358 static inline void x86_pmu_read(struct perf_event *event)
1359 {
1360         x86_perf_event_update(event);
1361 }
1362
1363 static const struct pmu pmu = {
1364         .enable         = x86_pmu_enable,
1365         .disable        = x86_pmu_disable,
1366         .start          = x86_pmu_start,
1367         .stop           = x86_pmu_stop,
1368         .read           = x86_pmu_read,
1369         .unthrottle     = x86_pmu_unthrottle,
1370 };
1371
1372 /*
1373  * validate that we can schedule this event
1374  */
1375 static int validate_event(struct perf_event *event)
1376 {
1377         struct cpu_hw_events *fake_cpuc;
1378         struct event_constraint *c;
1379         int ret = 0;
1380
1381         fake_cpuc = kmalloc(sizeof(*fake_cpuc), GFP_KERNEL | __GFP_ZERO);
1382         if (!fake_cpuc)
1383                 return -ENOMEM;
1384
1385         c = x86_pmu.get_event_constraints(fake_cpuc, event);
1386
1387         if (!c || !c->weight)
1388                 ret = -ENOSPC;
1389
1390         if (x86_pmu.put_event_constraints)
1391                 x86_pmu.put_event_constraints(fake_cpuc, event);
1392
1393         kfree(fake_cpuc);
1394
1395         return ret;
1396 }
1397
1398 /*
1399  * validate a single event group
1400  *
1401  * validation includes:
1402  *      - check events are compatible with each other
1403  *      - events do not compete for the same counter
1404  *      - number of events <= number of counters
1405  *
1406  * validation ensures the group can be loaded onto the
1407  * PMU if it was the only group available.
1408  */
1409 static int validate_group(struct perf_event *event)
1410 {
1411         struct perf_event *leader = event->group_leader;
1412         struct cpu_hw_events *fake_cpuc;
1413         int ret, n;
1414
1415         ret = -ENOMEM;
1416         fake_cpuc = kmalloc(sizeof(*fake_cpuc), GFP_KERNEL | __GFP_ZERO);
1417         if (!fake_cpuc)
1418                 goto out;
1419
1420         /*
1421          * the event is not yet connected with its
1422          * siblings, therefore we must first collect
1423          * existing siblings, then add the new event
1424          * before we can simulate the scheduling
1425          */
1426         ret = -ENOSPC;
1427         n = collect_events(fake_cpuc, leader, true);
1428         if (n < 0)
1429                 goto out_free;
1430
1431         fake_cpuc->n_events = n;
1432         n = collect_events(fake_cpuc, event, false);
1433         if (n < 0)
1434                 goto out_free;
1435
1436         fake_cpuc->n_events = n;
1437
1438         ret = x86_schedule_events(fake_cpuc, n, NULL);
1439
1440 out_free:
1441         kfree(fake_cpuc);
1442 out:
1443         return ret;
1444 }
1445
1446 const struct pmu *hw_perf_event_init(struct perf_event *event)
1447 {
1448         const struct pmu *tmp;
1449         int err;
1450
1451         err = __hw_perf_event_init(event);
1452         if (!err) {
1453                 /*
1454                  * we temporarily connect event to its pmu
1455                  * such that validate_group() can classify
1456                  * it as an x86 event using is_x86_event()
1457                  */
1458                 tmp = event->pmu;
1459                 event->pmu = &pmu;
1460
1461                 if (event->group_leader != event)
1462                         err = validate_group(event);
1463                 else
1464                         err = validate_event(event);
1465
1466                 event->pmu = tmp;
1467         }
1468         if (err) {
1469                 if (event->destroy)
1470                         event->destroy(event);
1471                 return ERR_PTR(err);
1472         }
1473
1474         return &pmu;
1475 }
1476
1477 /*
1478  * callchain support
1479  */
1480
1481 static inline
1482 void callchain_store(struct perf_callchain_entry *entry, u64 ip)
1483 {
1484         if (entry->nr < PERF_MAX_STACK_DEPTH)
1485                 entry->ip[entry->nr++] = ip;
1486 }
1487
1488 static DEFINE_PER_CPU(struct perf_callchain_entry, pmc_irq_entry);
1489 static DEFINE_PER_CPU(struct perf_callchain_entry, pmc_nmi_entry);
1490
1491
1492 static void
1493 backtrace_warning_symbol(void *data, char *msg, unsigned long symbol)
1494 {
1495         /* Ignore warnings */
1496 }
1497
1498 static void backtrace_warning(void *data, char *msg)
1499 {
1500         /* Ignore warnings */
1501 }
1502
1503 static int backtrace_stack(void *data, char *name)
1504 {
1505         return 0;
1506 }
1507
1508 static void backtrace_address(void *data, unsigned long addr, int reliable)
1509 {
1510         struct perf_callchain_entry *entry = data;
1511
1512         if (reliable)
1513                 callchain_store(entry, addr);
1514 }
1515
1516 static const struct stacktrace_ops backtrace_ops = {
1517         .warning                = backtrace_warning,
1518         .warning_symbol         = backtrace_warning_symbol,
1519         .stack                  = backtrace_stack,
1520         .address                = backtrace_address,
1521         .walk_stack             = print_context_stack_bp,
1522 };
1523
1524 #include "../dumpstack.h"
1525
1526 static void
1527 perf_callchain_kernel(struct pt_regs *regs, struct perf_callchain_entry *entry)
1528 {
1529         callchain_store(entry, PERF_CONTEXT_KERNEL);
1530         callchain_store(entry, regs->ip);
1531
1532         dump_trace(NULL, regs, NULL, regs->bp, &backtrace_ops, entry);
1533 }
1534
1535 /*
1536  * best effort, GUP based copy_from_user() that assumes IRQ or NMI context
1537  */
1538 static unsigned long
1539 copy_from_user_nmi(void *to, const void __user *from, unsigned long n)
1540 {
1541         unsigned long offset, addr = (unsigned long)from;
1542         int type = in_nmi() ? KM_NMI : KM_IRQ0;
1543         unsigned long size, len = 0;
1544         struct page *page;
1545         void *map;
1546         int ret;
1547
1548         do {
1549                 ret = __get_user_pages_fast(addr, 1, 0, &page);
1550                 if (!ret)
1551                         break;
1552
1553                 offset = addr & (PAGE_SIZE - 1);
1554                 size = min(PAGE_SIZE - offset, n - len);
1555
1556                 map = kmap_atomic(page, type);
1557                 memcpy(to, map+offset, size);
1558                 kunmap_atomic(map, type);
1559                 put_page(page);
1560
1561                 len  += size;
1562                 to   += size;
1563                 addr += size;
1564
1565         } while (len < n);
1566
1567         return len;
1568 }
1569
1570 static int copy_stack_frame(const void __user *fp, struct stack_frame *frame)
1571 {
1572         unsigned long bytes;
1573
1574         bytes = copy_from_user_nmi(frame, fp, sizeof(*frame));
1575
1576         return bytes == sizeof(*frame);
1577 }
1578
1579 static void
1580 perf_callchain_user(struct pt_regs *regs, struct perf_callchain_entry *entry)
1581 {
1582         struct stack_frame frame;
1583         const void __user *fp;
1584
1585         if (!user_mode(regs))
1586                 regs = task_pt_regs(current);
1587
1588         fp = (void __user *)regs->bp;
1589
1590         callchain_store(entry, PERF_CONTEXT_USER);
1591         callchain_store(entry, regs->ip);
1592
1593         while (entry->nr < PERF_MAX_STACK_DEPTH) {
1594                 frame.next_frame             = NULL;
1595                 frame.return_address = 0;
1596
1597                 if (!copy_stack_frame(fp, &frame))
1598                         break;
1599
1600                 if ((unsigned long)fp < regs->sp)
1601                         break;
1602
1603                 callchain_store(entry, frame.return_address);
1604                 fp = frame.next_frame;
1605         }
1606 }
1607
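/*
 * Sketch of the user stack layout the walk above relies on (frame
 * pointers enabled):
 *
 *      fp -> [ saved caller fp ]   frame.next_frame
 *            [ return address  ]   frame.return_address
 *
 * copy_stack_frame() pulls one such pair in from user space, the
 * return address is recorded in the callchain, and fp then follows
 * next_frame to the caller's frame; the fp < regs->sp test stops the
 * walk when the alleged frame would sit below the current stack
 * pointer and so cannot belong to a valid caller.
 */
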
1608 static void
1609 perf_do_callchain(struct pt_regs *regs, struct perf_callchain_entry *entry)
1610 {
1611         int is_user;
1612
1613         if (!regs)
1614                 return;
1615
1616         is_user = user_mode(regs);
1617
1618         if (is_user && current->state != TASK_RUNNING)
1619                 return;
1620
1621         if (!is_user)
1622                 perf_callchain_kernel(regs, entry);
1623
1624         if (current->mm)
1625                 perf_callchain_user(regs, entry);
1626 }
1627
1628 struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
1629 {
1630         struct perf_callchain_entry *entry;
1631
1632         if (in_nmi())
1633                 entry = &__get_cpu_var(pmc_nmi_entry);
1634         else
1635                 entry = &__get_cpu_var(pmc_irq_entry);
1636
1637         entry->nr = 0;
1638
1639         perf_do_callchain(regs, entry);
1640
1641         return entry;
1642 }