2 * Performance events core code:
4 * Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
5 * Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
6 * Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
7 * Copyright © 2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
9 * For licensing details see kernel-base/COPYING
14 #include <linux/cpu.h>
15 #include <linux/smp.h>
16 #include <linux/file.h>
17 #include <linux/poll.h>
18 #include <linux/slab.h>
19 #include <linux/sysfs.h>
20 #include <linux/dcache.h>
21 #include <linux/percpu.h>
22 #include <linux/ptrace.h>
23 #include <linux/vmstat.h>
24 #include <linux/vmalloc.h>
25 #include <linux/hardirq.h>
26 #include <linux/rculist.h>
27 #include <linux/uaccess.h>
28 #include <linux/syscalls.h>
29 #include <linux/anon_inodes.h>
30 #include <linux/kernel_stat.h>
31 #include <linux/perf_event.h>
32 #include <linux/ftrace_event.h>
33 #include <linux/hw_breakpoint.h>
35 #include <asm/irq_regs.h>
38 * Each CPU has a list of per CPU events:
40 static DEFINE_PER_CPU(struct perf_cpu_context, perf_cpu_context);
42 int perf_max_events __read_mostly = 1;
43 static int perf_reserved_percpu __read_mostly;
44 static int perf_overcommit __read_mostly = 1;
46 static atomic_t nr_events __read_mostly;
47 static atomic_t nr_mmap_events __read_mostly;
48 static atomic_t nr_comm_events __read_mostly;
49 static atomic_t nr_task_events __read_mostly;
52 * perf event paranoia level:
53 * -1 - not paranoid at all
54 * 0 - disallow raw tracepoint access for unpriv
55 * 1 - disallow cpu events for unpriv
56 * 2 - disallow kernel profiling for unpriv
58 int sysctl_perf_event_paranoid __read_mostly = 1;
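/*
 * The level is consulted through small helpers used later in this
 * file (perf_paranoid_cpu(), perf_paranoid_tracepoint_raw(), ...).
 * An illustrative sketch, assuming the usual definitions in
 * include/linux/perf_event.h:
 *
 *	static inline bool perf_paranoid_tracepoint_raw(void)
 *	{
 *		return sysctl_perf_event_paranoid > -1;
 *	}
 *
 *	static inline bool perf_paranoid_cpu(void)
 *	{
 *		return sysctl_perf_event_paranoid > 0;
 *	}
 *
 *	static inline bool perf_paranoid_kernel(void)
 *	{
 *		return sysctl_perf_event_paranoid > 1;
 *	}
 */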
60 int sysctl_perf_event_mlock __read_mostly = 512; /* 'free' kb per user */
63 * max perf event sample rate
65 int sysctl_perf_event_sample_rate __read_mostly = 100000;
67 static atomic64_t perf_event_id;
70 * Lock for (sysadmin-configurable) event reservations:
72 static DEFINE_SPINLOCK(perf_resource_lock);
75 * Architecture provided APIs - weak aliases:
77 extern __weak const struct pmu *hw_perf_event_init(struct perf_event *event)
82 void __weak hw_perf_disable(void) { barrier(); }
83 void __weak hw_perf_enable(void) { barrier(); }
86 hw_perf_group_sched_in(struct perf_event *group_leader,
87 struct perf_cpu_context *cpuctx,
88 struct perf_event_context *ctx)
93 void __weak perf_event_print_debug(void) { }
95 static DEFINE_PER_CPU(int, perf_disable_count);
97 void perf_disable(void)
99 if (!__get_cpu_var(perf_disable_count)++)
103 void perf_enable(void)
105 if (!--__get_cpu_var(perf_disable_count))
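/*
 * perf_disable()/perf_enable() nest per CPU; only the transitions to
 * and from zero touch the hardware. Illustrative example:
 *
 *	perf_disable();		// count 0 -> 1, calls hw_perf_disable()
 *	perf_disable();		// count 1 -> 2, no-op
 *	perf_enable();		// count 2 -> 1, no-op
 *	perf_enable();		// count 1 -> 0, calls hw_perf_enable()
 */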
109 static void get_ctx(struct perf_event_context *ctx)
111 WARN_ON(!atomic_inc_not_zero(&ctx->refcount));
114 static void free_ctx(struct rcu_head *head)
116 struct perf_event_context *ctx;
118 ctx = container_of(head, struct perf_event_context, rcu_head);
122 static void put_ctx(struct perf_event_context *ctx)
124 if (atomic_dec_and_test(&ctx->refcount)) {
126 put_ctx(ctx->parent_ctx);
128 put_task_struct(ctx->task);
129 call_rcu(&ctx->rcu_head, free_ctx);
133 static void unclone_ctx(struct perf_event_context *ctx)
135 if (ctx->parent_ctx) {
136 put_ctx(ctx->parent_ctx);
137 ctx->parent_ctx = NULL;
142 * If we inherit events we want to return the parent event id
145 static u64 primary_event_id(struct perf_event *event)
150 id = event->parent->id;
156 * Get the perf_event_context for a task and lock it.
157 * This has to cope with the fact that until it is locked,
158 * the context could get moved to another task.
160 static struct perf_event_context *
161 perf_lock_task_context(struct task_struct *task, unsigned long *flags)
163 struct perf_event_context *ctx;
167 ctx = rcu_dereference(task->perf_event_ctxp);
170 * If this context is a clone of another, it might
171 * get swapped for another underneath us by
172 * perf_event_task_sched_out, though the
173 * rcu_read_lock() protects us from any context
174 * getting freed. Lock the context and check if it
175 * got swapped before we could get the lock, and retry
176 * if so. If we locked the right context, then it
177 * can't get swapped on us any more.
179 raw_spin_lock_irqsave(&ctx->lock, *flags);
180 if (ctx != rcu_dereference(task->perf_event_ctxp)) {
181 raw_spin_unlock_irqrestore(&ctx->lock, *flags);
185 if (!atomic_inc_not_zero(&ctx->refcount)) {
186 raw_spin_unlock_irqrestore(&ctx->lock, *flags);
195 * Get the context for a task and increment its pin_count so it
196 * can't get swapped to another task. This also increments its
197 * reference count so that the context can't get freed.
199 static struct perf_event_context *perf_pin_task_context(struct task_struct *task)
201 struct perf_event_context *ctx;
204 ctx = perf_lock_task_context(task, &flags);
207 raw_spin_unlock_irqrestore(&ctx->lock, flags);
212 static void perf_unpin_context(struct perf_event_context *ctx)
216 raw_spin_lock_irqsave(&ctx->lock, flags);
218 raw_spin_unlock_irqrestore(&ctx->lock, flags);
222 static inline u64 perf_clock(void)
224 return cpu_clock(raw_smp_processor_id());
228 * Update the record of the current time in a context.
230 static void update_context_time(struct perf_event_context *ctx)
232 u64 now = perf_clock();
234 ctx->time += now - ctx->timestamp;
235 ctx->timestamp = now;
239 * Update the total_time_enabled and total_time_running fields for an event.
241 static void update_event_times(struct perf_event *event)
243 struct perf_event_context *ctx = event->ctx;
246 if (event->state < PERF_EVENT_STATE_INACTIVE ||
247 event->group_leader->state < PERF_EVENT_STATE_INACTIVE)
253 run_end = event->tstamp_stopped;
255 event->total_time_enabled = run_end - event->tstamp_enabled;
257 if (event->state == PERF_EVENT_STATE_INACTIVE)
258 run_end = event->tstamp_stopped;
262 event->total_time_running = run_end - event->tstamp_running;
265 static struct list_head *
266 ctx_group_list(struct perf_event *event, struct perf_event_context *ctx)
268 if (event->attr.pinned)
269 return &ctx->pinned_groups;
271 return &ctx->flexible_groups;
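/*
 * Example: an event created with attr.pinned = 1 lands on
 * ctx->pinned_groups and is always scheduled first; everything else
 * goes on ctx->flexible_groups, which rotate_ctx() round-robins when
 * there are more events than hardware counters.
 */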
275 * Add an event to the lists for its context.
276 * Must be called with ctx->mutex and ctx->lock held.
279 list_add_event(struct perf_event *event, struct perf_event_context *ctx)
281 struct perf_event *group_leader = event->group_leader;
284 * Depending on whether it is a standalone or sibling event,
285 * add it straight to the context's event list, or to the group
286 * leader's sibling list:
288 if (group_leader == event) {
289 struct list_head *list;
291 if (is_software_event(event))
292 event->group_flags |= PERF_GROUP_SOFTWARE;
294 list = ctx_group_list(event, ctx);
295 list_add_tail(&event->group_entry, list);
297 if (group_leader->group_flags & PERF_GROUP_SOFTWARE &&
298 !is_software_event(event))
299 group_leader->group_flags &= ~PERF_GROUP_SOFTWARE;
301 list_add_tail(&event->group_entry, &group_leader->sibling_list);
302 group_leader->nr_siblings++;
305 list_add_rcu(&event->event_entry, &ctx->event_list);
307 if (event->attr.inherit_stat)
312 * Remove an event from the lists for its context.
313 * Must be called with ctx->mutex and ctx->lock held.
316 list_del_event(struct perf_event *event, struct perf_event_context *ctx)
318 struct perf_event *sibling, *tmp;
320 if (list_empty(&event->group_entry))
323 if (event->attr.inherit_stat)
326 list_del_init(&event->group_entry);
327 list_del_rcu(&event->event_entry);
329 if (event->group_leader != event)
330 event->group_leader->nr_siblings--;
332 update_event_times(event);
335 * If event was in error state, then keep it
336 * that way, otherwise bogus counts will be
337 * returned on read(). The only way to get out
338 * of error state is by explicit re-enabling
341 if (event->state > PERF_EVENT_STATE_OFF)
342 event->state = PERF_EVENT_STATE_OFF;
344 if (event->state > PERF_EVENT_STATE_FREE)
348 * If this was a group event with sibling events then
349 * upgrade the siblings to singleton events by adding them
350 * to the context list directly:
352 list_for_each_entry_safe(sibling, tmp, &event->sibling_list, group_entry) {
353 struct list_head *list;
355 list = ctx_group_list(event, ctx);
356 list_move_tail(&sibling->group_entry, list);
357 sibling->group_leader = sibling;
359 /* Inherit group flags from the previous leader */
360 sibling->group_flags = event->group_flags;
365 event_sched_out(struct perf_event *event,
366 struct perf_cpu_context *cpuctx,
367 struct perf_event_context *ctx)
369 if (event->state != PERF_EVENT_STATE_ACTIVE)
372 event->state = PERF_EVENT_STATE_INACTIVE;
373 if (event->pending_disable) {
374 event->pending_disable = 0;
375 event->state = PERF_EVENT_STATE_OFF;
377 event->tstamp_stopped = ctx->time;
378 event->pmu->disable(event);
381 if (!is_software_event(event))
382 cpuctx->active_oncpu--;
384 if (event->attr.exclusive || !cpuctx->active_oncpu)
385 cpuctx->exclusive = 0;
389 group_sched_out(struct perf_event *group_event,
390 struct perf_cpu_context *cpuctx,
391 struct perf_event_context *ctx)
393 struct perf_event *event;
395 if (group_event->state != PERF_EVENT_STATE_ACTIVE)
398 event_sched_out(group_event, cpuctx, ctx);
401 * Schedule out siblings (if any):
403 list_for_each_entry(event, &group_event->sibling_list, group_entry)
404 event_sched_out(event, cpuctx, ctx);
406 if (group_event->attr.exclusive)
407 cpuctx->exclusive = 0;
411 * Cross CPU call to remove a performance event
413 * We disable the event on the hardware level first. After that we
414 * remove it from the context list.
416 static void __perf_event_remove_from_context(void *info)
418 struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
419 struct perf_event *event = info;
420 struct perf_event_context *ctx = event->ctx;
423 * If this is a task context, we need to check whether it is
424 * the current task context of this cpu. If not it has been
425 * scheduled out before the smp call arrived.
427 if (ctx->task && cpuctx->task_ctx != ctx)
430 raw_spin_lock(&ctx->lock);
432 * Protect the list operation against NMI by disabling the
433 * events on a global level.
437 event_sched_out(event, cpuctx, ctx);
439 list_del_event(event, ctx);
443 * Allow more per task events with respect to the
446 cpuctx->max_pertask =
447 min(perf_max_events - ctx->nr_events,
448 perf_max_events - perf_reserved_percpu);
452 raw_spin_unlock(&ctx->lock);
457 * Remove the event from a task's (or a CPU's) list of events.
459 * Must be called with ctx->mutex held.
461 * CPU events are removed with a smp call. For task events we only
462 * call when the task is on a CPU.
464 * If event->ctx is a cloned context, callers must make sure that
465 * every task struct that event->ctx->task could possibly point to
466 * remains valid. This is OK when called from perf_release since
467 * that only calls us on the top-level context, which can't be a clone.
468 * When called from perf_event_exit_task, it's OK because the
469 * context has been detached from its task.
471 static void perf_event_remove_from_context(struct perf_event *event)
473 struct perf_event_context *ctx = event->ctx;
474 struct task_struct *task = ctx->task;
478 * Per cpu events are removed via an smp call and
479 * the removal is always successful.
481 smp_call_function_single(event->cpu,
482 __perf_event_remove_from_context,
488 task_oncpu_function_call(task, __perf_event_remove_from_context,
491 raw_spin_lock_irq(&ctx->lock);
493 * If the context is active we need to retry the smp call.
495 if (ctx->nr_active && !list_empty(&event->group_entry)) {
496 raw_spin_unlock_irq(&ctx->lock);
501 * The lock prevents this context from being scheduled in, so we
502 * can remove the event safely, if the call above did not
505 if (!list_empty(&event->group_entry))
506 list_del_event(event, ctx);
507 raw_spin_unlock_irq(&ctx->lock);
511 * Update total_time_enabled and total_time_running for all events in a group.
513 static void update_group_times(struct perf_event *leader)
515 struct perf_event *event;
517 update_event_times(leader);
518 list_for_each_entry(event, &leader->sibling_list, group_entry)
519 update_event_times(event);
523 * Cross CPU call to disable a performance event
525 static void __perf_event_disable(void *info)
527 struct perf_event *event = info;
528 struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
529 struct perf_event_context *ctx = event->ctx;
532 * If this is a per-task event, need to check whether this
533 * event's task is the current task on this cpu.
535 if (ctx->task && cpuctx->task_ctx != ctx)
538 raw_spin_lock(&ctx->lock);
541 * If the event is on, turn it off.
542 * If it is in error state, leave it in error state.
544 if (event->state >= PERF_EVENT_STATE_INACTIVE) {
545 update_context_time(ctx);
546 update_group_times(event);
547 if (event == event->group_leader)
548 group_sched_out(event, cpuctx, ctx);
550 event_sched_out(event, cpuctx, ctx);
551 event->state = PERF_EVENT_STATE_OFF;
554 raw_spin_unlock(&ctx->lock);
560 * If event->ctx is a cloned context, callers must make sure that
561 * every task struct that event->ctx->task could possibly point to
562 * remains valid. This condition is satisfied when called through
563 * perf_event_for_each_child or perf_event_for_each because they
564 * hold the top-level event's child_mutex, so any descendant that
565 * goes to exit will block in sync_child_event.
566 * When called from perf_pending_event it's OK because event->ctx
567 * is the current context on this CPU and preemption is disabled,
568 * hence we can't get into perf_event_task_sched_out for this context.
570 void perf_event_disable(struct perf_event *event)
572 struct perf_event_context *ctx = event->ctx;
573 struct task_struct *task = ctx->task;
577 * Disable the event on the cpu that it's on
579 smp_call_function_single(event->cpu, __perf_event_disable,
585 task_oncpu_function_call(task, __perf_event_disable, event);
587 raw_spin_lock_irq(&ctx->lock);
589 * If the event is still active, we need to retry the cross-call.
591 if (event->state == PERF_EVENT_STATE_ACTIVE) {
592 raw_spin_unlock_irq(&ctx->lock);
597 * Since we have the lock this context can't be scheduled
598 * in, so we can change the state safely.
600 if (event->state == PERF_EVENT_STATE_INACTIVE) {
601 update_group_times(event);
602 event->state = PERF_EVENT_STATE_OFF;
605 raw_spin_unlock_irq(&ctx->lock);
609 event_sched_in(struct perf_event *event,
610 struct perf_cpu_context *cpuctx,
611 struct perf_event_context *ctx)
613 if (event->state <= PERF_EVENT_STATE_OFF)
616 event->state = PERF_EVENT_STATE_ACTIVE;
617 event->oncpu = smp_processor_id();
619 * The new state must be visible before we turn it on in the hardware:
623 if (event->pmu->enable(event)) {
624 event->state = PERF_EVENT_STATE_INACTIVE;
629 event->tstamp_running += ctx->time - event->tstamp_stopped;
631 if (!is_software_event(event))
632 cpuctx->active_oncpu++;
635 if (event->attr.exclusive)
636 cpuctx->exclusive = 1;
642 group_sched_in(struct perf_event *group_event,
643 struct perf_cpu_context *cpuctx,
644 struct perf_event_context *ctx)
646 struct perf_event *event, *partial_group;
649 if (group_event->state == PERF_EVENT_STATE_OFF)
652 ret = hw_perf_group_sched_in(group_event, cpuctx, ctx);
654 return ret < 0 ? ret : 0;
656 if (event_sched_in(group_event, cpuctx, ctx))
660 * Schedule in siblings as one group (if any):
662 list_for_each_entry(event, &group_event->sibling_list, group_entry) {
663 if (event_sched_in(event, cpuctx, ctx)) {
664 partial_group = event;
673 * Groups can be scheduled in as one unit only, so undo any
674 * partial group before returning:
676 list_for_each_entry(event, &group_event->sibling_list, group_entry) {
677 if (event == partial_group)
679 event_sched_out(event, cpuctx, ctx);
681 event_sched_out(group_event, cpuctx, ctx);
687 * Work out whether we can put this event group on the CPU now.
689 static int group_can_go_on(struct perf_event *event,
690 struct perf_cpu_context *cpuctx,
694 * Groups consisting entirely of software events can always go on.
696 if (event->group_flags & PERF_GROUP_SOFTWARE)
699 * If an exclusive group is already on, no other hardware
702 if (cpuctx->exclusive)
705 * If this group is exclusive and there are already
706 * events on the CPU, it can't go on.
708 if (event->attr.exclusive && cpuctx->active_oncpu)
711 * Otherwise, try to add it if all previous groups were able
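/*
 * Illustrative summary of the rules above:
 *
 *	group is all-software			-> always goes on
 *	cpuctx->exclusive already set		-> nothing else goes on
 *	group exclusive, CPU has active events	-> does not go on
 *	otherwise				-> goes on iff can_add_hw
 */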
717 static void add_event_to_ctx(struct perf_event *event,
718 struct perf_event_context *ctx)
720 list_add_event(event, ctx);
721 event->tstamp_enabled = ctx->time;
722 event->tstamp_running = ctx->time;
723 event->tstamp_stopped = ctx->time;
727 * Cross CPU call to install and enable a performance event
729 * Must be called with ctx->mutex held
731 static void __perf_install_in_context(void *info)
733 struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
734 struct perf_event *event = info;
735 struct perf_event_context *ctx = event->ctx;
736 struct perf_event *leader = event->group_leader;
740 * If this is a task context, we need to check whether it is
741 * the current task context of this cpu. If not it has been
742 * scheduled out before the smp call arrived.
743 * Or possibly this is the right context but it isn't
744 * on this cpu because it had no events.
746 if (ctx->task && cpuctx->task_ctx != ctx) {
747 if (cpuctx->task_ctx || ctx->task != current)
749 cpuctx->task_ctx = ctx;
752 raw_spin_lock(&ctx->lock);
754 update_context_time(ctx);
757 * Protect the list operation against NMI by disabling the
758 * events on a global level. NOP for non NMI based events.
762 add_event_to_ctx(event, ctx);
764 if (event->cpu != -1 && event->cpu != smp_processor_id())
768 * Don't put the event on if it is disabled or if
769 * it is in a group and the group isn't on.
771 if (event->state != PERF_EVENT_STATE_INACTIVE ||
772 (leader != event && leader->state != PERF_EVENT_STATE_ACTIVE))
776 * An exclusive event can't go on if there are already active
777 * hardware events, and no hardware event can go on if there
778 * is already an exclusive event on.
780 if (!group_can_go_on(event, cpuctx, 1))
783 err = event_sched_in(event, cpuctx, ctx);
787 * This event couldn't go on. If it is in a group
788 * then we have to pull the whole group off.
789 * If the event group is pinned then put it in error state.
792 group_sched_out(leader, cpuctx, ctx);
793 if (leader->attr.pinned) {
794 update_group_times(leader);
795 leader->state = PERF_EVENT_STATE_ERROR;
799 if (!err && !ctx->task && cpuctx->max_pertask)
800 cpuctx->max_pertask--;
805 raw_spin_unlock(&ctx->lock);
809 * Attach a performance event to a context
811 * First we add the event to the list with the hardware enable bit
812 * in event->hw_config cleared.
814 * If the event is attached to a task which is on a CPU we use a smp
815 * call to enable it in the task context. The task might have been
816 * scheduled away, but we check this in the smp call again.
818 * Must be called with ctx->mutex held.
821 perf_install_in_context(struct perf_event_context *ctx,
822 struct perf_event *event,
825 struct task_struct *task = ctx->task;
829 * Per cpu events are installed via an smp call and
830 * the install is always successful.
832 smp_call_function_single(cpu, __perf_install_in_context,
838 task_oncpu_function_call(task, __perf_install_in_context,
841 raw_spin_lock_irq(&ctx->lock);
843 * If the context is active we need to retry the smp call.
845 if (ctx->is_active && list_empty(&event->group_entry)) {
846 raw_spin_unlock_irq(&ctx->lock);
851 * The lock prevents this context from being scheduled in, so we
852 * can add the event safely, if the call above did not
855 if (list_empty(&event->group_entry))
856 add_event_to_ctx(event, ctx);
857 raw_spin_unlock_irq(&ctx->lock);
861 * Put an event into inactive state and update time fields.
862 * Enabling the leader of a group effectively enables all
863 * the group members that aren't explicitly disabled, so we
864 * have to update their ->tstamp_enabled also.
865 * Note: this works for group members as well as group leaders
866 * since the non-leader members' sibling_lists will be empty.
868 static void __perf_event_mark_enabled(struct perf_event *event,
869 struct perf_event_context *ctx)
871 struct perf_event *sub;
873 event->state = PERF_EVENT_STATE_INACTIVE;
874 event->tstamp_enabled = ctx->time - event->total_time_enabled;
875 list_for_each_entry(sub, &event->sibling_list, group_entry)
876 if (sub->state >= PERF_EVENT_STATE_INACTIVE)
877 sub->tstamp_enabled =
878 ctx->time - sub->total_time_enabled;
882 * Cross CPU call to enable a performance event
884 static void __perf_event_enable(void *info)
886 struct perf_event *event = info;
887 struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
888 struct perf_event_context *ctx = event->ctx;
889 struct perf_event *leader = event->group_leader;
893 * If this is a per-task event, need to check whether this
894 * event's task is the current task on this cpu.
896 if (ctx->task && cpuctx->task_ctx != ctx) {
897 if (cpuctx->task_ctx || ctx->task != current)
899 cpuctx->task_ctx = ctx;
902 raw_spin_lock(&ctx->lock);
904 update_context_time(ctx);
906 if (event->state >= PERF_EVENT_STATE_INACTIVE)
908 __perf_event_mark_enabled(event, ctx);
910 if (event->cpu != -1 && event->cpu != smp_processor_id())
914 * If the event is in a group and isn't the group leader,
915 * then don't put it on unless the group is on.
917 if (leader != event && leader->state != PERF_EVENT_STATE_ACTIVE)
920 if (!group_can_go_on(event, cpuctx, 1)) {
925 err = group_sched_in(event, cpuctx, ctx);
927 err = event_sched_in(event, cpuctx, ctx);
933 * If this event can't go on and it's part of a
934 * group, then the whole group has to come off.
937 group_sched_out(leader, cpuctx, ctx);
938 if (leader->attr.pinned) {
939 update_group_times(leader);
940 leader->state = PERF_EVENT_STATE_ERROR;
945 raw_spin_unlock(&ctx->lock);
951 * If event->ctx is a cloned context, callers must make sure that
952 * every task struct that event->ctx->task could possibly point to
953 * remains valid. This condition is satisfied when called through
954 * perf_event_for_each_child or perf_event_for_each as described
955 * for perf_event_disable.
957 void perf_event_enable(struct perf_event *event)
959 struct perf_event_context *ctx = event->ctx;
960 struct task_struct *task = ctx->task;
964 * Enable the event on the cpu that it's on
966 smp_call_function_single(event->cpu, __perf_event_enable,
971 raw_spin_lock_irq(&ctx->lock);
972 if (event->state >= PERF_EVENT_STATE_INACTIVE)
976 * If the event is in error state, clear that first.
977 * That way, if we see the event in error state below, we
978 * know that it has gone back into error state, as distinct
979 * from the task having been scheduled away before the
980 * cross-call arrived.
982 if (event->state == PERF_EVENT_STATE_ERROR)
983 event->state = PERF_EVENT_STATE_OFF;
986 raw_spin_unlock_irq(&ctx->lock);
987 task_oncpu_function_call(task, __perf_event_enable, event);
989 raw_spin_lock_irq(&ctx->lock);
992 * If the context is active and the event is still off,
993 * we need to retry the cross-call.
995 if (ctx->is_active && event->state == PERF_EVENT_STATE_OFF)
999 * Since we have the lock this context can't be scheduled
1000 * in, so we can change the state safely.
1002 if (event->state == PERF_EVENT_STATE_OFF)
1003 __perf_event_mark_enabled(event, ctx);
1006 raw_spin_unlock_irq(&ctx->lock);
1009 static int perf_event_refresh(struct perf_event *event, int refresh)
1012 * not supported on inherited events
1014 if (event->attr.inherit)
1017 atomic_add(refresh, &event->event_limit);
1018 perf_event_enable(event);
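/*
 * Illustrative user-space usage: after an overflow signal has fired
 * and disabled a counter,
 *
 *	ioctl(fd, PERF_EVENT_IOC_REFRESH, 3);
 *
 * adds 3 to event_limit and re-enables the event, arming it for three
 * more overflow notifications.
 */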
1024 EVENT_FLEXIBLE = 0x1,
1026 EVENT_ALL = EVENT_FLEXIBLE | EVENT_PINNED,
1029 static void ctx_sched_out(struct perf_event_context *ctx,
1030 struct perf_cpu_context *cpuctx,
1031 enum event_type_t event_type)
1033 struct perf_event *event;
1035 raw_spin_lock(&ctx->lock);
1037 if (likely(!ctx->nr_events))
1039 update_context_time(ctx);
1042 if (!ctx->nr_active)
1045 if (event_type & EVENT_PINNED)
1046 list_for_each_entry(event, &ctx->pinned_groups, group_entry)
1047 group_sched_out(event, cpuctx, ctx);
1049 if (event_type & EVENT_FLEXIBLE)
1050 list_for_each_entry(event, &ctx->flexible_groups, group_entry)
1051 group_sched_out(event, cpuctx, ctx);
1056 raw_spin_unlock(&ctx->lock);
1060 * Test whether two contexts are equivalent, i.e. whether they
1061 * have both been cloned from the same version of the same context
1062 * and they both have the same number of enabled events.
1063 * If the number of enabled events is the same, then the set
1064 * of enabled events should be the same, because these are both
1065 * inherited contexts, therefore we can't access individual events
1066 * in them directly with an fd; we can only enable/disable all
1067 * events via prctl, or enable/disable all events in a family
1068 * via ioctl, which will have the same effect on both contexts.
1070 static int context_equiv(struct perf_event_context *ctx1,
1071 struct perf_event_context *ctx2)
1073 return ctx1->parent_ctx && ctx1->parent_ctx == ctx2->parent_ctx
1074 && ctx1->parent_gen == ctx2->parent_gen
1075 && !ctx1->pin_count && !ctx2->pin_count;
1078 static void __perf_event_sync_stat(struct perf_event *event,
1079 struct perf_event *next_event)
1083 if (!event->attr.inherit_stat)
1087 * Update the event value, we cannot use perf_event_read()
1088 * because we're in the middle of a context switch and have IRQs
1089 * disabled, which upsets smp_call_function_single(), however
1090 * we know the event must be on the current CPU, therefore we
1091 * don't need to use it.
1093 switch (event->state) {
1094 case PERF_EVENT_STATE_ACTIVE:
1095 event->pmu->read(event);
1098 case PERF_EVENT_STATE_INACTIVE:
1099 update_event_times(event);
1107 * In order to keep per-task stats reliable we need to flip the event
1108 * values when we flip the contexts.
1110 value = atomic64_read(&next_event->count);
1111 value = atomic64_xchg(&event->count, value);
1112 atomic64_set(&next_event->count, value);
1114 swap(event->total_time_enabled, next_event->total_time_enabled);
1115 swap(event->total_time_running, next_event->total_time_running);
1118 * Since we swizzled the values, update the user visible data too.
1120 perf_event_update_userpage(event);
1121 perf_event_update_userpage(next_event);
1124 #define list_next_entry(pos, member) \
1125 list_entry(pos->member.next, typeof(*pos), member)
1127 static void perf_event_sync_stat(struct perf_event_context *ctx,
1128 struct perf_event_context *next_ctx)
1130 struct perf_event *event, *next_event;
1135 update_context_time(ctx);
1137 event = list_first_entry(&ctx->event_list,
1138 struct perf_event, event_entry);
1140 next_event = list_first_entry(&next_ctx->event_list,
1141 struct perf_event, event_entry);
1143 while (&event->event_entry != &ctx->event_list &&
1144 &next_event->event_entry != &next_ctx->event_list) {
1146 __perf_event_sync_stat(event, next_event);
1148 event = list_next_entry(event, event_entry);
1149 next_event = list_next_entry(next_event, event_entry);
1154 * Called from scheduler to remove the events of the current task,
1155 * with interrupts disabled.
1157 * We stop each event and update the event value in event->count.
1159 * This does not protect us against NMI, but disable()
1160 * sets the disabled bit in the control field of event _before_
1161 * accessing the event control register. If a NMI hits, then it will
1162 * not restart the event.
1164 void perf_event_task_sched_out(struct task_struct *task,
1165 struct task_struct *next)
1167 struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
1168 struct perf_event_context *ctx = task->perf_event_ctxp;
1169 struct perf_event_context *next_ctx;
1170 struct perf_event_context *parent;
1173 perf_sw_event(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, 1, NULL, 0);
1175 if (likely(!ctx || !cpuctx->task_ctx))
1179 parent = rcu_dereference(ctx->parent_ctx);
1180 next_ctx = next->perf_event_ctxp;
1181 if (parent && next_ctx &&
1182 rcu_dereference(next_ctx->parent_ctx) == parent) {
1184 * Looks like the two contexts are clones, so we might be
1185 * able to optimize the context switch. We lock both
1186 * contexts and check that they are clones under the
1187 * lock (including re-checking that neither has been
1188 * uncloned in the meantime). It doesn't matter which
1189 * order we take the locks because no other cpu could
1190 * be trying to lock both of these tasks.
1192 raw_spin_lock(&ctx->lock);
1193 raw_spin_lock_nested(&next_ctx->lock, SINGLE_DEPTH_NESTING);
1194 if (context_equiv(ctx, next_ctx)) {
1196 * XXX do we need a memory barrier of sorts
1197 * wrt rcu_dereference() of perf_event_ctxp
1199 task->perf_event_ctxp = next_ctx;
1200 next->perf_event_ctxp = ctx;
1202 next_ctx->task = task;
1205 perf_event_sync_stat(ctx, next_ctx);
1207 raw_spin_unlock(&next_ctx->lock);
1208 raw_spin_unlock(&ctx->lock);
1213 ctx_sched_out(ctx, cpuctx, EVENT_ALL);
1214 cpuctx->task_ctx = NULL;
1218 static void task_ctx_sched_out(struct perf_event_context *ctx,
1219 enum event_type_t event_type)
1221 struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
1223 if (!cpuctx->task_ctx)
1226 if (WARN_ON_ONCE(ctx != cpuctx->task_ctx))
1229 ctx_sched_out(ctx, cpuctx, event_type);
1230 cpuctx->task_ctx = NULL;
1234 * Called with IRQs disabled
1236 static void __perf_event_task_sched_out(struct perf_event_context *ctx)
1238 task_ctx_sched_out(ctx, EVENT_ALL);
1242 * Called with IRQs disabled
1244 static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
1245 enum event_type_t event_type)
1247 ctx_sched_out(&cpuctx->ctx, cpuctx, event_type);
1251 ctx_pinned_sched_in(struct perf_event_context *ctx,
1252 struct perf_cpu_context *cpuctx)
1254 struct perf_event *event;
1256 list_for_each_entry(event, &ctx->pinned_groups, group_entry) {
1257 if (event->state <= PERF_EVENT_STATE_OFF)
1259 if (event->cpu != -1 && event->cpu != smp_processor_id())
1262 if (group_can_go_on(event, cpuctx, 1))
1263 group_sched_in(event, cpuctx, ctx);
1266 * If this pinned group hasn't been scheduled,
1267 * put it in error state.
1269 if (event->state == PERF_EVENT_STATE_INACTIVE) {
1270 update_group_times(event);
1271 event->state = PERF_EVENT_STATE_ERROR;
1277 ctx_flexible_sched_in(struct perf_event_context *ctx,
1278 struct perf_cpu_context *cpuctx)
1280 struct perf_event *event;
1283 list_for_each_entry(event, &ctx->flexible_groups, group_entry) {
1284 /* Ignore events in OFF or ERROR state */
1285 if (event->state <= PERF_EVENT_STATE_OFF)
1288 * Listen to the 'cpu' scheduling filter constraint
1291 if (event->cpu != -1 && event->cpu != smp_processor_id())
1294 if (group_can_go_on(event, cpuctx, can_add_hw))
1295 if (group_sched_in(event, cpuctx, ctx))
1301 ctx_sched_in(struct perf_event_context *ctx,
1302 struct perf_cpu_context *cpuctx,
1303 enum event_type_t event_type)
1305 raw_spin_lock(&ctx->lock);
1307 if (likely(!ctx->nr_events))
1310 ctx->timestamp = perf_clock();
1315 * First go through the list and put on any pinned groups
1316 * in order to give them the best chance of going on.
1318 if (event_type & EVENT_PINNED)
1319 ctx_pinned_sched_in(ctx, cpuctx);
1321 /* Then walk through the lower prio flexible groups */
1322 if (event_type & EVENT_FLEXIBLE)
1323 ctx_flexible_sched_in(ctx, cpuctx);
1327 raw_spin_unlock(&ctx->lock);
1330 static void cpu_ctx_sched_in(struct perf_cpu_context *cpuctx,
1331 enum event_type_t event_type)
1333 struct perf_event_context *ctx = &cpuctx->ctx;
1335 ctx_sched_in(ctx, cpuctx, event_type);
1338 static void task_ctx_sched_in(struct task_struct *task,
1339 enum event_type_t event_type)
1341 struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
1342 struct perf_event_context *ctx = task->perf_event_ctxp;
1346 if (cpuctx->task_ctx == ctx)
1348 ctx_sched_in(ctx, cpuctx, event_type);
1349 cpuctx->task_ctx = ctx;
1352 * Called from scheduler to add the events of the current task
1353 * with interrupts disabled.
1355 * We restore the event value and then enable it.
1357 * This does not protect us against NMI, but enable()
1358 * sets the enabled bit in the control field of event _before_
1359 * accessing the event control register. If a NMI hits, then it will
1360 * keep the event running.
1362 void perf_event_task_sched_in(struct task_struct *task)
1364 struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
1365 struct perf_event_context *ctx = task->perf_event_ctxp;
1370 if (cpuctx->task_ctx == ctx)
1374 * We want to keep the following priority order:
1375 * cpu pinned (that don't need to move), task pinned,
1376 * cpu flexible, task flexible.
1378 cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);
1380 ctx_sched_in(ctx, cpuctx, EVENT_PINNED);
1381 cpu_ctx_sched_in(cpuctx, EVENT_FLEXIBLE);
1382 ctx_sched_in(ctx, cpuctx, EVENT_FLEXIBLE);
1384 cpuctx->task_ctx = ctx;
1387 #define MAX_INTERRUPTS (~0ULL)
1389 static void perf_log_throttle(struct perf_event *event, int enable);
1391 static u64 perf_calculate_period(struct perf_event *event, u64 nsec, u64 count)
1393 u64 frequency = event->attr.sample_freq;
1394 u64 sec = NSEC_PER_SEC;
1395 u64 divisor, dividend;
1397 int count_fls, nsec_fls, frequency_fls, sec_fls;
1399 count_fls = fls64(count);
1400 nsec_fls = fls64(nsec);
1401 frequency_fls = fls64(frequency);
1405 * We got @count in @nsec, with a target of sample_freq HZ
1406 * the target period becomes:
1407 *
1408 *             @count * 10^9
1409 * period = -------------------
1410 *          @nsec * sample_freq
1415 * Reduce accuracy by one bit such that @a and @b converge
1416 * to a similar magnitude.
1418 #define REDUCE_FLS(a, b)		\
1419 do {					\
1420 	if (a##_fls > b##_fls) {	\
1421 		a >>= 1;		\
1422 		a##_fls--;		\
1423 	} else {			\
1424 		b >>= 1;		\
1425 		b##_fls--;		\
1426 	}				\
1427 } while (0)
1430 * Reduce accuracy until either term fits in a u64, then proceed with
1431 * the other, so that finally we can do a u64/u64 division.
1433 while (count_fls + sec_fls > 64 && nsec_fls + frequency_fls > 64) {
1434 REDUCE_FLS(nsec, frequency);
1435 REDUCE_FLS(sec, count);
1438 if (count_fls + sec_fls > 64) {
1439 divisor = nsec * frequency;
1441 while (count_fls + sec_fls > 64) {
1442 REDUCE_FLS(count, sec);
1446 dividend = count * sec;
1448 dividend = count * sec;
1450 while (nsec_fls + frequency_fls > 64) {
1451 REDUCE_FLS(nsec, frequency);
1455 divisor = nsec * frequency;
1458 return div64_u64(dividend, divisor);
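/*
 * Worked example (illustrative numbers): with sample_freq = 1000 Hz
 * and @count = 2,000,000 events observed in @nsec = 10,000,000 ns:
 *
 *	period = (2e6 * 1e9) / (1e7 * 1000) = 200,000
 *
 * i.e. one sample every 200,000 events gives ~1000 samples/sec.
 */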
1461 static void perf_event_stop(struct perf_event *event)
1463 if (!event->pmu->stop)
1464 return event->pmu->disable(event);
1466 return event->pmu->stop(event);
1469 static int perf_event_start(struct perf_event *event)
1471 if (!event->pmu->start)
1472 return event->pmu->enable(event);
1474 return event->pmu->start(event);
1477 static void perf_adjust_period(struct perf_event *event, u64 nsec, u64 count)
1479 struct hw_perf_event *hwc = &event->hw;
1480 u64 period, sample_period;
1483 period = perf_calculate_period(event, nsec, count);
1485 delta = (s64)(period - hwc->sample_period);
1486 delta = (delta + 7) / 8; /* low pass filter */
1488 sample_period = hwc->sample_period + delta;
1493 hwc->sample_period = sample_period;
1495 if (atomic64_read(&hwc->period_left) > 8*sample_period) {
1497 perf_event_stop(event);
1498 atomic64_set(&hwc->period_left, 0);
1499 perf_event_start(event);
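/*
 * Example of the low pass filter above (illustrative numbers): with
 * hwc->sample_period = 100,000 and a freshly computed period of
 * 180,000, delta = (80,000 + 7) / 8 = 10,000, so the sample period
 * moves to 110,000 instead of jumping straight to the new estimate.
 */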
1504 static void perf_ctx_adjust_freq(struct perf_event_context *ctx)
1506 struct perf_event *event;
1507 struct hw_perf_event *hwc;
1508 u64 interrupts, now;
1511 raw_spin_lock(&ctx->lock);
1512 list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
1513 if (event->state != PERF_EVENT_STATE_ACTIVE)
1516 if (event->cpu != -1 && event->cpu != smp_processor_id())
1521 interrupts = hwc->interrupts;
1522 hwc->interrupts = 0;
1525 * unthrottle events on the tick
1527 if (interrupts == MAX_INTERRUPTS) {
1528 perf_log_throttle(event, 1);
1530 event->pmu->unthrottle(event);
1534 if (!event->attr.freq || !event->attr.sample_freq)
1538 event->pmu->read(event);
1539 now = atomic64_read(&event->count);
1540 delta = now - hwc->freq_count_stamp;
1541 hwc->freq_count_stamp = now;
1544 perf_adjust_period(event, TICK_NSEC, delta);
1547 raw_spin_unlock(&ctx->lock);
1551 * Round-robin a context's events:
1553 static void rotate_ctx(struct perf_event_context *ctx)
1555 raw_spin_lock(&ctx->lock);
1557 /* Rotate the first entry of the non-pinned (flexible) groups to last */
1558 list_rotate_left(&ctx->flexible_groups);
1560 raw_spin_unlock(&ctx->lock);
1563 void perf_event_task_tick(struct task_struct *curr)
1565 struct perf_cpu_context *cpuctx;
1566 struct perf_event_context *ctx;
1569 if (!atomic_read(&nr_events))
1572 cpuctx = &__get_cpu_var(perf_cpu_context);
1573 if (cpuctx->ctx.nr_events &&
1574 cpuctx->ctx.nr_events != cpuctx->ctx.nr_active)
1577 ctx = curr->perf_event_ctxp;
1578 if (ctx && ctx->nr_events && ctx->nr_events != ctx->nr_active)
1581 perf_ctx_adjust_freq(&cpuctx->ctx);
1583 perf_ctx_adjust_freq(ctx);
1589 cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);
1591 task_ctx_sched_out(ctx, EVENT_FLEXIBLE);
1593 rotate_ctx(&cpuctx->ctx);
1597 cpu_ctx_sched_in(cpuctx, EVENT_FLEXIBLE);
1599 task_ctx_sched_in(curr, EVENT_FLEXIBLE);
1603 static int event_enable_on_exec(struct perf_event *event,
1604 struct perf_event_context *ctx)
1606 if (!event->attr.enable_on_exec)
1609 event->attr.enable_on_exec = 0;
1610 if (event->state >= PERF_EVENT_STATE_INACTIVE)
1613 __perf_event_mark_enabled(event, ctx);
1619 * Enable all of a task's events that have been marked enable-on-exec.
1620 * This expects task == current.
1622 static void perf_event_enable_on_exec(struct task_struct *task)
1624 struct perf_event_context *ctx;
1625 struct perf_event *event;
1626 unsigned long flags;
1630 local_irq_save(flags);
1631 ctx = task->perf_event_ctxp;
1632 if (!ctx || !ctx->nr_events)
1635 __perf_event_task_sched_out(ctx);
1637 raw_spin_lock(&ctx->lock);
1639 list_for_each_entry(event, &ctx->pinned_groups, group_entry) {
1640 ret = event_enable_on_exec(event, ctx);
1645 list_for_each_entry(event, &ctx->flexible_groups, group_entry) {
1646 ret = event_enable_on_exec(event, ctx);
1652 * Unclone this context if we enabled any event.
1657 raw_spin_unlock(&ctx->lock);
1659 perf_event_task_sched_in(task);
1661 local_irq_restore(flags);
1665 * Cross CPU call to read the hardware event
1667 static void __perf_event_read(void *info)
1669 struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
1670 struct perf_event *event = info;
1671 struct perf_event_context *ctx = event->ctx;
1674 * If this is a task context, we need to check whether it is
1675 * the current task context of this cpu. If not it has been
1676 * scheduled out before the smp call arrived. In that case
1677 * event->count would have been updated to a recent sample
1678 * when the event was scheduled out.
1680 if (ctx->task && cpuctx->task_ctx != ctx)
1683 raw_spin_lock(&ctx->lock);
1684 update_context_time(ctx);
1685 update_event_times(event);
1686 raw_spin_unlock(&ctx->lock);
1688 event->pmu->read(event);
1691 static u64 perf_event_read(struct perf_event *event)
1694 * If event is enabled and currently active on a CPU, update the
1695 * value in the event structure:
1697 if (event->state == PERF_EVENT_STATE_ACTIVE) {
1698 smp_call_function_single(event->oncpu,
1699 __perf_event_read, event, 1);
1700 } else if (event->state == PERF_EVENT_STATE_INACTIVE) {
1701 struct perf_event_context *ctx = event->ctx;
1702 unsigned long flags;
1704 raw_spin_lock_irqsave(&ctx->lock, flags);
1705 update_context_time(ctx);
1706 update_event_times(event);
1707 raw_spin_unlock_irqrestore(&ctx->lock, flags);
1710 return atomic64_read(&event->count);
1714 * Initialize the perf_event context in a task_struct:
1717 __perf_event_init_context(struct perf_event_context *ctx,
1718 struct task_struct *task)
1720 raw_spin_lock_init(&ctx->lock);
1721 mutex_init(&ctx->mutex);
1722 INIT_LIST_HEAD(&ctx->pinned_groups);
1723 INIT_LIST_HEAD(&ctx->flexible_groups);
1724 INIT_LIST_HEAD(&ctx->event_list);
1725 atomic_set(&ctx->refcount, 1);
1729 static struct perf_event_context *find_get_context(pid_t pid, int cpu)
1731 struct perf_event_context *ctx;
1732 struct perf_cpu_context *cpuctx;
1733 struct task_struct *task;
1734 unsigned long flags;
1737 if (pid == -1 && cpu != -1) {
1738 /* Must be root to operate on a CPU event: */
1739 if (perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN))
1740 return ERR_PTR(-EACCES);
1742 if (cpu < 0 || cpu >= nr_cpumask_bits)
1743 return ERR_PTR(-EINVAL);
1746 * We could be clever and allow attaching an event to an
1747 * offline CPU and activate it when the CPU comes up, but
1750 if (!cpu_online(cpu))
1751 return ERR_PTR(-ENODEV);
1753 cpuctx = &per_cpu(perf_cpu_context, cpu);
1764 task = find_task_by_vpid(pid);
1766 get_task_struct(task);
1770 return ERR_PTR(-ESRCH);
1773 * Can't attach events to a dying task.
1776 if (task->flags & PF_EXITING)
1779 /* Reuse ptrace permission checks for now. */
1781 if (!ptrace_may_access(task, PTRACE_MODE_READ))
1785 ctx = perf_lock_task_context(task, &flags);
1788 raw_spin_unlock_irqrestore(&ctx->lock, flags);
1792 ctx = kzalloc(sizeof(struct perf_event_context), GFP_KERNEL);
1796 __perf_event_init_context(ctx, task);
1798 if (cmpxchg(&task->perf_event_ctxp, NULL, ctx)) {
1800 * We raced with some other task; use
1801 * the context they set.
1806 get_task_struct(task);
1809 put_task_struct(task);
1813 put_task_struct(task);
1814 return ERR_PTR(err);
1817 static void perf_event_free_filter(struct perf_event *event);
1819 static void free_event_rcu(struct rcu_head *head)
1821 struct perf_event *event;
1823 event = container_of(head, struct perf_event, rcu_head);
1825 put_pid_ns(event->ns);
1826 perf_event_free_filter(event);
1830 static void perf_pending_sync(struct perf_event *event);
1832 static void free_event(struct perf_event *event)
1834 perf_pending_sync(event);
1836 if (!event->parent) {
1837 atomic_dec(&nr_events);
1838 if (event->attr.mmap)
1839 atomic_dec(&nr_mmap_events);
1840 if (event->attr.comm)
1841 atomic_dec(&nr_comm_events);
1842 if (event->attr.task)
1843 atomic_dec(&nr_task_events);
1846 if (event->output) {
1847 fput(event->output->filp);
1848 event->output = NULL;
1852 event->destroy(event);
1854 put_ctx(event->ctx);
1855 call_rcu(&event->rcu_head, free_event_rcu);
1858 int perf_event_release_kernel(struct perf_event *event)
1860 struct perf_event_context *ctx = event->ctx;
1862 event->state = PERF_EVENT_STATE_FREE;
1864 WARN_ON_ONCE(ctx->parent_ctx);
1865 mutex_lock(&ctx->mutex);
1866 perf_event_remove_from_context(event);
1867 mutex_unlock(&ctx->mutex);
1869 mutex_lock(&event->owner->perf_event_mutex);
1870 list_del_init(&event->owner_entry);
1871 mutex_unlock(&event->owner->perf_event_mutex);
1872 put_task_struct(event->owner);
1878 EXPORT_SYMBOL_GPL(perf_event_release_kernel);
1881 * Called when the last reference to the file is gone.
1883 static int perf_release(struct inode *inode, struct file *file)
1885 struct perf_event *event = file->private_data;
1887 file->private_data = NULL;
1889 return perf_event_release_kernel(event);
1892 static int perf_event_read_size(struct perf_event *event)
1894 int entry = sizeof(u64); /* value */
1898 if (event->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
1899 size += sizeof(u64);
1901 if (event->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
1902 size += sizeof(u64);
1904 if (event->attr.read_format & PERF_FORMAT_ID)
1905 entry += sizeof(u64);
1907 if (event->attr.read_format & PERF_FORMAT_GROUP) {
1908 nr += event->group_leader->nr_siblings;
1909 size += sizeof(u64);
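/*
 * Worked example (illustrative): a non-group event with read_format =
 * PERF_FORMAT_TOTAL_TIME_ENABLED | PERF_FORMAT_ID gives
 *
 *	entry = sizeof(u64) + sizeof(u64)	(value + id)
 *	size  = sizeof(u64) + 1 * entry		(time_enabled + one entry)
 *
 * i.e. a 24 byte read() buffer.
 */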
1917 u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running)
1919 struct perf_event *child;
1925 mutex_lock(&event->child_mutex);
1926 total += perf_event_read(event);
1927 *enabled += event->total_time_enabled +
1928 atomic64_read(&event->child_total_time_enabled);
1929 *running += event->total_time_running +
1930 atomic64_read(&event->child_total_time_running);
1932 list_for_each_entry(child, &event->child_list, child_list) {
1933 total += perf_event_read(child);
1934 *enabled += child->total_time_enabled;
1935 *running += child->total_time_running;
1937 mutex_unlock(&event->child_mutex);
1941 EXPORT_SYMBOL_GPL(perf_event_read_value);
1943 static int perf_event_read_group(struct perf_event *event,
1944 u64 read_format, char __user *buf)
1946 struct perf_event *leader = event->group_leader, *sub;
1947 int n = 0, size = 0, ret = -EFAULT;
1948 struct perf_event_context *ctx = leader->ctx;
1950 u64 count, enabled, running;
1952 mutex_lock(&ctx->mutex);
1953 count = perf_event_read_value(leader, &enabled, &running);
1955 values[n++] = 1 + leader->nr_siblings;
1956 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
1957 values[n++] = enabled;
1958 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
1959 values[n++] = running;
1960 values[n++] = count;
1961 if (read_format & PERF_FORMAT_ID)
1962 values[n++] = primary_event_id(leader);
1964 size = n * sizeof(u64);
1966 if (copy_to_user(buf, values, size))
1971 list_for_each_entry(sub, &leader->sibling_list, group_entry) {
1974 values[n++] = perf_event_read_value(sub, &enabled, &running);
1975 if (read_format & PERF_FORMAT_ID)
1976 values[n++] = primary_event_id(sub);
1978 size = n * sizeof(u64);
1980 if (copy_to_user(buf + ret, values, size)) {
1988 mutex_unlock(&ctx->mutex);
1993 static int perf_event_read_one(struct perf_event *event,
1994 u64 read_format, char __user *buf)
1996 u64 enabled, running;
2000 values[n++] = perf_event_read_value(event, &enabled, &running);
2001 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
2002 values[n++] = enabled;
2003 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
2004 values[n++] = running;
2005 if (read_format & PERF_FORMAT_ID)
2006 values[n++] = primary_event_id(event);
2008 if (copy_to_user(buf, values, n * sizeof(u64)))
2011 return n * sizeof(u64);
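/*
 * The resulting read() layout, as implied by the two functions above:
 *
 *	{ u64 value;
 *	  { u64 time_enabled; }	&& PERF_FORMAT_TOTAL_TIME_ENABLED
 *	  { u64 time_running; }	&& PERF_FORMAT_TOTAL_TIME_RUNNING
 *	  { u64 id;           }	&& PERF_FORMAT_ID
 *	} && !PERF_FORMAT_GROUP
 *
 *	{ u64 nr;
 *	  { u64 time_enabled; }	&& PERF_FORMAT_TOTAL_TIME_ENABLED
 *	  { u64 time_running; }	&& PERF_FORMAT_TOTAL_TIME_RUNNING
 *	  { u64 value; { u64 id; } && PERF_FORMAT_ID } cntr[nr];
 *	} && PERF_FORMAT_GROUP
 */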
2015 * Read the performance event - simple non-blocking version for now
2018 perf_read_hw(struct perf_event *event, char __user *buf, size_t count)
2020 u64 read_format = event->attr.read_format;
2024 * Return end-of-file for a read on an event that is in
2025 * error state (i.e. because it was pinned but it couldn't be
2026 * scheduled on to the CPU at some point).
2028 if (event->state == PERF_EVENT_STATE_ERROR)
2031 if (count < perf_event_read_size(event))
2034 WARN_ON_ONCE(event->ctx->parent_ctx);
2035 if (read_format & PERF_FORMAT_GROUP)
2036 ret = perf_event_read_group(event, read_format, buf);
2038 ret = perf_event_read_one(event, read_format, buf);
2044 perf_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
2046 struct perf_event *event = file->private_data;
2048 return perf_read_hw(event, buf, count);
2051 static unsigned int perf_poll(struct file *file, poll_table *wait)
2053 struct perf_event *event = file->private_data;
2054 struct perf_mmap_data *data;
2055 unsigned int events = POLLHUP;
2058 data = rcu_dereference(event->data);
2060 events = atomic_xchg(&data->poll, 0);
2063 poll_wait(file, &event->waitq, wait);
2068 static void perf_event_reset(struct perf_event *event)
2070 (void)perf_event_read(event);
2071 atomic64_set(&event->count, 0);
2072 perf_event_update_userpage(event);
2076 * Holding the top-level event's child_mutex means that any
2077 * descendant process that has inherited this event will block
2078 * in sync_child_event if it goes to exit, thus satisfying the
2079 * task existence requirements of perf_event_enable/disable.
2081 static void perf_event_for_each_child(struct perf_event *event,
2082 void (*func)(struct perf_event *))
2084 struct perf_event *child;
2086 WARN_ON_ONCE(event->ctx->parent_ctx);
2087 mutex_lock(&event->child_mutex);
2089 list_for_each_entry(child, &event->child_list, child_list)
2091 mutex_unlock(&event->child_mutex);
2094 static void perf_event_for_each(struct perf_event *event,
2095 void (*func)(struct perf_event *))
2097 struct perf_event_context *ctx = event->ctx;
2098 struct perf_event *sibling;
2100 WARN_ON_ONCE(ctx->parent_ctx);
2101 mutex_lock(&ctx->mutex);
2102 event = event->group_leader;
2104 perf_event_for_each_child(event, func);
2106 list_for_each_entry(sibling, &event->sibling_list, group_entry)
2107 perf_event_for_each_child(event, func);
2108 mutex_unlock(&ctx->mutex);
2111 static int perf_event_period(struct perf_event *event, u64 __user *arg)
2113 struct perf_event_context *ctx = event->ctx;
2118 if (!event->attr.sample_period)
2121 size = copy_from_user(&value, arg, sizeof(value));
2122 if (size != sizeof(value))
2128 raw_spin_lock_irq(&ctx->lock);
2129 if (event->attr.freq) {
2130 if (value > sysctl_perf_event_sample_rate) {
2135 event->attr.sample_freq = value;
2137 event->attr.sample_period = value;
2138 event->hw.sample_period = value;
2141 raw_spin_unlock_irq(&ctx->lock);
2146 static int perf_event_set_output(struct perf_event *event, int output_fd);
2147 static int perf_event_set_filter(struct perf_event *event, void __user *arg);
2149 static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
2151 struct perf_event *event = file->private_data;
2152 void (*func)(struct perf_event *);
2156 case PERF_EVENT_IOC_ENABLE:
2157 func = perf_event_enable;
2159 case PERF_EVENT_IOC_DISABLE:
2160 func = perf_event_disable;
2162 case PERF_EVENT_IOC_RESET:
2163 func = perf_event_reset;
2166 case PERF_EVENT_IOC_REFRESH:
2167 return perf_event_refresh(event, arg);
2169 case PERF_EVENT_IOC_PERIOD:
2170 return perf_event_period(event, (u64 __user *)arg);
2172 case PERF_EVENT_IOC_SET_OUTPUT:
2173 return perf_event_set_output(event, arg);
2175 case PERF_EVENT_IOC_SET_FILTER:
2176 return perf_event_set_filter(event, (void __user *)arg);
2182 if (flags & PERF_IOC_FLAG_GROUP)
2183 perf_event_for_each(event, func);
2185 perf_event_for_each_child(event, func);
2190 int perf_event_task_enable(void)
2192 struct perf_event *event;
2194 mutex_lock(&current->perf_event_mutex);
2195 list_for_each_entry(event, &current->perf_event_list, owner_entry)
2196 perf_event_for_each_child(event, perf_event_enable);
2197 mutex_unlock(&current->perf_event_mutex);
2202 int perf_event_task_disable(void)
2204 struct perf_event *event;
2206 mutex_lock(&current->perf_event_mutex);
2207 list_for_each_entry(event, &current->perf_event_list, owner_entry)
2208 perf_event_for_each_child(event, perf_event_disable);
2209 mutex_unlock(&current->perf_event_mutex);
2214 #ifndef PERF_EVENT_INDEX_OFFSET
2215 # define PERF_EVENT_INDEX_OFFSET 0
2218 static int perf_event_index(struct perf_event *event)
2220 if (event->state != PERF_EVENT_STATE_ACTIVE)
2223 return event->hw.idx + 1 - PERF_EVENT_INDEX_OFFSET;
2227 * Callers need to ensure there can be no nesting of this function, otherwise
2228 * the seqlock logic goes bad. We cannot serialize this because the arch
2229 * code calls this from NMI context.
2231 void perf_event_update_userpage(struct perf_event *event)
2233 struct perf_event_mmap_page *userpg;
2234 struct perf_mmap_data *data;
2237 data = rcu_dereference(event->data);
2241 userpg = data->user_page;
2244 * Disable preemption so as to not let the corresponding user-space
2245 * spin too long if we get preempted.
2250 userpg->index = perf_event_index(event);
2251 userpg->offset = atomic64_read(&event->count);
2252 if (event->state == PERF_EVENT_STATE_ACTIVE)
2253 userpg->offset -= atomic64_read(&event->hw.prev_count);
2255 userpg->time_enabled = event->total_time_enabled +
2256 atomic64_read(&event->child_total_time_enabled);
2258 userpg->time_running = event->total_time_running +
2259 atomic64_read(&event->child_total_time_running);
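/*
 * A user-space reader pairs with this update by treating the lock
 * field as a seqcount. Illustrative sketch, where pc points at the
 * mmap'ed control page and read_pmc() is a hypothetical helper that
 * reads hardware counter idx directly:
 *
 *	do {
 *		seq = pc->lock;
 *		barrier();
 *		idx = pc->index;
 *		count = pc->offset;
 *		if (idx)
 *			count += read_pmc(idx - 1);
 *		barrier();
 *	} while (pc->lock != seq);
 */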
2268 static unsigned long perf_data_size(struct perf_mmap_data *data)
2270 return data->nr_pages << (PAGE_SHIFT + data->data_order);
2273 #ifndef CONFIG_PERF_USE_VMALLOC
2276 * Back perf_mmap() with regular GFP_KERNEL-0 pages.
2279 static struct page *
2280 perf_mmap_to_page(struct perf_mmap_data *data, unsigned long pgoff)
2282 if (pgoff > data->nr_pages)
2286 return virt_to_page(data->user_page);
2288 return virt_to_page(data->data_pages[pgoff - 1]);
2291 static struct perf_mmap_data *
2292 perf_mmap_data_alloc(struct perf_event *event, int nr_pages)
2294 struct perf_mmap_data *data;
2298 WARN_ON(atomic_read(&event->mmap_count));
2300 size = sizeof(struct perf_mmap_data);
2301 size += nr_pages * sizeof(void *);
2303 data = kzalloc(size, GFP_KERNEL);
2307 data->user_page = (void *)get_zeroed_page(GFP_KERNEL);
2308 if (!data->user_page)
2309 goto fail_user_page;
2311 for (i = 0; i < nr_pages; i++) {
2312 data->data_pages[i] = (void *)get_zeroed_page(GFP_KERNEL);
2313 if (!data->data_pages[i])
2314 goto fail_data_pages;
2317 data->data_order = 0;
2318 data->nr_pages = nr_pages;
2323 for (i--; i >= 0; i--)
2324 free_page((unsigned long)data->data_pages[i]);
2326 free_page((unsigned long)data->user_page);
2335 static void perf_mmap_free_page(unsigned long addr)
2337 struct page *page = virt_to_page((void *)addr);
2339 page->mapping = NULL;
2343 static void perf_mmap_data_free(struct perf_mmap_data *data)
2347 perf_mmap_free_page((unsigned long)data->user_page);
2348 for (i = 0; i < data->nr_pages; i++)
2349 perf_mmap_free_page((unsigned long)data->data_pages[i]);
2356 * Back perf_mmap() with vmalloc memory.
2358 * Required for architectures that have d-cache aliasing issues.
2361 static struct page *
2362 perf_mmap_to_page(struct perf_mmap_data *data, unsigned long pgoff)
2364 if (pgoff > (1UL << data->data_order))
2367 return vmalloc_to_page((void *)data->user_page + pgoff * PAGE_SIZE);
2370 static void perf_mmap_unmark_page(void *addr)
2372 struct page *page = vmalloc_to_page(addr);
2374 page->mapping = NULL;
2377 static void perf_mmap_data_free_work(struct work_struct *work)
2379 struct perf_mmap_data *data;
2383 data = container_of(work, struct perf_mmap_data, work);
2384 nr = 1 << data->data_order;
2386 base = data->user_page;
2387 for (i = 0; i < nr + 1; i++)
2388 perf_mmap_unmark_page(base + (i * PAGE_SIZE));
2394 static void perf_mmap_data_free(struct perf_mmap_data *data)
2396 schedule_work(&data->work);
2399 static struct perf_mmap_data *
2400 perf_mmap_data_alloc(struct perf_event *event, int nr_pages)
2402 struct perf_mmap_data *data;
2406 WARN_ON(atomic_read(&event->mmap_count));
2408 size = sizeof(struct perf_mmap_data);
2409 size += sizeof(void *);
2411 data = kzalloc(size, GFP_KERNEL);
2415 INIT_WORK(&data->work, perf_mmap_data_free_work);
2417 all_buf = vmalloc_user((nr_pages + 1) * PAGE_SIZE);
2421 data->user_page = all_buf;
2422 data->data_pages[0] = all_buf + PAGE_SIZE;
2423 data->data_order = ilog2(nr_pages);
2437 static int perf_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
2439 struct perf_event *event = vma->vm_file->private_data;
2440 struct perf_mmap_data *data;
2441 int ret = VM_FAULT_SIGBUS;
2443 if (vmf->flags & FAULT_FLAG_MKWRITE) {
2444 if (vmf->pgoff == 0)
2450 data = rcu_dereference(event->data);
2454 if (vmf->pgoff && (vmf->flags & FAULT_FLAG_WRITE))
2457 vmf->page = perf_mmap_to_page(data, vmf->pgoff);
2461 get_page(vmf->page);
2462 vmf->page->mapping = vma->vm_file->f_mapping;
2463 vmf->page->index = vmf->pgoff;
2473 perf_mmap_data_init(struct perf_event *event, struct perf_mmap_data *data)
2475 long max_size = perf_data_size(data);
2477 atomic_set(&data->lock, -1);
2479 if (event->attr.watermark) {
2480 data->watermark = min_t(long, max_size,
2481 event->attr.wakeup_watermark);
2484 if (!data->watermark)
2485 data->watermark = max_size / 2;
2488 rcu_assign_pointer(event->data, data);
2491 static void perf_mmap_data_free_rcu(struct rcu_head *rcu_head)
2493 struct perf_mmap_data *data;
2495 data = container_of(rcu_head, struct perf_mmap_data, rcu_head);
2496 perf_mmap_data_free(data);
2499 static void perf_mmap_data_release(struct perf_event *event)
2501 struct perf_mmap_data *data = event->data;
2503 WARN_ON(atomic_read(&event->mmap_count));
2505 rcu_assign_pointer(event->data, NULL);
2506 call_rcu(&data->rcu_head, perf_mmap_data_free_rcu);
2509 static void perf_mmap_open(struct vm_area_struct *vma)
2511 struct perf_event *event = vma->vm_file->private_data;
2513 atomic_inc(&event->mmap_count);
2516 static void perf_mmap_close(struct vm_area_struct *vma)
2518 struct perf_event *event = vma->vm_file->private_data;
2520 WARN_ON_ONCE(event->ctx->parent_ctx);
2521 if (atomic_dec_and_mutex_lock(&event->mmap_count, &event->mmap_mutex)) {
2522 unsigned long size = perf_data_size(event->data);
2523 struct user_struct *user = current_user();
2525 atomic_long_sub((size >> PAGE_SHIFT) + 1, &user->locked_vm);
2526 vma->vm_mm->locked_vm -= event->data->nr_locked;
2527 perf_mmap_data_release(event);
2528 mutex_unlock(&event->mmap_mutex);
2532 static const struct vm_operations_struct perf_mmap_vmops = {
2533 .open = perf_mmap_open,
2534 .close = perf_mmap_close,
2535 .fault = perf_mmap_fault,
2536 .page_mkwrite = perf_mmap_fault,
2539 static int perf_mmap(struct file *file, struct vm_area_struct *vma)
2541 struct perf_event *event = file->private_data;
2542 unsigned long user_locked, user_lock_limit;
2543 struct user_struct *user = current_user();
2544 unsigned long locked, lock_limit;
2545 struct perf_mmap_data *data;
2546 unsigned long vma_size;
2547 unsigned long nr_pages;
2548 long user_extra, extra;
2551 if (!(vma->vm_flags & VM_SHARED))
2554 vma_size = vma->vm_end - vma->vm_start;
2555 nr_pages = (vma_size / PAGE_SIZE) - 1;
2558 * If we have data pages ensure they're a power-of-two number, so we
2559 * can do bitmasks instead of modulo.
2561 if (nr_pages != 0 && !is_power_of_2(nr_pages))
2564 if (vma_size != PAGE_SIZE * (1 + nr_pages))
2567 if (vma->vm_pgoff != 0)
2570 WARN_ON_ONCE(event->ctx->parent_ctx);
2571 mutex_lock(&event->mmap_mutex);
2572 if (event->output) {
2577 if (atomic_inc_not_zero(&event->mmap_count)) {
2578 if (nr_pages != event->data->nr_pages)
2583 user_extra = nr_pages + 1;
2584 user_lock_limit = sysctl_perf_event_mlock >> (PAGE_SHIFT - 10);
2587 * Increase the limit linearly with more CPUs:
2589 user_lock_limit *= num_online_cpus();
2591 user_locked = atomic_long_read(&user->locked_vm) + user_extra;
2594 if (user_locked > user_lock_limit)
2595 extra = user_locked - user_lock_limit;
2597 lock_limit = rlimit(RLIMIT_MEMLOCK);
2598 lock_limit >>= PAGE_SHIFT;
2599 locked = vma->vm_mm->locked_vm + extra;
2601 if ((locked > lock_limit) && perf_paranoid_tracepoint_raw() &&
2602 !capable(CAP_IPC_LOCK)) {
2607 WARN_ON(event->data);
2609 data = perf_mmap_data_alloc(event, nr_pages);
2615 perf_mmap_data_init(event, data);
2617 atomic_set(&event->mmap_count, 1);
2618 atomic_long_add(user_extra, &user->locked_vm);
2619 vma->vm_mm->locked_vm += extra;
2620 event->data->nr_locked = extra;
2621 if (vma->vm_flags & VM_WRITE)
2622 event->data->writable = 1;
2625 mutex_unlock(&event->mmap_mutex);
2627 vma->vm_flags |= VM_RESERVED;
2628 vma->vm_ops = &perf_mmap_vmops;
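/*
 * Illustrative user-space counterpart (a minimal sketch, assuming 4KiB
 * pages; fd comes from sys_perf_event_open()): map one control page plus
 * a power-of-two number of data pages in a single VM_SHARED mapping:
 *
 *	int nr_pages = 8;			// must be 2^n
 *	void *base = mmap(NULL, (nr_pages + 1) * 4096,
 *			  PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 *
 * base then points at the struct perf_event_mmap_page control page, and
 * base + 4096 at the start of the data ring.
 */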
2633 static int perf_fasync(int fd, struct file *filp, int on)
2635 struct inode *inode = filp->f_path.dentry->d_inode;
2636 struct perf_event *event = filp->private_data;
2639 mutex_lock(&inode->i_mutex);
2640 retval = fasync_helper(fd, filp, on, &event->fasync);
2641 mutex_unlock(&inode->i_mutex);
2649 static const struct file_operations perf_fops = {
2650 .release = perf_release,
2653 .unlocked_ioctl = perf_ioctl,
2654 .compat_ioctl = perf_ioctl,
2656 .fasync = perf_fasync,
2662 * If there's data, ensure we set the poll() state and publish everything
2663 * to user-space before waking everybody up.
2666 void perf_event_wakeup(struct perf_event *event)
2668 wake_up_all(&event->waitq);
2670 if (event->pending_kill) {
2671 kill_fasync(&event->fasync, SIGIO, event->pending_kill);
2672 event->pending_kill = 0;
2679 * Handle the case where we need to wake up from NMI (or rq->lock) context.

2681 * The NMI bit means we cannot possibly take locks. Therefore, maintain a
2682 * singly-linked list and use cmpxchg() to add entries locklessly.
2685 static void perf_pending_event(struct perf_pending_entry *entry)
2687 struct perf_event *event = container_of(entry,
2688 struct perf_event, pending);
2690 if (event->pending_disable) {
2691 event->pending_disable = 0;
2692 __perf_event_disable(event);
2695 if (event->pending_wakeup) {
2696 event->pending_wakeup = 0;
2697 perf_event_wakeup(event);
2701 #define PENDING_TAIL ((struct perf_pending_entry *)-1UL)
2703 static DEFINE_PER_CPU(struct perf_pending_entry *, perf_pending_head) = {
2707 static void perf_pending_queue(struct perf_pending_entry *entry,
2708 void (*func)(struct perf_pending_entry *))
2710 struct perf_pending_entry **head;
2712 if (cmpxchg(&entry->next, NULL, PENDING_TAIL) != NULL)
2717 head = &get_cpu_var(perf_pending_head);
2720 entry->next = *head;
2721 } while (cmpxchg(head, entry->next, entry) != entry->next);
2723 set_perf_event_pending();
2725 put_cpu_var(perf_pending_head);
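/*
 * The above is the classic lock-free push: speculatively link the entry
 * to the current head, then swing the head with cmpxchg(), retrying on
 * contention. The earlier cmpxchg() on entry->next guards against the
 * same entry being queued twice; consumers only ever take the whole list
 * at once (the xchg() in __perf_pending_run() below), which sidesteps ABA.
 */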
2728 static int __perf_pending_run(void)
2730 struct perf_pending_entry *list;
2733 list = xchg(&__get_cpu_var(perf_pending_head), PENDING_TAIL);
2734 while (list != PENDING_TAIL) {
2735 void (*func)(struct perf_pending_entry *);
2736 struct perf_pending_entry *entry = list;
2743 * Ensure we observe the unqueue before we issue the wakeup,
2744 * so that we won't be waiting forever.
2745 * -- see perf_not_pending().
2756 static inline int perf_not_pending(struct perf_event *event)
2759 * If we flush on whatever cpu we run, there is a chance we don't need to wait.
2763 __perf_pending_run();
2767 * Ensure we see the proper queue state before going to sleep
2768 * so that we do not miss the wakeup. -- see perf_pending_queue()
2771 return event->pending.next == NULL;
2774 static void perf_pending_sync(struct perf_event *event)
2776 wait_event(event->waitq, perf_not_pending(event));
2779 void perf_event_do_pending(void)
2781 __perf_pending_run();
2785 * Callchain support -- arch specific
2788 __weak struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
2794 void perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned long ip, int skip)
2802 static bool perf_output_space(struct perf_mmap_data *data, unsigned long tail,
2803 unsigned long offset, unsigned long head)
2807 if (!data->writable)
2810 mask = perf_data_size(data) - 1;
2812 offset = (offset - tail) & mask;
2813 head = (head - tail) & mask;
2815 if ((int)(head - offset) < 0)
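/*
 * Worked example (illustrative): with a 16-byte ring, mask = 15,
 * tail = 10 and offset = 12, a 6-byte write gives head = 18:
 * (18 - 10) & 15 = 8 >= (12 - 10) & 15 = 2, so there is room. A 15-byte
 * write gives head = 27: (27 - 10) & 15 = 1 < 2, i.e. the write would
 * wrap past the unread tail, and we report no space.
 */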
2821 static void perf_output_wakeup(struct perf_output_handle *handle)
2823 atomic_set(&handle->data->poll, POLL_IN);
2826 handle->event->pending_wakeup = 1;
2827 perf_pending_queue(&handle->event->pending,
2828 perf_pending_event);
2830 perf_event_wakeup(handle->event);
2834 * Curious locking construct.
2836 * We need to ensure a later event doesn't publish a head while a former
2837 * event is still writing. However, since we need to deal with NMIs we
2838 * cannot fully serialize things.
2840 * What we do is serialize between CPUs so we only have to deal with NMI
2841 * nesting on a single CPU.
2843 * We only publish the head (and generate a wakeup) when the outer-most
2844 * event_id completes.
2846 static void perf_output_lock(struct perf_output_handle *handle)
2848 struct perf_mmap_data *data = handle->data;
2849 int cur, cpu = get_cpu();
2854 cur = atomic_cmpxchg(&data->lock, -1, cpu);
2866 static void perf_output_unlock(struct perf_output_handle *handle)
2868 struct perf_mmap_data *data = handle->data;
2872 data->done_head = data->head;
2874 if (!handle->locked)
2879 * The xchg implies a full barrier that ensures all writes are done
2880 * before we publish the new head, matched by a rmb() in userspace when
2881 * reading this position.
2883 while ((head = atomic_long_xchg(&data->done_head, 0)))
2884 data->user_page->data_head = head;
2887 * NMI can happen here, which means we can miss a done_head update.
2890 cpu = atomic_xchg(&data->lock, -1);
2891 WARN_ON_ONCE(cpu != smp_processor_id());
2894 * Therefore we have to validate that we did not in fact miss one.
2896 if (unlikely(atomic_long_read(&data->done_head))) {
2898 * Since we had it locked, we can lock it again.
2900 while (atomic_cmpxchg(&data->lock, -1, cpu) != -1)
2906 if (atomic_xchg(&data->wakeup, 0))
2907 perf_output_wakeup(handle);
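/*
 * A minimal sketch of the matching user-space read side (illustrative,
 * not lifted from any particular tool): the rmb() pairs with the xchg()
 * above, the mb() pairs with the ACCESS_ONCE() read of data_tail in
 * perf_output_begin():
 *
 *	u64 head = pc->data_head;
 *	rmb();
 *	// ... consume records in [pc->data_tail, head) ...
 *	mb();
 *	pc->data_tail = head;
 *
 * where pc points at the mapped struct perf_event_mmap_page.
 */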
2912 void perf_output_copy(struct perf_output_handle *handle,
2913 const void *buf, unsigned int len)
2915 unsigned int pages_mask;
2916 unsigned long offset;
2920 offset = handle->offset;
2921 pages_mask = handle->data->nr_pages - 1;
2922 pages = handle->data->data_pages;
2925 unsigned long page_offset;
2926 unsigned long page_size;
2929 nr = (offset >> PAGE_SHIFT) & pages_mask;
2930 page_size = 1UL << (handle->data->data_order + PAGE_SHIFT);
2931 page_offset = offset & (page_size - 1);
2932 size = min_t(unsigned int, page_size - page_offset, len);
2934 memcpy(pages[nr] + page_offset, buf, size);
2941 handle->offset = offset;
2944 * Check we didn't copy past our reservation window, taking the
2945 * possible unsigned int wrap into account.
2947 WARN_ON_ONCE(((long)(handle->head - handle->offset)) < 0);
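/*
 * Because nr_pages is a power of two, (offset >> PAGE_SHIFT) & pages_mask
 * above is an exact substitute for a modulo, and offset can simply keep
 * incrementing without ever being wrapped explicitly. In the
 * vmalloc-backed configuration the whole ring is one contiguous
 * allocation, so data_order makes page_size span the entire buffer and
 * the first "page" covers every byte of it.
 */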
2950 int perf_output_begin(struct perf_output_handle *handle,
2951 struct perf_event *event, unsigned int size,
2952 int nmi, int sample)
2954 struct perf_event *output_event;
2955 struct perf_mmap_data *data;
2956 unsigned long tail, offset, head;
2959 struct perf_event_header header;
2966 * For inherited events we send all the output towards the parent.
2969 event = event->parent;
2971 output_event = rcu_dereference(event->output);
2973 event = output_event;
2975 data = rcu_dereference(event->data);
2979 handle->data = data;
2980 handle->event = event;
2982 handle->sample = sample;
2984 if (!data->nr_pages)
2987 have_lost = atomic_read(&data->lost);
2989 size += sizeof(lost_event);
2991 perf_output_lock(handle);
2995 * Userspace could choose to issue a mb() before updating the
2996 * tail pointer, so that all reads will be completed before the write is issued.
2999 tail = ACCESS_ONCE(data->user_page->data_tail);
3001 offset = head = atomic_long_read(&data->head);
3003 if (unlikely(!perf_output_space(data, tail, offset, head)))
3005 } while (atomic_long_cmpxchg(&data->head, offset, head) != offset);
3007 handle->offset = offset;
3008 handle->head = head;
3010 if (head - tail > data->watermark)
3011 atomic_set(&data->wakeup, 1);
3014 lost_event.header.type = PERF_RECORD_LOST;
3015 lost_event.header.misc = 0;
3016 lost_event.header.size = sizeof(lost_event);
3017 lost_event.id = event->id;
3018 lost_event.lost = atomic_xchg(&data->lost, 0);
3020 perf_output_put(handle, lost_event);
3026 atomic_inc(&data->lost);
3027 perf_output_unlock(handle);
3034 void perf_output_end(struct perf_output_handle *handle)
3036 struct perf_event *event = handle->event;
3037 struct perf_mmap_data *data = handle->data;
3039 int wakeup_events = event->attr.wakeup_events;
3041 if (handle->sample && wakeup_events) {
3042 int events = atomic_inc_return(&data->events);
3043 if (events >= wakeup_events) {
3044 atomic_sub(wakeup_events, &data->events);
3045 atomic_set(&data->wakeup, 1);
3049 perf_output_unlock(handle);
3053 static u32 perf_event_pid(struct perf_event *event, struct task_struct *p)
3056 * only top level events have the pid namespace they were created in
3059 event = event->parent;
3061 return task_tgid_nr_ns(p, event->ns);
3064 static u32 perf_event_tid(struct perf_event *event, struct task_struct *p)
3067 * only top level events have the pid namespace they were created in
3070 event = event->parent;
3072 return task_pid_nr_ns(p, event->ns);
3075 static void perf_output_read_one(struct perf_output_handle *handle,
3076 struct perf_event *event)
3078 u64 read_format = event->attr.read_format;
3082 values[n++] = atomic64_read(&event->count);
3083 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
3084 values[n++] = event->total_time_enabled +
3085 atomic64_read(&event->child_total_time_enabled);
3087 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
3088 values[n++] = event->total_time_running +
3089 atomic64_read(&event->child_total_time_running);
3091 if (read_format & PERF_FORMAT_ID)
3092 values[n++] = primary_event_id(event);
3094 perf_output_copy(handle, values, n * sizeof(u64));
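/*
 * The resulting record body, in read_format order:
 *
 *	{ u64 value;
 *	  { u64 time_enabled; }	&& PERF_FORMAT_TOTAL_TIME_ENABLED
 *	  { u64 time_running; }	&& PERF_FORMAT_TOTAL_TIME_RUNNING
 *	  { u64 id;           }	&& PERF_FORMAT_ID
 *	}
 */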
3098 * XXX PERF_FORMAT_GROUP vs inherited events seems difficult.
3100 static void perf_output_read_group(struct perf_output_handle *handle,
3101 struct perf_event *event)
3103 struct perf_event *leader = event->group_leader, *sub;
3104 u64 read_format = event->attr.read_format;
3108 values[n++] = 1 + leader->nr_siblings;
3110 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
3111 values[n++] = leader->total_time_enabled;
3113 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
3114 values[n++] = leader->total_time_running;
3116 if (leader != event)
3117 leader->pmu->read(leader);
3119 values[n++] = atomic64_read(&leader->count);
3120 if (read_format & PERF_FORMAT_ID)
3121 values[n++] = primary_event_id(leader);
3123 perf_output_copy(handle, values, n * sizeof(u64));
3125 list_for_each_entry(sub, &leader->sibling_list, group_entry) {
3129 sub->pmu->read(sub);
3131 values[n++] = atomic64_read(&sub->count);
3132 if (read_format & PERF_FORMAT_ID)
3133 values[n++] = primary_event_id(sub);
3135 perf_output_copy(handle, values, n * sizeof(u64));
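/*
 * For the group case the layout becomes:
 *
 *	{ u64 nr;
 *	  { u64 time_enabled; }	&& PERF_FORMAT_TOTAL_TIME_ENABLED
 *	  { u64 time_running; }	&& PERF_FORMAT_TOTAL_TIME_RUNNING
 *	  { u64 value;
 *	    { u64 id;         }	&& PERF_FORMAT_ID
 *	  } cntr[nr];
 *	}
 *
 * i.e. the enabled/running times are reported once for the leader,
 * followed by one { value, id } pair per group member.
 */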
3139 static void perf_output_read(struct perf_output_handle *handle,
3140 struct perf_event *event)
3142 if (event->attr.read_format & PERF_FORMAT_GROUP)
3143 perf_output_read_group(handle, event);
3145 perf_output_read_one(handle, event);
3148 void perf_output_sample(struct perf_output_handle *handle,
3149 struct perf_event_header *header,
3150 struct perf_sample_data *data,
3151 struct perf_event *event)
3153 u64 sample_type = data->type;
3155 perf_output_put(handle, *header);
3157 if (sample_type & PERF_SAMPLE_IP)
3158 perf_output_put(handle, data->ip);
3160 if (sample_type & PERF_SAMPLE_TID)
3161 perf_output_put(handle, data->tid_entry);
3163 if (sample_type & PERF_SAMPLE_TIME)
3164 perf_output_put(handle, data->time);
3166 if (sample_type & PERF_SAMPLE_ADDR)
3167 perf_output_put(handle, data->addr);
3169 if (sample_type & PERF_SAMPLE_ID)
3170 perf_output_put(handle, data->id);
3172 if (sample_type & PERF_SAMPLE_STREAM_ID)
3173 perf_output_put(handle, data->stream_id);
3175 if (sample_type & PERF_SAMPLE_CPU)
3176 perf_output_put(handle, data->cpu_entry);
3178 if (sample_type & PERF_SAMPLE_PERIOD)
3179 perf_output_put(handle, data->period);
3181 if (sample_type & PERF_SAMPLE_READ)
3182 perf_output_read(handle, event);
3184 if (sample_type & PERF_SAMPLE_CALLCHAIN) {
3185 if (data->callchain) {
3188 if (data->callchain)
3189 size += data->callchain->nr;
3191 size *= sizeof(u64);
3193 perf_output_copy(handle, data->callchain, size);
3196 perf_output_put(handle, nr);
3200 if (sample_type & PERF_SAMPLE_RAW) {
3202 perf_output_put(handle, data->raw->size);
3203 perf_output_copy(handle, data->raw->data,
3210 .size = sizeof(u32),
3213 perf_output_put(handle, raw);
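/*
 * When the event asked for PERF_SAMPLE_RAW but no raw data is attached,
 * we still emit a u64-sized { .size = sizeof(u32), .data = 0 } stub, so
 * that the record length always matches what perf_prepare_sample()
 * accounted for in header->size.
 */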
3218 void perf_prepare_sample(struct perf_event_header *header,
3219 struct perf_sample_data *data,
3220 struct perf_event *event,
3221 struct pt_regs *regs)
3223 u64 sample_type = event->attr.sample_type;
3225 data->type = sample_type;
3227 header->type = PERF_RECORD_SAMPLE;
3228 header->size = sizeof(*header);
3231 header->misc |= perf_misc_flags(regs);
3233 if (sample_type & PERF_SAMPLE_IP) {
3234 data->ip = perf_instruction_pointer(regs);
3236 header->size += sizeof(data->ip);
3239 if (sample_type & PERF_SAMPLE_TID) {
3240 /* namespace issues */
3241 data->tid_entry.pid = perf_event_pid(event, current);
3242 data->tid_entry.tid = perf_event_tid(event, current);
3244 header->size += sizeof(data->tid_entry);
3247 if (sample_type & PERF_SAMPLE_TIME) {
3248 data->time = perf_clock();
3250 header->size += sizeof(data->time);
3253 if (sample_type & PERF_SAMPLE_ADDR)
3254 header->size += sizeof(data->addr);
3256 if (sample_type & PERF_SAMPLE_ID) {
3257 data->id = primary_event_id(event);
3259 header->size += sizeof(data->id);
3262 if (sample_type & PERF_SAMPLE_STREAM_ID) {
3263 data->stream_id = event->id;
3265 header->size += sizeof(data->stream_id);
3268 if (sample_type & PERF_SAMPLE_CPU) {
3269 data->cpu_entry.cpu = raw_smp_processor_id();
3270 data->cpu_entry.reserved = 0;
3272 header->size += sizeof(data->cpu_entry);
3275 if (sample_type & PERF_SAMPLE_PERIOD)
3276 header->size += sizeof(data->period);
3278 if (sample_type & PERF_SAMPLE_READ)
3279 header->size += perf_event_read_size(event);
3281 if (sample_type & PERF_SAMPLE_CALLCHAIN) {
3284 data->callchain = perf_callchain(regs);
3286 if (data->callchain)
3287 size += data->callchain->nr;
3289 header->size += size * sizeof(u64);
3292 if (sample_type & PERF_SAMPLE_RAW) {
3293 int size = sizeof(u32);
3296 size += data->raw->size;
3298 size += sizeof(u32);
3300 WARN_ON_ONCE(size & (sizeof(u64)-1));
3301 header->size += size;
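/*
 * perf_prepare_sample() and perf_output_sample() are two halves of one
 * contract: the size computed here is what perf_output_begin() reserves,
 * so every branch above must account for exactly the bytes its twin in
 * perf_output_sample() will emit; the WARN_ON_ONCE() checks that the raw
 * area keeps the overall record u64-aligned.
 */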
3305 static void perf_event_output(struct perf_event *event, int nmi,
3306 struct perf_sample_data *data,
3307 struct pt_regs *regs)
3309 struct perf_output_handle handle;
3310 struct perf_event_header header;
3312 perf_prepare_sample(&header, data, event, regs);
3314 if (perf_output_begin(&handle, event, header.size, nmi, 1))
3317 perf_output_sample(&handle, &header, data, event);
3319 perf_output_end(&handle);
3326 struct perf_read_event {
3327 struct perf_event_header header;
3334 perf_event_read_event(struct perf_event *event,
3335 struct task_struct *task)
3337 struct perf_output_handle handle;
3338 struct perf_read_event read_event = {
3340 .type = PERF_RECORD_READ,
3342 .size = sizeof(read_event) + perf_event_read_size(event),
3344 .pid = perf_event_pid(event, task),
3345 .tid = perf_event_tid(event, task),
3349 ret = perf_output_begin(&handle, event, read_event.header.size, 0, 0);
3353 perf_output_put(&handle, read_event);
3354 perf_output_read(&handle, event);
3356 perf_output_end(&handle);
3360 * task tracking -- fork/exit
3362 * enabled by: attr.comm | attr.mmap | attr.task
3365 struct perf_task_event {
3366 struct task_struct *task;
3367 struct perf_event_context *task_ctx;
3370 struct perf_event_header header;
3380 static void perf_event_task_output(struct perf_event *event,
3381 struct perf_task_event *task_event)
3383 struct perf_output_handle handle;
3384 struct task_struct *task = task_event->task;
3385 unsigned long flags;
3389 * If this CPU attempts to acquire an rq lock held by a CPU spinning
3390 * in perf_output_lock() from interrupt context, it's game over.
3392 local_irq_save(flags);
3394 size = task_event->event_id.header.size;
3395 ret = perf_output_begin(&handle, event, size, 0, 0);
3398 local_irq_restore(flags);
3402 task_event->event_id.pid = perf_event_pid(event, task);
3403 task_event->event_id.ppid = perf_event_pid(event, current);
3405 task_event->event_id.tid = perf_event_tid(event, task);
3406 task_event->event_id.ptid = perf_event_tid(event, current);
3408 perf_output_put(&handle, task_event->event_id);
3410 perf_output_end(&handle);
3411 local_irq_restore(flags);
3414 static int perf_event_task_match(struct perf_event *event)
3416 if (event->state < PERF_EVENT_STATE_INACTIVE)
3419 if (event->cpu != -1 && event->cpu != smp_processor_id())
3422 if (event->attr.comm || event->attr.mmap || event->attr.task)
3428 static void perf_event_task_ctx(struct perf_event_context *ctx,
3429 struct perf_task_event *task_event)
3431 struct perf_event *event;
3433 list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
3434 if (perf_event_task_match(event))
3435 perf_event_task_output(event, task_event);
3439 static void perf_event_task_event(struct perf_task_event *task_event)
3441 struct perf_cpu_context *cpuctx;
3442 struct perf_event_context *ctx = task_event->task_ctx;
3445 cpuctx = &get_cpu_var(perf_cpu_context);
3446 perf_event_task_ctx(&cpuctx->ctx, task_event);
3448 ctx = rcu_dereference(current->perf_event_ctxp);
3450 perf_event_task_ctx(ctx, task_event);
3451 put_cpu_var(perf_cpu_context);
3455 static void perf_event_task(struct task_struct *task,
3456 struct perf_event_context *task_ctx,
3459 struct perf_task_event task_event;
3461 if (!atomic_read(&nr_comm_events) &&
3462 !atomic_read(&nr_mmap_events) &&
3463 !atomic_read(&nr_task_events))
3466 task_event = (struct perf_task_event){
3468 .task_ctx = task_ctx,
3471 .type = new ? PERF_RECORD_FORK : PERF_RECORD_EXIT,
3473 .size = sizeof(task_event.event_id),
3479 .time = perf_clock(),
3483 perf_event_task_event(&task_event);
3486 void perf_event_fork(struct task_struct *task)
3488 perf_event_task(task, NULL, 1);
3495 struct perf_comm_event {
3496 struct task_struct *task;
3501 struct perf_event_header header;
3508 static void perf_event_comm_output(struct perf_event *event,
3509 struct perf_comm_event *comm_event)
3511 struct perf_output_handle handle;
3512 int size = comm_event->event_id.header.size;
3513 int ret = perf_output_begin(&handle, event, size, 0, 0);
3518 comm_event->event_id.pid = perf_event_pid(event, comm_event->task);
3519 comm_event->event_id.tid = perf_event_tid(event, comm_event->task);
3521 perf_output_put(&handle, comm_event->event_id);
3522 perf_output_copy(&handle, comm_event->comm,
3523 comm_event->comm_size);
3524 perf_output_end(&handle);
3527 static int perf_event_comm_match(struct perf_event *event)
3529 if (event->state < PERF_EVENT_STATE_INACTIVE)
3532 if (event->cpu != -1 && event->cpu != smp_processor_id())
3535 if (event->attr.comm)
3541 static void perf_event_comm_ctx(struct perf_event_context *ctx,
3542 struct perf_comm_event *comm_event)
3544 struct perf_event *event;
3546 list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
3547 if (perf_event_comm_match(event))
3548 perf_event_comm_output(event, comm_event);
3552 static void perf_event_comm_event(struct perf_comm_event *comm_event)
3554 struct perf_cpu_context *cpuctx;
3555 struct perf_event_context *ctx;
3557 char comm[TASK_COMM_LEN];
3559 memset(comm, 0, sizeof(comm));
3560 strlcpy(comm, comm_event->task->comm, sizeof(comm));
3561 size = ALIGN(strlen(comm)+1, sizeof(u64));
3563 comm_event->comm = comm;
3564 comm_event->comm_size = size;
3566 comm_event->event_id.header.size = sizeof(comm_event->event_id) + size;
3569 cpuctx = &get_cpu_var(perf_cpu_context);
3570 perf_event_comm_ctx(&cpuctx->ctx, comm_event);
3571 ctx = rcu_dereference(current->perf_event_ctxp);
3573 perf_event_comm_ctx(ctx, comm_event);
3574 put_cpu_var(perf_cpu_context);
3578 void perf_event_comm(struct task_struct *task)
3580 struct perf_comm_event comm_event;
3582 if (task->perf_event_ctxp)
3583 perf_event_enable_on_exec(task);
3585 if (!atomic_read(&nr_comm_events))
3588 comm_event = (struct perf_comm_event){
3594 .type = PERF_RECORD_COMM,
3603 perf_event_comm_event(&comm_event);
3610 struct perf_mmap_event {
3611 struct vm_area_struct *vma;
3613 const char *file_name;
3617 struct perf_event_header header;
3627 static void perf_event_mmap_output(struct perf_event *event,
3628 struct perf_mmap_event *mmap_event)
3630 struct perf_output_handle handle;
3631 int size = mmap_event->event_id.header.size;
3632 int ret = perf_output_begin(&handle, event, size, 0, 0);
3637 mmap_event->event_id.pid = perf_event_pid(event, current);
3638 mmap_event->event_id.tid = perf_event_tid(event, current);
3640 perf_output_put(&handle, mmap_event->event_id);
3641 perf_output_copy(&handle, mmap_event->file_name,
3642 mmap_event->file_size);
3643 perf_output_end(&handle);
3646 static int perf_event_mmap_match(struct perf_event *event,
3647 struct perf_mmap_event *mmap_event)
3649 if (event->state < PERF_EVENT_STATE_INACTIVE)
3652 if (event->cpu != -1 && event->cpu != smp_processor_id())
3655 if (event->attr.mmap)
3661 static void perf_event_mmap_ctx(struct perf_event_context *ctx,
3662 struct perf_mmap_event *mmap_event)
3664 struct perf_event *event;
3666 list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
3667 if (perf_event_mmap_match(event, mmap_event))
3668 perf_event_mmap_output(event, mmap_event);
3672 static void perf_event_mmap_event(struct perf_mmap_event *mmap_event)
3674 struct perf_cpu_context *cpuctx;
3675 struct perf_event_context *ctx;
3676 struct vm_area_struct *vma = mmap_event->vma;
3677 struct file *file = vma->vm_file;
3683 memset(tmp, 0, sizeof(tmp));
3687 * d_path works from the end of the buffer backwards, so we
3688 * need to add enough zero bytes after the string to handle
3689 * the 64bit alignment we do later.
3691 buf = kzalloc(PATH_MAX + sizeof(u64), GFP_KERNEL);
3693 name = strncpy(tmp, "//enomem", sizeof(tmp));
3696 name = d_path(&file->f_path, buf, PATH_MAX);
3698 name = strncpy(tmp, "//toolong", sizeof(tmp));
3702 if (arch_vma_name(mmap_event->vma)) {
3703 name = strncpy(tmp, arch_vma_name(mmap_event->vma),
3709 name = strncpy(tmp, "[vdso]", sizeof(tmp));
3713 name = strncpy(tmp, "//anon", sizeof(tmp));
3718 size = ALIGN(strlen(name)+1, sizeof(u64));
3720 mmap_event->file_name = name;
3721 mmap_event->file_size = size;
3723 mmap_event->event_id.header.size = sizeof(mmap_event->event_id) + size;
3726 cpuctx = &get_cpu_var(perf_cpu_context);
3727 perf_event_mmap_ctx(&cpuctx->ctx, mmap_event);
3728 ctx = rcu_dereference(current->perf_event_ctxp);
3730 perf_event_mmap_ctx(ctx, mmap_event);
3731 put_cpu_var(perf_cpu_context);
3737 void __perf_event_mmap(struct vm_area_struct *vma)
3739 struct perf_mmap_event mmap_event;
3741 if (!atomic_read(&nr_mmap_events))
3744 mmap_event = (struct perf_mmap_event){
3750 .type = PERF_RECORD_MMAP,
3756 .start = vma->vm_start,
3757 .len = vma->vm_end - vma->vm_start,
3758 .pgoff = (u64)vma->vm_pgoff << PAGE_SHIFT,
3762 perf_event_mmap_event(&mmap_event);
3766 * IRQ throttle logging
3769 static void perf_log_throttle(struct perf_event *event, int enable)
3771 struct perf_output_handle handle;
3775 struct perf_event_header header;
3779 } throttle_event = {
3781 .type = PERF_RECORD_THROTTLE,
3783 .size = sizeof(throttle_event),
3785 .time = perf_clock(),
3786 .id = primary_event_id(event),
3787 .stream_id = event->id,
3791 throttle_event.header.type = PERF_RECORD_UNTHROTTLE;
3793 ret = perf_output_begin(&handle, event, sizeof(throttle_event), 1, 0);
3797 perf_output_put(&handle, throttle_event);
3798 perf_output_end(&handle);
3802 * Generic event overflow handling, sampling.
3805 static int __perf_event_overflow(struct perf_event *event, int nmi,
3806 int throttle, struct perf_sample_data *data,
3807 struct pt_regs *regs)
3809 int events = atomic_read(&event->event_limit);
3810 struct hw_perf_event *hwc = &event->hw;
3813 throttle = (throttle && event->pmu->unthrottle != NULL);
3818 if (hwc->interrupts != MAX_INTERRUPTS) {
3820 if (HZ * hwc->interrupts >
3821 (u64)sysctl_perf_event_sample_rate) {
3822 hwc->interrupts = MAX_INTERRUPTS;
3823 perf_log_throttle(event, 0);
3828 * Keep re-disabling events even though on the previous
3829 * pass we disabled it - just in case we raced with a
3830 * sched-in and the event got enabled again:
3836 if (event->attr.freq) {
3837 u64 now = perf_clock();
3838 s64 delta = now - hwc->freq_time_stamp;
3840 hwc->freq_time_stamp = now;
3842 if (delta > 0 && delta < 2*TICK_NSEC)
3843 perf_adjust_period(event, delta, hwc->last_period);
3847 * XXX event_limit might not quite work as expected on inherited events.
3851 event->pending_kill = POLL_IN;
3852 if (events && atomic_dec_and_test(&event->event_limit)) {
3854 event->pending_kill = POLL_HUP;
3856 event->pending_disable = 1;
3857 perf_pending_queue(&event->pending,
3858 perf_pending_event);
3860 perf_event_disable(event);
3863 if (event->overflow_handler)
3864 event->overflow_handler(event, nmi, data, regs);
3866 perf_event_output(event, nmi, data, regs);
3871 int perf_event_overflow(struct perf_event *event, int nmi,
3872 struct perf_sample_data *data,
3873 struct pt_regs *regs)
3875 return __perf_event_overflow(event, nmi, 1, data, regs);
3879 * Generic software event infrastructure
3883 * We directly increment event->count and keep a second value in
3884 * event->hw.period_left to count intervals. This period counter
3885 * is kept in the range [-sample_period, 0] so that we can use the sign as trigger.
3889 static u64 perf_swevent_set_period(struct perf_event *event)
3891 struct hw_perf_event *hwc = &event->hw;
3892 u64 period = hwc->last_period;
3896 hwc->last_period = hwc->sample_period;
3899 old = val = atomic64_read(&hwc->period_left);
3903 nr = div64_u64(period + val, period);
3904 offset = nr * period;
3906 if (atomic64_cmpxchg(&hwc->period_left, old, val) != old)
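/*
 * Worked example: with period = 100 and period_left = 250 (i.e. events
 * have pushed the counter well past zero), nr = (100 + 250) / 100 = 3
 * periods have elapsed, and period_left becomes 250 - 300 = -50: three
 * overflows are reported and 50 events already count toward the next one.
 */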
3912 static void perf_swevent_overflow(struct perf_event *event, u64 overflow,
3913 int nmi, struct perf_sample_data *data,
3914 struct pt_regs *regs)
3916 struct hw_perf_event *hwc = &event->hw;
3919 data->period = event->hw.last_period;
3921 overflow = perf_swevent_set_period(event);
3923 if (hwc->interrupts == MAX_INTERRUPTS)
3926 for (; overflow; overflow--) {
3927 if (__perf_event_overflow(event, nmi, throttle,
3930 * We inhibit the overflow from happening when
3931 * hwc->interrupts == MAX_INTERRUPTS.
3939 static void perf_swevent_unthrottle(struct perf_event *event)
3942 * Nothing to do, we already reset hwc->interrupts.
3946 static void perf_swevent_add(struct perf_event *event, u64 nr,
3947 int nmi, struct perf_sample_data *data,
3948 struct pt_regs *regs)
3950 struct hw_perf_event *hwc = &event->hw;
3952 atomic64_add(nr, &event->count);
3957 if (!hwc->sample_period)
3960 if (nr == 1 && hwc->sample_period == 1 && !event->attr.freq)
3961 return perf_swevent_overflow(event, 1, nmi, data, regs);
3963 if (atomic64_add_negative(nr, &hwc->period_left))
3966 perf_swevent_overflow(event, 0, nmi, data, regs);
3969 static int perf_swevent_is_counting(struct perf_event *event)
3972 * The event is active, we're good!
3974 if (event->state == PERF_EVENT_STATE_ACTIVE)
3978 * The event is off/error, not counting.
3980 if (event->state != PERF_EVENT_STATE_INACTIVE)
3984 * The event is inactive; if the context is active we're part of
3985 * a group that didn't make it onto the 'pmu', so we're not counting.
3988 if (event->ctx->is_active)
3992 * We're inactive and the context is too, this means the
3993 * task is scheduled out, we're counting events that happen
3994 * to us, like migration events.
3999 static int perf_tp_event_match(struct perf_event *event,
4000 struct perf_sample_data *data);
4002 static int perf_exclude_event(struct perf_event *event,
4003 struct pt_regs *regs)
4006 if (event->attr.exclude_user && user_mode(regs))
4009 if (event->attr.exclude_kernel && !user_mode(regs))
4016 static int perf_swevent_match(struct perf_event *event,
4017 enum perf_type_id type,
4019 struct perf_sample_data *data,
4020 struct pt_regs *regs)
4022 if (event->cpu != -1 && event->cpu != smp_processor_id())
4025 if (!perf_swevent_is_counting(event))
4028 if (event->attr.type != type)
4031 if (event->attr.config != event_id)
4034 if (perf_exclude_event(event, regs))
4037 if (event->attr.type == PERF_TYPE_TRACEPOINT &&
4038 !perf_tp_event_match(event, data))
4044 static void perf_swevent_ctx_event(struct perf_event_context *ctx,
4045 enum perf_type_id type,
4046 u32 event_id, u64 nr, int nmi,
4047 struct perf_sample_data *data,
4048 struct pt_regs *regs)
4050 struct perf_event *event;
4052 list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
4053 if (perf_swevent_match(event, type, event_id, data, regs))
4054 perf_swevent_add(event, nr, nmi, data, regs);
4058 int perf_swevent_get_recursion_context(void)
4060 struct perf_cpu_context *cpuctx = &get_cpu_var(perf_cpu_context);
4067 else if (in_softirq())
4072 if (cpuctx->recursion[rctx]) {
4073 put_cpu_var(perf_cpu_context);
4077 cpuctx->recursion[rctx]++;
4082 EXPORT_SYMBOL_GPL(perf_swevent_get_recursion_context);
4084 void perf_swevent_put_recursion_context(int rctx)
4086 struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
4088 cpuctx->recursion[rctx]--;
4089 put_cpu_var(perf_cpu_context);
4091 EXPORT_SYMBOL_GPL(perf_swevent_put_recursion_context);
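/*
 * The per-cpu recursion[] array has one slot per execution context
 * (NMI, hardirq, softirq, task). A software event raised from within a
 * perf handler in the same context finds its slot already taken and is
 * dropped, while an event from a more urgent context (say, an NMI
 * interrupting a task-level handler) still gets through.
 */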
4093 static void do_perf_sw_event(enum perf_type_id type, u32 event_id,
4095 struct perf_sample_data *data,
4096 struct pt_regs *regs)
4098 struct perf_cpu_context *cpuctx;
4099 struct perf_event_context *ctx;
4101 cpuctx = &__get_cpu_var(perf_cpu_context);
4103 perf_swevent_ctx_event(&cpuctx->ctx, type, event_id,
4104 nr, nmi, data, regs);
4106 * doesn't really matter which of the child contexts the
4107 * event ends up in.
4109 ctx = rcu_dereference(current->perf_event_ctxp);
4111 perf_swevent_ctx_event(ctx, type, event_id, nr, nmi, data, regs);
4115 void __perf_sw_event(u32 event_id, u64 nr, int nmi,
4116 struct pt_regs *regs, u64 addr)
4118 struct perf_sample_data data;
4121 rctx = perf_swevent_get_recursion_context();
4125 perf_sample_data_init(&data, addr);
4127 do_perf_sw_event(PERF_TYPE_SOFTWARE, event_id, nr, nmi, &data, regs);
4129 perf_swevent_put_recursion_context(rctx);
4132 static void perf_swevent_read(struct perf_event *event)
4136 static int perf_swevent_enable(struct perf_event *event)
4138 struct hw_perf_event *hwc = &event->hw;
4140 if (hwc->sample_period) {
4141 hwc->last_period = hwc->sample_period;
4142 perf_swevent_set_period(event);
4147 static void perf_swevent_disable(struct perf_event *event)
4151 static const struct pmu perf_ops_generic = {
4152 .enable = perf_swevent_enable,
4153 .disable = perf_swevent_disable,
4154 .read = perf_swevent_read,
4155 .unthrottle = perf_swevent_unthrottle,
4159 * hrtimer based swevent callback
4162 static enum hrtimer_restart perf_swevent_hrtimer(struct hrtimer *hrtimer)
4164 enum hrtimer_restart ret = HRTIMER_RESTART;
4165 struct perf_sample_data data;
4166 struct pt_regs *regs;
4167 struct perf_event *event;
4170 event = container_of(hrtimer, struct perf_event, hw.hrtimer);
4171 event->pmu->read(event);
4173 perf_sample_data_init(&data, 0);
4174 data.period = event->hw.last_period;
4175 regs = get_irq_regs();
4177 * In case we exclude kernel IPs or are somehow not in interrupt
4178 * context, provide the next best thing, the user IP.
4180 if ((event->attr.exclude_kernel || !regs) &&
4181 !event->attr.exclude_user)
4182 regs = task_pt_regs(current);
4185 if (!(event->attr.exclude_idle && current->pid == 0))
4186 if (perf_event_overflow(event, 0, &data, regs))
4187 ret = HRTIMER_NORESTART;
4190 period = max_t(u64, 10000, event->hw.sample_period);
4191 hrtimer_forward_now(hrtimer, ns_to_ktime(period));
4196 static void perf_swevent_start_hrtimer(struct perf_event *event)
4198 struct hw_perf_event *hwc = &event->hw;
4200 hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
4201 hwc->hrtimer.function = perf_swevent_hrtimer;
4202 if (hwc->sample_period) {
4205 if (hwc->remaining) {
4206 if (hwc->remaining < 0)
4209 period = hwc->remaining;
4212 period = max_t(u64, 10000, hwc->sample_period);
4214 __hrtimer_start_range_ns(&hwc->hrtimer,
4215 ns_to_ktime(period), 0,
4216 HRTIMER_MODE_REL, 0);
4220 static void perf_swevent_cancel_hrtimer(struct perf_event *event)
4222 struct hw_perf_event *hwc = &event->hw;
4224 if (hwc->sample_period) {
4225 ktime_t remaining = hrtimer_get_remaining(&hwc->hrtimer);
4226 hwc->remaining = ktime_to_ns(remaining);
4228 hrtimer_cancel(&hwc->hrtimer);
4233 * Software event: cpu wall time clock
4236 static void cpu_clock_perf_event_update(struct perf_event *event)
4238 int cpu = raw_smp_processor_id();
4242 now = cpu_clock(cpu);
4243 prev = atomic64_xchg(&event->hw.prev_count, now);
4244 atomic64_add(now - prev, &event->count);
4247 static int cpu_clock_perf_event_enable(struct perf_event *event)
4249 struct hw_perf_event *hwc = &event->hw;
4250 int cpu = raw_smp_processor_id();
4252 atomic64_set(&hwc->prev_count, cpu_clock(cpu));
4253 perf_swevent_start_hrtimer(event);
4258 static void cpu_clock_perf_event_disable(struct perf_event *event)
4260 perf_swevent_cancel_hrtimer(event);
4261 cpu_clock_perf_event_update(event);
4264 static void cpu_clock_perf_event_read(struct perf_event *event)
4266 cpu_clock_perf_event_update(event);
4269 static const struct pmu perf_ops_cpu_clock = {
4270 .enable = cpu_clock_perf_event_enable,
4271 .disable = cpu_clock_perf_event_disable,
4272 .read = cpu_clock_perf_event_read,
4276 * Software event: task time clock
4279 static void task_clock_perf_event_update(struct perf_event *event, u64 now)
4284 prev = atomic64_xchg(&event->hw.prev_count, now);
4286 atomic64_add(delta, &event->count);
4289 static int task_clock_perf_event_enable(struct perf_event *event)
4291 struct hw_perf_event *hwc = &event->hw;
4294 now = event->ctx->time;
4296 atomic64_set(&hwc->prev_count, now);
4298 perf_swevent_start_hrtimer(event);
4303 static void task_clock_perf_event_disable(struct perf_event *event)
4305 perf_swevent_cancel_hrtimer(event);
4306 task_clock_perf_event_update(event, event->ctx->time);
4310 static void task_clock_perf_event_read(struct perf_event *event)
4315 update_context_time(event->ctx);
4316 time = event->ctx->time;
4318 u64 now = perf_clock();
4319 u64 delta = now - event->ctx->timestamp;
4320 time = event->ctx->time + delta;
4323 task_clock_perf_event_update(event, time);
4326 static const struct pmu perf_ops_task_clock = {
4327 .enable = task_clock_perf_event_enable,
4328 .disable = task_clock_perf_event_disable,
4329 .read = task_clock_perf_event_read,
4332 #ifdef CONFIG_EVENT_TRACING
4334 void perf_tp_event(int event_id, u64 addr, u64 count, void *record,
4335 int entry_size, struct pt_regs *regs)
4337 struct perf_sample_data data;
4338 struct perf_raw_record raw = {
4343 perf_sample_data_init(&data, addr);
4346 /* Trace events already protected against recursion */
4347 do_perf_sw_event(PERF_TYPE_TRACEPOINT, event_id, count, 1,
4350 EXPORT_SYMBOL_GPL(perf_tp_event);
4352 static int perf_tp_event_match(struct perf_event *event,
4353 struct perf_sample_data *data)
4355 void *record = data->raw->data;
4357 if (likely(!event->filter) || filter_match_preds(event->filter, record))
4362 static void tp_perf_event_destroy(struct perf_event *event)
4364 perf_trace_disable(event->attr.config);
4367 static const struct pmu *tp_perf_event_init(struct perf_event *event)
4370 * Raw tracepoint data is a severe data leak; only allow root to have these.
4373 if ((event->attr.sample_type & PERF_SAMPLE_RAW) &&
4374 perf_paranoid_tracepoint_raw() &&
4375 !capable(CAP_SYS_ADMIN))
4376 return ERR_PTR(-EPERM);
4378 if (perf_trace_enable(event->attr.config))
4381 event->destroy = tp_perf_event_destroy;
4383 return &perf_ops_generic;
4386 static int perf_event_set_filter(struct perf_event *event, void __user *arg)
4391 if (event->attr.type != PERF_TYPE_TRACEPOINT)
4394 filter_str = strndup_user(arg, PAGE_SIZE);
4395 if (IS_ERR(filter_str))
4396 return PTR_ERR(filter_str);
4398 ret = ftrace_profile_set_filter(event, event->attr.config, filter_str);
4404 static void perf_event_free_filter(struct perf_event *event)
4406 ftrace_profile_free_filter(event);
4411 static int perf_tp_event_match(struct perf_event *event,
4412 struct perf_sample_data *data)
4417 static const struct pmu *tp_perf_event_init(struct perf_event *event)
4422 static int perf_event_set_filter(struct perf_event *event, void __user *arg)
4427 static void perf_event_free_filter(struct perf_event *event)
4431 #endif /* CONFIG_EVENT_TRACING */
4433 #ifdef CONFIG_HAVE_HW_BREAKPOINT
4434 static void bp_perf_event_destroy(struct perf_event *event)
4436 release_bp_slot(event);
4439 static const struct pmu *bp_perf_event_init(struct perf_event *bp)
4443 err = register_perf_hw_breakpoint(bp);
4445 return ERR_PTR(err);
4447 bp->destroy = bp_perf_event_destroy;
4449 return &perf_ops_bp;
4452 void perf_bp_event(struct perf_event *bp, void *data)
4454 struct perf_sample_data sample;
4455 struct pt_regs *regs = data;
4457 perf_sample_data_init(&sample, bp->attr.bp_addr);
4459 if (!perf_exclude_event(bp, regs))
4460 perf_swevent_add(bp, 1, 1, &sample, regs);
4463 static const struct pmu *bp_perf_event_init(struct perf_event *bp)
4468 void perf_bp_event(struct perf_event *bp, void *regs)
4473 atomic_t perf_swevent_enabled[PERF_COUNT_SW_MAX];
4475 static void sw_perf_event_destroy(struct perf_event *event)
4477 u64 event_id = event->attr.config;
4479 WARN_ON(event->parent);
4481 atomic_dec(&perf_swevent_enabled[event_id]);
4484 static const struct pmu *sw_perf_event_init(struct perf_event *event)
4486 const struct pmu *pmu = NULL;
4487 u64 event_id = event->attr.config;
4490 * Software events (currently) can't in general distinguish
4491 * between user, kernel and hypervisor events.
4492 * However, context switches and cpu migrations are considered
4493 * to be kernel events, and page faults are never hypervisor events.
4497 case PERF_COUNT_SW_CPU_CLOCK:
4498 pmu = &perf_ops_cpu_clock;
4501 case PERF_COUNT_SW_TASK_CLOCK:
4503 * If the user instantiates this as a per-cpu event,
4504 * use the cpu_clock event instead.
4506 if (event->ctx->task)
4507 pmu = &perf_ops_task_clock;
4509 pmu = &perf_ops_cpu_clock;
4512 case PERF_COUNT_SW_PAGE_FAULTS:
4513 case PERF_COUNT_SW_PAGE_FAULTS_MIN:
4514 case PERF_COUNT_SW_PAGE_FAULTS_MAJ:
4515 case PERF_COUNT_SW_CONTEXT_SWITCHES:
4516 case PERF_COUNT_SW_CPU_MIGRATIONS:
4517 case PERF_COUNT_SW_ALIGNMENT_FAULTS:
4518 case PERF_COUNT_SW_EMULATION_FAULTS:
4519 if (!event->parent) {
4520 atomic_inc(&perf_swevent_enabled[event_id]);
4521 event->destroy = sw_perf_event_destroy;
4523 pmu = &perf_ops_generic;
4531 * Allocate and initialize an event structure
4533 static struct perf_event *
4534 perf_event_alloc(struct perf_event_attr *attr,
4536 struct perf_event_context *ctx,
4537 struct perf_event *group_leader,
4538 struct perf_event *parent_event,
4539 perf_overflow_handler_t overflow_handler,
4542 const struct pmu *pmu;
4543 struct perf_event *event;
4544 struct hw_perf_event *hwc;
4547 event = kzalloc(sizeof(*event), gfpflags);
4549 return ERR_PTR(-ENOMEM);
4552 * Single events are their own group leaders, with an
4553 * empty sibling list:
4556 group_leader = event;
4558 mutex_init(&event->child_mutex);
4559 INIT_LIST_HEAD(&event->child_list);
4561 INIT_LIST_HEAD(&event->group_entry);
4562 INIT_LIST_HEAD(&event->event_entry);
4563 INIT_LIST_HEAD(&event->sibling_list);
4564 init_waitqueue_head(&event->waitq);
4566 mutex_init(&event->mmap_mutex);
4569 event->attr = *attr;
4570 event->group_leader = group_leader;
4575 event->parent = parent_event;
4577 event->ns = get_pid_ns(current->nsproxy->pid_ns);
4578 event->id = atomic64_inc_return(&perf_event_id);
4580 event->state = PERF_EVENT_STATE_INACTIVE;
4582 if (!overflow_handler && parent_event)
4583 overflow_handler = parent_event->overflow_handler;
4585 event->overflow_handler = overflow_handler;
4588 event->state = PERF_EVENT_STATE_OFF;
4593 hwc->sample_period = attr->sample_period;
4594 if (attr->freq && attr->sample_freq)
4595 hwc->sample_period = 1;
4596 hwc->last_period = hwc->sample_period;
4598 atomic64_set(&hwc->period_left, hwc->sample_period);
4601 * we currently do not support PERF_FORMAT_GROUP on inherited events
4603 if (attr->inherit && (attr->read_format & PERF_FORMAT_GROUP))
4606 switch (attr->type) {
4608 case PERF_TYPE_HARDWARE:
4609 case PERF_TYPE_HW_CACHE:
4610 pmu = hw_perf_event_init(event);
4613 case PERF_TYPE_SOFTWARE:
4614 pmu = sw_perf_event_init(event);
4617 case PERF_TYPE_TRACEPOINT:
4618 pmu = tp_perf_event_init(event);
4621 case PERF_TYPE_BREAKPOINT:
4622 pmu = bp_perf_event_init(event);
4633 else if (IS_ERR(pmu))
4638 put_pid_ns(event->ns);
4640 return ERR_PTR(err);
4645 if (!event->parent) {
4646 atomic_inc(&nr_events);
4647 if (event->attr.mmap)
4648 atomic_inc(&nr_mmap_events);
4649 if (event->attr.comm)
4650 atomic_inc(&nr_comm_events);
4651 if (event->attr.task)
4652 atomic_inc(&nr_task_events);
4658 static int perf_copy_attr(struct perf_event_attr __user *uattr,
4659 struct perf_event_attr *attr)
4664 if (!access_ok(VERIFY_WRITE, uattr, PERF_ATTR_SIZE_VER0))
4668 * zero the full structure, so that a short copy will be nicely zero-padded.
4670 memset(attr, 0, sizeof(*attr));
4672 ret = get_user(size, &uattr->size);
4676 if (size > PAGE_SIZE) /* silly large */
4679 if (!size) /* abi compat */
4680 size = PERF_ATTR_SIZE_VER0;
4682 if (size < PERF_ATTR_SIZE_VER0)
4686 * If we're handed a bigger struct than we know of,
4687 * ensure all the unknown bits are 0 - i.e. new
4688 * user-space does not rely on any kernel feature
4689 * extensions we don't know about yet.
4691 if (size > sizeof(*attr)) {
4692 unsigned char __user *addr;
4693 unsigned char __user *end;
4696 addr = (void __user *)uattr + sizeof(*attr);
4697 end = (void __user *)uattr + size;
4699 for (; addr < end; addr++) {
4700 ret = get_user(val, addr);
4706 size = sizeof(*attr);
4709 ret = copy_from_user(attr, uattr, size);
4714 * If the type exists, the corresponding creation will verify the attr->config.
4717 if (attr->type >= PERF_TYPE_MAX)
4720 if (attr->__reserved_1)
4723 if (attr->sample_type & ~(PERF_SAMPLE_MAX-1))
4726 if (attr->read_format & ~(PERF_FORMAT_MAX-1))
4733 put_user(sizeof(*attr), &uattr->size);
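/*
 * The net effect: an old binary on a new kernel does a short copy that
 * the memset() above zero-pads, while a new binary on an old kernel is
 * accepted only if every byte beyond the kernel's sizeof(*attr) is zero,
 * i.e. it isn't actually relying on an extension we don't implement; in
 * the rejection path the kernel reports its own size back via uattr->size.
 */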
4738 static int perf_event_set_output(struct perf_event *event, int output_fd)
4740 struct perf_event *output_event = NULL;
4741 struct file *output_file = NULL;
4742 struct perf_event *old_output;
4743 int fput_needed = 0;
4749 output_file = fget_light(output_fd, &fput_needed);
4753 if (output_file->f_op != &perf_fops)
4756 output_event = output_file->private_data;
4758 /* Don't chain output fds */
4759 if (output_event->output)
4762 /* Don't set an output fd when we already have an output channel */
4766 atomic_long_inc(&output_file->f_count);
4769 mutex_lock(&event->mmap_mutex);
4770 old_output = event->output;
4771 rcu_assign_pointer(event->output, output_event);
4772 mutex_unlock(&event->mmap_mutex);
4776 * we need to make sure no existing perf_output_*()
4777 * is still referencing this event.
4780 fput(old_output->filp);
4785 fput_light(output_file, fput_needed);
4790 * sys_perf_event_open - open a performance event, associate it to a task/cpu
4792 * @attr_uptr: event_id type attributes for monitoring/sampling
4795 * @group_fd: group leader event fd
4797 SYSCALL_DEFINE5(perf_event_open,
4798 struct perf_event_attr __user *, attr_uptr,
4799 pid_t, pid, int, cpu, int, group_fd, unsigned long, flags)
4801 struct perf_event *event, *group_leader;
4802 struct perf_event_attr attr;
4803 struct perf_event_context *ctx;
4804 struct file *event_file = NULL;
4805 struct file *group_file = NULL;
4806 int fput_needed = 0;
4807 int fput_needed2 = 0;
4810 /* for future expandability... */
4811 if (flags & ~(PERF_FLAG_FD_NO_GROUP | PERF_FLAG_FD_OUTPUT))
4814 err = perf_copy_attr(attr_uptr, &attr);
4818 if (!attr.exclude_kernel) {
4819 if (perf_paranoid_kernel() && !capable(CAP_SYS_ADMIN))
4824 if (attr.sample_freq > sysctl_perf_event_sample_rate)
4829 * Get the target context (task or percpu):
4831 ctx = find_get_context(pid, cpu);
4833 return PTR_ERR(ctx);
4836 * Look up the group leader (we will attach this event to it):
4838 group_leader = NULL;
4839 if (group_fd != -1 && !(flags & PERF_FLAG_FD_NO_GROUP)) {
4841 group_file = fget_light(group_fd, &fput_needed);
4843 goto err_put_context;
4844 if (group_file->f_op != &perf_fops)
4845 goto err_put_context;
4847 group_leader = group_file->private_data;
4849 * Do not allow a recursive hierarchy (this new sibling
4850 * becoming part of another group-sibling):
4852 if (group_leader->group_leader != group_leader)
4853 goto err_put_context;
4855 * Do not allow attaching to a group in a different
4856 * task or CPU context:
4858 if (group_leader->ctx != ctx)
4859 goto err_put_context;
4861 * Only a group leader can be exclusive or pinned
4863 if (attr.exclusive || attr.pinned)
4864 goto err_put_context;
4867 event = perf_event_alloc(&attr, cpu, ctx, group_leader,
4868 NULL, NULL, GFP_KERNEL);
4869 err = PTR_ERR(event);
4871 goto err_put_context;
4873 err = anon_inode_getfd("[perf_event]", &perf_fops, event, O_RDWR);
4875 goto err_free_put_context;
4877 event_file = fget_light(err, &fput_needed2);
4879 goto err_free_put_context;
4881 if (flags & PERF_FLAG_FD_OUTPUT) {
4882 err = perf_event_set_output(event, group_fd);
4884 goto err_fput_free_put_context;
4887 event->filp = event_file;
4888 WARN_ON_ONCE(ctx->parent_ctx);
4889 mutex_lock(&ctx->mutex);
4890 perf_install_in_context(ctx, event, cpu);
4892 mutex_unlock(&ctx->mutex);
4894 event->owner = current;
4895 get_task_struct(current);
4896 mutex_lock(¤t->perf_event_mutex);
4897 list_add_tail(&event->owner_entry, ¤t->perf_event_list);
4898 mutex_unlock(¤t->perf_event_mutex);
4900 err_fput_free_put_context:
4901 fput_light(event_file, fput_needed2);
4903 err_free_put_context:
4911 fput_light(group_file, fput_needed);
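/*
 * User-space sketch (illustrative): glibc provides no wrapper for this
 * syscall, so callers typically invoke it directly:
 *
 *	struct perf_event_attr attr = {
 *		.type = PERF_TYPE_HARDWARE,
 *		.size = sizeof(attr),
 *		.config = PERF_COUNT_HW_CPU_CYCLES,
 *		.disabled = 1,
 *	};
 *	int fd = syscall(__NR_perf_event_open, &attr,
 *			 0, -1, -1, 0);	// pid = self, any cpu, no group
 */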
4917 * perf_event_create_kernel_counter
4919 * @attr: attributes of the counter to create
4920 * @cpu: cpu in which the counter is bound
4921 * @pid: task to profile
4924 perf_event_create_kernel_counter(struct perf_event_attr *attr, int cpu,
4926 perf_overflow_handler_t overflow_handler)
4928 struct perf_event *event;
4929 struct perf_event_context *ctx;
4933 * Get the target context (task or percpu):
4936 ctx = find_get_context(pid, cpu);
4942 event = perf_event_alloc(attr, cpu, ctx, NULL,
4943 NULL, overflow_handler, GFP_KERNEL);
4944 if (IS_ERR(event)) {
4945 err = PTR_ERR(event);
4946 goto err_put_context;
4950 WARN_ON_ONCE(ctx->parent_ctx);
4951 mutex_lock(&ctx->mutex);
4952 perf_install_in_context(ctx, event, cpu);
4954 mutex_unlock(&ctx->mutex);
4956 event->owner = current;
4957 get_task_struct(current);
4958 mutex_lock(¤t->perf_event_mutex);
4959 list_add_tail(&event->owner_entry, ¤t->perf_event_list);
4960 mutex_unlock(¤t->perf_event_mutex);
4967 return ERR_PTR(err);
4969 EXPORT_SYMBOL_GPL(perf_event_create_kernel_counter);
4972 * inherit an event from parent task to child task:
4974 static struct perf_event *
4975 inherit_event(struct perf_event *parent_event,
4976 struct task_struct *parent,
4977 struct perf_event_context *parent_ctx,
4978 struct task_struct *child,
4979 struct perf_event *group_leader,
4980 struct perf_event_context *child_ctx)
4982 struct perf_event *child_event;
4985 * Instead of creating recursive hierarchies of events,
4986 * we link inherited events back to the original parent,
4987 * which has a filp for sure, which we use as the reference count:
4990 if (parent_event->parent)
4991 parent_event = parent_event->parent;
4993 child_event = perf_event_alloc(&parent_event->attr,
4994 parent_event->cpu, child_ctx,
4995 group_leader, parent_event,
4997 if (IS_ERR(child_event))
5002 * Make the child state follow the state of the parent event,
5003 * not its attr.disabled bit. We hold the parent's mutex,
5004 * so we won't race with perf_event_{en, dis}able_family.
5006 if (parent_event->state >= PERF_EVENT_STATE_INACTIVE)
5007 child_event->state = PERF_EVENT_STATE_INACTIVE;
5009 child_event->state = PERF_EVENT_STATE_OFF;
5011 if (parent_event->attr.freq) {
5012 u64 sample_period = parent_event->hw.sample_period;
5013 struct hw_perf_event *hwc = &child_event->hw;
5015 hwc->sample_period = sample_period;
5016 hwc->last_period = sample_period;
5018 atomic64_set(&hwc->period_left, sample_period);
5021 child_event->overflow_handler = parent_event->overflow_handler;
5024 * Link it up in the child's context:
5026 add_event_to_ctx(child_event, child_ctx);
5029 * Get a reference to the parent filp - we will fput it
5030 * when the child event exits. This is safe to do because
5031 * we are in the parent and we know that the filp still
5032 * exists and has a nonzero count:
5034 atomic_long_inc(&parent_event->filp->f_count);
5037 * Link this into the parent event's child list
5039 WARN_ON_ONCE(parent_event->ctx->parent_ctx);
5040 mutex_lock(&parent_event->child_mutex);
5041 list_add_tail(&child_event->child_list, &parent_event->child_list);
5042 mutex_unlock(&parent_event->child_mutex);
5047 static int inherit_group(struct perf_event *parent_event,
5048 struct task_struct *parent,
5049 struct perf_event_context *parent_ctx,
5050 struct task_struct *child,
5051 struct perf_event_context *child_ctx)
5053 struct perf_event *leader;
5054 struct perf_event *sub;
5055 struct perf_event *child_ctr;
5057 leader = inherit_event(parent_event, parent, parent_ctx,
5058 child, NULL, child_ctx);
5060 return PTR_ERR(leader);
5061 list_for_each_entry(sub, &parent_event->sibling_list, group_entry) {
5062 child_ctr = inherit_event(sub, parent, parent_ctx,
5063 child, leader, child_ctx);
5064 if (IS_ERR(child_ctr))
5065 return PTR_ERR(child_ctr);
5070 static void sync_child_event(struct perf_event *child_event,
5071 struct task_struct *child)
5073 struct perf_event *parent_event = child_event->parent;
5076 if (child_event->attr.inherit_stat)
5077 perf_event_read_event(child_event, child);
5079 child_val = atomic64_read(&child_event->count);
5082 * Add back the child's count to the parent's count:
5084 atomic64_add(child_val, &parent_event->count);
5085 atomic64_add(child_event->total_time_enabled,
5086 &parent_event->child_total_time_enabled);
5087 atomic64_add(child_event->total_time_running,
5088 &parent_event->child_total_time_running);
5091 * Remove this event from the parent's list
5093 WARN_ON_ONCE(parent_event->ctx->parent_ctx);
5094 mutex_lock(&parent_event->child_mutex);
5095 list_del_init(&child_event->child_list);
5096 mutex_unlock(&parent_event->child_mutex);
5099 * Release the parent event, if this was the last reference to it.
5102 fput(parent_event->filp);
5106 __perf_event_exit_task(struct perf_event *child_event,
5107 struct perf_event_context *child_ctx,
5108 struct task_struct *child)
5110 struct perf_event *parent_event;
5112 perf_event_remove_from_context(child_event);
5114 parent_event = child_event->parent;
5116 * It can happen that parent exits first, and has events
5117 * that are still around due to the child reference. These
5118 * events need to be zapped - but otherwise linger.
5121 sync_child_event(child_event, child);
5122 free_event(child_event);
5127 * When a child task exits, feed back event values to parent events.
5129 void perf_event_exit_task(struct task_struct *child)
5131 struct perf_event *child_event, *tmp;
5132 struct perf_event_context *child_ctx;
5133 unsigned long flags;
5135 if (likely(!child->perf_event_ctxp)) {
5136 perf_event_task(child, NULL, 0);
5140 local_irq_save(flags);
5142 * We can't reschedule here because interrupts are disabled,
5143 * and either child is current or it is a task that can't be
5144 * scheduled, so we are now safe from rescheduling changing our context.
5147 child_ctx = child->perf_event_ctxp;
5148 __perf_event_task_sched_out(child_ctx);
5151 * Take the context lock here so that if find_get_context is
5152 * reading child->perf_event_ctxp, we wait until it has
5153 * incremented the context's refcount before we do put_ctx below.
5155 raw_spin_lock(&child_ctx->lock);
5156 child->perf_event_ctxp = NULL;
5158 * If this context is a clone; unclone it so it can't get
5159 * swapped to another process while we're removing all
5160 * the events from it.
5162 unclone_ctx(child_ctx);
5163 update_context_time(child_ctx);
5164 raw_spin_unlock_irqrestore(&child_ctx->lock, flags);
5167 * Report the task dead after unscheduling the events so that we
5168 * won't get any samples after PERF_RECORD_EXIT. We can however still
5169 * get a few PERF_RECORD_READ events.
5171 perf_event_task(child, child_ctx, 0);
5174 * We can recurse on the same lock type through:
5176 * __perf_event_exit_task()
5177 * sync_child_event()
5178 * fput(parent_event->filp)
5180 * mutex_lock(&ctx->mutex)
5182 * But since it's the parent context it won't be the same instance.
5184 mutex_lock_nested(&child_ctx->mutex, SINGLE_DEPTH_NESTING);
5187 list_for_each_entry_safe(child_event, tmp, &child_ctx->pinned_groups,
5189 __perf_event_exit_task(child_event, child_ctx, child);
5191 list_for_each_entry_safe(child_event, tmp, &child_ctx->flexible_groups,
5193 __perf_event_exit_task(child_event, child_ctx, child);
5196 * If the last event was a group event, it will have appended all
5197 * its siblings to the list, but we obtained 'tmp' before that which
5198 * will still point to the list head terminating the iteration.
5200 if (!list_empty(&child_ctx->pinned_groups) ||
5201 !list_empty(&child_ctx->flexible_groups))
5204 mutex_unlock(&child_ctx->mutex);
5209 static void perf_free_event(struct perf_event *event,
5210 struct perf_event_context *ctx)
5212 struct perf_event *parent = event->parent;
5214 if (WARN_ON_ONCE(!parent))
5217 mutex_lock(&parent->child_mutex);
5218 list_del_init(&event->child_list);
5219 mutex_unlock(&parent->child_mutex);
5223 list_del_event(event, ctx);
5228 * free an unexposed, unused context, as created by inheritance in
5229 * perf_event_init_task() below; used by fork() in case of failure.
5231 void perf_event_free_task(struct task_struct *task)
5233 struct perf_event_context *ctx = task->perf_event_ctxp;
5234 struct perf_event *event, *tmp;
5239 mutex_lock(&ctx->mutex);
5241 list_for_each_entry_safe(event, tmp, &ctx->pinned_groups, group_entry)
5242 perf_free_event(event, ctx);
5244 list_for_each_entry_safe(event, tmp, &ctx->flexible_groups,
5246 perf_free_event(event, ctx);
5248 if (!list_empty(&ctx->pinned_groups) ||
5249 !list_empty(&ctx->flexible_groups))
5252 mutex_unlock(&ctx->mutex);
5258 inherit_task_group(struct perf_event *event, struct task_struct *parent,
5259 struct perf_event_context *parent_ctx,
5260 struct task_struct *child,
5264 struct perf_event_context *child_ctx = child->perf_event_ctxp;
5266 if (!event->attr.inherit) {
5273 * This is executed from the parent task context, so
5274 * inherit events that have been marked for cloning.
5275 * First allocate and initialize a context for the child.
5279 child_ctx = kzalloc(sizeof(struct perf_event_context),
5284 __perf_event_init_context(child_ctx, child);
5285 child->perf_event_ctxp = child_ctx;
5286 get_task_struct(child);
5289 ret = inherit_group(event, parent, parent_ctx,
5300 * Initialize the perf_event context in task_struct
5302 int perf_event_init_task(struct task_struct *child)
5304 struct perf_event_context *child_ctx, *parent_ctx;
5305 struct perf_event_context *cloned_ctx;
5306 struct perf_event *event;
5307 struct task_struct *parent = current;
5308 int inherited_all = 1;
5311 child->perf_event_ctxp = NULL;
5313 mutex_init(&child->perf_event_mutex);
5314 INIT_LIST_HEAD(&child->perf_event_list);
5316 if (likely(!parent->perf_event_ctxp))
5320 * If the parent's context is a clone, pin it so it won't get swapped under us.
5323 parent_ctx = perf_pin_task_context(parent);
5326 * No need to check if parent_ctx != NULL here; since we saw
5327 * it non-NULL earlier, the only reason for it to become NULL
5328 * is if we exit, and since we're currently in the middle of
5329 * a fork we can't be exiting at the same time.
5333 * Lock the parent list. No need to lock the child - not PID
5334 * hashed yet and not running, so nobody can access it.
5336 mutex_lock(&parent_ctx->mutex);
	/*
	 * We don't have to disable NMIs - we are only looking at
	 * the list, not manipulating it:
	 */
	list_for_each_entry(event, &parent_ctx->pinned_groups, group_entry) {
		ret = inherit_task_group(event, parent, parent_ctx, child,
					 &inherited_all);
		if (ret)
			break;
	}

	list_for_each_entry(event, &parent_ctx->flexible_groups, group_entry) {
		ret = inherit_task_group(event, parent, parent_ctx, child,
					 &inherited_all);
		if (ret)
			break;
	}

	child_ctx = child->perf_event_ctxp;

	if (child_ctx && inherited_all) {
		/*
		 * Mark the child context as a clone of the parent
		 * context, or of whatever the parent is a clone of.
		 * Note that if the parent is a clone, it could get
		 * uncloned at any point, but that doesn't matter
		 * because the list of events and the generation
		 * count can't have changed since we took the mutex.
		 */
		cloned_ctx = rcu_dereference(parent_ctx->parent_ctx);
		if (cloned_ctx) {
			child_ctx->parent_ctx = cloned_ctx;
			child_ctx->parent_gen = parent_ctx->parent_gen;
		} else {
			child_ctx->parent_ctx = parent_ctx;
			child_ctx->parent_gen = parent_ctx->generation;
		}
		get_ctx(child_ctx->parent_ctx);
	}

	mutex_unlock(&parent_ctx->mutex);

	perf_unpin_context(parent_ctx);

	return ret;
}
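
/*
 * Boot-time setup of the per-CPU contexts; runs before any events exist
 * and before the hotplug notifier below is registered.
 */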
static void __init perf_event_init_all_cpus(void)
{
	int cpu;
	struct perf_cpu_context *cpuctx;

	for_each_possible_cpu(cpu) {
		cpuctx = &per_cpu(perf_cpu_context, cpu);
		__perf_event_init_context(&cpuctx->ctx, NULL);
	}
}
static void __cpuinit perf_event_init_cpu(int cpu)
{
	struct perf_cpu_context *cpuctx;

	cpuctx = &per_cpu(perf_cpu_context, cpu);

	spin_lock(&perf_resource_lock);
	cpuctx->max_pertask = perf_max_events - perf_reserved_percpu;
	spin_unlock(&perf_resource_lock);
}
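
/*
 * CPU hotplug teardown: __perf_event_exit_cpu() runs on the CPU being
 * offlined, via smp_call_function_single() below, which is what makes
 * its __get_cpu_var() access to the local context safe.
 */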
#ifdef CONFIG_HOTPLUG_CPU
static void __perf_event_exit_cpu(void *info)
{
	struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
	struct perf_event_context *ctx = &cpuctx->ctx;
	struct perf_event *event, *tmp;

	list_for_each_entry_safe(event, tmp, &ctx->pinned_groups, group_entry)
		__perf_event_remove_from_context(event);
	list_for_each_entry_safe(event, tmp, &ctx->flexible_groups, group_entry)
		__perf_event_remove_from_context(event);
}
static void perf_event_exit_cpu(int cpu)
{
	struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
	struct perf_event_context *ctx = &cpuctx->ctx;

	mutex_lock(&ctx->mutex);
	smp_call_function_single(cpu, __perf_event_exit_cpu, NULL, 1);
	mutex_unlock(&ctx->mutex);
}
#else
static inline void perf_event_exit_cpu(int cpu) { }
#endif
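
/*
 * Hotplug callback: set up a CPU's event reservation before it comes up,
 * drain its context before it goes down; all other actions are ignored.
 */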
static int __cpuinit
perf_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu)
{
	unsigned int cpu = (long)hcpu;

	switch (action) {

	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		perf_event_init_cpu(cpu);
		break;

	case CPU_DOWN_PREPARE:
	case CPU_DOWN_PREPARE_FROZEN:
		perf_event_exit_cpu(cpu);
		break;

	default:
		break;
	}

	return NOTIFY_OK;
}
/*
 * This has to have a higher priority than migration_notifier in sched.c.
 */
static struct notifier_block __cpuinitdata perf_cpu_nb = {
	.notifier_call		= perf_cpu_notify,
	.priority		= 20,
};
void __init perf_event_init(void)
{
	perf_event_init_all_cpus();
	perf_cpu_notify(&perf_cpu_nb, (unsigned long)CPU_UP_PREPARE,
			(void *)(long)smp_processor_id());
	perf_cpu_notify(&perf_cpu_nb, (unsigned long)CPU_ONLINE,
			(void *)(long)smp_processor_id());
	register_cpu_notifier(&perf_cpu_nb);
}
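
/*
 * sysfs interface for the 'reserve_percpu' and 'overcommit' resource
 * knobs; all writers serialize on perf_resource_lock.
 */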
static ssize_t perf_show_reserve_percpu(struct sysdev_class *class,
					struct sysdev_class_attribute *attr,
					char *buf)
{
	return sprintf(buf, "%d\n", perf_reserved_percpu);
}
static ssize_t
perf_set_reserve_percpu(struct sysdev_class *class,
			struct sysdev_class_attribute *attr,
			const char *buf,
			size_t count)
{
	struct perf_cpu_context *cpuctx;
	unsigned long val;
	int err, cpu, mpt;

	err = strict_strtoul(buf, 10, &val);
	if (err)
		return err;
	if (val > perf_max_events)
		return -EINVAL;

	spin_lock(&perf_resource_lock);
	perf_reserved_percpu = val;
	for_each_online_cpu(cpu) {
		cpuctx = &per_cpu(perf_cpu_context, cpu);
		raw_spin_lock_irq(&cpuctx->ctx.lock);
		mpt = min(perf_max_events - cpuctx->ctx.nr_events,
			  perf_max_events - perf_reserved_percpu);
		cpuctx->max_pertask = mpt;
		raw_spin_unlock_irq(&cpuctx->ctx.lock);
	}
	spin_unlock(&perf_resource_lock);

	return count;
}
static ssize_t perf_show_overcommit(struct sysdev_class *class,
				    struct sysdev_class_attribute *attr,
				    char *buf)
{
	return sprintf(buf, "%d\n", perf_overcommit);
}
static ssize_t
perf_set_overcommit(struct sysdev_class *class,
		    struct sysdev_class_attribute *attr,
		    const char *buf, size_t count)
{
	unsigned long val;
	int err;

	err = strict_strtoul(buf, 10, &val);
	if (err)
		return err;
	if (val > 1)
		return -EINVAL;

	spin_lock(&perf_resource_lock);
	perf_overcommit = val;
	spin_unlock(&perf_resource_lock);

	return count;
}
static SYSDEV_CLASS_ATTR(
				reserve_percpu,
				0644,
				perf_show_reserve_percpu,
				perf_set_reserve_percpu
			);

static SYSDEV_CLASS_ATTR(
				overcommit,
				0644,
				perf_show_overcommit,
				perf_set_overcommit
			);
static struct attribute *perfclass_attrs[] = {
	&attr_reserve_percpu.attr,
	&attr_overcommit.attr,
	NULL
};

static struct attribute_group perfclass_attr_group = {
	.attrs			= perfclass_attrs,
	.name			= "perf_events",
};
static int __init perf_event_sysfs_init(void)
{
	return sysfs_create_group(&cpu_sysdev_class.kset.kobj,
				  &perfclass_attr_group);
}
device_initcall(perf_event_sysfs_init);
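
/*
 * A usage sketch (assuming the usual sysdev layout, where cpu_sysdev_class
 * is rooted at /sys/devices/system/cpu):
 *
 *	# cat /sys/devices/system/cpu/perf_events/reserve_percpu
 *	0
 *	# echo 1 > /sys/devices/system/cpu/perf_events/overcommit
 *
 * Out-of-range writes (> perf_max_events for reserve_percpu, > 1 for
 * overcommit) fail with -EINVAL.
 */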