arch/ia64/kernel/perfmon.c
1/*
2 * This file implements the perfmon-2 subsystem which is used
3 * to program the IA-64 Performance Monitoring Unit (PMU).
4 *
5 * The initial version of perfmon.c was written by
6 * Ganesh Venkitachalam, IBM Corp.
7 *
8 * Then it was modified for perfmon-1.x by Stephane Eranian and
9 * David Mosberger, Hewlett Packard Co.
10 *
11 * Version Perfmon-2.x is a rewrite of perfmon-1.x
12 * by Stephane Eranian, Hewlett Packard Co.
13 *
14 * Copyright (C) 1999-2003, 2005 Hewlett Packard Co
15 * Stephane Eranian <eranian@hpl.hp.com>
16 * David Mosberger-Tang <davidm@hpl.hp.com>
17 *
18 * More information about perfmon available at:
19 * http://www.hpl.hp.com/research/linux/perfmon
20 */
21
22#include <linux/config.h>
23#include <linux/module.h>
24#include <linux/kernel.h>
25#include <linux/sched.h>
26#include <linux/interrupt.h>
27#include <linux/smp_lock.h>
28#include <linux/proc_fs.h>
29#include <linux/seq_file.h>
30#include <linux/init.h>
31#include <linux/vmalloc.h>
32#include <linux/mm.h>
33#include <linux/sysctl.h>
34#include <linux/list.h>
35#include <linux/file.h>
36#include <linux/poll.h>
37#include <linux/vfs.h>
38#include <linux/pagemap.h>
39#include <linux/mount.h>
40#include <linux/version.h>
41#include <linux/bitops.h>
42
43#include <asm/errno.h>
44#include <asm/intrinsics.h>
45#include <asm/page.h>
46#include <asm/perfmon.h>
47#include <asm/processor.h>
48#include <asm/signal.h>
49#include <asm/system.h>
50#include <asm/uaccess.h>
51#include <asm/delay.h>
52
53#ifdef CONFIG_PERFMON
54/*
55 * perfmon context state
56 */
57#define PFM_CTX_UNLOADED 1 /* context is not loaded onto any task */
58#define PFM_CTX_LOADED 2 /* context is loaded onto a task */
59#define PFM_CTX_MASKED 3 /* context is loaded but monitoring is masked due to overflow */
60#define PFM_CTX_ZOMBIE 4 /* owner of the context is closing it */
61
62#define PFM_INVALID_ACTIVATION (~0UL)
63
64/*
65 * depth of message queue
66 */
67#define PFM_MAX_MSGS 32
68#define PFM_CTXQ_EMPTY(g) ((g)->ctx_msgq_head == (g)->ctx_msgq_tail)
69
70/*
71 * type of a PMU register (bitmask).
72 * bitmask structure:
73 * bit0 : register implemented
74 * bit1 : end marker
75 * bit2-3 : reserved
76 * bit4 : pmc has pmc.pm
77 * bit5 : pmc controls a counter (has pmc.oi), pmd is used as counter
78 * bit6-7 : register type
79 * bit8-31: reserved
80 */
81#define PFM_REG_NOTIMPL 0x0 /* not implemented at all */
82#define PFM_REG_IMPL 0x1 /* register implemented */
83#define PFM_REG_END 0x2 /* end marker */
84#define PFM_REG_MONITOR (0x1<<4|PFM_REG_IMPL) /* a PMC with a pmc.pm field only */
85#define PFM_REG_COUNTING (0x2<<4|PFM_REG_MONITOR) /* a monitor + pmc.oi+ PMD used as a counter */
86#define PFM_REG_CONTROL (0x4<<4|PFM_REG_IMPL) /* PMU control register */
87#define PFM_REG_CONFIG (0x8<<4|PFM_REG_IMPL) /* configuration register */
88#define PFM_REG_BUFFER (0xc<<4|PFM_REG_IMPL) /* PMD used as buffer */
89
90#define PMC_IS_LAST(i) (pmu_conf->pmc_desc[i].type & PFM_REG_END)
91#define PMD_IS_LAST(i) (pmu_conf->pmd_desc[i].type & PFM_REG_END)
92
93#define PMC_OVFL_NOTIFY(ctx, i) ((ctx)->ctx_pmds[i].flags & PFM_REGFL_OVFL_NOTIFY)
94
95/* i assumed unsigned */
96#define PMC_IS_IMPL(i) (i< PMU_MAX_PMCS && (pmu_conf->pmc_desc[i].type & PFM_REG_IMPL))
97#define PMD_IS_IMPL(i) (i< PMU_MAX_PMDS && (pmu_conf->pmd_desc[i].type & PFM_REG_IMPL))
98
99/* XXX: these assume that register i is implemented */
100#define PMD_IS_COUNTING(i) ((pmu_conf->pmd_desc[i].type & PFM_REG_COUNTING) == PFM_REG_COUNTING)
101#define PMC_IS_COUNTING(i) ((pmu_conf->pmc_desc[i].type & PFM_REG_COUNTING) == PFM_REG_COUNTING)
102#define PMC_IS_MONITOR(i) ((pmu_conf->pmc_desc[i].type & PFM_REG_MONITOR) == PFM_REG_MONITOR)
103#define PMC_IS_CONTROL(i) ((pmu_conf->pmc_desc[i].type & PFM_REG_CONTROL) == PFM_REG_CONTROL)
104
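/*
 * Worked example (added for clarity, not in the original source): the type
 * values compose by OR-ing, so PFM_REG_COUNTING = (0x2<<4)|PFM_REG_MONITOR
 * = 0x20|0x11 = 0x31. A counting PMC therefore passes all three tests:
 *
 *	PMC_IS_IMPL(i)     checks type & 0x01           -> true
 *	PMC_IS_MONITOR(i)  checks (type & 0x11) == 0x11 -> true
 *	PMC_IS_COUNTING(i) checks (type & 0x31) == 0x31 -> true
 *
 * whereas a plain monitor (type 0x11) fails only the last test.
 */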
105#define PMC_DFL_VAL(i) pmu_conf->pmc_desc[i].default_value
106#define PMC_RSVD_MASK(i) pmu_conf->pmc_desc[i].reserved_mask
107#define PMD_PMD_DEP(i) pmu_conf->pmd_desc[i].dep_pmd[0]
108#define PMC_PMD_DEP(i) pmu_conf->pmc_desc[i].dep_pmd[0]
109
110#define PFM_NUM_IBRS IA64_NUM_DBG_REGS
111#define PFM_NUM_DBRS IA64_NUM_DBG_REGS
112
113#define CTX_OVFL_NOBLOCK(c) ((c)->ctx_fl_block == 0)
114#define CTX_HAS_SMPL(c) ((c)->ctx_fl_is_sampling)
115#define PFM_CTX_TASK(h) (h)->ctx_task
116
117#define PMU_PMC_OI 5 /* position of pmc.oi bit */
118
119/* XXX: does not support more than 64 PMDs */
120#define CTX_USED_PMD(ctx, mask) (ctx)->ctx_used_pmds[0] |= (mask)
121#define CTX_IS_USED_PMD(ctx, c) (((ctx)->ctx_used_pmds[0] & (1UL << (c))) != 0UL)
122
123#define CTX_USED_MONITOR(ctx, mask) (ctx)->ctx_used_monitors[0] |= (mask)
124
125#define CTX_USED_IBR(ctx,n) (ctx)->ctx_used_ibrs[(n)>>6] |= 1UL<< ((n) % 64)
126#define CTX_USED_DBR(ctx,n) (ctx)->ctx_used_dbrs[(n)>>6] |= 1UL<< ((n) % 64)
127#define CTX_USES_DBREGS(ctx) (((pfm_context_t *)(ctx))->ctx_fl_using_dbreg==1)
128#define PFM_CODE_RR 0 /* requesting code range restriction */
 129#define PFM_DATA_RR 1 /* requesting data range restriction */
130
131#define PFM_CPUINFO_CLEAR(v) pfm_get_cpu_var(pfm_syst_info) &= ~(v)
132#define PFM_CPUINFO_SET(v) pfm_get_cpu_var(pfm_syst_info) |= (v)
133#define PFM_CPUINFO_GET() pfm_get_cpu_var(pfm_syst_info)
134
135#define RDEP(x) (1UL<<(x))
136
137/*
138 * context protection macros
139 * in SMP:
140 * - we need to protect against CPU concurrency (spin_lock)
141 * - we need to protect against PMU overflow interrupts (local_irq_disable)
142 * in UP:
143 * - we need to protect against PMU overflow interrupts (local_irq_disable)
144 *
 145 * spin_lock_irqsave()/spin_unlock_irqrestore():
146 * in SMP: local_irq_disable + spin_lock
147 * in UP : local_irq_disable
148 *
 149 * spin_lock()/spin_unlock():
150 * in UP : removed automatically
151 * in SMP: protect against context accesses from other CPU. interrupts
152 * are not masked. This is useful for the PMU interrupt handler
153 * because we know we will not get PMU concurrency in that code.
154 */
155#define PROTECT_CTX(c, f) \
156 do { \
157 DPRINT(("spinlock_irq_save ctx %p by [%d]\n", c, current->pid)); \
158 spin_lock_irqsave(&(c)->ctx_lock, f); \
159 DPRINT(("spinlocked ctx %p by [%d]\n", c, current->pid)); \
160 } while(0)
161
162#define UNPROTECT_CTX(c, f) \
163 do { \
164 DPRINT(("spinlock_irq_restore ctx %p by [%d]\n", c, current->pid)); \
165 spin_unlock_irqrestore(&(c)->ctx_lock, f); \
166 } while(0)
167
168#define PROTECT_CTX_NOPRINT(c, f) \
169 do { \
170 spin_lock_irqsave(&(c)->ctx_lock, f); \
171 } while(0)
172
173
174#define UNPROTECT_CTX_NOPRINT(c, f) \
175 do { \
176 spin_unlock_irqrestore(&(c)->ctx_lock, f); \
177 } while(0)
178
179
180#define PROTECT_CTX_NOIRQ(c) \
181 do { \
182 spin_lock(&(c)->ctx_lock); \
183 } while(0)
184
185#define UNPROTECT_CTX_NOIRQ(c) \
186 do { \
187 spin_unlock(&(c)->ctx_lock); \
188 } while(0)
189
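/*
 * Usage sketch (illustrative, not part of the original source): most
 * command paths take the lock with interrupts masked,
 *
 *	unsigned long flags;
 *	PROTECT_CTX(ctx, flags);
 *	... access the context ...
 *	UNPROTECT_CTX(ctx, flags);
 *
 * while the PMU overflow handler, which cannot be interrupted by another
 * PMU interrupt, can use the cheaper PROTECT_CTX_NOIRQ(ctx)/
 * UNPROTECT_CTX_NOIRQ(ctx) pair, as explained in the comment above.
 */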
190
191#ifdef CONFIG_SMP
192
193#define GET_ACTIVATION() pfm_get_cpu_var(pmu_activation_number)
194#define INC_ACTIVATION() pfm_get_cpu_var(pmu_activation_number)++
195#define SET_ACTIVATION(c) (c)->ctx_last_activation = GET_ACTIVATION()
196
197#else /* !CONFIG_SMP */
198#define SET_ACTIVATION(t) do {} while(0)
199#define GET_ACTIVATION(t) do {} while(0)
200#define INC_ACTIVATION(t) do {} while(0)
201#endif /* CONFIG_SMP */
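/*
 * Note (added for clarity): each CPU keeps a running pmu_activation_number
 * and a loaded context records it in ctx_last_activation (see the
 * pfm_context definition below). Comparing the two later tells whether the
 * PMU state left on this CPU still belongs to this context;
 * PFM_INVALID_ACTIVATION serves as the "never activated here" marker.
 */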
202
203#define SET_PMU_OWNER(t, c) do { pfm_get_cpu_var(pmu_owner) = (t); pfm_get_cpu_var(pmu_ctx) = (c); } while(0)
204#define GET_PMU_OWNER() pfm_get_cpu_var(pmu_owner)
205#define GET_PMU_CTX() pfm_get_cpu_var(pmu_ctx)
206
207#define LOCK_PFS(g) spin_lock_irqsave(&pfm_sessions.pfs_lock, g)
208#define UNLOCK_PFS(g) spin_unlock_irqrestore(&pfm_sessions.pfs_lock, g)
209
210#define PFM_REG_RETFLAG_SET(flags, val) do { flags &= ~PFM_REG_RETFL_MASK; flags |= (val); } while(0)
211
212/*
213 * cmp0 must be the value of pmc0
214 */
215#define PMC0_HAS_OVFL(cmp0) (cmp0 & ~0x1UL)
216
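/*
 * Example (added for clarity): pfm_freeze_pmu() below writes 1 into pmc0,
 * i.e. bit 0 is the freeze bit, so only the remaining bits can report an
 * overflow. Hence PMC0_HAS_OVFL(0x1UL) is 0 (frozen, nothing overflowed)
 * while PMC0_HAS_OVFL(0x11UL) is non-zero.
 */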
217#define PFMFS_MAGIC 0xa0b4d889
218
219/*
220 * debugging
221 */
222#define PFM_DEBUGGING 1
223#ifdef PFM_DEBUGGING
224#define DPRINT(a) \
225 do { \
226 if (unlikely(pfm_sysctl.debug >0)) { printk("%s.%d: CPU%d [%d] ", __FUNCTION__, __LINE__, smp_processor_id(), current->pid); printk a; } \
227 } while (0)
228
229#define DPRINT_ovfl(a) \
230 do { \
231 if (unlikely(pfm_sysctl.debug > 0 && pfm_sysctl.debug_ovfl >0)) { printk("%s.%d: CPU%d [%d] ", __FUNCTION__, __LINE__, smp_processor_id(), current->pid); printk a; } \
232 } while (0)
233#endif
234
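/*
 * Usage note (added for clarity): the argument is forwarded verbatim to
 * printk(), hence the doubled parentheses at every call site, e.g.
 *
 *	DPRINT(("ctx=%p state=%d\n", ctx, ctx->ctx_state));
 *
 * Output appears only when pfm_sysctl.debug (and, for DPRINT_ovfl,
 * pfm_sysctl.debug_ovfl) is non-zero; see the sysctl table further down.
 */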
235/*
236 * 64-bit software counter structure
237 *
238 * the next_reset_type is applied to the next call to pfm_reset_regs()
239 */
240typedef struct {
241 unsigned long val; /* virtual 64bit counter value */
242 unsigned long lval; /* last reset value */
243 unsigned long long_reset; /* reset value on sampling overflow */
244 unsigned long short_reset; /* reset value on overflow */
245 unsigned long reset_pmds[4]; /* which other pmds to reset when this counter overflows */
246 unsigned long smpl_pmds[4]; /* which pmds are accessed when counter overflow */
247 unsigned long seed; /* seed for random-number generator */
248 unsigned long mask; /* mask for random-number generator */
249 unsigned int flags; /* notify/do not notify */
250 unsigned long eventid; /* overflow event identifier */
251} pfm_counter_t;
252
253/*
254 * context flags
255 */
256typedef struct {
 257 unsigned int block:1; /* when 1, task will block on user notifications */
258 unsigned int system:1; /* do system wide monitoring */
259 unsigned int using_dbreg:1; /* using range restrictions (debug registers) */
260 unsigned int is_sampling:1; /* true if using a custom format */
261 unsigned int excl_idle:1; /* exclude idle task in system wide session */
262 unsigned int going_zombie:1; /* context is zombie (MASKED+blocking) */
263 unsigned int trap_reason:2; /* reason for going into pfm_handle_work() */
264 unsigned int no_msg:1; /* no message sent on overflow */
265 unsigned int can_restart:1; /* allowed to issue a PFM_RESTART */
266 unsigned int reserved:22;
267} pfm_context_flags_t;
268
269#define PFM_TRAP_REASON_NONE 0x0 /* default value */
270#define PFM_TRAP_REASON_BLOCK 0x1 /* we need to block on overflow */
271#define PFM_TRAP_REASON_RESET 0x2 /* we need to reset PMDs */
272
273
274/*
275 * perfmon context: encapsulates all the state of a monitoring session
276 */
277
278typedef struct pfm_context {
279 spinlock_t ctx_lock; /* context protection */
280
281 pfm_context_flags_t ctx_flags; /* bitmask of flags (block reason incl.) */
282 unsigned int ctx_state; /* state: active/inactive (no bitfield) */
283
284 struct task_struct *ctx_task; /* task to which context is attached */
285
286 unsigned long ctx_ovfl_regs[4]; /* which registers overflowed (notification) */
287
288 struct semaphore ctx_restart_sem; /* use for blocking notification mode */
289
290 unsigned long ctx_used_pmds[4]; /* bitmask of PMD used */
291 unsigned long ctx_all_pmds[4]; /* bitmask of all accessible PMDs */
292 unsigned long ctx_reload_pmds[4]; /* bitmask of force reload PMD on ctxsw in */
293
294 unsigned long ctx_all_pmcs[4]; /* bitmask of all accessible PMCs */
295 unsigned long ctx_reload_pmcs[4]; /* bitmask of force reload PMC on ctxsw in */
296 unsigned long ctx_used_monitors[4]; /* bitmask of monitor PMC being used */
297
298 unsigned long ctx_pmcs[IA64_NUM_PMC_REGS]; /* saved copies of PMC values */
299
300 unsigned int ctx_used_ibrs[1]; /* bitmask of used IBR (speedup ctxsw in) */
301 unsigned int ctx_used_dbrs[1]; /* bitmask of used DBR (speedup ctxsw in) */
302 unsigned long ctx_dbrs[IA64_NUM_DBG_REGS]; /* DBR values (cache) when not loaded */
303 unsigned long ctx_ibrs[IA64_NUM_DBG_REGS]; /* IBR values (cache) when not loaded */
304
305 pfm_counter_t ctx_pmds[IA64_NUM_PMD_REGS]; /* software state for PMDS */
306
307 u64 ctx_saved_psr_up; /* only contains psr.up value */
308
309 unsigned long ctx_last_activation; /* context last activation number for last_cpu */
310 unsigned int ctx_last_cpu; /* CPU id of current or last CPU used (SMP only) */
311 unsigned int ctx_cpu; /* cpu to which perfmon is applied (system wide) */
312
 313 int ctx_fd; /* file descriptor used by this context */
314 pfm_ovfl_arg_t ctx_ovfl_arg; /* argument to custom buffer format handler */
315
316 pfm_buffer_fmt_t *ctx_buf_fmt; /* buffer format callbacks */
317 void *ctx_smpl_hdr; /* points to sampling buffer header kernel vaddr */
318 unsigned long ctx_smpl_size; /* size of sampling buffer */
319 void *ctx_smpl_vaddr; /* user level virtual address of smpl buffer */
320
321 wait_queue_head_t ctx_msgq_wait;
322 pfm_msg_t ctx_msgq[PFM_MAX_MSGS];
323 int ctx_msgq_head;
324 int ctx_msgq_tail;
325 struct fasync_struct *ctx_async_queue;
326
327 wait_queue_head_t ctx_zombieq; /* termination cleanup wait queue */
328} pfm_context_t;
329
330/*
331 * magic number used to verify that structure is really
332 * a perfmon context
333 */
334#define PFM_IS_FILE(f) ((f)->f_op == &pfm_file_ops)
335
336#define PFM_GET_CTX(t) ((pfm_context_t *)(t)->thread.pfm_context)
337
338#ifdef CONFIG_SMP
339#define SET_LAST_CPU(ctx, v) (ctx)->ctx_last_cpu = (v)
340#define GET_LAST_CPU(ctx) (ctx)->ctx_last_cpu
341#else
342#define SET_LAST_CPU(ctx, v) do {} while(0)
343#define GET_LAST_CPU(ctx) do {} while(0)
344#endif
345
346
347#define ctx_fl_block ctx_flags.block
348#define ctx_fl_system ctx_flags.system
349#define ctx_fl_using_dbreg ctx_flags.using_dbreg
350#define ctx_fl_is_sampling ctx_flags.is_sampling
351#define ctx_fl_excl_idle ctx_flags.excl_idle
352#define ctx_fl_going_zombie ctx_flags.going_zombie
353#define ctx_fl_trap_reason ctx_flags.trap_reason
354#define ctx_fl_no_msg ctx_flags.no_msg
355#define ctx_fl_can_restart ctx_flags.can_restart
356
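/*
 * Note (added for clarity): these shortcuts keep call sites readable,
 * e.g. "is_system = ctx->ctx_fl_system;" expands to
 * "is_system = ctx->ctx_flags.system;".
 */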
357#define PFM_SET_WORK_PENDING(t, v) do { (t)->thread.pfm_needs_checking = v; } while(0);
358#define PFM_GET_WORK_PENDING(t) (t)->thread.pfm_needs_checking
359
360/*
361 * global information about all sessions
362 * mostly used to synchronize between system wide and per-process
363 */
364typedef struct {
365 spinlock_t pfs_lock; /* lock the structure */
366
367 unsigned int pfs_task_sessions; /* number of per task sessions */
368 unsigned int pfs_sys_sessions; /* number of per system wide sessions */
369 unsigned int pfs_sys_use_dbregs; /* incremented when a system wide session uses debug regs */
370 unsigned int pfs_ptrace_use_dbregs; /* incremented when a process uses debug regs */
371 struct task_struct *pfs_sys_session[NR_CPUS]; /* point to task owning a system-wide session */
372} pfm_session_t;
373
374/*
375 * information about a PMC or PMD.
376 * dep_pmd[]: a bitmask of dependent PMD registers
377 * dep_pmc[]: a bitmask of dependent PMC registers
378 */
379typedef int (*pfm_reg_check_t)(struct task_struct *task, pfm_context_t *ctx, unsigned int cnum, unsigned long *val, struct pt_regs *regs);
380typedef struct {
381 unsigned int type;
382 int pm_pos;
383 unsigned long default_value; /* power-on default value */
384 unsigned long reserved_mask; /* bitmask of reserved bits */
385 pfm_reg_check_t read_check;
386 pfm_reg_check_t write_check;
387 unsigned long dep_pmd[4];
388 unsigned long dep_pmc[4];
389} pfm_reg_desc_t;
390
391/* assume cnum is a valid monitor */
392#define PMC_PM(cnum, val) (((val) >> (pmu_conf->pmc_desc[cnum].pm_pos)) & 0x1)
393
394/*
395 * This structure is initialized at boot time and contains
396 * a description of the PMU main characteristics.
397 *
398 * If the probe function is defined, detection is based
399 * on its return value:
400 * - 0 means recognized PMU
401 * - anything else means not supported
402 * When the probe function is not defined, then the pmu_family field
403 * is used and it must match the host CPU family such that:
404 * - cpu->family & config->pmu_family != 0
405 */
406typedef struct {
407 unsigned long ovfl_val; /* overflow value for counters */
408
409 pfm_reg_desc_t *pmc_desc; /* detailed PMC register dependencies descriptions */
410 pfm_reg_desc_t *pmd_desc; /* detailed PMD register dependencies descriptions */
411
412 unsigned int num_pmcs; /* number of PMCS: computed at init time */
413 unsigned int num_pmds; /* number of PMDS: computed at init time */
414 unsigned long impl_pmcs[4]; /* bitmask of implemented PMCS */
415 unsigned long impl_pmds[4]; /* bitmask of implemented PMDS */
416
417 char *pmu_name; /* PMU family name */
418 unsigned int pmu_family; /* cpuid family pattern used to identify pmu */
419 unsigned int flags; /* pmu specific flags */
420 unsigned int num_ibrs; /* number of IBRS: computed at init time */
421 unsigned int num_dbrs; /* number of DBRS: computed at init time */
422 unsigned int num_counters; /* PMC/PMD counting pairs : computed at init time */
423 int (*probe)(void); /* customized probe routine */
424 unsigned int use_rr_dbregs:1; /* set if debug registers used for range restriction */
425} pmu_config_t;
426/*
427 * PMU specific flags
428 */
429#define PFM_PMU_IRQ_RESEND 1 /* PMU needs explicit IRQ resend */
430
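/*
 * Illustrative sketch only -- the real tables live in perfmon_itanium.h,
 * perfmon_mckinley.h and perfmon_generic.h included below. A minimal,
 * hypothetical entry would look roughly like:
 *
 *	static int pfm_fake_probe(void) { return 0; }	// 0 = PMU recognized
 *
 *	static pmu_config_t pmu_conf_fake = {
 *		.pmu_name   = "Fake",
 *		.pmu_family = 0x20,			// used only if .probe is NULL
 *		.ovfl_val   = (1UL << 47) - 1,		// e.g. 47-bit counters
 *		.pmc_desc   = pfm_fake_pmc_desc,	// PFM_REG_END-terminated array
 *		.pmd_desc   = pfm_fake_pmd_desc,
 *		.probe      = pfm_fake_probe,
 *	};
 *
 * num_pmcs/num_pmds/impl_pmcs/impl_pmds are then computed at init time
 * from the descriptor tables, as the field comments above indicate.
 */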
431/*
432 * debug register related type definitions
433 */
434typedef struct {
435 unsigned long ibr_mask:56;
436 unsigned long ibr_plm:4;
437 unsigned long ibr_ig:3;
438 unsigned long ibr_x:1;
439} ibr_mask_reg_t;
440
441typedef struct {
442 unsigned long dbr_mask:56;
443 unsigned long dbr_plm:4;
444 unsigned long dbr_ig:2;
445 unsigned long dbr_w:1;
446 unsigned long dbr_r:1;
447} dbr_mask_reg_t;
448
449typedef union {
450 unsigned long val;
451 ibr_mask_reg_t ibr;
452 dbr_mask_reg_t dbr;
453} dbreg_t;
454
455
456/*
457 * perfmon command descriptions
458 */
459typedef struct {
460 int (*cmd_func)(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs);
461 char *cmd_name;
462 int cmd_flags;
463 unsigned int cmd_narg;
464 size_t cmd_argsize;
465 int (*cmd_getsize)(void *arg, size_t *sz);
466} pfm_cmd_desc_t;
467
468#define PFM_CMD_FD 0x01 /* command requires a file descriptor */
469#define PFM_CMD_ARG_READ 0x02 /* command must read argument(s) */
470#define PFM_CMD_ARG_RW 0x04 /* command must read/write argument(s) */
471#define PFM_CMD_STOP 0x08 /* command does not work on zombie context */
472
473
474#define PFM_CMD_NAME(cmd) pfm_cmd_tab[(cmd)].cmd_name
475#define PFM_CMD_READ_ARG(cmd) (pfm_cmd_tab[(cmd)].cmd_flags & PFM_CMD_ARG_READ)
476#define PFM_CMD_RW_ARG(cmd) (pfm_cmd_tab[(cmd)].cmd_flags & PFM_CMD_ARG_RW)
477#define PFM_CMD_USE_FD(cmd) (pfm_cmd_tab[(cmd)].cmd_flags & PFM_CMD_FD)
478#define PFM_CMD_STOPPED(cmd) (pfm_cmd_tab[(cmd)].cmd_flags & PFM_CMD_STOP)
479
480#define PFM_CMD_ARG_MANY -1 /* cannot be zero */
481
482typedef struct {
483 unsigned long pfm_spurious_ovfl_intr_count; /* keep track of spurious ovfl interrupts */
484 unsigned long pfm_replay_ovfl_intr_count; /* keep track of replayed ovfl interrupts */
485 unsigned long pfm_ovfl_intr_count; /* keep track of ovfl interrupts */
486 unsigned long pfm_ovfl_intr_cycles; /* cycles spent processing ovfl interrupts */
487 unsigned long pfm_ovfl_intr_cycles_min; /* min cycles spent processing ovfl interrupts */
488 unsigned long pfm_ovfl_intr_cycles_max; /* max cycles spent processing ovfl interrupts */
489 unsigned long pfm_smpl_handler_calls;
490 unsigned long pfm_smpl_handler_cycles;
491 char pad[SMP_CACHE_BYTES] ____cacheline_aligned;
492} pfm_stats_t;
493
494/*
495 * perfmon internal variables
496 */
497static pfm_stats_t pfm_stats[NR_CPUS];
498static pfm_session_t pfm_sessions; /* global sessions information */
499
500static struct proc_dir_entry *perfmon_dir;
501static pfm_uuid_t pfm_null_uuid = {0,};
502
503static spinlock_t pfm_buffer_fmt_lock;
504static LIST_HEAD(pfm_buffer_fmt_list);
505
506static pmu_config_t *pmu_conf;
507
508/* sysctl() controls */
509pfm_sysctl_t pfm_sysctl;
510EXPORT_SYMBOL(pfm_sysctl);
511
512static ctl_table pfm_ctl_table[]={
513 {1, "debug", &pfm_sysctl.debug, sizeof(int), 0666, NULL, &proc_dointvec, NULL,},
514 {2, "debug_ovfl", &pfm_sysctl.debug_ovfl, sizeof(int), 0666, NULL, &proc_dointvec, NULL,},
515 {3, "fastctxsw", &pfm_sysctl.fastctxsw, sizeof(int), 0600, NULL, &proc_dointvec, NULL,},
516 {4, "expert_mode", &pfm_sysctl.expert_mode, sizeof(int), 0600, NULL, &proc_dointvec, NULL,},
517 { 0, },
518};
519static ctl_table pfm_sysctl_dir[] = {
520 {1, "perfmon", NULL, 0, 0755, pfm_ctl_table, },
521 {0,},
522};
523static ctl_table pfm_sysctl_root[] = {
524 {1, "kernel", NULL, 0, 0755, pfm_sysctl_dir, },
525 {0,},
526};
527static struct ctl_table_header *pfm_sysctl_header;
528
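/*
 * Usage sketch (not part of the original source): once pfm_sysctl_root is
 * registered (presumably via register_sysctl_table() later in this file),
 * these knobs appear under /proc/sys/kernel/perfmon/, so debug output can
 * be toggled at run time, e.g.
 *
 *	echo 1 > /proc/sys/kernel/perfmon/debug
 *	echo 0 > /proc/sys/kernel/perfmon/debug
 */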
529static int pfm_context_unload(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs);
530static int pfm_flush(struct file *filp);
531
532#define pfm_get_cpu_var(v) __ia64_per_cpu_var(v)
533#define pfm_get_cpu_data(a,b) per_cpu(a, b)
534
535static inline void
536pfm_put_task(struct task_struct *task)
537{
538 if (task != current) put_task_struct(task);
539}
540
541static inline void
542pfm_set_task_notify(struct task_struct *task)
543{
544 struct thread_info *info;
545
546 info = (struct thread_info *) ((char *) task + IA64_TASK_SIZE);
547 set_bit(TIF_NOTIFY_RESUME, &info->flags);
548}
549
550static inline void
551pfm_clear_task_notify(void)
552{
553 clear_thread_flag(TIF_NOTIFY_RESUME);
554}
555
556static inline void
557pfm_reserve_page(unsigned long a)
558{
559 SetPageReserved(vmalloc_to_page((void *)a));
560}
561static inline void
562pfm_unreserve_page(unsigned long a)
563{
564 ClearPageReserved(vmalloc_to_page((void*)a));
565}
566
567static inline unsigned long
568pfm_protect_ctx_ctxsw(pfm_context_t *x)
569{
570 spin_lock(&(x)->ctx_lock);
571 return 0UL;
572}
573
 574static inline void
575pfm_unprotect_ctx_ctxsw(pfm_context_t *x, unsigned long f)
576{
577 spin_unlock(&(x)->ctx_lock);
578}
579
580static inline unsigned int
581pfm_do_munmap(struct mm_struct *mm, unsigned long addr, size_t len, int acct)
582{
583 return do_munmap(mm, addr, len);
584}
585
586static inline unsigned long
587pfm_get_unmapped_area(struct file *file, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags, unsigned long exec)
588{
589 return get_unmapped_area(file, addr, len, pgoff, flags);
590}
591
592
593static struct super_block *
594pfmfs_get_sb(struct file_system_type *fs_type, int flags, const char *dev_name, void *data)
595{
596 return get_sb_pseudo(fs_type, "pfm:", NULL, PFMFS_MAGIC);
597}
598
599static struct file_system_type pfm_fs_type = {
600 .name = "pfmfs",
601 .get_sb = pfmfs_get_sb,
602 .kill_sb = kill_anon_super,
603};
604
605DEFINE_PER_CPU(unsigned long, pfm_syst_info);
606DEFINE_PER_CPU(struct task_struct *, pmu_owner);
607DEFINE_PER_CPU(pfm_context_t *, pmu_ctx);
608DEFINE_PER_CPU(unsigned long, pmu_activation_number);
609
610
611/* forward declaration */
612static struct file_operations pfm_file_ops;
613
614/*
615 * forward declarations
616 */
617#ifndef CONFIG_SMP
618static void pfm_lazy_save_regs (struct task_struct *ta);
619#endif
620
621void dump_pmu_state(const char *);
622static int pfm_write_ibr_dbr(int mode, pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs);
623
624#include "perfmon_itanium.h"
625#include "perfmon_mckinley.h"
626#include "perfmon_generic.h"
627
628static pmu_config_t *pmu_confs[]={
629 &pmu_conf_mck,
630 &pmu_conf_ita,
631 &pmu_conf_gen, /* must be last */
632 NULL
633};
634
635
636static int pfm_end_notify_user(pfm_context_t *ctx);
637
638static inline void
639pfm_clear_psr_pp(void)
640{
641 ia64_rsm(IA64_PSR_PP);
642 ia64_srlz_i();
643}
644
645static inline void
646pfm_set_psr_pp(void)
647{
648 ia64_ssm(IA64_PSR_PP);
649 ia64_srlz_i();
650}
651
652static inline void
653pfm_clear_psr_up(void)
654{
655 ia64_rsm(IA64_PSR_UP);
656 ia64_srlz_i();
657}
658
659static inline void
660pfm_set_psr_up(void)
661{
662 ia64_ssm(IA64_PSR_UP);
663 ia64_srlz_i();
664}
665
666static inline unsigned long
667pfm_get_psr(void)
668{
669 unsigned long tmp;
670 tmp = ia64_getreg(_IA64_REG_PSR);
671 ia64_srlz_i();
672 return tmp;
673}
674
675static inline void
676pfm_set_psr_l(unsigned long val)
677{
678 ia64_setreg(_IA64_REG_PSR_L, val);
679 ia64_srlz_i();
680}
681
682static inline void
683pfm_freeze_pmu(void)
684{
685 ia64_set_pmc(0,1UL);
686 ia64_srlz_d();
687}
688
689static inline void
690pfm_unfreeze_pmu(void)
691{
692 ia64_set_pmc(0,0UL);
693 ia64_srlz_d();
694}
695
696static inline void
697pfm_restore_ibrs(unsigned long *ibrs, unsigned int nibrs)
698{
699 int i;
700
701 for (i=0; i < nibrs; i++) {
702 ia64_set_ibr(i, ibrs[i]);
703 ia64_dv_serialize_instruction();
704 }
705 ia64_srlz_i();
706}
707
708static inline void
709pfm_restore_dbrs(unsigned long *dbrs, unsigned int ndbrs)
710{
711 int i;
712
713 for (i=0; i < ndbrs; i++) {
714 ia64_set_dbr(i, dbrs[i]);
715 ia64_dv_serialize_data();
716 }
717 ia64_srlz_d();
718}
719
720/*
721 * PMD[i] must be a counter. no check is made
722 */
723static inline unsigned long
724pfm_read_soft_counter(pfm_context_t *ctx, int i)
725{
726 return ctx->ctx_pmds[i].val + (ia64_get_pmd(i) & pmu_conf->ovfl_val);
727}
728
729/*
730 * PMD[i] must be a counter. no check is made
731 */
732static inline void
733pfm_write_soft_counter(pfm_context_t *ctx, int i, unsigned long val)
734{
735 unsigned long ovfl_val = pmu_conf->ovfl_val;
736
737 ctx->ctx_pmds[i].val = val & ~ovfl_val;
738 /*
 739 * writing to the unimplemented part is ignored, so we do not need to
740 * mask off top part
741 */
742 ia64_set_pmd(i, val & ovfl_val);
743}
744
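/*
 * Worked example (added for clarity; assumes, purely for illustration, a
 * PMU whose counters implement 47 bits, i.e. ovfl_val == (1UL<<47)-1):
 *
 *	pfm_write_soft_counter(ctx, i, 0x0000800000000123UL);
 *
 * stores the upper part 0x0000800000000000 in ctx->ctx_pmds[i].val and the
 * lower part 0x123 in the hardware PMD. pfm_read_soft_counter() adds the
 * two pieces back together, so callers always see the full 64-bit virtual
 * counter value.
 */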
745static pfm_msg_t *
746pfm_get_new_msg(pfm_context_t *ctx)
747{
748 int idx, next;
749
750 next = (ctx->ctx_msgq_tail+1) % PFM_MAX_MSGS;
751
752 DPRINT(("ctx_fd=%p head=%d tail=%d\n", ctx, ctx->ctx_msgq_head, ctx->ctx_msgq_tail));
753 if (next == ctx->ctx_msgq_head) return NULL;
754
755 idx = ctx->ctx_msgq_tail;
756 ctx->ctx_msgq_tail = next;
757
758 DPRINT(("ctx=%p head=%d tail=%d msg=%d\n", ctx, ctx->ctx_msgq_head, ctx->ctx_msgq_tail, idx));
759
760 return ctx->ctx_msgq+idx;
761}
762
763static pfm_msg_t *
764pfm_get_next_msg(pfm_context_t *ctx)
765{
766 pfm_msg_t *msg;
767
768 DPRINT(("ctx=%p head=%d tail=%d\n", ctx, ctx->ctx_msgq_head, ctx->ctx_msgq_tail));
769
770 if (PFM_CTXQ_EMPTY(ctx)) return NULL;
771
772 /*
773 * get oldest message
774 */
775 msg = ctx->ctx_msgq+ctx->ctx_msgq_head;
776
777 /*
778 * and move forward
779 */
780 ctx->ctx_msgq_head = (ctx->ctx_msgq_head+1) % PFM_MAX_MSGS;
781
782 DPRINT(("ctx=%p head=%d tail=%d type=%d\n", ctx, ctx->ctx_msgq_head, ctx->ctx_msgq_tail, msg->pfm_gen_msg.msg_type));
783
784 return msg;
785}
786
787static void
788pfm_reset_msgq(pfm_context_t *ctx)
789{
790 ctx->ctx_msgq_head = ctx->ctx_msgq_tail = 0;
791 DPRINT(("ctx=%p msgq reset\n", ctx));
792}
793
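/*
 * Note (added for clarity): the message queue is a ring buffer that is
 * declared "full" one slot early: with head == 0 and tail == 31,
 * pfm_get_new_msg() computes next == 0 == head and returns NULL, so at
 * most PFM_MAX_MSGS-1 (31) messages can be pending at any time.
 */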
794static void *
795pfm_rvmalloc(unsigned long size)
796{
797 void *mem;
798 unsigned long addr;
799
800 size = PAGE_ALIGN(size);
801 mem = vmalloc(size);
802 if (mem) {
803 //printk("perfmon: CPU%d pfm_rvmalloc(%ld)=%p\n", smp_processor_id(), size, mem);
804 memset(mem, 0, size);
805 addr = (unsigned long)mem;
806 while (size > 0) {
807 pfm_reserve_page(addr);
808 addr+=PAGE_SIZE;
809 size-=PAGE_SIZE;
810 }
811 }
812 return mem;
813}
814
815static void
816pfm_rvfree(void *mem, unsigned long size)
817{
818 unsigned long addr;
819
820 if (mem) {
821 DPRINT(("freeing physical buffer @%p size=%lu\n", mem, size));
822 addr = (unsigned long) mem;
823 while ((long) size > 0) {
824 pfm_unreserve_page(addr);
825 addr+=PAGE_SIZE;
826 size-=PAGE_SIZE;
827 }
828 vfree(mem);
829 }
830 return;
831}
832
833static pfm_context_t *
834pfm_context_alloc(void)
835{
836 pfm_context_t *ctx;
837
838 /*
839 * allocate context descriptor
840 * must be able to free with interrupts disabled
841 */
842 ctx = kmalloc(sizeof(pfm_context_t), GFP_KERNEL);
843 if (ctx) {
844 memset(ctx, 0, sizeof(pfm_context_t));
845 DPRINT(("alloc ctx @%p\n", ctx));
846 }
847 return ctx;
848}
849
850static void
851pfm_context_free(pfm_context_t *ctx)
852{
853 if (ctx) {
854 DPRINT(("free ctx @%p\n", ctx));
855 kfree(ctx);
856 }
857}
858
859static void
860pfm_mask_monitoring(struct task_struct *task)
861{
862 pfm_context_t *ctx = PFM_GET_CTX(task);
863 struct thread_struct *th = &task->thread;
864 unsigned long mask, val, ovfl_mask;
865 int i;
866
867 DPRINT_ovfl(("masking monitoring for [%d]\n", task->pid));
868
869 ovfl_mask = pmu_conf->ovfl_val;
870 /*
871 * monitoring can only be masked as a result of a valid
872 * counter overflow. In UP, it means that the PMU still
873 * has an owner. Note that the owner can be different
874 * from the current task. However the PMU state belongs
875 * to the owner.
876 * In SMP, a valid overflow only happens when task is
877 * current. Therefore if we come here, we know that
878 * the PMU state belongs to the current task, therefore
879 * we can access the live registers.
880 *
881 * So in both cases, the live register contains the owner's
882 * state. We can ONLY touch the PMU registers and NOT the PSR.
883 *
884 * As a consequence to this call, the thread->pmds[] array
885 * contains stale information which must be ignored
886 * when context is reloaded AND monitoring is active (see
887 * pfm_restart).
888 */
889 mask = ctx->ctx_used_pmds[0];
890 for (i = 0; mask; i++, mask>>=1) {
891 /* skip non used pmds */
892 if ((mask & 0x1) == 0) continue;
893 val = ia64_get_pmd(i);
894
895 if (PMD_IS_COUNTING(i)) {
896 /*
897 * we rebuild the full 64 bit value of the counter
898 */
899 ctx->ctx_pmds[i].val += (val & ovfl_mask);
900 } else {
901 ctx->ctx_pmds[i].val = val;
902 }
903 DPRINT_ovfl(("pmd[%d]=0x%lx hw_pmd=0x%lx\n",
904 i,
905 ctx->ctx_pmds[i].val,
906 val & ovfl_mask));
907 }
908 /*
909 * mask monitoring by setting the privilege level to 0
910 * we cannot use psr.pp/psr.up for this, it is controlled by
911 * the user
912 *
913 * if task is current, modify actual registers, otherwise modify
914 * thread save state, i.e., what will be restored in pfm_load_regs()
915 */
916 mask = ctx->ctx_used_monitors[0] >> PMU_FIRST_COUNTER;
917 for(i= PMU_FIRST_COUNTER; mask; i++, mask>>=1) {
918 if ((mask & 0x1) == 0UL) continue;
919 ia64_set_pmc(i, th->pmcs[i] & ~0xfUL);
920 th->pmcs[i] &= ~0xfUL;
921 DPRINT_ovfl(("pmc[%d]=0x%lx\n", i, th->pmcs[i]));
922 }
923 /*
924 * make all of this visible
925 */
926 ia64_srlz_d();
927}
928
929/*
930 * must always be done with task == current
931 *
932 * context must be in MASKED state when calling
933 */
934static void
935pfm_restore_monitoring(struct task_struct *task)
936{
937 pfm_context_t *ctx = PFM_GET_CTX(task);
938 struct thread_struct *th = &task->thread;
939 unsigned long mask, ovfl_mask;
940 unsigned long psr, val;
941 int i, is_system;
942
943 is_system = ctx->ctx_fl_system;
944 ovfl_mask = pmu_conf->ovfl_val;
945
946 if (task != current) {
947 printk(KERN_ERR "perfmon.%d: invalid task[%d] current[%d]\n", __LINE__, task->pid, current->pid);
948 return;
949 }
950 if (ctx->ctx_state != PFM_CTX_MASKED) {
951 printk(KERN_ERR "perfmon.%d: task[%d] current[%d] invalid state=%d\n", __LINE__,
952 task->pid, current->pid, ctx->ctx_state);
953 return;
954 }
955 psr = pfm_get_psr();
956 /*
957 * monitoring is masked via the PMC.
958 * As we restore their value, we do not want each counter to
959 * restart right away. We stop monitoring using the PSR,
960 * restore the PMC (and PMD) and then re-establish the psr
961 * as it was. Note that there can be no pending overflow at
962 * this point, because monitoring was MASKED.
963 *
964 * system-wide session are pinned and self-monitoring
965 */
966 if (is_system && (PFM_CPUINFO_GET() & PFM_CPUINFO_DCR_PP)) {
967 /* disable dcr pp */
968 ia64_setreg(_IA64_REG_CR_DCR, ia64_getreg(_IA64_REG_CR_DCR) & ~IA64_DCR_PP);
969 pfm_clear_psr_pp();
970 } else {
971 pfm_clear_psr_up();
972 }
973 /*
974 * first, we restore the PMD
975 */
976 mask = ctx->ctx_used_pmds[0];
977 for (i = 0; mask; i++, mask>>=1) {
978 /* skip non used pmds */
979 if ((mask & 0x1) == 0) continue;
980
981 if (PMD_IS_COUNTING(i)) {
982 /*
983 * we split the 64bit value according to
984 * counter width
985 */
986 val = ctx->ctx_pmds[i].val & ovfl_mask;
987 ctx->ctx_pmds[i].val &= ~ovfl_mask;
988 } else {
989 val = ctx->ctx_pmds[i].val;
990 }
991 ia64_set_pmd(i, val);
992
993 DPRINT(("pmd[%d]=0x%lx hw_pmd=0x%lx\n",
994 i,
995 ctx->ctx_pmds[i].val,
996 val));
997 }
998 /*
999 * restore the PMCs
1000 */
1001 mask = ctx->ctx_used_monitors[0] >> PMU_FIRST_COUNTER;
1002 for(i= PMU_FIRST_COUNTER; mask; i++, mask>>=1) {
1003 if ((mask & 0x1) == 0UL) continue;
1004 th->pmcs[i] = ctx->ctx_pmcs[i];
1005 ia64_set_pmc(i, th->pmcs[i]);
1006 DPRINT(("[%d] pmc[%d]=0x%lx\n", task->pid, i, th->pmcs[i]));
1007 }
1008 ia64_srlz_d();
1009
1010 /*
1011 * must restore DBR/IBR because could be modified while masked
1012 * XXX: need to optimize
1013 */
1014 if (ctx->ctx_fl_using_dbreg) {
1015 pfm_restore_ibrs(ctx->ctx_ibrs, pmu_conf->num_ibrs);
1016 pfm_restore_dbrs(ctx->ctx_dbrs, pmu_conf->num_dbrs);
1017 }
1018
1019 /*
1020 * now restore PSR
1021 */
1022 if (is_system && (PFM_CPUINFO_GET() & PFM_CPUINFO_DCR_PP)) {
1023 /* enable dcr pp */
1024 ia64_setreg(_IA64_REG_CR_DCR, ia64_getreg(_IA64_REG_CR_DCR) | IA64_DCR_PP);
1025 ia64_srlz_i();
1026 }
1027 pfm_set_psr_l(psr);
1028}
1029
1030static inline void
1031pfm_save_pmds(unsigned long *pmds, unsigned long mask)
1032{
1033 int i;
1034
1035 ia64_srlz_d();
1036
1037 for (i=0; mask; i++, mask>>=1) {
1038 if (mask & 0x1) pmds[i] = ia64_get_pmd(i);
1039 }
1040}
1041
1042/*
 1043 * reload from thread state (used for ctxsw only)
1044 */
1045static inline void
1046pfm_restore_pmds(unsigned long *pmds, unsigned long mask)
1047{
1048 int i;
1049 unsigned long val, ovfl_val = pmu_conf->ovfl_val;
1050
1051 for (i=0; mask; i++, mask>>=1) {
1052 if ((mask & 0x1) == 0) continue;
1053 val = PMD_IS_COUNTING(i) ? pmds[i] & ovfl_val : pmds[i];
1054 ia64_set_pmd(i, val);
1055 }
1056 ia64_srlz_d();
1057}
1058
1059/*
1060 * propagate PMD from context to thread-state
1061 */
1062static inline void
1063pfm_copy_pmds(struct task_struct *task, pfm_context_t *ctx)
1064{
1065 struct thread_struct *thread = &task->thread;
1066 unsigned long ovfl_val = pmu_conf->ovfl_val;
1067 unsigned long mask = ctx->ctx_all_pmds[0];
1068 unsigned long val;
1069 int i;
1070
1071 DPRINT(("mask=0x%lx\n", mask));
1072
1073 for (i=0; mask; i++, mask>>=1) {
1074
1075 val = ctx->ctx_pmds[i].val;
1076
1077 /*
1078 * We break up the 64 bit value into 2 pieces
1079 * the lower bits go to the machine state in the
1080 * thread (will be reloaded on ctxsw in).
1081 * The upper part stays in the soft-counter.
1082 */
1083 if (PMD_IS_COUNTING(i)) {
1084 ctx->ctx_pmds[i].val = val & ~ovfl_val;
1085 val &= ovfl_val;
1086 }
1087 thread->pmds[i] = val;
1088
1089 DPRINT(("pmd[%d]=0x%lx soft_val=0x%lx\n",
1090 i,
1091 thread->pmds[i],
1092 ctx->ctx_pmds[i].val));
1093 }
1094}
1095
1096/*
1097 * propagate PMC from context to thread-state
1098 */
1099static inline void
1100pfm_copy_pmcs(struct task_struct *task, pfm_context_t *ctx)
1101{
1102 struct thread_struct *thread = &task->thread;
1103 unsigned long mask = ctx->ctx_all_pmcs[0];
1104 int i;
1105
1106 DPRINT(("mask=0x%lx\n", mask));
1107
1108 for (i=0; mask; i++, mask>>=1) {
1109 /* masking 0 with ovfl_val yields 0 */
1110 thread->pmcs[i] = ctx->ctx_pmcs[i];
1111 DPRINT(("pmc[%d]=0x%lx\n", i, thread->pmcs[i]));
1112 }
1113}
1114
1115
1116
1117static inline void
1118pfm_restore_pmcs(unsigned long *pmcs, unsigned long mask)
1119{
1120 int i;
1121
1122 for (i=0; mask; i++, mask>>=1) {
1123 if ((mask & 0x1) == 0) continue;
1124 ia64_set_pmc(i, pmcs[i]);
1125 }
1126 ia64_srlz_d();
1127}
1128
1129static inline int
1130pfm_uuid_cmp(pfm_uuid_t a, pfm_uuid_t b)
1131{
1132 return memcmp(a, b, sizeof(pfm_uuid_t));
1133}
1134
1135static inline int
1136pfm_buf_fmt_exit(pfm_buffer_fmt_t *fmt, struct task_struct *task, void *buf, struct pt_regs *regs)
1137{
1138 int ret = 0;
1139 if (fmt->fmt_exit) ret = (*fmt->fmt_exit)(task, buf, regs);
1140 return ret;
1141}
1142
1143static inline int
1144pfm_buf_fmt_getsize(pfm_buffer_fmt_t *fmt, struct task_struct *task, unsigned int flags, int cpu, void *arg, unsigned long *size)
1145{
1146 int ret = 0;
1147 if (fmt->fmt_getsize) ret = (*fmt->fmt_getsize)(task, flags, cpu, arg, size);
1148 return ret;
1149}
1150
1151
1152static inline int
1153pfm_buf_fmt_validate(pfm_buffer_fmt_t *fmt, struct task_struct *task, unsigned int flags,
1154 int cpu, void *arg)
1155{
1156 int ret = 0;
1157 if (fmt->fmt_validate) ret = (*fmt->fmt_validate)(task, flags, cpu, arg);
1158 return ret;
1159}
1160
1161static inline int
1162pfm_buf_fmt_init(pfm_buffer_fmt_t *fmt, struct task_struct *task, void *buf, unsigned int flags,
1163 int cpu, void *arg)
1164{
1165 int ret = 0;
1166 if (fmt->fmt_init) ret = (*fmt->fmt_init)(task, buf, flags, cpu, arg);
1167 return ret;
1168}
1169
1170static inline int
1171pfm_buf_fmt_restart(pfm_buffer_fmt_t *fmt, struct task_struct *task, pfm_ovfl_ctrl_t *ctrl, void *buf, struct pt_regs *regs)
1172{
1173 int ret = 0;
1174 if (fmt->fmt_restart) ret = (*fmt->fmt_restart)(task, ctrl, buf, regs);
1175 return ret;
1176}
1177
1178static inline int
1179pfm_buf_fmt_restart_active(pfm_buffer_fmt_t *fmt, struct task_struct *task, pfm_ovfl_ctrl_t *ctrl, void *buf, struct pt_regs *regs)
1180{
1181 int ret = 0;
1182 if (fmt->fmt_restart_active) ret = (*fmt->fmt_restart_active)(task, ctrl, buf, regs);
1183 return ret;
1184}
1185
1186static pfm_buffer_fmt_t *
1187__pfm_find_buffer_fmt(pfm_uuid_t uuid)
1188{
1189 struct list_head * pos;
1190 pfm_buffer_fmt_t * entry;
1191
1192 list_for_each(pos, &pfm_buffer_fmt_list) {
1193 entry = list_entry(pos, pfm_buffer_fmt_t, fmt_list);
1194 if (pfm_uuid_cmp(uuid, entry->fmt_uuid) == 0)
1195 return entry;
1196 }
1197 return NULL;
1198}
1199
1200/*
1201 * find a buffer format based on its uuid
1202 */
1203static pfm_buffer_fmt_t *
1204pfm_find_buffer_fmt(pfm_uuid_t uuid)
1205{
1206 pfm_buffer_fmt_t * fmt;
1207 spin_lock(&pfm_buffer_fmt_lock);
1208 fmt = __pfm_find_buffer_fmt(uuid);
1209 spin_unlock(&pfm_buffer_fmt_lock);
1210 return fmt;
1211}
1212
1213int
1214pfm_register_buffer_fmt(pfm_buffer_fmt_t *fmt)
1215{
1216 int ret = 0;
1217
1218 /* some sanity checks */
1219 if (fmt == NULL || fmt->fmt_name == NULL) return -EINVAL;
1220
1221 /* we need at least a handler */
1222 if (fmt->fmt_handler == NULL) return -EINVAL;
1223
1224 /*
1225 * XXX: need check validity of fmt_arg_size
1226 */
1227
1228 spin_lock(&pfm_buffer_fmt_lock);
1229
1230 if (__pfm_find_buffer_fmt(fmt->fmt_uuid)) {
1231 printk(KERN_ERR "perfmon: duplicate sampling format: %s\n", fmt->fmt_name);
1232 ret = -EBUSY;
1233 goto out;
1234 }
1235 list_add(&fmt->fmt_list, &pfm_buffer_fmt_list);
1236 printk(KERN_INFO "perfmon: added sampling format %s\n", fmt->fmt_name);
1237
1238out:
1239 spin_unlock(&pfm_buffer_fmt_lock);
1240 return ret;
1241}
1242EXPORT_SYMBOL(pfm_register_buffer_fmt);
1243
1244int
1245pfm_unregister_buffer_fmt(pfm_uuid_t uuid)
1246{
1247 pfm_buffer_fmt_t *fmt;
1248 int ret = 0;
1249
1250 spin_lock(&pfm_buffer_fmt_lock);
1251
1252 fmt = __pfm_find_buffer_fmt(uuid);
1253 if (!fmt) {
1254 printk(KERN_ERR "perfmon: cannot unregister format, not found\n");
1255 ret = -EINVAL;
1256 goto out;
1257 }
1258 list_del_init(&fmt->fmt_list);
1259 printk(KERN_INFO "perfmon: removed sampling format: %s\n", fmt->fmt_name);
1260
1261out:
1262 spin_unlock(&pfm_buffer_fmt_lock);
1263 return ret;
1264
1265}
1266EXPORT_SYMBOL(pfm_unregister_buffer_fmt);
1267
1268extern void update_pal_halt_status(int);
1269
1270static int
1271pfm_reserve_session(struct task_struct *task, int is_syswide, unsigned int cpu)
1272{
1273 unsigned long flags;
1274 /*
 1275 * validity checks on cpu_mask have been done upstream
1276 */
1277 LOCK_PFS(flags);
1278
1279 DPRINT(("in sys_sessions=%u task_sessions=%u dbregs=%u syswide=%d cpu=%u\n",
1280 pfm_sessions.pfs_sys_sessions,
1281 pfm_sessions.pfs_task_sessions,
1282 pfm_sessions.pfs_sys_use_dbregs,
1283 is_syswide,
1284 cpu));
1285
1286 if (is_syswide) {
1287 /*
1288 * cannot mix system wide and per-task sessions
1289 */
1290 if (pfm_sessions.pfs_task_sessions > 0UL) {
1291 DPRINT(("system wide not possible, %u conflicting task_sessions\n",
1292 pfm_sessions.pfs_task_sessions));
1293 goto abort;
1294 }
1295
1296 if (pfm_sessions.pfs_sys_session[cpu]) goto error_conflict;
1297
1298 DPRINT(("reserving system wide session on CPU%u currently on CPU%u\n", cpu, smp_processor_id()));
1299
1300 pfm_sessions.pfs_sys_session[cpu] = task;
1301
1302 pfm_sessions.pfs_sys_sessions++ ;
1303
1304 } else {
1305 if (pfm_sessions.pfs_sys_sessions) goto abort;
1306 pfm_sessions.pfs_task_sessions++;
1307 }
1308
1309 DPRINT(("out sys_sessions=%u task_sessions=%u dbregs=%u syswide=%d cpu=%u\n",
1310 pfm_sessions.pfs_sys_sessions,
1311 pfm_sessions.pfs_task_sessions,
1312 pfm_sessions.pfs_sys_use_dbregs,
1313 is_syswide,
1314 cpu));
1315
1316 /*
1317 * disable default_idle() to go to PAL_HALT
1318 */
1319 update_pal_halt_status(0);
1320
1321 UNLOCK_PFS(flags);
1322
1323 return 0;
1324
1325error_conflict:
1326 DPRINT(("system wide not possible, conflicting session [%d] on CPU%d\n",
1327 pfm_sessions.pfs_sys_session[cpu]->pid,
1328 smp_processor_id()));
1329abort:
1330 UNLOCK_PFS(flags);
1331
1332 return -EBUSY;
1333
1334}
1335
1336static int
1337pfm_unreserve_session(pfm_context_t *ctx, int is_syswide, unsigned int cpu)
1338{
1339 unsigned long flags;
1340 /*
 1341 * validity checks on cpu_mask have been done upstream
1342 */
1343 LOCK_PFS(flags);
1344
1345 DPRINT(("in sys_sessions=%u task_sessions=%u dbregs=%u syswide=%d cpu=%u\n",
1346 pfm_sessions.pfs_sys_sessions,
1347 pfm_sessions.pfs_task_sessions,
1348 pfm_sessions.pfs_sys_use_dbregs,
1349 is_syswide,
1350 cpu));
1351
1352
1353 if (is_syswide) {
1354 pfm_sessions.pfs_sys_session[cpu] = NULL;
1355 /*
1356 * would not work with perfmon+more than one bit in cpu_mask
1357 */
1358 if (ctx && ctx->ctx_fl_using_dbreg) {
1359 if (pfm_sessions.pfs_sys_use_dbregs == 0) {
1360 printk(KERN_ERR "perfmon: invalid release for ctx %p sys_use_dbregs=0\n", ctx);
1361 } else {
1362 pfm_sessions.pfs_sys_use_dbregs--;
1363 }
1364 }
1365 pfm_sessions.pfs_sys_sessions--;
1366 } else {
1367 pfm_sessions.pfs_task_sessions--;
1368 }
1369 DPRINT(("out sys_sessions=%u task_sessions=%u dbregs=%u syswide=%d cpu=%u\n",
1370 pfm_sessions.pfs_sys_sessions,
1371 pfm_sessions.pfs_task_sessions,
1372 pfm_sessions.pfs_sys_use_dbregs,
1373 is_syswide,
1374 cpu));
1375
1376 /*
1377 * if possible, enable default_idle() to go into PAL_HALT
1378 */
1379 if (pfm_sessions.pfs_task_sessions == 0 && pfm_sessions.pfs_sys_sessions == 0)
1380 update_pal_halt_status(1);
1381
1382 UNLOCK_PFS(flags);
1383
1384 return 0;
1385}
1386
1387/*
1388 * removes virtual mapping of the sampling buffer.
 1389 * IMPORTANT: cannot be called with interrupts disabled, e.g. inside
1390 * a PROTECT_CTX() section.
1391 */
1392static int
1393pfm_remove_smpl_mapping(struct task_struct *task, void *vaddr, unsigned long size)
1394{
1395 int r;
1396
1397 /* sanity checks */
1398 if (task->mm == NULL || size == 0UL || vaddr == NULL) {
1399 printk(KERN_ERR "perfmon: pfm_remove_smpl_mapping [%d] invalid context mm=%p\n", task->pid, task->mm);
1400 return -EINVAL;
1401 }
1402
1403 DPRINT(("smpl_vaddr=%p size=%lu\n", vaddr, size));
1404
1405 /*
1406 * does the actual unmapping
1407 */
1408 down_write(&task->mm->mmap_sem);
1409
1410 DPRINT(("down_write done smpl_vaddr=%p size=%lu\n", vaddr, size));
1411
1412 r = pfm_do_munmap(task->mm, (unsigned long)vaddr, size, 0);
1413
1414 up_write(&task->mm->mmap_sem);
1415 if (r !=0) {
1416 printk(KERN_ERR "perfmon: [%d] unable to unmap sampling buffer @%p size=%lu\n", task->pid, vaddr, size);
1417 }
1418
1419 DPRINT(("do_unmap(%p, %lu)=%d\n", vaddr, size, r));
1420
1421 return 0;
1422}
1423
1424/*
1425 * free actual physical storage used by sampling buffer
1426 */
1427#if 0
1428static int
1429pfm_free_smpl_buffer(pfm_context_t *ctx)
1430{
1431 pfm_buffer_fmt_t *fmt;
1432
1433 if (ctx->ctx_smpl_hdr == NULL) goto invalid_free;
1434
1435 /*
1436 * we won't use the buffer format anymore
1437 */
1438 fmt = ctx->ctx_buf_fmt;
1439
1440 DPRINT(("sampling buffer @%p size %lu vaddr=%p\n",
1441 ctx->ctx_smpl_hdr,
1442 ctx->ctx_smpl_size,
1443 ctx->ctx_smpl_vaddr));
1444
1445 pfm_buf_fmt_exit(fmt, current, NULL, NULL);
1446
1447 /*
1448 * free the buffer
1449 */
1450 pfm_rvfree(ctx->ctx_smpl_hdr, ctx->ctx_smpl_size);
1451
1452 ctx->ctx_smpl_hdr = NULL;
1453 ctx->ctx_smpl_size = 0UL;
1454
1455 return 0;
1456
1457invalid_free:
1458 printk(KERN_ERR "perfmon: pfm_free_smpl_buffer [%d] no buffer\n", current->pid);
1459 return -EINVAL;
1460}
1461#endif
1462
1463static inline void
1464pfm_exit_smpl_buffer(pfm_buffer_fmt_t *fmt)
1465{
1466 if (fmt == NULL) return;
1467
1468 pfm_buf_fmt_exit(fmt, current, NULL, NULL);
1469
1470}
1471
1472/*
1473 * pfmfs should _never_ be mounted by userland - too much of security hassle,
1474 * no real gain from having the whole whorehouse mounted. So we don't need
1475 * any operations on the root directory. However, we need a non-trivial
1476 * d_name - pfm: will go nicely and kill the special-casing in procfs.
1477 */
1478static struct vfsmount *pfmfs_mnt;
1479
1480static int __init
1481init_pfm_fs(void)
1482{
1483 int err = register_filesystem(&pfm_fs_type);
1484 if (!err) {
1485 pfmfs_mnt = kern_mount(&pfm_fs_type);
1486 err = PTR_ERR(pfmfs_mnt);
1487 if (IS_ERR(pfmfs_mnt))
1488 unregister_filesystem(&pfm_fs_type);
1489 else
1490 err = 0;
1491 }
1492 return err;
1493}
1494
1495static void __exit
1496exit_pfm_fs(void)
1497{
1498 unregister_filesystem(&pfm_fs_type);
1499 mntput(pfmfs_mnt);
1500}
1501
1502static ssize_t
1503pfm_read(struct file *filp, char __user *buf, size_t size, loff_t *ppos)
1504{
1505 pfm_context_t *ctx;
1506 pfm_msg_t *msg;
1507 ssize_t ret;
1508 unsigned long flags;
1509 DECLARE_WAITQUEUE(wait, current);
1510 if (PFM_IS_FILE(filp) == 0) {
 1511 printk(KERN_ERR "perfmon: pfm_read: bad magic [%d]\n", current->pid);
1512 return -EINVAL;
1513 }
1514
1515 ctx = (pfm_context_t *)filp->private_data;
1516 if (ctx == NULL) {
1517 printk(KERN_ERR "perfmon: pfm_read: NULL ctx [%d]\n", current->pid);
1518 return -EINVAL;
1519 }
1520
1521 /*
1522 * check even when there is no message
1523 */
1524 if (size < sizeof(pfm_msg_t)) {
1525 DPRINT(("message is too small ctx=%p (>=%ld)\n", ctx, sizeof(pfm_msg_t)));
1526 return -EINVAL;
1527 }
1528
1529 PROTECT_CTX(ctx, flags);
1530
1531 /*
1532 * put ourselves on the wait queue
1533 */
1534 add_wait_queue(&ctx->ctx_msgq_wait, &wait);
1535
1536
1537 for(;;) {
1538 /*
1539 * check wait queue
1540 */
1541
1542 set_current_state(TASK_INTERRUPTIBLE);
1543
1544 DPRINT(("head=%d tail=%d\n", ctx->ctx_msgq_head, ctx->ctx_msgq_tail));
1545
1546 ret = 0;
1547 if(PFM_CTXQ_EMPTY(ctx) == 0) break;
1548
1549 UNPROTECT_CTX(ctx, flags);
1550
1551 /*
1552 * check non-blocking read
1553 */
1554 ret = -EAGAIN;
1555 if(filp->f_flags & O_NONBLOCK) break;
1556
1557 /*
1558 * check pending signals
1559 */
1560 if(signal_pending(current)) {
1561 ret = -EINTR;
1562 break;
1563 }
1564 /*
1565 * no message, so wait
1566 */
1567 schedule();
1568
1569 PROTECT_CTX(ctx, flags);
1570 }
1571 DPRINT(("[%d] back to running ret=%ld\n", current->pid, ret));
1572 set_current_state(TASK_RUNNING);
1573 remove_wait_queue(&ctx->ctx_msgq_wait, &wait);
1574
1575 if (ret < 0) goto abort;
1576
1577 ret = -EINVAL;
1578 msg = pfm_get_next_msg(ctx);
1579 if (msg == NULL) {
1580 printk(KERN_ERR "perfmon: pfm_read no msg for ctx=%p [%d]\n", ctx, current->pid);
1581 goto abort_locked;
1582 }
1583
 1584 DPRINT(("fd=%d type=%d\n", msg->pfm_gen_msg.msg_ctx_fd, msg->pfm_gen_msg.msg_type));
1585
1586 ret = -EFAULT;
1587 if(copy_to_user(buf, msg, sizeof(pfm_msg_t)) == 0) ret = sizeof(pfm_msg_t);
1588
1589abort_locked:
1590 UNPROTECT_CTX(ctx, flags);
1591abort:
1592 return ret;
1593}
1594
1595static ssize_t
1596pfm_write(struct file *file, const char __user *ubuf,
1597 size_t size, loff_t *ppos)
1598{
1599 DPRINT(("pfm_write called\n"));
1600 return -EINVAL;
1601}
1602
1603static unsigned int
1604pfm_poll(struct file *filp, poll_table * wait)
1605{
1606 pfm_context_t *ctx;
1607 unsigned long flags;
1608 unsigned int mask = 0;
1609
1610 if (PFM_IS_FILE(filp) == 0) {
1611 printk(KERN_ERR "perfmon: pfm_poll: bad magic [%d]\n", current->pid);
1612 return 0;
1613 }
1614
1615 ctx = (pfm_context_t *)filp->private_data;
1616 if (ctx == NULL) {
1617 printk(KERN_ERR "perfmon: pfm_poll: NULL ctx [%d]\n", current->pid);
1618 return 0;
1619 }
1620
1621
1622 DPRINT(("pfm_poll ctx_fd=%d before poll_wait\n", ctx->ctx_fd));
1623
1624 poll_wait(filp, &ctx->ctx_msgq_wait, wait);
1625
1626 PROTECT_CTX(ctx, flags);
1627
1628 if (PFM_CTXQ_EMPTY(ctx) == 0)
1629 mask = POLLIN | POLLRDNORM;
1630
1631 UNPROTECT_CTX(ctx, flags);
1632
1633 DPRINT(("pfm_poll ctx_fd=%d mask=0x%x\n", ctx->ctx_fd, mask));
1634
1635 return mask;
1636}
1637
1638static int
1639pfm_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg)
1640{
1641 DPRINT(("pfm_ioctl called\n"));
1642 return -EINVAL;
1643}
1644
1645/*
1646 * interrupt cannot be masked when coming here
1647 */
1648static inline int
1649pfm_do_fasync(int fd, struct file *filp, pfm_context_t *ctx, int on)
1650{
1651 int ret;
1652
1653 ret = fasync_helper (fd, filp, on, &ctx->ctx_async_queue);
1654
1655 DPRINT(("pfm_fasync called by [%d] on ctx_fd=%d on=%d async_queue=%p ret=%d\n",
1656 current->pid,
1657 fd,
1658 on,
1659 ctx->ctx_async_queue, ret));
1660
1661 return ret;
1662}
1663
1664static int
1665pfm_fasync(int fd, struct file *filp, int on)
1666{
1667 pfm_context_t *ctx;
1668 int ret;
1669
1670 if (PFM_IS_FILE(filp) == 0) {
1671 printk(KERN_ERR "perfmon: pfm_fasync bad magic [%d]\n", current->pid);
1672 return -EBADF;
1673 }
1674
1675 ctx = (pfm_context_t *)filp->private_data;
1676 if (ctx == NULL) {
1677 printk(KERN_ERR "perfmon: pfm_fasync NULL ctx [%d]\n", current->pid);
1678 return -EBADF;
1679 }
1680 /*
 1681 * we cannot mask interrupts during this call because it may
 1682 * go to sleep if memory is not readily available.
 1683 *
 1684 * We are protected from the context disappearing by the get_fd()/put_fd()
1685 * done in caller. Serialization of this function is ensured by caller.
1686 */
1687 ret = pfm_do_fasync(fd, filp, ctx, on);
1688
1689
1690 DPRINT(("pfm_fasync called on ctx_fd=%d on=%d async_queue=%p ret=%d\n",
1691 fd,
1692 on,
1693 ctx->ctx_async_queue, ret));
1694
1695 return ret;
1696}
1697
1698#ifdef CONFIG_SMP
1699/*
1700 * this function is exclusively called from pfm_close().
1701 * The context is not protected at that time, nor are interrupts
1702 * on the remote CPU. That's necessary to avoid deadlocks.
1703 */
1704static void
1705pfm_syswide_force_stop(void *info)
1706{
1707 pfm_context_t *ctx = (pfm_context_t *)info;
1708 struct pt_regs *regs = ia64_task_regs(current);
1709 struct task_struct *owner;
1710 unsigned long flags;
1711 int ret;
1712
1713 if (ctx->ctx_cpu != smp_processor_id()) {
1714 printk(KERN_ERR "perfmon: pfm_syswide_force_stop for CPU%d but on CPU%d\n",
1715 ctx->ctx_cpu,
1716 smp_processor_id());
1717 return;
1718 }
1719 owner = GET_PMU_OWNER();
1720 if (owner != ctx->ctx_task) {
1721 printk(KERN_ERR "perfmon: pfm_syswide_force_stop CPU%d unexpected owner [%d] instead of [%d]\n",
1722 smp_processor_id(),
1723 owner->pid, ctx->ctx_task->pid);
1724 return;
1725 }
1726 if (GET_PMU_CTX() != ctx) {
1727 printk(KERN_ERR "perfmon: pfm_syswide_force_stop CPU%d unexpected ctx %p instead of %p\n",
1728 smp_processor_id(),
1729 GET_PMU_CTX(), ctx);
1730 return;
1731 }
1732
1733 DPRINT(("on CPU%d forcing system wide stop for [%d]\n", smp_processor_id(), ctx->ctx_task->pid));
1734 /*
1735 * the context is already protected in pfm_close(), we simply
1736 * need to mask interrupts to avoid a PMU interrupt race on
1737 * this CPU
1738 */
1739 local_irq_save(flags);
1740
1741 ret = pfm_context_unload(ctx, NULL, 0, regs);
1742 if (ret) {
1743 DPRINT(("context_unload returned %d\n", ret));
1744 }
1745
1746 /*
1747 * unmask interrupts, PMU interrupts are now spurious here
1748 */
1749 local_irq_restore(flags);
1750}
1751
1752static void
1753pfm_syswide_cleanup_other_cpu(pfm_context_t *ctx)
1754{
1755 int ret;
1756
1757 DPRINT(("calling CPU%d for cleanup\n", ctx->ctx_cpu));
1758 ret = smp_call_function_single(ctx->ctx_cpu, pfm_syswide_force_stop, ctx, 0, 1);
1759 DPRINT(("called CPU%d for cleanup ret=%d\n", ctx->ctx_cpu, ret));
1760}
1761#endif /* CONFIG_SMP */
1762
1763/*
1764 * called for each close(). Partially free resources.
1765 * When caller is self-monitoring, the context is unloaded.
1766 */
1767static int
1768pfm_flush(struct file *filp)
1769{
1770 pfm_context_t *ctx;
1771 struct task_struct *task;
1772 struct pt_regs *regs;
1773 unsigned long flags;
1774 unsigned long smpl_buf_size = 0UL;
1775 void *smpl_buf_vaddr = NULL;
1776 int state, is_system;
1777
1778 if (PFM_IS_FILE(filp) == 0) {
1779 DPRINT(("bad magic for\n"));
1780 return -EBADF;
1781 }
1782
1783 ctx = (pfm_context_t *)filp->private_data;
1784 if (ctx == NULL) {
1785 printk(KERN_ERR "perfmon: pfm_flush: NULL ctx [%d]\n", current->pid);
1786 return -EBADF;
1787 }
1788
1789 /*
1790 * remove our file from the async queue, if we use this mode.
1791 * This can be done without the context being protected. We come
 1792 * here when the context has become unreachable by other tasks.
1793 *
1794 * We may still have active monitoring at this point and we may
1795 * end up in pfm_overflow_handler(). However, fasync_helper()
1796 * operates with interrupts disabled and it cleans up the
1797 * queue. If the PMU handler is called prior to entering
1798 * fasync_helper() then it will send a signal. If it is
1799 * invoked after, it will find an empty queue and no
 1800 * signal will be sent. In both cases, we are safe
1801 */
1802 if (filp->f_flags & FASYNC) {
1803 DPRINT(("cleaning up async_queue=%p\n", ctx->ctx_async_queue));
1804 pfm_do_fasync (-1, filp, ctx, 0);
1805 }
1806
1807 PROTECT_CTX(ctx, flags);
1808
1809 state = ctx->ctx_state;
1810 is_system = ctx->ctx_fl_system;
1811
1812 task = PFM_CTX_TASK(ctx);
1813 regs = ia64_task_regs(task);
1814
1815 DPRINT(("ctx_state=%d is_current=%d\n",
1816 state,
1817 task == current ? 1 : 0));
1818
1819 /*
1820 * if state == UNLOADED, then task is NULL
1821 */
1822
1823 /*
1824 * we must stop and unload because we are losing access to the context.
1825 */
1826 if (task == current) {
1827#ifdef CONFIG_SMP
1828 /*
1829 * the task IS the owner but it migrated to another CPU: that's bad
1830 * but we must handle this cleanly. Unfortunately, the kernel does
1831 * not provide a mechanism to block migration (while the context is loaded).
1832 *
1833 * We need to release the resource on the ORIGINAL cpu.
1834 */
1835 if (is_system && ctx->ctx_cpu != smp_processor_id()) {
1836
1837 DPRINT(("should be running on CPU%d\n", ctx->ctx_cpu));
1838 /*
1839 * keep context protected but unmask interrupt for IPI
1840 */
1841 local_irq_restore(flags);
1842
1843 pfm_syswide_cleanup_other_cpu(ctx);
1844
1845 /*
1846 * restore interrupt masking
1847 */
1848 local_irq_save(flags);
1849
1850 /*
1851 * context is unloaded at this point
1852 */
1853 } else
1854#endif /* CONFIG_SMP */
1855 {
1856
1857 DPRINT(("forcing unload\n"));
1858 /*
1859 * stop and unload, returning with state UNLOADED
1860 * and session unreserved.
1861 */
1862 pfm_context_unload(ctx, NULL, 0, regs);
1863
1864 DPRINT(("ctx_state=%d\n", ctx->ctx_state));
1865 }
1866 }
1867
1868 /*
1869 * remove virtual mapping, if any, for the calling task.
1870 * cannot reset ctx field until last user is calling close().
1871 *
1872 * ctx_smpl_vaddr must never be cleared because it is needed
1873 * by every task with access to the context
1874 *
1875 * When called from do_exit(), the mm context is gone already, therefore
1876 * mm is NULL, i.e., the VMA is already gone and we do not have to
1877 * do anything here
1878 */
1879 if (ctx->ctx_smpl_vaddr && current->mm) {
1880 smpl_buf_vaddr = ctx->ctx_smpl_vaddr;
1881 smpl_buf_size = ctx->ctx_smpl_size;
1882 }
1883
1884 UNPROTECT_CTX(ctx, flags);
1885
1886 /*
1887 * if there was a mapping, then we systematically remove it
1888 * at this point. Cannot be done inside critical section
1889 * because some VM function reenables interrupts.
1890 *
1891 */
1892 if (smpl_buf_vaddr) pfm_remove_smpl_mapping(current, smpl_buf_vaddr, smpl_buf_size);
1893
1894 return 0;
1895}
1896/*
1897 * called either on explicit close() or from exit_files().
1898 * Only the LAST user of the file gets to this point, i.e., it is
1899 * called only ONCE.
1900 *
1901 * IMPORTANT: we get called ONLY when the refcnt on the file gets to zero
 1902 * (fput()), i.e., the last task to access the file. Nobody else can access the
1903 * file at this point.
1904 *
1905 * When called from exit_files(), the VMA has been freed because exit_mm()
1906 * is executed before exit_files().
1907 *
1908 * When called from exit_files(), the current task is not yet ZOMBIE but we
1909 * flush the PMU state to the context.
1910 */
1911static int
1912pfm_close(struct inode *inode, struct file *filp)
1913{
1914 pfm_context_t *ctx;
1915 struct task_struct *task;
1916 struct pt_regs *regs;
1917 DECLARE_WAITQUEUE(wait, current);
1918 unsigned long flags;
1919 unsigned long smpl_buf_size = 0UL;
1920 void *smpl_buf_addr = NULL;
1921 int free_possible = 1;
1922 int state, is_system;
1923
1924 DPRINT(("pfm_close called private=%p\n", filp->private_data));
1925
1926 if (PFM_IS_FILE(filp) == 0) {
1927 DPRINT(("bad magic\n"));
1928 return -EBADF;
1929 }
1930
1931 ctx = (pfm_context_t *)filp->private_data;
1932 if (ctx == NULL) {
1933 printk(KERN_ERR "perfmon: pfm_close: NULL ctx [%d]\n", current->pid);
1934 return -EBADF;
1935 }
1936
1937 PROTECT_CTX(ctx, flags);
1938
1939 state = ctx->ctx_state;
1940 is_system = ctx->ctx_fl_system;
1941
1942 task = PFM_CTX_TASK(ctx);
1943 regs = ia64_task_regs(task);
1944
1945 DPRINT(("ctx_state=%d is_current=%d\n",
1946 state,
1947 task == current ? 1 : 0));
1948
1949 /*
1950 * if task == current, then pfm_flush() unloaded the context
1951 */
1952 if (state == PFM_CTX_UNLOADED) goto doit;
1953
1954 /*
1955 * context is loaded/masked and task != current, we need to
1956 * either force an unload or go zombie
1957 */
1958
1959 /*
1960 * The task is currently blocked or will block after an overflow.
1961	 * We must force it to wake up to get out of the
1962 * MASKED state and transition to the unloaded state by itself.
1963 *
1964 * This situation is only possible for per-task mode
1965 */
1966 if (state == PFM_CTX_MASKED && CTX_OVFL_NOBLOCK(ctx) == 0) {
1967
1968 /*
1969 * set a "partial" zombie state to be checked
1970 * upon return from down() in pfm_handle_work().
1971 *
1972 * We cannot use the ZOMBIE state, because it is checked
1973 * by pfm_load_regs() which is called upon wakeup from down().
1974	 * In such a case, it would free the context and then we would
1975 * return to pfm_handle_work() which would access the
1976 * stale context. Instead, we set a flag invisible to pfm_load_regs()
1977 * but visible to pfm_handle_work().
1978 *
1979 * For some window of time, we have a zombie context with
1980 * ctx_state = MASKED and not ZOMBIE
1981 */
1982 ctx->ctx_fl_going_zombie = 1;
1983
1984 /*
1985 * force task to wake up from MASKED state
1986 */
1987 up(&ctx->ctx_restart_sem);
1988
1989 DPRINT(("waking up ctx_state=%d\n", state));
1990
1991 /*
1992	 * put ourselves to sleep waiting for the other
1993 * task to report completion
1994 *
1995	 * the context is protected by the mutex, therefore there
1996	 * is no risk of being notified of completion before
1997	 * we are actually on the waitq.
1998 */
1999 set_current_state(TASK_INTERRUPTIBLE);
2000 add_wait_queue(&ctx->ctx_zombieq, &wait);
2001
2002 UNPROTECT_CTX(ctx, flags);
2003
2004 /*
2005 * XXX: check for signals :
2006 * - ok for explicit close
2007 * - not ok when coming from exit_files()
2008 */
2009 schedule();
2010
2011
2012 PROTECT_CTX(ctx, flags);
2013
2014
2015 remove_wait_queue(&ctx->ctx_zombieq, &wait);
2016 set_current_state(TASK_RUNNING);
2017
2018 /*
2019 * context is unloaded at this point
2020 */
2021	DPRINT(("after zombie wakeup ctx_state=%d\n", state));
2022 }
2023 else if (task != current) {
2024#ifdef CONFIG_SMP
2025 /*
2026 * switch context to zombie state
2027 */
2028 ctx->ctx_state = PFM_CTX_ZOMBIE;
2029
2030 DPRINT(("zombie ctx for [%d]\n", task->pid));
2031 /*
2032	 * cannot free the context on the spot; this is deferred until
2033 * the task notices the ZOMBIE state
2034 */
2035 free_possible = 0;
2036#else
2037 pfm_context_unload(ctx, NULL, 0, regs);
2038#endif
2039 }
2040
2041doit:
2042	/* reload state, it may have changed while the context was unprotected */
2043 state = ctx->ctx_state;
2044
2045 /*
2046 * the context is still attached to a task (possibly current)
2047 * we cannot destroy it right now
2048 */
2049
2050 /*
2051 * we must free the sampling buffer right here because
2052 * we cannot rely on it being cleaned up later by the
2053 * monitored task. It is not possible to free vmalloc'ed
2054 * memory in pfm_load_regs(). Instead, we remove the buffer
2055	 * now. Should there be a subsequent PMU overflow originally
2056	 * meant for sampling, it will be converted to spurious
2057	 * and that's fine because the monitoring tool is gone anyway.
2058 */
2059 if (ctx->ctx_smpl_hdr) {
2060 smpl_buf_addr = ctx->ctx_smpl_hdr;
2061 smpl_buf_size = ctx->ctx_smpl_size;
2062 /* no more sampling */
2063 ctx->ctx_smpl_hdr = NULL;
2064 ctx->ctx_fl_is_sampling = 0;
2065 }
2066
2067 DPRINT(("ctx_state=%d free_possible=%d addr=%p size=%lu\n",
2068 state,
2069 free_possible,
2070 smpl_buf_addr,
2071 smpl_buf_size));
2072
2073 if (smpl_buf_addr) pfm_exit_smpl_buffer(ctx->ctx_buf_fmt);
2074
2075 /*
2076	 * UNLOADED means that the session has already been unreserved.
2077 */
2078 if (state == PFM_CTX_ZOMBIE) {
2079 pfm_unreserve_session(ctx, ctx->ctx_fl_system , ctx->ctx_cpu);
2080 }
2081
2082 /*
2083	 * disconnecting the file descriptor from the context must be done
2084 * before we unlock.
2085 */
2086 filp->private_data = NULL;
2087
2088 /*
2089	 * if we free on the spot, the context is now completely unreachable
2090	 * from the caller's side. The monitored task side is also cut, so we
2091	 * can free it right away.
2092 *
2093 * If we have a deferred free, only the caller side is disconnected.
2094 */
2095 UNPROTECT_CTX(ctx, flags);
2096
2097 /*
2098 * All memory free operations (especially for vmalloc'ed memory)
2099 * MUST be done with interrupts ENABLED.
2100 */
2101 if (smpl_buf_addr) pfm_rvfree(smpl_buf_addr, smpl_buf_size);
2102
2103 /*
2104 * return the memory used by the context
2105 */
2106 if (free_possible) pfm_context_free(ctx);
2107
2108 return 0;
2109}
2110
2111static int
2112pfm_no_open(struct inode *irrelevant, struct file *dontcare)
2113{
2114 DPRINT(("pfm_no_open called\n"));
2115 return -ENXIO;
2116}
2117
2118
2119
2120static struct file_operations pfm_file_ops = {
2121 .llseek = no_llseek,
2122 .read = pfm_read,
2123 .write = pfm_write,
2124 .poll = pfm_poll,
2125 .ioctl = pfm_ioctl,
2126 .open = pfm_no_open, /* special open code to disallow open via /proc */
2127 .fasync = pfm_fasync,
2128 .release = pfm_close,
2129 .flush = pfm_flush
2130};
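/*
 * a perfmon context is thus exposed to user space as an anonymous file
 * living on the internal pfmfs mount (see pfm_alloc_fd() below): normal
 * read()/poll()/close() semantics apply to it, while open() through
 * /proc is rejected by pfm_no_open().
 */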
2131
2132static int
2133pfmfs_delete_dentry(struct dentry *dentry)
2134{
2135 return 1;
2136}
2137
2138static struct dentry_operations pfmfs_dentry_operations = {
2139 .d_delete = pfmfs_delete_dentry,
2140};
2141
2142
2143static int
2144pfm_alloc_fd(struct file **cfile)
2145{
2146 int fd, ret = 0;
2147 struct file *file = NULL;
2148 struct inode * inode;
2149 char name[32];
2150 struct qstr this;
2151
2152 fd = get_unused_fd();
2153 if (fd < 0) return -ENFILE;
2154
2155 ret = -ENFILE;
2156
2157 file = get_empty_filp();
2158 if (!file) goto out;
2159
2160 /*
2161 * allocate a new inode
2162 */
2163 inode = new_inode(pfmfs_mnt->mnt_sb);
2164 if (!inode) goto out;
2165
2166 DPRINT(("new inode ino=%ld @%p\n", inode->i_ino, inode));
2167
2168 inode->i_mode = S_IFCHR|S_IRUGO;
2169 inode->i_uid = current->fsuid;
2170 inode->i_gid = current->fsgid;
2171
2172 sprintf(name, "[%lu]", inode->i_ino);
2173 this.name = name;
2174 this.len = strlen(name);
2175 this.hash = inode->i_ino;
2176
2177 ret = -ENOMEM;
2178
2179 /*
2180 * allocate a new dcache entry
2181 */
2182 file->f_dentry = d_alloc(pfmfs_mnt->mnt_sb->s_root, &this);
2183 if (!file->f_dentry) goto out;
2184
2185 file->f_dentry->d_op = &pfmfs_dentry_operations;
2186
2187 d_add(file->f_dentry, inode);
2188 file->f_vfsmnt = mntget(pfmfs_mnt);
2189 file->f_mapping = inode->i_mapping;
2190
2191 file->f_op = &pfm_file_ops;
2192 file->f_mode = FMODE_READ;
2193 file->f_flags = O_RDONLY;
2194 file->f_pos = 0;
2195
2196 /*
2197 * may have to delay until context is attached?
2198 */
2199 fd_install(fd, file);
2200
2201 /*
2202 * the file structure we will use
2203 */
2204 *cfile = file;
2205
2206 return fd;
2207out:
2208 if (file) put_filp(file);
2209 put_unused_fd(fd);
2210 return ret;
2211}
2212
2213static void
2214pfm_free_fd(int fd, struct file *file)
2215{
2216 struct files_struct *files = current->files;
2217
2218 /*
2219	 * there is no fd_uninstall(), so we do it here
2220 */
2221 spin_lock(&files->file_lock);
2222 files->fd[fd] = NULL;
2223 spin_unlock(&files->file_lock);
2224
2225 if (file) put_filp(file);
2226 put_unused_fd(fd);
2227}
2228
2229static int
2230pfm_remap_buffer(struct vm_area_struct *vma, unsigned long buf, unsigned long addr, unsigned long size)
2231{
2232 DPRINT(("CPU%d buf=0x%lx addr=0x%lx size=%ld\n", smp_processor_id(), buf, addr, size));
2233
2234 while (size > 0) {
2235 unsigned long pfn = ia64_tpa(buf) >> PAGE_SHIFT;
2236
2237
2238 if (remap_pfn_range(vma, addr, pfn, PAGE_SIZE, PAGE_READONLY))
2239 return -ENOMEM;
2240
2241 addr += PAGE_SIZE;
2242 buf += PAGE_SIZE;
2243 size -= PAGE_SIZE;
2244 }
2245 return 0;
2246}
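/*
 * the page-by-page loop above is needed because the buffer comes from
 * pfm_rvmalloc(), i.e. vmalloc'ed memory whose pages are generally not
 * physically contiguous. For instance (hypothetical frame numbers), a
 * 3-page buffer could translate as:
 *   buf + 0*PAGE_SIZE -> pfn 0x10a2
 *   buf + 1*PAGE_SIZE -> pfn 0x0033
 *   buf + 2*PAGE_SIZE -> pfn 0x2f01
 * hence each page is resolved with ia64_tpa() and remapped separately
 * rather than with a single remap_pfn_range() call for the whole range.
 */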
2247
2248/*
2249 * allocate a sampling buffer and remap it into the user address space of the task
2250 */
2251static int
2252pfm_smpl_buffer_alloc(struct task_struct *task, pfm_context_t *ctx, unsigned long rsize, void **user_vaddr)
2253{
2254 struct mm_struct *mm = task->mm;
2255 struct vm_area_struct *vma = NULL;
2256 unsigned long size;
2257 void *smpl_buf;
2258
2259
2260 /*
2261	 * the fixed header + requested size, aligned to a page boundary
2262 */
2263 size = PAGE_ALIGN(rsize);
2264
2265 DPRINT(("sampling buffer rsize=%lu size=%lu bytes\n", rsize, size));
2266
2267 /*
2268 * check requested size to avoid Denial-of-service attacks
2269 * XXX: may have to refine this test
2270 * Check against address space limit.
2271 *
2272	 * if ((mm->total_vm << PAGE_SHIFT) + len > task->rlim[RLIMIT_AS].rlim_cur)
2273 * return -ENOMEM;
2274 */
2275 if (size > task->signal->rlim[RLIMIT_MEMLOCK].rlim_cur)
2276 return -ENOMEM;
2277
2278 /*
2279	 * We do the easy-to-undo allocations first.
2280	 *
2281	 * pfm_rvmalloc() clears the buffer, so there is no leak
2282 */
2283 smpl_buf = pfm_rvmalloc(size);
2284 if (smpl_buf == NULL) {
2285 DPRINT(("Can't allocate sampling buffer\n"));
2286 return -ENOMEM;
2287 }
2288
2289 DPRINT(("smpl_buf @%p\n", smpl_buf));
2290
2291 /* allocate vma */
2292 vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
2293 if (!vma) {
2294 DPRINT(("Cannot allocate vma\n"));
2295 goto error_kmem;
2296 }
2297 memset(vma, 0, sizeof(*vma));
2298
2299 /*
2300 * partially initialize the vma for the sampling buffer
2301 */
2302 vma->vm_mm = mm;
2303 vma->vm_flags = VM_READ| VM_MAYREAD |VM_RESERVED;
2304 vma->vm_page_prot = PAGE_READONLY; /* XXX may need to change */
2305
2306 /*
2307 * Now we have everything we need and we can initialize
2308 * and connect all the data structures
2309 */
2310
2311 ctx->ctx_smpl_hdr = smpl_buf;
2312 ctx->ctx_smpl_size = size; /* aligned size */
2313
2314 /*
2315 * Let's do the difficult operations next.
2316 *
2317 * now we atomically find some area in the address space and
2318 * remap the buffer in it.
2319 */
2320 down_write(&task->mm->mmap_sem);
2321
2322 /* find some free area in address space, must have mmap sem held */
2323 vma->vm_start = pfm_get_unmapped_area(NULL, 0, size, 0, MAP_PRIVATE|MAP_ANONYMOUS, 0);
2324 if (vma->vm_start == 0UL) {
2325 DPRINT(("Cannot find unmapped area for size %ld\n", size));
2326 up_write(&task->mm->mmap_sem);
2327 goto error;
2328 }
2329 vma->vm_end = vma->vm_start + size;
2330 vma->vm_pgoff = vma->vm_start >> PAGE_SHIFT;
2331
2332 DPRINT(("aligned size=%ld, hdr=%p mapped @0x%lx\n", size, ctx->ctx_smpl_hdr, vma->vm_start));
2333
2334 /* can only be applied to current task, need to have the mm semaphore held when called */
2335 if (pfm_remap_buffer(vma, (unsigned long)smpl_buf, vma->vm_start, size)) {
2336 DPRINT(("Can't remap buffer\n"));
2337 up_write(&task->mm->mmap_sem);
2338 goto error;
2339 }
2340
2341 /*
2342 * now insert the vma in the vm list for the process, must be
2343 * done with mmap lock held
2344 */
2345 insert_vm_struct(mm, vma);
2346
2347 mm->total_vm += size >> PAGE_SHIFT;
2348 vm_stat_account(vma);
2349 up_write(&task->mm->mmap_sem);
2350
2351 /*
2352 * keep track of user level virtual address
2353 */
2354 ctx->ctx_smpl_vaddr = (void *)vma->vm_start;
2355 *(unsigned long *)user_vaddr = vma->vm_start;
2356
2357 return 0;
2358
2359error:
2360 kmem_cache_free(vm_area_cachep, vma);
2361error_kmem:
2362 pfm_rvfree(smpl_buf, size);
2363
2364 return -ENOMEM;
2365}
2366
2367/*
2368 * XXX: do something better here
2369 */
2370static int
2371pfm_bad_permissions(struct task_struct *task)
2372{
2373 /* inspired by ptrace_attach() */
2374 DPRINT(("cur: uid=%d gid=%d task: euid=%d suid=%d uid=%d egid=%d sgid=%d\n",
2375 current->uid,
2376 current->gid,
2377 task->euid,
2378 task->suid,
2379 task->uid,
2380 task->egid,
2381 task->sgid));
2382
2383 return ((current->uid != task->euid)
2384 || (current->uid != task->suid)
2385 || (current->uid != task->uid)
2386 || (current->gid != task->egid)
2387 || (current->gid != task->sgid)
2388 || (current->gid != task->gid)) && !capable(CAP_SYS_PTRACE);
2389}
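/*
 * i.e. attaching is refused unless the caller's uid/gid match every
 * flavor (real, effective, saved) of the target task's uid/gid, or the
 * caller holds CAP_SYS_PTRACE, much like the ptrace_attach() check this
 * is inspired by.
 */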
2390
2391static int
2392pfarg_is_sane(struct task_struct *task, pfarg_context_t *pfx)
2393{
2394 int ctx_flags;
2395
2396 /* valid signal */
2397
2398 ctx_flags = pfx->ctx_flags;
2399
2400 if (ctx_flags & PFM_FL_SYSTEM_WIDE) {
2401
2402 /*
2403 * cannot block in this mode
2404 */
2405 if (ctx_flags & PFM_FL_NOTIFY_BLOCK) {
2406 DPRINT(("cannot use blocking mode when in system wide monitoring\n"));
2407 return -EINVAL;
2408 }
2409 } else {
2410 }
2411 /* probably more to add here */
2412
2413 return 0;
2414}
2415
2416static int
2417pfm_setup_buffer_fmt(struct task_struct *task, pfm_context_t *ctx, unsigned int ctx_flags,
2418 unsigned int cpu, pfarg_context_t *arg)
2419{
2420 pfm_buffer_fmt_t *fmt = NULL;
2421 unsigned long size = 0UL;
2422 void *uaddr = NULL;
2423 void *fmt_arg = NULL;
2424 int ret = 0;
2425#define PFM_CTXARG_BUF_ARG(a) (pfm_buffer_fmt_t *)(a+1)
2426
2427 /* invoke and lock buffer format, if found */
2428 fmt = pfm_find_buffer_fmt(arg->ctx_smpl_buf_id);
2429 if (fmt == NULL) {
2430 DPRINT(("[%d] cannot find buffer format\n", task->pid));
2431 return -EINVAL;
2432 }
2433
2434 /*
2435 * buffer argument MUST be contiguous to pfarg_context_t
2436 */
2437 if (fmt->fmt_arg_size) fmt_arg = PFM_CTXARG_BUF_ARG(arg);
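	/*
	 * i.e. the argument vector copied from user space is expected to
	 * look like (sketch):
	 *
	 *   +-----------------+------------------------------+
	 *   | pfarg_context_t | fmt_arg_size bytes of format |
	 *   |     (header)    |      specific argument       |
	 *   +-----------------+------------------------------+
	 *   arg               arg + 1  (== PFM_CTXARG_BUF_ARG(arg))
	 *
	 * so fmt_arg simply points right past the fixed request header.
	 */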
2438
2439 ret = pfm_buf_fmt_validate(fmt, task, ctx_flags, cpu, fmt_arg);
2440
2441 DPRINT(("[%d] after validate(0x%x,%d,%p)=%d\n", task->pid, ctx_flags, cpu, fmt_arg, ret));
2442
2443 if (ret) goto error;
2444
2445 /* link buffer format and context */
2446 ctx->ctx_buf_fmt = fmt;
2447
2448 /*
2449 * check if buffer format wants to use perfmon buffer allocation/mapping service
2450 */
2451 ret = pfm_buf_fmt_getsize(fmt, task, ctx_flags, cpu, fmt_arg, &size);
2452 if (ret) goto error;
2453
2454 if (size) {
2455 /*
2456 * buffer is always remapped into the caller's address space
2457 */
2458 ret = pfm_smpl_buffer_alloc(current, ctx, size, &uaddr);
2459 if (ret) goto error;
2460
2461 /* keep track of user address of buffer */
2462 arg->ctx_smpl_vaddr = uaddr;
2463 }
2464 ret = pfm_buf_fmt_init(fmt, task, ctx->ctx_smpl_hdr, ctx_flags, cpu, fmt_arg);
2465
2466error:
2467 return ret;
2468}
2469
2470static void
2471pfm_reset_pmu_state(pfm_context_t *ctx)
2472{
2473 int i;
2474
2475 /*
2476 * install reset values for PMC.
2477 */
2478 for (i=1; PMC_IS_LAST(i) == 0; i++) {
2479 if (PMC_IS_IMPL(i) == 0) continue;
2480 ctx->ctx_pmcs[i] = PMC_DFL_VAL(i);
2481 DPRINT(("pmc[%d]=0x%lx\n", i, ctx->ctx_pmcs[i]));
2482 }
2483 /*
2484	 * PMD registers are set to 0UL when the context is memset()
2485 */
2486
2487 /*
2488	 * On context switch restore, we must restore ALL pmc and ALL pmd even
2489 * when they are not actively used by the task. In UP, the incoming process
2490 * may otherwise pick up left over PMC, PMD state from the previous process.
2491 * As opposed to PMD, stale PMC can cause harm to the incoming
2492 * process because they may change what is being measured.
2493 * Therefore, we must systematically reinstall the entire
2494 * PMC state. In SMP, the same thing is possible on the
2495	 * same CPU but also between 2 CPUs.
2496 *
2497 * The problem with PMD is information leaking especially
2498 * to user level when psr.sp=0
2499 *
2500 * There is unfortunately no easy way to avoid this problem
2501	 * on either UP or SMP. This definitely slows down the
2502 * pfm_load_regs() function.
2503 */
2504
2505 /*
2506 * bitmask of all PMCs accessible to this context
2507 *
2508 * PMC0 is treated differently.
2509 */
2510 ctx->ctx_all_pmcs[0] = pmu_conf->impl_pmcs[0] & ~0x1;
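	/*
	 * e.g. with a hypothetical impl_pmcs[0] of 0x00ff this yields
	 * ctx_all_pmcs[0] = 0x00fe: every implemented PMC except PMC0,
	 * which the PMU uses as an overflow status register and which is
	 * therefore never handed out to a context.
	 */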
2511
2512 /*
2513	 * bitmask of all PMDs that are accessible to this context
2514 */
2515 ctx->ctx_all_pmds[0] = pmu_conf->impl_pmds[0];
2516
2517 DPRINT(("<%d> all_pmcs=0x%lx all_pmds=0x%lx\n", ctx->ctx_fd, ctx->ctx_all_pmcs[0],ctx->ctx_all_pmds[0]));
2518
2519 /*
2520 * useful in case of re-enable after disable
2521 */
2522 ctx->ctx_used_ibrs[0] = 0UL;
2523 ctx->ctx_used_dbrs[0] = 0UL;
2524}
2525
2526static int
2527pfm_ctx_getsize(void *arg, size_t *sz)
2528{
2529 pfarg_context_t *req = (pfarg_context_t *)arg;
2530 pfm_buffer_fmt_t *fmt;
2531
2532 *sz = 0;
2533
2534 if (!pfm_uuid_cmp(req->ctx_smpl_buf_id, pfm_null_uuid)) return 0;
2535
2536 fmt = pfm_find_buffer_fmt(req->ctx_smpl_buf_id);
2537 if (fmt == NULL) {
2538 DPRINT(("cannot find buffer format\n"));
2539 return -EINVAL;
2540 }
2541 /* get just enough to copy in user parameters */
2542 *sz = fmt->fmt_arg_size;
2543 DPRINT(("arg_size=%lu\n", *sz));
2544
2545 return 0;
2546}
2547
2548
2549
2550/*
2551 * cannot attach if :
2552 * - kernel task
2553 * - task not owned by caller
2554 * - task incompatible with context mode
2555 */
2556static int
2557pfm_task_incompatible(pfm_context_t *ctx, struct task_struct *task)
2558{
2559 /*
2560	 * no kernel task or task not owned by the caller
2561 */
2562 if (task->mm == NULL) {
2563		DPRINT(("task [%d] has no memory context (kernel thread)\n", task->pid));
2564 return -EPERM;
2565 }
2566 if (pfm_bad_permissions(task)) {
2567 DPRINT(("no permission to attach to [%d]\n", task->pid));
2568 return -EPERM;
2569 }
2570 /*
2571 * cannot block in self-monitoring mode
2572 */
2573 if (CTX_OVFL_NOBLOCK(ctx) == 0 && task == current) {
2574 DPRINT(("cannot load a blocking context on self for [%d]\n", task->pid));
2575 return -EINVAL;
2576 }
2577
2578 if (task->exit_state == EXIT_ZOMBIE) {
2579 DPRINT(("cannot attach to zombie task [%d]\n", task->pid));
2580 return -EBUSY;
2581 }
2582
2583 /*
2584 * always ok for self
2585 */
2586 if (task == current) return 0;
2587
2588 if ((task->state != TASK_STOPPED) && (task->state != TASK_TRACED)) {
2589 DPRINT(("cannot attach to non-stopped task [%d] state=%ld\n", task->pid, task->state));
2590 return -EBUSY;
2591 }
2592 /*
2593 * make sure the task is off any CPU
2594 */
2595 wait_task_inactive(task);
2596
2597 /* more to come... */
2598
2599 return 0;
2600}
2601
2602static int
2603pfm_get_task(pfm_context_t *ctx, pid_t pid, struct task_struct **task)
2604{
2605 struct task_struct *p = current;
2606 int ret;
2607
2608 /* XXX: need to add more checks here */
2609 if (pid < 2) return -EPERM;
2610
2611 if (pid != current->pid) {
2612
2613 read_lock(&tasklist_lock);
2614
2615 p = find_task_by_pid(pid);
2616
2617 /* make sure task cannot go away while we operate on it */
2618 if (p) get_task_struct(p);
2619
2620 read_unlock(&tasklist_lock);
2621
2622 if (p == NULL) return -ESRCH;
2623 }
2624
2625 ret = pfm_task_incompatible(ctx, p);
2626 if (ret == 0) {
2627 *task = p;
2628 } else if (p != current) {
2629 pfm_put_task(p);
2630 }
2631 return ret;
2632}
2633
2634
2635
2636static int
2637pfm_context_create(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
2638{
2639 pfarg_context_t *req = (pfarg_context_t *)arg;
2640 struct file *filp;
2641 int ctx_flags;
2642 int ret;
2643
2644 /* let's check the arguments first */
2645 ret = pfarg_is_sane(current, req);
2646 if (ret < 0) return ret;
2647
2648 ctx_flags = req->ctx_flags;
2649
2650 ret = -ENOMEM;
2651
2652 ctx = pfm_context_alloc();
2653 if (!ctx) goto error;
2654
2655 ret = pfm_alloc_fd(&filp);
2656 if (ret < 0) goto error_file;
2657
2658 req->ctx_fd = ctx->ctx_fd = ret;
2659
2660 /*
2661 * attach context to file
2662 */
2663 filp->private_data = ctx;
2664
2665 /*
2666 * does the user want to sample?
2667 */
2668 if (pfm_uuid_cmp(req->ctx_smpl_buf_id, pfm_null_uuid)) {
2669 ret = pfm_setup_buffer_fmt(current, ctx, ctx_flags, 0, req);
2670 if (ret) goto buffer_error;
2671 }
2672
2673 /*
2674 * init context protection lock
2675 */
2676 spin_lock_init(&ctx->ctx_lock);
2677
2678 /*
2679 * context is unloaded
2680 */
2681 ctx->ctx_state = PFM_CTX_UNLOADED;
2682
2683 /*
2684 * initialization of context's flags
2685 */
2686 ctx->ctx_fl_block = (ctx_flags & PFM_FL_NOTIFY_BLOCK) ? 1 : 0;
2687 ctx->ctx_fl_system = (ctx_flags & PFM_FL_SYSTEM_WIDE) ? 1: 0;
2688 ctx->ctx_fl_is_sampling = ctx->ctx_buf_fmt ? 1 : 0; /* assume record() is defined */
2689 ctx->ctx_fl_no_msg = (ctx_flags & PFM_FL_OVFL_NO_MSG) ? 1: 0;
2690 /*
2691 * will move to set properties
2692 * ctx->ctx_fl_excl_idle = (ctx_flags & PFM_FL_EXCL_IDLE) ? 1: 0;
2693 */
2694
2695 /*
2696 * init restart semaphore to locked
2697 */
2698 sema_init(&ctx->ctx_restart_sem, 0);
2699
2700 /*
2701 * activation is used in SMP only
2702 */
2703 ctx->ctx_last_activation = PFM_INVALID_ACTIVATION;
2704 SET_LAST_CPU(ctx, -1);
2705
2706 /*
2707 * initialize notification message queue
2708 */
2709 ctx->ctx_msgq_head = ctx->ctx_msgq_tail = 0;
2710 init_waitqueue_head(&ctx->ctx_msgq_wait);
2711 init_waitqueue_head(&ctx->ctx_zombieq);
2712
2713 DPRINT(("ctx=%p flags=0x%x system=%d notify_block=%d excl_idle=%d no_msg=%d ctx_fd=%d \n",
2714 ctx,
2715 ctx_flags,
2716 ctx->ctx_fl_system,
2717 ctx->ctx_fl_block,
2718 ctx->ctx_fl_excl_idle,
2719 ctx->ctx_fl_no_msg,
2720 ctx->ctx_fd));
2721
2722 /*
2723 * initialize soft PMU state
2724 */
2725 pfm_reset_pmu_state(ctx);
2726
2727 return 0;
2728
2729buffer_error:
2730 pfm_free_fd(ctx->ctx_fd, filp);
2731
2732 if (ctx->ctx_buf_fmt) {
2733 pfm_buf_fmt_exit(ctx->ctx_buf_fmt, current, NULL, regs);
2734 }
2735error_file:
2736 pfm_context_free(ctx);
2737
2738error:
2739 return ret;
2740}
2741
2742static inline unsigned long
2743pfm_new_counter_value (pfm_counter_t *reg, int is_long_reset)
2744{
2745 unsigned long val = is_long_reset ? reg->long_reset : reg->short_reset;
2746 unsigned long new_seed, old_seed = reg->seed, mask = reg->mask;
2747 extern unsigned long carta_random32 (unsigned long seed);
2748
2749 if (reg->flags & PFM_REGFL_RANDOM) {
2750 new_seed = carta_random32(old_seed);
2751 val -= (old_seed & mask); /* counter values are negative numbers! */
2752 if ((mask >> 32) != 0)
2753 /* construct a full 64-bit random value: */
2754 new_seed |= carta_random32(old_seed >> 32) << 32;
2755 reg->seed = new_seed;
2756 }
2757 reg->lval = val;
2758 return val;
2759}
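/*
 * e.g. with PFM_REGFL_RANDOM set, a (hypothetical) reg->mask of 0xff and
 * a long_reset of -1000: up to 255, taken from the current seed, is
 * subtracted from the reset value and a new seed is generated for the
 * next period, so successive sampling periods vary between 1000 and 1255
 * events instead of staying constant.
 */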
2760
2761static void
2762pfm_reset_regs_masked(pfm_context_t *ctx, unsigned long *ovfl_regs, int is_long_reset)
2763{
2764 unsigned long mask = ovfl_regs[0];
2765 unsigned long reset_others = 0UL;
2766 unsigned long val;
2767 int i;
2768
2769 /*
2770 * now restore reset value on sampling overflowed counters
2771 */
2772 mask >>= PMU_FIRST_COUNTER;
2773 for(i = PMU_FIRST_COUNTER; mask; i++, mask >>= 1) {
2774
2775 if ((mask & 0x1UL) == 0UL) continue;
2776
2777 ctx->ctx_pmds[i].val = val = pfm_new_counter_value(ctx->ctx_pmds+ i, is_long_reset);
2778 reset_others |= ctx->ctx_pmds[i].reset_pmds[0];
2779
2780 DPRINT_ovfl((" %s reset ctx_pmds[%d]=%lx\n", is_long_reset ? "long" : "short", i, val));
2781 }
2782
2783 /*
2784 * Now take care of resetting the other registers
2785 */
2786 for(i = 0; reset_others; i++, reset_others >>= 1) {
2787
2788 if ((reset_others & 0x1) == 0) continue;
2789
2790 ctx->ctx_pmds[i].val = val = pfm_new_counter_value(ctx->ctx_pmds + i, is_long_reset);
2791
2792 DPRINT_ovfl(("%s reset_others pmd[%d]=%lx\n",
2793 is_long_reset ? "long" : "short", i, val));
2794 }
2795}
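/*
 * e.g. with PMU_FIRST_COUNTER == 4 and ovfl_regs[0] == 0x30 (hypothetical
 * values), the first loop above visits i = 4 and i = 5 and reloads the
 * reset values of PMD4 and PMD5; any registers named in their
 * reset_pmds[0] masks are then handled by the second loop.
 */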
2796
2797static void
2798pfm_reset_regs(pfm_context_t *ctx, unsigned long *ovfl_regs, int is_long_reset)
2799{
2800 unsigned long mask = ovfl_regs[0];
2801 unsigned long reset_others = 0UL;
2802 unsigned long val;
2803 int i;
2804
2805 DPRINT_ovfl(("ovfl_regs=0x%lx is_long_reset=%d\n", ovfl_regs[0], is_long_reset));
2806
2807 if (ctx->ctx_state == PFM_CTX_MASKED) {
2808 pfm_reset_regs_masked(ctx, ovfl_regs, is_long_reset);
2809 return;
2810 }
2811
2812 /*
2813 * now restore reset value on sampling overflowed counters
2814 */
2815 mask >>= PMU_FIRST_COUNTER;
2816 for(i = PMU_FIRST_COUNTER; mask; i++, mask >>= 1) {
2817
2818 if ((mask & 0x1UL) == 0UL) continue;
2819
2820 val = pfm_new_counter_value(ctx->ctx_pmds+ i, is_long_reset);
2821 reset_others |= ctx->ctx_pmds[i].reset_pmds[0];
2822
2823 DPRINT_ovfl((" %s reset ctx_pmds[%d]=%lx\n", is_long_reset ? "long" : "short", i, val));
2824
2825 pfm_write_soft_counter(ctx, i, val);
2826 }
2827
2828 /*
2829 * Now take care of resetting the other registers
2830 */
2831 for(i = 0; reset_others; i++, reset_others >>= 1) {
2832
2833 if ((reset_others & 0x1) == 0) continue;
2834
2835 val = pfm_new_counter_value(ctx->ctx_pmds + i, is_long_reset);
2836
2837 if (PMD_IS_COUNTING(i)) {
2838 pfm_write_soft_counter(ctx, i, val);
2839 } else {
2840 ia64_set_pmd(i, val);
2841 }
2842 DPRINT_ovfl(("%s reset_others pmd[%d]=%lx\n",
2843 is_long_reset ? "long" : "short", i, val));
2844 }
2845 ia64_srlz_d();
2846}
2847
2848static int
2849pfm_write_pmcs(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
2850{
2851 struct thread_struct *thread = NULL;
2852 struct task_struct *task;
2853 pfarg_reg_t *req = (pfarg_reg_t *)arg;
2854 unsigned long value, pmc_pm;
2855 unsigned long smpl_pmds, reset_pmds, impl_pmds;
2856 unsigned int cnum, reg_flags, flags, pmc_type;
2857 int i, can_access_pmu = 0, is_loaded, is_system, expert_mode;
2858 int is_monitor, is_counting, state;
2859 int ret = -EINVAL;
2860 pfm_reg_check_t wr_func;
2861#define PFM_CHECK_PMC_PM(x, y, z) ((x)->ctx_fl_system ^ PMC_PM(y, z))
2862
2863 state = ctx->ctx_state;
2864 is_loaded = state == PFM_CTX_LOADED ? 1 : 0;
2865 is_system = ctx->ctx_fl_system;
2866 task = ctx->ctx_task;
2867 impl_pmds = pmu_conf->impl_pmds[0];
2868
2869 if (state == PFM_CTX_ZOMBIE) return -EINVAL;
2870
2871 if (is_loaded) {
2872 thread = &task->thread;
2873 /*
2874 * In system wide and when the context is loaded, access can only happen
2875 * when the caller is running on the CPU being monitored by the session.
2876 * It does not have to be the owner (ctx_task) of the context per se.
2877 */
2878 if (is_system && ctx->ctx_cpu != smp_processor_id()) {
2879 DPRINT(("should be running on CPU%d\n", ctx->ctx_cpu));
2880 return -EBUSY;
2881 }
2882 can_access_pmu = GET_PMU_OWNER() == task || is_system ? 1 : 0;
2883 }
2884 expert_mode = pfm_sysctl.expert_mode;
2885
2886 for (i = 0; i < count; i++, req++) {
2887
2888 cnum = req->reg_num;
2889 reg_flags = req->reg_flags;
2890 value = req->reg_value;
2891 smpl_pmds = req->reg_smpl_pmds[0];
2892 reset_pmds = req->reg_reset_pmds[0];
2893 flags = 0;
2894
2895
2896 if (cnum >= PMU_MAX_PMCS) {
2897 DPRINT(("pmc%u is invalid\n", cnum));
2898 goto error;
2899 }
2900
2901 pmc_type = pmu_conf->pmc_desc[cnum].type;
2902 pmc_pm = (value >> pmu_conf->pmc_desc[cnum].pm_pos) & 0x1;
2903 is_counting = (pmc_type & PFM_REG_COUNTING) == PFM_REG_COUNTING ? 1 : 0;
2904 is_monitor = (pmc_type & PFM_REG_MONITOR) == PFM_REG_MONITOR ? 1 : 0;
2905
2906 /*
2907		 * we reject all non-implemented PMCs as well
2908 * as attempts to modify PMC[0-3] which are used
2909 * as status registers by the PMU
2910 */
2911 if ((pmc_type & PFM_REG_IMPL) == 0 || (pmc_type & PFM_REG_CONTROL) == PFM_REG_CONTROL) {
2912 DPRINT(("pmc%u is unimplemented or no-access pmc_type=%x\n", cnum, pmc_type));
2913 goto error;
2914 }
2915 wr_func = pmu_conf->pmc_desc[cnum].write_check;
2916 /*
2917		 * If the PMC is a monitor and the value is not the default, it must have:
2918 * - system-wide session: PMCx.pm=1 (privileged monitor)
2919 * - per-task : PMCx.pm=0 (user monitor)
2920 */
2921 if (is_monitor && value != PMC_DFL_VAL(cnum) && is_system ^ pmc_pm) {
2922 DPRINT(("pmc%u pmc_pm=%lu is_system=%d\n",
2923 cnum,
2924 pmc_pm,
2925 is_system));
2926 goto error;
2927 }
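		/*
		 * the XOR above accepts only the matching combinations, e.g.:
		 *   system-wide (is_system=1), pmc_pm=1  ->  0, accepted
		 *   per-task    (is_system=0), pmc_pm=0  ->  0, accepted
		 * any other pairing yields 1 and the request is rejected,
		 * unless the register keeps its default value.
		 */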
2928
2929 if (is_counting) {
2930 /*
2931 * enforce generation of overflow interrupt. Necessary on all
2932 * CPUs.
2933 */
2934 value |= 1 << PMU_PMC_OI;
2935
2936 if (reg_flags & PFM_REGFL_OVFL_NOTIFY) {
2937 flags |= PFM_REGFL_OVFL_NOTIFY;
2938 }
2939
2940 if (reg_flags & PFM_REGFL_RANDOM) flags |= PFM_REGFL_RANDOM;
2941
2942 /* verify validity of smpl_pmds */
2943 if ((smpl_pmds & impl_pmds) != smpl_pmds) {
2944 DPRINT(("invalid smpl_pmds 0x%lx for pmc%u\n", smpl_pmds, cnum));
2945 goto error;
2946 }
2947
2948 /* verify validity of reset_pmds */
2949 if ((reset_pmds & impl_pmds) != reset_pmds) {
2950 DPRINT(("invalid reset_pmds 0x%lx for pmc%u\n", reset_pmds, cnum));
2951 goto error;
2952 }
2953 } else {
2954 if (reg_flags & (PFM_REGFL_OVFL_NOTIFY|PFM_REGFL_RANDOM)) {
2955 DPRINT(("cannot set ovfl_notify or random on pmc%u\n", cnum));
2956 goto error;
2957 }
2958			/* the eventid on non-counting monitors is ignored */
2959 }
2960
2961 /*
2962 * execute write checker, if any
2963 */
2964 if (likely(expert_mode == 0 && wr_func)) {
2965 ret = (*wr_func)(task, ctx, cnum, &value, regs);
2966 if (ret) goto error;
2967 ret = -EINVAL;
2968 }
2969
2970 /*
2971 * no error on this register
2972 */
2973 PFM_REG_RETFLAG_SET(req->reg_flags, 0);
2974
2975 /*
2976 * Now we commit the changes to the software state
2977 */
2978
2979 /*
2980 * update overflow information
2981 */
2982 if (is_counting) {
2983 /*
2984 * full flag update each time a register is programmed
2985 */
2986 ctx->ctx_pmds[cnum].flags = flags;
2987
2988 ctx->ctx_pmds[cnum].reset_pmds[0] = reset_pmds;
2989 ctx->ctx_pmds[cnum].smpl_pmds[0] = smpl_pmds;
2990 ctx->ctx_pmds[cnum].eventid = req->reg_smpl_eventid;
2991
2992 /*
2993 * Mark all PMDS to be accessed as used.
2994 *
2995 * We do not keep track of PMC because we have to
2996 * systematically restore ALL of them.
2997 *
2998 * We do not update the used_monitors mask, because
2999			 * if we have not programmed them, then they will be in
3000			 * a quiescent state, therefore we will not need to
3001			 * mask/restore them when the context is MASKED.
3002 */
3003 CTX_USED_PMD(ctx, reset_pmds);
3004 CTX_USED_PMD(ctx, smpl_pmds);
3005 /*
3006 * make sure we do not try to reset on
3007 * restart because we have established new values
3008 */
3009			if (state == PFM_CTX_MASKED) ctx->ctx_ovfl_regs[0] &= ~(1UL << cnum);
3010 }
3011 /*
3012 * Needed in case the user does not initialize the equivalent
3013 * PMD. Clearing is done indirectly via pfm_reset_pmu_state() so there is no
3014 * possible leak here.
3015 */
3016 CTX_USED_PMD(ctx, pmu_conf->pmc_desc[cnum].dep_pmd[0]);
3017
3018 /*
3019 * keep track of the monitor PMC that we are using.
3020 * we save the value of the pmc in ctx_pmcs[] and if
3021 * the monitoring is not stopped for the context we also
3022 * place it in the saved state area so that it will be
3023 * picked up later by the context switch code.
3024 *
3025 * The value in ctx_pmcs[] can only be changed in pfm_write_pmcs().
3026 *
3027 * The value in thread->pmcs[] may be modified on overflow, i.e., when
3028 * monitoring needs to be stopped.
3029 */
3030 if (is_monitor) CTX_USED_MONITOR(ctx, 1UL << cnum);
3031
3032 /*
3033 * update context state
3034 */
3035 ctx->ctx_pmcs[cnum] = value;
3036
3037 if (is_loaded) {
3038 /*
3039 * write thread state
3040 */
3041 if (is_system == 0) thread->pmcs[cnum] = value;
3042
3043 /*
3044 * write hardware register if we can
3045 */
3046 if (can_access_pmu) {
3047 ia64_set_pmc(cnum, value);
3048 }
3049#ifdef CONFIG_SMP
3050 else {
3051 /*
3052 * per-task SMP only here
3053 *
3054 * we are guaranteed that the task is not running on the other CPU,
3055			 * we indicate that this PMC will need to be reloaded if the task
3056 * is rescheduled on the CPU it ran last on.
3057 */
3058 ctx->ctx_reload_pmcs[0] |= 1UL << cnum;
3059 }
3060#endif
3061 }
3062
3063 DPRINT(("pmc[%u]=0x%lx ld=%d apmu=%d flags=0x%x all_pmcs=0x%lx used_pmds=0x%lx eventid=%ld smpl_pmds=0x%lx reset_pmds=0x%lx reloads_pmcs=0x%lx used_monitors=0x%lx ovfl_regs=0x%lx\n",
3064 cnum,
3065 value,
3066 is_loaded,
3067 can_access_pmu,
3068 flags,
3069 ctx->ctx_all_pmcs[0],
3070 ctx->ctx_used_pmds[0],
3071 ctx->ctx_pmds[cnum].eventid,
3072 smpl_pmds,
3073 reset_pmds,
3074 ctx->ctx_reload_pmcs[0],
3075 ctx->ctx_used_monitors[0],
3076 ctx->ctx_ovfl_regs[0]));
3077 }
3078
3079 /*
3080 * make sure the changes are visible
3081 */
3082 if (can_access_pmu) ia64_srlz_d();
3083
3084 return 0;
3085error:
3086 PFM_REG_RETFLAG_SET(req->reg_flags, PFM_REG_RETFL_EINVAL);
3087 return ret;
3088}
3089
3090static int
3091pfm_write_pmds(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
3092{
3093 struct thread_struct *thread = NULL;
3094 struct task_struct *task;
3095 pfarg_reg_t *req = (pfarg_reg_t *)arg;
3096 unsigned long value, hw_value, ovfl_mask;
3097 unsigned int cnum;
3098 int i, can_access_pmu = 0, state;
3099 int is_counting, is_loaded, is_system, expert_mode;
3100 int ret = -EINVAL;
3101 pfm_reg_check_t wr_func;
3102
3103
3104 state = ctx->ctx_state;
3105 is_loaded = state == PFM_CTX_LOADED ? 1 : 0;
3106 is_system = ctx->ctx_fl_system;
3107 ovfl_mask = pmu_conf->ovfl_val;
3108 task = ctx->ctx_task;
3109
3110 if (unlikely(state == PFM_CTX_ZOMBIE)) return -EINVAL;
3111
3112 /*
3113	 * on both UP and SMP, we can only write to the PMD when the task is
3114 * the owner of the local PMU.
3115 */
3116 if (likely(is_loaded)) {
3117 thread = &task->thread;
3118 /*
3119 * In system wide and when the context is loaded, access can only happen
3120 * when the caller is running on the CPU being monitored by the session.
3121 * It does not have to be the owner (ctx_task) of the context per se.
3122 */
3123 if (unlikely(is_system && ctx->ctx_cpu != smp_processor_id())) {
3124 DPRINT(("should be running on CPU%d\n", ctx->ctx_cpu));
3125 return -EBUSY;
3126 }
3127 can_access_pmu = GET_PMU_OWNER() == task || is_system ? 1 : 0;
3128 }
3129 expert_mode = pfm_sysctl.expert_mode;
3130
3131 for (i = 0; i < count; i++, req++) {
3132
3133 cnum = req->reg_num;
3134 value = req->reg_value;
3135
3136 if (!PMD_IS_IMPL(cnum)) {
3137 DPRINT(("pmd[%u] is unimplemented or invalid\n", cnum));
3138 goto abort_mission;
3139 }
3140 is_counting = PMD_IS_COUNTING(cnum);
3141 wr_func = pmu_conf->pmd_desc[cnum].write_check;
3142
3143 /*
3144 * execute write checker, if any
3145 */
3146 if (unlikely(expert_mode == 0 && wr_func)) {
3147 unsigned long v = value;
3148
3149 ret = (*wr_func)(task, ctx, cnum, &v, regs);
3150 if (ret) goto abort_mission;
3151
3152 value = v;
3153 ret = -EINVAL;
3154 }
3155
3156 /*
3157 * no error on this register
3158 */
3159 PFM_REG_RETFLAG_SET(req->reg_flags, 0);
3160
3161 /*
3162 * now commit changes to software state
3163 */
3164 hw_value = value;
3165
3166 /*
3167 * update virtualized (64bits) counter
3168 */
3169 if (is_counting) {
3170 /*
3171 * write context state
3172 */
3173 ctx->ctx_pmds[cnum].lval = value;
3174
3175 /*
3176			 * when the context is loaded we use the split value
3177 */
3178 if (is_loaded) {
3179 hw_value = value & ovfl_mask;
3180 value = value & ~ovfl_mask;
3181 }
3182 }
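		/*
		 * e.g. assuming a hypothetical ovfl_mask of 0x0000ffffffffffff
		 * (48 implemented counter bits) and value = 0x0001000000000010:
		 *   hw_value = 0x0000000000000010   -> written to the hardware PMD
		 *   value    = 0x0001000000000000   -> kept in ctx_pmds[].val
		 * the full 64-bit count is later rebuilt in pfm_read_pmds() as
		 * (pmd & ovfl_mask) + ctx_pmds[].val.
		 */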
3183 /*
3184 * update reset values (not just for counters)
3185 */
3186 ctx->ctx_pmds[cnum].long_reset = req->reg_long_reset;
3187 ctx->ctx_pmds[cnum].short_reset = req->reg_short_reset;
3188
3189 /*
3190 * update randomization parameters (not just for counters)
3191 */
3192 ctx->ctx_pmds[cnum].seed = req->reg_random_seed;
3193 ctx->ctx_pmds[cnum].mask = req->reg_random_mask;
3194
3195 /*
3196 * update context value
3197 */
3198 ctx->ctx_pmds[cnum].val = value;
3199
3200 /*
3201 * Keep track of what we use
3202 *
3203 * We do not keep track of PMC because we have to
3204 * systematically restore ALL of them.
3205 */
3206 CTX_USED_PMD(ctx, PMD_PMD_DEP(cnum));
3207
3208 /*
3209 * mark this PMD register used as well
3210 */
3211 CTX_USED_PMD(ctx, RDEP(cnum));
3212
3213 /*
3214 * make sure we do not try to reset on
3215 * restart because we have established new values
3216 */
3217 if (is_counting && state == PFM_CTX_MASKED) {
3218			ctx->ctx_ovfl_regs[0] &= ~(1UL << cnum);
3219 }
3220
3221 if (is_loaded) {
3222 /*
3223 * write thread state
3224 */
3225 if (is_system == 0) thread->pmds[cnum] = hw_value;
3226
3227 /*
3228 * write hardware register if we can
3229 */
3230 if (can_access_pmu) {
3231 ia64_set_pmd(cnum, hw_value);
3232 } else {
3233#ifdef CONFIG_SMP
3234 /*
3235 * we are guaranteed that the task is not running on the other CPU,
3236 * we indicate that this PMD will need to be reloaded if the task
3237 * is rescheduled on the CPU it ran last on.
3238 */
3239 ctx->ctx_reload_pmds[0] |= 1UL << cnum;
3240#endif
3241 }
3242 }
3243
3244 DPRINT(("pmd[%u]=0x%lx ld=%d apmu=%d, hw_value=0x%lx ctx_pmd=0x%lx short_reset=0x%lx "
3245 "long_reset=0x%lx notify=%c seed=0x%lx mask=0x%lx used_pmds=0x%lx reset_pmds=0x%lx reload_pmds=0x%lx all_pmds=0x%lx ovfl_regs=0x%lx\n",
3246 cnum,
3247 value,
3248 is_loaded,
3249 can_access_pmu,
3250 hw_value,
3251 ctx->ctx_pmds[cnum].val,
3252 ctx->ctx_pmds[cnum].short_reset,
3253 ctx->ctx_pmds[cnum].long_reset,
3254 PMC_OVFL_NOTIFY(ctx, cnum) ? 'Y':'N',
3255 ctx->ctx_pmds[cnum].seed,
3256 ctx->ctx_pmds[cnum].mask,
3257 ctx->ctx_used_pmds[0],
3258 ctx->ctx_pmds[cnum].reset_pmds[0],
3259 ctx->ctx_reload_pmds[0],
3260 ctx->ctx_all_pmds[0],
3261 ctx->ctx_ovfl_regs[0]));
3262 }
3263
3264 /*
3265 * make changes visible
3266 */
3267 if (can_access_pmu) ia64_srlz_d();
3268
3269 return 0;
3270
3271abort_mission:
3272 /*
3273 * for now, we have only one possibility for error
3274 */
3275 PFM_REG_RETFLAG_SET(req->reg_flags, PFM_REG_RETFL_EINVAL);
3276 return ret;
3277}
3278
3279/*
3280 * By way of PROTECT_CONTEXT(), interrupts are masked while we are in this function.
3281 * Therefore we know, we do not have to worry about the PMU overflow interrupt. If an
3282 * interrupt is delivered during the call, it will be kept pending until we leave, making
3283 * it appear as if it had been generated at the UNPROTECT_CONTEXT(). At least we are
3284 * guaranteed to return consistent data to the user, it may simply be old. It is not
3285 * trivial to treat the overflow while inside the call because you may end up in
3286 * some module sampling buffer code causing deadlocks.
3287 */
3288static int
3289pfm_read_pmds(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
3290{
3291 struct thread_struct *thread = NULL;
3292 struct task_struct *task;
3293 unsigned long val = 0UL, lval, ovfl_mask, sval;
3294 pfarg_reg_t *req = (pfarg_reg_t *)arg;
3295 unsigned int cnum, reg_flags = 0;
3296 int i, can_access_pmu = 0, state;
3297 int is_loaded, is_system, is_counting, expert_mode;
3298 int ret = -EINVAL;
3299 pfm_reg_check_t rd_func;
3300
3301 /*
3302 * access is possible when loaded only for
3303 * self-monitoring tasks or in UP mode
3304 */
3305
3306 state = ctx->ctx_state;
3307 is_loaded = state == PFM_CTX_LOADED ? 1 : 0;
3308 is_system = ctx->ctx_fl_system;
3309 ovfl_mask = pmu_conf->ovfl_val;
3310 task = ctx->ctx_task;
3311
3312 if (state == PFM_CTX_ZOMBIE) return -EINVAL;
3313
3314 if (likely(is_loaded)) {
3315 thread = &task->thread;
3316 /*
3317 * In system wide and when the context is loaded, access can only happen
3318 * when the caller is running on the CPU being monitored by the session.
3319 * It does not have to be the owner (ctx_task) of the context per se.
3320 */
3321 if (unlikely(is_system && ctx->ctx_cpu != smp_processor_id())) {
3322 DPRINT(("should be running on CPU%d\n", ctx->ctx_cpu));
3323 return -EBUSY;
3324 }
3325 /*
3326		 * this can be true when not self-monitoring, but only in UP
3327 */
3328 can_access_pmu = GET_PMU_OWNER() == task || is_system ? 1 : 0;
3329
3330 if (can_access_pmu) ia64_srlz_d();
3331 }
3332 expert_mode = pfm_sysctl.expert_mode;
3333
3334 DPRINT(("ld=%d apmu=%d ctx_state=%d\n",
3335 is_loaded,
3336 can_access_pmu,
3337 state));
3338
3339 /*
3340 * on both UP and SMP, we can only read the PMD from the hardware register when
3341 * the task is the owner of the local PMU.
3342 */
3343
3344 for (i = 0; i < count; i++, req++) {
3345
3346 cnum = req->reg_num;
3347 reg_flags = req->reg_flags;
3348
3349 if (unlikely(!PMD_IS_IMPL(cnum))) goto error;
3350 /*
3351		 * we can only read the registers that we use. That includes
3352		 * the ones we explicitly initialize AND the ones we want included
3353 * in the sampling buffer (smpl_regs).
3354 *
3355 * Having this restriction allows optimization in the ctxsw routine
3356 * without compromising security (leaks)
3357 */
3358 if (unlikely(!CTX_IS_USED_PMD(ctx, cnum))) goto error;
3359
3360 sval = ctx->ctx_pmds[cnum].val;
3361 lval = ctx->ctx_pmds[cnum].lval;
3362 is_counting = PMD_IS_COUNTING(cnum);
3363
3364 /*
3365 * If the task is not the current one, then we check if the
3366 * PMU state is still in the local live register due to lazy ctxsw.
3367 * If true, then we read directly from the registers.
3368 */
3369 if (can_access_pmu){
3370 val = ia64_get_pmd(cnum);
3371 } else {
3372 /*
3373 * context has been saved
3374 * if context is zombie, then task does not exist anymore.
3375 * In this case, we use the full value saved in the context (pfm_flush_regs()).
3376 */
3377 val = is_loaded ? thread->pmds[cnum] : 0UL;
3378 }
3379 rd_func = pmu_conf->pmd_desc[cnum].read_check;
3380
3381 if (is_counting) {
3382 /*
3383 * XXX: need to check for overflow when loaded
3384 */
3385 val &= ovfl_mask;
3386 val += sval;
3387 }
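		/*
		 * e.g. (hypothetical values) with a hardware pmd of 0x400,
		 * sval = 0x0001000000000000 and ovfl_mask = 0x0000ffffffffffff:
		 *   val = (0x400 & ovfl_mask) + sval = 0x0001000000000400
		 * which is the software-extended 64-bit count returned to the
		 * caller.
		 */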
3388
3389 /*
3390 * execute read checker, if any
3391 */
3392 if (unlikely(expert_mode == 0 && rd_func)) {
3393 unsigned long v = val;
3394 ret = (*rd_func)(ctx->ctx_task, ctx, cnum, &v, regs);
3395 if (ret) goto error;
3396 val = v;
3397 ret = -EINVAL;
3398 }
3399
3400 PFM_REG_RETFLAG_SET(reg_flags, 0);
3401
3402 DPRINT(("pmd[%u]=0x%lx\n", cnum, val));
3403
3404 /*
3405 * update register return value, abort all if problem during copy.
3406 * we only modify the reg_flags field. no check mode is fine because
3407 * access has been verified upfront in sys_perfmonctl().
3408 */
3409 req->reg_value = val;
3410 req->reg_flags = reg_flags;
3411 req->reg_last_reset_val = lval;
3412 }
3413
3414 return 0;
3415
3416error:
3417 PFM_REG_RETFLAG_SET(req->reg_flags, PFM_REG_RETFL_EINVAL);
3418 return ret;
3419}
3420
3421int
3422pfm_mod_write_pmcs(struct task_struct *task, void *req, unsigned int nreq, struct pt_regs *regs)
3423{
3424 pfm_context_t *ctx;
3425
3426 if (req == NULL) return -EINVAL;
3427
3428 ctx = GET_PMU_CTX();
3429
3430 if (ctx == NULL) return -EINVAL;
3431
3432 /*
3433 * for now limit to current task, which is enough when calling
3434 * from overflow handler
3435 */
3436 if (task != current && ctx->ctx_fl_system == 0) return -EBUSY;
3437
3438 return pfm_write_pmcs(ctx, req, nreq, regs);
3439}
3440EXPORT_SYMBOL(pfm_mod_write_pmcs);
3441
3442int
3443pfm_mod_read_pmds(struct task_struct *task, void *req, unsigned int nreq, struct pt_regs *regs)
3444{
3445 pfm_context_t *ctx;
3446
3447 if (req == NULL) return -EINVAL;
3448
3449 ctx = GET_PMU_CTX();
3450
3451 if (ctx == NULL) return -EINVAL;
3452
3453 /*
3454 * for now limit to current task, which is enough when calling
3455 * from overflow handler
3456 */
3457 if (task != current && ctx->ctx_fl_system == 0) return -EBUSY;
3458
3459 return pfm_read_pmds(ctx, req, nreq, regs);
3460}
3461EXPORT_SYMBOL(pfm_mod_read_pmds);
3462
3463/*
3464 * Only call this function when a process is trying to
3465 * write the debug registers (reading is always allowed)
3466 */
3467int
3468pfm_use_debug_registers(struct task_struct *task)
3469{
3470 pfm_context_t *ctx = task->thread.pfm_context;
3471 unsigned long flags;
3472 int ret = 0;
3473
3474 if (pmu_conf->use_rr_dbregs == 0) return 0;
3475
3476 DPRINT(("called for [%d]\n", task->pid));
3477
3478 /*
3479 * do it only once
3480 */
3481 if (task->thread.flags & IA64_THREAD_DBG_VALID) return 0;
3482
3483 /*
3484 * Even on SMP, we do not need to use an atomic here because
3485 * the only way in is via ptrace() and this is possible only when the
3486 * process is stopped. Even in the case where the ctxsw out is not totally
3487 * completed by the time we come here, there is no way the 'stopped' process
3488 * could be in the middle of fiddling with the pfm_write_ibr_dbr() routine.
3489 * So this is always safe.
3490 */
3491 if (ctx && ctx->ctx_fl_using_dbreg == 1) return -1;
3492
3493 LOCK_PFS(flags);
3494
3495 /*
3496 * We cannot allow setting breakpoints when system wide monitoring
3497 * sessions are using the debug registers.
3498 */
3499	if (pfm_sessions.pfs_sys_use_dbregs > 0)
3500 ret = -1;
3501 else
3502 pfm_sessions.pfs_ptrace_use_dbregs++;
3503
3504 DPRINT(("ptrace_use_dbregs=%u sys_use_dbregs=%u by [%d] ret = %d\n",
3505 pfm_sessions.pfs_ptrace_use_dbregs,
3506 pfm_sessions.pfs_sys_use_dbregs,
3507 task->pid, ret));
3508
3509 UNLOCK_PFS(flags);
3510
3511 return ret;
3512}
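/*
 * pfs_ptrace_use_dbregs and pfs_sys_use_dbregs thus act as two mutually
 * exclusive reference counts: a ptrace() user of the debug registers is
 * only admitted while no system-wide perfmon session owns them, and
 * pfm_write_ibr_dbr() performs the symmetric check before a system-wide
 * session claims them.
 */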
3513
3514/*
3515 * This function is called for every task that exits with the
3516 * IA64_THREAD_DBG_VALID set. This indicates a task which was
3517 * able to use the debug registers for debugging purposes via
3518 * ptrace(). Therefore we know it was not using them for
3519 * performance monitoring, so we only decrement the number
3520 * of "ptraced" debug register users to keep the count up to date
3521 */
3522int
3523pfm_release_debug_registers(struct task_struct *task)
3524{
3525 unsigned long flags;
3526 int ret;
3527
3528 if (pmu_conf->use_rr_dbregs == 0) return 0;
3529
3530 LOCK_PFS(flags);
3531 if (pfm_sessions.pfs_ptrace_use_dbregs == 0) {
3532 printk(KERN_ERR "perfmon: invalid release for [%d] ptrace_use_dbregs=0\n", task->pid);
3533 ret = -1;
3534 } else {
3535 pfm_sessions.pfs_ptrace_use_dbregs--;
3536 ret = 0;
3537 }
3538 UNLOCK_PFS(flags);
3539
3540 return ret;
3541}
3542
3543static int
3544pfm_restart(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
3545{
3546 struct task_struct *task;
3547 pfm_buffer_fmt_t *fmt;
3548 pfm_ovfl_ctrl_t rst_ctrl;
3549 int state, is_system;
3550 int ret = 0;
3551
3552 state = ctx->ctx_state;
3553 fmt = ctx->ctx_buf_fmt;
3554 is_system = ctx->ctx_fl_system;
3555 task = PFM_CTX_TASK(ctx);
3556
3557 switch(state) {
3558 case PFM_CTX_MASKED:
3559 break;
3560 case PFM_CTX_LOADED:
3561 if (CTX_HAS_SMPL(ctx) && fmt->fmt_restart_active) break;
3562 /* fall through */
3563 case PFM_CTX_UNLOADED:
3564 case PFM_CTX_ZOMBIE:
3565 DPRINT(("invalid state=%d\n", state));
3566 return -EBUSY;
3567 default:
3568 DPRINT(("state=%d, cannot operate (no active_restart handler)\n", state));
3569 return -EINVAL;
3570 }
3571
3572 /*
3573 * In system wide and when the context is loaded, access can only happen
3574 * when the caller is running on the CPU being monitored by the session.
3575 * It does not have to be the owner (ctx_task) of the context per se.
3576 */
3577 if (is_system && ctx->ctx_cpu != smp_processor_id()) {
3578 DPRINT(("should be running on CPU%d\n", ctx->ctx_cpu));
3579 return -EBUSY;
3580 }
3581
3582 /* sanity check */
3583 if (unlikely(task == NULL)) {
3584 printk(KERN_ERR "perfmon: [%d] pfm_restart no task\n", current->pid);
3585 return -EINVAL;
3586 }
3587
3588 if (task == current || is_system) {
3589
3590 fmt = ctx->ctx_buf_fmt;
3591
3592 DPRINT(("restarting self %d ovfl=0x%lx\n",
3593 task->pid,
3594 ctx->ctx_ovfl_regs[0]));
3595
3596 if (CTX_HAS_SMPL(ctx)) {
3597
3598 prefetch(ctx->ctx_smpl_hdr);
3599
3600 rst_ctrl.bits.mask_monitoring = 0;
3601 rst_ctrl.bits.reset_ovfl_pmds = 0;
3602
3603 if (state == PFM_CTX_LOADED)
3604 ret = pfm_buf_fmt_restart_active(fmt, task, &rst_ctrl, ctx->ctx_smpl_hdr, regs);
3605 else
3606 ret = pfm_buf_fmt_restart(fmt, task, &rst_ctrl, ctx->ctx_smpl_hdr, regs);
3607 } else {
3608 rst_ctrl.bits.mask_monitoring = 0;
3609 rst_ctrl.bits.reset_ovfl_pmds = 1;
3610 }
3611
3612 if (ret == 0) {
3613 if (rst_ctrl.bits.reset_ovfl_pmds)
3614 pfm_reset_regs(ctx, ctx->ctx_ovfl_regs, PFM_PMD_LONG_RESET);
3615
3616 if (rst_ctrl.bits.mask_monitoring == 0) {
3617 DPRINT(("resuming monitoring for [%d]\n", task->pid));
3618
3619 if (state == PFM_CTX_MASKED) pfm_restore_monitoring(task);
3620 } else {
3621 DPRINT(("keeping monitoring stopped for [%d]\n", task->pid));
3622
3623 // cannot use pfm_stop_monitoring(task, regs);
3624 }
3625 }
3626 /*
3627 * clear overflowed PMD mask to remove any stale information
3628 */
3629 ctx->ctx_ovfl_regs[0] = 0UL;
3630
3631 /*
3632 * back to LOADED state
3633 */
3634 ctx->ctx_state = PFM_CTX_LOADED;
3635
3636 /*
3637 * XXX: not really useful for self monitoring
3638 */
3639 ctx->ctx_fl_can_restart = 0;
3640
3641 return 0;
3642 }
3643
3644 /*
3645 * restart another task
3646 */
3647
3648 /*
3649 * When PFM_CTX_MASKED, we cannot issue a restart before the previous
3650 * one is seen by the task.
3651 */
3652 if (state == PFM_CTX_MASKED) {
3653 if (ctx->ctx_fl_can_restart == 0) return -EINVAL;
3654 /*
3655 * will prevent subsequent restart before this one is
3656 * seen by other task
3657 */
3658 ctx->ctx_fl_can_restart = 0;
3659 }
3660
3661 /*
3662	 * if blocking, then post the semaphore if PFM_CTX_MASKED, i.e.
3663 * the task is blocked or on its way to block. That's the normal
3664 * restart path. If the monitoring is not masked, then the task
3665 * can be actively monitoring and we cannot directly intervene.
3666 * Therefore we use the trap mechanism to catch the task and
3667 * force it to reset the buffer/reset PMDs.
3668 *
3669 * if non-blocking, then we ensure that the task will go into
3670 * pfm_handle_work() before returning to user mode.
3671 *
3672	 * We cannot explicitly reset another task, it MUST always
3673 * be done by the task itself. This works for system wide because
3674 * the tool that is controlling the session is logically doing
3675 * "self-monitoring".
3676 */
3677 if (CTX_OVFL_NOBLOCK(ctx) == 0 && state == PFM_CTX_MASKED) {
3678 DPRINT(("unblocking [%d] \n", task->pid));
3679 up(&ctx->ctx_restart_sem);
3680 } else {
3681 DPRINT(("[%d] armed exit trap\n", task->pid));
3682
3683 ctx->ctx_fl_trap_reason = PFM_TRAP_REASON_RESET;
3684
3685 PFM_SET_WORK_PENDING(task, 1);
3686
3687 pfm_set_task_notify(task);
3688
3689 /*
3690 * XXX: send reschedule if task runs on another CPU
3691 */
3692 }
3693 return 0;
3694}
3695
3696static int
3697pfm_debug(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
3698{
3699 unsigned int m = *(unsigned int *)arg;
3700
3701 pfm_sysctl.debug = m == 0 ? 0 : 1;
3702
3703 printk(KERN_INFO "perfmon debugging %s (timing reset)\n", pfm_sysctl.debug ? "on" : "off");
3704
3705 if (m == 0) {
3706 memset(pfm_stats, 0, sizeof(pfm_stats));
3707 for(m=0; m < NR_CPUS; m++) pfm_stats[m].pfm_ovfl_intr_cycles_min = ~0UL;
3708 }
3709 return 0;
3710}
3711
3712/*
3713 * arg can be NULL and count can be zero for this function
3714 */
3715static int
3716pfm_write_ibr_dbr(int mode, pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
3717{
3718 struct thread_struct *thread = NULL;
3719 struct task_struct *task;
3720 pfarg_dbreg_t *req = (pfarg_dbreg_t *)arg;
3721 unsigned long flags;
3722 dbreg_t dbreg;
3723 unsigned int rnum;
3724 int first_time;
3725 int ret = 0, state;
3726 int i, can_access_pmu = 0;
3727 int is_system, is_loaded;
3728
3729 if (pmu_conf->use_rr_dbregs == 0) return -EINVAL;
3730
3731 state = ctx->ctx_state;
3732 is_loaded = state == PFM_CTX_LOADED ? 1 : 0;
3733 is_system = ctx->ctx_fl_system;
3734 task = ctx->ctx_task;
3735
3736 if (state == PFM_CTX_ZOMBIE) return -EINVAL;
3737
3738 /*
3739	 * on both UP and SMP, we can only write to the debug registers when the task is
3740 * the owner of the local PMU.
3741 */
3742 if (is_loaded) {
3743 thread = &task->thread;
3744 /*
3745 * In system wide and when the context is loaded, access can only happen
3746 * when the caller is running on the CPU being monitored by the session.
3747 * It does not have to be the owner (ctx_task) of the context per se.
3748 */
3749 if (unlikely(is_system && ctx->ctx_cpu != smp_processor_id())) {
3750 DPRINT(("should be running on CPU%d\n", ctx->ctx_cpu));
3751 return -EBUSY;
3752 }
3753 can_access_pmu = GET_PMU_OWNER() == task || is_system ? 1 : 0;
3754 }
3755
3756 /*
3757 * we do not need to check for ipsr.db because we do clear ibr.x, dbr.r, and dbr.w
3758 * ensuring that no real breakpoint can be installed via this call.
3759 *
3760 * IMPORTANT: regs can be NULL in this function
3761 */
3762
3763 first_time = ctx->ctx_fl_using_dbreg == 0;
3764
3765 /*
3766 * don't bother if we are loaded and task is being debugged
3767 */
3768 if (is_loaded && (thread->flags & IA64_THREAD_DBG_VALID) != 0) {
3769 DPRINT(("debug registers already in use for [%d]\n", task->pid));
3770 return -EBUSY;
3771 }
3772
3773 /*
3774 * check for debug registers in system wide mode
3775 *
3776	 * Even though a check is done in pfm_context_load(),
3777 * we must repeat it here, in case the registers are
3778 * written after the context is loaded
3779 */
3780 if (is_loaded) {
3781 LOCK_PFS(flags);
3782
3783 if (first_time && is_system) {
3784 if (pfm_sessions.pfs_ptrace_use_dbregs)
3785 ret = -EBUSY;
3786 else
3787 pfm_sessions.pfs_sys_use_dbregs++;
3788 }
3789 UNLOCK_PFS(flags);
3790 }
3791
3792 if (ret != 0) return ret;
3793
3794 /*
3795	 * mark ourselves as a user of the debug registers for
3796 * perfmon purposes.
3797 */
3798 ctx->ctx_fl_using_dbreg = 1;
3799
3800 /*
3801 * clear hardware registers to make sure we don't
3802 * pick up stale state.
3803 *
3804 * for a system wide session, we do not use
3805 * thread.dbr, thread.ibr because this process
3806 * never leaves the current CPU and the state
3807 * is shared by all processes running on it
3808 */
3809 if (first_time && can_access_pmu) {
3810 DPRINT(("[%d] clearing ibrs, dbrs\n", task->pid));
3811 for (i=0; i < pmu_conf->num_ibrs; i++) {
3812 ia64_set_ibr(i, 0UL);
3813 ia64_dv_serialize_instruction();
3814 }
3815 ia64_srlz_i();
3816 for (i=0; i < pmu_conf->num_dbrs; i++) {
3817 ia64_set_dbr(i, 0UL);
3818 ia64_dv_serialize_data();
3819 }
3820 ia64_srlz_d();
3821 }
3822
3823 /*
3824 * Now install the values into the registers
3825 */
3826 for (i = 0; i < count; i++, req++) {
3827
3828 rnum = req->dbreg_num;
3829 dbreg.val = req->dbreg_value;
3830
3831 ret = -EINVAL;
3832
3833 if ((mode == PFM_CODE_RR && rnum >= PFM_NUM_IBRS) || ((mode == PFM_DATA_RR) && rnum >= PFM_NUM_DBRS)) {
3834 DPRINT(("invalid register %u val=0x%lx mode=%d i=%d count=%d\n",
3835 rnum, dbreg.val, mode, i, count));
3836
3837 goto abort_mission;
3838 }
3839
3840 /*
3841		 * make sure we do not install an enabled breakpoint
3842 */
3843 if (rnum & 0x1) {
3844 if (mode == PFM_CODE_RR)
3845 dbreg.ibr.ibr_x = 0;
3846 else
3847 dbreg.dbr.dbr_r = dbreg.dbr.dbr_w = 0;
3848 }
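		/*
		 * the odd-numbered ibr/dbr registers are the ones carrying the
		 * mask and enable bits (ibr.x, dbr.r, dbr.w) for a breakpoint
		 * pair, which is why only they are sanitized here; the
		 * even-numbered registers only hold the breakpoint address.
		 */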
3849
3850 PFM_REG_RETFLAG_SET(req->dbreg_flags, 0);
3851
3852 /*
3853 * Debug registers, just like PMC, can only be modified
3854		 * by a kernel call. Moreover, perfmon() accesses to those
3855 * registers are centralized in this routine. The hardware
3856 * does not modify the value of these registers, therefore,
3857 * if we save them as they are written, we can avoid having
3858 * to save them on context switch out. This is made possible
3859 * by the fact that when perfmon uses debug registers, ptrace()
3860 * won't be able to modify them concurrently.
3861 */
3862 if (mode == PFM_CODE_RR) {
3863 CTX_USED_IBR(ctx, rnum);
3864
3865 if (can_access_pmu) {
3866 ia64_set_ibr(rnum, dbreg.val);
3867 ia64_dv_serialize_instruction();
3868 }
3869
3870 ctx->ctx_ibrs[rnum] = dbreg.val;
3871
3872 DPRINT(("write ibr%u=0x%lx used_ibrs=0x%x ld=%d apmu=%d\n",
3873 rnum, dbreg.val, ctx->ctx_used_ibrs[0], is_loaded, can_access_pmu));
3874 } else {
3875 CTX_USED_DBR(ctx, rnum);
3876
3877 if (can_access_pmu) {
3878 ia64_set_dbr(rnum, dbreg.val);
3879 ia64_dv_serialize_data();
3880 }
3881 ctx->ctx_dbrs[rnum] = dbreg.val;
3882
3883 DPRINT(("write dbr%u=0x%lx used_dbrs=0x%x ld=%d apmu=%d\n",
3884 rnum, dbreg.val, ctx->ctx_used_dbrs[0], is_loaded, can_access_pmu));
3885 }
3886 }
3887
3888 return 0;
3889
3890abort_mission:
3891 /*
3892 * in case it was our first attempt, we undo the global modifications
3893 */
3894 if (first_time) {
3895 LOCK_PFS(flags);
3896 if (ctx->ctx_fl_system) {
3897 pfm_sessions.pfs_sys_use_dbregs--;
3898 }
3899 UNLOCK_PFS(flags);
3900 ctx->ctx_fl_using_dbreg = 0;
3901 }
3902 /*
3903 * install error return flag
3904 */
3905 PFM_REG_RETFLAG_SET(req->dbreg_flags, PFM_REG_RETFL_EINVAL);
3906
3907 return ret;
3908}
3909
3910static int
3911pfm_write_ibrs(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
3912{
3913 return pfm_write_ibr_dbr(PFM_CODE_RR, ctx, arg, count, regs);
3914}
3915
3916static int
3917pfm_write_dbrs(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
3918{
3919 return pfm_write_ibr_dbr(PFM_DATA_RR, ctx, arg, count, regs);
3920}
3921
3922int
3923pfm_mod_write_ibrs(struct task_struct *task, void *req, unsigned int nreq, struct pt_regs *regs)
3924{
3925 pfm_context_t *ctx;
3926
3927 if (req == NULL) return -EINVAL;
3928
3929 ctx = GET_PMU_CTX();
3930
3931 if (ctx == NULL) return -EINVAL;
3932
3933 /*
3934 * for now limit to current task, which is enough when calling
3935 * from overflow handler
3936 */
3937 if (task != current && ctx->ctx_fl_system == 0) return -EBUSY;
3938
3939 return pfm_write_ibrs(ctx, req, nreq, regs);
3940}
3941EXPORT_SYMBOL(pfm_mod_write_ibrs);
3942
3943int
3944pfm_mod_write_dbrs(struct task_struct *task, void *req, unsigned int nreq, struct pt_regs *regs)
3945{
3946 pfm_context_t *ctx;
3947
3948 if (req == NULL) return -EINVAL;
3949
3950 ctx = GET_PMU_CTX();
3951
3952 if (ctx == NULL) return -EINVAL;
3953
3954 /*
3955 * for now limit to current task, which is enough when calling
3956 * from overflow handler
3957 */
3958 if (task != current && ctx->ctx_fl_system == 0) return -EBUSY;
3959
3960 return pfm_write_dbrs(ctx, req, nreq, regs);
3961}
3962EXPORT_SYMBOL(pfm_mod_write_dbrs);
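/*
 * Minimal illustrative sketch of the exported interface above, assuming a
 * sampling format module running in the context of the monitored task and
 * hypothetical "addr"/"mask" values built per the IA-64 IBR layout (even
 * register = address, odd register = mask/control). Note that the routine
 * above clears the enable bits on odd registers, so such entries are
 * typically used for the PMU code range restriction feature rather than
 * debug faults:
 *
 *	pfarg_dbreg_t d[2] = {
 *		{ .dbreg_num = 0, .dbreg_value = addr },
 *		{ .dbreg_num = 1, .dbreg_value = mask },
 *	};
 *	if (pfm_mod_write_ibrs(current, d, 2, regs))
 *		;	// handle -EINVAL/-EBUSY
 */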
3963
3964
3965static int
3966pfm_get_features(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
3967{
3968 pfarg_features_t *req = (pfarg_features_t *)arg;
3969
3970 req->ft_version = PFM_VERSION;
3971 return 0;
3972}
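/*
 * Illustrative user level usage, assuming the PFM_VERSION_MAJOR()/
 * PFM_VERSION_MINOR() helpers from <asm/perfmon.h>. PFM_GET_FEATURES does
 * not carry the PFM_CMD_FD flag (see pfm_cmd_tab below), so the fd
 * argument is ignored:
 *
 *	pfarg_features_t ft;
 *	perfmonctl(0, PFM_GET_FEATURES, &ft, 1);
 *	printf("perfmon v%u.%u\n",
 *	       PFM_VERSION_MAJOR(ft.ft_version),
 *	       PFM_VERSION_MINOR(ft.ft_version));
 */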
3973
3974static int
3975pfm_stop(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
3976{
3977 struct pt_regs *tregs;
3978 struct task_struct *task = PFM_CTX_TASK(ctx);
3979 int state, is_system;
3980
3981 state = ctx->ctx_state;
3982 is_system = ctx->ctx_fl_system;
3983
3984 /*
3985 * context must be attached to issue the stop command (includes LOADED,MASKED,ZOMBIE)
3986 */
3987 if (state == PFM_CTX_UNLOADED) return -EINVAL;
3988
3989 /*
3990 * In system wide and when the context is loaded, access can only happen
3991 * when the caller is running on the CPU being monitored by the session.
3992 * It does not have to be the owner (ctx_task) of the context per se.
3993 */
3994 if (is_system && ctx->ctx_cpu != smp_processor_id()) {
3995 DPRINT(("should be running on CPU%d\n", ctx->ctx_cpu));
3996 return -EBUSY;
3997 }
3998 DPRINT(("task [%d] ctx_state=%d is_system=%d\n",
3999 PFM_CTX_TASK(ctx)->pid,
4000 state,
4001 is_system));
4002 /*
4003 * in system mode, we need to update the PMU directly
4004 * and the user level state of the caller, which may not
4005 * necessarily be the creator of the context.
4006 */
4007 if (is_system) {
4008 /*
4009 * Update local PMU first
4010 *
4011 * disable dcr pp
4012 */
4013 ia64_setreg(_IA64_REG_CR_DCR, ia64_getreg(_IA64_REG_CR_DCR) & ~IA64_DCR_PP);
4014 ia64_srlz_i();
4015
4016 /*
4017 * update local cpuinfo
4018 */
4019 PFM_CPUINFO_CLEAR(PFM_CPUINFO_DCR_PP);
4020
4021 /*
4022 * stop monitoring, does srlz.i
4023 */
4024 pfm_clear_psr_pp();
4025
4026 /*
4027 * stop monitoring in the caller
4028 */
4029 ia64_psr(regs)->pp = 0;
4030
4031 return 0;
4032 }
4033 /*
4034 * per-task mode
4035 */
4036
4037 if (task == current) {
4038 /* stop monitoring at kernel level */
4039 pfm_clear_psr_up();
4040
4041 /*
4042 * stop monitoring at the user level
4043 */
4044 ia64_psr(regs)->up = 0;
4045 } else {
4046 tregs = ia64_task_regs(task);
4047
4048 /*
4049 * stop monitoring at the user level
4050 */
4051 ia64_psr(tregs)->up = 0;
4052
4053 /*
4054 * monitoring disabled in kernel at next reschedule
4055 */
4056 ctx->ctx_saved_psr_up = 0;
4057 DPRINT(("task=[%d]\n", task->pid));
4058 }
4059 return 0;
4060}
4061
4062
4063static int
4064pfm_start(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
4065{
4066 struct pt_regs *tregs;
4067 int state, is_system;
4068
4069 state = ctx->ctx_state;
4070 is_system = ctx->ctx_fl_system;
4071
4072 if (state != PFM_CTX_LOADED) return -EINVAL;
4073
4074 /*
4075 * In system wide and when the context is loaded, access can only happen
4076 * when the caller is running on the CPU being monitored by the session.
4077 * It does not have to be the owner (ctx_task) of the context per se.
4078 */
4079 if (is_system && ctx->ctx_cpu != smp_processor_id()) {
4080 DPRINT(("should be running on CPU%d\n", ctx->ctx_cpu));
4081 return -EBUSY;
4082 }
4083
4084 /*
4085 * in system mode, we need to update the PMU directly
4086 * and the user level state of the caller, which may not
4087 * necessarily be the creator of the context.
4088 */
4089 if (is_system) {
4090
4091 /*
4092 * set user level psr.pp for the caller
4093 */
4094 ia64_psr(regs)->pp = 1;
4095
4096 /*
4097 * now update the local PMU and cpuinfo
4098 */
4099 PFM_CPUINFO_SET(PFM_CPUINFO_DCR_PP);
4100
4101 /*
4102 * start monitoring at kernel level
4103 */
4104 pfm_set_psr_pp();
4105
4106 /* enable dcr pp */
4107 ia64_setreg(_IA64_REG_CR_DCR, ia64_getreg(_IA64_REG_CR_DCR) | IA64_DCR_PP);
4108 ia64_srlz_i();
4109
4110 return 0;
4111 }
4112
4113 /*
4114 * per-process mode
4115 */
4116
4117 if (ctx->ctx_task == current) {
4118
4119 /* start monitoring at kernel level */
4120 pfm_set_psr_up();
4121
4122 /*
4123 * activate monitoring at user level
4124 */
4125 ia64_psr(regs)->up = 1;
4126
4127 } else {
4128 tregs = ia64_task_regs(ctx->ctx_task);
4129
4130 /*
4131 * start monitoring at the kernel level the next
4132 * time the task is scheduled
4133 */
4134 ctx->ctx_saved_psr_up = IA64_PSR_UP;
4135
4136 /*
4137 * activate monitoring at user level
4138 */
4139 ia64_psr(tregs)->up = 1;
4140 }
4141 return 0;
4142}
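/*
 * Illustrative sketch: once a context is loaded, user level code toggles
 * monitoring with the matching commands on the context file descriptor
 * (for a system wide session the caller must run on the monitored CPU,
 * as enforced above):
 *
 *	perfmonctl(ctx_fd, PFM_START, NULL, 0);
 *	// ... run the workload being measured ...
 *	perfmonctl(ctx_fd, PFM_STOP, NULL, 0);
 */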
4143
4144static int
4145pfm_get_pmc_reset(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
4146{
4147 pfarg_reg_t *req = (pfarg_reg_t *)arg;
4148 unsigned int cnum;
4149 int i;
4150 int ret = -EINVAL;
4151
4152 for (i = 0; i < count; i++, req++) {
4153
4154 cnum = req->reg_num;
4155
4156 if (!PMC_IS_IMPL(cnum)) goto abort_mission;
4157
4158 req->reg_value = PMC_DFL_VAL(cnum);
4159
4160 PFM_REG_RETFLAG_SET(req->reg_flags, 0);
4161
4162 DPRINT(("pmc_reset_val pmc[%u]=0x%lx\n", cnum, req->reg_value));
4163 }
4164 return 0;
4165
4166abort_mission:
4167 PFM_REG_RETFLAG_SET(req->reg_flags, PFM_REG_RETFL_EINVAL);
4168 return ret;
4169}
4170
4171static int
4172pfm_check_task_exist(pfm_context_t *ctx)
4173{
4174 struct task_struct *g, *t;
4175 int ret = -ESRCH;
4176
4177 read_lock(&tasklist_lock);
4178
4179 do_each_thread (g, t) {
4180 if (t->thread.pfm_context == ctx) {
4181 ret = 0;
4182 break;
4183 }
4184 } while_each_thread (g, t);
4185
4186 read_unlock(&tasklist_lock);
4187
4188 DPRINT(("pfm_check_task_exist: ret=%d ctx=%p\n", ret, ctx));
4189
4190 return ret;
4191}
4192
4193static int
4194pfm_context_load(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
4195{
4196 struct task_struct *task;
4197 struct thread_struct *thread;
4198 struct pfm_context_t *old;
4199 unsigned long flags;
4200#ifndef CONFIG_SMP
4201 struct task_struct *owner_task = NULL;
4202#endif
4203 pfarg_load_t *req = (pfarg_load_t *)arg;
4204 unsigned long *pmcs_source, *pmds_source;
4205 int the_cpu;
4206 int ret = 0;
4207 int state, is_system, set_dbregs = 0;
4208
4209 state = ctx->ctx_state;
4210 is_system = ctx->ctx_fl_system;
4211 /*
4212 * can only load from unloaded or terminated state
4213 */
4214 if (state != PFM_CTX_UNLOADED) {
4215 DPRINT(("cannot load to [%d], invalid ctx_state=%d\n",
4216 req->load_pid,
4217 ctx->ctx_state));
a5a70b75 4218 return -EBUSY;
1da177e4
LT
4219 }
4220
4221 DPRINT(("load_pid [%d] using_dbreg=%d\n", req->load_pid, ctx->ctx_fl_using_dbreg));
4222
4223 if (CTX_OVFL_NOBLOCK(ctx) == 0 && req->load_pid == current->pid) {
4224 DPRINT(("cannot use blocking mode on self\n"));
4225 return -EINVAL;
4226 }
4227
4228 ret = pfm_get_task(ctx, req->load_pid, &task);
4229 if (ret) {
4230 DPRINT(("load_pid [%d] get_task=%d\n", req->load_pid, ret));
4231 return ret;
4232 }
4233
4234 ret = -EINVAL;
4235
4236 /*
4237 * system wide is self monitoring only
4238 */
4239 if (is_system && task != current) {
4240 DPRINT(("system wide is self monitoring only load_pid=%d\n",
4241 req->load_pid));
4242 goto error;
4243 }
4244
4245 thread = &task->thread;
4246
4247 ret = 0;
4248 /*
4249 * cannot load a context which is using range restrictions,
4250 * into a task that is being debugged.
4251 */
4252 if (ctx->ctx_fl_using_dbreg) {
4253 if (thread->flags & IA64_THREAD_DBG_VALID) {
4254 ret = -EBUSY;
4255 DPRINT(("load_pid [%d] task is debugged, cannot load range restrictions\n", req->load_pid));
4256 goto error;
4257 }
4258 LOCK_PFS(flags);
4259
4260 if (is_system) {
4261 if (pfm_sessions.pfs_ptrace_use_dbregs) {
4262 DPRINT(("cannot load [%d] dbregs in use\n", task->pid));
4263 ret = -EBUSY;
4264 } else {
4265 pfm_sessions.pfs_sys_use_dbregs++;
4266 DPRINT(("load [%d] increased sys_use_dbreg=%u\n", task->pid, pfm_sessions.pfs_sys_use_dbregs));
4267 set_dbregs = 1;
4268 }
4269 }
4270
4271 UNLOCK_PFS(flags);
4272
4273 if (ret) goto error;
4274 }
4275
4276 /*
4277 * SMP system-wide monitoring implies self-monitoring.
4278 *
4279 * The programming model expects the task to
4280 * be pinned on a CPU throughout the session.
4281 * Here we take note of the current CPU at the
4282 * time the context is loaded. No call from
4283 * another CPU will be allowed.
4284 *
4285	 * The pinning via sched_setaffinity()
4286 * must be done by the calling task prior
4287 * to this call.
4288 *
4289 * systemwide: keep track of CPU this session is supposed to run on
4290 */
4291 the_cpu = ctx->ctx_cpu = smp_processor_id();
4292
4293 ret = -EBUSY;
4294 /*
4295 * now reserve the session
4296 */
4297 ret = pfm_reserve_session(current, is_system, the_cpu);
4298 if (ret) goto error;
4299
4300 /*
4301 * task is necessarily stopped at this point.
4302 *
4303 * If the previous context was zombie, then it got removed in
4304 * pfm_save_regs(). Therefore we should not see it here.
4305 * If we see a context, then this is an active context
4306 *
4307 * XXX: needs to be atomic
4308 */
4309 DPRINT(("before cmpxchg() old_ctx=%p new_ctx=%p\n",
4310 thread->pfm_context, ctx));
4311
4312 old = ia64_cmpxchg(acq, &thread->pfm_context, NULL, ctx, sizeof(pfm_context_t *));
4313 if (old != NULL) {
4314 DPRINT(("load_pid [%d] already has a context\n", req->load_pid));
4315 goto error_unres;
4316 }
4317
4318 pfm_reset_msgq(ctx);
4319
4320 ctx->ctx_state = PFM_CTX_LOADED;
4321
4322 /*
4323 * link context to task
4324 */
4325 ctx->ctx_task = task;
4326
4327 if (is_system) {
4328 /*
4329 * we load as stopped
4330 */
4331 PFM_CPUINFO_SET(PFM_CPUINFO_SYST_WIDE);
4332 PFM_CPUINFO_CLEAR(PFM_CPUINFO_DCR_PP);
4333
4334 if (ctx->ctx_fl_excl_idle) PFM_CPUINFO_SET(PFM_CPUINFO_EXCL_IDLE);
4335 } else {
4336 thread->flags |= IA64_THREAD_PM_VALID;
4337 }
4338
4339 /*
4340 * propagate into thread-state
4341 */
4342 pfm_copy_pmds(task, ctx);
4343 pfm_copy_pmcs(task, ctx);
4344
4345 pmcs_source = thread->pmcs;
4346 pmds_source = thread->pmds;
4347
4348 /*
4349 * always the case for system-wide
4350 */
4351 if (task == current) {
4352
4353 if (is_system == 0) {
4354
4355 /* allow user level control */
4356 ia64_psr(regs)->sp = 0;
4357 DPRINT(("clearing psr.sp for [%d]\n", task->pid));
4358
4359 SET_LAST_CPU(ctx, smp_processor_id());
4360 INC_ACTIVATION();
4361 SET_ACTIVATION(ctx);
4362#ifndef CONFIG_SMP
4363 /*
4364 * push the other task out, if any
4365 */
4366 owner_task = GET_PMU_OWNER();
4367 if (owner_task) pfm_lazy_save_regs(owner_task);
4368#endif
4369 }
4370 /*
4371 * load all PMD from ctx to PMU (as opposed to thread state)
4372 * restore all PMC from ctx to PMU
4373 */
4374 pfm_restore_pmds(pmds_source, ctx->ctx_all_pmds[0]);
4375 pfm_restore_pmcs(pmcs_source, ctx->ctx_all_pmcs[0]);
4376
4377 ctx->ctx_reload_pmcs[0] = 0UL;
4378 ctx->ctx_reload_pmds[0] = 0UL;
4379
4380 /*
4381 * guaranteed safe by earlier check against DBG_VALID
4382 */
4383 if (ctx->ctx_fl_using_dbreg) {
4384 pfm_restore_ibrs(ctx->ctx_ibrs, pmu_conf->num_ibrs);
4385 pfm_restore_dbrs(ctx->ctx_dbrs, pmu_conf->num_dbrs);
4386 }
4387 /*
4388 * set new ownership
4389 */
4390 SET_PMU_OWNER(task, ctx);
4391
4392 DPRINT(("context loaded on PMU for [%d]\n", task->pid));
4393 } else {
4394 /*
4395 * when not current, task MUST be stopped, so this is safe
4396 */
4397 regs = ia64_task_regs(task);
4398
4399 /* force a full reload */
4400 ctx->ctx_last_activation = PFM_INVALID_ACTIVATION;
4401 SET_LAST_CPU(ctx, -1);
4402
4403 /* initial saved psr (stopped) */
4404 ctx->ctx_saved_psr_up = 0UL;
4405 ia64_psr(regs)->up = ia64_psr(regs)->pp = 0;
4406 }
4407
4408 ret = 0;
4409
4410error_unres:
4411 if (ret) pfm_unreserve_session(ctx, ctx->ctx_fl_system, the_cpu);
4412error:
4413 /*
4414 * we must undo the dbregs setting (for system-wide)
4415 */
4416 if (ret && set_dbregs) {
4417 LOCK_PFS(flags);
4418 pfm_sessions.pfs_sys_use_dbregs--;
4419 UNLOCK_PFS(flags);
4420 }
4421 /*
4422 * release task, there is now a link with the context
4423 */
4424 if (is_system == 0 && task != current) {
4425 pfm_put_task(task);
4426
4427 if (ret == 0) {
4428 ret = pfm_check_task_exist(ctx);
4429 if (ret) {
4430 ctx->ctx_state = PFM_CTX_UNLOADED;
4431 ctx->ctx_task = NULL;
4432 }
4433 }
4434 }
4435 return ret;
4436}
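/*
 * Illustrative sketch: a self-monitoring thread (non-blocking context)
 * typically attaches its context to itself after programming the PMC/PMD
 * registers, using a hypothetical ctx_fd obtained from PFM_CREATE_CONTEXT:
 *
 *	pfarg_load_t ld = { .load_pid = getpid() };
 *	if (perfmonctl(ctx_fd, PFM_LOAD_CONTEXT, &ld, 1) == -1)
 *		perror("PFM_LOAD_CONTEXT");
 */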
4437
4438/*
4439 * in this function, we do not need to increase the use count
4440 * for the task via get_task_struct(), because we hold the
4441 * context lock. If the task were to disappear while having
4442 * a context attached, it would go through pfm_exit_thread()
4443 * which also grabs the context lock and would therefore be blocked
4444 * until we are here.
4445 */
4446static void pfm_flush_pmds(struct task_struct *, pfm_context_t *ctx);
4447
4448static int
4449pfm_context_unload(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
4450{
4451 struct task_struct *task = PFM_CTX_TASK(ctx);
4452 struct pt_regs *tregs;
4453 int prev_state, is_system;
4454 int ret;
4455
4456 DPRINT(("ctx_state=%d task [%d]\n", ctx->ctx_state, task ? task->pid : -1));
4457
4458 prev_state = ctx->ctx_state;
4459 is_system = ctx->ctx_fl_system;
4460
4461 /*
4462 * unload only when necessary
4463 */
4464 if (prev_state == PFM_CTX_UNLOADED) {
4465 DPRINT(("ctx_state=%d, nothing to do\n", prev_state));
4466 return 0;
4467 }
4468
4469 /*
4470 * clear psr and dcr bits
4471 */
4472 ret = pfm_stop(ctx, NULL, 0, regs);
4473 if (ret) return ret;
4474
4475 ctx->ctx_state = PFM_CTX_UNLOADED;
4476
4477 /*
4478 * in system mode, we need to update the PMU directly
4479 * and the user level state of the caller, which may not
4480 * necessarily be the creator of the context.
4481 */
4482 if (is_system) {
4483
4484 /*
4485 * Update cpuinfo
4486 *
4487 * local PMU is taken care of in pfm_stop()
4488 */
4489 PFM_CPUINFO_CLEAR(PFM_CPUINFO_SYST_WIDE);
4490 PFM_CPUINFO_CLEAR(PFM_CPUINFO_EXCL_IDLE);
4491
4492 /*
4493 * save PMDs in context
4494 * release ownership
4495 */
4496 pfm_flush_pmds(current, ctx);
4497
4498 /*
4499 * at this point we are done with the PMU
4500 * so we can unreserve the resource.
4501 */
4502 if (prev_state != PFM_CTX_ZOMBIE)
4503 pfm_unreserve_session(ctx, 1 , ctx->ctx_cpu);
4504
4505 /*
4506 * disconnect context from task
4507 */
4508 task->thread.pfm_context = NULL;
4509 /*
4510 * disconnect task from context
4511 */
4512 ctx->ctx_task = NULL;
4513
4514 /*
4515 * There is nothing more to cleanup here.
4516 */
4517 return 0;
4518 }
4519
4520 /*
4521 * per-task mode
4522 */
4523 tregs = task == current ? regs : ia64_task_regs(task);
4524
4525 if (task == current) {
4526 /*
4527 * cancel user level control
4528 */
4529 ia64_psr(regs)->sp = 1;
4530
4531 DPRINT(("setting psr.sp for [%d]\n", task->pid));
4532 }
4533 /*
4534 * save PMDs to context
4535 * release ownership
4536 */
4537 pfm_flush_pmds(task, ctx);
4538
4539 /*
4540 * at this point we are done with the PMU
4541 * so we can unreserve the resource.
4542 *
4543 * when state was ZOMBIE, we have already unreserved.
4544 */
4545 if (prev_state != PFM_CTX_ZOMBIE)
4546 pfm_unreserve_session(ctx, 0 , ctx->ctx_cpu);
4547
4548 /*
4549 * reset activation counter and psr
4550 */
4551 ctx->ctx_last_activation = PFM_INVALID_ACTIVATION;
4552 SET_LAST_CPU(ctx, -1);
4553
4554 /*
4555 * PMU state will not be restored
4556 */
4557 task->thread.flags &= ~IA64_THREAD_PM_VALID;
4558
4559 /*
4560 * break links between context and task
4561 */
4562 task->thread.pfm_context = NULL;
4563 ctx->ctx_task = NULL;
4564
4565 PFM_SET_WORK_PENDING(task, 0);
4566
4567 ctx->ctx_fl_trap_reason = PFM_TRAP_REASON_NONE;
4568 ctx->ctx_fl_can_restart = 0;
4569 ctx->ctx_fl_going_zombie = 0;
4570
4571 DPRINT(("disconnected [%d] from context\n", task->pid));
4572
4573 return 0;
4574}
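/*
 * Illustrative sketch of the user level counterpart of this routine:
 *
 *	perfmonctl(ctx_fd, PFM_UNLOAD_CONTEXT, NULL, 0);
 *
 * after which the context can be released by simply close()ing ctx_fd.
 */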
4575
4576
4577/*
4578 * called only from exit_thread(): task == current
4579 * we come here only if current has a context attached (loaded or masked)
4580 */
4581void
4582pfm_exit_thread(struct task_struct *task)
4583{
4584 pfm_context_t *ctx;
4585 unsigned long flags;
4586 struct pt_regs *regs = ia64_task_regs(task);
4587 int ret, state;
4588 int free_ok = 0;
4589
4590 ctx = PFM_GET_CTX(task);
4591
4592 PROTECT_CTX(ctx, flags);
4593
4594 DPRINT(("state=%d task [%d]\n", ctx->ctx_state, task->pid));
4595
4596 state = ctx->ctx_state;
4597 switch(state) {
4598 case PFM_CTX_UNLOADED:
4599 /*
4600	 * only comes to this function if pfm_context is not NULL, i.e., cannot
4601 * be in unloaded state
4602 */
4603 printk(KERN_ERR "perfmon: pfm_exit_thread [%d] ctx unloaded\n", task->pid);
4604 break;
4605 case PFM_CTX_LOADED:
4606 case PFM_CTX_MASKED:
4607 ret = pfm_context_unload(ctx, NULL, 0, regs);
4608 if (ret) {
4609 printk(KERN_ERR "perfmon: pfm_exit_thread [%d] state=%d unload failed %d\n", task->pid, state, ret);
4610 }
4611 DPRINT(("ctx unloaded for current state was %d\n", state));
4612
4613 pfm_end_notify_user(ctx);
4614 break;
4615 case PFM_CTX_ZOMBIE:
4616 ret = pfm_context_unload(ctx, NULL, 0, regs);
4617 if (ret) {
4618 printk(KERN_ERR "perfmon: pfm_exit_thread [%d] state=%d unload failed %d\n", task->pid, state, ret);
4619 }
4620 free_ok = 1;
4621 break;
4622 default:
4623 printk(KERN_ERR "perfmon: pfm_exit_thread [%d] unexpected state=%d\n", task->pid, state);
4624 break;
4625 }
4626 UNPROTECT_CTX(ctx, flags);
4627
4628 { u64 psr = pfm_get_psr();
4629 BUG_ON(psr & (IA64_PSR_UP|IA64_PSR_PP));
4630 BUG_ON(GET_PMU_OWNER());
4631 BUG_ON(ia64_psr(regs)->up);
4632 BUG_ON(ia64_psr(regs)->pp);
4633 }
4634
4635 /*
4636 * All memory free operations (especially for vmalloc'ed memory)
4637 * MUST be done with interrupts ENABLED.
4638 */
4639 if (free_ok) pfm_context_free(ctx);
4640}
4641
4642/*
4643	 * functions MUST be listed in the increasing order of their index (see perfmon.h)
4644 */
4645#define PFM_CMD(name, flags, arg_count, arg_type, getsz) { name, #name, flags, arg_count, sizeof(arg_type), getsz }
4646#define PFM_CMD_S(name, flags) { name, #name, flags, 0, 0, NULL }
4647#define PFM_CMD_PCLRWS (PFM_CMD_FD|PFM_CMD_ARG_RW|PFM_CMD_STOP)
4648#define PFM_CMD_PCLRW (PFM_CMD_FD|PFM_CMD_ARG_RW)
4649#define PFM_CMD_NONE { NULL, "no-cmd", 0, 0, 0, NULL}
4650
4651static pfm_cmd_desc_t pfm_cmd_tab[]={
4652/* 0 */PFM_CMD_NONE,
4653/* 1 */PFM_CMD(pfm_write_pmcs, PFM_CMD_PCLRWS, PFM_CMD_ARG_MANY, pfarg_reg_t, NULL),
4654/* 2 */PFM_CMD(pfm_write_pmds, PFM_CMD_PCLRWS, PFM_CMD_ARG_MANY, pfarg_reg_t, NULL),
4655/* 3 */PFM_CMD(pfm_read_pmds, PFM_CMD_PCLRWS, PFM_CMD_ARG_MANY, pfarg_reg_t, NULL),
4656/* 4 */PFM_CMD_S(pfm_stop, PFM_CMD_PCLRWS),
4657/* 5 */PFM_CMD_S(pfm_start, PFM_CMD_PCLRWS),
4658/* 6 */PFM_CMD_NONE,
4659/* 7 */PFM_CMD_NONE,
4660/* 8 */PFM_CMD(pfm_context_create, PFM_CMD_ARG_RW, 1, pfarg_context_t, pfm_ctx_getsize),
4661/* 9 */PFM_CMD_NONE,
4662/* 10 */PFM_CMD_S(pfm_restart, PFM_CMD_PCLRW),
4663/* 11 */PFM_CMD_NONE,
4664/* 12 */PFM_CMD(pfm_get_features, PFM_CMD_ARG_RW, 1, pfarg_features_t, NULL),
4665/* 13 */PFM_CMD(pfm_debug, 0, 1, unsigned int, NULL),
4666/* 14 */PFM_CMD_NONE,
4667/* 15 */PFM_CMD(pfm_get_pmc_reset, PFM_CMD_ARG_RW, PFM_CMD_ARG_MANY, pfarg_reg_t, NULL),
4668/* 16 */PFM_CMD(pfm_context_load, PFM_CMD_PCLRWS, 1, pfarg_load_t, NULL),
4669/* 17 */PFM_CMD_S(pfm_context_unload, PFM_CMD_PCLRWS),
4670/* 18 */PFM_CMD_NONE,
4671/* 19 */PFM_CMD_NONE,
4672/* 20 */PFM_CMD_NONE,
4673/* 21 */PFM_CMD_NONE,
4674/* 22 */PFM_CMD_NONE,
4675/* 23 */PFM_CMD_NONE,
4676/* 24 */PFM_CMD_NONE,
4677/* 25 */PFM_CMD_NONE,
4678/* 26 */PFM_CMD_NONE,
4679/* 27 */PFM_CMD_NONE,
4680/* 28 */PFM_CMD_NONE,
4681/* 29 */PFM_CMD_NONE,
4682/* 30 */PFM_CMD_NONE,
4683/* 31 */PFM_CMD_NONE,
4684/* 32 */PFM_CMD(pfm_write_ibrs, PFM_CMD_PCLRWS, PFM_CMD_ARG_MANY, pfarg_dbreg_t, NULL),
4685/* 33 */PFM_CMD(pfm_write_dbrs, PFM_CMD_PCLRWS, PFM_CMD_ARG_MANY, pfarg_dbreg_t, NULL)
4686};
4687#define PFM_CMD_COUNT (sizeof(pfm_cmd_tab)/sizeof(pfm_cmd_desc_t))
4688
4689static int
4690pfm_check_task_state(pfm_context_t *ctx, int cmd, unsigned long flags)
4691{
4692 struct task_struct *task;
4693 int state, old_state;
4694
4695recheck:
4696 state = ctx->ctx_state;
4697 task = ctx->ctx_task;
4698
4699 if (task == NULL) {
4700 DPRINT(("context %d no task, state=%d\n", ctx->ctx_fd, state));
4701 return 0;
4702 }
4703
4704 DPRINT(("context %d state=%d [%d] task_state=%ld must_stop=%d\n",
4705 ctx->ctx_fd,
4706 state,
4707 task->pid,
4708 task->state, PFM_CMD_STOPPED(cmd)));
4709
4710 /*
4711 * self-monitoring always ok.
4712 *
4713 * for system-wide the caller can either be the creator of the
4714	 * context (the one to which the context is attached) OR
4715 * a task running on the same CPU as the session.
4716 */
4717 if (task == current || ctx->ctx_fl_system) return 0;
4718
4719 /*
a5a70b75 4720 * we are monitoring another thread
1da177e4 4721 */
a5a70b75 4722 switch(state) {
4723 case PFM_CTX_UNLOADED:
4724 /*
4725 * if context is UNLOADED we are safe to go
4726 */
4727 return 0;
4728 case PFM_CTX_ZOMBIE:
4729 /*
4730 * no command can operate on a zombie context
4731 */
4732 DPRINT(("cmd %d state zombie cannot operate on context\n", cmd));
4733 return -EINVAL;
4734 case PFM_CTX_MASKED:
4735 /*
4736 * PMU state has been saved to software even though
4737 * the thread may still be running.
4738 */
4739 if (cmd != PFM_UNLOAD_CONTEXT) return 0;
1da177e4
LT
4740 }
4741
4742 /*
4743 * context is LOADED or MASKED. Some commands may need to have
4744 * the task stopped.
4745 *
4746 * We could lift this restriction for UP but it would mean that
4747 * the user has no guarantee the task would not run between
4748 * two successive calls to perfmonctl(). That's probably OK.
4749	 * If the user wants to ensure the task does not run, then
4750 * the task must be stopped.
4751 */
4752 if (PFM_CMD_STOPPED(cmd)) {
4753 if ((task->state != TASK_STOPPED) && (task->state != TASK_TRACED)) {
4754 DPRINT(("[%d] task not in stopped state\n", task->pid));
4755 return -EBUSY;
4756 }
4757 /*
4758 * task is now stopped, wait for ctxsw out
4759 *
4760 * This is an interesting point in the code.
4761 * We need to unprotect the context because
4762	 * the pfm_save_regs() routine needs to grab
4763	 * the same lock. There is a danger in doing
4764 * this because it leaves a window open for
4765 * another task to get access to the context
4766 * and possibly change its state. The one thing
4767 * that is not possible is for the context to disappear
4768 * because we are protected by the VFS layer, i.e.,
4769 * get_fd()/put_fd().
4770 */
4771 old_state = state;
4772
4773 UNPROTECT_CTX(ctx, flags);
4774
4775 wait_task_inactive(task);
4776
4777 PROTECT_CTX(ctx, flags);
4778
4779 /*
4780 * we must recheck to verify if state has changed
4781 */
4782 if (ctx->ctx_state != old_state) {
4783 DPRINT(("old_state=%d new_state=%d\n", old_state, ctx->ctx_state));
4784 goto recheck;
4785 }
4786 }
4787 return 0;
4788}
4789
4790/*
4791 * system-call entry point (must return long)
4792 */
4793asmlinkage long
4794sys_perfmonctl (int fd, int cmd, void __user *arg, int count)
4795{
4796 struct file *file = NULL;
4797 pfm_context_t *ctx = NULL;
4798 unsigned long flags = 0UL;
4799 void *args_k = NULL;
4800 long ret; /* will expand int return types */
4801 size_t base_sz, sz, xtra_sz = 0;
4802 int narg, completed_args = 0, call_made = 0, cmd_flags;
4803 int (*func)(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs);
4804 int (*getsize)(void *arg, size_t *sz);
4805#define PFM_MAX_ARGSIZE 4096
4806
4807 /*
4808 * reject any call if perfmon was disabled at initialization
4809 */
4810 if (unlikely(pmu_conf == NULL)) return -ENOSYS;
4811
4812 if (unlikely(cmd < 0 || cmd >= PFM_CMD_COUNT)) {
4813 DPRINT(("invalid cmd=%d\n", cmd));
4814 return -EINVAL;
4815 }
4816
4817 func = pfm_cmd_tab[cmd].cmd_func;
4818 narg = pfm_cmd_tab[cmd].cmd_narg;
4819 base_sz = pfm_cmd_tab[cmd].cmd_argsize;
4820 getsize = pfm_cmd_tab[cmd].cmd_getsize;
4821 cmd_flags = pfm_cmd_tab[cmd].cmd_flags;
4822
4823 if (unlikely(func == NULL)) {
4824 DPRINT(("invalid cmd=%d\n", cmd));
4825 return -EINVAL;
4826 }
4827
4828 DPRINT(("cmd=%s idx=%d narg=0x%x argsz=%lu count=%d\n",
4829 PFM_CMD_NAME(cmd),
4830 cmd,
4831 narg,
4832 base_sz,
4833 count));
4834
4835 /*
4836 * check if number of arguments matches what the command expects
4837 */
4838 if (unlikely((narg == PFM_CMD_ARG_MANY && count <= 0) || (narg > 0 && narg != count)))
4839 return -EINVAL;
4840
4841restart_args:
4842 sz = xtra_sz + base_sz*count;
4843 /*
4844 * limit abuse to min page size
4845 */
4846 if (unlikely(sz > PFM_MAX_ARGSIZE)) {
4847 printk(KERN_ERR "perfmon: [%d] argument too big %lu\n", current->pid, sz);
4848 return -E2BIG;
4849 }
4850
4851 /*
4852 * allocate default-sized argument buffer
4853 */
4854 if (likely(count && args_k == NULL)) {
4855 args_k = kmalloc(PFM_MAX_ARGSIZE, GFP_KERNEL);
4856 if (args_k == NULL) return -ENOMEM;
4857 }
4858
4859 ret = -EFAULT;
4860
4861 /*
4862 * copy arguments
4863 *
4864 * assume sz = 0 for command without parameters
4865 */
4866 if (sz && copy_from_user(args_k, arg, sz)) {
4867 DPRINT(("cannot copy_from_user %lu bytes @%p\n", sz, arg));
4868 goto error_args;
4869 }
4870
4871 /*
4872 * check if command supports extra parameters
4873 */
4874 if (completed_args == 0 && getsize) {
4875 /*
4876 * get extra parameters size (based on main argument)
4877 */
4878 ret = (*getsize)(args_k, &xtra_sz);
4879 if (ret) goto error_args;
4880
4881 completed_args = 1;
4882
4883 DPRINT(("restart_args sz=%lu xtra_sz=%lu\n", sz, xtra_sz));
4884
4885 /* retry if necessary */
4886 if (likely(xtra_sz)) goto restart_args;
4887 }
4888
4889 if (unlikely((cmd_flags & PFM_CMD_FD) == 0)) goto skip_fd;
4890
4891 ret = -EBADF;
4892
4893 file = fget(fd);
4894 if (unlikely(file == NULL)) {
4895 DPRINT(("invalid fd %d\n", fd));
4896 goto error_args;
4897 }
4898 if (unlikely(PFM_IS_FILE(file) == 0)) {
4899 DPRINT(("fd %d not related to perfmon\n", fd));
4900 goto error_args;
4901 }
4902
4903 ctx = (pfm_context_t *)file->private_data;
4904 if (unlikely(ctx == NULL)) {
4905 DPRINT(("no context for fd %d\n", fd));
4906 goto error_args;
4907 }
4908 prefetch(&ctx->ctx_state);
4909
4910 PROTECT_CTX(ctx, flags);
4911
4912 /*
4913 * check task is stopped
4914 */
4915 ret = pfm_check_task_state(ctx, cmd, flags);
4916 if (unlikely(ret)) goto abort_locked;
4917
4918skip_fd:
4919 ret = (*func)(ctx, args_k, count, ia64_task_regs(current));
4920
4921 call_made = 1;
4922
4923abort_locked:
4924 if (likely(ctx)) {
4925 DPRINT(("context unlocked\n"));
4926 UNPROTECT_CTX(ctx, flags);
4927 fput(file);
4928 }
4929
4930 /* copy argument back to user, if needed */
4931 if (call_made && PFM_CMD_RW_ARG(cmd) && copy_to_user(arg, args_k, base_sz*count)) ret = -EFAULT;
4932
4933error_args:
4934 if (args_k) kfree(args_k);
4935
4936 DPRINT(("cmd=%s ret=%ld\n", PFM_CMD_NAME(cmd), ret));
4937
4938 return ret;
4939}
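/*
 * Worked example of the two-pass argument copy above (illustrative): for
 * PFM_CREATE_CONTEXT, base_sz is sizeof(pfarg_context_t) and getsize points
 * to pfm_ctx_getsize(). The first copy_from_user() brings in the main
 * structure, pfm_ctx_getsize() then reports the size of the sampling format
 * argument that follows it in user memory as xtra_sz, and the code jumps
 * back to restart_args to re-copy base_sz*count + xtra_sz bytes into a
 * single kernel buffer.
 */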
4940
4941static void
4942pfm_resume_after_ovfl(pfm_context_t *ctx, unsigned long ovfl_regs, struct pt_regs *regs)
4943{
4944 pfm_buffer_fmt_t *fmt = ctx->ctx_buf_fmt;
4945 pfm_ovfl_ctrl_t rst_ctrl;
4946 int state;
4947 int ret = 0;
4948
4949 state = ctx->ctx_state;
4950 /*
4951 * Unlock sampling buffer and reset index atomically
4952 * XXX: not really needed when blocking
4953 */
4954 if (CTX_HAS_SMPL(ctx)) {
4955
4956 rst_ctrl.bits.mask_monitoring = 0;
4957 rst_ctrl.bits.reset_ovfl_pmds = 0;
4958
4959 if (state == PFM_CTX_LOADED)
4960 ret = pfm_buf_fmt_restart_active(fmt, current, &rst_ctrl, ctx->ctx_smpl_hdr, regs);
4961 else
4962 ret = pfm_buf_fmt_restart(fmt, current, &rst_ctrl, ctx->ctx_smpl_hdr, regs);
4963 } else {
4964 rst_ctrl.bits.mask_monitoring = 0;
4965 rst_ctrl.bits.reset_ovfl_pmds = 1;
4966 }
4967
4968 if (ret == 0) {
4969 if (rst_ctrl.bits.reset_ovfl_pmds) {
4970 pfm_reset_regs(ctx, &ovfl_regs, PFM_PMD_LONG_RESET);
4971 }
4972 if (rst_ctrl.bits.mask_monitoring == 0) {
4973 DPRINT(("resuming monitoring\n"));
4974 if (ctx->ctx_state == PFM_CTX_MASKED) pfm_restore_monitoring(current);
4975 } else {
4976 DPRINT(("stopping monitoring\n"));
4977 //pfm_stop_monitoring(current, regs);
4978 }
4979 ctx->ctx_state = PFM_CTX_LOADED;
4980 }
4981}
4982
4983/*
4984 * context MUST BE LOCKED when calling
4985 * can only be called for current
4986 */
4987static void
4988pfm_context_force_terminate(pfm_context_t *ctx, struct pt_regs *regs)
4989{
4990 int ret;
4991
4992 DPRINT(("entering for [%d]\n", current->pid));
4993
4994 ret = pfm_context_unload(ctx, NULL, 0, regs);
4995 if (ret) {
4996	 		printk(KERN_ERR "pfm_context_force_terminate: [%d] unload failed with %d\n", current->pid, ret);
4997 }
4998
4999 /*
5000 * and wakeup controlling task, indicating we are now disconnected
5001 */
5002 wake_up_interruptible(&ctx->ctx_zombieq);
5003
5004 /*
5005 * given that context is still locked, the controlling
5006 * task will only get access when we return from
5007 * pfm_handle_work().
5008 */
5009}
5010
5011static int pfm_ovfl_notify_user(pfm_context_t *ctx, unsigned long ovfl_pmds);
4944930a
SE
5012 /*
5013 * pfm_handle_work() can be called with interrupts enabled
5014 * (TIF_NEED_RESCHED) or disabled. The down_interruptible
5015 * call may sleep, therefore we must re-enable interrupts
5016 * to avoid deadlocks. It is safe to do so because this function
5017 * is called ONLY when returning to user level (PUStk=1), in which case
5018 * there is no risk of kernel stack overflow due to deep
5019 * interrupt nesting.
5020 */
1da177e4
LT
5021void
5022pfm_handle_work(void)
5023{
5024 pfm_context_t *ctx;
5025 struct pt_regs *regs;
4944930a 5026 unsigned long flags, dummy_flags;
1da177e4
LT
5027 unsigned long ovfl_regs;
5028 unsigned int reason;
5029 int ret;
5030
5031 ctx = PFM_GET_CTX(current);
5032 if (ctx == NULL) {
5033 printk(KERN_ERR "perfmon: [%d] has no PFM context\n", current->pid);
5034 return;
5035 }
5036
5037 PROTECT_CTX(ctx, flags);
5038
5039 PFM_SET_WORK_PENDING(current, 0);
5040
5041 pfm_clear_task_notify();
5042
5043 regs = ia64_task_regs(current);
5044
5045 /*
5046 * extract reason for being here and clear
5047 */
5048 reason = ctx->ctx_fl_trap_reason;
5049 ctx->ctx_fl_trap_reason = PFM_TRAP_REASON_NONE;
5050 ovfl_regs = ctx->ctx_ovfl_regs[0];
5051
5052 DPRINT(("reason=%d state=%d\n", reason, ctx->ctx_state));
5053
5054 /*
5055 * must be done before we check for simple-reset mode
5056 */
5057 if (ctx->ctx_fl_going_zombie || ctx->ctx_state == PFM_CTX_ZOMBIE) goto do_zombie;
5058
5059
5060 //if (CTX_OVFL_NOBLOCK(ctx)) goto skip_blocking;
5061 if (reason == PFM_TRAP_REASON_RESET) goto skip_blocking;
5062
4944930a
SE
5063 /*
5064 * restore interrupt mask to what it was on entry.
5065	 * Could be enabled/disabled.
5066 */
1da177e4
LT
5067 UNPROTECT_CTX(ctx, flags);
5068
4944930a
SE
5069 /*
5070 * force interrupt enable because of down_interruptible()
5071 */
1da177e4
LT
5072 local_irq_enable();
5073
5074 DPRINT(("before block sleeping\n"));
5075
5076 /*
5077 * may go through without blocking on SMP systems
5078 * if restart has been received already by the time we call down()
5079 */
5080 ret = down_interruptible(&ctx->ctx_restart_sem);
5081
5082 DPRINT(("after block sleeping ret=%d\n", ret));
5083
5084 /*
4944930a
SE
5085 * lock context and mask interrupts again
5086 * We save flags into a dummy because we may have
5087 * altered interrupts mask compared to entry in this
5088 * function.
1da177e4 5089 */
4944930a 5090 PROTECT_CTX(ctx, dummy_flags);
1da177e4
LT
5091
5092 /*
5093 * we need to read the ovfl_regs only after wake-up
5094	 * because we may have had pfm_write_pmds() in between,
5095	 * which can change PMD values; in that case
5096	 * ovfl_regs has been reset for the new PMD values.
5097 */
5098 ovfl_regs = ctx->ctx_ovfl_regs[0];
5099
5100 if (ctx->ctx_fl_going_zombie) {
5101do_zombie:
5102 DPRINT(("context is zombie, bailing out\n"));
5103 pfm_context_force_terminate(ctx, regs);
5104 goto nothing_to_do;
5105 }
5106 /*
5107 * in case of interruption of down() we don't restart anything
5108 */
5109 if (ret < 0) goto nothing_to_do;
5110
5111skip_blocking:
5112 pfm_resume_after_ovfl(ctx, ovfl_regs, regs);
5113 ctx->ctx_ovfl_regs[0] = 0UL;
5114
5115nothing_to_do:
4944930a
SE
5116 /*
5117 * restore flags as they were upon entry
5118 */
1da177e4
LT
5119 UNPROTECT_CTX(ctx, flags);
5120}
5121
5122static int
5123pfm_notify_user(pfm_context_t *ctx, pfm_msg_t *msg)
5124{
5125 if (ctx->ctx_state == PFM_CTX_ZOMBIE) {
5126 DPRINT(("ignoring overflow notification, owner is zombie\n"));
5127 return 0;
5128 }
5129
5130 DPRINT(("waking up somebody\n"));
5131
5132 if (msg) wake_up_interruptible(&ctx->ctx_msgq_wait);
5133
5134 /*
5135 * safe, we are not in intr handler, nor in ctxsw when
5136 * we come here
5137 */
5138 kill_fasync (&ctx->ctx_async_queue, SIGIO, POLL_IN);
5139
5140 return 0;
5141}
5142
5143static int
5144pfm_ovfl_notify_user(pfm_context_t *ctx, unsigned long ovfl_pmds)
5145{
5146 pfm_msg_t *msg = NULL;
5147
5148 if (ctx->ctx_fl_no_msg == 0) {
5149 msg = pfm_get_new_msg(ctx);
5150 if (msg == NULL) {
5151 printk(KERN_ERR "perfmon: pfm_ovfl_notify_user no more notification msgs\n");
5152 return -1;
5153 }
5154
5155 msg->pfm_ovfl_msg.msg_type = PFM_MSG_OVFL;
5156 msg->pfm_ovfl_msg.msg_ctx_fd = ctx->ctx_fd;
5157 msg->pfm_ovfl_msg.msg_active_set = 0;
5158 msg->pfm_ovfl_msg.msg_ovfl_pmds[0] = ovfl_pmds;
5159 msg->pfm_ovfl_msg.msg_ovfl_pmds[1] = 0UL;
5160 msg->pfm_ovfl_msg.msg_ovfl_pmds[2] = 0UL;
5161 msg->pfm_ovfl_msg.msg_ovfl_pmds[3] = 0UL;
5162 msg->pfm_ovfl_msg.msg_tstamp = 0UL;
5163 }
5164
5165 DPRINT(("ovfl msg: msg=%p no_msg=%d fd=%d ovfl_pmds=0x%lx\n",
5166 msg,
5167 ctx->ctx_fl_no_msg,
5168 ctx->ctx_fd,
5169 ovfl_pmds));
5170
5171 return pfm_notify_user(ctx, msg);
5172}
5173
5174static int
5175pfm_end_notify_user(pfm_context_t *ctx)
5176{
5177 pfm_msg_t *msg;
5178
5179 msg = pfm_get_new_msg(ctx);
5180 if (msg == NULL) {
5181 printk(KERN_ERR "perfmon: pfm_end_notify_user no more notification msgs\n");
5182 return -1;
5183 }
5184 /* no leak */
5185 memset(msg, 0, sizeof(*msg));
5186
5187 msg->pfm_end_msg.msg_type = PFM_MSG_END;
5188 msg->pfm_end_msg.msg_ctx_fd = ctx->ctx_fd;
5189 msg->pfm_ovfl_msg.msg_tstamp = 0UL;
5190
5191 DPRINT(("end msg: msg=%p no_msg=%d ctx_fd=%d\n",
5192 msg,
5193 ctx->ctx_fl_no_msg,
5194 ctx->ctx_fd));
5195
5196 return pfm_notify_user(ctx, msg);
5197}
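/*
 * Illustrative sketch, assuming the pfm_gen_msg view of the pfm_msg_t union
 * declared in <asm/perfmon.h>: the monitoring tool retrieves these messages
 * by read()ing the context file descriptor (or by waiting for them via
 * poll()/select() or SIGIO):
 *
 *	pfm_msg_t msg;
 *	if (read(ctx_fd, &msg, sizeof(msg)) == sizeof(msg)
 *	    && msg.pfm_gen_msg.msg_type == PFM_MSG_OVFL)
 *		;	// handle counter overflow notification
 */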
5198
5199/*
5200 * main overflow processing routine.
5201	 * it can be called from the interrupt path or explicitly during the context switch code
5202 */
5203static void
5204pfm_overflow_handler(struct task_struct *task, pfm_context_t *ctx, u64 pmc0, struct pt_regs *regs)
5205{
5206 pfm_ovfl_arg_t *ovfl_arg;
5207 unsigned long mask;
5208 unsigned long old_val, ovfl_val, new_val;
5209 unsigned long ovfl_notify = 0UL, ovfl_pmds = 0UL, smpl_pmds = 0UL, reset_pmds;
5210 unsigned long tstamp;
5211 pfm_ovfl_ctrl_t ovfl_ctrl;
5212 unsigned int i, has_smpl;
5213 int must_notify = 0;
5214
5215 if (unlikely(ctx->ctx_state == PFM_CTX_ZOMBIE)) goto stop_monitoring;
5216
5217 /*
5218 * sanity test. Should never happen
5219 */
5220 if (unlikely((pmc0 & 0x1) == 0)) goto sanity_check;
5221
5222 tstamp = ia64_get_itc();
5223 mask = pmc0 >> PMU_FIRST_COUNTER;
5224 ovfl_val = pmu_conf->ovfl_val;
5225 has_smpl = CTX_HAS_SMPL(ctx);
5226
5227 DPRINT_ovfl(("pmc0=0x%lx pid=%d iip=0x%lx, %s "
5228 "used_pmds=0x%lx\n",
5229 pmc0,
5230 task ? task->pid: -1,
5231 (regs ? regs->cr_iip : 0),
5232 CTX_OVFL_NOBLOCK(ctx) ? "nonblocking" : "blocking",
5233 ctx->ctx_used_pmds[0]));
5234
5235
5236 /*
5237 * first we update the virtual counters
5238 * assume there was a prior ia64_srlz_d() issued
5239 */
5240 for (i = PMU_FIRST_COUNTER; mask ; i++, mask >>= 1) {
5241
5242 /* skip pmd which did not overflow */
5243 if ((mask & 0x1) == 0) continue;
5244
5245 /*
5246 * Note that the pmd is not necessarily 0 at this point as qualified events
5247 * may have happened before the PMU was frozen. The residual count is not
5248 * taken into consideration here but will be with any read of the pmd via
5249 * pfm_read_pmds().
5250 */
5251 old_val = new_val = ctx->ctx_pmds[i].val;
5252 new_val += 1 + ovfl_val;
5253 ctx->ctx_pmds[i].val = new_val;
5254
5255 /*
5256 * check for overflow condition
5257 */
5258 if (likely(old_val > new_val)) {
5259 ovfl_pmds |= 1UL << i;
5260 if (PMC_OVFL_NOTIFY(ctx, i)) ovfl_notify |= 1UL << i;
5261 }
5262
5263 DPRINT_ovfl(("ctx_pmd[%d].val=0x%lx old_val=0x%lx pmd=0x%lx ovfl_pmds=0x%lx ovfl_notify=0x%lx\n",
5264 i,
5265 new_val,
5266 old_val,
5267 ia64_get_pmd(i) & ovfl_val,
5268 ovfl_pmds,
5269 ovfl_notify));
5270 }
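	/*
	 * Worked example (illustrative): with a 47-bit hardware counter,
	 * ovfl_val is 0x00007fffffffffff, so 1 + ovfl_val equals 2^47. If the
	 * software-maintained value was old_val = 0xffff800000000000, then
	 * new_val = old_val + 2^47 wraps to 0; since old_val > new_val, the
	 * 64-bit (virtualized) overflow is detected and bit i is set in
	 * ovfl_pmds.
	 */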
5271
5272 /*
5273 * there was no 64-bit overflow, nothing else to do
5274 */
5275 if (ovfl_pmds == 0UL) return;
5276
5277 /*
5278 * reset all control bits
5279 */
5280 ovfl_ctrl.val = 0;
5281 reset_pmds = 0UL;
5282
5283 /*
5284 * if a sampling format module exists, then we "cache" the overflow by
5285 * calling the module's handler() routine.
5286 */
5287 if (has_smpl) {
5288 unsigned long start_cycles, end_cycles;
5289 unsigned long pmd_mask;
5290 int j, k, ret = 0;
5291 int this_cpu = smp_processor_id();
5292
5293 pmd_mask = ovfl_pmds >> PMU_FIRST_COUNTER;
5294 ovfl_arg = &ctx->ctx_ovfl_arg;
5295
5296 prefetch(ctx->ctx_smpl_hdr);
5297
5298 for(i=PMU_FIRST_COUNTER; pmd_mask && ret == 0; i++, pmd_mask >>=1) {
5299
5300 mask = 1UL << i;
5301
5302 if ((pmd_mask & 0x1) == 0) continue;
5303
5304 ovfl_arg->ovfl_pmd = (unsigned char )i;
5305 ovfl_arg->ovfl_notify = ovfl_notify & mask ? 1 : 0;
5306 ovfl_arg->active_set = 0;
5307 ovfl_arg->ovfl_ctrl.val = 0; /* module must fill in all fields */
5308 ovfl_arg->smpl_pmds[0] = smpl_pmds = ctx->ctx_pmds[i].smpl_pmds[0];
5309
5310 ovfl_arg->pmd_value = ctx->ctx_pmds[i].val;
5311 ovfl_arg->pmd_last_reset = ctx->ctx_pmds[i].lval;
5312 ovfl_arg->pmd_eventid = ctx->ctx_pmds[i].eventid;
5313
5314 /*
5315 * copy values of pmds of interest. Sampling format may copy them
5316 * into sampling buffer.
5317 */
5318 if (smpl_pmds) {
5319 for(j=0, k=0; smpl_pmds; j++, smpl_pmds >>=1) {
5320 if ((smpl_pmds & 0x1) == 0) continue;
5321 ovfl_arg->smpl_pmds_values[k++] = PMD_IS_COUNTING(j) ? pfm_read_soft_counter(ctx, j) : ia64_get_pmd(j);
5322 DPRINT_ovfl(("smpl_pmd[%d]=pmd%u=0x%lx\n", k-1, j, ovfl_arg->smpl_pmds_values[k-1]));
5323 }
5324 }
5325
5326 pfm_stats[this_cpu].pfm_smpl_handler_calls++;
5327
5328 start_cycles = ia64_get_itc();
5329
5330 /*
5331 * call custom buffer format record (handler) routine
5332 */
5333 ret = (*ctx->ctx_buf_fmt->fmt_handler)(task, ctx->ctx_smpl_hdr, ovfl_arg, regs, tstamp);
5334
5335 end_cycles = ia64_get_itc();
5336
5337 /*
5338 * For those controls, we take the union because they have
5339 * an all or nothing behavior.
5340 */
5341 ovfl_ctrl.bits.notify_user |= ovfl_arg->ovfl_ctrl.bits.notify_user;
5342 ovfl_ctrl.bits.block_task |= ovfl_arg->ovfl_ctrl.bits.block_task;
5343 ovfl_ctrl.bits.mask_monitoring |= ovfl_arg->ovfl_ctrl.bits.mask_monitoring;
5344 /*
5345 * build the bitmask of pmds to reset now
5346 */
5347 if (ovfl_arg->ovfl_ctrl.bits.reset_ovfl_pmds) reset_pmds |= mask;
5348
5349 pfm_stats[this_cpu].pfm_smpl_handler_cycles += end_cycles - start_cycles;
5350 }
5351 /*
5352 * when the module cannot handle the rest of the overflows, we abort right here
5353 */
5354 if (ret && pmd_mask) {
5355 DPRINT(("handler aborts leftover ovfl_pmds=0x%lx\n",
5356 pmd_mask<<PMU_FIRST_COUNTER));
5357 }
5358 /*
5359 * remove the pmds we reset now from the set of pmds to reset in pfm_restart()
5360 */
5361 ovfl_pmds &= ~reset_pmds;
5362 } else {
5363 /*
5364 * when no sampling module is used, then the default
5365 * is to notify on overflow if requested by user
5366 */
5367 ovfl_ctrl.bits.notify_user = ovfl_notify ? 1 : 0;
5368 ovfl_ctrl.bits.block_task = ovfl_notify ? 1 : 0;
5369 ovfl_ctrl.bits.mask_monitoring = ovfl_notify ? 1 : 0; /* XXX: change for saturation */
5370 ovfl_ctrl.bits.reset_ovfl_pmds = ovfl_notify ? 0 : 1;
5371 /*
5372 * if needed, we reset all overflowed pmds
5373 */
5374 if (ovfl_notify == 0) reset_pmds = ovfl_pmds;
5375 }
5376
5377 DPRINT_ovfl(("ovfl_pmds=0x%lx reset_pmds=0x%lx\n", ovfl_pmds, reset_pmds));
5378
5379 /*
5380 * reset the requested PMD registers using the short reset values
5381 */
5382 if (reset_pmds) {
5383 unsigned long bm = reset_pmds;
5384 pfm_reset_regs(ctx, &bm, PFM_PMD_SHORT_RESET);
5385 }
5386
5387 if (ovfl_notify && ovfl_ctrl.bits.notify_user) {
5388 /*
5389 * keep track of what to reset when unblocking
5390 */
5391 ctx->ctx_ovfl_regs[0] = ovfl_pmds;
5392
5393 /*
5394 * check for blocking context
5395 */
5396 if (CTX_OVFL_NOBLOCK(ctx) == 0 && ovfl_ctrl.bits.block_task) {
5397
5398 ctx->ctx_fl_trap_reason = PFM_TRAP_REASON_BLOCK;
5399
5400 /*
5401 * set the perfmon specific checking pending work for the task
5402 */
5403 PFM_SET_WORK_PENDING(task, 1);
5404
5405 /*
5406 * when coming from ctxsw, current still points to the
5407 * previous task, therefore we must work with task and not current.
5408 */
5409 pfm_set_task_notify(task);
5410 }
5411 /*
5412	 * defer until state is changed (shortens the spin window). The context is locked
5413	 * anyway, so the signal receiver would just spin for nothing.
5414 */
5415 must_notify = 1;
5416 }
5417
5418 DPRINT_ovfl(("owner [%d] pending=%ld reason=%u ovfl_pmds=0x%lx ovfl_notify=0x%lx masked=%d\n",
5419 GET_PMU_OWNER() ? GET_PMU_OWNER()->pid : -1,
5420 PFM_GET_WORK_PENDING(task),
5421 ctx->ctx_fl_trap_reason,
5422 ovfl_pmds,
5423 ovfl_notify,
5424 ovfl_ctrl.bits.mask_monitoring ? 1 : 0));
5425 /*
5426 * in case monitoring must be stopped, we toggle the psr bits
5427 */
5428 if (ovfl_ctrl.bits.mask_monitoring) {
5429 pfm_mask_monitoring(task);
5430 ctx->ctx_state = PFM_CTX_MASKED;
5431 ctx->ctx_fl_can_restart = 1;
5432 }
5433
5434 /*
5435 * send notification now
5436 */
5437 if (must_notify) pfm_ovfl_notify_user(ctx, ovfl_notify);
5438
5439 return;
5440
5441sanity_check:
5442 printk(KERN_ERR "perfmon: CPU%d overflow handler [%d] pmc0=0x%lx\n",
5443 smp_processor_id(),
5444 task ? task->pid : -1,
5445 pmc0);
5446 return;
5447
5448stop_monitoring:
5449 /*
5450 * in SMP, zombie context is never restored but reclaimed in pfm_load_regs().
5451 * Moreover, zombies are also reclaimed in pfm_save_regs(). Therefore we can
5452 * come here as zombie only if the task is the current task. In which case, we
5453 * can access the PMU hardware directly.
5454 *
5455 * Note that zombies do have PM_VALID set. So here we do the minimal.
5456 *
5457 * In case the context was zombified it could not be reclaimed at the time
5458 * the monitoring program exited. At this point, the PMU reservation has been
5459	 * returned and the sampling buffer has been freed. We must convert this call
5460 * into a spurious interrupt. However, we must also avoid infinite overflows
5461 * by stopping monitoring for this task. We can only come here for a per-task
5462 * context. All we need to do is to stop monitoring using the psr bits which
5463	 * are always task private. By re-enabling secure monitoring, we ensure that
5464 * the monitored task will not be able to re-activate monitoring.
5465 * The task will eventually be context switched out, at which point the context
5466 * will be reclaimed (that includes releasing ownership of the PMU).
5467 *
5468	 * So there might be a window of time where the number of per-task sessions is zero
5469	 * yet one PMU might have an owner and get at most one overflow interrupt for a zombie
5470	 * context. This is safe because if a per-task session comes in, it will push this one
5471	 * out and, by virtue of pfm_save_regs(), this one will disappear. If a system wide
5472	 * session is forced on that CPU, given that we use task pinning, pfm_save_regs() will
5473 * also push our zombie context out.
5474 *
5475 * Overall pretty hairy stuff....
5476 */
5477 DPRINT(("ctx is zombie for [%d], converted to spurious\n", task ? task->pid: -1));
5478 pfm_clear_psr_up();
5479 ia64_psr(regs)->up = 0;
5480 ia64_psr(regs)->sp = 1;
5481 return;
5482}
5483
5484static int
5485pfm_do_interrupt_handler(int irq, void *arg, struct pt_regs *regs)
5486{
5487 struct task_struct *task;
5488 pfm_context_t *ctx;
5489 unsigned long flags;
5490 u64 pmc0;
5491 int this_cpu = smp_processor_id();
5492 int retval = 0;
5493
5494 pfm_stats[this_cpu].pfm_ovfl_intr_count++;
5495
5496 /*
5497 * srlz.d done before arriving here
5498 */
5499 pmc0 = ia64_get_pmc(0);
5500
5501 task = GET_PMU_OWNER();
5502 ctx = GET_PMU_CTX();
5503
5504 /*
5505 * if we have some pending bits set
5506 * assumes : if any PMC0.bit[63-1] is set, then PMC0.fr = 1
5507 */
5508 if (PMC0_HAS_OVFL(pmc0) && task) {
5509 /*
5510 * we assume that pmc0.fr is always set here
5511 */
5512
5513 /* sanity check */
5514 if (!ctx) goto report_spurious1;
5515
5516 if (ctx->ctx_fl_system == 0 && (task->thread.flags & IA64_THREAD_PM_VALID) == 0)
5517 goto report_spurious2;
5518
5519 PROTECT_CTX_NOPRINT(ctx, flags);
5520
5521 pfm_overflow_handler(task, ctx, pmc0, regs);
5522
5523 UNPROTECT_CTX_NOPRINT(ctx, flags);
5524
5525 } else {
5526 pfm_stats[this_cpu].pfm_spurious_ovfl_intr_count++;
5527 retval = -1;
5528 }
5529 /*
5530 * keep it unfrozen at all times
5531 */
5532 pfm_unfreeze_pmu();
5533
5534 return retval;
5535
5536report_spurious1:
5537 printk(KERN_INFO "perfmon: spurious overflow interrupt on CPU%d: process %d has no PFM context\n",
5538 this_cpu, task->pid);
5539 pfm_unfreeze_pmu();
5540 return -1;
5541report_spurious2:
5542 printk(KERN_INFO "perfmon: spurious overflow interrupt on CPU%d: process %d, invalid flag\n",
5543 this_cpu,
5544 task->pid);
5545 pfm_unfreeze_pmu();
5546 return -1;
5547}
5548
5549static irqreturn_t
5550pfm_interrupt_handler(int irq, void *arg, struct pt_regs *regs)
5551{
5552 unsigned long start_cycles, total_cycles;
5553 unsigned long min, max;
5554 int this_cpu;
5555 int ret;
5556
5557 this_cpu = get_cpu();
5558 min = pfm_stats[this_cpu].pfm_ovfl_intr_cycles_min;
5559 max = pfm_stats[this_cpu].pfm_ovfl_intr_cycles_max;
5560
5561 start_cycles = ia64_get_itc();
5562
5563 ret = pfm_do_interrupt_handler(irq, arg, regs);
5564
5565 total_cycles = ia64_get_itc();
5566
5567 /*
5568 * don't measure spurious interrupts
5569 */
5570 if (likely(ret == 0)) {
5571 total_cycles -= start_cycles;
5572
5573 if (total_cycles < min) pfm_stats[this_cpu].pfm_ovfl_intr_cycles_min = total_cycles;
5574 if (total_cycles > max) pfm_stats[this_cpu].pfm_ovfl_intr_cycles_max = total_cycles;
5575
5576 pfm_stats[this_cpu].pfm_ovfl_intr_cycles += total_cycles;
5577 }
5578 put_cpu_no_resched();
5579 return IRQ_HANDLED;
5580}
5581
5582/*
5583 * /proc/perfmon interface, for debug only
5584 */
5585
5586#define PFM_PROC_SHOW_HEADER ((void *)NR_CPUS+1)
5587
5588static void *
5589pfm_proc_start(struct seq_file *m, loff_t *pos)
5590{
5591 if (*pos == 0) {
5592 return PFM_PROC_SHOW_HEADER;
5593 }
5594
5595 while (*pos <= NR_CPUS) {
5596 if (cpu_online(*pos - 1)) {
5597 return (void *)*pos;
5598 }
5599 ++*pos;
5600 }
5601 return NULL;
5602}
5603
5604static void *
5605pfm_proc_next(struct seq_file *m, void *v, loff_t *pos)
5606{
5607 ++*pos;
5608 return pfm_proc_start(m, pos);
5609}
5610
5611static void
5612pfm_proc_stop(struct seq_file *m, void *v)
5613{
5614}
5615
5616static void
5617pfm_proc_show_header(struct seq_file *m)
5618{
5619 struct list_head * pos;
5620 pfm_buffer_fmt_t * entry;
5621 unsigned long flags;
5622
5623 seq_printf(m,
5624 "perfmon version : %u.%u\n"
5625 "model : %s\n"
5626 "fastctxsw : %s\n"
5627 "expert mode : %s\n"
5628 "ovfl_mask : 0x%lx\n"
5629 "PMU flags : 0x%x\n",
5630 PFM_VERSION_MAJ, PFM_VERSION_MIN,
5631 pmu_conf->pmu_name,
5632 pfm_sysctl.fastctxsw > 0 ? "Yes": "No",
5633 pfm_sysctl.expert_mode > 0 ? "Yes": "No",
5634 pmu_conf->ovfl_val,
5635 pmu_conf->flags);
5636
5637 LOCK_PFS(flags);
5638
5639 seq_printf(m,
5640 "proc_sessions : %u\n"
5641 "sys_sessions : %u\n"
5642 "sys_use_dbregs : %u\n"
5643 "ptrace_use_dbregs : %u\n",
5644 pfm_sessions.pfs_task_sessions,
5645 pfm_sessions.pfs_sys_sessions,
5646 pfm_sessions.pfs_sys_use_dbregs,
5647 pfm_sessions.pfs_ptrace_use_dbregs);
5648
5649 UNLOCK_PFS(flags);
5650
5651 spin_lock(&pfm_buffer_fmt_lock);
5652
5653 list_for_each(pos, &pfm_buffer_fmt_list) {
5654 entry = list_entry(pos, pfm_buffer_fmt_t, fmt_list);
5655 seq_printf(m, "format : %02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x %s\n",
5656 entry->fmt_uuid[0],
5657 entry->fmt_uuid[1],
5658 entry->fmt_uuid[2],
5659 entry->fmt_uuid[3],
5660 entry->fmt_uuid[4],
5661 entry->fmt_uuid[5],
5662 entry->fmt_uuid[6],
5663 entry->fmt_uuid[7],
5664 entry->fmt_uuid[8],
5665 entry->fmt_uuid[9],
5666 entry->fmt_uuid[10],
5667 entry->fmt_uuid[11],
5668 entry->fmt_uuid[12],
5669 entry->fmt_uuid[13],
5670 entry->fmt_uuid[14],
5671 entry->fmt_uuid[15],
5672 entry->fmt_name);
5673 }
5674 spin_unlock(&pfm_buffer_fmt_lock);
5675
5676}
5677
5678static int
5679pfm_proc_show(struct seq_file *m, void *v)
5680{
5681 unsigned long psr;
5682 unsigned int i;
5683 int cpu;
5684
5685 if (v == PFM_PROC_SHOW_HEADER) {
5686 pfm_proc_show_header(m);
5687 return 0;
5688 }
5689
5690 /* show info for CPU (v - 1) */
5691
5692 cpu = (long)v - 1;
5693 seq_printf(m,
5694 "CPU%-2d overflow intrs : %lu\n"
5695 "CPU%-2d overflow cycles : %lu\n"
5696 "CPU%-2d overflow min : %lu\n"
5697 "CPU%-2d overflow max : %lu\n"
5698 "CPU%-2d smpl handler calls : %lu\n"
5699 "CPU%-2d smpl handler cycles : %lu\n"
5700 "CPU%-2d spurious intrs : %lu\n"
5701 "CPU%-2d replay intrs : %lu\n"
5702 "CPU%-2d syst_wide : %d\n"
5703 "CPU%-2d dcr_pp : %d\n"
5704 "CPU%-2d exclude idle : %d\n"
5705 "CPU%-2d owner : %d\n"
5706 "CPU%-2d context : %p\n"
5707 "CPU%-2d activations : %lu\n",
5708 cpu, pfm_stats[cpu].pfm_ovfl_intr_count,
5709 cpu, pfm_stats[cpu].pfm_ovfl_intr_cycles,
5710 cpu, pfm_stats[cpu].pfm_ovfl_intr_cycles_min,
5711 cpu, pfm_stats[cpu].pfm_ovfl_intr_cycles_max,
5712 cpu, pfm_stats[cpu].pfm_smpl_handler_calls,
5713 cpu, pfm_stats[cpu].pfm_smpl_handler_cycles,
5714 cpu, pfm_stats[cpu].pfm_spurious_ovfl_intr_count,
5715 cpu, pfm_stats[cpu].pfm_replay_ovfl_intr_count,
5716 cpu, pfm_get_cpu_data(pfm_syst_info, cpu) & PFM_CPUINFO_SYST_WIDE ? 1 : 0,
5717 cpu, pfm_get_cpu_data(pfm_syst_info, cpu) & PFM_CPUINFO_DCR_PP ? 1 : 0,
5718 cpu, pfm_get_cpu_data(pfm_syst_info, cpu) & PFM_CPUINFO_EXCL_IDLE ? 1 : 0,
5719 cpu, pfm_get_cpu_data(pmu_owner, cpu) ? pfm_get_cpu_data(pmu_owner, cpu)->pid: -1,
5720 cpu, pfm_get_cpu_data(pmu_ctx, cpu),
5721 cpu, pfm_get_cpu_data(pmu_activation_number, cpu));
5722
5723 if (num_online_cpus() == 1 && pfm_sysctl.debug > 0) {
5724
5725 psr = pfm_get_psr();
5726
5727 ia64_srlz_d();
5728
5729 seq_printf(m,
5730 "CPU%-2d psr : 0x%lx\n"
5731 "CPU%-2d pmc0 : 0x%lx\n",
5732 cpu, psr,
5733 cpu, ia64_get_pmc(0));
5734
5735 for (i=0; PMC_IS_LAST(i) == 0; i++) {
5736 if (PMC_IS_COUNTING(i) == 0) continue;
5737 seq_printf(m,
5738 "CPU%-2d pmc%u : 0x%lx\n"
5739 "CPU%-2d pmd%u : 0x%lx\n",
5740 cpu, i, ia64_get_pmc(i),
5741 cpu, i, ia64_get_pmd(i));
5742 }
5743 }
5744 return 0;
5745}
5746
5747struct seq_operations pfm_seq_ops = {
5748 .start = pfm_proc_start,
5749 .next = pfm_proc_next,
5750 .stop = pfm_proc_stop,
5751 .show = pfm_proc_show
5752};
5753
5754static int
5755pfm_proc_open(struct inode *inode, struct file *file)
5756{
5757 return seq_open(file, &pfm_seq_ops);
5758}
5759
5760
5761/*
5762 * we come here as soon as local_cpu_data->pfm_syst_wide is set. this happens
5763 * during pfm_enable() hence before pfm_start(). We cannot assume monitoring
5764 * is active or inactive based on mode. We must rely on the value in
5765 * local_cpu_data->pfm_syst_info
5766 */
5767void
5768pfm_syst_wide_update_task(struct task_struct *task, unsigned long info, int is_ctxswin)
5769{
5770 struct pt_regs *regs;
5771 unsigned long dcr;
5772 unsigned long dcr_pp;
5773
5774 dcr_pp = info & PFM_CPUINFO_DCR_PP ? 1 : 0;
5775
5776 /*
5777 * pid 0 is guaranteed to be the idle task. There is one such task with pid 0
5778 * on every CPU, so we can rely on the pid to identify the idle task.
5779 */
5780 if ((info & PFM_CPUINFO_EXCL_IDLE) == 0 || task->pid) {
5781 regs = ia64_task_regs(task);
5782 ia64_psr(regs)->pp = is_ctxswin ? dcr_pp : 0;
5783 return;
5784 }
5785 /*
5786 * if monitoring has started
5787 */
5788 if (dcr_pp) {
5789 dcr = ia64_getreg(_IA64_REG_CR_DCR);
5790 /*
5791 * context switching in?
5792 */
5793 if (is_ctxswin) {
5794 /* mask monitoring for the idle task */
5795 ia64_setreg(_IA64_REG_CR_DCR, dcr & ~IA64_DCR_PP);
5796 pfm_clear_psr_pp();
5797 ia64_srlz_i();
5798 return;
5799 }
5800 /*
5801 * context switching out
5802 * restore monitoring for next task
5803 *
5804 * Due to inlining this odd if-then-else construction generates
5805 * better code.
5806 */
5807 ia64_setreg(_IA64_REG_CR_DCR, dcr |IA64_DCR_PP);
5808 pfm_set_psr_pp();
5809 ia64_srlz_i();
5810 }
5811}
5812
5813#ifdef CONFIG_SMP
5814
5815static void
5816pfm_force_cleanup(pfm_context_t *ctx, struct pt_regs *regs)
5817{
5818 struct task_struct *task = ctx->ctx_task;
5819
5820 ia64_psr(regs)->up = 0;
5821 ia64_psr(regs)->sp = 1;
5822
5823 if (GET_PMU_OWNER() == task) {
5824 DPRINT(("cleared ownership for [%d]\n", ctx->ctx_task->pid));
5825 SET_PMU_OWNER(NULL, NULL);
5826 }
5827
5828 /*
5829 * disconnect the task from the context and vice-versa
5830 */
5831 PFM_SET_WORK_PENDING(task, 0);
5832
5833 task->thread.pfm_context = NULL;
5834 task->thread.flags &= ~IA64_THREAD_PM_VALID;
5835
5836 DPRINT(("force cleanup for [%d]\n", task->pid));
5837}
5838
5839
5840/*
5841 * in 2.6, interrupts are masked when we come here and the runqueue lock is held
5842 */
5843void
5844pfm_save_regs(struct task_struct *task)
5845{
5846 pfm_context_t *ctx;
5847 struct thread_struct *t;
5848 unsigned long flags;
5849 u64 psr;
5850
5851
5852 ctx = PFM_GET_CTX(task);
5853 if (ctx == NULL) return;
5854 t = &task->thread;
5855
5856 /*
5857 * we always come here with interrupts ALREADY disabled by
5858 * the scheduler. So we simply need to protect against concurrent
5859 * access, not CPU concurrency.
5860 */
5861 flags = pfm_protect_ctx_ctxsw(ctx);
5862
5863 if (ctx->ctx_state == PFM_CTX_ZOMBIE) {
5864 struct pt_regs *regs = ia64_task_regs(task);
5865
5866 pfm_clear_psr_up();
5867
5868 pfm_force_cleanup(ctx, regs);
5869
5870 BUG_ON(ctx->ctx_smpl_hdr);
5871
5872 pfm_unprotect_ctx_ctxsw(ctx, flags);
5873
5874 pfm_context_free(ctx);
5875 return;
5876 }
5877
5878 /*
5879 * save current PSR: needed because we modify it
5880 */
5881 ia64_srlz_d();
5882 psr = pfm_get_psr();
5883
5884 BUG_ON(psr & (IA64_PSR_I));
5885
5886 /*
5887 * stop monitoring:
5888 * This is the last instruction which may generate an overflow
5889 *
5890	 * We do not need to set psr.sp because it is irrelevant in the kernel.
5891 * It will be restored from ipsr when going back to user level
5892 */
5893 pfm_clear_psr_up();
5894
5895 /*
5896 * keep a copy of psr.up (for reload)
5897 */
5898 ctx->ctx_saved_psr_up = psr & IA64_PSR_UP;
5899
5900 /*
5901 * release ownership of this PMU.
5902 * PM interrupts are masked, so nothing
5903 * can happen.
5904 */
5905 SET_PMU_OWNER(NULL, NULL);
5906
5907 /*
5908	 * we systematically save the PMDs as we have no
5909	 * guarantee we will be scheduled on that same
5910 * CPU again.
5911 */
5912 pfm_save_pmds(t->pmds, ctx->ctx_used_pmds[0]);
5913
5914 /*
5915 * save pmc0 ia64_srlz_d() done in pfm_save_pmds()
5916 * we will need it on the restore path to check
5917 * for pending overflow.
5918 */
5919 t->pmcs[0] = ia64_get_pmc(0);
5920
5921 /*
5922	 * unfreeze PMU if it had pending overflows
5923 */
5924 if (t->pmcs[0] & ~0x1UL) pfm_unfreeze_pmu();
5925
5926 /*
5927 * finally, allow context access.
5928 * interrupts will still be masked after this call.
5929 */
5930 pfm_unprotect_ctx_ctxsw(ctx, flags);
5931}
5932
5933#else /* !CONFIG_SMP */
5934void
5935pfm_save_regs(struct task_struct *task)
5936{
5937 pfm_context_t *ctx;
5938 u64 psr;
5939
5940 ctx = PFM_GET_CTX(task);
5941 if (ctx == NULL) return;
5942
5943 /*
5944 * save current PSR: needed because we modify it
5945 */
5946 psr = pfm_get_psr();
5947
5948 BUG_ON(psr & (IA64_PSR_I));
5949
5950 /*
5951 * stop monitoring:
5952 * This is the last instruction which may generate an overflow
5953 *
	 * We do not need to set psr.sp because it is irrelevant in the kernel.
	 * It will be restored from ipsr when going back to user level.
5956 */
5957 pfm_clear_psr_up();
5958
5959 /*
5960 * keep a copy of psr.up (for reload)
5961 */
5962 ctx->ctx_saved_psr_up = psr & IA64_PSR_UP;
5963}
5964
5965static void
5966pfm_lazy_save_regs (struct task_struct *task)
5967{
5968 pfm_context_t *ctx;
5969 struct thread_struct *t;
5970 unsigned long flags;
5971
	BUG_ON(pfm_get_psr() & IA64_PSR_UP);
5975
5976 ctx = PFM_GET_CTX(task);
5977 t = &task->thread;
5978
5979 /*
5980 * we need to mask PMU overflow here to
5981 * make sure that we maintain pmc0 until
5982 * we save it. overflow interrupts are
5983 * treated as spurious if there is no
5984 * owner.
5985 *
5986 * XXX: I don't think this is necessary
5987 */
5988 PROTECT_CTX(ctx,flags);
5989
5990 /*
5991 * release ownership of this PMU.
5992 * must be done before we save the registers.
5993 *
5994 * after this call any PMU interrupt is treated
5995 * as spurious.
5996 */
5997 SET_PMU_OWNER(NULL, NULL);
5998
5999 /*
6000 * save all the pmds we use
6001 */
6002 pfm_save_pmds(t->pmds, ctx->ctx_used_pmds[0]);
6003
6004 /*
	 * save pmc0 (the ia64_srlz_d() is done in pfm_save_pmds());
	 * it is needed to check for pending overflows
	 * on the restore path
6008 */
6009 t->pmcs[0] = ia64_get_pmc(0);
6010
6011 /*
	 * unfreeze the PMU if it had pending overflows
6013 */
6014 if (t->pmcs[0] & ~0x1UL) pfm_unfreeze_pmu();
6015
6016 /*
	 * now we can unmask PMU interrupts; they will
	 * be treated as purely spurious and we will not
	 * lose any information
6020 */
6021 UNPROTECT_CTX(ctx,flags);
6022}
6023#endif /* CONFIG_SMP */
6024
6025#ifdef CONFIG_SMP
6026/*
6027 * in 2.6, interrupts are masked when we come here and the runqueue lock is held
6028 */
6029void
6030pfm_load_regs (struct task_struct *task)
6031{
6032 pfm_context_t *ctx;
6033 struct thread_struct *t;
6034 unsigned long pmc_mask = 0UL, pmd_mask = 0UL;
6035 unsigned long flags;
6036 u64 psr, psr_up;
6037 int need_irq_resend;
6038
6039 ctx = PFM_GET_CTX(task);
6040 if (unlikely(ctx == NULL)) return;
6041
6042 BUG_ON(GET_PMU_OWNER());
6043
6044 t = &task->thread;
6045 /*
6046 * possible on unload
6047 */
6048 if (unlikely((t->flags & IA64_THREAD_PM_VALID) == 0)) return;
6049
6050 /*
6051 * we always come here with interrupts ALREADY disabled by
6052 * the scheduler. So we simply need to protect against concurrent
6053 * access, not CPU concurrency.
6054 */
6055 flags = pfm_protect_ctx_ctxsw(ctx);
6056 psr = pfm_get_psr();
6057
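	/*
	 * on PMU models where reloading pmc0 with overflow bits set does not
	 * itself raise the overflow interrupt, the interrupt must be resent
	 * in software (PFM_PMU_IRQ_RESEND); see hw_resend_irq() below
	 */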
6058 need_irq_resend = pmu_conf->flags & PFM_PMU_IRQ_RESEND;
6059
6060 BUG_ON(psr & (IA64_PSR_UP|IA64_PSR_PP));
6061 BUG_ON(psr & IA64_PSR_I);
6062
6063 if (unlikely(ctx->ctx_state == PFM_CTX_ZOMBIE)) {
6064 struct pt_regs *regs = ia64_task_regs(task);
6065
6066 BUG_ON(ctx->ctx_smpl_hdr);
6067
6068 pfm_force_cleanup(ctx, regs);
6069
6070 pfm_unprotect_ctx_ctxsw(ctx, flags);
6071
6072 /*
6073 * this one (kmalloc'ed) is fine with interrupts disabled
6074 */
6075 pfm_context_free(ctx);
6076
6077 return;
6078 }
6079
6080 /*
6081 * we restore ALL the debug registers to avoid picking up
6082 * stale state.
6083 */
6084 if (ctx->ctx_fl_using_dbreg) {
6085 pfm_restore_ibrs(ctx->ctx_ibrs, pmu_conf->num_ibrs);
6086 pfm_restore_dbrs(ctx->ctx_dbrs, pmu_conf->num_dbrs);
6087 }
6088 /*
6089 * retrieve saved psr.up
6090 */
6091 psr_up = ctx->ctx_saved_psr_up;
6092
6093 /*
6094 * if we were the last user of the PMU on that CPU,
	 * there is nothing to do except restore psr
6096 */
6097 if (GET_LAST_CPU(ctx) == smp_processor_id() && ctx->ctx_last_activation == GET_ACTIVATION()) {
6098
6099 /*
6100 * retrieve partial reload masks (due to user modifications)
6101 */
6102 pmc_mask = ctx->ctx_reload_pmcs[0];
6103 pmd_mask = ctx->ctx_reload_pmds[0];
6104
6105 } else {
6106 /*
6107 * To avoid leaking information to the user level when psr.sp=0,
6108 * we must reload ALL implemented pmds (even the ones we don't use).
6109 * In the kernel we only allow PFM_READ_PMDS on registers which
6110 * we initialized or requested (sampling) so there is no risk there.
6111 */
6112 pmd_mask = pfm_sysctl.fastctxsw ? ctx->ctx_used_pmds[0] : ctx->ctx_all_pmds[0];
6113
6114 /*
6115 * ALL accessible PMCs are systematically reloaded, unused registers
6116 * get their default (from pfm_reset_pmu_state()) values to avoid picking
6117 * up stale configuration.
6118 *
6119 * PMC0 is never in the mask. It is always restored separately.
6120 */
6121 pmc_mask = ctx->ctx_all_pmcs[0];
6122 }
6123 /*
	 * when the context is MASKED, we will restore the PMCs with plm=0
	 * and the PMDs with stale information, but that is OK: nothing
	 * will be captured.
6127 *
6128 * XXX: optimize here
6129 */
6130 if (pmd_mask) pfm_restore_pmds(t->pmds, pmd_mask);
6131 if (pmc_mask) pfm_restore_pmcs(t->pmcs, pmc_mask);
6132
6133 /*
6134 * check for pending overflow at the time the state
6135 * was saved.
6136 */
6137 if (unlikely(PMC0_HAS_OVFL(t->pmcs[0]))) {
6138 /*
6139 * reload pmc0 with the overflow information
6140 * On McKinley PMU, this will trigger a PMU interrupt
6141 */
6142 ia64_set_pmc(0, t->pmcs[0]);
6143 ia64_srlz_d();
6144 t->pmcs[0] = 0UL;
6145
6146 /*
6147 * will replay the PMU interrupt
6148 */
6149 if (need_irq_resend) hw_resend_irq(NULL, IA64_PERFMON_VECTOR);
6150
6151 pfm_stats[smp_processor_id()].pfm_replay_ovfl_intr_count++;
6152 }
6153
6154 /*
6155 * we just did a reload, so we reset the partial reload fields
6156 */
6157 ctx->ctx_reload_pmcs[0] = 0UL;
6158 ctx->ctx_reload_pmds[0] = 0UL;
6159
6160 SET_LAST_CPU(ctx, smp_processor_id());
6161
6162 /*
	 * bump the activation value for this PMU
6164 */
6165 INC_ACTIVATION();
6166 /*
6167 * record current activation for this context
6168 */
6169 SET_ACTIVATION(ctx);
6170
6171 /*
6172 * establish new ownership.
6173 */
6174 SET_PMU_OWNER(task, ctx);
6175
6176 /*
6177 * restore the psr.up bit. measurement
6178 * is active again.
6179 * no PMU interrupt can happen at this point
6180 * because we still have interrupts disabled.
6181 */
6182 if (likely(psr_up)) pfm_set_psr_up();
6183
6184 /*
6185 * allow concurrent access to context
6186 */
6187 pfm_unprotect_ctx_ctxsw(ctx, flags);
6188}
6189#else /* !CONFIG_SMP */
6190/*
6191 * reload PMU state for UP kernels
 * in 2.6 we come here with interrupts disabled
6193 */
6194void
6195pfm_load_regs (struct task_struct *task)
6196{
6197 struct thread_struct *t;
6198 pfm_context_t *ctx;
6199 struct task_struct *owner;
6200 unsigned long pmd_mask, pmc_mask;
6201 u64 psr, psr_up;
6202 int need_irq_resend;
6203
6204 owner = GET_PMU_OWNER();
6205 ctx = PFM_GET_CTX(task);
6206 t = &task->thread;
6207 psr = pfm_get_psr();
6208
6209 BUG_ON(psr & (IA64_PSR_UP|IA64_PSR_PP));
6210 BUG_ON(psr & IA64_PSR_I);
6211
6212 /*
6213 * we restore ALL the debug registers to avoid picking up
6214 * stale state.
6215 *
6216 * This must be done even when the task is still the owner
6217 * as the registers may have been modified via ptrace()
6218 * (not perfmon) by the previous task.
6219 */
6220 if (ctx->ctx_fl_using_dbreg) {
6221 pfm_restore_ibrs(ctx->ctx_ibrs, pmu_conf->num_ibrs);
6222 pfm_restore_dbrs(ctx->ctx_dbrs, pmu_conf->num_dbrs);
6223 }
6224
6225 /*
	 * retrieve saved psr.up
6227 */
6228 psr_up = ctx->ctx_saved_psr_up;
6229 need_irq_resend = pmu_conf->flags & PFM_PMU_IRQ_RESEND;
6230
6231 /*
	 * short path: our state is still there, we just
	 * need to restore psr and we are done
6234 *
	 * we touch neither the PMCs nor the PMDs. The psr is not touched
	 * by the overflow handler, so we are safe w.r.t. interrupt
	 * concurrency even without interrupt masking.
6238 */
6239 if (likely(owner == task)) {
6240 if (likely(psr_up)) pfm_set_psr_up();
6241 return;
6242 }
6243
6244 /*
	 * someone else is still using the PMU; first push their state out,
	 * then we will be able to install our own.
6247 *
6248 * Upon return, there will be no owner for the current PMU
6249 */
6250 if (owner) pfm_lazy_save_regs(owner);
6251
6252 /*
6253 * To avoid leaking information to the user level when psr.sp=0,
6254 * we must reload ALL implemented pmds (even the ones we don't use).
6255 * In the kernel we only allow PFM_READ_PMDS on registers which
6256 * we initialized or requested (sampling) so there is no risk there.
6257 */
6258 pmd_mask = pfm_sysctl.fastctxsw ? ctx->ctx_used_pmds[0] : ctx->ctx_all_pmds[0];
6259
6260 /*
6261 * ALL accessible PMCs are systematically reloaded, unused registers
6262 * get their default (from pfm_reset_pmu_state()) values to avoid picking
6263 * up stale configuration.
6264 *
6265 * PMC0 is never in the mask. It is always restored separately
6266 */
6267 pmc_mask = ctx->ctx_all_pmcs[0];
6268
6269 pfm_restore_pmds(t->pmds, pmd_mask);
6270 pfm_restore_pmcs(t->pmcs, pmc_mask);
6271
6272 /*
6273 * check for pending overflow at the time the state
6274 * was saved.
6275 */
6276 if (unlikely(PMC0_HAS_OVFL(t->pmcs[0]))) {
6277 /*
6278 * reload pmc0 with the overflow information
6279 * On McKinley PMU, this will trigger a PMU interrupt
6280 */
6281 ia64_set_pmc(0, t->pmcs[0]);
6282 ia64_srlz_d();
6283
6284 t->pmcs[0] = 0UL;
6285
6286 /*
6287 * will replay the PMU interrupt
6288 */
6289 if (need_irq_resend) hw_resend_irq(NULL, IA64_PERFMON_VECTOR);
6290
6291 pfm_stats[smp_processor_id()].pfm_replay_ovfl_intr_count++;
6292 }
6293
6294 /*
6295 * establish new ownership.
6296 */
6297 SET_PMU_OWNER(task, ctx);
6298
6299 /*
6300 * restore the psr.up bit. measurement
6301 * is active again.
6302 * no PMU interrupt can happen at this point
6303 * because we still have interrupts disabled.
6304 */
6305 if (likely(psr_up)) pfm_set_psr_up();
6306}
6307#endif /* CONFIG_SMP */
6308
6309/*
6310 * this function assumes monitoring is stopped
6311 */
6312static void
6313pfm_flush_pmds(struct task_struct *task, pfm_context_t *ctx)
6314{
6315 u64 pmc0;
6316 unsigned long mask2, val, pmd_val, ovfl_val;
6317 int i, can_access_pmu = 0;
6318 int is_self;
6319
6320 /*
6321 * is the caller the task being monitored (or which initiated the
6322 * session for system wide measurements)
6323 */
6324 is_self = ctx->ctx_task == task ? 1 : 0;
6325
6326 /*
	 * the PMU can be accessed if the task is the owner of the PMU state on the current CPU,
	 * or if we are running on the CPU bound to the context in system-wide mode
	 * (which is not necessarily the task the context is attached to in this mode).
	 * In system-wide mode we always have can_access_pmu true because a task running on an
6331 * invalid processor is flagged earlier in the call stack (see pfm_stop).
6332 */
6333 can_access_pmu = (GET_PMU_OWNER() == task) || (ctx->ctx_fl_system && ctx->ctx_cpu == smp_processor_id());
6334 if (can_access_pmu) {
6335 /*
6336 * Mark the PMU as not owned
6337 * This will cause the interrupt handler to do nothing in case an overflow
6338 * interrupt was in-flight
6339 * This also guarantees that pmc0 will contain the final state
6340 * It virtually gives us full control on overflow processing from that point
6341 * on.
6342 */
6343 SET_PMU_OWNER(NULL, NULL);
6344 DPRINT(("releasing ownership\n"));
6345
6346 /*
6347 * read current overflow status:
6348 *
6349 * we are guaranteed to read the final stable state
6350 */
6351 ia64_srlz_d();
6352 pmc0 = ia64_get_pmc(0); /* slow */
6353
6354 /*
6355 * reset freeze bit, overflow status information destroyed
6356 */
6357 pfm_unfreeze_pmu();
6358 } else {
6359 pmc0 = task->thread.pmcs[0];
6360 /*
6361 * clear whatever overflow status bits there were
6362 */
6363 task->thread.pmcs[0] = 0;
6364 }
6365 ovfl_val = pmu_conf->ovfl_val;
6366 /*
6367 * we save all the used pmds
6368 * we take care of overflows for counting PMDs
6369 *
6370 * XXX: sampling situation is not taken into account here
6371 */
6372 mask2 = ctx->ctx_used_pmds[0];
6373
6374 DPRINT(("is_self=%d ovfl_val=0x%lx mask2=0x%lx\n", is_self, ovfl_val, mask2));
6375
6376 for (i = 0; mask2; i++, mask2>>=1) {
6377
		/* skip unused pmds */
6379 if ((mask2 & 0x1) == 0) continue;
6380
6381 /*
		 * can_access_pmu is always true in system-wide mode
6383 */
6384 val = pmd_val = can_access_pmu ? ia64_get_pmd(i) : task->thread.pmds[i];
6385
6386 if (PMD_IS_COUNTING(i)) {
6387 DPRINT(("[%d] pmd[%d] ctx_pmd=0x%lx hw_pmd=0x%lx\n",
6388 task->pid,
6389 i,
6390 ctx->ctx_pmds[i].val,
6391 val & ovfl_val));
6392
6393 /*
6394 * we rebuild the full 64 bit value of the counter
6395 */
6396 val = ctx->ctx_pmds[i].val + (val & ovfl_val);
6397
6398 /*
			 * now everything is in ctx_pmds[] and we need
			 * to clear the value saved by pfm_save_regs() so that
			 * pfm_read_pmds() gets the correct value
6402 */
6403 pmd_val = 0UL;
6404
6405 /*
6406 * take care of overflow inline
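			 * (ovfl_val is the mask of implemented counter bits, so adding
			 *  1 + ovfl_val accounts for exactly one wrap of the hardware counter)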
6407 */
6408 if (pmc0 & (1UL << i)) {
6409 val += 1 + ovfl_val;
6410 DPRINT(("[%d] pmd[%d] overflowed\n", task->pid, i));
6411 }
6412 }
6413
6414 DPRINT(("[%d] ctx_pmd[%d]=0x%lx pmd_val=0x%lx\n", task->pid, i, val, pmd_val));
6415
6416 if (is_self) task->thread.pmds[i] = pmd_val;
6417
6418 ctx->ctx_pmds[i].val = val;
6419 }
6420}
6421
6422static struct irqaction perfmon_irqaction = {
6423 .handler = pfm_interrupt_handler,
6424 .flags = SA_INTERRUPT,
6425 .name = "perfmon"
6426};
6427
6428/*
6429 * perfmon initialization routine, called from the initcall() table
6430 */
6431static int init_pfm_fs(void);
6432
6433static int __init
6434pfm_probe_pmu(void)
6435{
6436 pmu_config_t **p;
6437 int family;
6438
6439 family = local_cpu_data->family;
6440 p = pmu_confs;
6441
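	/*
	 * walk the table of known PMU descriptions: a model-specific probe()
	 * hook takes precedence, otherwise match on the CPU family
	 * (a pmu_family of 0xff acts as a catch-all entry)
	 */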
6442 while(*p) {
6443 if ((*p)->probe) {
6444 if ((*p)->probe() == 0) goto found;
6445 } else if ((*p)->pmu_family == family || (*p)->pmu_family == 0xff) {
6446 goto found;
6447 }
6448 p++;
6449 }
6450 return -1;
6451found:
6452 pmu_conf = *p;
6453 return 0;
6454}
6455
6456static struct file_operations pfm_proc_fops = {
6457 .open = pfm_proc_open,
6458 .read = seq_read,
6459 .llseek = seq_lseek,
6460 .release = seq_release,
6461};
6462
6463int __init
6464pfm_init(void)
6465{
6466 unsigned int n, n_counters, i;
6467
	printk(KERN_INFO "perfmon: version %u.%u IRQ %u\n",
6469 PFM_VERSION_MAJ,
6470 PFM_VERSION_MIN,
6471 IA64_PERFMON_VECTOR);
6472
6473 if (pfm_probe_pmu()) {
6474 printk(KERN_INFO "perfmon: disabled, there is no support for processor family %d\n",
6475 local_cpu_data->family);
6476 return -ENODEV;
6477 }
6478
6479 /*
6480 * compute the number of implemented PMD/PMC from the
6481 * description tables
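	 * (impl_pmcs/impl_pmds are bitmaps stored as arrays of 64-bit words:
	 *  register i goes into word i>>6 at bit position i&63)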
6482 */
6483 n = 0;
6484 for (i=0; PMC_IS_LAST(i) == 0; i++) {
6485 if (PMC_IS_IMPL(i) == 0) continue;
6486 pmu_conf->impl_pmcs[i>>6] |= 1UL << (i&63);
6487 n++;
6488 }
6489 pmu_conf->num_pmcs = n;
6490
6491 n = 0; n_counters = 0;
6492 for (i=0; PMD_IS_LAST(i) == 0; i++) {
6493 if (PMD_IS_IMPL(i) == 0) continue;
6494 pmu_conf->impl_pmds[i>>6] |= 1UL << (i&63);
6495 n++;
6496 if (PMD_IS_COUNTING(i)) n_counters++;
6497 }
6498 pmu_conf->num_pmds = n;
6499 pmu_conf->num_counters = n_counters;
6500
6501 /*
6502 * sanity checks on the number of debug registers
6503 */
6504 if (pmu_conf->use_rr_dbregs) {
6505 if (pmu_conf->num_ibrs > IA64_NUM_DBG_REGS) {
6506 printk(KERN_INFO "perfmon: unsupported number of code debug registers (%u)\n", pmu_conf->num_ibrs);
6507 pmu_conf = NULL;
6508 return -1;
6509 }
6510 if (pmu_conf->num_dbrs > IA64_NUM_DBG_REGS) {
			printk(KERN_INFO "perfmon: unsupported number of data debug registers (%u)\n", pmu_conf->num_dbrs);
6512 pmu_conf = NULL;
6513 return -1;
6514 }
6515 }
6516
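	/* ffz() on the overflow mask yields the hardware counter width in bits */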
	printk(KERN_INFO "perfmon: %s PMU detected, %u PMCs, %u PMDs, %u counters (%lu bits)\n",
6518 pmu_conf->pmu_name,
6519 pmu_conf->num_pmcs,
6520 pmu_conf->num_pmds,
6521 pmu_conf->num_counters,
6522 ffz(pmu_conf->ovfl_val));
6523
6524 /* sanity check */
6525 if (pmu_conf->num_pmds >= IA64_NUM_PMD_REGS || pmu_conf->num_pmcs >= IA64_NUM_PMC_REGS) {
6526 printk(KERN_ERR "perfmon: not enough pmc/pmd, perfmon disabled\n");
6527 pmu_conf = NULL;
6528 return -1;
6529 }
6530
6531 /*
6532 * create /proc/perfmon (mostly for debugging purposes)
6533 */
6534 perfmon_dir = create_proc_entry("perfmon", S_IRUGO, NULL);
6535 if (perfmon_dir == NULL) {
6536 printk(KERN_ERR "perfmon: cannot create /proc entry, perfmon disabled\n");
6537 pmu_conf = NULL;
6538 return -1;
6539 }
6540 /*
6541 * install customized file operations for /proc/perfmon entry
6542 */
6543 perfmon_dir->proc_fops = &pfm_proc_fops;
6544
6545 /*
6546 * create /proc/sys/kernel/perfmon (for debugging purposes)
6547 */
6548 pfm_sysctl_header = register_sysctl_table(pfm_sysctl_root, 0);
6549
6550 /*
6551 * initialize all our spinlocks
6552 */
6553 spin_lock_init(&pfm_sessions.pfs_lock);
6554 spin_lock_init(&pfm_buffer_fmt_lock);
6555
6556 init_pfm_fs();
6557
6558 for(i=0; i < NR_CPUS; i++) pfm_stats[i].pfm_ovfl_intr_cycles_min = ~0UL;
6559
6560 return 0;
6561}
6562
6563__initcall(pfm_init);
6564
6565/*
6566 * this function is called before pfm_init()
6567 */
6568void
6569pfm_init_percpu (void)
6570{
6571 /*
6572 * make sure no measurement is active
6573 * (may inherit programmed PMCs from EFI).
6574 */
6575 pfm_clear_psr_pp();
6576 pfm_clear_psr_up();
6577
6578 /*
6579 * we run with the PMU not frozen at all times
6580 */
6581 pfm_unfreeze_pmu();
6582
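	/*
	 * the handler for the per-CPU perfmon vector only needs to be
	 * registered once; let the boot CPU (CPU 0) do it
	 */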
6583 if (smp_processor_id() == 0)
6584 register_percpu_irq(IA64_PERFMON_VECTOR, &perfmon_irqaction);
6585
6586 ia64_setreg(_IA64_REG_CR_PMV, IA64_PERFMON_VECTOR);
6587 ia64_srlz_d();
6588}
6589
6590/*
6591 * used for debug purposes only
6592 */
6593void
6594dump_pmu_state(const char *from)
6595{
6596 struct task_struct *task;
6597 struct thread_struct *t;
6598 struct pt_regs *regs;
6599 pfm_context_t *ctx;
6600 unsigned long psr, dcr, info, flags;
6601 int i, this_cpu;
6602
6603 local_irq_save(flags);
6604
6605 this_cpu = smp_processor_id();
6606 regs = ia64_task_regs(current);
6607 info = PFM_CPUINFO_GET();
6608 dcr = ia64_getreg(_IA64_REG_CR_DCR);
6609
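	/*
	 * nothing to dump if no monitoring is active on this CPU
	 */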
6610 if (info == 0 && ia64_psr(regs)->pp == 0 && (dcr & IA64_DCR_PP) == 0) {
6611 local_irq_restore(flags);
6612 return;
6613 }
6614
6615 printk("CPU%d from %s() current [%d] iip=0x%lx %s\n",
6616 this_cpu,
6617 from,
6618 current->pid,
6619 regs->cr_iip,
6620 current->comm);
6621
6622 task = GET_PMU_OWNER();
6623 ctx = GET_PMU_CTX();
6624
6625 printk("->CPU%d owner [%d] ctx=%p\n", this_cpu, task ? task->pid : -1, ctx);
6626
6627 psr = pfm_get_psr();
6628
6629 printk("->CPU%d pmc0=0x%lx psr.pp=%d psr.up=%d dcr.pp=%d syst_info=0x%lx user_psr.up=%d user_psr.pp=%d\n",
6630 this_cpu,
6631 ia64_get_pmc(0),
6632 psr & IA64_PSR_PP ? 1 : 0,
6633 psr & IA64_PSR_UP ? 1 : 0,
6634 dcr & IA64_DCR_PP ? 1 : 0,
6635 info,
6636 ia64_psr(regs)->up,
6637 ia64_psr(regs)->pp);
6638
6639 ia64_psr(regs)->up = 0;
6640 ia64_psr(regs)->pp = 0;
6641
6642 t = &current->thread;
6643
6644 for (i=1; PMC_IS_LAST(i) == 0; i++) {
6645 if (PMC_IS_IMPL(i) == 0) continue;
6646 printk("->CPU%d pmc[%d]=0x%lx thread_pmc[%d]=0x%lx\n", this_cpu, i, ia64_get_pmc(i), i, t->pmcs[i]);
6647 }
6648
6649 for (i=1; PMD_IS_LAST(i) == 0; i++) {
6650 if (PMD_IS_IMPL(i) == 0) continue;
6651 printk("->CPU%d pmd[%d]=0x%lx thread_pmd[%d]=0x%lx\n", this_cpu, i, ia64_get_pmd(i), i, t->pmds[i]);
6652 }
6653
6654 if (ctx) {
		printk("->CPU%d ctx_state=%d vaddr=%p addr=%p msgq_head=%d msgq_tail=%d saved_psr_up=0x%lx\n",
6656 this_cpu,
6657 ctx->ctx_state,
6658 ctx->ctx_smpl_vaddr,
6659 ctx->ctx_smpl_hdr,
6660 ctx->ctx_msgq_head,
6661 ctx->ctx_msgq_tail,
6662 ctx->ctx_saved_psr_up);
6663 }
6664 local_irq_restore(flags);
6665}
6666
6667/*
6668 * called from process.c:copy_thread(). task is new child.
6669 */
6670void
6671pfm_inherit(struct task_struct *task, struct pt_regs *regs)
6672{
6673 struct thread_struct *thread;
6674
6675 DPRINT(("perfmon: pfm_inherit clearing state for [%d]\n", task->pid));
6676
6677 thread = &task->thread;
6678
6679 /*
6680 * cut links inherited from parent (current)
6681 */
6682 thread->pfm_context = NULL;
6683
6684 PFM_SET_WORK_PENDING(task, 0);
6685
6686 /*
	 * the psr bits are already set properly in copy_thread()
6688 */
6689}
6690#else /* !CONFIG_PERFMON */
6691asmlinkage long
6692sys_perfmonctl (int fd, int cmd, void *arg, int count)
6693{
6694 return -ENOSYS;
6695}
6696#endif /* CONFIG_PERFMON */