1/*
2 * This file implements the perfmon-2 subsystem which is used
3 * to program the IA-64 Performance Monitoring Unit (PMU).
4 *
5 * The initial version of perfmon.c was written by
6 * Ganesh Venkitachalam, IBM Corp.
7 *
8 * Then it was modified for perfmon-1.x by Stephane Eranian and
9 * David Mosberger, Hewlett Packard Co.
10 *
11 * Version Perfmon-2.x is a rewrite of perfmon-1.x
12 * by Stephane Eranian, Hewlett Packard Co.
13 *
14 * Copyright (C) 1999-2005 Hewlett Packard Co
15 * Stephane Eranian <eranian@hpl.hp.com>
16 * David Mosberger-Tang <davidm@hpl.hp.com>
17 *
18 * More information about perfmon available at:
19 * http://www.hpl.hp.com/research/linux/perfmon
20 */
21
22#include <linux/module.h>
23#include <linux/kernel.h>
24#include <linux/sched.h>
25#include <linux/interrupt.h>
26#include <linux/proc_fs.h>
27#include <linux/seq_file.h>
28#include <linux/init.h>
29#include <linux/vmalloc.h>
30#include <linux/mm.h>
31#include <linux/sysctl.h>
32#include <linux/list.h>
33#include <linux/file.h>
34#include <linux/poll.h>
35#include <linux/vfs.h>
36#include <linux/smp.h>
37#include <linux/pagemap.h>
38#include <linux/mount.h>
39#include <linux/bitops.h>
40#include <linux/capability.h>
41#include <linux/rcupdate.h>
42#include <linux/completion.h>
43
44#include <asm/errno.h>
45#include <asm/intrinsics.h>
46#include <asm/page.h>
47#include <asm/perfmon.h>
48#include <asm/processor.h>
49#include <asm/signal.h>
50#include <asm/system.h>
51#include <asm/uaccess.h>
52#include <asm/delay.h>
53
54#ifdef CONFIG_PERFMON
55/*
56 * perfmon context state
57 */
58#define PFM_CTX_UNLOADED 1 /* context is not loaded onto any task */
59#define PFM_CTX_LOADED 2 /* context is loaded onto a task */
60#define PFM_CTX_MASKED 3 /* context is loaded but monitoring is masked due to overflow */
61#define PFM_CTX_ZOMBIE 4 /* owner of the context is closing it */
62
63#define PFM_INVALID_ACTIVATION (~0UL)
64
65#define PFM_NUM_PMC_REGS 64 /* PMC save area for ctxsw */
66#define PFM_NUM_PMD_REGS 64 /* PMD save area for ctxsw */
67
68/*
69 * depth of message queue
70 */
71#define PFM_MAX_MSGS 32
72#define PFM_CTXQ_EMPTY(g) ((g)->ctx_msgq_head == (g)->ctx_msgq_tail)
73
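/*
 * Note (added for clarity, not in the original source): the message
 * queue is a fixed ring of PFM_MAX_MSGS slots indexed modulo 32.
 * pfm_get_new_msg() below refuses to advance the tail onto the head,
 * so the ring holds at most PFM_MAX_MSGS-1 = 31 pending messages.
 */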
74/*
75 * type of a PMU register (bitmask).
76 * bitmask structure:
77 * bit0 : register implemented
78 * bit1 : end marker
79 * bit2-3 : reserved
80 * bit4 : pmc has pmc.pm
81 * bit5 : pmc controls a counter (has pmc.oi), pmd is used as counter
82 * bit6-7 : register type
83 * bit8-31: reserved
84 */
85#define PFM_REG_NOTIMPL 0x0 /* not implemented at all */
86#define PFM_REG_IMPL 0x1 /* register implemented */
87#define PFM_REG_END 0x2 /* end marker */
88#define PFM_REG_MONITOR (0x1<<4|PFM_REG_IMPL) /* a PMC with a pmc.pm field only */
89#define PFM_REG_COUNTING (0x2<<4|PFM_REG_MONITOR) /* a monitor + pmc.oi+ PMD used as a counter */
90#define PFM_REG_CONTROL (0x4<<4|PFM_REG_IMPL) /* PMU control register */
91#define PFM_REG_CONFIG (0x8<<4|PFM_REG_IMPL) /* configuration register */
92#define PFM_REG_BUFFER (0xc<<4|PFM_REG_IMPL) /* PMD used as buffer */
93
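/*
 * Illustrative note (not in the original source): the classes above
 * compose by OR-ing bits, e.g.
 *
 *	PFM_REG_COUNTING == (0x2<<4) | (0x1<<4) | 0x1 == 0x31
 *
 * so a test such as (type & PFM_REG_COUNTING) == PFM_REG_COUNTING
 * (see PMD_IS_COUNTING below) matches only registers that are at once
 * implemented (bit0), monitors (bit4) and counters (bit5).
 */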
94#define PMC_IS_LAST(i) (pmu_conf->pmc_desc[i].type & PFM_REG_END)
95#define PMD_IS_LAST(i) (pmu_conf->pmd_desc[i].type & PFM_REG_END)
96
97#define PMC_OVFL_NOTIFY(ctx, i) ((ctx)->ctx_pmds[i].flags & PFM_REGFL_OVFL_NOTIFY)
98
99/* i assumed unsigned */
100#define PMC_IS_IMPL(i) (i< PMU_MAX_PMCS && (pmu_conf->pmc_desc[i].type & PFM_REG_IMPL))
101#define PMD_IS_IMPL(i) (i< PMU_MAX_PMDS && (pmu_conf->pmd_desc[i].type & PFM_REG_IMPL))
102
103/* XXX: these assume that register i is implemented */
104#define PMD_IS_COUNTING(i) ((pmu_conf->pmd_desc[i].type & PFM_REG_COUNTING) == PFM_REG_COUNTING)
105#define PMC_IS_COUNTING(i) ((pmu_conf->pmc_desc[i].type & PFM_REG_COUNTING) == PFM_REG_COUNTING)
106#define PMC_IS_MONITOR(i) ((pmu_conf->pmc_desc[i].type & PFM_REG_MONITOR) == PFM_REG_MONITOR)
107#define PMC_IS_CONTROL(i) ((pmu_conf->pmc_desc[i].type & PFM_REG_CONTROL) == PFM_REG_CONTROL)
108
109#define PMC_DFL_VAL(i) pmu_conf->pmc_desc[i].default_value
110#define PMC_RSVD_MASK(i) pmu_conf->pmc_desc[i].reserved_mask
111#define PMD_PMD_DEP(i) pmu_conf->pmd_desc[i].dep_pmd[0]
112#define PMC_PMD_DEP(i) pmu_conf->pmc_desc[i].dep_pmd[0]
113
114#define PFM_NUM_IBRS IA64_NUM_DBG_REGS
115#define PFM_NUM_DBRS IA64_NUM_DBG_REGS
116
117#define CTX_OVFL_NOBLOCK(c) ((c)->ctx_fl_block == 0)
118#define CTX_HAS_SMPL(c) ((c)->ctx_fl_is_sampling)
119#define PFM_CTX_TASK(h) (h)->ctx_task
120
121#define PMU_PMC_OI 5 /* position of pmc.oi bit */
122
123/* XXX: does not support more than 64 PMDs */
124#define CTX_USED_PMD(ctx, mask) (ctx)->ctx_used_pmds[0] |= (mask)
125#define CTX_IS_USED_PMD(ctx, c) (((ctx)->ctx_used_pmds[0] & (1UL << (c))) != 0UL)
126
127#define CTX_USED_MONITOR(ctx, mask) (ctx)->ctx_used_monitors[0] |= (mask)
128
129#define CTX_USED_IBR(ctx,n) (ctx)->ctx_used_ibrs[(n)>>6] |= 1UL<< ((n) % 64)
130#define CTX_USED_DBR(ctx,n) (ctx)->ctx_used_dbrs[(n)>>6] |= 1UL<< ((n) % 64)
131#define CTX_USES_DBREGS(ctx) (((pfm_context_t *)(ctx))->ctx_fl_using_dbreg==1)
132#define PFM_CODE_RR 0 /* requesting code range restriction */
133#define PFM_DATA_RR 1 /* requesting data range restriction */
134
135#define PFM_CPUINFO_CLEAR(v) pfm_get_cpu_var(pfm_syst_info) &= ~(v)
136#define PFM_CPUINFO_SET(v) pfm_get_cpu_var(pfm_syst_info) |= (v)
137#define PFM_CPUINFO_GET() pfm_get_cpu_var(pfm_syst_info)
138
139#define RDEP(x) (1UL<<(x))
140
141/*
142 * context protection macros
143 * in SMP:
144 * - we need to protect against CPU concurrency (spin_lock)
145 * - we need to protect against PMU overflow interrupts (local_irq_disable)
146 * in UP:
147 * - we need to protect against PMU overflow interrupts (local_irq_disable)
148 *
149 * spin_lock_irqsave()/spin_unlock_irqrestore():
150 * in SMP: local_irq_disable + spin_lock
151 * in UP : local_irq_disable
152 *
153 * spin_lock()/spin_unlock():
154 * in UP : removed automatically
155 * in SMP: protect against context accesses from other CPU. interrupts
156 * are not masked. This is useful for the PMU interrupt handler
157 * because we know we will not get PMU concurrency in that code.
158 */
159#define PROTECT_CTX(c, f) \
160 do { \
161 DPRINT(("spinlock_irq_save ctx %p by [%d]\n", c, task_pid_nr(current))); \
162 spin_lock_irqsave(&(c)->ctx_lock, f); \
163 DPRINT(("spinlocked ctx %p by [%d]\n", c, task_pid_nr(current))); \
164 } while(0)
165
166#define UNPROTECT_CTX(c, f) \
167 do { \
168 DPRINT(("spinlock_irq_restore ctx %p by [%d]\n", c, task_pid_nr(current))); \
169 spin_unlock_irqrestore(&(c)->ctx_lock, f); \
170 } while(0)
171
172#define PROTECT_CTX_NOPRINT(c, f) \
173 do { \
174 spin_lock_irqsave(&(c)->ctx_lock, f); \
175 } while(0)
176
177
178#define UNPROTECT_CTX_NOPRINT(c, f) \
179 do { \
180 spin_unlock_irqrestore(&(c)->ctx_lock, f); \
181 } while(0)
182
183
184#define PROTECT_CTX_NOIRQ(c) \
185 do { \
186 spin_lock(&(c)->ctx_lock); \
187 } while(0)
188
189#define UNPROTECT_CTX_NOIRQ(c) \
190 do { \
191 spin_unlock(&(c)->ctx_lock); \
192 } while(0)
193
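/*
 * Illustrative usage sketch (not part of the original source),
 * following the locking rules described above; "flags" must be an
 * unsigned long in the caller's frame:
 *
 *	unsigned long flags;
 *
 *	PROTECT_CTX(ctx, flags);	// irqs masked, ctx_lock held
 *	... update context state ...
 *	UNPROTECT_CTX(ctx, flags);	// lock dropped, irq state restored
 */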
194
195#ifdef CONFIG_SMP
196
197#define GET_ACTIVATION() pfm_get_cpu_var(pmu_activation_number)
198#define INC_ACTIVATION() pfm_get_cpu_var(pmu_activation_number)++
199#define SET_ACTIVATION(c) (c)->ctx_last_activation = GET_ACTIVATION()
200
201#else /* !CONFIG_SMP */
202#define SET_ACTIVATION(t) do {} while(0)
203#define GET_ACTIVATION(t) do {} while(0)
204#define INC_ACTIVATION(t) do {} while(0)
205#endif /* CONFIG_SMP */
206
207#define SET_PMU_OWNER(t, c) do { pfm_get_cpu_var(pmu_owner) = (t); pfm_get_cpu_var(pmu_ctx) = (c); } while(0)
208#define GET_PMU_OWNER() pfm_get_cpu_var(pmu_owner)
209#define GET_PMU_CTX() pfm_get_cpu_var(pmu_ctx)
210
211#define LOCK_PFS(g) spin_lock_irqsave(&pfm_sessions.pfs_lock, g)
212#define UNLOCK_PFS(g) spin_unlock_irqrestore(&pfm_sessions.pfs_lock, g)
213
214#define PFM_REG_RETFLAG_SET(flags, val) do { flags &= ~PFM_REG_RETFL_MASK; flags |= (val); } while(0)
215
216/*
217 * cmp0 must be the value of pmc0
218 */
219#define PMC0_HAS_OVFL(cmp0) (cmp0 & ~0x1UL)
220
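/*
 * Note (added for clarity, not in the original source): bit 0 of pmc0
 * is the PMU freeze bit; the higher bits flag which counters
 * overflowed. Masking with ~0x1UL therefore asks "did any counter
 * overflow?" independently of the freeze state.
 */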
221#define PFMFS_MAGIC 0xa0b4d889
222
223/*
224 * debugging
225 */
226#define PFM_DEBUGGING 1
227#ifdef PFM_DEBUGGING
228#define DPRINT(a) \
229 do { \
230 if (unlikely(pfm_sysctl.debug >0)) { printk("%s.%d: CPU%d [%d] ", __func__, __LINE__, smp_processor_id(), task_pid_nr(current)); printk a; } \
231 } while (0)
232
233#define DPRINT_ovfl(a) \
234 do { \
235 if (unlikely(pfm_sysctl.debug > 0 && pfm_sysctl.debug_ovfl >0)) { printk("%s.%d: CPU%d [%d] ", __func__, __LINE__, smp_processor_id(), task_pid_nr(current)); printk a; } \
236 } while (0)
237#endif
238
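/*
 * Illustrative usage (not in the original source): DPRINT takes a
 * complete, parenthesized printk() argument list, hence the double
 * parentheses at every call site, e.g.:
 *
 *	DPRINT(("ctx=%p state=%d\n", ctx, ctx->ctx_state));
 */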
239/*
240 * 64-bit software counter structure
241 *
242 * the next_reset_type is applied to the next call to pfm_reset_regs()
243 */
244typedef struct {
245 unsigned long val; /* virtual 64bit counter value */
246 unsigned long lval; /* last reset value */
247 unsigned long long_reset; /* reset value on sampling overflow */
248 unsigned long short_reset; /* reset value on overflow */
249 unsigned long reset_pmds[4]; /* which other pmds to reset when this counter overflows */
250 unsigned long smpl_pmds[4]; /* which pmds are accessed when the counter overflows */
251 unsigned long seed; /* seed for random-number generator */
252 unsigned long mask; /* mask for random-number generator */
253 unsigned int flags; /* notify/do not notify */
254 unsigned long eventid; /* overflow event identifier */
255} pfm_counter_t;
256
257/*
258 * context flags
259 */
260typedef struct {
261 unsigned int block:1; /* when 1, task will block on user notifications */
262 unsigned int system:1; /* do system wide monitoring */
263 unsigned int using_dbreg:1; /* using range restrictions (debug registers) */
264 unsigned int is_sampling:1; /* true if using a custom format */
265 unsigned int excl_idle:1; /* exclude idle task in system wide session */
266 unsigned int going_zombie:1; /* context is zombie (MASKED+blocking) */
267 unsigned int trap_reason:2; /* reason for going into pfm_handle_work() */
268 unsigned int no_msg:1; /* no message sent on overflow */
269 unsigned int can_restart:1; /* allowed to issue a PFM_RESTART */
270 unsigned int reserved:22;
271} pfm_context_flags_t;
272
273#define PFM_TRAP_REASON_NONE 0x0 /* default value */
274#define PFM_TRAP_REASON_BLOCK 0x1 /* we need to block on overflow */
275#define PFM_TRAP_REASON_RESET 0x2 /* we need to reset PMDs */
276
277
278/*
279 * perfmon context: encapsulates all the state of a monitoring session
280 */
281
282typedef struct pfm_context {
283 spinlock_t ctx_lock; /* context protection */
284
285 pfm_context_flags_t ctx_flags; /* bitmask of flags (block reason incl.) */
286 unsigned int ctx_state; /* state: active/inactive (no bitfield) */
287
288 struct task_struct *ctx_task; /* task to which context is attached */
289
290 unsigned long ctx_ovfl_regs[4]; /* which registers overflowed (notification) */
291
292 struct completion ctx_restart_done; /* used for blocking notification mode */
293
294 unsigned long ctx_used_pmds[4]; /* bitmask of PMD used */
295 unsigned long ctx_all_pmds[4]; /* bitmask of all accessible PMDs */
296 unsigned long ctx_reload_pmds[4]; /* bitmask of force reload PMD on ctxsw in */
297
298 unsigned long ctx_all_pmcs[4]; /* bitmask of all accessible PMCs */
299 unsigned long ctx_reload_pmcs[4]; /* bitmask of force reload PMC on ctxsw in */
300 unsigned long ctx_used_monitors[4]; /* bitmask of monitor PMC being used */
301
302 unsigned long ctx_pmcs[PFM_NUM_PMC_REGS]; /* saved copies of PMC values */
303
304 unsigned int ctx_used_ibrs[1]; /* bitmask of used IBR (speedup ctxsw in) */
305 unsigned int ctx_used_dbrs[1]; /* bitmask of used DBR (speedup ctxsw in) */
306 unsigned long ctx_dbrs[IA64_NUM_DBG_REGS]; /* DBR values (cache) when not loaded */
307 unsigned long ctx_ibrs[IA64_NUM_DBG_REGS]; /* IBR values (cache) when not loaded */
308
309 pfm_counter_t ctx_pmds[PFM_NUM_PMD_REGS]; /* software state for PMDS */
310
311 unsigned long th_pmcs[PFM_NUM_PMC_REGS]; /* PMC thread save state */
312 unsigned long th_pmds[PFM_NUM_PMD_REGS]; /* PMD thread save state */
313
314 u64 ctx_saved_psr_up; /* only contains psr.up value */
315
316 unsigned long ctx_last_activation; /* context last activation number for last_cpu */
317 unsigned int ctx_last_cpu; /* CPU id of current or last CPU used (SMP only) */
318 unsigned int ctx_cpu; /* cpu to which perfmon is applied (system wide) */
319
320 int ctx_fd; /* file descriptor used by this context */
321 pfm_ovfl_arg_t ctx_ovfl_arg; /* argument to custom buffer format handler */
322
323 pfm_buffer_fmt_t *ctx_buf_fmt; /* buffer format callbacks */
324 void *ctx_smpl_hdr; /* points to sampling buffer header kernel vaddr */
325 unsigned long ctx_smpl_size; /* size of sampling buffer */
326 void *ctx_smpl_vaddr; /* user level virtual address of smpl buffer */
327
328 wait_queue_head_t ctx_msgq_wait;
329 pfm_msg_t ctx_msgq[PFM_MAX_MSGS];
330 int ctx_msgq_head;
331 int ctx_msgq_tail;
332 struct fasync_struct *ctx_async_queue;
333
334 wait_queue_head_t ctx_zombieq; /* termination cleanup wait queue */
335} pfm_context_t;
336
337/*
338 * magic number used to verify that structure is really
339 * a perfmon context
340 */
341#define PFM_IS_FILE(f) ((f)->f_op == &pfm_file_ops)
342
343#define PFM_GET_CTX(t) ((pfm_context_t *)(t)->thread.pfm_context)
344
345#ifdef CONFIG_SMP
346#define SET_LAST_CPU(ctx, v) (ctx)->ctx_last_cpu = (v)
347#define GET_LAST_CPU(ctx) (ctx)->ctx_last_cpu
348#else
349#define SET_LAST_CPU(ctx, v) do {} while(0)
350#define GET_LAST_CPU(ctx) do {} while(0)
351#endif
352
353
354#define ctx_fl_block ctx_flags.block
355#define ctx_fl_system ctx_flags.system
356#define ctx_fl_using_dbreg ctx_flags.using_dbreg
357#define ctx_fl_is_sampling ctx_flags.is_sampling
358#define ctx_fl_excl_idle ctx_flags.excl_idle
359#define ctx_fl_going_zombie ctx_flags.going_zombie
360#define ctx_fl_trap_reason ctx_flags.trap_reason
361#define ctx_fl_no_msg ctx_flags.no_msg
362#define ctx_fl_can_restart ctx_flags.can_restart
363
364#define PFM_SET_WORK_PENDING(t, v) do { (t)->thread.pfm_needs_checking = v; } while(0);
365#define PFM_GET_WORK_PENDING(t) (t)->thread.pfm_needs_checking
366
367/*
368 * global information about all sessions
369 * mostly used to synchronize between system wide and per-process
370 */
371typedef struct {
372 spinlock_t pfs_lock; /* lock the structure */
373
374 unsigned int pfs_task_sessions; /* number of per task sessions */
375 unsigned int pfs_sys_sessions; /* number of per system wide sessions */
376 unsigned int pfs_sys_use_dbregs; /* incremented when a system wide session uses debug regs */
377 unsigned int pfs_ptrace_use_dbregs; /* incremented when a process uses debug regs */
378 struct task_struct *pfs_sys_session[NR_CPUS]; /* points to the task owning a system-wide session */
379} pfm_session_t;
380
381/*
382 * information about a PMC or PMD.
383 * dep_pmd[]: a bitmask of dependent PMD registers
384 * dep_pmc[]: a bitmask of dependent PMC registers
385 */
386typedef int (*pfm_reg_check_t)(struct task_struct *task, pfm_context_t *ctx, unsigned int cnum, unsigned long *val, struct pt_regs *regs);
387typedef struct {
388 unsigned int type;
389 int pm_pos;
390 unsigned long default_value; /* power-on default value */
391 unsigned long reserved_mask; /* bitmask of reserved bits */
392 pfm_reg_check_t read_check;
393 pfm_reg_check_t write_check;
394 unsigned long dep_pmd[4];
395 unsigned long dep_pmc[4];
396} pfm_reg_desc_t;
397
398/* assume cnum is a valid monitor */
399#define PMC_PM(cnum, val) (((val) >> (pmu_conf->pmc_desc[cnum].pm_pos)) & 0x1)
400
401/*
402 * This structure is initialized at boot time and contains
403 * a description of the PMU main characteristics.
404 *
405 * If the probe function is defined, detection is based
406 * on its return value:
407 * - 0 means recognized PMU
408 * - anything else means not supported
409 * When the probe function is not defined, then the pmu_family field
410 * is used and it must match the host CPU family such that:
411 * - cpu->family & config->pmu_family != 0
412 */
413typedef struct {
414 unsigned long ovfl_val; /* overflow value for counters */
415
416 pfm_reg_desc_t *pmc_desc; /* detailed PMC register dependencies descriptions */
417 pfm_reg_desc_t *pmd_desc; /* detailed PMD register dependencies descriptions */
418
419 unsigned int num_pmcs; /* number of PMCS: computed at init time */
420 unsigned int num_pmds; /* number of PMDS: computed at init time */
421 unsigned long impl_pmcs[4]; /* bitmask of implemented PMCS */
422 unsigned long impl_pmds[4]; /* bitmask of implemented PMDS */
423
424 char *pmu_name; /* PMU family name */
425 unsigned int pmu_family; /* cpuid family pattern used to identify pmu */
426 unsigned int flags; /* pmu specific flags */
427 unsigned int num_ibrs; /* number of IBRS: computed at init time */
428 unsigned int num_dbrs; /* number of DBRS: computed at init time */
429 unsigned int num_counters; /* PMC/PMD counting pairs : computed at init time */
430 int (*probe)(void); /* customized probe routine */
431 unsigned int use_rr_dbregs:1; /* set if debug registers used for range restriction */
432} pmu_config_t;
433/*
434 * PMU specific flags
435 */
436#define PFM_PMU_IRQ_RESEND 1 /* PMU needs explicit IRQ resend */
437
438/*
439 * debug register related type definitions
440 */
441typedef struct {
442 unsigned long ibr_mask:56;
443 unsigned long ibr_plm:4;
444 unsigned long ibr_ig:3;
445 unsigned long ibr_x:1;
446} ibr_mask_reg_t;
447
448typedef struct {
449 unsigned long dbr_mask:56;
450 unsigned long dbr_plm:4;
451 unsigned long dbr_ig:2;
452 unsigned long dbr_w:1;
453 unsigned long dbr_r:1;
454} dbr_mask_reg_t;
455
456typedef union {
457 unsigned long val;
458 ibr_mask_reg_t ibr;
459 dbr_mask_reg_t dbr;
460} dbreg_t;
461
462
463/*
464 * perfmon command descriptions
465 */
466typedef struct {
467 int (*cmd_func)(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs);
468 char *cmd_name;
469 int cmd_flags;
470 unsigned int cmd_narg;
471 size_t cmd_argsize;
472 int (*cmd_getsize)(void *arg, size_t *sz);
473} pfm_cmd_desc_t;
474
475#define PFM_CMD_FD 0x01 /* command requires a file descriptor */
476#define PFM_CMD_ARG_READ 0x02 /* command must read argument(s) */
477#define PFM_CMD_ARG_RW 0x04 /* command must read/write argument(s) */
478#define PFM_CMD_STOP 0x08 /* command does not work on zombie context */
479
480
481#define PFM_CMD_NAME(cmd) pfm_cmd_tab[(cmd)].cmd_name
482#define PFM_CMD_READ_ARG(cmd) (pfm_cmd_tab[(cmd)].cmd_flags & PFM_CMD_ARG_READ)
483#define PFM_CMD_RW_ARG(cmd) (pfm_cmd_tab[(cmd)].cmd_flags & PFM_CMD_ARG_RW)
484#define PFM_CMD_USE_FD(cmd) (pfm_cmd_tab[(cmd)].cmd_flags & PFM_CMD_FD)
485#define PFM_CMD_STOPPED(cmd) (pfm_cmd_tab[(cmd)].cmd_flags & PFM_CMD_STOP)
486
487#define PFM_CMD_ARG_MANY -1 /* cannot be zero */
488
489typedef struct {
490 unsigned long pfm_spurious_ovfl_intr_count; /* keep track of spurious ovfl interrupts */
491 unsigned long pfm_replay_ovfl_intr_count; /* keep track of replayed ovfl interrupts */
492 unsigned long pfm_ovfl_intr_count; /* keep track of ovfl interrupts */
493 unsigned long pfm_ovfl_intr_cycles; /* cycles spent processing ovfl interrupts */
494 unsigned long pfm_ovfl_intr_cycles_min; /* min cycles spent processing ovfl interrupts */
495 unsigned long pfm_ovfl_intr_cycles_max; /* max cycles spent processing ovfl interrupts */
496 unsigned long pfm_smpl_handler_calls;
497 unsigned long pfm_smpl_handler_cycles;
498 char pad[SMP_CACHE_BYTES] ____cacheline_aligned;
499} pfm_stats_t;
500
501/*
502 * perfmon internal variables
503 */
504static pfm_stats_t pfm_stats[NR_CPUS];
505static pfm_session_t pfm_sessions; /* global sessions information */
506
507static DEFINE_SPINLOCK(pfm_alt_install_check);
508static pfm_intr_handler_desc_t *pfm_alt_intr_handler;
509
510static struct proc_dir_entry *perfmon_dir;
511static pfm_uuid_t pfm_null_uuid = {0,};
512
513static spinlock_t pfm_buffer_fmt_lock;
514static LIST_HEAD(pfm_buffer_fmt_list);
515
516static pmu_config_t *pmu_conf;
517
518/* sysctl() controls */
519pfm_sysctl_t pfm_sysctl;
520EXPORT_SYMBOL(pfm_sysctl);
521
522static ctl_table pfm_ctl_table[]={
523 {
524 .ctl_name = CTL_UNNUMBERED,
525 .procname = "debug",
526 .data = &pfm_sysctl.debug,
527 .maxlen = sizeof(int),
528 .mode = 0666,
529 .proc_handler = &proc_dointvec,
530 },
531 {
532 .ctl_name = CTL_UNNUMBERED,
533 .procname = "debug_ovfl",
534 .data = &pfm_sysctl.debug_ovfl,
535 .maxlen = sizeof(int),
536 .mode = 0666,
537 .proc_handler = &proc_dointvec,
538 },
539 {
540 .ctl_name = CTL_UNNUMBERED,
541 .procname = "fastctxsw",
542 .data = &pfm_sysctl.fastctxsw,
543 .maxlen = sizeof(int),
544 .mode = 0600,
545 .proc_handler = &proc_dointvec,
546 },
547 {
548 .ctl_name = CTL_UNNUMBERED,
549 .procname = "expert_mode",
550 .data = &pfm_sysctl.expert_mode,
551 .maxlen = sizeof(int),
552 .mode = 0600,
553 .proc_handler = &proc_dointvec,
554 },
555 {}
556};
557static ctl_table pfm_sysctl_dir[] = {
558 {
559 .ctl_name = CTL_UNNUMBERED,
560 .procname = "perfmon",
561 .mode = 0555,
562 .child = pfm_ctl_table,
563 },
564 {}
565};
566static ctl_table pfm_sysctl_root[] = {
567 {
568 .ctl_name = CTL_KERN,
569 .procname = "kernel",
570 .mode = 0555,
571 .child = pfm_sysctl_dir,
572 },
573 {}
574};
575static struct ctl_table_header *pfm_sysctl_header;
576
577static int pfm_context_unload(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs);
578
579#define pfm_get_cpu_var(v) __ia64_per_cpu_var(v)
580#define pfm_get_cpu_data(a,b) per_cpu(a, b)
581
582static inline void
583pfm_put_task(struct task_struct *task)
584{
585 if (task != current) put_task_struct(task);
586}
587
588static inline void
589pfm_reserve_page(unsigned long a)
590{
591 SetPageReserved(vmalloc_to_page((void *)a));
592}
593static inline void
594pfm_unreserve_page(unsigned long a)
595{
596 ClearPageReserved(vmalloc_to_page((void*)a));
597}
598
599static inline unsigned long
600pfm_protect_ctx_ctxsw(pfm_context_t *x)
601{
602 spin_lock(&(x)->ctx_lock);
603 return 0UL;
604}
605
606static inline void
607pfm_unprotect_ctx_ctxsw(pfm_context_t *x, unsigned long f)
608{
609 spin_unlock(&(x)->ctx_lock);
610}
611
612static inline unsigned int
613pfm_do_munmap(struct mm_struct *mm, unsigned long addr, size_t len, int acct)
614{
615 return do_munmap(mm, addr, len);
616}
617
618static inline unsigned long
619pfm_get_unmapped_area(struct file *file, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags, unsigned long exec)
620{
621 return get_unmapped_area(file, addr, len, pgoff, flags);
622}
623
624
625static int
626pfmfs_get_sb(struct file_system_type *fs_type, int flags, const char *dev_name, void *data,
627 struct vfsmount *mnt)
628{
629 return get_sb_pseudo(fs_type, "pfm:", NULL, PFMFS_MAGIC, mnt);
630}
631
632static struct file_system_type pfm_fs_type = {
633 .name = "pfmfs",
634 .get_sb = pfmfs_get_sb,
635 .kill_sb = kill_anon_super,
636};
637
638DEFINE_PER_CPU(unsigned long, pfm_syst_info);
639DEFINE_PER_CPU(struct task_struct *, pmu_owner);
640DEFINE_PER_CPU(pfm_context_t *, pmu_ctx);
641DEFINE_PER_CPU(unsigned long, pmu_activation_number);
642EXPORT_PER_CPU_SYMBOL_GPL(pfm_syst_info);
643
644
645/* forward declaration */
646static const struct file_operations pfm_file_ops;
647
648/*
649 * forward declarations
650 */
651#ifndef CONFIG_SMP
652static void pfm_lazy_save_regs (struct task_struct *ta);
653#endif
654
655void dump_pmu_state(const char *);
656static int pfm_write_ibr_dbr(int mode, pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs);
657
658#include "perfmon_itanium.h"
659#include "perfmon_mckinley.h"
660#include "perfmon_montecito.h"
661#include "perfmon_generic.h"
662
663static pmu_config_t *pmu_confs[]={
664 &pmu_conf_mont,
665 &pmu_conf_mck,
666 &pmu_conf_ita,
667 &pmu_conf_gen, /* must be last */
668 NULL
669};
670
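/*
 * Illustrative sketch (the real probe loop lives in the init path
 * further down this file): detection walks pmu_confs[] in order and,
 * per the pmu_config_t comment above, picks the first entry whose
 * probe() returns 0 or, lacking a probe(), whose pmu_family bits
 * overlap the CPU family; pmu_conf_gen is last as the catch-all.
 *
 *	for (i = 0; pmu_confs[i]; i++) {
 *		if (pmu_confs[i]->probe) {
 *			if (pmu_confs[i]->probe() == 0) break;
 *		} else if (pmu_confs[i]->pmu_family & family) {
 *			break;
 *		}
 *	}
 *	pmu_conf = pmu_confs[i];	// NULL means no supported PMU
 */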
671
672static int pfm_end_notify_user(pfm_context_t *ctx);
673
674static inline void
675pfm_clear_psr_pp(void)
676{
677 ia64_rsm(IA64_PSR_PP);
678 ia64_srlz_i();
679}
680
681static inline void
682pfm_set_psr_pp(void)
683{
684 ia64_ssm(IA64_PSR_PP);
685 ia64_srlz_i();
686}
687
688static inline void
689pfm_clear_psr_up(void)
690{
691 ia64_rsm(IA64_PSR_UP);
692 ia64_srlz_i();
693}
694
695static inline void
696pfm_set_psr_up(void)
697{
698 ia64_ssm(IA64_PSR_UP);
699 ia64_srlz_i();
700}
701
702static inline unsigned long
703pfm_get_psr(void)
704{
705 unsigned long tmp;
706 tmp = ia64_getreg(_IA64_REG_PSR);
707 ia64_srlz_i();
708 return tmp;
709}
710
711static inline void
712pfm_set_psr_l(unsigned long val)
713{
714 ia64_setreg(_IA64_REG_PSR_L, val);
715 ia64_srlz_i();
716}
717
718static inline void
719pfm_freeze_pmu(void)
720{
721 ia64_set_pmc(0,1UL);
722 ia64_srlz_d();
723}
724
725static inline void
726pfm_unfreeze_pmu(void)
727{
728 ia64_set_pmc(0,0UL);
729 ia64_srlz_d();
730}
731
732static inline void
733pfm_restore_ibrs(unsigned long *ibrs, unsigned int nibrs)
734{
735 int i;
736
737 for (i=0; i < nibrs; i++) {
738 ia64_set_ibr(i, ibrs[i]);
739 ia64_dv_serialize_instruction();
740 }
741 ia64_srlz_i();
742}
743
744static inline void
745pfm_restore_dbrs(unsigned long *dbrs, unsigned int ndbrs)
746{
747 int i;
748
749 for (i=0; i < ndbrs; i++) {
750 ia64_set_dbr(i, dbrs[i]);
751 ia64_dv_serialize_data();
752 }
753 ia64_srlz_d();
754}
755
756/*
757 * PMD[i] must be a counter. no check is made
758 */
759static inline unsigned long
760pfm_read_soft_counter(pfm_context_t *ctx, int i)
761{
762 return ctx->ctx_pmds[i].val + (ia64_get_pmd(i) & pmu_conf->ovfl_val);
763}
764
765/*
766 * PMD[i] must be a counter. no check is made
767 */
768static inline void
769pfm_write_soft_counter(pfm_context_t *ctx, int i, unsigned long val)
770{
771 unsigned long ovfl_val = pmu_conf->ovfl_val;
772
773 ctx->ctx_pmds[i].val = val & ~ovfl_val;
774 /*
775 * writing to the unimplemented part is ignored, so we do not need to
776 * mask off top part
777 */
778 ia64_set_pmd(i, val & ovfl_val);
779}
780
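/*
 * Worked example (added for clarity, not in the original source):
 * assume counters 47 bits wide, i.e. ovfl_val == (1UL<<47)-1.
 * pfm_write_soft_counter(ctx, i, v) then keeps bits 47-63 of v in
 * ctx_pmds[i].val and loads only bits 0-46 into the hardware PMD;
 * pfm_read_soft_counter() adds the two halves back together, giving
 * the illusion of a full 64-bit counter.
 */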
781static pfm_msg_t *
782pfm_get_new_msg(pfm_context_t *ctx)
783{
784 int idx, next;
785
786 next = (ctx->ctx_msgq_tail+1) % PFM_MAX_MSGS;
787
788 DPRINT(("ctx_fd=%p head=%d tail=%d\n", ctx, ctx->ctx_msgq_head, ctx->ctx_msgq_tail));
789 if (next == ctx->ctx_msgq_head) return NULL;
790
791 idx = ctx->ctx_msgq_tail;
792 ctx->ctx_msgq_tail = next;
793
794 DPRINT(("ctx=%p head=%d tail=%d msg=%d\n", ctx, ctx->ctx_msgq_head, ctx->ctx_msgq_tail, idx));
795
796 return ctx->ctx_msgq+idx;
797}
798
799static pfm_msg_t *
800pfm_get_next_msg(pfm_context_t *ctx)
801{
802 pfm_msg_t *msg;
803
804 DPRINT(("ctx=%p head=%d tail=%d\n", ctx, ctx->ctx_msgq_head, ctx->ctx_msgq_tail));
805
806 if (PFM_CTXQ_EMPTY(ctx)) return NULL;
807
808 /*
809 * get oldest message
810 */
811 msg = ctx->ctx_msgq+ctx->ctx_msgq_head;
812
813 /*
814 * and move forward
815 */
816 ctx->ctx_msgq_head = (ctx->ctx_msgq_head+1) % PFM_MAX_MSGS;
817
818 DPRINT(("ctx=%p head=%d tail=%d type=%d\n", ctx, ctx->ctx_msgq_head, ctx->ctx_msgq_tail, msg->pfm_gen_msg.msg_type));
819
820 return msg;
821}
822
823static void
824pfm_reset_msgq(pfm_context_t *ctx)
825{
826 ctx->ctx_msgq_head = ctx->ctx_msgq_tail = 0;
827 DPRINT(("ctx=%p msgq reset\n", ctx));
828}
829
830static void *
831pfm_rvmalloc(unsigned long size)
832{
833 void *mem;
834 unsigned long addr;
835
836 size = PAGE_ALIGN(size);
837 mem = vmalloc(size);
838 if (mem) {
839 //printk("perfmon: CPU%d pfm_rvmalloc(%ld)=%p\n", smp_processor_id(), size, mem);
840 memset(mem, 0, size);
841 addr = (unsigned long)mem;
842 while (size > 0) {
843 pfm_reserve_page(addr);
844 addr+=PAGE_SIZE;
845 size-=PAGE_SIZE;
846 }
847 }
848 return mem;
849}
850
851static void
852pfm_rvfree(void *mem, unsigned long size)
853{
854 unsigned long addr;
855
856 if (mem) {
857 DPRINT(("freeing physical buffer @%p size=%lu\n", mem, size));
858 addr = (unsigned long) mem;
859 while ((long) size > 0) {
860 pfm_unreserve_page(addr);
861 addr+=PAGE_SIZE;
862 size-=PAGE_SIZE;
863 }
864 vfree(mem);
865 }
866 return;
867}
868
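/*
 * Note (added for clarity, not in the original source): the buffer is
 * vmalloc'ed and every page is marked reserved so that it is left
 * alone by swapping and can safely be mapped into user space as the
 * sampling buffer. pfm_rvfree() must clear the reservation before
 * vfree(), which is why both routines walk the region page by page.
 */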
869static pfm_context_t *
870pfm_context_alloc(void)
871{
872 pfm_context_t *ctx;
873
874 /*
875 * allocate context descriptor
876 * must be able to free with interrupts disabled
877 */
878 ctx = kzalloc(sizeof(pfm_context_t), GFP_KERNEL);
879 if (ctx) {
880 DPRINT(("alloc ctx @%p\n", ctx));
881 }
882 return ctx;
883}
884
885static void
886pfm_context_free(pfm_context_t *ctx)
887{
888 if (ctx) {
889 DPRINT(("free ctx @%p\n", ctx));
890 kfree(ctx);
891 }
892}
893
894static void
895pfm_mask_monitoring(struct task_struct *task)
896{
897 pfm_context_t *ctx = PFM_GET_CTX(task);
898 unsigned long mask, val, ovfl_mask;
899 int i;
900
901 DPRINT_ovfl(("masking monitoring for [%d]\n", task_pid_nr(task)));
902
903 ovfl_mask = pmu_conf->ovfl_val;
904 /*
905 * monitoring can only be masked as a result of a valid
906 * counter overflow. In UP, it means that the PMU still
907 * has an owner. Note that the owner can be different
908 * from the current task. However the PMU state belongs
909 * to the owner.
910 * In SMP, a valid overflow only happens when task is
911 * current. Therefore if we come here, we know that
912 * the PMU state belongs to the current task, therefore
913 * we can access the live registers.
914 *
915 * So in both cases, the live register contains the owner's
916 * state. We can ONLY touch the PMU registers and NOT the PSR.
917 *
918 * As a consequence of this call, the ctx->th_pmds[] array
919 * contains stale information which must be ignored
920 * when context is reloaded AND monitoring is active (see
921 * pfm_restart).
922 */
923 mask = ctx->ctx_used_pmds[0];
924 for (i = 0; mask; i++, mask>>=1) {
925 /* skip non used pmds */
926 if ((mask & 0x1) == 0) continue;
927 val = ia64_get_pmd(i);
928
929 if (PMD_IS_COUNTING(i)) {
930 /*
931 * we rebuild the full 64 bit value of the counter
932 */
933 ctx->ctx_pmds[i].val += (val & ovfl_mask);
934 } else {
935 ctx->ctx_pmds[i].val = val;
936 }
937 DPRINT_ovfl(("pmd[%d]=0x%lx hw_pmd=0x%lx\n",
938 i,
939 ctx->ctx_pmds[i].val,
940 val & ovfl_mask));
941 }
942 /*
943 * mask monitoring by setting the privilege level to 0
944 * we cannot use psr.pp/psr.up for this, it is controlled by
945 * the user
946 *
947 * if task is current, modify actual registers, otherwise modify
948 * thread save state, i.e., what will be restored in pfm_load_regs()
949 */
950 mask = ctx->ctx_used_monitors[0] >> PMU_FIRST_COUNTER;
951 for(i= PMU_FIRST_COUNTER; mask; i++, mask>>=1) {
952 if ((mask & 0x1) == 0UL) continue;
953 ia64_set_pmc(i, ctx->th_pmcs[i] & ~0xfUL);
954 ctx->th_pmcs[i] &= ~0xfUL;
955 DPRINT_ovfl(("pmc[%d]=0x%lx\n", i, ctx->th_pmcs[i]));
956 }
957 /*
958 * make all of this visible
959 */
960 ia64_srlz_d();
961}
962
963/*
964 * must always be done with task == current
965 *
966 * context must be in MASKED state when calling
967 */
968static void
969pfm_restore_monitoring(struct task_struct *task)
970{
971 pfm_context_t *ctx = PFM_GET_CTX(task);
972 unsigned long mask, ovfl_mask;
973 unsigned long psr, val;
974 int i, is_system;
975
976 is_system = ctx->ctx_fl_system;
977 ovfl_mask = pmu_conf->ovfl_val;
978
979 if (task != current) {
980 printk(KERN_ERR "perfmon.%d: invalid task[%d] current[%d]\n", __LINE__, task_pid_nr(task), task_pid_nr(current));
981 return;
982 }
983 if (ctx->ctx_state != PFM_CTX_MASKED) {
984 printk(KERN_ERR "perfmon.%d: task[%d] current[%d] invalid state=%d\n", __LINE__,
985 task_pid_nr(task), task_pid_nr(current), ctx->ctx_state);
986 return;
987 }
988 psr = pfm_get_psr();
989 /*
990 * monitoring is masked via the PMC.
991 * As we restore their value, we do not want each counter to
992 * restart right away. We stop monitoring using the PSR,
993 * restore the PMC (and PMD) and then re-establish the psr
994 * as it was. Note that there can be no pending overflow at
995 * this point, because monitoring was MASKED.
996 *
997 * system-wide sessions are pinned and self-monitoring
998 */
999 if (is_system && (PFM_CPUINFO_GET() & PFM_CPUINFO_DCR_PP)) {
1000 /* disable dcr pp */
1001 ia64_setreg(_IA64_REG_CR_DCR, ia64_getreg(_IA64_REG_CR_DCR) & ~IA64_DCR_PP);
1002 pfm_clear_psr_pp();
1003 } else {
1004 pfm_clear_psr_up();
1005 }
1006 /*
1007 * first, we restore the PMD
1008 */
1009 mask = ctx->ctx_used_pmds[0];
1010 for (i = 0; mask; i++, mask>>=1) {
1011 /* skip non used pmds */
1012 if ((mask & 0x1) == 0) continue;
1013
1014 if (PMD_IS_COUNTING(i)) {
1015 /*
1016 * we split the 64bit value according to
1017 * counter width
1018 */
1019 val = ctx->ctx_pmds[i].val & ovfl_mask;
1020 ctx->ctx_pmds[i].val &= ~ovfl_mask;
1021 } else {
1022 val = ctx->ctx_pmds[i].val;
1023 }
1024 ia64_set_pmd(i, val);
1025
1026 DPRINT(("pmd[%d]=0x%lx hw_pmd=0x%lx\n",
1027 i,
1028 ctx->ctx_pmds[i].val,
1029 val));
1030 }
1031 /*
1032 * restore the PMCs
1033 */
1034 mask = ctx->ctx_used_monitors[0] >> PMU_FIRST_COUNTER;
1035 for(i= PMU_FIRST_COUNTER; mask; i++, mask>>=1) {
1036 if ((mask & 0x1) == 0UL) continue;
1037 ctx->th_pmcs[i] = ctx->ctx_pmcs[i];
1038 ia64_set_pmc(i, ctx->th_pmcs[i]);
1039 DPRINT(("[%d] pmc[%d]=0x%lx\n",
1040 task_pid_nr(task), i, ctx->th_pmcs[i]));
1041 }
1042 ia64_srlz_d();
1043
1044 /*
1045 * must restore DBR/IBR because could be modified while masked
1046 * XXX: need to optimize
1047 */
1048 if (ctx->ctx_fl_using_dbreg) {
1049 pfm_restore_ibrs(ctx->ctx_ibrs, pmu_conf->num_ibrs);
1050 pfm_restore_dbrs(ctx->ctx_dbrs, pmu_conf->num_dbrs);
1051 }
1052
1053 /*
1054 * now restore PSR
1055 */
1056 if (is_system && (PFM_CPUINFO_GET() & PFM_CPUINFO_DCR_PP)) {
1057 /* enable dcr pp */
1058 ia64_setreg(_IA64_REG_CR_DCR, ia64_getreg(_IA64_REG_CR_DCR) | IA64_DCR_PP);
1059 ia64_srlz_i();
1060 }
1061 pfm_set_psr_l(psr);
1062}
1063
1064static inline void
1065pfm_save_pmds(unsigned long *pmds, unsigned long mask)
1066{
1067 int i;
1068
1069 ia64_srlz_d();
1070
1071 for (i=0; mask; i++, mask>>=1) {
1072 if (mask & 0x1) pmds[i] = ia64_get_pmd(i);
1073 }
1074}
1075
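/*
 * Note (added for clarity, not in the original source): the
 * "for (i=0; mask; i++, mask>>=1)" idiom above and throughout this
 * file walks a register bitmask from bit 0 upward: i tracks the
 * register index while the shrinking mask ends the loop as soon as
 * no higher bits remain set.
 */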
1076/*
1077 * reload from thread state (used for ctxsw in only)
1078 */
1079static inline void
1080pfm_restore_pmds(unsigned long *pmds, unsigned long mask)
1081{
1082 int i;
1083 unsigned long val, ovfl_val = pmu_conf->ovfl_val;
1084
1085 for (i=0; mask; i++, mask>>=1) {
1086 if ((mask & 0x1) == 0) continue;
1087 val = PMD_IS_COUNTING(i) ? pmds[i] & ovfl_val : pmds[i];
1088 ia64_set_pmd(i, val);
1089 }
1090 ia64_srlz_d();
1091}
1092
1093/*
1094 * propagate PMD from context to thread-state
1095 */
1096static inline void
1097pfm_copy_pmds(struct task_struct *task, pfm_context_t *ctx)
1098{
1099 unsigned long ovfl_val = pmu_conf->ovfl_val;
1100 unsigned long mask = ctx->ctx_all_pmds[0];
1101 unsigned long val;
1102 int i;
1103
1104 DPRINT(("mask=0x%lx\n", mask));
1105
1106 for (i=0; mask; i++, mask>>=1) {
1107
1108 val = ctx->ctx_pmds[i].val;
1109
1110 /*
1111 * We break up the 64 bit value into 2 pieces
1112 * the lower bits go to the machine state in the
1113 * thread (will be reloaded on ctxsw in).
1114 * The upper part stays in the soft-counter.
1115 */
1116 if (PMD_IS_COUNTING(i)) {
1117 ctx->ctx_pmds[i].val = val & ~ovfl_val;
1118 val &= ovfl_val;
1119 }
1120 ctx->th_pmds[i] = val;
1121
1122 DPRINT(("pmd[%d]=0x%lx soft_val=0x%lx\n",
1123 i,
1124 ctx->th_pmds[i],
1125 ctx->ctx_pmds[i].val));
1126 }
1127}
1128
1129/*
1130 * propagate PMC from context to thread-state
1131 */
1132static inline void
1133pfm_copy_pmcs(struct task_struct *task, pfm_context_t *ctx)
1134{
1135 unsigned long mask = ctx->ctx_all_pmcs[0];
1136 int i;
1137
1138 DPRINT(("mask=0x%lx\n", mask));
1139
1140 for (i=0; mask; i++, mask>>=1) {
1141 /* masking 0 with ovfl_val yields 0 */
1142 ctx->th_pmcs[i] = ctx->ctx_pmcs[i];
1143 DPRINT(("pmc[%d]=0x%lx\n", i, ctx->th_pmcs[i]));
1144 }
1145}
1146
1147
1148
1149static inline void
1150pfm_restore_pmcs(unsigned long *pmcs, unsigned long mask)
1151{
1152 int i;
1153
1154 for (i=0; mask; i++, mask>>=1) {
1155 if ((mask & 0x1) == 0) continue;
1156 ia64_set_pmc(i, pmcs[i]);
1157 }
1158 ia64_srlz_d();
1159}
1160
1161static inline int
1162pfm_uuid_cmp(pfm_uuid_t a, pfm_uuid_t b)
1163{
1164 return memcmp(a, b, sizeof(pfm_uuid_t));
1165}
1166
1167static inline int
1168pfm_buf_fmt_exit(pfm_buffer_fmt_t *fmt, struct task_struct *task, void *buf, struct pt_regs *regs)
1169{
1170 int ret = 0;
1171 if (fmt->fmt_exit) ret = (*fmt->fmt_exit)(task, buf, regs);
1172 return ret;
1173}
1174
1175static inline int
1176pfm_buf_fmt_getsize(pfm_buffer_fmt_t *fmt, struct task_struct *task, unsigned int flags, int cpu, void *arg, unsigned long *size)
1177{
1178 int ret = 0;
1179 if (fmt->fmt_getsize) ret = (*fmt->fmt_getsize)(task, flags, cpu, arg, size);
1180 return ret;
1181}
1182
1183
1184static inline int
1185pfm_buf_fmt_validate(pfm_buffer_fmt_t *fmt, struct task_struct *task, unsigned int flags,
1186 int cpu, void *arg)
1187{
1188 int ret = 0;
1189 if (fmt->fmt_validate) ret = (*fmt->fmt_validate)(task, flags, cpu, arg);
1190 return ret;
1191}
1192
1193static inline int
1194pfm_buf_fmt_init(pfm_buffer_fmt_t *fmt, struct task_struct *task, void *buf, unsigned int flags,
1195 int cpu, void *arg)
1196{
1197 int ret = 0;
1198 if (fmt->fmt_init) ret = (*fmt->fmt_init)(task, buf, flags, cpu, arg);
1199 return ret;
1200}
1201
1202static inline int
1203pfm_buf_fmt_restart(pfm_buffer_fmt_t *fmt, struct task_struct *task, pfm_ovfl_ctrl_t *ctrl, void *buf, struct pt_regs *regs)
1204{
1205 int ret = 0;
1206 if (fmt->fmt_restart) ret = (*fmt->fmt_restart)(task, ctrl, buf, regs);
1207 return ret;
1208}
1209
1210static inline int
1211pfm_buf_fmt_restart_active(pfm_buffer_fmt_t *fmt, struct task_struct *task, pfm_ovfl_ctrl_t *ctrl, void *buf, struct pt_regs *regs)
1212{
1213 int ret = 0;
1214 if (fmt->fmt_restart_active) ret = (*fmt->fmt_restart_active)(task, ctrl, buf, regs);
1215 return ret;
1216}
1217
1218static pfm_buffer_fmt_t *
1219__pfm_find_buffer_fmt(pfm_uuid_t uuid)
1220{
1221 struct list_head * pos;
1222 pfm_buffer_fmt_t * entry;
1223
1224 list_for_each(pos, &pfm_buffer_fmt_list) {
1225 entry = list_entry(pos, pfm_buffer_fmt_t, fmt_list);
1226 if (pfm_uuid_cmp(uuid, entry->fmt_uuid) == 0)
1227 return entry;
1228 }
1229 return NULL;
1230}
1231
1232/*
1233 * find a buffer format based on its uuid
1234 */
1235static pfm_buffer_fmt_t *
1236pfm_find_buffer_fmt(pfm_uuid_t uuid)
1237{
1238 pfm_buffer_fmt_t * fmt;
1239 spin_lock(&pfm_buffer_fmt_lock);
1240 fmt = __pfm_find_buffer_fmt(uuid);
1241 spin_unlock(&pfm_buffer_fmt_lock);
1242 return fmt;
1243}
1244
1245int
1246pfm_register_buffer_fmt(pfm_buffer_fmt_t *fmt)
1247{
1248 int ret = 0;
1249
1250 /* some sanity checks */
1251 if (fmt == NULL || fmt->fmt_name == NULL) return -EINVAL;
1252
1253 /* we need at least a handler */
1254 if (fmt->fmt_handler == NULL) return -EINVAL;
1255
1256 /*
1257 * XXX: need check validity of fmt_arg_size
1258 */
1259
1260 spin_lock(&pfm_buffer_fmt_lock);
1261
1262 if (__pfm_find_buffer_fmt(fmt->fmt_uuid)) {
1263 printk(KERN_ERR "perfmon: duplicate sampling format: %s\n", fmt->fmt_name);
1264 ret = -EBUSY;
1265 goto out;
1266 }
1267 list_add(&fmt->fmt_list, &pfm_buffer_fmt_list);
1268 printk(KERN_INFO "perfmon: added sampling format %s\n", fmt->fmt_name);
1269
1270out:
1271 spin_unlock(&pfm_buffer_fmt_lock);
1272 return ret;
1273}
1274EXPORT_SYMBOL(pfm_register_buffer_fmt);
1275
1276int
1277pfm_unregister_buffer_fmt(pfm_uuid_t uuid)
1278{
1279 pfm_buffer_fmt_t *fmt;
1280 int ret = 0;
1281
1282 spin_lock(&pfm_buffer_fmt_lock);
1283
1284 fmt = __pfm_find_buffer_fmt(uuid);
1285 if (!fmt) {
1286 printk(KERN_ERR "perfmon: cannot unregister format, not found\n");
1287 ret = -EINVAL;
1288 goto out;
1289 }
1290 list_del_init(&fmt->fmt_list);
1291 printk(KERN_INFO "perfmon: removed sampling format: %s\n", fmt->fmt_name);
1292
1293out:
1294 spin_unlock(&pfm_buffer_fmt_lock);
1295 return ret;
1296
1297}
1298EXPORT_SYMBOL(pfm_unregister_buffer_fmt);
1299
1300extern void update_pal_halt_status(int);
1301
1302static int
1303pfm_reserve_session(struct task_struct *task, int is_syswide, unsigned int cpu)
1304{
1305 unsigned long flags;
1306 /*
1307 * validity checks on cpu_mask have been done upstream
1308 */
1309 LOCK_PFS(flags);
1310
1311 DPRINT(("in sys_sessions=%u task_sessions=%u dbregs=%u syswide=%d cpu=%u\n",
1312 pfm_sessions.pfs_sys_sessions,
1313 pfm_sessions.pfs_task_sessions,
1314 pfm_sessions.pfs_sys_use_dbregs,
1315 is_syswide,
1316 cpu));
1317
1318 if (is_syswide) {
1319 /*
1320 * cannot mix system wide and per-task sessions
1321 */
1322 if (pfm_sessions.pfs_task_sessions > 0UL) {
1323 DPRINT(("system wide not possible, %u conflicting task_sessions\n",
1324 pfm_sessions.pfs_task_sessions));
1325 goto abort;
1326 }
1327
1328 if (pfm_sessions.pfs_sys_session[cpu]) goto error_conflict;
1329
1330 DPRINT(("reserving system wide session on CPU%u currently on CPU%u\n", cpu, smp_processor_id()));
1331
1332 pfm_sessions.pfs_sys_session[cpu] = task;
1333
1334 pfm_sessions.pfs_sys_sessions++ ;
1335
1336 } else {
1337 if (pfm_sessions.pfs_sys_sessions) goto abort;
1338 pfm_sessions.pfs_task_sessions++;
1339 }
1340
1341 DPRINT(("out sys_sessions=%u task_sessions=%u dbregs=%u syswide=%d cpu=%u\n",
1342 pfm_sessions.pfs_sys_sessions,
1343 pfm_sessions.pfs_task_sessions,
1344 pfm_sessions.pfs_sys_use_dbregs,
1345 is_syswide,
1346 cpu));
1347
1348 /*
1349 * disable default_idle() to go to PAL_HALT
1350 */
1351 update_pal_halt_status(0);
1352
1353 UNLOCK_PFS(flags);
1354
1355 return 0;
1356
1357error_conflict:
1358 DPRINT(("system wide not possible, conflicting session [%d] on CPU%d\n",
1359 task_pid_nr(pfm_sessions.pfs_sys_session[cpu]),
1360 cpu));
1361abort:
1362 UNLOCK_PFS(flags);
1363
1364 return -EBUSY;
1365
1366}
1367
1368static int
1369pfm_unreserve_session(pfm_context_t *ctx, int is_syswide, unsigned int cpu)
1370{
1371 unsigned long flags;
1372 /*
1373 * validity checks on cpu_mask have been done upstream
1374 */
1375 LOCK_PFS(flags);
1376
1377 DPRINT(("in sys_sessions=%u task_sessions=%u dbregs=%u syswide=%d cpu=%u\n",
1378 pfm_sessions.pfs_sys_sessions,
1379 pfm_sessions.pfs_task_sessions,
1380 pfm_sessions.pfs_sys_use_dbregs,
1381 is_syswide,
1382 cpu));
1383
1384
1385 if (is_syswide) {
1386 pfm_sessions.pfs_sys_session[cpu] = NULL;
1387 /*
1388 * would not work with perfmon+more than one bit in cpu_mask
1389 */
1390 if (ctx && ctx->ctx_fl_using_dbreg) {
1391 if (pfm_sessions.pfs_sys_use_dbregs == 0) {
1392 printk(KERN_ERR "perfmon: invalid release for ctx %p sys_use_dbregs=0\n", ctx);
1393 } else {
1394 pfm_sessions.pfs_sys_use_dbregs--;
1395 }
1396 }
1397 pfm_sessions.pfs_sys_sessions--;
1398 } else {
1399 pfm_sessions.pfs_task_sessions--;
1400 }
1401 DPRINT(("out sys_sessions=%u task_sessions=%u dbregs=%u syswide=%d cpu=%u\n",
1402 pfm_sessions.pfs_sys_sessions,
1403 pfm_sessions.pfs_task_sessions,
1404 pfm_sessions.pfs_sys_use_dbregs,
1405 is_syswide,
1406 cpu));
1407
1408 /*
1409 * if possible, enable default_idle() to go into PAL_HALT
1410 */
1411 if (pfm_sessions.pfs_task_sessions == 0 && pfm_sessions.pfs_sys_sessions == 0)
1412 update_pal_halt_status(1);
1413
1414 UNLOCK_PFS(flags);
1415
1416 return 0;
1417}
1418
1419/*
1420 * removes virtual mapping of the sampling buffer.
1421 * IMPORTANT: cannot be called with interrupts disabled, e.g. inside
1422 * a PROTECT_CTX() section.
1423 */
1424static int
1425pfm_remove_smpl_mapping(struct task_struct *task, void *vaddr, unsigned long size)
1426{
1427 int r;
1428
1429 /* sanity checks */
1430 if (task->mm == NULL || size == 0UL || vaddr == NULL) {
1431 printk(KERN_ERR "perfmon: pfm_remove_smpl_mapping [%d] invalid context mm=%p\n", task_pid_nr(task), task->mm);
1432 return -EINVAL;
1433 }
1434
1435 DPRINT(("smpl_vaddr=%p size=%lu\n", vaddr, size));
1436
1437 /*
1438 * does the actual unmapping
1439 */
1440 down_write(&task->mm->mmap_sem);
1441
1442 DPRINT(("down_write done smpl_vaddr=%p size=%lu\n", vaddr, size));
1443
1444 r = pfm_do_munmap(task->mm, (unsigned long)vaddr, size, 0);
1445
1446 up_write(&task->mm->mmap_sem);
1447 if (r !=0) {
1448 printk(KERN_ERR "perfmon: [%d] unable to unmap sampling buffer @%p size=%lu\n", task_pid_nr(task), vaddr, size);
1449 }
1450
1451 DPRINT(("do_unmap(%p, %lu)=%d\n", vaddr, size, r));
1452
1453 return 0;
1454}
1455
1456/*
1457 * free actual physical storage used by sampling buffer
1458 */
1459#if 0
1460static int
1461pfm_free_smpl_buffer(pfm_context_t *ctx)
1462{
1463 pfm_buffer_fmt_t *fmt;
1464
1465 if (ctx->ctx_smpl_hdr == NULL) goto invalid_free;
1466
1467 /*
1468 * we won't use the buffer format anymore
1469 */
1470 fmt = ctx->ctx_buf_fmt;
1471
1472 DPRINT(("sampling buffer @%p size %lu vaddr=%p\n",
1473 ctx->ctx_smpl_hdr,
1474 ctx->ctx_smpl_size,
1475 ctx->ctx_smpl_vaddr));
1476
1477 pfm_buf_fmt_exit(fmt, current, NULL, NULL);
1478
1479 /*
1480 * free the buffer
1481 */
1482 pfm_rvfree(ctx->ctx_smpl_hdr, ctx->ctx_smpl_size);
1483
1484 ctx->ctx_smpl_hdr = NULL;
1485 ctx->ctx_smpl_size = 0UL;
1486
1487 return 0;
1488
1489invalid_free:
1490 printk(KERN_ERR "perfmon: pfm_free_smpl_buffer [%d] no buffer\n", task_pid_nr(current));
1491 return -EINVAL;
1492}
1493#endif
1494
1495static inline void
1496pfm_exit_smpl_buffer(pfm_buffer_fmt_t *fmt)
1497{
1498 if (fmt == NULL) return;
1499
1500 pfm_buf_fmt_exit(fmt, current, NULL, NULL);
1501
1502}
1503
1504/*
1505 * pfmfs should _never_ be mounted by userland - too much of security hassle,
1506 * no real gain from having the whole whorehouse mounted. So we don't need
1507 * any operations on the root directory. However, we need a non-trivial
1508 * d_name - pfm: will go nicely and kill the special-casing in procfs.
1509 */
1510static struct vfsmount *pfmfs_mnt;
1511
1512static int __init
1513init_pfm_fs(void)
1514{
1515 int err = register_filesystem(&pfm_fs_type);
1516 if (!err) {
1517 pfmfs_mnt = kern_mount(&pfm_fs_type);
1518 err = PTR_ERR(pfmfs_mnt);
1519 if (IS_ERR(pfmfs_mnt))
1520 unregister_filesystem(&pfm_fs_type);
1521 else
1522 err = 0;
1523 }
1524 return err;
1525}
1526
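/*
 * Note (added for clarity, not in the original source): pfmfs is
 * kern_mount()ed purely so that each perfmon context can be backed by
 * an anonymous inode on it; the resulting file descriptor then drives
 * the context's lifetime through pfm_file_ops (read/poll/flush/close).
 */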
1527static ssize_t
1528pfm_read(struct file *filp, char __user *buf, size_t size, loff_t *ppos)
1529{
1530 pfm_context_t *ctx;
1531 pfm_msg_t *msg;
1532 ssize_t ret;
1533 unsigned long flags;
1534 DECLARE_WAITQUEUE(wait, current);
1535 if (PFM_IS_FILE(filp) == 0) {
1536 printk(KERN_ERR "perfmon: pfm_read: bad magic [%d]\n", task_pid_nr(current));
1537 return -EINVAL;
1538 }
1539
1540 ctx = (pfm_context_t *)filp->private_data;
1541 if (ctx == NULL) {
1542 printk(KERN_ERR "perfmon: pfm_read: NULL ctx [%d]\n", task_pid_nr(current));
1543 return -EINVAL;
1544 }
1545
1546 /*
1547 * check even when there is no message
1548 */
1549 if (size < sizeof(pfm_msg_t)) {
1550 DPRINT(("message is too small ctx=%p (>=%ld)\n", ctx, sizeof(pfm_msg_t)));
1551 return -EINVAL;
1552 }
1553
1554 PROTECT_CTX(ctx, flags);
1555
1556 /*
1557 * put ourselves on the wait queue
1558 */
1559 add_wait_queue(&ctx->ctx_msgq_wait, &wait);
1560
1561
1562 for(;;) {
1563 /*
1564 * check wait queue
1565 */
1566
1567 set_current_state(TASK_INTERRUPTIBLE);
1568
1569 DPRINT(("head=%d tail=%d\n", ctx->ctx_msgq_head, ctx->ctx_msgq_tail));
1570
1571 ret = 0;
1572 if(PFM_CTXQ_EMPTY(ctx) == 0) break;
1573
1574 UNPROTECT_CTX(ctx, flags);
1575
1576 /*
1577 * check non-blocking read
1578 */
1579 ret = -EAGAIN;
1580 if(filp->f_flags & O_NONBLOCK) break;
1581
1582 /*
1583 * check pending signals
1584 */
1585 if(signal_pending(current)) {
1586 ret = -EINTR;
1587 break;
1588 }
1589 /*
1590 * no message, so wait
1591 */
1592 schedule();
1593
1594 PROTECT_CTX(ctx, flags);
1595 }
1596 DPRINT(("[%d] back to running ret=%ld\n", task_pid_nr(current), ret));
1597 set_current_state(TASK_RUNNING);
1598 remove_wait_queue(&ctx->ctx_msgq_wait, &wait);
1599
1600 if (ret < 0) goto abort;
1601
1602 ret = -EINVAL;
1603 msg = pfm_get_next_msg(ctx);
1604 if (msg == NULL) {
1605 printk(KERN_ERR "perfmon: pfm_read no msg for ctx=%p [%d]\n", ctx, task_pid_nr(current));
1606 goto abort_locked;
1607 }
1608
1609 DPRINT(("fd=%d type=%d\n", msg->pfm_gen_msg.msg_ctx_fd, msg->pfm_gen_msg.msg_type));
1610
1611 ret = -EFAULT;
1612 if(copy_to_user(buf, msg, sizeof(pfm_msg_t)) == 0) ret = sizeof(pfm_msg_t);
1613
1614abort_locked:
1615 UNPROTECT_CTX(ctx, flags);
1616abort:
1617 return ret;
1618}
1619
1620static ssize_t
1621pfm_write(struct file *file, const char __user *ubuf,
1622 size_t size, loff_t *ppos)
1623{
1624 DPRINT(("pfm_write called\n"));
1625 return -EINVAL;
1626}
1627
1628static unsigned int
1629pfm_poll(struct file *filp, poll_table * wait)
1630{
1631 pfm_context_t *ctx;
1632 unsigned long flags;
1633 unsigned int mask = 0;
1634
1635 if (PFM_IS_FILE(filp) == 0) {
1636 printk(KERN_ERR "perfmon: pfm_poll: bad magic [%d]\n", task_pid_nr(current));
1637 return 0;
1638 }
1639
1640 ctx = (pfm_context_t *)filp->private_data;
1641 if (ctx == NULL) {
1642 printk(KERN_ERR "perfmon: pfm_poll: NULL ctx [%d]\n", task_pid_nr(current));
1643 return 0;
1644 }
1645
1646
1647 DPRINT(("pfm_poll ctx_fd=%d before poll_wait\n", ctx->ctx_fd));
1648
1649 poll_wait(filp, &ctx->ctx_msgq_wait, wait);
1650
1651 PROTECT_CTX(ctx, flags);
1652
1653 if (PFM_CTXQ_EMPTY(ctx) == 0)
1654 mask = POLLIN | POLLRDNORM;
1655
1656 UNPROTECT_CTX(ctx, flags);
1657
1658 DPRINT(("pfm_poll ctx_fd=%d mask=0x%x\n", ctx->ctx_fd, mask));
1659
1660 return mask;
1661}
1662
1663static int
1664pfm_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg)
1665{
1666 DPRINT(("pfm_ioctl called\n"));
1667 return -EINVAL;
1668}
1669
1670/*
1671 * interrupt cannot be masked when coming here
1672 */
1673static inline int
1674pfm_do_fasync(int fd, struct file *filp, pfm_context_t *ctx, int on)
1675{
1676 int ret;
1677
1678 ret = fasync_helper (fd, filp, on, &ctx->ctx_async_queue);
1679
1680 DPRINT(("pfm_fasync called by [%d] on ctx_fd=%d on=%d async_queue=%p ret=%d\n",
1681 task_pid_nr(current),
1682 fd,
1683 on,
1684 ctx->ctx_async_queue, ret));
1685
1686 return ret;
1687}
1688
1689static int
1690pfm_fasync(int fd, struct file *filp, int on)
1691{
1692 pfm_context_t *ctx;
1693 int ret;
1694
1695 if (PFM_IS_FILE(filp) == 0) {
1696 printk(KERN_ERR "perfmon: pfm_fasync bad magic [%d]\n", task_pid_nr(current));
1697 return -EBADF;
1698 }
1699
1700 ctx = (pfm_context_t *)filp->private_data;
1701 if (ctx == NULL) {
1702 printk(KERN_ERR "perfmon: pfm_fasync NULL ctx [%d]\n", task_pid_nr(current));
1703 return -EBADF;
1704 }
1705 /*
1706 * we cannot mask interrupts during this call because this
1707 * may go to sleep if memory is not readily available.
1708 *
1709 * We are protected from the context disappearing by the get_fd()/put_fd()
1710 * done in caller. Serialization of this function is ensured by caller.
1711 */
1712 ret = pfm_do_fasync(fd, filp, ctx, on);
1713
1714
1715 DPRINT(("pfm_fasync called on ctx_fd=%d on=%d async_queue=%p ret=%d\n",
1716 fd,
1717 on,
1718 ctx->ctx_async_queue, ret));
1719
1720 return ret;
1721}
1722
1723#ifdef CONFIG_SMP
1724/*
1725 * this function is exclusively called from pfm_close().
1726 * The context is not protected at that time, nor are interrupts
1727 * on the remote CPU. That's necessary to avoid deadlocks.
1728 */
1729static void
1730pfm_syswide_force_stop(void *info)
1731{
1732 pfm_context_t *ctx = (pfm_context_t *)info;
1733 struct pt_regs *regs = task_pt_regs(current);
1734 struct task_struct *owner;
1735 unsigned long flags;
1736 int ret;
1737
1738 if (ctx->ctx_cpu != smp_processor_id()) {
1739 printk(KERN_ERR "perfmon: pfm_syswide_force_stop for CPU%d but on CPU%d\n",
1740 ctx->ctx_cpu,
1741 smp_processor_id());
1742 return;
1743 }
1744 owner = GET_PMU_OWNER();
1745 if (owner != ctx->ctx_task) {
1746 printk(KERN_ERR "perfmon: pfm_syswide_force_stop CPU%d unexpected owner [%d] instead of [%d]\n",
1747 smp_processor_id(),
1748 task_pid_nr(owner), task_pid_nr(ctx->ctx_task));
1749 return;
1750 }
1751 if (GET_PMU_CTX() != ctx) {
1752 printk(KERN_ERR "perfmon: pfm_syswide_force_stop CPU%d unexpected ctx %p instead of %p\n",
1753 smp_processor_id(),
1754 GET_PMU_CTX(), ctx);
1755 return;
1756 }
1757
1758 DPRINT(("on CPU%d forcing system wide stop for [%d]\n", smp_processor_id(), task_pid_nr(ctx->ctx_task)));
1759 /*
1760 * the context is already protected in pfm_close(), we simply
1761 * need to mask interrupts to avoid a PMU interrupt race on
1762 * this CPU
1763 */
1764 local_irq_save(flags);
1765
1766 ret = pfm_context_unload(ctx, NULL, 0, regs);
1767 if (ret) {
1768 DPRINT(("context_unload returned %d\n", ret));
1769 }
1770
1771 /*
1772 * unmask interrupts, PMU interrupts are now spurious here
1773 */
1774 local_irq_restore(flags);
1775}
1776
1777static void
1778pfm_syswide_cleanup_other_cpu(pfm_context_t *ctx)
1779{
1780 int ret;
1781
1782 DPRINT(("calling CPU%d for cleanup\n", ctx->ctx_cpu));
1783 ret = smp_call_function_single(ctx->ctx_cpu, pfm_syswide_force_stop, ctx, 0, 1);
1784 DPRINT(("called CPU%d for cleanup ret=%d\n", ctx->ctx_cpu, ret));
1785}
1786#endif /* CONFIG_SMP */
1787
1788/*
1789 * called for each close(). Partially free resources.
1790 * When caller is self-monitoring, the context is unloaded.
1791 */
1792static int
1793pfm_flush(struct file *filp, fl_owner_t id)
1794{
1795 pfm_context_t *ctx;
1796 struct task_struct *task;
1797 struct pt_regs *regs;
1798 unsigned long flags;
1799 unsigned long smpl_buf_size = 0UL;
1800 void *smpl_buf_vaddr = NULL;
1801 int state, is_system;
1802
1803 if (PFM_IS_FILE(filp) == 0) {
1804 DPRINT(("bad magic for\n"));
1805 return -EBADF;
1806 }
1807
1808 ctx = (pfm_context_t *)filp->private_data;
1809 if (ctx == NULL) {
1810 printk(KERN_ERR "perfmon: pfm_flush: NULL ctx [%d]\n", task_pid_nr(current));
1811 return -EBADF;
1812 }
1813
1814 /*
1815 * remove our file from the async queue, if we use this mode.
1816 * This can be done without the context being protected. We come
1817 * here when the context has become unreachable by other tasks.
1818 *
1819 * We may still have active monitoring at this point and we may
1820 * end up in pfm_overflow_handler(). However, fasync_helper()
1821 * operates with interrupts disabled and it cleans up the
1822 * queue. If the PMU handler is called prior to entering
1823 * fasync_helper() then it will send a signal. If it is
1824 * invoked after, it will find an empty queue and no
1825 * signal will be sent. In both cases, we are safe
1826 */
1827 if (filp->f_flags & FASYNC) {
1828 DPRINT(("cleaning up async_queue=%p\n", ctx->ctx_async_queue));
1829 pfm_do_fasync (-1, filp, ctx, 0);
1830 }
1831
1832 PROTECT_CTX(ctx, flags);
1833
1834 state = ctx->ctx_state;
1835 is_system = ctx->ctx_fl_system;
1836
1837 task = PFM_CTX_TASK(ctx);
1838 regs = task_pt_regs(task);
1839
1840 DPRINT(("ctx_state=%d is_current=%d\n",
1841 state,
1842 task == current ? 1 : 0));
1843
1844 /*
1845 * if state == UNLOADED, then task is NULL
1846 */
1847
1848 /*
1849 * we must stop and unload because we are losing access to the context.
1850 */
1851 if (task == current) {
1852#ifdef CONFIG_SMP
1853 /*
1854 * the task IS the owner but it migrated to another CPU: that's bad
1855 * but we must handle this cleanly. Unfortunately, the kernel does
1856 * not provide a mechanism to block migration (while the context is loaded).
1857 *
1858 * We need to release the resource on the ORIGINAL cpu.
1859 */
1860 if (is_system && ctx->ctx_cpu != smp_processor_id()) {
1861
1862 DPRINT(("should be running on CPU%d\n", ctx->ctx_cpu));
1863 /*
1864 * keep context protected but unmask interrupt for IPI
1865 */
1866 local_irq_restore(flags);
1867
1868 pfm_syswide_cleanup_other_cpu(ctx);
1869
1870 /*
1871 * restore interrupt masking
1872 */
1873 local_irq_save(flags);
1874
1875 /*
1876 * context is unloaded at this point
1877 */
1878 } else
1879#endif /* CONFIG_SMP */
1880 {
1881
1882 DPRINT(("forcing unload\n"));
1883 /*
1884 * stop and unload, returning with state UNLOADED
1885 * and session unreserved.
1886 */
1887 pfm_context_unload(ctx, NULL, 0, regs);
1888
1889 DPRINT(("ctx_state=%d\n", ctx->ctx_state));
1890 }
1891 }
1892
1893 /*
1894 * remove virtual mapping, if any, for the calling task.
1895 * cannot reset ctx field until last user is calling close().
1896 *
1897 * ctx_smpl_vaddr must never be cleared because it is needed
1898 * by every task with access to the context
1899 *
1900 * When called from do_exit(), the mm context is gone already, therefore
1901 * mm is NULL, i.e., the VMA is already gone and we do not have to
1902 * do anything here
1903 */
1904 if (ctx->ctx_smpl_vaddr && current->mm) {
1905 smpl_buf_vaddr = ctx->ctx_smpl_vaddr;
1906 smpl_buf_size = ctx->ctx_smpl_size;
1907 }
1908
1909 UNPROTECT_CTX(ctx, flags);
1910
1911 /*
1912 * if there was a mapping, then we systematically remove it
1913 * at this point. Cannot be done inside critical section
1914 * because some VM function reenables interrupts.
1915 *
1916 */
1917 if (smpl_buf_vaddr) pfm_remove_smpl_mapping(current, smpl_buf_vaddr, smpl_buf_size);
1918
1919 return 0;
1920}
1921/*
1922 * called either on explicit close() or from exit_files().
1923 * Only the LAST user of the file gets to this point, i.e., it is
1924 * called only ONCE.
1925 *
1926 * IMPORTANT: we get called ONLY when the refcnt on the file gets to zero
1927	 * (fput()), i.e., the last task to access the file. Nobody else can access the
1928 * file at this point.
1929 *
1930 * When called from exit_files(), the VMA has been freed because exit_mm()
1931 * is executed before exit_files().
1932 *
1933 * When called from exit_files(), the current task is not yet ZOMBIE but we
1934 * flush the PMU state to the context.
1935 */
1936static int
1937pfm_close(struct inode *inode, struct file *filp)
1938{
1939 pfm_context_t *ctx;
1940 struct task_struct *task;
1941 struct pt_regs *regs;
1942 DECLARE_WAITQUEUE(wait, current);
1943 unsigned long flags;
1944 unsigned long smpl_buf_size = 0UL;
1945 void *smpl_buf_addr = NULL;
1946 int free_possible = 1;
1947 int state, is_system;
1948
1949 DPRINT(("pfm_close called private=%p\n", filp->private_data));
1950
1951 if (PFM_IS_FILE(filp) == 0) {
1952 DPRINT(("bad magic\n"));
1953 return -EBADF;
1954 }
1955
1956 ctx = (pfm_context_t *)filp->private_data;
1957 if (ctx == NULL) {
1958		printk(KERN_ERR "perfmon: pfm_close: NULL ctx [%d]\n", task_pid_nr(current));
1959 return -EBADF;
1960 }
1961
1962 PROTECT_CTX(ctx, flags);
1963
1964 state = ctx->ctx_state;
1965 is_system = ctx->ctx_fl_system;
1966
1967 task = PFM_CTX_TASK(ctx);
1968	regs = task_pt_regs(task);
1969
1970 DPRINT(("ctx_state=%d is_current=%d\n",
1971 state,
1972 task == current ? 1 : 0));
1973
1974 /*
1975 * if task == current, then pfm_flush() unloaded the context
1976 */
1977 if (state == PFM_CTX_UNLOADED) goto doit;
1978
1979 /*
1980 * context is loaded/masked and task != current, we need to
1981 * either force an unload or go zombie
1982 */
1983
1984 /*
1985 * The task is currently blocked or will block after an overflow.
1986	 * We must force it to wake up to get out of the
1987 * MASKED state and transition to the unloaded state by itself.
1988 *
1989 * This situation is only possible for per-task mode
1990 */
1991 if (state == PFM_CTX_MASKED && CTX_OVFL_NOBLOCK(ctx) == 0) {
1992
1993 /*
1994 * set a "partial" zombie state to be checked
1995 * upon return from down() in pfm_handle_work().
1996 *
1997 * We cannot use the ZOMBIE state, because it is checked
1998 * by pfm_load_regs() which is called upon wakeup from down().
1999	 * In such a case, it would free the context and then we would
2000 * return to pfm_handle_work() which would access the
2001 * stale context. Instead, we set a flag invisible to pfm_load_regs()
2002 * but visible to pfm_handle_work().
2003 *
2004 * For some window of time, we have a zombie context with
2005 * ctx_state = MASKED and not ZOMBIE
2006 */
2007 ctx->ctx_fl_going_zombie = 1;
2008
2009 /*
2010 * force task to wake up from MASKED state
2011 */
2012		complete(&ctx->ctx_restart_done);
2013
2014 DPRINT(("waking up ctx_state=%d\n", state));
2015
2016 /*
2017	 * put ourselves to sleep waiting for the other
2018	 * task to report completion
2019	 *
2020	 * the context is protected by the mutex, therefore there
2021	 * is no risk of being notified of completion before
2022	 * being actually on the waitq.
2023 */
2024 set_current_state(TASK_INTERRUPTIBLE);
2025 add_wait_queue(&ctx->ctx_zombieq, &wait);
2026
2027 UNPROTECT_CTX(ctx, flags);
2028
2029 /*
2030 * XXX: check for signals :
2031 * - ok for explicit close
2032 * - not ok when coming from exit_files()
2033 */
2034 schedule();
2035
2036
2037 PROTECT_CTX(ctx, flags);
2038
2039
2040 remove_wait_queue(&ctx->ctx_zombieq, &wait);
2041 set_current_state(TASK_RUNNING);
2042
2043 /*
2044 * context is unloaded at this point
2045 */
2046 DPRINT(("after zombie wakeup ctx_state=%d for\n", state));
2047 }
2048 else if (task != current) {
2049#ifdef CONFIG_SMP
2050 /*
2051 * switch context to zombie state
2052 */
2053 ctx->ctx_state = PFM_CTX_ZOMBIE;
2054
2055		DPRINT(("zombie ctx for [%d]\n", task_pid_nr(task)));
2056 /*
2057 * cannot free the context on the spot. deferred until
2058 * the task notices the ZOMBIE state
2059 */
2060 free_possible = 0;
2061#else
2062 pfm_context_unload(ctx, NULL, 0, regs);
2063#endif
2064 }
2065
2066doit:
2067 /* reload state, may have changed during opening of critical section */
2068 state = ctx->ctx_state;
2069
2070 /*
2071 * the context is still attached to a task (possibly current)
2072 * we cannot destroy it right now
2073 */
2074
2075 /*
2076 * we must free the sampling buffer right here because
2077 * we cannot rely on it being cleaned up later by the
2078 * monitored task. It is not possible to free vmalloc'ed
2079 * memory in pfm_load_regs(). Instead, we remove the buffer
2080	 * now. Should there be a subsequent PMU overflow originally
2081	 * meant for sampling, it will be converted to spurious
2082	 * and that's fine because the monitoring tool is gone anyway.
2083 */
2084 if (ctx->ctx_smpl_hdr) {
2085 smpl_buf_addr = ctx->ctx_smpl_hdr;
2086 smpl_buf_size = ctx->ctx_smpl_size;
2087 /* no more sampling */
2088 ctx->ctx_smpl_hdr = NULL;
2089 ctx->ctx_fl_is_sampling = 0;
2090 }
2091
2092 DPRINT(("ctx_state=%d free_possible=%d addr=%p size=%lu\n",
2093 state,
2094 free_possible,
2095 smpl_buf_addr,
2096 smpl_buf_size));
2097
2098 if (smpl_buf_addr) pfm_exit_smpl_buffer(ctx->ctx_buf_fmt);
2099
2100 /*
2101	 * UNLOADED means that the session has already been unreserved.
2102 */
2103 if (state == PFM_CTX_ZOMBIE) {
2104		pfm_unreserve_session(ctx, ctx->ctx_fl_system, ctx->ctx_cpu);
2105 }
2106
2107 /*
2108 * disconnect file descriptor from context must be done
2109 * before we unlock.
2110 */
2111 filp->private_data = NULL;
2112
2113 /*
2114	 * if we free on the spot, the context is now completely unreachable
2115	 * from the caller's side. The monitored task side is also cut, so the
2116	 * context can be freed safely.
2117 *
2118 * If we have a deferred free, only the caller side is disconnected.
2119 */
2120 UNPROTECT_CTX(ctx, flags);
2121
2122 /*
2123 * All memory free operations (especially for vmalloc'ed memory)
2124 * MUST be done with interrupts ENABLED.
2125 */
2126 if (smpl_buf_addr) pfm_rvfree(smpl_buf_addr, smpl_buf_size);
2127
2128 /*
2129 * return the memory used by the context
2130 */
2131 if (free_possible) pfm_context_free(ctx);
2132
2133 return 0;
2134}
2135
2136static int
2137pfm_no_open(struct inode *irrelevant, struct file *dontcare)
2138{
2139 DPRINT(("pfm_no_open called\n"));
2140 return -ENXIO;
2141}
2142
2143
2144
5dfe4c96 2145static const struct file_operations pfm_file_ops = {
1da177e4
LT
2146 .llseek = no_llseek,
2147 .read = pfm_read,
2148 .write = pfm_write,
2149 .poll = pfm_poll,
2150 .ioctl = pfm_ioctl,
2151 .open = pfm_no_open, /* special open code to disallow open via /proc */
2152 .fasync = pfm_fasync,
2153 .release = pfm_close,
2154 .flush = pfm_flush
2155};
2156
2157static int
2158pfmfs_delete_dentry(struct dentry *dentry)
2159{
2160 return 1;
2161}
2162
2163static struct dentry_operations pfmfs_dentry_operations = {
2164 .d_delete = pfmfs_delete_dentry,
2165};
2166
2167
2168static int
2169pfm_alloc_fd(struct file **cfile)
2170{
2171 int fd, ret = 0;
2172 struct file *file = NULL;
2173 struct inode * inode;
2174 char name[32];
2175 struct qstr this;
2176
2177 fd = get_unused_fd();
2178 if (fd < 0) return -ENFILE;
2179
2180 ret = -ENFILE;
2181
2182 file = get_empty_filp();
2183 if (!file) goto out;
2184
2185 /*
2186 * allocate a new inode
2187 */
2188 inode = new_inode(pfmfs_mnt->mnt_sb);
2189 if (!inode) goto out;
2190
2191 DPRINT(("new inode ino=%ld @%p\n", inode->i_ino, inode));
2192
2193 inode->i_mode = S_IFCHR|S_IRUGO;
2194 inode->i_uid = current->fsuid;
2195 inode->i_gid = current->fsgid;
2196
2197 sprintf(name, "[%lu]", inode->i_ino);
2198 this.name = name;
2199 this.len = strlen(name);
2200 this.hash = inode->i_ino;
2201
2202 ret = -ENOMEM;
2203
2204 /*
2205 * allocate a new dcache entry
2206 */
2207 file->f_path.dentry = d_alloc(pfmfs_mnt->mnt_sb->s_root, &this);
2208 if (!file->f_path.dentry) goto out;
2209
2210	file->f_path.dentry->d_op = &pfmfs_dentry_operations;
2211
2212 d_add(file->f_path.dentry, inode);
2213 file->f_path.mnt = mntget(pfmfs_mnt);
2214 file->f_mapping = inode->i_mapping;
2215
2216 file->f_op = &pfm_file_ops;
2217 file->f_mode = FMODE_READ;
2218 file->f_flags = O_RDONLY;
2219 file->f_pos = 0;
2220
2221 /*
2222 * may have to delay until context is attached?
2223 */
2224 fd_install(fd, file);
2225
2226 /*
2227 * the file structure we will use
2228 */
2229 *cfile = file;
2230
2231 return fd;
2232out:
2233 if (file) put_filp(file);
2234 put_unused_fd(fd);
2235 return ret;
2236}
2237
2238static void
2239pfm_free_fd(int fd, struct file *file)
2240{
2241 struct files_struct *files = current->files;
2242	struct fdtable *fdt;
2243
2244 /*
2245	 * there is no fd_uninstall(), so we do it here
2246 */
2247 spin_lock(&files->file_lock);
2248	fdt = files_fdtable(files);
2249	rcu_assign_pointer(fdt->fd[fd], NULL);
2250 spin_unlock(&files->file_lock);
2251
2252 if (file)
2253 put_filp(file);
2254 put_unused_fd(fd);
2255}
2256
2257static int
2258pfm_remap_buffer(struct vm_area_struct *vma, unsigned long buf, unsigned long addr, unsigned long size)
2259{
2260 DPRINT(("CPU%d buf=0x%lx addr=0x%lx size=%ld\n", smp_processor_id(), buf, addr, size));
2261
2262 while (size > 0) {
2263 unsigned long pfn = ia64_tpa(buf) >> PAGE_SHIFT;
2264
2265
2266 if (remap_pfn_range(vma, addr, pfn, PAGE_SIZE, PAGE_READONLY))
2267 return -ENOMEM;
2268
2269 addr += PAGE_SIZE;
2270 buf += PAGE_SIZE;
2271 size -= PAGE_SIZE;
2272 }
2273 return 0;
2274}
2275
2276/*
2277 * allocate a sampling buffer and remaps it into the user address space of the task
2278 */
2279static int
41d5e5d7 2280pfm_smpl_buffer_alloc(struct task_struct *task, struct file *filp, pfm_context_t *ctx, unsigned long rsize, void **user_vaddr)
1da177e4
LT
2281{
2282 struct mm_struct *mm = task->mm;
2283 struct vm_area_struct *vma = NULL;
2284 unsigned long size;
2285 void *smpl_buf;
2286
2287
2288 /*
2289	 * the fixed header + requested size, aligned to a page boundary
2290 */
2291 size = PAGE_ALIGN(rsize);
2292
2293 DPRINT(("sampling buffer rsize=%lu size=%lu bytes\n", rsize, size));
2294
2295 /*
2296 * check requested size to avoid Denial-of-service attacks
2297 * XXX: may have to refine this test
2298 * Check against address space limit.
2299 *
2300 * if ((mm->total_vm << PAGE_SHIFT) + len> task->rlim[RLIMIT_AS].rlim_cur)
2301 * return -ENOMEM;
2302 */
2303 if (size > task->signal->rlim[RLIMIT_MEMLOCK].rlim_cur)
2304 return -ENOMEM;
2305
2306 /*
2307 * We do the easy to undo allocations first.
2308 *
2309 * pfm_rvmalloc(), clears the buffer, so there is no leak
2310 */
2311 smpl_buf = pfm_rvmalloc(size);
2312 if (smpl_buf == NULL) {
2313 DPRINT(("Can't allocate sampling buffer\n"));
2314 return -ENOMEM;
2315 }
2316
2317 DPRINT(("smpl_buf @%p\n", smpl_buf));
2318
2319 /* allocate vma */
2320	vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
2321 if (!vma) {
2322 DPRINT(("Cannot allocate vma\n"));
2323 goto error_kmem;
2324 }
2325
2326 /*
2327 * partially initialize the vma for the sampling buffer
2328 */
2329 vma->vm_mm = mm;
2330	vma->vm_file = filp;
2331	vma->vm_flags = VM_READ | VM_MAYREAD | VM_RESERVED;
2332 vma->vm_page_prot = PAGE_READONLY; /* XXX may need to change */
2333
2334 /*
2335 * Now we have everything we need and we can initialize
2336 * and connect all the data structures
2337 */
2338
2339 ctx->ctx_smpl_hdr = smpl_buf;
2340 ctx->ctx_smpl_size = size; /* aligned size */
2341
2342 /*
2343 * Let's do the difficult operations next.
2344 *
2345 * now we atomically find some area in the address space and
2346 * remap the buffer in it.
2347 */
2348 down_write(&task->mm->mmap_sem);
2349
2350 /* find some free area in address space, must have mmap sem held */
2351 vma->vm_start = pfm_get_unmapped_area(NULL, 0, size, 0, MAP_PRIVATE|MAP_ANONYMOUS, 0);
2352 if (vma->vm_start == 0UL) {
2353 DPRINT(("Cannot find unmapped area for size %ld\n", size));
2354 up_write(&task->mm->mmap_sem);
2355 goto error;
2356 }
2357 vma->vm_end = vma->vm_start + size;
2358 vma->vm_pgoff = vma->vm_start >> PAGE_SHIFT;
2359
2360 DPRINT(("aligned size=%ld, hdr=%p mapped @0x%lx\n", size, ctx->ctx_smpl_hdr, vma->vm_start));
2361
2362 /* can only be applied to current task, need to have the mm semaphore held when called */
2363 if (pfm_remap_buffer(vma, (unsigned long)smpl_buf, vma->vm_start, size)) {
2364 DPRINT(("Can't remap buffer\n"));
2365 up_write(&task->mm->mmap_sem);
2366 goto error;
2367 }
2368
2369 get_file(filp);
2370
2371 /*
2372 * now insert the vma in the vm list for the process, must be
2373 * done with mmap lock held
2374 */
2375 insert_vm_struct(mm, vma);
2376
2377 mm->total_vm += size >> PAGE_SHIFT;
2378 vm_stat_account(vma->vm_mm, vma->vm_flags, vma->vm_file,
2379 vma_pages(vma));
2380 up_write(&task->mm->mmap_sem);
2381
2382 /*
2383 * keep track of user level virtual address
2384 */
2385 ctx->ctx_smpl_vaddr = (void *)vma->vm_start;
2386 *(unsigned long *)user_vaddr = vma->vm_start;
2387
2388 return 0;
2389
2390error:
2391 kmem_cache_free(vm_area_cachep, vma);
2392error_kmem:
2393 pfm_rvfree(smpl_buf, size);
2394
2395 return -ENOMEM;
2396}
2397
2398/*
2399 * XXX: do something better here
2400 */
2401static int
2402pfm_bad_permissions(struct task_struct *task)
2403{
2404 /* inspired by ptrace_attach() */
2405 DPRINT(("cur: uid=%d gid=%d task: euid=%d suid=%d uid=%d egid=%d sgid=%d\n",
2406 current->uid,
2407 current->gid,
2408 task->euid,
2409 task->suid,
2410 task->uid,
2411 task->egid,
2412 task->sgid));
2413
2414 return ((current->uid != task->euid)
2415 || (current->uid != task->suid)
2416 || (current->uid != task->uid)
2417 || (current->gid != task->egid)
2418 || (current->gid != task->sgid)
2419 || (current->gid != task->gid)) && !capable(CAP_SYS_PTRACE);
2420}
2421
2422static int
2423pfarg_is_sane(struct task_struct *task, pfarg_context_t *pfx)
2424{
2425 int ctx_flags;
2426
2427 /* valid signal */
2428
2429 ctx_flags = pfx->ctx_flags;
2430
2431 if (ctx_flags & PFM_FL_SYSTEM_WIDE) {
2432
2433 /*
2434 * cannot block in this mode
2435 */
2436 if (ctx_flags & PFM_FL_NOTIFY_BLOCK) {
2437 DPRINT(("cannot use blocking mode when in system wide monitoring\n"));
2438 return -EINVAL;
2439 }
2440 } else {
2441 }
2442 /* probably more to add here */
2443
2444 return 0;
2445}
2446
2447static int
41d5e5d7 2448pfm_setup_buffer_fmt(struct task_struct *task, struct file *filp, pfm_context_t *ctx, unsigned int ctx_flags,
1da177e4
LT
2449 unsigned int cpu, pfarg_context_t *arg)
2450{
2451 pfm_buffer_fmt_t *fmt = NULL;
2452 unsigned long size = 0UL;
2453 void *uaddr = NULL;
2454 void *fmt_arg = NULL;
2455 int ret = 0;
2456#define PFM_CTXARG_BUF_ARG(a) (pfm_buffer_fmt_t *)(a+1)
2457
2458 /* invoke and lock buffer format, if found */
2459 fmt = pfm_find_buffer_fmt(arg->ctx_smpl_buf_id);
2460 if (fmt == NULL) {
2461		DPRINT(("[%d] cannot find buffer format\n", task_pid_nr(task)));
2462 return -EINVAL;
2463 }
2464
2465 /*
2466 * buffer argument MUST be contiguous to pfarg_context_t
2467 */
2468 if (fmt->fmt_arg_size) fmt_arg = PFM_CTXARG_BUF_ARG(arg);
2469
2470 ret = pfm_buf_fmt_validate(fmt, task, ctx_flags, cpu, fmt_arg);
2471
2472	DPRINT(("[%d] after validate(0x%x,%d,%p)=%d\n", task_pid_nr(task), ctx_flags, cpu, fmt_arg, ret));
2473
2474 if (ret) goto error;
2475
2476 /* link buffer format and context */
2477 ctx->ctx_buf_fmt = fmt;
2478
2479 /*
2480 * check if buffer format wants to use perfmon buffer allocation/mapping service
2481 */
2482 ret = pfm_buf_fmt_getsize(fmt, task, ctx_flags, cpu, fmt_arg, &size);
2483 if (ret) goto error;
2484
2485 if (size) {
2486 /*
2487 * buffer is always remapped into the caller's address space
2488 */
2489		ret = pfm_smpl_buffer_alloc(current, filp, ctx, size, &uaddr);
2490 if (ret) goto error;
2491
2492 /* keep track of user address of buffer */
2493 arg->ctx_smpl_vaddr = uaddr;
2494 }
2495 ret = pfm_buf_fmt_init(fmt, task, ctx->ctx_smpl_hdr, ctx_flags, cpu, fmt_arg);
2496
2497error:
2498 return ret;
2499}
2500
2501static void
2502pfm_reset_pmu_state(pfm_context_t *ctx)
2503{
2504 int i;
2505
2506 /*
2507 * install reset values for PMC.
2508 */
2509 for (i=1; PMC_IS_LAST(i) == 0; i++) {
2510 if (PMC_IS_IMPL(i) == 0) continue;
2511 ctx->ctx_pmcs[i] = PMC_DFL_VAL(i);
2512 DPRINT(("pmc[%d]=0x%lx\n", i, ctx->ctx_pmcs[i]));
2513 }
2514 /*
2515	 * PMD registers are set to 0UL when the context is memset()
2516 */
2517
2518 /*
2519 * On context switched restore, we must restore ALL pmc and ALL pmd even
2520 * when they are not actively used by the task. In UP, the incoming process
2521 * may otherwise pick up left over PMC, PMD state from the previous process.
2522 * As opposed to PMD, stale PMC can cause harm to the incoming
2523 * process because they may change what is being measured.
2524 * Therefore, we must systematically reinstall the entire
2525 * PMC state. In SMP, the same thing is possible on the
2526	 * same CPU but also between 2 CPUs.
2527 *
2528 * The problem with PMD is information leaking especially
2529 * to user level when psr.sp=0
2530 *
2531 * There is unfortunately no easy way to avoid this problem
2532 * on either UP or SMP. This definitively slows down the
2533 * pfm_load_regs() function.
2534 */
2535
2536 /*
2537 * bitmask of all PMCs accessible to this context
2538 *
2539 * PMC0 is treated differently.
2540 */
2541 ctx->ctx_all_pmcs[0] = pmu_conf->impl_pmcs[0] & ~0x1;
2542
2543 /*
2544	 * bitmask of all PMDs that are accessible to this context
2545 */
2546 ctx->ctx_all_pmds[0] = pmu_conf->impl_pmds[0];
2547
2548 DPRINT(("<%d> all_pmcs=0x%lx all_pmds=0x%lx\n", ctx->ctx_fd, ctx->ctx_all_pmcs[0],ctx->ctx_all_pmds[0]));
2549
2550 /*
2551 * useful in case of re-enable after disable
2552 */
2553 ctx->ctx_used_ibrs[0] = 0UL;
2554 ctx->ctx_used_dbrs[0] = 0UL;
2555}
2556
2557static int
2558pfm_ctx_getsize(void *arg, size_t *sz)
2559{
2560 pfarg_context_t *req = (pfarg_context_t *)arg;
2561 pfm_buffer_fmt_t *fmt;
2562
2563 *sz = 0;
2564
2565 if (!pfm_uuid_cmp(req->ctx_smpl_buf_id, pfm_null_uuid)) return 0;
2566
2567 fmt = pfm_find_buffer_fmt(req->ctx_smpl_buf_id);
2568 if (fmt == NULL) {
2569 DPRINT(("cannot find buffer format\n"));
2570 return -EINVAL;
2571 }
2572 /* get just enough to copy in user parameters */
2573 *sz = fmt->fmt_arg_size;
2574 DPRINT(("arg_size=%lu\n", *sz));
2575
2576 return 0;
2577}
2578
2579
2580
2581/*
2582 * cannot attach if :
2583 * - kernel task
2584 * - task not owned by caller
2585 * - task incompatible with context mode
2586 */
2587static int
2588pfm_task_incompatible(pfm_context_t *ctx, struct task_struct *task)
2589{
2590 /*
2591	 * no kernel task or task not owned by caller
2592 */
2593 if (task->mm == NULL) {
19c5870c 2594 DPRINT(("task [%d] has not memory context (kernel thread)\n", task_pid_nr(task)));
1da177e4
LT
2595 return -EPERM;
2596 }
2597 if (pfm_bad_permissions(task)) {
2598		DPRINT(("no permission to attach to [%d]\n", task_pid_nr(task)));
2599 return -EPERM;
2600 }
2601 /*
2602 * cannot block in self-monitoring mode
2603 */
2604 if (CTX_OVFL_NOBLOCK(ctx) == 0 && task == current) {
2605		DPRINT(("cannot load a blocking context on self for [%d]\n", task_pid_nr(task)));
2606 return -EINVAL;
2607 }
2608
2609 if (task->exit_state == EXIT_ZOMBIE) {
2610		DPRINT(("cannot attach to zombie task [%d]\n", task_pid_nr(task)));
2611 return -EBUSY;
2612 }
2613
2614 /*
2615 * always ok for self
2616 */
2617 if (task == current) return 0;
2618
2619	if (!task_is_stopped_or_traced(task)) {
2620		DPRINT(("cannot attach to non-stopped task [%d] state=%ld\n", task_pid_nr(task), task->state));
2621 return -EBUSY;
2622 }
2623 /*
2624 * make sure the task is off any CPU
2625 */
2626 wait_task_inactive(task);
2627
2628 /* more to come... */
2629
2630 return 0;
2631}
2632
2633static int
2634pfm_get_task(pfm_context_t *ctx, pid_t pid, struct task_struct **task)
2635{
2636 struct task_struct *p = current;
2637 int ret;
2638
2639 /* XXX: need to add more checks here */
2640 if (pid < 2) return -EPERM;
2641
2642	if (pid != task_pid_vnr(current)) {
2643
2644 read_lock(&tasklist_lock);
2645
2646		p = find_task_by_vpid(pid);
2647
2648 /* make sure task cannot go away while we operate on it */
2649 if (p) get_task_struct(p);
2650
2651 read_unlock(&tasklist_lock);
2652
2653 if (p == NULL) return -ESRCH;
2654 }
2655
2656 ret = pfm_task_incompatible(ctx, p);
2657 if (ret == 0) {
2658 *task = p;
2659 } else if (p != current) {
2660 pfm_put_task(p);
2661 }
2662 return ret;
2663}
2664
2665
2666
2667static int
2668pfm_context_create(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
2669{
2670 pfarg_context_t *req = (pfarg_context_t *)arg;
2671 struct file *filp;
2672 int ctx_flags;
2673 int ret;
2674
2675 /* let's check the arguments first */
2676 ret = pfarg_is_sane(current, req);
2677 if (ret < 0) return ret;
2678
2679 ctx_flags = req->ctx_flags;
2680
2681 ret = -ENOMEM;
2682
2683 ctx = pfm_context_alloc();
2684 if (!ctx) goto error;
2685
2686 ret = pfm_alloc_fd(&filp);
2687 if (ret < 0) goto error_file;
2688
2689 req->ctx_fd = ctx->ctx_fd = ret;
2690
2691 /*
2692 * attach context to file
2693 */
2694 filp->private_data = ctx;
2695
2696 /*
2697 * does the user want to sample?
2698 */
2699 if (pfm_uuid_cmp(req->ctx_smpl_buf_id, pfm_null_uuid)) {
2700		ret = pfm_setup_buffer_fmt(current, filp, ctx, ctx_flags, 0, req);
2701 if (ret) goto buffer_error;
2702 }
2703
2704 /*
2705 * init context protection lock
2706 */
2707 spin_lock_init(&ctx->ctx_lock);
2708
2709 /*
2710 * context is unloaded
2711 */
2712 ctx->ctx_state = PFM_CTX_UNLOADED;
2713
2714 /*
2715 * initialization of context's flags
2716 */
2717 ctx->ctx_fl_block = (ctx_flags & PFM_FL_NOTIFY_BLOCK) ? 1 : 0;
2718 ctx->ctx_fl_system = (ctx_flags & PFM_FL_SYSTEM_WIDE) ? 1: 0;
2719 ctx->ctx_fl_is_sampling = ctx->ctx_buf_fmt ? 1 : 0; /* assume record() is defined */
2720 ctx->ctx_fl_no_msg = (ctx_flags & PFM_FL_OVFL_NO_MSG) ? 1: 0;
2721 /*
2722 * will move to set properties
2723 * ctx->ctx_fl_excl_idle = (ctx_flags & PFM_FL_EXCL_IDLE) ? 1: 0;
2724 */
2725
2726 /*
2727 * init restart semaphore to locked
2728 */
2729	init_completion(&ctx->ctx_restart_done);
2730
2731 /*
2732 * activation is used in SMP only
2733 */
2734 ctx->ctx_last_activation = PFM_INVALID_ACTIVATION;
2735 SET_LAST_CPU(ctx, -1);
2736
2737 /*
2738 * initialize notification message queue
2739 */
2740 ctx->ctx_msgq_head = ctx->ctx_msgq_tail = 0;
2741 init_waitqueue_head(&ctx->ctx_msgq_wait);
2742 init_waitqueue_head(&ctx->ctx_zombieq);
2743
2744 DPRINT(("ctx=%p flags=0x%x system=%d notify_block=%d excl_idle=%d no_msg=%d ctx_fd=%d \n",
2745 ctx,
2746 ctx_flags,
2747 ctx->ctx_fl_system,
2748 ctx->ctx_fl_block,
2749 ctx->ctx_fl_excl_idle,
2750 ctx->ctx_fl_no_msg,
2751 ctx->ctx_fd));
2752
2753 /*
2754 * initialize soft PMU state
2755 */
2756 pfm_reset_pmu_state(ctx);
2757
2758 return 0;
2759
2760buffer_error:
2761 pfm_free_fd(ctx->ctx_fd, filp);
2762
2763 if (ctx->ctx_buf_fmt) {
2764 pfm_buf_fmt_exit(ctx->ctx_buf_fmt, current, NULL, regs);
2765 }
2766error_file:
2767 pfm_context_free(ctx);
2768
2769error:
2770 return ret;
2771}
2772
2773static inline unsigned long
2774pfm_new_counter_value (pfm_counter_t *reg, int is_long_reset)
2775{
2776 unsigned long val = is_long_reset ? reg->long_reset : reg->short_reset;
2777 unsigned long new_seed, old_seed = reg->seed, mask = reg->mask;
2778 extern unsigned long carta_random32 (unsigned long seed);
2779
2780 if (reg->flags & PFM_REGFL_RANDOM) {
2781 new_seed = carta_random32(old_seed);
2782 val -= (old_seed & mask); /* counter values are negative numbers! */
2783 if ((mask >> 32) != 0)
2784 /* construct a full 64-bit random value: */
2785 new_seed |= carta_random32(old_seed >> 32) << 32;
2786 reg->seed = new_seed;
2787 }
2788 reg->lval = val;
2789 return val;
2790}
2791
2792static void
2793pfm_reset_regs_masked(pfm_context_t *ctx, unsigned long *ovfl_regs, int is_long_reset)
2794{
2795 unsigned long mask = ovfl_regs[0];
2796 unsigned long reset_others = 0UL;
2797 unsigned long val;
2798 int i;
2799
2800 /*
2801 * now restore reset value on sampling overflowed counters
2802 */
2803 mask >>= PMU_FIRST_COUNTER;
2804 for(i = PMU_FIRST_COUNTER; mask; i++, mask >>= 1) {
2805
2806 if ((mask & 0x1UL) == 0UL) continue;
2807
2808 ctx->ctx_pmds[i].val = val = pfm_new_counter_value(ctx->ctx_pmds+ i, is_long_reset);
2809 reset_others |= ctx->ctx_pmds[i].reset_pmds[0];
2810
2811 DPRINT_ovfl((" %s reset ctx_pmds[%d]=%lx\n", is_long_reset ? "long" : "short", i, val));
2812 }
2813
2814 /*
2815 * Now take care of resetting the other registers
2816 */
2817 for(i = 0; reset_others; i++, reset_others >>= 1) {
2818
2819 if ((reset_others & 0x1) == 0) continue;
2820
2821 ctx->ctx_pmds[i].val = val = pfm_new_counter_value(ctx->ctx_pmds + i, is_long_reset);
2822
2823 DPRINT_ovfl(("%s reset_others pmd[%d]=%lx\n",
2824 is_long_reset ? "long" : "short", i, val));
2825 }
2826}
2827
2828static void
2829pfm_reset_regs(pfm_context_t *ctx, unsigned long *ovfl_regs, int is_long_reset)
2830{
2831 unsigned long mask = ovfl_regs[0];
2832 unsigned long reset_others = 0UL;
2833 unsigned long val;
2834 int i;
2835
2836 DPRINT_ovfl(("ovfl_regs=0x%lx is_long_reset=%d\n", ovfl_regs[0], is_long_reset));
2837
2838 if (ctx->ctx_state == PFM_CTX_MASKED) {
2839 pfm_reset_regs_masked(ctx, ovfl_regs, is_long_reset);
2840 return;
2841 }
2842
2843 /*
2844 * now restore reset value on sampling overflowed counters
2845 */
2846 mask >>= PMU_FIRST_COUNTER;
2847 for(i = PMU_FIRST_COUNTER; mask; i++, mask >>= 1) {
2848
2849 if ((mask & 0x1UL) == 0UL) continue;
2850
2851 val = pfm_new_counter_value(ctx->ctx_pmds+ i, is_long_reset);
2852 reset_others |= ctx->ctx_pmds[i].reset_pmds[0];
2853
2854 DPRINT_ovfl((" %s reset ctx_pmds[%d]=%lx\n", is_long_reset ? "long" : "short", i, val));
2855
2856 pfm_write_soft_counter(ctx, i, val);
2857 }
2858
2859 /*
2860 * Now take care of resetting the other registers
2861 */
2862 for(i = 0; reset_others; i++, reset_others >>= 1) {
2863
2864 if ((reset_others & 0x1) == 0) continue;
2865
2866 val = pfm_new_counter_value(ctx->ctx_pmds + i, is_long_reset);
2867
2868 if (PMD_IS_COUNTING(i)) {
2869 pfm_write_soft_counter(ctx, i, val);
2870 } else {
2871 ia64_set_pmd(i, val);
2872 }
2873 DPRINT_ovfl(("%s reset_others pmd[%d]=%lx\n",
2874 is_long_reset ? "long" : "short", i, val));
2875 }
2876 ia64_srlz_d();
2877}
2878
2879static int
2880pfm_write_pmcs(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
2881{
2882 struct task_struct *task;
2883 pfarg_reg_t *req = (pfarg_reg_t *)arg;
2884 unsigned long value, pmc_pm;
2885 unsigned long smpl_pmds, reset_pmds, impl_pmds;
2886 unsigned int cnum, reg_flags, flags, pmc_type;
2887 int i, can_access_pmu = 0, is_loaded, is_system, expert_mode;
2888 int is_monitor, is_counting, state;
2889 int ret = -EINVAL;
2890 pfm_reg_check_t wr_func;
2891#define PFM_CHECK_PMC_PM(x, y, z) ((x)->ctx_fl_system ^ PMC_PM(y, z))
2892
2893 state = ctx->ctx_state;
2894 is_loaded = state == PFM_CTX_LOADED ? 1 : 0;
2895 is_system = ctx->ctx_fl_system;
2896 task = ctx->ctx_task;
2897 impl_pmds = pmu_conf->impl_pmds[0];
2898
2899 if (state == PFM_CTX_ZOMBIE) return -EINVAL;
2900
2901 if (is_loaded) {
2902 /*
2903 * In system wide and when the context is loaded, access can only happen
2904 * when the caller is running on the CPU being monitored by the session.
2905 * It does not have to be the owner (ctx_task) of the context per se.
2906 */
2907 if (is_system && ctx->ctx_cpu != smp_processor_id()) {
2908 DPRINT(("should be running on CPU%d\n", ctx->ctx_cpu));
2909 return -EBUSY;
2910 }
2911 can_access_pmu = GET_PMU_OWNER() == task || is_system ? 1 : 0;
2912 }
2913 expert_mode = pfm_sysctl.expert_mode;
2914
2915 for (i = 0; i < count; i++, req++) {
2916
2917 cnum = req->reg_num;
2918 reg_flags = req->reg_flags;
2919 value = req->reg_value;
2920 smpl_pmds = req->reg_smpl_pmds[0];
2921 reset_pmds = req->reg_reset_pmds[0];
2922 flags = 0;
2923
2924
2925 if (cnum >= PMU_MAX_PMCS) {
2926 DPRINT(("pmc%u is invalid\n", cnum));
2927 goto error;
2928 }
2929
2930 pmc_type = pmu_conf->pmc_desc[cnum].type;
2931 pmc_pm = (value >> pmu_conf->pmc_desc[cnum].pm_pos) & 0x1;
2932 is_counting = (pmc_type & PFM_REG_COUNTING) == PFM_REG_COUNTING ? 1 : 0;
2933 is_monitor = (pmc_type & PFM_REG_MONITOR) == PFM_REG_MONITOR ? 1 : 0;
2934
2935 /*
2936	 * we reject all non-implemented PMCs as well
2937 * as attempts to modify PMC[0-3] which are used
2938 * as status registers by the PMU
2939 */
2940 if ((pmc_type & PFM_REG_IMPL) == 0 || (pmc_type & PFM_REG_CONTROL) == PFM_REG_CONTROL) {
2941 DPRINT(("pmc%u is unimplemented or no-access pmc_type=%x\n", cnum, pmc_type));
2942 goto error;
2943 }
2944 wr_func = pmu_conf->pmc_desc[cnum].write_check;
2945 /*
2946 * If the PMC is a monitor, then if the value is not the default:
2947 * - system-wide session: PMCx.pm=1 (privileged monitor)
2948 * - per-task : PMCx.pm=0 (user monitor)
2949 */
2950 if (is_monitor && value != PMC_DFL_VAL(cnum) && is_system ^ pmc_pm) {
2951 DPRINT(("pmc%u pmc_pm=%lu is_system=%d\n",
2952 cnum,
2953 pmc_pm,
2954 is_system));
2955 goto error;
2956 }
2957
2958 if (is_counting) {
2959 /*
2960 * enforce generation of overflow interrupt. Necessary on all
2961 * CPUs.
2962 */
2963 value |= 1 << PMU_PMC_OI;
2964
2965 if (reg_flags & PFM_REGFL_OVFL_NOTIFY) {
2966 flags |= PFM_REGFL_OVFL_NOTIFY;
2967 }
2968
2969 if (reg_flags & PFM_REGFL_RANDOM) flags |= PFM_REGFL_RANDOM;
2970
2971 /* verify validity of smpl_pmds */
2972 if ((smpl_pmds & impl_pmds) != smpl_pmds) {
2973 DPRINT(("invalid smpl_pmds 0x%lx for pmc%u\n", smpl_pmds, cnum));
2974 goto error;
2975 }
2976
2977 /* verify validity of reset_pmds */
2978 if ((reset_pmds & impl_pmds) != reset_pmds) {
2979 DPRINT(("invalid reset_pmds 0x%lx for pmc%u\n", reset_pmds, cnum));
2980 goto error;
2981 }
2982 } else {
2983 if (reg_flags & (PFM_REGFL_OVFL_NOTIFY|PFM_REGFL_RANDOM)) {
2984 DPRINT(("cannot set ovfl_notify or random on pmc%u\n", cnum));
2985 goto error;
2986 }
2987 /* eventid on non-counting monitors are ignored */
2988 }
2989
2990 /*
2991 * execute write checker, if any
2992 */
2993 if (likely(expert_mode == 0 && wr_func)) {
2994 ret = (*wr_func)(task, ctx, cnum, &value, regs);
2995 if (ret) goto error;
2996 ret = -EINVAL;
2997 }
2998
2999 /*
3000 * no error on this register
3001 */
3002 PFM_REG_RETFLAG_SET(req->reg_flags, 0);
3003
3004 /*
3005 * Now we commit the changes to the software state
3006 */
3007
3008 /*
3009 * update overflow information
3010 */
3011 if (is_counting) {
3012 /*
3013 * full flag update each time a register is programmed
3014 */
3015 ctx->ctx_pmds[cnum].flags = flags;
3016
3017 ctx->ctx_pmds[cnum].reset_pmds[0] = reset_pmds;
3018 ctx->ctx_pmds[cnum].smpl_pmds[0] = smpl_pmds;
3019 ctx->ctx_pmds[cnum].eventid = req->reg_smpl_eventid;
3020
3021 /*
3022 * Mark all PMDS to be accessed as used.
3023 *
3024 * We do not keep track of PMC because we have to
3025 * systematically restore ALL of them.
3026 *
3027 * We do not update the used_monitors mask, because
3028	 * if we have not programmed them, then they will be in
3029	 * a quiescent state, therefore we will not need to
3030	 * mask/restore them when the context is MASKED.
3031 */
3032 CTX_USED_PMD(ctx, reset_pmds);
3033 CTX_USED_PMD(ctx, smpl_pmds);
3034 /*
3035 * make sure we do not try to reset on
3036 * restart because we have established new values
3037 */
3038 if (state == PFM_CTX_MASKED) ctx->ctx_ovfl_regs[0] &= ~1UL << cnum;
3039 }
3040 /*
3041 * Needed in case the user does not initialize the equivalent
3042 * PMD. Clearing is done indirectly via pfm_reset_pmu_state() so there is no
3043 * possible leak here.
3044 */
3045 CTX_USED_PMD(ctx, pmu_conf->pmc_desc[cnum].dep_pmd[0]);
3046
3047 /*
3048 * keep track of the monitor PMC that we are using.
3049 * we save the value of the pmc in ctx_pmcs[] and if
3050 * the monitoring is not stopped for the context we also
3051 * place it in the saved state area so that it will be
3052 * picked up later by the context switch code.
3053 *
3054 * The value in ctx_pmcs[] can only be changed in pfm_write_pmcs().
3055 *
3056	 * The value in th_pmcs[] may be modified on overflow, i.e., when
3057 * monitoring needs to be stopped.
3058 */
3059 if (is_monitor) CTX_USED_MONITOR(ctx, 1UL << cnum);
3060
3061 /*
3062 * update context state
3063 */
3064 ctx->ctx_pmcs[cnum] = value;
3065
3066 if (is_loaded) {
3067 /*
3068 * write thread state
3069 */
3070			if (is_system == 0) ctx->th_pmcs[cnum] = value;
3071
3072 /*
3073 * write hardware register if we can
3074 */
3075 if (can_access_pmu) {
3076 ia64_set_pmc(cnum, value);
3077 }
3078#ifdef CONFIG_SMP
3079 else {
3080 /*
3081 * per-task SMP only here
3082 *
3083 * we are guaranteed that the task is not running on the other CPU,
3084 * we indicate that this PMD will need to be reloaded if the task
3085 * is rescheduled on the CPU it ran last on.
3086 */
3087 ctx->ctx_reload_pmcs[0] |= 1UL << cnum;
3088 }
3089#endif
3090 }
3091
3092 DPRINT(("pmc[%u]=0x%lx ld=%d apmu=%d flags=0x%x all_pmcs=0x%lx used_pmds=0x%lx eventid=%ld smpl_pmds=0x%lx reset_pmds=0x%lx reloads_pmcs=0x%lx used_monitors=0x%lx ovfl_regs=0x%lx\n",
3093 cnum,
3094 value,
3095 is_loaded,
3096 can_access_pmu,
3097 flags,
3098 ctx->ctx_all_pmcs[0],
3099 ctx->ctx_used_pmds[0],
3100 ctx->ctx_pmds[cnum].eventid,
3101 smpl_pmds,
3102 reset_pmds,
3103 ctx->ctx_reload_pmcs[0],
3104 ctx->ctx_used_monitors[0],
3105 ctx->ctx_ovfl_regs[0]));
3106 }
3107
3108 /*
3109 * make sure the changes are visible
3110 */
3111 if (can_access_pmu) ia64_srlz_d();
3112
3113 return 0;
3114error:
3115 PFM_REG_RETFLAG_SET(req->reg_flags, PFM_REG_RETFL_EINVAL);
3116 return ret;
3117}
3118
3119static int
3120pfm_write_pmds(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
3121{
3122 struct task_struct *task;
3123 pfarg_reg_t *req = (pfarg_reg_t *)arg;
3124 unsigned long value, hw_value, ovfl_mask;
3125 unsigned int cnum;
3126 int i, can_access_pmu = 0, state;
3127 int is_counting, is_loaded, is_system, expert_mode;
3128 int ret = -EINVAL;
3129 pfm_reg_check_t wr_func;
3130
3131
3132 state = ctx->ctx_state;
3133 is_loaded = state == PFM_CTX_LOADED ? 1 : 0;
3134 is_system = ctx->ctx_fl_system;
3135 ovfl_mask = pmu_conf->ovfl_val;
3136 task = ctx->ctx_task;
3137
3138 if (unlikely(state == PFM_CTX_ZOMBIE)) return -EINVAL;
3139
3140 /*
3141 * on both UP and SMP, we can only write to the PMC when the task is
3142 * the owner of the local PMU.
3143 */
3144 if (likely(is_loaded)) {
3145 /*
3146 * In system wide and when the context is loaded, access can only happen
3147 * when the caller is running on the CPU being monitored by the session.
3148 * It does not have to be the owner (ctx_task) of the context per se.
3149 */
3150 if (unlikely(is_system && ctx->ctx_cpu != smp_processor_id())) {
3151 DPRINT(("should be running on CPU%d\n", ctx->ctx_cpu));
3152 return -EBUSY;
3153 }
3154 can_access_pmu = GET_PMU_OWNER() == task || is_system ? 1 : 0;
3155 }
3156 expert_mode = pfm_sysctl.expert_mode;
3157
3158 for (i = 0; i < count; i++, req++) {
3159
3160 cnum = req->reg_num;
3161 value = req->reg_value;
3162
3163 if (!PMD_IS_IMPL(cnum)) {
3164 DPRINT(("pmd[%u] is unimplemented or invalid\n", cnum));
3165 goto abort_mission;
3166 }
3167 is_counting = PMD_IS_COUNTING(cnum);
3168 wr_func = pmu_conf->pmd_desc[cnum].write_check;
3169
3170 /*
3171 * execute write checker, if any
3172 */
3173 if (unlikely(expert_mode == 0 && wr_func)) {
3174 unsigned long v = value;
3175
3176 ret = (*wr_func)(task, ctx, cnum, &v, regs);
3177 if (ret) goto abort_mission;
3178
3179 value = v;
3180 ret = -EINVAL;
3181 }
3182
3183 /*
3184 * no error on this register
3185 */
3186 PFM_REG_RETFLAG_SET(req->reg_flags, 0);
3187
3188 /*
3189 * now commit changes to software state
3190 */
3191 hw_value = value;
3192
3193 /*
3194 * update virtualized (64bits) counter
3195 */
3196 if (is_counting) {
3197 /*
3198 * write context state
3199 */
3200 ctx->ctx_pmds[cnum].lval = value;
3201
3202 /*
3203			 * when the context is loaded we use the split value
3204 */
3205 if (is_loaded) {
3206 hw_value = value & ovfl_mask;
3207 value = value & ~ovfl_mask;
3208 }
3209 }
3210 /*
3211 * update reset values (not just for counters)
3212 */
3213 ctx->ctx_pmds[cnum].long_reset = req->reg_long_reset;
3214 ctx->ctx_pmds[cnum].short_reset = req->reg_short_reset;
3215
3216 /*
3217 * update randomization parameters (not just for counters)
3218 */
3219 ctx->ctx_pmds[cnum].seed = req->reg_random_seed;
3220 ctx->ctx_pmds[cnum].mask = req->reg_random_mask;
3221
3222 /*
3223 * update context value
3224 */
3225 ctx->ctx_pmds[cnum].val = value;
3226
3227 /*
3228 * Keep track of what we use
3229 *
3230 * We do not keep track of PMC because we have to
3231 * systematically restore ALL of them.
3232 */
3233 CTX_USED_PMD(ctx, PMD_PMD_DEP(cnum));
3234
3235 /*
3236 * mark this PMD register used as well
3237 */
3238 CTX_USED_PMD(ctx, RDEP(cnum));
3239
3240 /*
3241 * make sure we do not try to reset on
3242 * restart because we have established new values
3243 */
3244 if (is_counting && state == PFM_CTX_MASKED) {
3245 ctx->ctx_ovfl_regs[0] &= ~1UL << cnum;
3246 }
3247
3248 if (is_loaded) {
3249 /*
3250 * write thread state
3251 */
3252			if (is_system == 0) ctx->th_pmds[cnum] = hw_value;
3253
3254 /*
3255 * write hardware register if we can
3256 */
3257 if (can_access_pmu) {
3258 ia64_set_pmd(cnum, hw_value);
3259 } else {
3260#ifdef CONFIG_SMP
3261 /*
3262 * we are guaranteed that the task is not running on the other CPU,
3263 * we indicate that this PMD will need to be reloaded if the task
3264 * is rescheduled on the CPU it ran last on.
3265 */
3266 ctx->ctx_reload_pmds[0] |= 1UL << cnum;
3267#endif
3268 }
3269 }
3270
3271 DPRINT(("pmd[%u]=0x%lx ld=%d apmu=%d, hw_value=0x%lx ctx_pmd=0x%lx short_reset=0x%lx "
3272 "long_reset=0x%lx notify=%c seed=0x%lx mask=0x%lx used_pmds=0x%lx reset_pmds=0x%lx reload_pmds=0x%lx all_pmds=0x%lx ovfl_regs=0x%lx\n",
3273 cnum,
3274 value,
3275 is_loaded,
3276 can_access_pmu,
3277 hw_value,
3278 ctx->ctx_pmds[cnum].val,
3279 ctx->ctx_pmds[cnum].short_reset,
3280 ctx->ctx_pmds[cnum].long_reset,
3281 PMC_OVFL_NOTIFY(ctx, cnum) ? 'Y':'N',
3282 ctx->ctx_pmds[cnum].seed,
3283 ctx->ctx_pmds[cnum].mask,
3284 ctx->ctx_used_pmds[0],
3285 ctx->ctx_pmds[cnum].reset_pmds[0],
3286 ctx->ctx_reload_pmds[0],
3287 ctx->ctx_all_pmds[0],
3288 ctx->ctx_ovfl_regs[0]));
3289 }
3290
3291 /*
3292 * make changes visible
3293 */
3294 if (can_access_pmu) ia64_srlz_d();
3295
3296 return 0;
3297
3298abort_mission:
3299 /*
3300 * for now, we have only one possibility for error
3301 */
3302 PFM_REG_RETFLAG_SET(req->reg_flags, PFM_REG_RETFL_EINVAL);
3303 return ret;
3304}
3305
3306/*
3307	 * By way of PROTECT_CTX(), interrupts are masked while we are in this function.
3308	 * Therefore we know we do not have to worry about the PMU overflow interrupt. If an
3309	 * interrupt is delivered during the call, it will be kept pending until we leave, making
3310	 * it appear as if it had been generated at UNPROTECT_CTX() time. At least we are
3311 * guaranteed to return consistent data to the user, it may simply be old. It is not
3312 * trivial to treat the overflow while inside the call because you may end up in
3313 * some module sampling buffer code causing deadlocks.
3314 */
3315static int
3316pfm_read_pmds(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
3317{
3318 struct task_struct *task;
3319 unsigned long val = 0UL, lval, ovfl_mask, sval;
3320 pfarg_reg_t *req = (pfarg_reg_t *)arg;
3321 unsigned int cnum, reg_flags = 0;
3322 int i, can_access_pmu = 0, state;
3323 int is_loaded, is_system, is_counting, expert_mode;
3324 int ret = -EINVAL;
3325 pfm_reg_check_t rd_func;
3326
3327 /*
3328 * access is possible when loaded only for
3329 * self-monitoring tasks or in UP mode
3330 */
3331
3332 state = ctx->ctx_state;
3333 is_loaded = state == PFM_CTX_LOADED ? 1 : 0;
3334 is_system = ctx->ctx_fl_system;
3335 ovfl_mask = pmu_conf->ovfl_val;
3336 task = ctx->ctx_task;
3337
3338 if (state == PFM_CTX_ZOMBIE) return -EINVAL;
3339
3340 if (likely(is_loaded)) {
3341 /*
3342 * In system wide and when the context is loaded, access can only happen
3343 * when the caller is running on the CPU being monitored by the session.
3344 * It does not have to be the owner (ctx_task) of the context per se.
3345 */
3346 if (unlikely(is_system && ctx->ctx_cpu != smp_processor_id())) {
3347 DPRINT(("should be running on CPU%d\n", ctx->ctx_cpu));
3348 return -EBUSY;
3349 }
3350 /*
3351 * this can be true when not self-monitoring only in UP
3352 */
3353 can_access_pmu = GET_PMU_OWNER() == task || is_system ? 1 : 0;
3354
3355 if (can_access_pmu) ia64_srlz_d();
3356 }
3357 expert_mode = pfm_sysctl.expert_mode;
3358
3359 DPRINT(("ld=%d apmu=%d ctx_state=%d\n",
3360 is_loaded,
3361 can_access_pmu,
3362 state));
3363
3364 /*
3365 * on both UP and SMP, we can only read the PMD from the hardware register when
3366 * the task is the owner of the local PMU.
3367 */
3368
3369 for (i = 0; i < count; i++, req++) {
3370
3371 cnum = req->reg_num;
3372 reg_flags = req->reg_flags;
3373
3374 if (unlikely(!PMD_IS_IMPL(cnum))) goto error;
3375 /*
3376 * we can only read the register that we use. That includes
3377	 * the one we explicitly initialize AND the one we want included
3378 * in the sampling buffer (smpl_regs).
3379 *
3380 * Having this restriction allows optimization in the ctxsw routine
3381 * without compromising security (leaks)
3382 */
3383 if (unlikely(!CTX_IS_USED_PMD(ctx, cnum))) goto error;
3384
3385 sval = ctx->ctx_pmds[cnum].val;
3386 lval = ctx->ctx_pmds[cnum].lval;
3387 is_counting = PMD_IS_COUNTING(cnum);
3388
3389 /*
3390 * If the task is not the current one, then we check if the
3391 * PMU state is still in the local live register due to lazy ctxsw.
3392 * If true, then we read directly from the registers.
3393 */
3394 if (can_access_pmu){
3395 val = ia64_get_pmd(cnum);
3396 } else {
3397 /*
3398 * context has been saved
3399 * if context is zombie, then task does not exist anymore.
3400 * In this case, we use the full value saved in the context (pfm_flush_regs()).
3401 */
3402			val = is_loaded ? ctx->th_pmds[cnum] : 0UL;
3403 }
3404 rd_func = pmu_conf->pmd_desc[cnum].read_check;
3405
3406 if (is_counting) {
3407 /*
3408 * XXX: need to check for overflow when loaded
3409 */
3410 val &= ovfl_mask;
3411 val += sval;
3412 }
3413
3414 /*
3415 * execute read checker, if any
3416 */
3417 if (unlikely(expert_mode == 0 && rd_func)) {
3418 unsigned long v = val;
3419 ret = (*rd_func)(ctx->ctx_task, ctx, cnum, &v, regs);
3420 if (ret) goto error;
3421 val = v;
3422 ret = -EINVAL;
3423 }
3424
3425 PFM_REG_RETFLAG_SET(reg_flags, 0);
3426
3427 DPRINT(("pmd[%u]=0x%lx\n", cnum, val));
3428
3429 /*
3430 * update register return value, abort all if problem during copy.
3431 * we only modify the reg_flags field. no check mode is fine because
3432 * access has been verified upfront in sys_perfmonctl().
3433 */
3434 req->reg_value = val;
3435 req->reg_flags = reg_flags;
3436 req->reg_last_reset_val = lval;
3437 }
3438
3439 return 0;
3440
3441error:
3442 PFM_REG_RETFLAG_SET(req->reg_flags, PFM_REG_RETFL_EINVAL);
3443 return ret;
3444}
3445
3446int
3447pfm_mod_write_pmcs(struct task_struct *task, void *req, unsigned int nreq, struct pt_regs *regs)
3448{
3449 pfm_context_t *ctx;
3450
3451 if (req == NULL) return -EINVAL;
3452
3453 ctx = GET_PMU_CTX();
3454
3455 if (ctx == NULL) return -EINVAL;
3456
3457 /*
3458 * for now limit to current task, which is enough when calling
3459 * from overflow handler
3460 */
3461 if (task != current && ctx->ctx_fl_system == 0) return -EBUSY;
3462
3463 return pfm_write_pmcs(ctx, req, nreq, regs);
3464}
3465EXPORT_SYMBOL(pfm_mod_write_pmcs);
3466
3467int
3468pfm_mod_read_pmds(struct task_struct *task, void *req, unsigned int nreq, struct pt_regs *regs)
3469{
3470 pfm_context_t *ctx;
3471
3472 if (req == NULL) return -EINVAL;
3473
3474 ctx = GET_PMU_CTX();
3475
3476 if (ctx == NULL) return -EINVAL;
3477
3478 /*
3479 * for now limit to current task, which is enough when calling
3480 * from overflow handler
3481 */
3482 if (task != current && ctx->ctx_fl_system == 0) return -EBUSY;
3483
3484 return pfm_read_pmds(ctx, req, nreq, regs);
3485}
3486EXPORT_SYMBOL(pfm_mod_read_pmds);
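/*
 * Editor's sketch (not part of the original file): how a sampling-format
 * module might use the exported helper above, reading one PMD of the current
 * task from its overflow handler. The request layout (pfarg_reg_t) is the
 * same one used by the user-level interface; the zero fallback on error is
 * hypothetical.
 */
static unsigned long read_one_pmd(unsigned int cnum, struct pt_regs *regs)
{
	pfarg_reg_t req = { .reg_num = cnum };	/* remaining fields zeroed */

	if (pfm_mod_read_pmds(current, &req, 1, regs))
		return 0UL;	/* hypothetical fallback on error */

	return req.reg_value;
}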
3487
3488/*
3489	 * Only call this function when a process is trying to
3490 * write the debug registers (reading is always allowed)
3491 */
3492int
3493pfm_use_debug_registers(struct task_struct *task)
3494{
3495 pfm_context_t *ctx = task->thread.pfm_context;
3496 unsigned long flags;
3497 int ret = 0;
3498
3499 if (pmu_conf->use_rr_dbregs == 0) return 0;
3500
19c5870c 3501 DPRINT(("called for [%d]\n", task_pid_nr(task)));
1da177e4
LT
3502
3503 /*
3504 * do it only once
3505 */
3506 if (task->thread.flags & IA64_THREAD_DBG_VALID) return 0;
3507
3508 /*
3509 * Even on SMP, we do not need to use an atomic here because
3510 * the only way in is via ptrace() and this is possible only when the
3511 * process is stopped. Even in the case where the ctxsw out is not totally
3512 * completed by the time we come here, there is no way the 'stopped' process
3513 * could be in the middle of fiddling with the pfm_write_ibr_dbr() routine.
3514 * So this is always safe.
3515 */
3516 if (ctx && ctx->ctx_fl_using_dbreg == 1) return -1;
3517
3518 LOCK_PFS(flags);
3519
3520 /*
3521 * We cannot allow setting breakpoints when system wide monitoring
3522 * sessions are using the debug registers.
3523 */
3524	if (pfm_sessions.pfs_sys_use_dbregs > 0)
3525 ret = -1;
3526 else
3527 pfm_sessions.pfs_ptrace_use_dbregs++;
3528
3529 DPRINT(("ptrace_use_dbregs=%u sys_use_dbregs=%u by [%d] ret = %d\n",
3530 pfm_sessions.pfs_ptrace_use_dbregs,
3531 pfm_sessions.pfs_sys_use_dbregs,
3532		task_pid_nr(task), ret));
3533
3534 UNLOCK_PFS(flags);
3535
3536 return ret;
3537}
3538
3539/*
3540 * This function is called for every task that exits with the
3541 * IA64_THREAD_DBG_VALID set. This indicates a task which was
3542 * able to use the debug registers for debugging purposes via
3543 * ptrace(). Therefore we know it was not using them for
3544	 * performance monitoring, so we only decrement the number
3545 * of "ptraced" debug register users to keep the count up to date
3546 */
3547int
3548pfm_release_debug_registers(struct task_struct *task)
3549{
3550 unsigned long flags;
3551 int ret;
3552
3553 if (pmu_conf->use_rr_dbregs == 0) return 0;
3554
3555 LOCK_PFS(flags);
3556 if (pfm_sessions.pfs_ptrace_use_dbregs == 0) {
3557		printk(KERN_ERR "perfmon: invalid release for [%d] ptrace_use_dbregs=0\n", task_pid_nr(task));
3558 ret = -1;
3559 } else {
3560 pfm_sessions.pfs_ptrace_use_dbregs--;
3561 ret = 0;
3562 }
3563 UNLOCK_PFS(flags);
3564
3565 return ret;
3566}
3567
3568static int
3569pfm_restart(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
3570{
3571 struct task_struct *task;
3572 pfm_buffer_fmt_t *fmt;
3573 pfm_ovfl_ctrl_t rst_ctrl;
3574 int state, is_system;
3575 int ret = 0;
3576
3577 state = ctx->ctx_state;
3578 fmt = ctx->ctx_buf_fmt;
3579 is_system = ctx->ctx_fl_system;
3580 task = PFM_CTX_TASK(ctx);
3581
3582 switch(state) {
3583 case PFM_CTX_MASKED:
3584 break;
3585 case PFM_CTX_LOADED:
3586 if (CTX_HAS_SMPL(ctx) && fmt->fmt_restart_active) break;
3587 /* fall through */
3588 case PFM_CTX_UNLOADED:
3589 case PFM_CTX_ZOMBIE:
3590 DPRINT(("invalid state=%d\n", state));
3591 return -EBUSY;
3592 default:
3593 DPRINT(("state=%d, cannot operate (no active_restart handler)\n", state));
3594 return -EINVAL;
3595 }
3596
3597 /*
3598 * In system wide and when the context is loaded, access can only happen
3599 * when the caller is running on the CPU being monitored by the session.
3600 * It does not have to be the owner (ctx_task) of the context per se.
3601 */
3602 if (is_system && ctx->ctx_cpu != smp_processor_id()) {
3603 DPRINT(("should be running on CPU%d\n", ctx->ctx_cpu));
3604 return -EBUSY;
3605 }
3606
3607 /* sanity check */
3608 if (unlikely(task == NULL)) {
3609		printk(KERN_ERR "perfmon: [%d] pfm_restart no task\n", task_pid_nr(current));
3610 return -EINVAL;
3611 }
3612
3613 if (task == current || is_system) {
3614
3615 fmt = ctx->ctx_buf_fmt;
3616
3617 DPRINT(("restarting self %d ovfl=0x%lx\n",
3618			task_pid_nr(task),
3619 ctx->ctx_ovfl_regs[0]));
3620
3621 if (CTX_HAS_SMPL(ctx)) {
3622
3623 prefetch(ctx->ctx_smpl_hdr);
3624
3625 rst_ctrl.bits.mask_monitoring = 0;
3626 rst_ctrl.bits.reset_ovfl_pmds = 0;
3627
3628 if (state == PFM_CTX_LOADED)
3629 ret = pfm_buf_fmt_restart_active(fmt, task, &rst_ctrl, ctx->ctx_smpl_hdr, regs);
3630 else
3631 ret = pfm_buf_fmt_restart(fmt, task, &rst_ctrl, ctx->ctx_smpl_hdr, regs);
3632 } else {
3633 rst_ctrl.bits.mask_monitoring = 0;
3634 rst_ctrl.bits.reset_ovfl_pmds = 1;
3635 }
3636
3637 if (ret == 0) {
3638 if (rst_ctrl.bits.reset_ovfl_pmds)
3639 pfm_reset_regs(ctx, ctx->ctx_ovfl_regs, PFM_PMD_LONG_RESET);
3640
3641 if (rst_ctrl.bits.mask_monitoring == 0) {
19c5870c 3642 DPRINT(("resuming monitoring for [%d]\n", task_pid_nr(task)));
1da177e4
LT
3643
3644 if (state == PFM_CTX_MASKED) pfm_restore_monitoring(task);
3645 } else {
19c5870c 3646 DPRINT(("keeping monitoring stopped for [%d]\n", task_pid_nr(task)));
1da177e4
LT
3647
3648 // cannot use pfm_stop_monitoring(task, regs);
3649 }
3650 }
3651 /*
3652 * clear overflowed PMD mask to remove any stale information
3653 */
3654 ctx->ctx_ovfl_regs[0] = 0UL;
3655
3656 /*
3657 * back to LOADED state
3658 */
3659 ctx->ctx_state = PFM_CTX_LOADED;
3660
3661 /*
3662 * XXX: not really useful for self monitoring
3663 */
3664 ctx->ctx_fl_can_restart = 0;
3665
3666 return 0;
3667 }
3668
3669 /*
3670 * restart another task
3671 */
3672
3673 /*
3674 * When PFM_CTX_MASKED, we cannot issue a restart before the previous
3675 * one is seen by the task.
3676 */
3677 if (state == PFM_CTX_MASKED) {
3678 if (ctx->ctx_fl_can_restart == 0) return -EINVAL;
3679 /*
3680 * will prevent subsequent restart before this one is
3681 * seen by other task
3682 */
3683 ctx->ctx_fl_can_restart = 0;
3684 }
3685
3686 /*
3687	 * if blocking, then post the semaphore if PFM_CTX_MASKED, i.e.
3688 * the task is blocked or on its way to block. That's the normal
3689 * restart path. If the monitoring is not masked, then the task
3690 * can be actively monitoring and we cannot directly intervene.
3691 * Therefore we use the trap mechanism to catch the task and
3692 * force it to reset the buffer/reset PMDs.
3693 *
3694 * if non-blocking, then we ensure that the task will go into
3695 * pfm_handle_work() before returning to user mode.
3696 *
3697	 * We cannot explicitly reset another task, it MUST always
3698 * be done by the task itself. This works for system wide because
3699 * the tool that is controlling the session is logically doing
3700 * "self-monitoring".
3701 */
3702 if (CTX_OVFL_NOBLOCK(ctx) == 0 && state == PFM_CTX_MASKED) {
19c5870c 3703 DPRINT(("unblocking [%d] \n", task_pid_nr(task)));
60f1c444 3704 complete(&ctx->ctx_restart_done);
1da177e4 3705 } else {
19c5870c 3706 DPRINT(("[%d] armed exit trap\n", task_pid_nr(task)));
1da177e4
LT
3707
3708 ctx->ctx_fl_trap_reason = PFM_TRAP_REASON_RESET;
3709
3710 PFM_SET_WORK_PENDING(task, 1);
3711
3712		tsk_set_notify_resume(task);
3713
3714 /*
3715 * XXX: send reschedule if task runs on another CPU
3716 */
3717 }
3718 return 0;
3719}
3720
3721static int
3722pfm_debug(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
3723{
3724 unsigned int m = *(unsigned int *)arg;
3725
3726 pfm_sysctl.debug = m == 0 ? 0 : 1;
3727
3728 printk(KERN_INFO "perfmon debugging %s (timing reset)\n", pfm_sysctl.debug ? "on" : "off");
3729
3730 if (m == 0) {
3731 memset(pfm_stats, 0, sizeof(pfm_stats));
3732 for(m=0; m < NR_CPUS; m++) pfm_stats[m].pfm_ovfl_intr_cycles_min = ~0UL;
3733 }
3734 return 0;
3735}
3736
3737/*
3738 * arg can be NULL and count can be zero for this function
3739 */
3740static int
3741pfm_write_ibr_dbr(int mode, pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
3742{
3743 struct thread_struct *thread = NULL;
3744 struct task_struct *task;
3745 pfarg_dbreg_t *req = (pfarg_dbreg_t *)arg;
3746 unsigned long flags;
3747 dbreg_t dbreg;
3748 unsigned int rnum;
3749 int first_time;
3750 int ret = 0, state;
3751 int i, can_access_pmu = 0;
3752 int is_system, is_loaded;
3753
3754 if (pmu_conf->use_rr_dbregs == 0) return -EINVAL;
3755
3756 state = ctx->ctx_state;
3757 is_loaded = state == PFM_CTX_LOADED ? 1 : 0;
3758 is_system = ctx->ctx_fl_system;
3759 task = ctx->ctx_task;
3760
3761 if (state == PFM_CTX_ZOMBIE) return -EINVAL;
3762
3763 /*
3764 * on both UP and SMP, we can only write to the PMC when the task is
3765 * the owner of the local PMU.
3766 */
3767 if (is_loaded) {
3768 thread = &task->thread;
3769 /*
3770 * In system wide and when the context is loaded, access can only happen
3771 * when the caller is running on the CPU being monitored by the session.
3772 * It does not have to be the owner (ctx_task) of the context per se.
3773 */
3774 if (unlikely(is_system && ctx->ctx_cpu != smp_processor_id())) {
3775 DPRINT(("should be running on CPU%d\n", ctx->ctx_cpu));
3776 return -EBUSY;
3777 }
3778 can_access_pmu = GET_PMU_OWNER() == task || is_system ? 1 : 0;
3779 }
3780
3781 /*
3782 * we do not need to check for ipsr.db because we do clear ibr.x, dbr.r, and dbr.w
3783 * ensuring that no real breakpoint can be installed via this call.
3784 *
3785 * IMPORTANT: regs can be NULL in this function
3786 */
3787
3788 first_time = ctx->ctx_fl_using_dbreg == 0;
3789
3790 /*
3791 * don't bother if we are loaded and task is being debugged
3792 */
3793 if (is_loaded && (thread->flags & IA64_THREAD_DBG_VALID) != 0) {
19c5870c 3794 DPRINT(("debug registers already in use for [%d]\n", task_pid_nr(task)));
1da177e4
LT
3795 return -EBUSY;
3796 }
3797
3798 /*
3799 * check for debug registers in system wide mode
3800 *
3801	 * Even though a check is done in pfm_context_load(),
3802 * we must repeat it here, in case the registers are
3803 * written after the context is loaded
3804 */
3805 if (is_loaded) {
3806 LOCK_PFS(flags);
3807
3808 if (first_time && is_system) {
3809 if (pfm_sessions.pfs_ptrace_use_dbregs)
3810 ret = -EBUSY;
3811 else
3812 pfm_sessions.pfs_sys_use_dbregs++;
3813 }
3814 UNLOCK_PFS(flags);
3815 }
3816
3817 if (ret != 0) return ret;
3818
3819 /*
3820 * mark ourself as user of the debug registers for
3821 * perfmon purposes.
3822 */
3823 ctx->ctx_fl_using_dbreg = 1;
3824
3825 /*
3826 * clear hardware registers to make sure we don't
3827 * pick up stale state.
3828 *
3829 * for a system wide session, we do not use
3830 * thread.dbr, thread.ibr because this process
3831 * never leaves the current CPU and the state
3832 * is shared by all processes running on it
3833 */
3834 if (first_time && can_access_pmu) {
19c5870c 3835 DPRINT(("[%d] clearing ibrs, dbrs\n", task_pid_nr(task)));
3836 for (i=0; i < pmu_conf->num_ibrs; i++) {
3837 ia64_set_ibr(i, 0UL);
3838 ia64_dv_serialize_instruction();
3839 }
3840 ia64_srlz_i();
3841 for (i=0; i < pmu_conf->num_dbrs; i++) {
3842 ia64_set_dbr(i, 0UL);
3843 ia64_dv_serialize_data();
3844 }
3845 ia64_srlz_d();
3846 }
3847
3848 /*
3849 * Now install the values into the registers
3850 */
3851 for (i = 0; i < count; i++, req++) {
3852
3853 rnum = req->dbreg_num;
3854 dbreg.val = req->dbreg_value;
3855
3856 ret = -EINVAL;
3857
3858 if ((mode == PFM_CODE_RR && rnum >= PFM_NUM_IBRS) || ((mode == PFM_DATA_RR) && rnum >= PFM_NUM_DBRS)) {
3859 DPRINT(("invalid register %u val=0x%lx mode=%d i=%d count=%d\n",
3860 rnum, dbreg.val, mode, i, count));
3861
3862 goto abort_mission;
3863 }
3864
3865 /*
3866 * make sure we do not install enabled breakpoint
3867 */
3868 if (rnum & 0x1) {
3869 if (mode == PFM_CODE_RR)
3870 dbreg.ibr.ibr_x = 0;
3871 else
3872 dbreg.dbr.dbr_r = dbreg.dbr.dbr_w = 0;
3873 }
3874
3875 PFM_REG_RETFLAG_SET(req->dbreg_flags, 0);
3876
3877 /*
3878 * Debug registers, just like PMC, can only be modified
3879	 * by a kernel call. Moreover, perfmon() accesses to those
3880 * registers are centralized in this routine. The hardware
3881 * does not modify the value of these registers, therefore,
3882 * if we save them as they are written, we can avoid having
3883 * to save them on context switch out. This is made possible
3884 * by the fact that when perfmon uses debug registers, ptrace()
3885 * won't be able to modify them concurrently.
3886 */
3887 if (mode == PFM_CODE_RR) {
3888 CTX_USED_IBR(ctx, rnum);
3889
3890 if (can_access_pmu) {
3891 ia64_set_ibr(rnum, dbreg.val);
3892 ia64_dv_serialize_instruction();
3893 }
3894
3895 ctx->ctx_ibrs[rnum] = dbreg.val;
3896
3897 DPRINT(("write ibr%u=0x%lx used_ibrs=0x%x ld=%d apmu=%d\n",
3898 rnum, dbreg.val, ctx->ctx_used_ibrs[0], is_loaded, can_access_pmu));
3899 } else {
3900 CTX_USED_DBR(ctx, rnum);
3901
3902 if (can_access_pmu) {
3903 ia64_set_dbr(rnum, dbreg.val);
3904 ia64_dv_serialize_data();
3905 }
3906 ctx->ctx_dbrs[rnum] = dbreg.val;
3907
3908 DPRINT(("write dbr%u=0x%lx used_dbrs=0x%x ld=%d apmu=%d\n",
3909 rnum, dbreg.val, ctx->ctx_used_dbrs[0], is_loaded, can_access_pmu));
3910 }
3911 }
3912
3913 return 0;
3914
3915abort_mission:
3916 /*
3917 * in case it was our first attempt, we undo the global modifications
3918 */
3919 if (first_time) {
3920 LOCK_PFS(flags);
3921 if (ctx->ctx_fl_system) {
3922 pfm_sessions.pfs_sys_use_dbregs--;
3923 }
3924 UNLOCK_PFS(flags);
3925 ctx->ctx_fl_using_dbreg = 0;
3926 }
3927 /*
3928 * install error return flag
3929 */
3930 PFM_REG_RETFLAG_SET(req->dbreg_flags, PFM_REG_RETFL_EINVAL);
3931
3932 return ret;
3933}
3934
3935static int
3936pfm_write_ibrs(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
3937{
3938 return pfm_write_ibr_dbr(PFM_CODE_RR, ctx, arg, count, regs);
3939}
3940
3941static int
3942pfm_write_dbrs(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
3943{
3944 return pfm_write_ibr_dbr(PFM_DATA_RR, ctx, arg, count, regs);
3945}
3946
3947int
3948pfm_mod_write_ibrs(struct task_struct *task, void *req, unsigned int nreq, struct pt_regs *regs)
3949{
3950 pfm_context_t *ctx;
3951
3952 if (req == NULL) return -EINVAL;
3953
3954 ctx = GET_PMU_CTX();
3955
3956 if (ctx == NULL) return -EINVAL;
3957
3958 /*
3959 * for now limit to current task, which is enough when calling
3960 * from overflow handler
3961 */
3962 if (task != current && ctx->ctx_fl_system == 0) return -EBUSY;
3963
3964 return pfm_write_ibrs(ctx, req, nreq, regs);
3965}
3966EXPORT_SYMBOL(pfm_mod_write_ibrs);
3967
3968int
3969pfm_mod_write_dbrs(struct task_struct *task, void *req, unsigned int nreq, struct pt_regs *regs)
3970{
3971 pfm_context_t *ctx;
3972
3973 if (req == NULL) return -EINVAL;
3974
3975 ctx = GET_PMU_CTX();
3976
3977 if (ctx == NULL) return -EINVAL;
3978
3979 /*
3980 * for now limit to current task, which is enough when calling
3981 * from overflow handler
3982 */
3983 if (task != current && ctx->ctx_fl_system == 0) return -EBUSY;
3984
3985 return pfm_write_dbrs(ctx, req, nreq, regs);
3986}
3987EXPORT_SYMBOL(pfm_mod_write_dbrs);
3988
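/*
 * Minimal sketch of how an in-kernel sampling module might use the
 * exported helpers from its overflow handler, where it runs as current
 * so the task == current restriction above is satisfied. Illustrative
 * only; the register number chosen is arbitrary.
 *
 *	pfarg_dbreg_t req = { .dbreg_num = 0, .dbreg_value = 0UL };
 *	int err = pfm_mod_write_ibrs(current, &req, 1, regs);
 */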
3989
3990static int
3991pfm_get_features(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
3992{
3993 pfarg_features_t *req = (pfarg_features_t *)arg;
3994
3995 req->ft_version = PFM_VERSION;
3996 return 0;
3997}
3998
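/*
 * User-level sketch: checking the interface version before programming
 * anything (assumes PFM_GET_FEATURES and pfarg_features_t from the
 * perfmon user headers; the command takes no fd, see pfm_cmd_tab below):
 *
 *	pfarg_features_t ft;
 *	if (perfmonctl(0, PFM_GET_FEATURES, &ft, 1) == 0)
 *		compare ft.ft_version against the PFM_VERSION compiled in
 */
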
3999static int
4000pfm_stop(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
4001{
4002 struct pt_regs *tregs;
4003 struct task_struct *task = PFM_CTX_TASK(ctx);
4004 int state, is_system;
4005
4006 state = ctx->ctx_state;
4007 is_system = ctx->ctx_fl_system;
4008
4009 /*
4010	 * context must be attached to issue the stop command (includes LOADED, MASKED, ZOMBIE)
4011 */
4012 if (state == PFM_CTX_UNLOADED) return -EINVAL;
4013
4014 /*
4015 * In system wide and when the context is loaded, access can only happen
4016 * when the caller is running on the CPU being monitored by the session.
4017 * It does not have to be the owner (ctx_task) of the context per se.
4018 */
4019 if (is_system && ctx->ctx_cpu != smp_processor_id()) {
4020 DPRINT(("should be running on CPU%d\n", ctx->ctx_cpu));
4021 return -EBUSY;
4022 }
4023 DPRINT(("task [%d] ctx_state=%d is_system=%d\n",
19c5870c 4024 task_pid_nr(PFM_CTX_TASK(ctx)),
4025 state,
4026 is_system));
4027 /*
4028 * in system mode, we need to update the PMU directly
4029 * and the user level state of the caller, which may not
4030 * necessarily be the creator of the context.
4031 */
4032 if (is_system) {
4033 /*
4034 * Update local PMU first
4035 *
4036 * disable dcr pp
4037 */
4038 ia64_setreg(_IA64_REG_CR_DCR, ia64_getreg(_IA64_REG_CR_DCR) & ~IA64_DCR_PP);
4039 ia64_srlz_i();
4040
4041 /*
4042 * update local cpuinfo
4043 */
4044 PFM_CPUINFO_CLEAR(PFM_CPUINFO_DCR_PP);
4045
4046 /*
4047 * stop monitoring, does srlz.i
4048 */
4049 pfm_clear_psr_pp();
4050
4051 /*
4052 * stop monitoring in the caller
4053 */
4054 ia64_psr(regs)->pp = 0;
4055
4056 return 0;
4057 }
4058 /*
4059 * per-task mode
4060 */
4061
4062 if (task == current) {
4063 /* stop monitoring at kernel level */
4064 pfm_clear_psr_up();
4065
4066 /*
4067 * stop monitoring at the user level
4068 */
4069 ia64_psr(regs)->up = 0;
4070 } else {
6450578f 4071 tregs = task_pt_regs(task);
4072
4073 /*
4074 * stop monitoring at the user level
4075 */
4076 ia64_psr(tregs)->up = 0;
4077
4078 /*
4079 * monitoring disabled in kernel at next reschedule
4080 */
4081 ctx->ctx_saved_psr_up = 0;
19c5870c 4082 DPRINT(("task=[%d]\n", task_pid_nr(task)));
4083 }
4084 return 0;
4085}
4086
4087
4088static int
4089pfm_start(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
4090{
4091 struct pt_regs *tregs;
4092 int state, is_system;
4093
4094 state = ctx->ctx_state;
4095 is_system = ctx->ctx_fl_system;
4096
4097 if (state != PFM_CTX_LOADED) return -EINVAL;
4098
4099 /*
4100 * In system wide and when the context is loaded, access can only happen
4101 * when the caller is running on the CPU being monitored by the session.
4102 * It does not have to be the owner (ctx_task) of the context per se.
4103 */
4104 if (is_system && ctx->ctx_cpu != smp_processor_id()) {
4105 DPRINT(("should be running on CPU%d\n", ctx->ctx_cpu));
4106 return -EBUSY;
4107 }
4108
4109 /*
4110 * in system mode, we need to update the PMU directly
4111 * and the user level state of the caller, which may not
4112 * necessarily be the creator of the context.
4113 */
4114 if (is_system) {
4115
4116 /*
4117 * set user level psr.pp for the caller
4118 */
4119 ia64_psr(regs)->pp = 1;
4120
4121 /*
4122 * now update the local PMU and cpuinfo
4123 */
4124 PFM_CPUINFO_SET(PFM_CPUINFO_DCR_PP);
4125
4126 /*
4127 * start monitoring at kernel level
4128 */
4129 pfm_set_psr_pp();
4130
4131 /* enable dcr pp */
4132 ia64_setreg(_IA64_REG_CR_DCR, ia64_getreg(_IA64_REG_CR_DCR) | IA64_DCR_PP);
4133 ia64_srlz_i();
4134
4135 return 0;
4136 }
4137
4138 /*
4139 * per-process mode
4140 */
4141
4142 if (ctx->ctx_task == current) {
4143
4144 /* start monitoring at kernel level */
4145 pfm_set_psr_up();
4146
4147 /*
4148 * activate monitoring at user level
4149 */
4150 ia64_psr(regs)->up = 1;
4151
4152 } else {
6450578f 4153 tregs = task_pt_regs(ctx->ctx_task);
4154
4155 /*
4156 * start monitoring at the kernel level the next
4157 * time the task is scheduled
4158 */
4159 ctx->ctx_saved_psr_up = IA64_PSR_UP;
4160
4161 /*
4162 * activate monitoring at user level
4163 */
4164 ia64_psr(tregs)->up = 1;
4165 }
4166 return 0;
4167}
4168
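/*
 * Typical self-monitoring sequence once a context is loaded on the
 * caller, sketched at user level (command names assumed from the
 * perfmon user headers):
 *
 *	perfmonctl(ctx_fd, PFM_START, NULL, 0);
 *	... run the workload being measured ...
 *	perfmonctl(ctx_fd, PFM_STOP, NULL, 0);
 *	perfmonctl(ctx_fd, PFM_READ_PMDS, pd, npd);
 */
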
4169static int
4170pfm_get_pmc_reset(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
4171{
4172 pfarg_reg_t *req = (pfarg_reg_t *)arg;
4173 unsigned int cnum;
4174 int i;
4175 int ret = -EINVAL;
4176
4177 for (i = 0; i < count; i++, req++) {
4178
4179 cnum = req->reg_num;
4180
4181 if (!PMC_IS_IMPL(cnum)) goto abort_mission;
4182
4183 req->reg_value = PMC_DFL_VAL(cnum);
4184
4185 PFM_REG_RETFLAG_SET(req->reg_flags, 0);
4186
4187 DPRINT(("pmc_reset_val pmc[%u]=0x%lx\n", cnum, req->reg_value));
4188 }
4189 return 0;
4190
4191abort_mission:
4192 PFM_REG_RETFLAG_SET(req->reg_flags, PFM_REG_RETFL_EINVAL);
4193 return ret;
4194}
4195
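/*
 * User-level sketch: fetching default PMC values, one pfarg_reg_t entry
 * per register (PFM_GET_PMC_RESET_VAL is the assumed command name; the
 * register numbers are arbitrary examples):
 *
 *	pfarg_reg_t pc[2] = { { .reg_num = 4 }, { .reg_num = 5 } };
 *	perfmonctl(0, PFM_GET_PMC_RESET_VAL, pc, 2);
 */
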
4196static int
4197pfm_check_task_exist(pfm_context_t *ctx)
4198{
4199 struct task_struct *g, *t;
4200 int ret = -ESRCH;
4201
4202 read_lock(&tasklist_lock);
4203
4204 do_each_thread (g, t) {
4205 if (t->thread.pfm_context == ctx) {
4206 ret = 0;
6794c752 4207 goto out;
4208 }
4209 } while_each_thread (g, t);
6794c752 4210out:
4211 read_unlock(&tasklist_lock);
4212
4213 DPRINT(("pfm_check_task_exist: ret=%d ctx=%p\n", ret, ctx));
4214
4215 return ret;
4216}
4217
4218static int
4219pfm_context_load(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
4220{
4221 struct task_struct *task;
4222 struct thread_struct *thread;
4223 struct pfm_context_t *old;
4224 unsigned long flags;
4225#ifndef CONFIG_SMP
4226 struct task_struct *owner_task = NULL;
4227#endif
4228 pfarg_load_t *req = (pfarg_load_t *)arg;
4229 unsigned long *pmcs_source, *pmds_source;
4230 int the_cpu;
4231 int ret = 0;
4232 int state, is_system, set_dbregs = 0;
4233
4234 state = ctx->ctx_state;
4235 is_system = ctx->ctx_fl_system;
4236 /*
4237 * can only load from unloaded or terminated state
4238 */
4239 if (state != PFM_CTX_UNLOADED) {
4240 DPRINT(("cannot load to [%d], invalid ctx_state=%d\n",
4241 req->load_pid,
4242 ctx->ctx_state));
a5a70b75 4243 return -EBUSY;
4244 }
4245
4246 DPRINT(("load_pid [%d] using_dbreg=%d\n", req->load_pid, ctx->ctx_fl_using_dbreg));
4247
4248 if (CTX_OVFL_NOBLOCK(ctx) == 0 && req->load_pid == current->pid) {
4249 DPRINT(("cannot use blocking mode on self\n"));
4250 return -EINVAL;
4251 }
4252
4253 ret = pfm_get_task(ctx, req->load_pid, &task);
4254 if (ret) {
4255 DPRINT(("load_pid [%d] get_task=%d\n", req->load_pid, ret));
4256 return ret;
4257 }
4258
4259 ret = -EINVAL;
4260
4261 /*
4262 * system wide is self monitoring only
4263 */
4264 if (is_system && task != current) {
4265 DPRINT(("system wide is self monitoring only load_pid=%d\n",
4266 req->load_pid));
4267 goto error;
4268 }
4269
4270 thread = &task->thread;
4271
4272 ret = 0;
4273 /*
4274 * cannot load a context which is using range restrictions,
4275 * into a task that is being debugged.
4276 */
4277 if (ctx->ctx_fl_using_dbreg) {
4278 if (thread->flags & IA64_THREAD_DBG_VALID) {
4279 ret = -EBUSY;
4280 DPRINT(("load_pid [%d] task is debugged, cannot load range restrictions\n", req->load_pid));
4281 goto error;
4282 }
4283 LOCK_PFS(flags);
4284
4285 if (is_system) {
4286 if (pfm_sessions.pfs_ptrace_use_dbregs) {
4287 DPRINT(("cannot load [%d] dbregs in use\n",
4288 task_pid_nr(task)));
4289 ret = -EBUSY;
4290 } else {
4291 pfm_sessions.pfs_sys_use_dbregs++;
19c5870c 4292 DPRINT(("load [%d] increased sys_use_dbreg=%u\n", task_pid_nr(task), pfm_sessions.pfs_sys_use_dbregs));
4293 set_dbregs = 1;
4294 }
4295 }
4296
4297 UNLOCK_PFS(flags);
4298
4299 if (ret) goto error;
4300 }
4301
4302 /*
4303 * SMP system-wide monitoring implies self-monitoring.
4304 *
4305 * The programming model expects the task to
4306 * be pinned on a CPU throughout the session.
4307 * Here we take note of the current CPU at the
4308 * time the context is loaded. No call from
4309 * another CPU will be allowed.
4310 *
4311	 * The pinning via sched_setaffinity()
4312 * must be done by the calling task prior
4313 * to this call.
4314 *
4315 * systemwide: keep track of CPU this session is supposed to run on
4316 */
4317 the_cpu = ctx->ctx_cpu = smp_processor_id();
4318
4319 ret = -EBUSY;
4320 /*
4321 * now reserve the session
4322 */
4323 ret = pfm_reserve_session(current, is_system, the_cpu);
4324 if (ret) goto error;
4325
4326 /*
4327 * task is necessarily stopped at this point.
4328 *
4329 * If the previous context was zombie, then it got removed in
4330 * pfm_save_regs(). Therefore we should not see it here.
4331 * If we see a context, then this is an active context
4332 *
4333 * XXX: needs to be atomic
4334 */
4335 DPRINT(("before cmpxchg() old_ctx=%p new_ctx=%p\n",
4336 thread->pfm_context, ctx));
4337
6bf11e8c 4338 ret = -EBUSY;
4339 old = ia64_cmpxchg(acq, &thread->pfm_context, NULL, ctx, sizeof(pfm_context_t *));
4340 if (old != NULL) {
4341 DPRINT(("load_pid [%d] already has a context\n", req->load_pid));
4342 goto error_unres;
4343 }
4344
4345 pfm_reset_msgq(ctx);
4346
4347 ctx->ctx_state = PFM_CTX_LOADED;
4348
4349 /*
4350 * link context to task
4351 */
4352 ctx->ctx_task = task;
4353
4354 if (is_system) {
4355 /*
4356 * we load as stopped
4357 */
4358 PFM_CPUINFO_SET(PFM_CPUINFO_SYST_WIDE);
4359 PFM_CPUINFO_CLEAR(PFM_CPUINFO_DCR_PP);
4360
4361 if (ctx->ctx_fl_excl_idle) PFM_CPUINFO_SET(PFM_CPUINFO_EXCL_IDLE);
4362 } else {
4363 thread->flags |= IA64_THREAD_PM_VALID;
4364 }
4365
4366 /*
4367 * propagate into thread-state
4368 */
4369 pfm_copy_pmds(task, ctx);
4370 pfm_copy_pmcs(task, ctx);
4371
4372 pmcs_source = ctx->th_pmcs;
4373 pmds_source = ctx->th_pmds;
4374
4375 /*
4376 * always the case for system-wide
4377 */
4378 if (task == current) {
4379
4380 if (is_system == 0) {
4381
4382 /* allow user level control */
4383 ia64_psr(regs)->sp = 0;
19c5870c 4384 DPRINT(("clearing psr.sp for [%d]\n", task_pid_nr(task)));
4385
4386 SET_LAST_CPU(ctx, smp_processor_id());
4387 INC_ACTIVATION();
4388 SET_ACTIVATION(ctx);
4389#ifndef CONFIG_SMP
4390 /*
4391 * push the other task out, if any
4392 */
4393 owner_task = GET_PMU_OWNER();
4394 if (owner_task) pfm_lazy_save_regs(owner_task);
4395#endif
4396 }
4397 /*
4398 * load all PMD from ctx to PMU (as opposed to thread state)
4399 * restore all PMC from ctx to PMU
4400 */
4401 pfm_restore_pmds(pmds_source, ctx->ctx_all_pmds[0]);
4402 pfm_restore_pmcs(pmcs_source, ctx->ctx_all_pmcs[0]);
4403
4404 ctx->ctx_reload_pmcs[0] = 0UL;
4405 ctx->ctx_reload_pmds[0] = 0UL;
4406
4407 /*
4408 * guaranteed safe by earlier check against DBG_VALID
4409 */
4410 if (ctx->ctx_fl_using_dbreg) {
4411 pfm_restore_ibrs(ctx->ctx_ibrs, pmu_conf->num_ibrs);
4412 pfm_restore_dbrs(ctx->ctx_dbrs, pmu_conf->num_dbrs);
4413 }
4414 /*
4415 * set new ownership
4416 */
4417 SET_PMU_OWNER(task, ctx);
4418
19c5870c 4419 DPRINT(("context loaded on PMU for [%d]\n", task_pid_nr(task)));
4420 } else {
4421 /*
4422 * when not current, task MUST be stopped, so this is safe
4423 */
6450578f 4424 regs = task_pt_regs(task);
4425
4426 /* force a full reload */
4427 ctx->ctx_last_activation = PFM_INVALID_ACTIVATION;
4428 SET_LAST_CPU(ctx, -1);
4429
4430 /* initial saved psr (stopped) */
4431 ctx->ctx_saved_psr_up = 0UL;
4432 ia64_psr(regs)->up = ia64_psr(regs)->pp = 0;
4433 }
4434
4435 ret = 0;
4436
4437error_unres:
4438 if (ret) pfm_unreserve_session(ctx, ctx->ctx_fl_system, the_cpu);
4439error:
4440 /*
4441 * we must undo the dbregs setting (for system-wide)
4442 */
4443 if (ret && set_dbregs) {
4444 LOCK_PFS(flags);
4445 pfm_sessions.pfs_sys_use_dbregs--;
4446 UNLOCK_PFS(flags);
4447 }
4448 /*
4449 * release task, there is now a link with the context
4450 */
4451 if (is_system == 0 && task != current) {
4452 pfm_put_task(task);
4453
4454 if (ret == 0) {
4455 ret = pfm_check_task_exist(ctx);
4456 if (ret) {
4457 ctx->ctx_state = PFM_CTX_UNLOADED;
4458 ctx->ctx_task = NULL;
4459 }
4460 }
4461 }
4462 return ret;
4463}
4464
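/*
 * User-level counterpart of the load operation, sketched with assumed
 * command and type names from the perfmon user headers. The target must
 * be ptrace-stopped unless load_pid == getpid(), per the checks above:
 *
 *	pfarg_load_t load = { .load_pid = target_pid };
 *	perfmonctl(ctx_fd, PFM_LOAD_CONTEXT, &load, 1);
 */
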
4465/*
4466 * in this function, we do not need to increase the use count
4467 * for the task via get_task_struct(), because we hold the
4468 * context lock. If the task were to disappear while having
4469 * a context attached, it would go through pfm_exit_thread()
4470 * which also grabs the context lock and would therefore be blocked
4471 * until we are here.
4472 */
4473static void pfm_flush_pmds(struct task_struct *, pfm_context_t *ctx);
4474
4475static int
4476pfm_context_unload(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
4477{
4478 struct task_struct *task = PFM_CTX_TASK(ctx);
4479 struct pt_regs *tregs;
4480 int prev_state, is_system;
4481 int ret;
4482
19c5870c 4483 DPRINT(("ctx_state=%d task [%d]\n", ctx->ctx_state, task ? task_pid_nr(task) : -1));
4484
4485 prev_state = ctx->ctx_state;
4486 is_system = ctx->ctx_fl_system;
4487
4488 /*
4489 * unload only when necessary
4490 */
4491 if (prev_state == PFM_CTX_UNLOADED) {
4492 DPRINT(("ctx_state=%d, nothing to do\n", prev_state));
4493 return 0;
4494 }
4495
4496 /*
4497 * clear psr and dcr bits
4498 */
4499 ret = pfm_stop(ctx, NULL, 0, regs);
4500 if (ret) return ret;
4501
4502 ctx->ctx_state = PFM_CTX_UNLOADED;
4503
4504 /*
4505 * in system mode, we need to update the PMU directly
4506 * and the user level state of the caller, which may not
4507 * necessarily be the creator of the context.
4508 */
4509 if (is_system) {
4510
4511 /*
4512 * Update cpuinfo
4513 *
4514 * local PMU is taken care of in pfm_stop()
4515 */
4516 PFM_CPUINFO_CLEAR(PFM_CPUINFO_SYST_WIDE);
4517 PFM_CPUINFO_CLEAR(PFM_CPUINFO_EXCL_IDLE);
4518
4519 /*
4520 * save PMDs in context
4521 * release ownership
4522 */
4523 pfm_flush_pmds(current, ctx);
4524
4525 /*
4526 * at this point we are done with the PMU
4527 * so we can unreserve the resource.
4528 */
4529 if (prev_state != PFM_CTX_ZOMBIE)
4530 pfm_unreserve_session(ctx, 1 , ctx->ctx_cpu);
4531
4532 /*
4533 * disconnect context from task
4534 */
4535 task->thread.pfm_context = NULL;
4536 /*
4537 * disconnect task from context
4538 */
4539 ctx->ctx_task = NULL;
4540
4541 /*
4542 * There is nothing more to cleanup here.
4543 */
4544 return 0;
4545 }
4546
4547 /*
4548 * per-task mode
4549 */
6450578f 4550 tregs = task == current ? regs : task_pt_regs(task);
4551
4552 if (task == current) {
4553 /*
4554 * cancel user level control
4555 */
4556 ia64_psr(regs)->sp = 1;
4557
19c5870c 4558 DPRINT(("setting psr.sp for [%d]\n", task_pid_nr(task)));
4559 }
4560 /*
4561 * save PMDs to context
4562 * release ownership
4563 */
4564 pfm_flush_pmds(task, ctx);
4565
4566 /*
4567 * at this point we are done with the PMU
4568 * so we can unreserve the resource.
4569 *
4570 * when state was ZOMBIE, we have already unreserved.
4571 */
4572 if (prev_state != PFM_CTX_ZOMBIE)
4573 pfm_unreserve_session(ctx, 0 , ctx->ctx_cpu);
4574
4575 /*
4576 * reset activation counter and psr
4577 */
4578 ctx->ctx_last_activation = PFM_INVALID_ACTIVATION;
4579 SET_LAST_CPU(ctx, -1);
4580
4581 /*
4582 * PMU state will not be restored
4583 */
4584 task->thread.flags &= ~IA64_THREAD_PM_VALID;
4585
4586 /*
4587 * break links between context and task
4588 */
4589 task->thread.pfm_context = NULL;
4590 ctx->ctx_task = NULL;
4591
4592 PFM_SET_WORK_PENDING(task, 0);
4593
4594 ctx->ctx_fl_trap_reason = PFM_TRAP_REASON_NONE;
4595 ctx->ctx_fl_can_restart = 0;
4596 ctx->ctx_fl_going_zombie = 0;
4597
19c5870c 4598 DPRINT(("disconnected [%d] from context\n", task_pid_nr(task)));
4599
4600 return 0;
4601}
4602
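/*
 * User-level counterpart (sketch, command name assumed): unload takes
 * no argument, matching the PFM_CMD_S() entry in pfm_cmd_tab below:
 *
 *	perfmonctl(ctx_fd, PFM_UNLOAD_CONTEXT, NULL, 0);
 */
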
4603
4604/*
4605 * called only from exit_thread(): task == current
4606 * we come here only if current has a context attached (loaded or masked)
4607 */
4608void
4609pfm_exit_thread(struct task_struct *task)
4610{
4611 pfm_context_t *ctx;
4612 unsigned long flags;
6450578f 4613 struct pt_regs *regs = task_pt_regs(task);
4614 int ret, state;
4615 int free_ok = 0;
4616
4617 ctx = PFM_GET_CTX(task);
4618
4619 PROTECT_CTX(ctx, flags);
4620
19c5870c 4621 DPRINT(("state=%d task [%d]\n", ctx->ctx_state, task_pid_nr(task)));
4622
4623 state = ctx->ctx_state;
4624 switch(state) {
4625 case PFM_CTX_UNLOADED:
4626 /*
72fdbdce 4627 * only comes to this function if pfm_context is not NULL, i.e., cannot
4628 * be in unloaded state
4629 */
19c5870c 4630 printk(KERN_ERR "perfmon: pfm_exit_thread [%d] ctx unloaded\n", task_pid_nr(task));
4631 break;
4632 case PFM_CTX_LOADED:
4633 case PFM_CTX_MASKED:
4634 ret = pfm_context_unload(ctx, NULL, 0, regs);
4635 if (ret) {
19c5870c 4636 printk(KERN_ERR "perfmon: pfm_exit_thread [%d] state=%d unload failed %d\n", task_pid_nr(task), state, ret);
4637 }
4638 DPRINT(("ctx unloaded for current state was %d\n", state));
4639
4640 pfm_end_notify_user(ctx);
4641 break;
4642 case PFM_CTX_ZOMBIE:
4643 ret = pfm_context_unload(ctx, NULL, 0, regs);
4644 if (ret) {
19c5870c 4645 printk(KERN_ERR "perfmon: pfm_exit_thread [%d] state=%d unload failed %d\n", task_pid_nr(task), state, ret);
4646 }
4647 free_ok = 1;
4648 break;
4649 default:
19c5870c 4650 printk(KERN_ERR "perfmon: pfm_exit_thread [%d] unexpected state=%d\n", task_pid_nr(task), state);
4651 break;
4652 }
4653 UNPROTECT_CTX(ctx, flags);
4654
4655 { u64 psr = pfm_get_psr();
4656 BUG_ON(psr & (IA64_PSR_UP|IA64_PSR_PP));
4657 BUG_ON(GET_PMU_OWNER());
4658 BUG_ON(ia64_psr(regs)->up);
4659 BUG_ON(ia64_psr(regs)->pp);
4660 }
4661
4662 /*
4663 * All memory free operations (especially for vmalloc'ed memory)
4664 * MUST be done with interrupts ENABLED.
4665 */
4666 if (free_ok) pfm_context_free(ctx);
4667}
4668
4669/*
4670 * functions MUST be listed in the increasing order of their index (see perfmon.h)
4671 */
4672#define PFM_CMD(name, flags, arg_count, arg_type, getsz) { name, #name, flags, arg_count, sizeof(arg_type), getsz }
4673#define PFM_CMD_S(name, flags) { name, #name, flags, 0, 0, NULL }
4674#define PFM_CMD_PCLRWS (PFM_CMD_FD|PFM_CMD_ARG_RW|PFM_CMD_STOP)
4675#define PFM_CMD_PCLRW (PFM_CMD_FD|PFM_CMD_ARG_RW)
4676#define PFM_CMD_NONE { NULL, "no-cmd", 0, 0, 0, NULL}
4677
4678static pfm_cmd_desc_t pfm_cmd_tab[]={
4679/* 0 */PFM_CMD_NONE,
4680/* 1 */PFM_CMD(pfm_write_pmcs, PFM_CMD_PCLRWS, PFM_CMD_ARG_MANY, pfarg_reg_t, NULL),
4681/* 2 */PFM_CMD(pfm_write_pmds, PFM_CMD_PCLRWS, PFM_CMD_ARG_MANY, pfarg_reg_t, NULL),
4682/* 3 */PFM_CMD(pfm_read_pmds, PFM_CMD_PCLRWS, PFM_CMD_ARG_MANY, pfarg_reg_t, NULL),
4683/* 4 */PFM_CMD_S(pfm_stop, PFM_CMD_PCLRWS),
4684/* 5 */PFM_CMD_S(pfm_start, PFM_CMD_PCLRWS),
4685/* 6 */PFM_CMD_NONE,
4686/* 7 */PFM_CMD_NONE,
4687/* 8 */PFM_CMD(pfm_context_create, PFM_CMD_ARG_RW, 1, pfarg_context_t, pfm_ctx_getsize),
4688/* 9 */PFM_CMD_NONE,
4689/* 10 */PFM_CMD_S(pfm_restart, PFM_CMD_PCLRW),
4690/* 11 */PFM_CMD_NONE,
4691/* 12 */PFM_CMD(pfm_get_features, PFM_CMD_ARG_RW, 1, pfarg_features_t, NULL),
4692/* 13 */PFM_CMD(pfm_debug, 0, 1, unsigned int, NULL),
4693/* 14 */PFM_CMD_NONE,
4694/* 15 */PFM_CMD(pfm_get_pmc_reset, PFM_CMD_ARG_RW, PFM_CMD_ARG_MANY, pfarg_reg_t, NULL),
4695/* 16 */PFM_CMD(pfm_context_load, PFM_CMD_PCLRWS, 1, pfarg_load_t, NULL),
4696/* 17 */PFM_CMD_S(pfm_context_unload, PFM_CMD_PCLRWS),
4697/* 18 */PFM_CMD_NONE,
4698/* 19 */PFM_CMD_NONE,
4699/* 20 */PFM_CMD_NONE,
4700/* 21 */PFM_CMD_NONE,
4701/* 22 */PFM_CMD_NONE,
4702/* 23 */PFM_CMD_NONE,
4703/* 24 */PFM_CMD_NONE,
4704/* 25 */PFM_CMD_NONE,
4705/* 26 */PFM_CMD_NONE,
4706/* 27 */PFM_CMD_NONE,
4707/* 28 */PFM_CMD_NONE,
4708/* 29 */PFM_CMD_NONE,
4709/* 30 */PFM_CMD_NONE,
4710/* 31 */PFM_CMD_NONE,
4711/* 32 */PFM_CMD(pfm_write_ibrs, PFM_CMD_PCLRWS, PFM_CMD_ARG_MANY, pfarg_dbreg_t, NULL),
4712/* 33 */PFM_CMD(pfm_write_dbrs, PFM_CMD_PCLRWS, PFM_CMD_ARG_MANY, pfarg_dbreg_t, NULL)
4713};
4714#define PFM_CMD_COUNT (sizeof(pfm_cmd_tab)/sizeof(pfm_cmd_desc_t))
4715
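/*
 * The dispatcher below indexes this table directly: a command is valid
 * only if it is in range and its cmd_func is non-NULL, so the holes in
 * the command space (PFM_CMD_NONE entries) fail with -EINVAL, e.g.:
 *
 *	if (cmd < 0 || cmd >= PFM_CMD_COUNT) return -EINVAL;
 *	func = pfm_cmd_tab[cmd].cmd_func;
 *	if (func == NULL) return -EINVAL;
 */
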
4716static int
4717pfm_check_task_state(pfm_context_t *ctx, int cmd, unsigned long flags)
4718{
4719 struct task_struct *task;
4720 int state, old_state;
4721
4722recheck:
4723 state = ctx->ctx_state;
4724 task = ctx->ctx_task;
4725
4726 if (task == NULL) {
4727 DPRINT(("context %d no task, state=%d\n", ctx->ctx_fd, state));
4728 return 0;
4729 }
4730
4731 DPRINT(("context %d state=%d [%d] task_state=%ld must_stop=%d\n",
4732 ctx->ctx_fd,
4733 state,
19c5870c 4734 task_pid_nr(task),
4735 task->state, PFM_CMD_STOPPED(cmd)));
4736
4737 /*
4738 * self-monitoring always ok.
4739 *
4740 * for system-wide the caller can either be the creator of the
4741	 * context (the one to which the context is attached) OR
4742 * a task running on the same CPU as the session.
4743 */
4744 if (task == current || ctx->ctx_fl_system) return 0;
4745
4746 /*
a5a70b75 4747 * we are monitoring another thread
1da177e4 4748 */
a5a70b75 4749 switch(state) {
4750 case PFM_CTX_UNLOADED:
4751 /*
4752 * if context is UNLOADED we are safe to go
4753 */
4754 return 0;
4755 case PFM_CTX_ZOMBIE:
4756 /*
4757 * no command can operate on a zombie context
4758 */
4759 DPRINT(("cmd %d state zombie cannot operate on context\n", cmd));
4760 return -EINVAL;
4761 case PFM_CTX_MASKED:
4762 /*
4763 * PMU state has been saved to software even though
4764 * the thread may still be running.
4765 */
4766 if (cmd != PFM_UNLOAD_CONTEXT) return 0;
4767 }
4768
4769 /*
4770 * context is LOADED or MASKED. Some commands may need to have
4771 * the task stopped.
4772 *
4773 * We could lift this restriction for UP but it would mean that
4774 * the user has no guarantee the task would not run between
4775 * two successive calls to perfmonctl(). That's probably OK.
4776 * If this user wants to ensure the task does not run, then
4777 * the task must be stopped.
4778 */
4779 if (PFM_CMD_STOPPED(cmd)) {
21498223 4780 if (!task_is_stopped_or_traced(task)) {
19c5870c 4781 DPRINT(("[%d] task not in stopped state\n", task_pid_nr(task)));
4782 return -EBUSY;
4783 }
4784 /*
4785 * task is now stopped, wait for ctxsw out
4786 *
4787 * This is an interesting point in the code.
4788 * We need to unprotect the context because
4789	 * the pfm_save_regs() routine needs to grab
4790	 * the same lock. There is a danger in doing
4791 * this because it leaves a window open for
4792 * another task to get access to the context
4793 * and possibly change its state. The one thing
4794 * that is not possible is for the context to disappear
4795 * because we are protected by the VFS layer, i.e.,
4796 * get_fd()/put_fd().
4797 */
4798 old_state = state;
4799
4800 UNPROTECT_CTX(ctx, flags);
4801
4802 wait_task_inactive(task);
4803
4804 PROTECT_CTX(ctx, flags);
4805
4806 /*
4807 * we must recheck to verify if state has changed
4808 */
4809 if (ctx->ctx_state != old_state) {
4810 DPRINT(("old_state=%d new_state=%d\n", old_state, ctx->ctx_state));
4811 goto recheck;
4812 }
4813 }
4814 return 0;
4815}
4816
4817/*
4818 * system-call entry point (must return long)
4819 */
4820asmlinkage long
4821sys_perfmonctl (int fd, int cmd, void __user *arg, int count)
4822{
4823 struct file *file = NULL;
4824 pfm_context_t *ctx = NULL;
4825 unsigned long flags = 0UL;
4826 void *args_k = NULL;
4827 long ret; /* will expand int return types */
4828 size_t base_sz, sz, xtra_sz = 0;
4829 int narg, completed_args = 0, call_made = 0, cmd_flags;
4830 int (*func)(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs);
4831 int (*getsize)(void *arg, size_t *sz);
4832#define PFM_MAX_ARGSIZE 4096
4833
4834 /*
4835 * reject any call if perfmon was disabled at initialization
4836 */
4837 if (unlikely(pmu_conf == NULL)) return -ENOSYS;
4838
4839 if (unlikely(cmd < 0 || cmd >= PFM_CMD_COUNT)) {
4840 DPRINT(("invalid cmd=%d\n", cmd));
4841 return -EINVAL;
4842 }
4843
4844 func = pfm_cmd_tab[cmd].cmd_func;
4845 narg = pfm_cmd_tab[cmd].cmd_narg;
4846 base_sz = pfm_cmd_tab[cmd].cmd_argsize;
4847 getsize = pfm_cmd_tab[cmd].cmd_getsize;
4848 cmd_flags = pfm_cmd_tab[cmd].cmd_flags;
4849
4850 if (unlikely(func == NULL)) {
4851 DPRINT(("invalid cmd=%d\n", cmd));
4852 return -EINVAL;
4853 }
4854
4855 DPRINT(("cmd=%s idx=%d narg=0x%x argsz=%lu count=%d\n",
4856 PFM_CMD_NAME(cmd),
4857 cmd,
4858 narg,
4859 base_sz,
4860 count));
4861
4862 /*
4863 * check if number of arguments matches what the command expects
4864 */
4865 if (unlikely((narg == PFM_CMD_ARG_MANY && count <= 0) || (narg > 0 && narg != count)))
4866 return -EINVAL;
4867
4868restart_args:
4869 sz = xtra_sz + base_sz*count;
4870 /*
4871 * limit abuse to min page size
4872 */
4873 if (unlikely(sz > PFM_MAX_ARGSIZE)) {
19c5870c 4874 printk(KERN_ERR "perfmon: [%d] argument too big %lu\n", task_pid_nr(current), sz);
4875 return -E2BIG;
4876 }
4877
4878 /*
4879 * allocate default-sized argument buffer
4880 */
4881 if (likely(count && args_k == NULL)) {
4882 args_k = kmalloc(PFM_MAX_ARGSIZE, GFP_KERNEL);
4883 if (args_k == NULL) return -ENOMEM;
4884 }
4885
4886 ret = -EFAULT;
4887
4888 /*
4889 * copy arguments
4890 *
4891 * assume sz = 0 for command without parameters
4892 */
4893 if (sz && copy_from_user(args_k, arg, sz)) {
4894 DPRINT(("cannot copy_from_user %lu bytes @%p\n", sz, arg));
4895 goto error_args;
4896 }
4897
4898 /*
4899 * check if command supports extra parameters
4900 */
4901 if (completed_args == 0 && getsize) {
4902 /*
4903 * get extra parameters size (based on main argument)
4904 */
4905 ret = (*getsize)(args_k, &xtra_sz);
4906 if (ret) goto error_args;
4907
4908 completed_args = 1;
4909
4910 DPRINT(("restart_args sz=%lu xtra_sz=%lu\n", sz, xtra_sz));
4911
4912 /* retry if necessary */
4913 if (likely(xtra_sz)) goto restart_args;
4914 }
4915
4916 if (unlikely((cmd_flags & PFM_CMD_FD) == 0)) goto skip_fd;
4917
4918 ret = -EBADF;
4919
4920 file = fget(fd);
4921 if (unlikely(file == NULL)) {
4922 DPRINT(("invalid fd %d\n", fd));
4923 goto error_args;
4924 }
4925 if (unlikely(PFM_IS_FILE(file) == 0)) {
4926 DPRINT(("fd %d not related to perfmon\n", fd));
4927 goto error_args;
4928 }
4929
4930 ctx = (pfm_context_t *)file->private_data;
4931 if (unlikely(ctx == NULL)) {
4932 DPRINT(("no context for fd %d\n", fd));
4933 goto error_args;
4934 }
4935 prefetch(&ctx->ctx_state);
4936
4937 PROTECT_CTX(ctx, flags);
4938
4939 /*
4940 * check task is stopped
4941 */
4942 ret = pfm_check_task_state(ctx, cmd, flags);
4943 if (unlikely(ret)) goto abort_locked;
4944
4945skip_fd:
6450578f 4946 ret = (*func)(ctx, args_k, count, task_pt_regs(current));
4947
4948 call_made = 1;
4949
4950abort_locked:
4951 if (likely(ctx)) {
4952 DPRINT(("context unlocked\n"));
4953 UNPROTECT_CTX(ctx, flags);
4954 }
4955
4956 /* copy argument back to user, if needed */
4957 if (call_made && PFM_CMD_RW_ARG(cmd) && copy_to_user(arg, args_k, base_sz*count)) ret = -EFAULT;
4958
4959error_args:
4960 if (file)
4961 fput(file);
4962
b2325fe1 4963 kfree(args_k);
4964
4965 DPRINT(("cmd=%s ret=%ld\n", PFM_CMD_NAME(cmd), ret));
4966
4967 return ret;
4968}
4969
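/*
 * Shape of a complete user-level session built on this entry point
 * (sketch; pfarg_context_t layout and command names are assumed from
 * the perfmon user headers, and error handling is elided):
 *
 *	pfarg_context_t c = { .ctx_flags = 0 };
 *	perfmonctl(0, PFM_CREATE_CONTEXT, &c, 1);	returns fd in c.ctx_fd
 *	perfmonctl(c.ctx_fd, PFM_WRITE_PMCS, pc, npc);
 *	perfmonctl(c.ctx_fd, PFM_WRITE_PMDS, pd, npd);
 *	then PFM_LOAD_CONTEXT / PFM_START as sketched earlier
 */
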
4970static void
4971pfm_resume_after_ovfl(pfm_context_t *ctx, unsigned long ovfl_regs, struct pt_regs *regs)
4972{
4973 pfm_buffer_fmt_t *fmt = ctx->ctx_buf_fmt;
4974 pfm_ovfl_ctrl_t rst_ctrl;
4975 int state;
4976 int ret = 0;
4977
4978 state = ctx->ctx_state;
4979 /*
4980 * Unlock sampling buffer and reset index atomically
4981 * XXX: not really needed when blocking
4982 */
4983 if (CTX_HAS_SMPL(ctx)) {
4984
4985 rst_ctrl.bits.mask_monitoring = 0;
4986 rst_ctrl.bits.reset_ovfl_pmds = 0;
4987
4988 if (state == PFM_CTX_LOADED)
4989 ret = pfm_buf_fmt_restart_active(fmt, current, &rst_ctrl, ctx->ctx_smpl_hdr, regs);
4990 else
4991 ret = pfm_buf_fmt_restart(fmt, current, &rst_ctrl, ctx->ctx_smpl_hdr, regs);
4992 } else {
4993 rst_ctrl.bits.mask_monitoring = 0;
4994 rst_ctrl.bits.reset_ovfl_pmds = 1;
4995 }
4996
4997 if (ret == 0) {
4998 if (rst_ctrl.bits.reset_ovfl_pmds) {
4999 pfm_reset_regs(ctx, &ovfl_regs, PFM_PMD_LONG_RESET);
5000 }
5001 if (rst_ctrl.bits.mask_monitoring == 0) {
5002 DPRINT(("resuming monitoring\n"));
5003 if (ctx->ctx_state == PFM_CTX_MASKED) pfm_restore_monitoring(current);
5004 } else {
5005 DPRINT(("stopping monitoring\n"));
5006 //pfm_stop_monitoring(current, regs);
5007 }
5008 ctx->ctx_state = PFM_CTX_LOADED;
5009 }
5010}
5011
5012/*
5013 * context MUST BE LOCKED when calling
5014 * can only be called for current
5015 */
5016static void
5017pfm_context_force_terminate(pfm_context_t *ctx, struct pt_regs *regs)
5018{
5019 int ret;
5020
19c5870c 5021 DPRINT(("entering for [%d]\n", task_pid_nr(current)));
5022
5023 ret = pfm_context_unload(ctx, NULL, 0, regs);
5024 if (ret) {
19c5870c 5025	printk(KERN_ERR "pfm_context_force_terminate: [%d] unload failed with %d\n", task_pid_nr(current), ret);
5026 }
5027
5028 /*
5029 * and wakeup controlling task, indicating we are now disconnected
5030 */
5031 wake_up_interruptible(&ctx->ctx_zombieq);
5032
5033 /*
5034 * given that context is still locked, the controlling
5035 * task will only get access when we return from
5036 * pfm_handle_work().
5037 */
5038}
5039
5040static int pfm_ovfl_notify_user(pfm_context_t *ctx, unsigned long ovfl_pmds);
5041 /*
5042 * pfm_handle_work() can be called with interrupts enabled
5043 * (TIF_NEED_RESCHED) or disabled. The wait_for_completion_interruptible()
5044 * call may sleep, therefore we must re-enable interrupts
5045 * to avoid deadlocks. It is safe to do so because this function
5046 * is called ONLY when returning to user level (PUStk=1), in which case
5047 * there is no risk of kernel stack overflow due to deep
5048 * interrupt nesting.
5049 */
5050void
5051pfm_handle_work(void)
5052{
5053 pfm_context_t *ctx;
5054 struct pt_regs *regs;
4944930a 5055 unsigned long flags, dummy_flags;
5056 unsigned long ovfl_regs;
5057 unsigned int reason;
5058 int ret;
5059
5060 ctx = PFM_GET_CTX(current);
5061 if (ctx == NULL) {
19c5870c 5062 printk(KERN_ERR "perfmon: [%d] has no PFM context\n", task_pid_nr(current));
5063 return;
5064 }
5065
5066 PROTECT_CTX(ctx, flags);
5067
5068 PFM_SET_WORK_PENDING(current, 0);
5069
5aa92ffd 5070 tsk_clear_notify_resume(current);
1da177e4 5071
6450578f 5072 regs = task_pt_regs(current);
5073
5074 /*
5075 * extract reason for being here and clear
5076 */
5077 reason = ctx->ctx_fl_trap_reason;
5078 ctx->ctx_fl_trap_reason = PFM_TRAP_REASON_NONE;
5079 ovfl_regs = ctx->ctx_ovfl_regs[0];
5080
5081 DPRINT(("reason=%d state=%d\n", reason, ctx->ctx_state));
5082
5083 /*
5084 * must be done before we check for simple-reset mode
5085 */
5086 if (ctx->ctx_fl_going_zombie || ctx->ctx_state == PFM_CTX_ZOMBIE) goto do_zombie;
5087
5088
5089 //if (CTX_OVFL_NOBLOCK(ctx)) goto skip_blocking;
5090 if (reason == PFM_TRAP_REASON_RESET) goto skip_blocking;
5091
5092 /*
5093 * restore interrupt mask to what it was on entry.
5094	 * Could be enabled or disabled.
5095 */
5096 UNPROTECT_CTX(ctx, flags);
5097
5098 /*
5099	 * force interrupt enable because of wait_for_completion_interruptible()
5100 */
5101 local_irq_enable();
5102
5103 DPRINT(("before block sleeping\n"));
5104
5105 /*
5106 * may go through without blocking on SMP systems
5107	 * if the restart has already been received by the time we block
5108 */
60f1c444 5109 ret = wait_for_completion_interruptible(&ctx->ctx_restart_done);
5110
5111 DPRINT(("after block sleeping ret=%d\n", ret));
5112
5113 /*
5114 * lock context and mask interrupts again
5115 * We save flags into a dummy because we may have
5116 * altered interrupts mask compared to entry in this
5117 * function.
1da177e4 5118 */
4944930a 5119 PROTECT_CTX(ctx, dummy_flags);
5120
5121 /*
5122 * we need to read the ovfl_regs only after wake-up
5123 * because we may have had pfm_write_pmds() in between
5124	 * and that can change PMD values and therefore
5125 * ovfl_regs is reset for these new PMD values.
5126 */
5127 ovfl_regs = ctx->ctx_ovfl_regs[0];
5128
5129 if (ctx->ctx_fl_going_zombie) {
5130do_zombie:
5131 DPRINT(("context is zombie, bailing out\n"));
5132 pfm_context_force_terminate(ctx, regs);
5133 goto nothing_to_do;
5134 }
5135 /*
5136 * in case of interruption of down() we don't restart anything
5137 */
5138 if (ret < 0) goto nothing_to_do;
5139
5140skip_blocking:
5141 pfm_resume_after_ovfl(ctx, ovfl_regs, regs);
5142 ctx->ctx_ovfl_regs[0] = 0UL;
5143
5144nothing_to_do:
5145 /*
5146 * restore flags as they were upon entry
5147 */
5148 UNPROTECT_CTX(ctx, flags);
5149}
5150
5151static int
5152pfm_notify_user(pfm_context_t *ctx, pfm_msg_t *msg)
5153{
5154 if (ctx->ctx_state == PFM_CTX_ZOMBIE) {
5155 DPRINT(("ignoring overflow notification, owner is zombie\n"));
5156 return 0;
5157 }
5158
5159 DPRINT(("waking up somebody\n"));
5160
5161 if (msg) wake_up_interruptible(&ctx->ctx_msgq_wait);
5162
5163 /*
5164 * safe, we are not in intr handler, nor in ctxsw when
5165 * we come here
5166 */
5167 kill_fasync (&ctx->ctx_async_queue, SIGIO, POLL_IN);
5168
5169 return 0;
5170}
5171
5172static int
5173pfm_ovfl_notify_user(pfm_context_t *ctx, unsigned long ovfl_pmds)
5174{
5175 pfm_msg_t *msg = NULL;
5176
5177 if (ctx->ctx_fl_no_msg == 0) {
5178 msg = pfm_get_new_msg(ctx);
5179 if (msg == NULL) {
5180 printk(KERN_ERR "perfmon: pfm_ovfl_notify_user no more notification msgs\n");
5181 return -1;
5182 }
5183
5184 msg->pfm_ovfl_msg.msg_type = PFM_MSG_OVFL;
5185 msg->pfm_ovfl_msg.msg_ctx_fd = ctx->ctx_fd;
5186 msg->pfm_ovfl_msg.msg_active_set = 0;
5187 msg->pfm_ovfl_msg.msg_ovfl_pmds[0] = ovfl_pmds;
5188 msg->pfm_ovfl_msg.msg_ovfl_pmds[1] = 0UL;
5189 msg->pfm_ovfl_msg.msg_ovfl_pmds[2] = 0UL;
5190 msg->pfm_ovfl_msg.msg_ovfl_pmds[3] = 0UL;
5191 msg->pfm_ovfl_msg.msg_tstamp = 0UL;
5192 }
5193
5194 DPRINT(("ovfl msg: msg=%p no_msg=%d fd=%d ovfl_pmds=0x%lx\n",
5195 msg,
5196 ctx->ctx_fl_no_msg,
5197 ctx->ctx_fd,
5198 ovfl_pmds));
5199
5200 return pfm_notify_user(ctx, msg);
5201}
5202
5203static int
5204pfm_end_notify_user(pfm_context_t *ctx)
5205{
5206 pfm_msg_t *msg;
5207
5208 msg = pfm_get_new_msg(ctx);
5209 if (msg == NULL) {
5210 printk(KERN_ERR "perfmon: pfm_end_notify_user no more notification msgs\n");
5211 return -1;
5212 }
5213 /* no leak */
5214 memset(msg, 0, sizeof(*msg));
5215
5216 msg->pfm_end_msg.msg_type = PFM_MSG_END;
5217 msg->pfm_end_msg.msg_ctx_fd = ctx->ctx_fd;
5218 msg->pfm_ovfl_msg.msg_tstamp = 0UL;
5219
5220 DPRINT(("end msg: msg=%p no_msg=%d ctx_fd=%d\n",
5221 msg,
5222 ctx->ctx_fl_no_msg,
5223 ctx->ctx_fd));
5224
5225 return pfm_notify_user(ctx, msg);
5226}
5227
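/*
 * The messages queued by the two notify routines above are consumed by
 * reading the context file descriptor (user-level sketch; pfm_msg_t and
 * the msg_type values are assumed from the perfmon user headers):
 *
 *	pfm_msg_t msg;
 *	read(ctx_fd, &msg, sizeof(msg));
 *	if (msg.pfm_gen_msg.msg_type == PFM_MSG_OVFL)
 *		process the sample, then perfmonctl(ctx_fd, PFM_RESTART, NULL, 0);
 */
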
5228/*
5229 * main overflow processing routine.
72fdbdce 5230 * it can be called from the interrupt path or explicitly from the context switch code
5231 */
5232static void
5233pfm_overflow_handler(struct task_struct *task, pfm_context_t *ctx, u64 pmc0, struct pt_regs *regs)
5234{
5235 pfm_ovfl_arg_t *ovfl_arg;
5236 unsigned long mask;
5237 unsigned long old_val, ovfl_val, new_val;
5238 unsigned long ovfl_notify = 0UL, ovfl_pmds = 0UL, smpl_pmds = 0UL, reset_pmds;
5239 unsigned long tstamp;
5240 pfm_ovfl_ctrl_t ovfl_ctrl;
5241 unsigned int i, has_smpl;
5242 int must_notify = 0;
5243
5244 if (unlikely(ctx->ctx_state == PFM_CTX_ZOMBIE)) goto stop_monitoring;
5245
5246 /*
5247 * sanity test. Should never happen
5248 */
5249 if (unlikely((pmc0 & 0x1) == 0)) goto sanity_check;
5250
5251 tstamp = ia64_get_itc();
5252 mask = pmc0 >> PMU_FIRST_COUNTER;
5253 ovfl_val = pmu_conf->ovfl_val;
5254 has_smpl = CTX_HAS_SMPL(ctx);
5255
5256 DPRINT_ovfl(("pmc0=0x%lx pid=%d iip=0x%lx, %s "
5257 "used_pmds=0x%lx\n",
5258 pmc0,
19c5870c 5259 task ? task_pid_nr(task): -1,
5260 (regs ? regs->cr_iip : 0),
5261 CTX_OVFL_NOBLOCK(ctx) ? "nonblocking" : "blocking",
5262 ctx->ctx_used_pmds[0]));
5263
5264
5265 /*
5266 * first we update the virtual counters
5267 * assume there was a prior ia64_srlz_d() issued
5268 */
5269 for (i = PMU_FIRST_COUNTER; mask ; i++, mask >>= 1) {
5270
5271 /* skip pmd which did not overflow */
5272 if ((mask & 0x1) == 0) continue;
5273
5274 /*
5275 * Note that the pmd is not necessarily 0 at this point as qualified events
5276 * may have happened before the PMU was frozen. The residual count is not
5277 * taken into consideration here but will be with any read of the pmd via
5278 * pfm_read_pmds().
5279 */
5280 old_val = new_val = ctx->ctx_pmds[i].val;
5281 new_val += 1 + ovfl_val;
5282 ctx->ctx_pmds[i].val = new_val;
5283
5284 /*
5285 * check for overflow condition
5286 */
5287 if (likely(old_val > new_val)) {
5288 ovfl_pmds |= 1UL << i;
5289 if (PMC_OVFL_NOTIFY(ctx, i)) ovfl_notify |= 1UL << i;
5290 }
5291
5292 DPRINT_ovfl(("ctx_pmd[%d].val=0x%lx old_val=0x%lx pmd=0x%lx ovfl_pmds=0x%lx ovfl_notify=0x%lx\n",
5293 i,
5294 new_val,
5295 old_val,
5296 ia64_get_pmd(i) & ovfl_val,
5297 ovfl_pmds,
5298 ovfl_notify));
5299 }
5300
5301 /*
5302 * there was no 64-bit overflow, nothing else to do
5303 */
5304 if (ovfl_pmds == 0UL) return;
5305
5306 /*
5307 * reset all control bits
5308 */
5309 ovfl_ctrl.val = 0;
5310 reset_pmds = 0UL;
5311
5312 /*
5313 * if a sampling format module exists, then we "cache" the overflow by
5314 * calling the module's handler() routine.
5315 */
5316 if (has_smpl) {
5317 unsigned long start_cycles, end_cycles;
5318 unsigned long pmd_mask;
5319 int j, k, ret = 0;
5320 int this_cpu = smp_processor_id();
5321
5322 pmd_mask = ovfl_pmds >> PMU_FIRST_COUNTER;
5323 ovfl_arg = &ctx->ctx_ovfl_arg;
5324
5325 prefetch(ctx->ctx_smpl_hdr);
5326
5327 for(i=PMU_FIRST_COUNTER; pmd_mask && ret == 0; i++, pmd_mask >>=1) {
5328
5329 mask = 1UL << i;
5330
5331 if ((pmd_mask & 0x1) == 0) continue;
5332
5333 ovfl_arg->ovfl_pmd = (unsigned char )i;
5334 ovfl_arg->ovfl_notify = ovfl_notify & mask ? 1 : 0;
5335 ovfl_arg->active_set = 0;
5336 ovfl_arg->ovfl_ctrl.val = 0; /* module must fill in all fields */
5337 ovfl_arg->smpl_pmds[0] = smpl_pmds = ctx->ctx_pmds[i].smpl_pmds[0];
5338
5339 ovfl_arg->pmd_value = ctx->ctx_pmds[i].val;
5340 ovfl_arg->pmd_last_reset = ctx->ctx_pmds[i].lval;
5341 ovfl_arg->pmd_eventid = ctx->ctx_pmds[i].eventid;
5342
5343 /*
5344 * copy values of pmds of interest. Sampling format may copy them
5345 * into sampling buffer.
5346 */
5347 if (smpl_pmds) {
5348 for(j=0, k=0; smpl_pmds; j++, smpl_pmds >>=1) {
5349 if ((smpl_pmds & 0x1) == 0) continue;
5350 ovfl_arg->smpl_pmds_values[k++] = PMD_IS_COUNTING(j) ? pfm_read_soft_counter(ctx, j) : ia64_get_pmd(j);
5351 DPRINT_ovfl(("smpl_pmd[%d]=pmd%u=0x%lx\n", k-1, j, ovfl_arg->smpl_pmds_values[k-1]));
5352 }
5353 }
5354
5355 pfm_stats[this_cpu].pfm_smpl_handler_calls++;
5356
5357 start_cycles = ia64_get_itc();
5358
5359 /*
5360 * call custom buffer format record (handler) routine
5361 */
5362 ret = (*ctx->ctx_buf_fmt->fmt_handler)(task, ctx->ctx_smpl_hdr, ovfl_arg, regs, tstamp);
5363
5364 end_cycles = ia64_get_itc();
5365
5366 /*
5367 * For those controls, we take the union because they have
5368 * an all or nothing behavior.
5369 */
5370 ovfl_ctrl.bits.notify_user |= ovfl_arg->ovfl_ctrl.bits.notify_user;
5371 ovfl_ctrl.bits.block_task |= ovfl_arg->ovfl_ctrl.bits.block_task;
5372 ovfl_ctrl.bits.mask_monitoring |= ovfl_arg->ovfl_ctrl.bits.mask_monitoring;
5373 /*
5374 * build the bitmask of pmds to reset now
5375 */
5376 if (ovfl_arg->ovfl_ctrl.bits.reset_ovfl_pmds) reset_pmds |= mask;
5377
5378 pfm_stats[this_cpu].pfm_smpl_handler_cycles += end_cycles - start_cycles;
5379 }
5380 /*
5381 * when the module cannot handle the rest of the overflows, we abort right here
5382 */
5383 if (ret && pmd_mask) {
5384 DPRINT(("handler aborts leftover ovfl_pmds=0x%lx\n",
5385 pmd_mask<<PMU_FIRST_COUNTER));
5386 }
5387 /*
5388 * remove the pmds we reset now from the set of pmds to reset in pfm_restart()
5389 */
5390 ovfl_pmds &= ~reset_pmds;
5391 } else {
5392 /*
5393 * when no sampling module is used, then the default
5394 * is to notify on overflow if requested by user
5395 */
5396 ovfl_ctrl.bits.notify_user = ovfl_notify ? 1 : 0;
5397 ovfl_ctrl.bits.block_task = ovfl_notify ? 1 : 0;
5398 ovfl_ctrl.bits.mask_monitoring = ovfl_notify ? 1 : 0; /* XXX: change for saturation */
5399 ovfl_ctrl.bits.reset_ovfl_pmds = ovfl_notify ? 0 : 1;
5400 /*
5401 * if needed, we reset all overflowed pmds
5402 */
5403 if (ovfl_notify == 0) reset_pmds = ovfl_pmds;
5404 }
5405
5406 DPRINT_ovfl(("ovfl_pmds=0x%lx reset_pmds=0x%lx\n", ovfl_pmds, reset_pmds));
5407
5408 /*
5409 * reset the requested PMD registers using the short reset values
5410 */
5411 if (reset_pmds) {
5412 unsigned long bm = reset_pmds;
5413 pfm_reset_regs(ctx, &bm, PFM_PMD_SHORT_RESET);
5414 }
5415
5416 if (ovfl_notify && ovfl_ctrl.bits.notify_user) {
5417 /*
5418 * keep track of what to reset when unblocking
5419 */
5420 ctx->ctx_ovfl_regs[0] = ovfl_pmds;
5421
5422 /*
5423 * check for blocking context
5424 */
5425 if (CTX_OVFL_NOBLOCK(ctx) == 0 && ovfl_ctrl.bits.block_task) {
5426
5427 ctx->ctx_fl_trap_reason = PFM_TRAP_REASON_BLOCK;
5428
5429 /*
5430 * set the perfmon specific checking pending work for the task
5431 */
5432 PFM_SET_WORK_PENDING(task, 1);
5433
5434 /*
5435 * when coming from ctxsw, current still points to the
5436 * previous task, therefore we must work with task and not current.
5437 */
5aa92ffd 5438 tsk_set_notify_resume(task);
5439 }
5440 /*
5441 * defer until state is changed (shorten spin window). the context is locked
5442	 * anyway, so the signal receiver would just spin for nothing.
5443 */
5444 must_notify = 1;
5445 }
5446
5447 DPRINT_ovfl(("owner [%d] pending=%ld reason=%u ovfl_pmds=0x%lx ovfl_notify=0x%lx masked=%d\n",
19c5870c 5448 GET_PMU_OWNER() ? task_pid_nr(GET_PMU_OWNER()) : -1,
5449 PFM_GET_WORK_PENDING(task),
5450 ctx->ctx_fl_trap_reason,
5451 ovfl_pmds,
5452 ovfl_notify,
5453 ovfl_ctrl.bits.mask_monitoring ? 1 : 0));
5454 /*
5455 * in case monitoring must be stopped, we toggle the psr bits
5456 */
5457 if (ovfl_ctrl.bits.mask_monitoring) {
5458 pfm_mask_monitoring(task);
5459 ctx->ctx_state = PFM_CTX_MASKED;
5460 ctx->ctx_fl_can_restart = 1;
5461 }
5462
5463 /*
5464 * send notification now
5465 */
5466 if (must_notify) pfm_ovfl_notify_user(ctx, ovfl_notify);
5467
5468 return;
5469
5470sanity_check:
5471 printk(KERN_ERR "perfmon: CPU%d overflow handler [%d] pmc0=0x%lx\n",
5472 smp_processor_id(),
19c5870c 5473 task ? task_pid_nr(task) : -1,
5474 pmc0);
5475 return;
5476
5477stop_monitoring:
5478 /*
5479 * in SMP, zombie context is never restored but reclaimed in pfm_load_regs().
5480 * Moreover, zombies are also reclaimed in pfm_save_regs(). Therefore we can
5481	 * come here as zombie only if the task is the current task, in which case we
5482 * can access the PMU hardware directly.
5483 *
5484 * Note that zombies do have PM_VALID set. So here we do the minimal.
5485	 * Note that zombies do have PM_VALID set. So here we do the bare minimum.
5486	 * If the context was zombified, it could not be reclaimed at the time
5487	 * the monitoring program exited. At this point, the PMU reservation has been
5488	 * returned and the sampling buffer has been freed. We must convert this call
5489 * into a spurious interrupt. However, we must also avoid infinite overflows
5490 * by stopping monitoring for this task. We can only come here for a per-task
5491 * context. All we need to do is to stop monitoring using the psr bits which
5492	 * are always task private. By re-enabling secure monitoring, we ensure that
5493 * the monitored task will not be able to re-activate monitoring.
5494 * The task will eventually be context switched out, at which point the context
5495 * will be reclaimed (that includes releasing ownership of the PMU).
5496 *
5497	 * So there might be a window of time where the number of per-task sessions is zero
5498	 * yet one PMU might have an owner and get at most one overflow interrupt for a zombie
5499	 * context. This is safe because if a per-task session comes in, it will push this one
5500	 * out and, by virtue of pfm_save_regs(), this one will disappear. If a system wide
5501	 * session is forced onto that CPU, given that we use task pinning, pfm_save_regs() will
5502 * also push our zombie context out.
5503 *
5504 * Overall pretty hairy stuff....
5505 */
19c5870c 5506 DPRINT(("ctx is zombie for [%d], converted to spurious\n", task ? task_pid_nr(task): -1));
5507 pfm_clear_psr_up();
5508 ia64_psr(regs)->up = 0;
5509 ia64_psr(regs)->sp = 1;
5510 return;
5511}
5512
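/*
 * Worked example of the 64-bit virtualization in the update loop above:
 * with, say, 47-bit hardware counters, ovfl_val = (1UL << 47) - 1, so
 * each hardware overflow adds 1 + ovfl_val = 1UL << 47 to the soft
 * counter while the low 47 bits keep counting in the pmd itself. A full
 * 64-bit value is then reconstructed (roughly, as pfm_read_pmds() does)
 * as:
 *
 *	val = (ctx->ctx_pmds[i].val & ~ovfl_val) + (ia64_get_pmd(i) & ovfl_val);
 */
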
5513static int
9010eff0 5514pfm_do_interrupt_handler(void *arg, struct pt_regs *regs)
5515{
5516 struct task_struct *task;
5517 pfm_context_t *ctx;
5518 unsigned long flags;
5519 u64 pmc0;
5520 int this_cpu = smp_processor_id();
5521 int retval = 0;
5522
5523 pfm_stats[this_cpu].pfm_ovfl_intr_count++;
5524
5525 /*
5526 * srlz.d done before arriving here
5527 */
5528 pmc0 = ia64_get_pmc(0);
5529
5530 task = GET_PMU_OWNER();
5531 ctx = GET_PMU_CTX();
5532
5533 /*
5534 * if we have some pending bits set
5535 * assumes : if any PMC0.bit[63-1] is set, then PMC0.fr = 1
5536 */
5537 if (PMC0_HAS_OVFL(pmc0) && task) {
5538 /*
5539 * we assume that pmc0.fr is always set here
5540 */
5541
5542 /* sanity check */
5543 if (!ctx) goto report_spurious1;
5544
5545 if (ctx->ctx_fl_system == 0 && (task->thread.flags & IA64_THREAD_PM_VALID) == 0)
5546 goto report_spurious2;
5547
5548 PROTECT_CTX_NOPRINT(ctx, flags);
5549
5550 pfm_overflow_handler(task, ctx, pmc0, regs);
5551
5552 UNPROTECT_CTX_NOPRINT(ctx, flags);
5553
5554 } else {
5555 pfm_stats[this_cpu].pfm_spurious_ovfl_intr_count++;
5556 retval = -1;
5557 }
5558 /*
5559 * keep it unfrozen at all times
5560 */
5561 pfm_unfreeze_pmu();
5562
5563 return retval;
5564
5565report_spurious1:
5566 printk(KERN_INFO "perfmon: spurious overflow interrupt on CPU%d: process %d has no PFM context\n",
19c5870c 5567 this_cpu, task_pid_nr(task));
5568 pfm_unfreeze_pmu();
5569 return -1;
5570report_spurious2:
5571 printk(KERN_INFO "perfmon: spurious overflow interrupt on CPU%d: process %d, invalid flag\n",
5572 this_cpu,
19c5870c 5573 task_pid_nr(task));
5574 pfm_unfreeze_pmu();
5575 return -1;
5576}
5577
5578static irqreturn_t
3bbe486b 5579pfm_interrupt_handler(int irq, void *arg)
5580{
5581 unsigned long start_cycles, total_cycles;
5582 unsigned long min, max;
5583 int this_cpu;
5584 int ret;
3bbe486b 5585 struct pt_regs *regs = get_irq_regs();
5586
5587 this_cpu = get_cpu();
5588 if (likely(!pfm_alt_intr_handler)) {
5589 min = pfm_stats[this_cpu].pfm_ovfl_intr_cycles_min;
5590 max = pfm_stats[this_cpu].pfm_ovfl_intr_cycles_max;
1da177e4 5591
a1ecf7f6 5592 start_cycles = ia64_get_itc();
1da177e4 5593
9010eff0 5594 ret = pfm_do_interrupt_handler(arg, regs);
1da177e4 5595
a1ecf7f6 5596 total_cycles = ia64_get_itc();
1da177e4 5597
5598 /*
5599 * don't measure spurious interrupts
5600 */
5601 if (likely(ret == 0)) {
5602 total_cycles -= start_cycles;
1da177e4 5603
5604 if (total_cycles < min) pfm_stats[this_cpu].pfm_ovfl_intr_cycles_min = total_cycles;
5605 if (total_cycles > max) pfm_stats[this_cpu].pfm_ovfl_intr_cycles_max = total_cycles;
1da177e4 5606
5607 pfm_stats[this_cpu].pfm_ovfl_intr_cycles += total_cycles;
5608 }
5609 }
5610 else {
5611 (*pfm_alt_intr_handler->handler)(irq, arg, regs);
1da177e4 5612 }
a1ecf7f6 5613
5614 put_cpu_no_resched();
5615 return IRQ_HANDLED;
5616}
5617
5618/*
5619 * /proc/perfmon interface, for debug only
5620 */
5621
5622#define PFM_PROC_SHOW_HEADER ((void *)NR_CPUS+1)
5623
5624static void *
5625pfm_proc_start(struct seq_file *m, loff_t *pos)
5626{
5627 if (*pos == 0) {
5628 return PFM_PROC_SHOW_HEADER;
5629 }
5630
5631 while (*pos <= NR_CPUS) {
5632 if (cpu_online(*pos - 1)) {
5633 return (void *)*pos;
5634 }
5635 ++*pos;
5636 }
5637 return NULL;
5638}
5639
5640static void *
5641pfm_proc_next(struct seq_file *m, void *v, loff_t *pos)
5642{
5643 ++*pos;
5644 return pfm_proc_start(m, pos);
5645}
5646
5647static void
5648pfm_proc_stop(struct seq_file *m, void *v)
5649{
5650}
5651
5652static void
5653pfm_proc_show_header(struct seq_file *m)
5654{
5655 struct list_head * pos;
5656 pfm_buffer_fmt_t * entry;
5657 unsigned long flags;
5658
5659 seq_printf(m,
5660 "perfmon version : %u.%u\n"
5661 "model : %s\n"
5662 "fastctxsw : %s\n"
5663 "expert mode : %s\n"
5664 "ovfl_mask : 0x%lx\n"
5665 "PMU flags : 0x%x\n",
5666 PFM_VERSION_MAJ, PFM_VERSION_MIN,
5667 pmu_conf->pmu_name,
5668 pfm_sysctl.fastctxsw > 0 ? "Yes": "No",
5669 pfm_sysctl.expert_mode > 0 ? "Yes": "No",
5670 pmu_conf->ovfl_val,
5671 pmu_conf->flags);
5672
5673 LOCK_PFS(flags);
5674
5675 seq_printf(m,
5676 "proc_sessions : %u\n"
5677 "sys_sessions : %u\n"
5678 "sys_use_dbregs : %u\n"
5679 "ptrace_use_dbregs : %u\n",
5680 pfm_sessions.pfs_task_sessions,
5681 pfm_sessions.pfs_sys_sessions,
5682 pfm_sessions.pfs_sys_use_dbregs,
5683 pfm_sessions.pfs_ptrace_use_dbregs);
5684
5685 UNLOCK_PFS(flags);
5686
5687 spin_lock(&pfm_buffer_fmt_lock);
5688
5689 list_for_each(pos, &pfm_buffer_fmt_list) {
5690 entry = list_entry(pos, pfm_buffer_fmt_t, fmt_list);
5691 seq_printf(m, "format : %02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x %s\n",
5692 entry->fmt_uuid[0],
5693 entry->fmt_uuid[1],
5694 entry->fmt_uuid[2],
5695 entry->fmt_uuid[3],
5696 entry->fmt_uuid[4],
5697 entry->fmt_uuid[5],
5698 entry->fmt_uuid[6],
5699 entry->fmt_uuid[7],
5700 entry->fmt_uuid[8],
5701 entry->fmt_uuid[9],
5702 entry->fmt_uuid[10],
5703 entry->fmt_uuid[11],
5704 entry->fmt_uuid[12],
5705 entry->fmt_uuid[13],
5706 entry->fmt_uuid[14],
5707 entry->fmt_uuid[15],
5708 entry->fmt_name);
5709 }
5710 spin_unlock(&pfm_buffer_fmt_lock);
5711
5712}
5713
5714static int
5715pfm_proc_show(struct seq_file *m, void *v)
5716{
5717 unsigned long psr;
5718 unsigned int i;
5719 int cpu;
5720
5721 if (v == PFM_PROC_SHOW_HEADER) {
5722 pfm_proc_show_header(m);
5723 return 0;
5724 }
5725
5726 /* show info for CPU (v - 1) */
5727
5728 cpu = (long)v - 1;
5729 seq_printf(m,
5730 "CPU%-2d overflow intrs : %lu\n"
5731 "CPU%-2d overflow cycles : %lu\n"
5732 "CPU%-2d overflow min : %lu\n"
5733 "CPU%-2d overflow max : %lu\n"
5734 "CPU%-2d smpl handler calls : %lu\n"
5735 "CPU%-2d smpl handler cycles : %lu\n"
5736 "CPU%-2d spurious intrs : %lu\n"
5737 "CPU%-2d replay intrs : %lu\n"
5738 "CPU%-2d syst_wide : %d\n"
5739 "CPU%-2d dcr_pp : %d\n"
5740 "CPU%-2d exclude idle : %d\n"
5741 "CPU%-2d owner : %d\n"
5742 "CPU%-2d context : %p\n"
5743 "CPU%-2d activations : %lu\n",
5744 cpu, pfm_stats[cpu].pfm_ovfl_intr_count,
5745 cpu, pfm_stats[cpu].pfm_ovfl_intr_cycles,
5746 cpu, pfm_stats[cpu].pfm_ovfl_intr_cycles_min,
5747 cpu, pfm_stats[cpu].pfm_ovfl_intr_cycles_max,
5748 cpu, pfm_stats[cpu].pfm_smpl_handler_calls,
5749 cpu, pfm_stats[cpu].pfm_smpl_handler_cycles,
5750 cpu, pfm_stats[cpu].pfm_spurious_ovfl_intr_count,
5751 cpu, pfm_stats[cpu].pfm_replay_ovfl_intr_count,
5752 cpu, pfm_get_cpu_data(pfm_syst_info, cpu) & PFM_CPUINFO_SYST_WIDE ? 1 : 0,
5753 cpu, pfm_get_cpu_data(pfm_syst_info, cpu) & PFM_CPUINFO_DCR_PP ? 1 : 0,
5754 cpu, pfm_get_cpu_data(pfm_syst_info, cpu) & PFM_CPUINFO_EXCL_IDLE ? 1 : 0,
5755 cpu, pfm_get_cpu_data(pmu_owner, cpu) ? pfm_get_cpu_data(pmu_owner, cpu)->pid: -1,
5756 cpu, pfm_get_cpu_data(pmu_ctx, cpu),
5757 cpu, pfm_get_cpu_data(pmu_activation_number, cpu));
5758
5759 if (num_online_cpus() == 1 && pfm_sysctl.debug > 0) {
5760
5761 psr = pfm_get_psr();
5762
5763 ia64_srlz_d();
5764
5765 seq_printf(m,
5766 "CPU%-2d psr : 0x%lx\n"
5767 "CPU%-2d pmc0 : 0x%lx\n",
5768 cpu, psr,
5769 cpu, ia64_get_pmc(0));
5770
5771 for (i=0; PMC_IS_LAST(i) == 0; i++) {
5772 if (PMC_IS_COUNTING(i) == 0) continue;
5773 seq_printf(m,
5774 "CPU%-2d pmc%u : 0x%lx\n"
5775 "CPU%-2d pmd%u : 0x%lx\n",
5776 cpu, i, ia64_get_pmc(i),
5777 cpu, i, ia64_get_pmd(i));
5778 }
5779 }
5780 return 0;
5781}
5782
a23fe55e 5783const struct seq_operations pfm_seq_ops = {
5784 .start = pfm_proc_start,
5785 .next = pfm_proc_next,
5786 .stop = pfm_proc_stop,
5787 .show = pfm_proc_show
5788};
5789
5790static int
5791pfm_proc_open(struct inode *inode, struct file *file)
5792{
5793 return seq_open(file, &pfm_seq_ops);
5794}
5795
5796
5797/*
5798 * we come here as soon as local_cpu_data->pfm_syst_wide is set. This happens
5799 * during pfm_enable(), hence before pfm_start(). We cannot assume monitoring
5800 * is active or inactive based on the mode alone; we must rely on the value in
5801 * local_cpu_data->pfm_syst_info.
5802 */
5803void
5804pfm_syst_wide_update_task(struct task_struct *task, unsigned long info, int is_ctxswin)
5805{
5806 struct pt_regs *regs;
5807 unsigned long dcr;
5808 unsigned long dcr_pp;
5809
5810 dcr_pp = info & PFM_CPUINFO_DCR_PP ? 1 : 0;
5811
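	/*
	 * common case first: for any task other than an excluded idle
	 * task, simply propagate the monitoring state into the task's
	 * saved psr.pp: set it to dcr_pp on switch-in, clear it on
	 * switch-out.
	 */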
5812 /*
5813 * pid 0 is guaranteed to be the idle task. There is one such task with pid 0
5814 * on every CPU, so we can rely on the pid to identify the idle task.
5815 */
5816 if ((info & PFM_CPUINFO_EXCL_IDLE) == 0 || task->pid) {
6450578f 5817 regs = task_pt_regs(task);
5818 ia64_psr(regs)->pp = is_ctxswin ? dcr_pp : 0;
5819 return;
5820 }
5821 /*
5822 * if monitoring has started
5823 */
5824 if (dcr_pp) {
5825 dcr = ia64_getreg(_IA64_REG_CR_DCR);
5826 /*
5827 * context switching in?
5828 */
5829 if (is_ctxswin) {
5830 /* mask monitoring for the idle task */
5831 ia64_setreg(_IA64_REG_CR_DCR, dcr & ~IA64_DCR_PP);
5832 pfm_clear_psr_pp();
5833 ia64_srlz_i();
5834 return;
5835 }
5836 /*
5837 * context switching out
5838 * restore monitoring for next task
5839 *
5840	 * Due to inlining, this odd if-then-else construction generates
5841 * better code.
5842 */
5843 ia64_setreg(_IA64_REG_CR_DCR, dcr |IA64_DCR_PP);
5844 pfm_set_psr_pp();
5845 ia64_srlz_i();
5846 }
5847}
5848
5849#ifdef CONFIG_SMP
5850
5851static void
5852pfm_force_cleanup(pfm_context_t *ctx, struct pt_regs *regs)
5853{
5854 struct task_struct *task = ctx->ctx_task;
5855
5856 ia64_psr(regs)->up = 0;
5857 ia64_psr(regs)->sp = 1;
5858
5859 if (GET_PMU_OWNER() == task) {
5860 DPRINT(("cleared ownership for [%d]\n",
5861 task_pid_nr(ctx->ctx_task)));
5862 SET_PMU_OWNER(NULL, NULL);
5863 }
5864
5865 /*
5866 * disconnect the task from the context and vice-versa
5867 */
5868 PFM_SET_WORK_PENDING(task, 0);
5869
5870 task->thread.pfm_context = NULL;
5871 task->thread.flags &= ~IA64_THREAD_PM_VALID;
5872
19c5870c 5873 DPRINT(("force cleanup for [%d]\n", task_pid_nr(task)));
5874}
5875
5876
5877/*
5878 * in 2.6, interrupts are masked when we come here and the runqueue lock is held
5879 */
5880void
5881pfm_save_regs(struct task_struct *task)
5882{
5883 pfm_context_t *ctx;
5884 unsigned long flags;
5885 u64 psr;
5886
5887
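	/*
	 * switch-out path: unless the context is a zombie, we stop
	 * monitoring (psr.up), stash psr.up for the reload path, drop
	 * PMU ownership, and save the used PMDs and pmc0 into the
	 * context.
	 */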
5888 ctx = PFM_GET_CTX(task);
5889 if (ctx == NULL) return;
5890
5891 /*
5892 * we always come here with interrupts ALREADY disabled by
5893 * the scheduler. So we simply need to protect against concurrent
5894 * access, not CPU concurrency.
5895 */
5896 flags = pfm_protect_ctx_ctxsw(ctx);
5897
5898 if (ctx->ctx_state == PFM_CTX_ZOMBIE) {
6450578f 5899 struct pt_regs *regs = task_pt_regs(task);
5900
5901 pfm_clear_psr_up();
5902
5903 pfm_force_cleanup(ctx, regs);
5904
5905 BUG_ON(ctx->ctx_smpl_hdr);
5906
5907 pfm_unprotect_ctx_ctxsw(ctx, flags);
5908
5909 pfm_context_free(ctx);
5910 return;
5911 }
5912
5913 /*
5914 * save current PSR: needed because we modify it
5915 */
5916 ia64_srlz_d();
5917 psr = pfm_get_psr();
5918
5919 BUG_ON(psr & (IA64_PSR_I));
5920
5921 /*
5922 * stop monitoring:
5923 * This is the last instruction which may generate an overflow
5924 *
5925	 * We do not need to set psr.sp because it is irrelevant in the kernel.
5926 * It will be restored from ipsr when going back to user level
5927 */
5928 pfm_clear_psr_up();
5929
5930 /*
5931 * keep a copy of psr.up (for reload)
5932 */
5933 ctx->ctx_saved_psr_up = psr & IA64_PSR_UP;
5934
5935 /*
5936 * release ownership of this PMU.
5937 * PM interrupts are masked, so nothing
5938 * can happen.
5939 */
5940 SET_PMU_OWNER(NULL, NULL);
5941
5942 /*
5943	 * we systematically save the PMDs as we have no
5944	 * guarantee we will be scheduled on that same
5945 * CPU again.
5946 */
35589a8f 5947 pfm_save_pmds(ctx->th_pmds, ctx->ctx_used_pmds[0]);
5948
5949 /*
5950	 * save pmc0 (ia64_srlz_d() already done in pfm_save_pmds());
5951 * we will need it on the restore path to check
5952 * for pending overflow.
5953 */
35589a8f 5954 ctx->th_pmcs[0] = ia64_get_pmc(0);
5955
5956 /*
5957	 * unfreeze PMU if it had pending overflows
5958 */
35589a8f 5959 if (ctx->th_pmcs[0] & ~0x1UL) pfm_unfreeze_pmu();
5960
5961 /*
5962 * finally, allow context access.
5963 * interrupts will still be masked after this call.
5964 */
5965 pfm_unprotect_ctx_ctxsw(ctx, flags);
5966}
5967
5968#else /* !CONFIG_SMP */
5969void
5970pfm_save_regs(struct task_struct *task)
5971{
5972 pfm_context_t *ctx;
5973 u64 psr;
5974
5975 ctx = PFM_GET_CTX(task);
5976 if (ctx == NULL) return;
5977
5978 /*
5979 * save current PSR: needed because we modify it
5980 */
5981 psr = pfm_get_psr();
5982
5983 BUG_ON(psr & (IA64_PSR_I));
5984
5985 /*
5986 * stop monitoring:
5987 * This is the last instruction which may generate an overflow
5988 *
5989	 * We do not need to set psr.sp because it is irrelevant in the kernel.
5990 * It will be restored from ipsr when going back to user level
5991 */
5992 pfm_clear_psr_up();
5993
5994 /*
5995 * keep a copy of psr.up (for reload)
5996 */
5997 ctx->ctx_saved_psr_up = psr & IA64_PSR_UP;
5998}
5999
6000static void
6001pfm_lazy_save_regs (struct task_struct *task)
6002{
6003 pfm_context_t *ctx;
6004 unsigned long flags;
6005
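	/*
	 * sanity check: monitoring must already be stopped here;
	 * psr.up was cleared in pfm_save_regs() when the owner was
	 * switched out.
	 */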
6006 { u64 psr = pfm_get_psr();
6007 BUG_ON(psr & IA64_PSR_UP);
6008 }
6009
6010 ctx = PFM_GET_CTX(task);
6011
6012 /*
6013 * we need to mask PMU overflow here to
6014 * make sure that we maintain pmc0 until
6015 * we save it. overflow interrupts are
6016 * treated as spurious if there is no
6017 * owner.
6018 *
6019 * XXX: I don't think this is necessary
6020 */
6021 PROTECT_CTX(ctx,flags);
6022
6023 /*
6024 * release ownership of this PMU.
6025 * must be done before we save the registers.
6026 *
6027 * after this call any PMU interrupt is treated
6028 * as spurious.
6029 */
6030 SET_PMU_OWNER(NULL, NULL);
6031
6032 /*
6033 * save all the pmds we use
6034 */
35589a8f 6035 pfm_save_pmds(ctx->th_pmds, ctx->ctx_used_pmds[0]);
6036
6037 /*
6038	 * save pmc0 (ia64_srlz_d() already done in pfm_save_pmds());
6039	 * it is needed to check for pending overflow
6040 * on the restore path
6041 */
35589a8f 6042 ctx->th_pmcs[0] = ia64_get_pmc(0);
6043
6044 /*
6045	 * unfreeze PMU if it had pending overflows
6046 */
35589a8f 6047 if (ctx->th_pmcs[0] & ~0x1UL) pfm_unfreeze_pmu();
6048
6049 /*
6050	 * now we can unmask PMU interrupts; they will
6051 * be treated as purely spurious and we will not
6052 * lose any information
6053 */
6054 UNPROTECT_CTX(ctx,flags);
6055}
6056#endif /* CONFIG_SMP */
6057
6058#ifdef CONFIG_SMP
6059/*
6060 * in 2.6, interrupts are masked when we come here and the runqueue lock is held
6061 */
6062void
6063pfm_load_regs (struct task_struct *task)
6064{
6065 pfm_context_t *ctx;
6066 unsigned long pmc_mask = 0UL, pmd_mask = 0UL;
6067 unsigned long flags;
6068 u64 psr, psr_up;
6069 int need_irq_resend;
6070
6071 ctx = PFM_GET_CTX(task);
6072 if (unlikely(ctx == NULL)) return;
6073
6074 BUG_ON(GET_PMU_OWNER());
6075
6076 /*
6077 * possible on unload
6078 */
35589a8f 6079 if (unlikely((task->thread.flags & IA64_THREAD_PM_VALID) == 0)) return;
6080
6081 /*
6082 * we always come here with interrupts ALREADY disabled by
6083 * the scheduler. So we simply need to protect against concurrent
6084 * access, not CPU concurrency.
6085 */
6086 flags = pfm_protect_ctx_ctxsw(ctx);
6087 psr = pfm_get_psr();
6088
6089 need_irq_resend = pmu_conf->flags & PFM_PMU_IRQ_RESEND;
6090
6091 BUG_ON(psr & (IA64_PSR_UP|IA64_PSR_PP));
6092 BUG_ON(psr & IA64_PSR_I);
6093
6094 if (unlikely(ctx->ctx_state == PFM_CTX_ZOMBIE)) {
6450578f 6095 struct pt_regs *regs = task_pt_regs(task);
6096
6097 BUG_ON(ctx->ctx_smpl_hdr);
6098
6099 pfm_force_cleanup(ctx, regs);
6100
6101 pfm_unprotect_ctx_ctxsw(ctx, flags);
6102
6103 /*
6104 * this one (kmalloc'ed) is fine with interrupts disabled
6105 */
6106 pfm_context_free(ctx);
6107
6108 return;
6109 }
6110
6111 /*
6112 * we restore ALL the debug registers to avoid picking up
6113 * stale state.
6114 */
6115 if (ctx->ctx_fl_using_dbreg) {
6116 pfm_restore_ibrs(ctx->ctx_ibrs, pmu_conf->num_ibrs);
6117 pfm_restore_dbrs(ctx->ctx_dbrs, pmu_conf->num_dbrs);
6118 }
6119 /*
6120 * retrieve saved psr.up
6121 */
6122 psr_up = ctx->ctx_saved_psr_up;
6123
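	/*
	 * The (last_cpu, activation number) pair acts as a generation
	 * tag: it still matches only if no other context has touched
	 * this PMU since we last ran here. In that case the hardware
	 * state is still ours and only the registers modified by the
	 * user while we were switched out (the partial reload masks)
	 * need to be rewritten.
	 */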
6124 /*
6125 * if we were the last user of the PMU on that CPU,
6126 * then nothing to do except restore psr
6127 */
6128 if (GET_LAST_CPU(ctx) == smp_processor_id() && ctx->ctx_last_activation == GET_ACTIVATION()) {
6129
6130 /*
6131 * retrieve partial reload masks (due to user modifications)
6132 */
6133 pmc_mask = ctx->ctx_reload_pmcs[0];
6134 pmd_mask = ctx->ctx_reload_pmds[0];
6135
6136 } else {
6137 /*
6138 * To avoid leaking information to the user level when psr.sp=0,
6139 * we must reload ALL implemented pmds (even the ones we don't use).
6140 * In the kernel we only allow PFM_READ_PMDS on registers which
6141 * we initialized or requested (sampling) so there is no risk there.
6142 */
6143 pmd_mask = pfm_sysctl.fastctxsw ? ctx->ctx_used_pmds[0] : ctx->ctx_all_pmds[0];
6144
6145 /*
6146 * ALL accessible PMCs are systematically reloaded, unused registers
6147 * get their default (from pfm_reset_pmu_state()) values to avoid picking
6148 * up stale configuration.
6149 *
6150 * PMC0 is never in the mask. It is always restored separately.
6151 */
6152 pmc_mask = ctx->ctx_all_pmcs[0];
6153 }
6154 /*
6155 * when context is MASKED, we will restore PMC with plm=0
6156 * and PMD with stale information, but that's ok, nothing
6157 * will be captured.
6158 *
6159 * XXX: optimize here
6160 */
6161 if (pmd_mask) pfm_restore_pmds(ctx->th_pmds, pmd_mask);
6162 if (pmc_mask) pfm_restore_pmcs(ctx->th_pmcs, pmc_mask);
6163
6164 /*
6165 * check for pending overflow at the time the state
6166 * was saved.
6167 */
35589a8f 6168 if (unlikely(PMC0_HAS_OVFL(ctx->th_pmcs[0]))) {
6169 /*
6170 * reload pmc0 with the overflow information
6171 * On McKinley PMU, this will trigger a PMU interrupt
6172 */
35589a8f 6173 ia64_set_pmc(0, ctx->th_pmcs[0]);
1da177e4 6174 ia64_srlz_d();
35589a8f 6175 ctx->th_pmcs[0] = 0UL;
6176
6177 /*
6178 * will replay the PMU interrupt
6179 */
c0ad90a3 6180 if (need_irq_resend) ia64_resend_irq(IA64_PERFMON_VECTOR);
6181
6182 pfm_stats[smp_processor_id()].pfm_replay_ovfl_intr_count++;
6183 }
6184
6185 /*
6186 * we just did a reload, so we reset the partial reload fields
6187 */
6188 ctx->ctx_reload_pmcs[0] = 0UL;
6189 ctx->ctx_reload_pmds[0] = 0UL;
6190
6191 SET_LAST_CPU(ctx, smp_processor_id());
6192
6193 /*
6194	 * bump the activation value for this PMU
6195 */
6196 INC_ACTIVATION();
6197 /*
6198 * record current activation for this context
6199 */
6200 SET_ACTIVATION(ctx);
6201
6202 /*
6203 * establish new ownership.
6204 */
6205 SET_PMU_OWNER(task, ctx);
6206
6207 /*
6208 * restore the psr.up bit. measurement
6209 * is active again.
6210 * no PMU interrupt can happen at this point
6211 * because we still have interrupts disabled.
6212 */
6213 if (likely(psr_up)) pfm_set_psr_up();
6214
6215 /*
6216 * allow concurrent access to context
6217 */
6218 pfm_unprotect_ctx_ctxsw(ctx, flags);
6219}
6220#else /* !CONFIG_SMP */
6221/*
6222 * reload PMU state for UP kernels
6223 * in 2.5 we come here with interrupts disabled
6224 */
6225void
6226pfm_load_regs (struct task_struct *task)
6227{
6228 pfm_context_t *ctx;
6229 struct task_struct *owner;
6230 unsigned long pmd_mask, pmc_mask;
6231 u64 psr, psr_up;
6232 int need_irq_resend;
6233
6234 owner = GET_PMU_OWNER();
6235 ctx = PFM_GET_CTX(task);
6236 psr = pfm_get_psr();
6237
6238 BUG_ON(psr & (IA64_PSR_UP|IA64_PSR_PP));
6239 BUG_ON(psr & IA64_PSR_I);
6240
6241 /*
6242 * we restore ALL the debug registers to avoid picking up
6243 * stale state.
6244 *
6245 * This must be done even when the task is still the owner
6246 * as the registers may have been modified via ptrace()
6247 * (not perfmon) by the previous task.
6248 */
6249 if (ctx->ctx_fl_using_dbreg) {
6250 pfm_restore_ibrs(ctx->ctx_ibrs, pmu_conf->num_ibrs);
6251 pfm_restore_dbrs(ctx->ctx_dbrs, pmu_conf->num_dbrs);
6252 }
6253
6254 /*
6255	 * retrieve saved psr.up
6256 */
6257 psr_up = ctx->ctx_saved_psr_up;
6258 need_irq_resend = pmu_conf->flags & PFM_PMU_IRQ_RESEND;
6259
6260 /*
6261 * short path, our state is still there, just
6262 * need to restore psr and we go
6263 *
6264	 * we touch neither the PMCs nor the PMDs. The psr is not touched
6265	 * by the overflow handler, so we are safe w.r.t. interrupt
6266 * concurrency even without interrupt masking.
6267 */
6268 if (likely(owner == task)) {
6269 if (likely(psr_up)) pfm_set_psr_up();
6270 return;
6271 }
6272
6273 /*
6274 * someone else is still using the PMU, first push it out and
6275	 * then we'll be able to install our stuff!
6276 *
6277 * Upon return, there will be no owner for the current PMU
6278 */
6279 if (owner) pfm_lazy_save_regs(owner);
6280
6281 /*
6282 * To avoid leaking information to the user level when psr.sp=0,
6283 * we must reload ALL implemented pmds (even the ones we don't use).
6284 * In the kernel we only allow PFM_READ_PMDS on registers which
6285 * we initialized or requested (sampling) so there is no risk there.
6286 */
6287 pmd_mask = pfm_sysctl.fastctxsw ? ctx->ctx_used_pmds[0] : ctx->ctx_all_pmds[0];
6288
6289 /*
6290 * ALL accessible PMCs are systematically reloaded, unused registers
6291 * get their default (from pfm_reset_pmu_state()) values to avoid picking
6292 * up stale configuration.
6293 *
6294 * PMC0 is never in the mask. It is always restored separately
6295 */
6296 pmc_mask = ctx->ctx_all_pmcs[0];
6297
6298 pfm_restore_pmds(ctx->th_pmds, pmd_mask);
6299 pfm_restore_pmcs(ctx->th_pmcs, pmc_mask);
6300
6301 /*
6302 * check for pending overflow at the time the state
6303 * was saved.
6304 */
35589a8f 6305 if (unlikely(PMC0_HAS_OVFL(ctx->th_pmcs[0]))) {
6306 /*
6307 * reload pmc0 with the overflow information
6308 * On McKinley PMU, this will trigger a PMU interrupt
6309 */
35589a8f 6310 ia64_set_pmc(0, ctx->th_pmcs[0]);
6311 ia64_srlz_d();
6312
35589a8f 6313 ctx->th_pmcs[0] = 0UL;
6314
6315 /*
6316 * will replay the PMU interrupt
6317 */
c0ad90a3 6318 if (need_irq_resend) ia64_resend_irq(IA64_PERFMON_VECTOR);
6319
6320 pfm_stats[smp_processor_id()].pfm_replay_ovfl_intr_count++;
6321 }
6322
6323 /*
6324 * establish new ownership.
6325 */
6326 SET_PMU_OWNER(task, ctx);
6327
6328 /*
6329 * restore the psr.up bit. measurement
6330 * is active again.
6331 * no PMU interrupt can happen at this point
6332 * because we still have interrupts disabled.
6333 */
6334 if (likely(psr_up)) pfm_set_psr_up();
6335}
6336#endif /* CONFIG_SMP */
6337
6338/*
6339 * this function assumes monitoring is stopped
6340 */
6341static void
6342pfm_flush_pmds(struct task_struct *task, pfm_context_t *ctx)
6343{
6344 u64 pmc0;
6345 unsigned long mask2, val, pmd_val, ovfl_val;
6346 int i, can_access_pmu = 0;
6347 int is_self;
6348
6349 /*
6350	 * is the caller the task being monitored (or the one which initiated the
6351	 * session for system-wide measurements)?
6352 */
6353 is_self = ctx->ctx_task == task ? 1 : 0;
6354
6355 /*
6356	 * can access PMU if task is the owner of the PMU state on the current CPU
6357	 * or if we are running on the CPU bound to the context in system-wide mode
6358	 * (that is not necessarily the task the context is attached to in this mode).
6359	 * In system-wide mode we always have can_access_pmu true because a task running on an
6360	 * invalid processor is flagged earlier in the call stack (see pfm_stop).
6361 */
6362 can_access_pmu = (GET_PMU_OWNER() == task) || (ctx->ctx_fl_system && ctx->ctx_cpu == smp_processor_id());
6363 if (can_access_pmu) {
6364 /*
6365 * Mark the PMU as not owned
6366 * This will cause the interrupt handler to do nothing in case an overflow
6367		 * interrupt was in-flight.
6368		 * This also guarantees that pmc0 will contain the final state.
6369 * It virtually gives us full control on overflow processing from that point
6370 * on.
6371 */
6372 SET_PMU_OWNER(NULL, NULL);
6373 DPRINT(("releasing ownership\n"));
6374
6375 /*
6376 * read current overflow status:
6377 *
6378 * we are guaranteed to read the final stable state
6379 */
6380 ia64_srlz_d();
6381 pmc0 = ia64_get_pmc(0); /* slow */
6382
6383 /*
6384 * reset freeze bit, overflow status information destroyed
6385 */
6386 pfm_unfreeze_pmu();
6387 } else {
35589a8f 6388 pmc0 = ctx->th_pmcs[0];
6389 /*
6390 * clear whatever overflow status bits there were
6391 */
35589a8f 6392 ctx->th_pmcs[0] = 0;
6393 }
6394 ovfl_val = pmu_conf->ovfl_val;
6395 /*
6396 * we save all the used pmds
6397 * we take care of overflows for counting PMDs
6398 *
6399 * XXX: sampling situation is not taken into account here
6400 */
6401 mask2 = ctx->ctx_used_pmds[0];
6402
6403 DPRINT(("is_self=%d ovfl_val=0x%lx mask2=0x%lx\n", is_self, ovfl_val, mask2));
6404
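	/*
	 * walk the used-PMD bitmask one bit per iteration; the loop
	 * terminates as soon as no used bits remain in mask2.
	 */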
6405 for (i = 0; mask2; i++, mask2>>=1) {
6406
6407 /* skip non used pmds */
6408 if ((mask2 & 0x1) == 0) continue;
6409
6410 /*
6411		 * can_access_pmu is always true in system-wide mode
6412 */
35589a8f 6413 val = pmd_val = can_access_pmu ? ia64_get_pmd(i) : ctx->th_pmds[i];
6414
6415 if (PMD_IS_COUNTING(i)) {
6416 DPRINT(("[%d] pmd[%d] ctx_pmd=0x%lx hw_pmd=0x%lx\n",
19c5870c 6417 task_pid_nr(task),
6418 i,
6419 ctx->ctx_pmds[i].val,
6420 val & ovfl_val));
6421
6422 /*
6423 * we rebuild the full 64 bit value of the counter
6424 */
6425 val = ctx->ctx_pmds[i].val + (val & ovfl_val);
6426
6427 /*
6428 * now everything is in ctx_pmds[] and we need
6429 * to clear the saved context from save_regs() such that
6430 * pfm_read_pmds() gets the correct value
6431 */
6432 pmd_val = 0UL;
6433
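			/*
			 * Example (on a PMU with 47-bit counters, where
			 * ovfl_val = (1UL<<47)-1): if the hardware counter
			 * wrapped and now reads 5, the rebuilt 64-bit value
			 * must advance by one full period, i.e. by
			 * 1 + ovfl_val = 1UL<<47, on top of the 5.
			 */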
6434 /*
6435 * take care of overflow inline
6436 */
6437 if (pmc0 & (1UL << i)) {
6438 val += 1 + ovfl_val;
19c5870c 6439 DPRINT(("[%d] pmd[%d] overflowed\n", task_pid_nr(task), i));
6440 }
6441 }
6442
19c5870c 6443 DPRINT(("[%d] ctx_pmd[%d]=0x%lx pmd_val=0x%lx\n", task_pid_nr(task), i, val, pmd_val));
1da177e4 6444
35589a8f 6445 if (is_self) ctx->th_pmds[i] = pmd_val;
6446
6447 ctx->ctx_pmds[i].val = val;
6448 }
6449}
6450
6451static struct irqaction perfmon_irqaction = {
6452 .handler = pfm_interrupt_handler,
121a4226 6453 .flags = IRQF_DISABLED,
6454 .name = "perfmon"
6455};
6456
6457static void
6458pfm_alt_save_pmu_state(void *data)
6459{
6460 struct pt_regs *regs;
6461
6450578f 6462 regs = task_pt_regs(current);
6463
6464 DPRINT(("called\n"));
6465
6466 /*
6467	 * should not be necessary, but
6468	 * let's not take any risk
6469 */
6470 pfm_clear_psr_up();
6471 pfm_clear_psr_pp();
6472 ia64_psr(regs)->pp = 0;
6473
6474 /*
6475	 * This call is required.
6476	 * It may cause a spurious interrupt on some processors.
6477 */
6478 pfm_freeze_pmu();
6479
6480 ia64_srlz_d();
6481}
6482
6483void
6484pfm_alt_restore_pmu_state(void *data)
6485{
6486 struct pt_regs *regs;
6487
6450578f 6488 regs = task_pt_regs(current);
6489
6490 DPRINT(("called\n"));
6491
6492 /*
6493 * put PMU back in state expected
6494 * by perfmon
6495 */
6496 pfm_clear_psr_up();
6497 pfm_clear_psr_pp();
6498 ia64_psr(regs)->pp = 0;
6499
6500 /*
6501 * perfmon runs with PMU unfrozen at all times
6502 */
6503 pfm_unfreeze_pmu();
6504
6505 ia64_srlz_d();
6506}
6507
6508int
6509pfm_install_alt_pmu_interrupt(pfm_intr_handler_desc_t *hdl)
6510{
6511 int ret, i;
6512 int reserve_cpu;
6513
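	/*
	 * overall sequence: validate the handler, take the install
	 * lock, reserve a system-wide session on every online CPU,
	 * quiesce the PMU everywhere, and only then publish the
	 * alternate handler.
	 */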
6514 /* some sanity checks */
6515 if (hdl == NULL || hdl->handler == NULL) return -EINVAL;
6516
6517 /* do the easy test first */
6518 if (pfm_alt_intr_handler) return -EBUSY;
6519
6520 /* one at a time in the install or remove, just fail the others */
6521 if (!spin_trylock(&pfm_alt_install_check)) {
6522 return -EBUSY;
6523 }
6524
6525 /* reserve our session */
6526 for_each_online_cpu(reserve_cpu) {
6527 ret = pfm_reserve_session(NULL, 1, reserve_cpu);
6528 if (ret) goto cleanup_reserve;
6529 }
6530
6531 /* save the current system wide pmu states */
6532 ret = on_each_cpu(pfm_alt_save_pmu_state, NULL, 0, 1);
6533 if (ret) {
6534 DPRINT(("on_each_cpu() failed: %d\n", ret));
6535 goto cleanup_reserve;
6536 }
6537
6538 /* officially change to the alternate interrupt handler */
6539 pfm_alt_intr_handler = hdl;
6540
6541 spin_unlock(&pfm_alt_install_check);
6542
6543 return 0;
6544
6545cleanup_reserve:
6546 for_each_online_cpu(i) {
6547 /* don't unreserve more than we reserved */
6548 if (i >= reserve_cpu) break;
6549
6550 pfm_unreserve_session(NULL, 1, i);
6551 }
6552
6553 spin_unlock(&pfm_alt_install_check);
6554
6555 return ret;
6556}
6557EXPORT_SYMBOL_GPL(pfm_install_alt_pmu_interrupt);
6558
6559int
6560pfm_remove_alt_pmu_interrupt(pfm_intr_handler_desc_t *hdl)
6561{
6562 int i;
6563 int ret;
6564
6565 if (hdl == NULL) return -EINVAL;
6566
6567 /* cannot remove someone else's handler! */
6568 if (pfm_alt_intr_handler != hdl) return -EINVAL;
6569
6570 /* one at a time in the install or remove, just fail the others */
6571 if (!spin_trylock(&pfm_alt_install_check)) {
6572 return -EBUSY;
6573 }
6574
6575 pfm_alt_intr_handler = NULL;
6576
6577 ret = on_each_cpu(pfm_alt_restore_pmu_state, NULL, 0, 1);
6578 if (ret) {
6579 DPRINT(("on_each_cpu() failed: %d\n", ret));
6580 }
6581
6582 for_each_online_cpu(i) {
6583 pfm_unreserve_session(NULL, 1, i);
6584 }
6585
6586 spin_unlock(&pfm_alt_install_check);
6587
6588 return 0;
6589}
6590EXPORT_SYMBOL_GPL(pfm_remove_alt_pmu_interrupt);
6591
6592/*
6593 * perfmon initialization routine, called from the initcall() table
6594 */
6595static int init_pfm_fs(void);
6596
6597static int __init
6598pfm_probe_pmu(void)
6599{
6600 pmu_config_t **p;
6601 int family;
6602
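	/*
	 * walk the table of known PMU configurations: an entry with a
	 * probe() hook decides for itself; otherwise match on the CPU
	 * family, with 0xff acting as a catch-all default.
	 */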
6603 family = local_cpu_data->family;
6604 p = pmu_confs;
6605
6606 while(*p) {
6607 if ((*p)->probe) {
6608 if ((*p)->probe() == 0) goto found;
6609 } else if ((*p)->pmu_family == family || (*p)->pmu_family == 0xff) {
6610 goto found;
6611 }
6612 p++;
6613 }
6614 return -1;
6615found:
6616 pmu_conf = *p;
6617 return 0;
6618}
6619
5dfe4c96 6620static const struct file_operations pfm_proc_fops = {
6621 .open = pfm_proc_open,
6622 .read = seq_read,
6623 .llseek = seq_lseek,
6624 .release = seq_release,
6625};
6626
6627int __init
6628pfm_init(void)
6629{
6630 unsigned int n, n_counters, i;
6631
6632 printk("perfmon: version %u.%u IRQ %u\n",
6633 PFM_VERSION_MAJ,
6634 PFM_VERSION_MIN,
6635 IA64_PERFMON_VECTOR);
6636
6637 if (pfm_probe_pmu()) {
6638 printk(KERN_INFO "perfmon: disabled, there is no support for processor family %d\n",
6639 local_cpu_data->family);
6640 return -ENODEV;
6641 }
6642
6643 /*
6644 * compute the number of implemented PMD/PMC from the
6645 * description tables
6646 */
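	/*
	 * impl_pmcs/impl_pmds are bitmaps holding 64 registers per
	 * word: i>>6 selects the word, i&63 the bit within it.
	 */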
6647 n = 0;
6648 for (i=0; PMC_IS_LAST(i) == 0; i++) {
6649 if (PMC_IS_IMPL(i) == 0) continue;
6650 pmu_conf->impl_pmcs[i>>6] |= 1UL << (i&63);
6651 n++;
6652 }
6653 pmu_conf->num_pmcs = n;
6654
6655 n = 0; n_counters = 0;
6656 for (i=0; PMD_IS_LAST(i) == 0; i++) {
6657 if (PMD_IS_IMPL(i) == 0) continue;
6658 pmu_conf->impl_pmds[i>>6] |= 1UL << (i&63);
6659 n++;
6660 if (PMD_IS_COUNTING(i)) n_counters++;
6661 }
6662 pmu_conf->num_pmds = n;
6663 pmu_conf->num_counters = n_counters;
6664
6665 /*
6666 * sanity checks on the number of debug registers
6667 */
6668 if (pmu_conf->use_rr_dbregs) {
6669 if (pmu_conf->num_ibrs > IA64_NUM_DBG_REGS) {
6670 printk(KERN_INFO "perfmon: unsupported number of code debug registers (%u)\n", pmu_conf->num_ibrs);
6671 pmu_conf = NULL;
6672 return -1;
6673 }
6674 if (pmu_conf->num_dbrs > IA64_NUM_DBG_REGS) {
6675			printk(KERN_INFO "perfmon: unsupported number of data debug registers (%u)\n", pmu_conf->num_dbrs);
6676 pmu_conf = NULL;
6677 return -1;
6678 }
6679 }
6680
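	/*
	 * ffz(ovfl_val) is the index of the first zero bit in the
	 * overflow mask, i.e. the width in bits of the hardware
	 * counters.
	 */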
6681 printk("perfmon: %s PMU detected, %u PMCs, %u PMDs, %u counters (%lu bits)\n",
6682 pmu_conf->pmu_name,
6683 pmu_conf->num_pmcs,
6684 pmu_conf->num_pmds,
6685 pmu_conf->num_counters,
6686 ffz(pmu_conf->ovfl_val));
6687
6688 /* sanity check */
35589a8f 6689 if (pmu_conf->num_pmds >= PFM_NUM_PMD_REGS || pmu_conf->num_pmcs >= PFM_NUM_PMC_REGS) {
6690 printk(KERN_ERR "perfmon: not enough pmc/pmd, perfmon disabled\n");
6691 pmu_conf = NULL;
6692 return -1;
6693 }
6694
6695 /*
6696 * create /proc/perfmon (mostly for debugging purposes)
6697 */
6698 perfmon_dir = create_proc_entry("perfmon", S_IRUGO, NULL);
6699 if (perfmon_dir == NULL) {
6700 printk(KERN_ERR "perfmon: cannot create /proc entry, perfmon disabled\n");
6701 pmu_conf = NULL;
6702 return -1;
6703 }
6704 /*
6705 * install customized file operations for /proc/perfmon entry
6706 */
6707 perfmon_dir->proc_fops = &pfm_proc_fops;
6708
6709 /*
6710 * create /proc/sys/kernel/perfmon (for debugging purposes)
6711 */
0b4d4147 6712 pfm_sysctl_header = register_sysctl_table(pfm_sysctl_root);
6713
6714 /*
6715 * initialize all our spinlocks
6716 */
6717 spin_lock_init(&pfm_sessions.pfs_lock);
6718 spin_lock_init(&pfm_buffer_fmt_lock);
6719
6720 init_pfm_fs();
6721
6722 for(i=0; i < NR_CPUS; i++) pfm_stats[i].pfm_ovfl_intr_cycles_min = ~0UL;
6723
6724 return 0;
6725}
6726
6727__initcall(pfm_init);
6728
6729/*
6730 * this function is called before pfm_init()
6731 */
6732void
6733pfm_init_percpu (void)
6734{
ff741906 6735	static int first_time = 1;
6736 /*
6737 * make sure no measurement is active
6738 * (may inherit programmed PMCs from EFI).
6739 */
6740 pfm_clear_psr_pp();
6741 pfm_clear_psr_up();
6742
6743 /*
6744 * we run with the PMU not frozen at all times
6745 */
6746 pfm_unfreeze_pmu();
6747
ff741906 6748 if (first_time) {
1da177e4 6749 register_percpu_irq(IA64_PERFMON_VECTOR, &perfmon_irqaction);
6750		first_time = 0;
6751 }
6752
6753 ia64_setreg(_IA64_REG_CR_PMV, IA64_PERFMON_VECTOR);
6754 ia64_srlz_d();
6755}
6756
6757/*
6758 * used for debug purposes only
6759 */
6760void
6761dump_pmu_state(const char *from)
6762{
6763 struct task_struct *task;
6764 struct pt_regs *regs;
6765 pfm_context_t *ctx;
6766 unsigned long psr, dcr, info, flags;
6767 int i, this_cpu;
6768
6769 local_irq_save(flags);
6770
6771 this_cpu = smp_processor_id();
6450578f 6772 regs = task_pt_regs(current);
6773 info = PFM_CPUINFO_GET();
6774 dcr = ia64_getreg(_IA64_REG_CR_DCR);
6775
6776 if (info == 0 && ia64_psr(regs)->pp == 0 && (dcr & IA64_DCR_PP) == 0) {
6777 local_irq_restore(flags);
6778 return;
6779 }
6780
6781 printk("CPU%d from %s() current [%d] iip=0x%lx %s\n",
6782 this_cpu,
6783 from,
19c5870c 6784 task_pid_nr(current),
6785 regs->cr_iip,
6786 current->comm);
6787
6788 task = GET_PMU_OWNER();
6789 ctx = GET_PMU_CTX();
6790
19c5870c 6791 printk("->CPU%d owner [%d] ctx=%p\n", this_cpu, task ? task_pid_nr(task) : -1, ctx);
6792
6793 psr = pfm_get_psr();
6794
6795 printk("->CPU%d pmc0=0x%lx psr.pp=%d psr.up=%d dcr.pp=%d syst_info=0x%lx user_psr.up=%d user_psr.pp=%d\n",
6796 this_cpu,
6797 ia64_get_pmc(0),
6798 psr & IA64_PSR_PP ? 1 : 0,
6799 psr & IA64_PSR_UP ? 1 : 0,
6800 dcr & IA64_DCR_PP ? 1 : 0,
6801 info,
6802 ia64_psr(regs)->up,
6803 ia64_psr(regs)->pp);
6804
6805 ia64_psr(regs)->up = 0;
6806 ia64_psr(regs)->pp = 0;
6807
6808 for (i=1; PMC_IS_LAST(i) == 0; i++) {
6809 if (PMC_IS_IMPL(i) == 0) continue;
35589a8f 6810 printk("->CPU%d pmc[%d]=0x%lx thread_pmc[%d]=0x%lx\n", this_cpu, i, ia64_get_pmc(i), i, ctx->th_pmcs[i]);
6811 }
6812
6813 for (i=1; PMD_IS_LAST(i) == 0; i++) {
6814 if (PMD_IS_IMPL(i) == 0) continue;
35589a8f 6815 printk("->CPU%d pmd[%d]=0x%lx thread_pmd[%d]=0x%lx\n", this_cpu, i, ia64_get_pmd(i), i, ctx->th_pmds[i]);
6816 }
6817
6818 if (ctx) {
6819		printk("->CPU%d ctx_state=%d vaddr=%p addr=%p msgq_head=%d msgq_tail=%d saved_psr_up=0x%lx\n",
6820 this_cpu,
6821 ctx->ctx_state,
6822 ctx->ctx_smpl_vaddr,
6823 ctx->ctx_smpl_hdr,
6824 ctx->ctx_msgq_head,
6825 ctx->ctx_msgq_tail,
6826 ctx->ctx_saved_psr_up);
6827 }
6828 local_irq_restore(flags);
6829}
6830
6831/*
6832 * called from process.c:copy_thread(). task is new child.
6833 */
6834void
6835pfm_inherit(struct task_struct *task, struct pt_regs *regs)
6836{
6837 struct thread_struct *thread;
6838
19c5870c 6839 DPRINT(("perfmon: pfm_inherit clearing state for [%d]\n", task_pid_nr(task)));
6840
6841 thread = &task->thread;
6842
6843 /*
6844 * cut links inherited from parent (current)
6845 */
6846 thread->pfm_context = NULL;
6847
6848 PFM_SET_WORK_PENDING(task, 0);
6849
6850 /*
6851	 * the psr bits are already set properly in copy_thread()
6852 */
6853}
6854#else /* !CONFIG_PERFMON */
6855asmlinkage long
6856sys_perfmonctl (int fd, int cmd, void *arg, int count)
6857{
6858 return -ENOSYS;
6859}
6860#endif /* CONFIG_PERFMON */