1/*
2 * This file implements the perfmon-2 subsystem which is used
3 * to program the IA-64 Performance Monitoring Unit (PMU).
4 *
5 * The initial version of perfmon.c was written by
6 * Ganesh Venkitachalam, IBM Corp.
7 *
8 * Then it was modified for perfmon-1.x by Stephane Eranian and
9 * David Mosberger, Hewlett Packard Co.
10 *
11 * Version Perfmon-2.x is a rewrite of perfmon-1.x
12 * by Stephane Eranian, Hewlett Packard Co.
13 *
 14 * Copyright (C) 1999-2005 Hewlett Packard Co
15 * Stephane Eranian <eranian@hpl.hp.com>
16 * David Mosberger-Tang <davidm@hpl.hp.com>
17 *
18 * More information about perfmon available at:
19 * http://www.hpl.hp.com/research/linux/perfmon
20 */
21
22#include <linux/module.h>
23#include <linux/kernel.h>
24#include <linux/sched.h>
25#include <linux/interrupt.h>
26#include <linux/proc_fs.h>
27#include <linux/seq_file.h>
28#include <linux/init.h>
29#include <linux/vmalloc.h>
30#include <linux/mm.h>
31#include <linux/sysctl.h>
32#include <linux/list.h>
33#include <linux/file.h>
34#include <linux/poll.h>
35#include <linux/vfs.h>
 36#include <linux/smp.h>
 37#include <linux/pagemap.h>
 38#include <linux/mount.h>
 39#include <linux/bitops.h>
 40#include <linux/capability.h>
 41#include <linux/rcupdate.h>
 42#include <linux/completion.h>
 43#include <linux/tracehook.h>
44
45#include <asm/errno.h>
46#include <asm/intrinsics.h>
47#include <asm/page.h>
48#include <asm/perfmon.h>
49#include <asm/processor.h>
50#include <asm/signal.h>
51#include <asm/system.h>
52#include <asm/uaccess.h>
53#include <asm/delay.h>
54
55#ifdef CONFIG_PERFMON
56/*
57 * perfmon context state
58 */
59#define PFM_CTX_UNLOADED 1 /* context is not loaded onto any task */
60#define PFM_CTX_LOADED 2 /* context is loaded onto a task */
61#define PFM_CTX_MASKED 3 /* context is loaded but monitoring is masked due to overflow */
62#define PFM_CTX_ZOMBIE 4 /* owner of the context is closing it */
63
64#define PFM_INVALID_ACTIVATION (~0UL)
65
66#define PFM_NUM_PMC_REGS 64 /* PMC save area for ctxsw */
67#define PFM_NUM_PMD_REGS 64 /* PMD save area for ctxsw */
68
69/*
70 * depth of message queue
71 */
72#define PFM_MAX_MSGS 32
73#define PFM_CTXQ_EMPTY(g) ((g)->ctx_msgq_head == (g)->ctx_msgq_tail)
74
75/*
76 * type of a PMU register (bitmask).
77 * bitmask structure:
78 * bit0 : register implemented
79 * bit1 : end marker
80 * bit2-3 : reserved
81 * bit4 : pmc has pmc.pm
82 * bit5 : pmc controls a counter (has pmc.oi), pmd is used as counter
83 * bit6-7 : register type
84 * bit8-31: reserved
85 */
86#define PFM_REG_NOTIMPL 0x0 /* not implemented at all */
87#define PFM_REG_IMPL 0x1 /* register implemented */
88#define PFM_REG_END 0x2 /* end marker */
89#define PFM_REG_MONITOR (0x1<<4|PFM_REG_IMPL) /* a PMC with a pmc.pm field only */
90#define PFM_REG_COUNTING (0x2<<4|PFM_REG_MONITOR) /* a monitor + pmc.oi+ PMD used as a counter */
91#define PFM_REG_CONTROL (0x4<<4|PFM_REG_IMPL) /* PMU control register */
92#define PFM_REG_CONFIG (0x8<<4|PFM_REG_IMPL) /* configuration register */
93#define PFM_REG_BUFFER (0xc<<4|PFM_REG_IMPL) /* PMD used as buffer */
94
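/*
 * Example (illustrative, not from the original file): how the type bitmask
 * composes. PFM_REG_COUNTING is a strict superset of PFM_REG_MONITOR, which
 * itself includes PFM_REG_IMPL, so the PMD_IS_COUNTING()/PMC_IS_COUNTING()
 * tests below compare against the full mask rather than a single bit.
 */
#if 0
	/* PFM_REG_MONITOR  = (0x1<<4) | 0x1  = 0x11 */
	/* PFM_REG_COUNTING = (0x2<<4) | 0x11 = 0x31 */
	if ((pmu_conf->pmd_desc[i].type & PFM_REG_COUNTING) == PFM_REG_COUNTING)
		/* PMD i is used as a counter */;
#endif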
95#define PMC_IS_LAST(i) (pmu_conf->pmc_desc[i].type & PFM_REG_END)
96#define PMD_IS_LAST(i) (pmu_conf->pmd_desc[i].type & PFM_REG_END)
97
98#define PMC_OVFL_NOTIFY(ctx, i) ((ctx)->ctx_pmds[i].flags & PFM_REGFL_OVFL_NOTIFY)
99
100/* i assumed unsigned */
101#define PMC_IS_IMPL(i) (i< PMU_MAX_PMCS && (pmu_conf->pmc_desc[i].type & PFM_REG_IMPL))
102#define PMD_IS_IMPL(i) (i< PMU_MAX_PMDS && (pmu_conf->pmd_desc[i].type & PFM_REG_IMPL))
103
104/* XXX: these assume that register i is implemented */
105#define PMD_IS_COUNTING(i) ((pmu_conf->pmd_desc[i].type & PFM_REG_COUNTING) == PFM_REG_COUNTING)
106#define PMC_IS_COUNTING(i) ((pmu_conf->pmc_desc[i].type & PFM_REG_COUNTING) == PFM_REG_COUNTING)
107#define PMC_IS_MONITOR(i) ((pmu_conf->pmc_desc[i].type & PFM_REG_MONITOR) == PFM_REG_MONITOR)
108#define PMC_IS_CONTROL(i) ((pmu_conf->pmc_desc[i].type & PFM_REG_CONTROL) == PFM_REG_CONTROL)
109
110#define PMC_DFL_VAL(i) pmu_conf->pmc_desc[i].default_value
111#define PMC_RSVD_MASK(i) pmu_conf->pmc_desc[i].reserved_mask
112#define PMD_PMD_DEP(i) pmu_conf->pmd_desc[i].dep_pmd[0]
113#define PMC_PMD_DEP(i) pmu_conf->pmc_desc[i].dep_pmd[0]
114
115#define PFM_NUM_IBRS IA64_NUM_DBG_REGS
116#define PFM_NUM_DBRS IA64_NUM_DBG_REGS
117
118#define CTX_OVFL_NOBLOCK(c) ((c)->ctx_fl_block == 0)
119#define CTX_HAS_SMPL(c) ((c)->ctx_fl_is_sampling)
120#define PFM_CTX_TASK(h) (h)->ctx_task
121
122#define PMU_PMC_OI 5 /* position of pmc.oi bit */
123
124/* XXX: does not support more than 64 PMDs */
125#define CTX_USED_PMD(ctx, mask) (ctx)->ctx_used_pmds[0] |= (mask)
126#define CTX_IS_USED_PMD(ctx, c) (((ctx)->ctx_used_pmds[0] & (1UL << (c))) != 0UL)
127
128#define CTX_USED_MONITOR(ctx, mask) (ctx)->ctx_used_monitors[0] |= (mask)
129
130#define CTX_USED_IBR(ctx,n) (ctx)->ctx_used_ibrs[(n)>>6] |= 1UL<< ((n) % 64)
131#define CTX_USED_DBR(ctx,n) (ctx)->ctx_used_dbrs[(n)>>6] |= 1UL<< ((n) % 64)
132#define CTX_USES_DBREGS(ctx) (((pfm_context_t *)(ctx))->ctx_fl_using_dbreg==1)
133#define PFM_CODE_RR 0 /* requesting code range restriction */
134#define PFM_DATA_RR 1 /* requesting data range restriction */
135
136#define PFM_CPUINFO_CLEAR(v) pfm_get_cpu_var(pfm_syst_info) &= ~(v)
137#define PFM_CPUINFO_SET(v) pfm_get_cpu_var(pfm_syst_info) |= (v)
138#define PFM_CPUINFO_GET() pfm_get_cpu_var(pfm_syst_info)
139
140#define RDEP(x) (1UL<<(x))
141
142/*
143 * context protection macros
144 * in SMP:
145 * - we need to protect against CPU concurrency (spin_lock)
146 * - we need to protect against PMU overflow interrupts (local_irq_disable)
147 * in UP:
148 * - we need to protect against PMU overflow interrupts (local_irq_disable)
149 *
 150 * spin_lock_irqsave()/spin_unlock_irqrestore():
 151 * in SMP: local_irq_disable + spin_lock
 152 * in UP : local_irq_disable
 153 *
 154 * spin_lock()/spin_unlock():
155 * in UP : removed automatically
156 * in SMP: protect against context accesses from other CPU. interrupts
157 * are not masked. This is useful for the PMU interrupt handler
158 * because we know we will not get PMU concurrency in that code.
159 */
160#define PROTECT_CTX(c, f) \
161 do { \
 162 DPRINT(("spinlock_irq_save ctx %p by [%d]\n", c, task_pid_nr(current))); \
 163 spin_lock_irqsave(&(c)->ctx_lock, f); \
 164 DPRINT(("spinlocked ctx %p by [%d]\n", c, task_pid_nr(current))); \
165 } while(0)
166
167#define UNPROTECT_CTX(c, f) \
168 do { \
 169 DPRINT(("spinlock_irq_restore ctx %p by [%d]\n", c, task_pid_nr(current))); \
170 spin_unlock_irqrestore(&(c)->ctx_lock, f); \
171 } while(0)
172
173#define PROTECT_CTX_NOPRINT(c, f) \
174 do { \
175 spin_lock_irqsave(&(c)->ctx_lock, f); \
176 } while(0)
177
178
179#define UNPROTECT_CTX_NOPRINT(c, f) \
180 do { \
181 spin_unlock_irqrestore(&(c)->ctx_lock, f); \
182 } while(0)
183
184
185#define PROTECT_CTX_NOIRQ(c) \
186 do { \
187 spin_lock(&(c)->ctx_lock); \
188 } while(0)
189
190#define UNPROTECT_CTX_NOIRQ(c) \
191 do { \
192 spin_unlock(&(c)->ctx_lock); \
193 } while(0)
194
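/*
 * Typical usage (illustrative sketch): code paths that touch a pfm_context_t
 * outside of the PMU interrupt handler bracket the access with the irqsave
 * variants, e.g.:
 */
#if 0
	unsigned long flags;

	PROTECT_CTX(ctx, flags);
	/* ... read or modify ctx_* fields ... */
	UNPROTECT_CTX(ctx, flags);
#endif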
195
196#ifdef CONFIG_SMP
197
198#define GET_ACTIVATION() pfm_get_cpu_var(pmu_activation_number)
199#define INC_ACTIVATION() pfm_get_cpu_var(pmu_activation_number)++
200#define SET_ACTIVATION(c) (c)->ctx_last_activation = GET_ACTIVATION()
201
202#else /* !CONFIG_SMP */
203#define SET_ACTIVATION(t) do {} while(0)
204#define GET_ACTIVATION(t) do {} while(0)
205#define INC_ACTIVATION(t) do {} while(0)
206#endif /* CONFIG_SMP */
207
208#define SET_PMU_OWNER(t, c) do { pfm_get_cpu_var(pmu_owner) = (t); pfm_get_cpu_var(pmu_ctx) = (c); } while(0)
209#define GET_PMU_OWNER() pfm_get_cpu_var(pmu_owner)
210#define GET_PMU_CTX() pfm_get_cpu_var(pmu_ctx)
211
212#define LOCK_PFS(g) spin_lock_irqsave(&pfm_sessions.pfs_lock, g)
213#define UNLOCK_PFS(g) spin_unlock_irqrestore(&pfm_sessions.pfs_lock, g)
214
215#define PFM_REG_RETFLAG_SET(flags, val) do { flags &= ~PFM_REG_RETFL_MASK; flags |= (val); } while(0)
216
217/*
218 * cmp0 must be the value of pmc0
219 */
220#define PMC0_HAS_OVFL(cmp0) (cmp0 & ~0x1UL)
221
222#define PFMFS_MAGIC 0xa0b4d889
223
224/*
225 * debugging
226 */
227#define PFM_DEBUGGING 1
228#ifdef PFM_DEBUGGING
229#define DPRINT(a) \
230 do { \
 231 if (unlikely(pfm_sysctl.debug >0)) { printk("%s.%d: CPU%d [%d] ", __func__, __LINE__, smp_processor_id(), task_pid_nr(current)); printk a; } \
232 } while (0)
233
234#define DPRINT_ovfl(a) \
235 do { \
 236 if (unlikely(pfm_sysctl.debug > 0 && pfm_sysctl.debug_ovfl >0)) { printk("%s.%d: CPU%d [%d] ", __func__, __LINE__, smp_processor_id(), task_pid_nr(current)); printk a; } \
237 } while (0)
238#endif
239
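/*
 * Example (illustrative): DPRINT takes a doubly-parenthesized printf-style
 * argument list, e.g. DPRINT(("ctx=%p state=%d\n", ctx, ctx->ctx_state));
 * output is emitted only when the kernel.perfmon.debug sysctl (see
 * pfm_ctl_table further down) is set to a non-zero value.
 */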
240/*
241 * 64-bit software counter structure
242 *
243 * the next_reset_type is applied to the next call to pfm_reset_regs()
244 */
245typedef struct {
246 unsigned long val; /* virtual 64bit counter value */
247 unsigned long lval; /* last reset value */
248 unsigned long long_reset; /* reset value on sampling overflow */
249 unsigned long short_reset; /* reset value on overflow */
250 unsigned long reset_pmds[4]; /* which other pmds to reset when this counter overflows */
251 unsigned long smpl_pmds[4]; /* which pmds are accessed when counter overflow */
252 unsigned long seed; /* seed for random-number generator */
253 unsigned long mask; /* mask for random-number generator */
254 unsigned int flags; /* notify/do not notify */
255 unsigned long eventid; /* overflow event identifier */
256} pfm_counter_t;
257
258/*
259 * context flags
260 */
261typedef struct {
 262 unsigned int block:1; /* when 1, task will block on user notifications */
263 unsigned int system:1; /* do system wide monitoring */
264 unsigned int using_dbreg:1; /* using range restrictions (debug registers) */
265 unsigned int is_sampling:1; /* true if using a custom format */
266 unsigned int excl_idle:1; /* exclude idle task in system wide session */
267 unsigned int going_zombie:1; /* context is zombie (MASKED+blocking) */
268 unsigned int trap_reason:2; /* reason for going into pfm_handle_work() */
269 unsigned int no_msg:1; /* no message sent on overflow */
270 unsigned int can_restart:1; /* allowed to issue a PFM_RESTART */
271 unsigned int reserved:22;
272} pfm_context_flags_t;
273
274#define PFM_TRAP_REASON_NONE 0x0 /* default value */
275#define PFM_TRAP_REASON_BLOCK 0x1 /* we need to block on overflow */
276#define PFM_TRAP_REASON_RESET 0x2 /* we need to reset PMDs */
277
278
279/*
280 * perfmon context: encapsulates all the state of a monitoring session
281 */
282
283typedef struct pfm_context {
284 spinlock_t ctx_lock; /* context protection */
285
286 pfm_context_flags_t ctx_flags; /* bitmask of flags (block reason incl.) */
287 unsigned int ctx_state; /* state: active/inactive (no bitfield) */
288
289 struct task_struct *ctx_task; /* task to which context is attached */
290
291 unsigned long ctx_ovfl_regs[4]; /* which registers overflowed (notification) */
292
 293 struct completion ctx_restart_done; /* used for blocking notification mode */
294
295 unsigned long ctx_used_pmds[4]; /* bitmask of PMD used */
296 unsigned long ctx_all_pmds[4]; /* bitmask of all accessible PMDs */
297 unsigned long ctx_reload_pmds[4]; /* bitmask of force reload PMD on ctxsw in */
298
299 unsigned long ctx_all_pmcs[4]; /* bitmask of all accessible PMCs */
300 unsigned long ctx_reload_pmcs[4]; /* bitmask of force reload PMC on ctxsw in */
301 unsigned long ctx_used_monitors[4]; /* bitmask of monitor PMC being used */
302
 303 unsigned long ctx_pmcs[PFM_NUM_PMC_REGS]; /* saved copies of PMC values */
304
305 unsigned int ctx_used_ibrs[1]; /* bitmask of used IBR (speedup ctxsw in) */
306 unsigned int ctx_used_dbrs[1]; /* bitmask of used DBR (speedup ctxsw in) */
307 unsigned long ctx_dbrs[IA64_NUM_DBG_REGS]; /* DBR values (cache) when not loaded */
308 unsigned long ctx_ibrs[IA64_NUM_DBG_REGS]; /* IBR values (cache) when not loaded */
309
310 pfm_counter_t ctx_pmds[PFM_NUM_PMD_REGS]; /* software state for PMDS */
311
312 unsigned long th_pmcs[PFM_NUM_PMC_REGS]; /* PMC thread save state */
313 unsigned long th_pmds[PFM_NUM_PMD_REGS]; /* PMD thread save state */
 314
 315 unsigned long ctx_saved_psr_up; /* only contains psr.up value */
316
317 unsigned long ctx_last_activation; /* context last activation number for last_cpu */
318 unsigned int ctx_last_cpu; /* CPU id of current or last CPU used (SMP only) */
319 unsigned int ctx_cpu; /* cpu to which perfmon is applied (system wide) */
320
 321 int ctx_fd; /* file descriptor used by this context */
322 pfm_ovfl_arg_t ctx_ovfl_arg; /* argument to custom buffer format handler */
323
324 pfm_buffer_fmt_t *ctx_buf_fmt; /* buffer format callbacks */
325 void *ctx_smpl_hdr; /* points to sampling buffer header kernel vaddr */
326 unsigned long ctx_smpl_size; /* size of sampling buffer */
327 void *ctx_smpl_vaddr; /* user level virtual address of smpl buffer */
328
329 wait_queue_head_t ctx_msgq_wait;
330 pfm_msg_t ctx_msgq[PFM_MAX_MSGS];
331 int ctx_msgq_head;
332 int ctx_msgq_tail;
333 struct fasync_struct *ctx_async_queue;
334
335 wait_queue_head_t ctx_zombieq; /* termination cleanup wait queue */
336} pfm_context_t;
337
338/*
339 * magic number used to verify that structure is really
340 * a perfmon context
341 */
342#define PFM_IS_FILE(f) ((f)->f_op == &pfm_file_ops)
343
344#define PFM_GET_CTX(t) ((pfm_context_t *)(t)->thread.pfm_context)
345
346#ifdef CONFIG_SMP
347#define SET_LAST_CPU(ctx, v) (ctx)->ctx_last_cpu = (v)
348#define GET_LAST_CPU(ctx) (ctx)->ctx_last_cpu
349#else
350#define SET_LAST_CPU(ctx, v) do {} while(0)
351#define GET_LAST_CPU(ctx) do {} while(0)
352#endif
353
354
355#define ctx_fl_block ctx_flags.block
356#define ctx_fl_system ctx_flags.system
357#define ctx_fl_using_dbreg ctx_flags.using_dbreg
358#define ctx_fl_is_sampling ctx_flags.is_sampling
359#define ctx_fl_excl_idle ctx_flags.excl_idle
360#define ctx_fl_going_zombie ctx_flags.going_zombie
361#define ctx_fl_trap_reason ctx_flags.trap_reason
362#define ctx_fl_no_msg ctx_flags.no_msg
363#define ctx_fl_can_restart ctx_flags.can_restart
364
365#define PFM_SET_WORK_PENDING(t, v) do { (t)->thread.pfm_needs_checking = v; } while(0);
366#define PFM_GET_WORK_PENDING(t) (t)->thread.pfm_needs_checking
367
368/*
369 * global information about all sessions
370 * mostly used to synchronize between system wide and per-process
371 */
372typedef struct {
373 spinlock_t pfs_lock; /* lock the structure */
374
375 unsigned int pfs_task_sessions; /* number of per task sessions */
376 unsigned int pfs_sys_sessions; /* number of per system wide sessions */
377 unsigned int pfs_sys_use_dbregs; /* incremented when a system wide session uses debug regs */
378 unsigned int pfs_ptrace_use_dbregs; /* incremented when a process uses debug regs */
379 struct task_struct *pfs_sys_session[NR_CPUS]; /* point to task owning a system-wide session */
380} pfm_session_t;
381
382/*
383 * information about a PMC or PMD.
384 * dep_pmd[]: a bitmask of dependent PMD registers
385 * dep_pmc[]: a bitmask of dependent PMC registers
386 */
387typedef int (*pfm_reg_check_t)(struct task_struct *task, pfm_context_t *ctx, unsigned int cnum, unsigned long *val, struct pt_regs *regs);
388typedef struct {
389 unsigned int type;
390 int pm_pos;
391 unsigned long default_value; /* power-on default value */
392 unsigned long reserved_mask; /* bitmask of reserved bits */
393 pfm_reg_check_t read_check;
394 pfm_reg_check_t write_check;
395 unsigned long dep_pmd[4];
396 unsigned long dep_pmc[4];
397} pfm_reg_desc_t;
398
399/* assume cnum is a valid monitor */
400#define PMC_PM(cnum, val) (((val) >> (pmu_conf->pmc_desc[cnum].pm_pos)) & 0x1)
401
402/*
403 * This structure is initialized at boot time and contains
404 * a description of the PMU main characteristics.
405 *
406 * If the probe function is defined, detection is based
407 * on its return value:
408 * - 0 means recognized PMU
409 * - anything else means not supported
410 * When the probe function is not defined, then the pmu_family field
411 * is used and it must match the host CPU family such that:
412 * - cpu->family & config->pmu_family != 0
413 */
414typedef struct {
415 unsigned long ovfl_val; /* overflow value for counters */
416
417 pfm_reg_desc_t *pmc_desc; /* detailed PMC register dependencies descriptions */
418 pfm_reg_desc_t *pmd_desc; /* detailed PMD register dependencies descriptions */
419
420 unsigned int num_pmcs; /* number of PMCS: computed at init time */
421 unsigned int num_pmds; /* number of PMDS: computed at init time */
422 unsigned long impl_pmcs[4]; /* bitmask of implemented PMCS */
423 unsigned long impl_pmds[4]; /* bitmask of implemented PMDS */
424
425 char *pmu_name; /* PMU family name */
426 unsigned int pmu_family; /* cpuid family pattern used to identify pmu */
427 unsigned int flags; /* pmu specific flags */
428 unsigned int num_ibrs; /* number of IBRS: computed at init time */
429 unsigned int num_dbrs; /* number of DBRS: computed at init time */
430 unsigned int num_counters; /* PMC/PMD counting pairs : computed at init time */
431 int (*probe)(void); /* customized probe routine */
432 unsigned int use_rr_dbregs:1; /* set if debug registers used for range restriction */
433} pmu_config_t;
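/*
 * Illustrative sketch of the detection convention described above, for a
 * hypothetical PMU description (the real tables live in the perfmon_*.h
 * headers included further down): a probe routine returns 0 when it
 * recognizes the host PMU; without a probe, pmu_family is matched against
 * the CPU family.
 */
#if 0
static int pfm_foo_probe_pmu(void)
{
	/* assumes the cpuinfo family field; hypothetical family value */
	return local_cpu_data->family == 0x20 ? 0 : -1;
}

static pmu_config_t pmu_conf_foo = {
	.pmu_name	= "Foo",
	.pmu_family	= 0x20,
	.probe		= pfm_foo_probe_pmu,
};
#endif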
434/*
435 * PMU specific flags
436 */
437#define PFM_PMU_IRQ_RESEND 1 /* PMU needs explicit IRQ resend */
438
439/*
440 * debug register related type definitions
441 */
442typedef struct {
443 unsigned long ibr_mask:56;
444 unsigned long ibr_plm:4;
445 unsigned long ibr_ig:3;
446 unsigned long ibr_x:1;
447} ibr_mask_reg_t;
448
449typedef struct {
450 unsigned long dbr_mask:56;
451 unsigned long dbr_plm:4;
452 unsigned long dbr_ig:2;
453 unsigned long dbr_w:1;
454 unsigned long dbr_r:1;
455} dbr_mask_reg_t;
456
457typedef union {
458 unsigned long val;
459 ibr_mask_reg_t ibr;
460 dbr_mask_reg_t dbr;
461} dbreg_t;
462
463
464/*
465 * perfmon command descriptions
466 */
467typedef struct {
468 int (*cmd_func)(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs);
469 char *cmd_name;
470 int cmd_flags;
471 unsigned int cmd_narg;
472 size_t cmd_argsize;
473 int (*cmd_getsize)(void *arg, size_t *sz);
474} pfm_cmd_desc_t;
475
476#define PFM_CMD_FD 0x01 /* command requires a file descriptor */
477#define PFM_CMD_ARG_READ 0x02 /* command must read argument(s) */
478#define PFM_CMD_ARG_RW 0x04 /* command must read/write argument(s) */
479#define PFM_CMD_STOP 0x08 /* command does not work on zombie context */
480
481
482#define PFM_CMD_NAME(cmd) pfm_cmd_tab[(cmd)].cmd_name
483#define PFM_CMD_READ_ARG(cmd) (pfm_cmd_tab[(cmd)].cmd_flags & PFM_CMD_ARG_READ)
484#define PFM_CMD_RW_ARG(cmd) (pfm_cmd_tab[(cmd)].cmd_flags & PFM_CMD_ARG_RW)
485#define PFM_CMD_USE_FD(cmd) (pfm_cmd_tab[(cmd)].cmd_flags & PFM_CMD_FD)
486#define PFM_CMD_STOPPED(cmd) (pfm_cmd_tab[(cmd)].cmd_flags & PFM_CMD_STOP)
487
488#define PFM_CMD_ARG_MANY -1 /* cannot be zero */
489
490typedef struct {
491 unsigned long pfm_spurious_ovfl_intr_count; /* keep track of spurious ovfl interrupts */
492 unsigned long pfm_replay_ovfl_intr_count; /* keep track of replayed ovfl interrupts */
493 unsigned long pfm_ovfl_intr_count; /* keep track of ovfl interrupts */
494 unsigned long pfm_ovfl_intr_cycles; /* cycles spent processing ovfl interrupts */
495 unsigned long pfm_ovfl_intr_cycles_min; /* min cycles spent processing ovfl interrupts */
496 unsigned long pfm_ovfl_intr_cycles_max; /* max cycles spent processing ovfl interrupts */
497 unsigned long pfm_smpl_handler_calls;
498 unsigned long pfm_smpl_handler_cycles;
499 char pad[SMP_CACHE_BYTES] ____cacheline_aligned;
500} pfm_stats_t;
501
502/*
503 * perfmon internal variables
504 */
505static pfm_stats_t pfm_stats[NR_CPUS];
506static pfm_session_t pfm_sessions; /* global sessions information */
507
508static DEFINE_SPINLOCK(pfm_alt_install_check);
509static pfm_intr_handler_desc_t *pfm_alt_intr_handler;
510
511static struct proc_dir_entry *perfmon_dir;
512static pfm_uuid_t pfm_null_uuid = {0,};
513
514static spinlock_t pfm_buffer_fmt_lock;
515static LIST_HEAD(pfm_buffer_fmt_list);
516
517static pmu_config_t *pmu_conf;
518
519/* sysctl() controls */
520pfm_sysctl_t pfm_sysctl;
521EXPORT_SYMBOL(pfm_sysctl);
522
523static ctl_table pfm_ctl_table[]={
 524 {
 525 .procname = "debug",
 526 .data = &pfm_sysctl.debug,
 527 .maxlen = sizeof(int),
 528 .mode = 0666,
 529 .proc_handler = proc_dointvec,
 530 },
 531 {
 532 .procname = "debug_ovfl",
 533 .data = &pfm_sysctl.debug_ovfl,
 534 .maxlen = sizeof(int),
 535 .mode = 0666,
 536 .proc_handler = proc_dointvec,
 537 },
 538 {
 539 .procname = "fastctxsw",
 540 .data = &pfm_sysctl.fastctxsw,
 541 .maxlen = sizeof(int),
 542 .mode = 0600,
 543 .proc_handler = proc_dointvec,
 544 },
 545 {
 546 .procname = "expert_mode",
 547 .data = &pfm_sysctl.expert_mode,
 548 .maxlen = sizeof(int),
 549 .mode = 0600,
 550 .proc_handler = proc_dointvec,
 551 },
 552 {}
 553};
554static ctl_table pfm_sysctl_dir[] = {
 555 {
 556 .procname = "perfmon",
 557 .mode = 0555,
 558 .child = pfm_ctl_table,
 559 },
 560 {}
 561};
562static ctl_table pfm_sysctl_root[] = {
 563 {
 564 .procname = "kernel",
 565 .mode = 0555,
 566 .child = pfm_sysctl_dir,
 567 },
 568 {}
 569};
570static struct ctl_table_header *pfm_sysctl_header;
571
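/*
 * Note (illustrative): once registered, the tables above surface as
 * /proc/sys/kernel/perfmon/{debug,debug_ovfl,fastctxsw,expert_mode}, so
 * e.g. "echo 1 > /proc/sys/kernel/perfmon/debug" enables the DPRINT output
 * defined earlier.
 */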
572static int pfm_context_unload(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs);
573
574#define pfm_get_cpu_var(v) __ia64_per_cpu_var(v)
575#define pfm_get_cpu_data(a,b) per_cpu(a, b)
576
577static inline void
578pfm_put_task(struct task_struct *task)
579{
580 if (task != current) put_task_struct(task);
581}
582
583static inline void
584pfm_reserve_page(unsigned long a)
585{
586 SetPageReserved(vmalloc_to_page((void *)a));
587}
588static inline void
589pfm_unreserve_page(unsigned long a)
590{
591 ClearPageReserved(vmalloc_to_page((void*)a));
592}
593
594static inline unsigned long
595pfm_protect_ctx_ctxsw(pfm_context_t *x)
596{
597 spin_lock(&(x)->ctx_lock);
598 return 0UL;
599}
600
601static inline void
602pfm_unprotect_ctx_ctxsw(pfm_context_t *x, unsigned long f)
603{
604 spin_unlock(&(x)->ctx_lock);
605}
606
607static inline unsigned int
608pfm_do_munmap(struct mm_struct *mm, unsigned long addr, size_t len, int acct)
609{
610 return do_munmap(mm, addr, len);
611}
612
613static inline unsigned long
614pfm_get_unmapped_area(struct file *file, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags, unsigned long exec)
615{
616 return get_unmapped_area(file, addr, len, pgoff, flags);
617}
618
619
620static int
621pfmfs_get_sb(struct file_system_type *fs_type, int flags, const char *dev_name, void *data,
 622 struct vfsmount *mnt)
623{
 624 return get_sb_pseudo(fs_type, "pfm:", NULL, PFMFS_MAGIC, mnt);
625}
626
627static struct file_system_type pfm_fs_type = {
628 .name = "pfmfs",
629 .get_sb = pfmfs_get_sb,
630 .kill_sb = kill_anon_super,
631};
632
633DEFINE_PER_CPU(unsigned long, pfm_syst_info);
634DEFINE_PER_CPU(struct task_struct *, pmu_owner);
635DEFINE_PER_CPU(pfm_context_t *, pmu_ctx);
636DEFINE_PER_CPU(unsigned long, pmu_activation_number);
637EXPORT_PER_CPU_SYMBOL_GPL(pfm_syst_info);
638
639
640/* forward declaration */
641static const struct file_operations pfm_file_ops;
642
643/*
644 * forward declarations
645 */
646#ifndef CONFIG_SMP
647static void pfm_lazy_save_regs (struct task_struct *ta);
648#endif
649
650void dump_pmu_state(const char *);
651static int pfm_write_ibr_dbr(int mode, pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs);
652
653#include "perfmon_itanium.h"
654#include "perfmon_mckinley.h"
655#include "perfmon_montecito.h"
656#include "perfmon_generic.h"
657
658static pmu_config_t *pmu_confs[]={
 659 &pmu_conf_mont,
660 &pmu_conf_mck,
661 &pmu_conf_ita,
662 &pmu_conf_gen, /* must be last */
663 NULL
664};
665
666
667static int pfm_end_notify_user(pfm_context_t *ctx);
668
669static inline void
670pfm_clear_psr_pp(void)
671{
672 ia64_rsm(IA64_PSR_PP);
673 ia64_srlz_i();
674}
675
676static inline void
677pfm_set_psr_pp(void)
678{
679 ia64_ssm(IA64_PSR_PP);
680 ia64_srlz_i();
681}
682
683static inline void
684pfm_clear_psr_up(void)
685{
686 ia64_rsm(IA64_PSR_UP);
687 ia64_srlz_i();
688}
689
690static inline void
691pfm_set_psr_up(void)
692{
693 ia64_ssm(IA64_PSR_UP);
694 ia64_srlz_i();
695}
696
697static inline unsigned long
698pfm_get_psr(void)
699{
700 unsigned long tmp;
701 tmp = ia64_getreg(_IA64_REG_PSR);
702 ia64_srlz_i();
703 return tmp;
704}
705
706static inline void
707pfm_set_psr_l(unsigned long val)
708{
709 ia64_setreg(_IA64_REG_PSR_L, val);
710 ia64_srlz_i();
711}
712
713static inline void
714pfm_freeze_pmu(void)
715{
716 ia64_set_pmc(0,1UL);
717 ia64_srlz_d();
718}
719
720static inline void
721pfm_unfreeze_pmu(void)
722{
723 ia64_set_pmc(0,0UL);
724 ia64_srlz_d();
725}
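/*
 * Note (illustrative): pmc[0] bit 0 is the freeze bit, which is why the two
 * helpers above write 1/0 to PMC0; the remaining bits of pmc0 record which
 * counters overflowed, which is what PMC0_HAS_OVFL() tests by masking off
 * bit 0.
 */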
726
727static inline void
728pfm_restore_ibrs(unsigned long *ibrs, unsigned int nibrs)
729{
730 int i;
731
732 for (i=0; i < nibrs; i++) {
733 ia64_set_ibr(i, ibrs[i]);
734 ia64_dv_serialize_instruction();
735 }
736 ia64_srlz_i();
737}
738
739static inline void
740pfm_restore_dbrs(unsigned long *dbrs, unsigned int ndbrs)
741{
742 int i;
743
744 for (i=0; i < ndbrs; i++) {
745 ia64_set_dbr(i, dbrs[i]);
746 ia64_dv_serialize_data();
747 }
748 ia64_srlz_d();
749}
750
751/*
752 * PMD[i] must be a counter. no check is made
753 */
754static inline unsigned long
755pfm_read_soft_counter(pfm_context_t *ctx, int i)
756{
757 return ctx->ctx_pmds[i].val + (ia64_get_pmd(i) & pmu_conf->ovfl_val);
758}
759
760/*
761 * PMD[i] must be a counter. no check is made
762 */
763static inline void
764pfm_write_soft_counter(pfm_context_t *ctx, int i, unsigned long val)
765{
766 unsigned long ovfl_val = pmu_conf->ovfl_val;
767
768 ctx->ctx_pmds[i].val = val & ~ovfl_val;
769 /*
 770 * writing to the unimplemented part is ignored, so we do not need to
771 * mask off top part
772 */
773 ia64_set_pmd(i, val & ovfl_val);
774}
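/*
 * Worked example (illustrative, assuming a 47-bit hardware counter, i.e.
 * ovfl_val = (1UL<<47)-1): writing val = 0x0001000000001234 stores 0x1234
 * in the hardware PMD and keeps the upper bits
 * (val & ~ovfl_val = 0x0001000000000000) in ctx_pmds[i].val;
 * pfm_read_soft_counter() recombines the two halves into the full 64-bit
 * virtual counter value.
 */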
775
776static pfm_msg_t *
777pfm_get_new_msg(pfm_context_t *ctx)
778{
779 int idx, next;
780
781 next = (ctx->ctx_msgq_tail+1) % PFM_MAX_MSGS;
782
783 DPRINT(("ctx_fd=%p head=%d tail=%d\n", ctx, ctx->ctx_msgq_head, ctx->ctx_msgq_tail));
784 if (next == ctx->ctx_msgq_head) return NULL;
785
786 idx = ctx->ctx_msgq_tail;
787 ctx->ctx_msgq_tail = next;
788
789 DPRINT(("ctx=%p head=%d tail=%d msg=%d\n", ctx, ctx->ctx_msgq_head, ctx->ctx_msgq_tail, idx));
790
791 return ctx->ctx_msgq+idx;
792}
793
794static pfm_msg_t *
795pfm_get_next_msg(pfm_context_t *ctx)
796{
797 pfm_msg_t *msg;
798
799 DPRINT(("ctx=%p head=%d tail=%d\n", ctx, ctx->ctx_msgq_head, ctx->ctx_msgq_tail));
800
801 if (PFM_CTXQ_EMPTY(ctx)) return NULL;
802
803 /*
804 * get oldest message
805 */
806 msg = ctx->ctx_msgq+ctx->ctx_msgq_head;
807
808 /*
809 * and move forward
810 */
811 ctx->ctx_msgq_head = (ctx->ctx_msgq_head+1) % PFM_MAX_MSGS;
812
813 DPRINT(("ctx=%p head=%d tail=%d type=%d\n", ctx, ctx->ctx_msgq_head, ctx->ctx_msgq_tail, msg->pfm_gen_msg.msg_type));
814
815 return msg;
816}
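/*
 * Note (illustrative): the queue keeps one slot unused so that
 * head == tail unambiguously means "empty" (PFM_CTXQ_EMPTY) while
 * (tail+1) % PFM_MAX_MSGS == head means "full", in which case
 * pfm_get_new_msg() above returns NULL and no further message can be queued.
 */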
817
818static void
819pfm_reset_msgq(pfm_context_t *ctx)
820{
821 ctx->ctx_msgq_head = ctx->ctx_msgq_tail = 0;
822 DPRINT(("ctx=%p msgq reset\n", ctx));
823}
824
825static void *
826pfm_rvmalloc(unsigned long size)
827{
828 void *mem;
829 unsigned long addr;
830
831 size = PAGE_ALIGN(size);
832 mem = vmalloc(size);
833 if (mem) {
834 //printk("perfmon: CPU%d pfm_rvmalloc(%ld)=%p\n", smp_processor_id(), size, mem);
835 memset(mem, 0, size);
836 addr = (unsigned long)mem;
837 while (size > 0) {
838 pfm_reserve_page(addr);
839 addr+=PAGE_SIZE;
840 size-=PAGE_SIZE;
841 }
842 }
843 return mem;
844}
845
846static void
847pfm_rvfree(void *mem, unsigned long size)
848{
849 unsigned long addr;
850
851 if (mem) {
852 DPRINT(("freeing physical buffer @%p size=%lu\n", mem, size));
853 addr = (unsigned long) mem;
854 while ((long) size > 0) {
855 pfm_unreserve_page(addr);
856 addr+=PAGE_SIZE;
857 size-=PAGE_SIZE;
858 }
859 vfree(mem);
860 }
861 return;
862}
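/*
 * Note (illustrative sketch of the rationale): the pages backing the
 * vmalloc'ed sampling buffer are marked reserved so they can later be
 * mapped into the monitoring task's address space (see ctx_smpl_vaddr)
 * without being managed by the VM; pfm_rvfree() clears the reservation
 * before vfree().
 */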
863
864static pfm_context_t *
865pfm_context_alloc(int ctx_flags)
866{
867 pfm_context_t *ctx;
868
869 /*
870 * allocate context descriptor
871 * must be able to free with interrupts disabled
872 */
 873 ctx = kzalloc(sizeof(pfm_context_t), GFP_KERNEL);
 874 if (ctx) {
 875 DPRINT(("alloc ctx @%p\n", ctx));
876
877 /*
878 * init context protection lock
879 */
880 spin_lock_init(&ctx->ctx_lock);
881
882 /*
883 * context is unloaded
884 */
885 ctx->ctx_state = PFM_CTX_UNLOADED;
886
887 /*
888 * initialization of context's flags
889 */
890 ctx->ctx_fl_block = (ctx_flags & PFM_FL_NOTIFY_BLOCK) ? 1 : 0;
891 ctx->ctx_fl_system = (ctx_flags & PFM_FL_SYSTEM_WIDE) ? 1: 0;
892 ctx->ctx_fl_no_msg = (ctx_flags & PFM_FL_OVFL_NO_MSG) ? 1: 0;
893 /*
894 * will move to set properties
895 * ctx->ctx_fl_excl_idle = (ctx_flags & PFM_FL_EXCL_IDLE) ? 1: 0;
896 */
897
898 /*
899 * init restart semaphore to locked
900 */
901 init_completion(&ctx->ctx_restart_done);
902
903 /*
904 * activation is used in SMP only
905 */
906 ctx->ctx_last_activation = PFM_INVALID_ACTIVATION;
907 SET_LAST_CPU(ctx, -1);
908
909 /*
910 * initialize notification message queue
911 */
912 ctx->ctx_msgq_head = ctx->ctx_msgq_tail = 0;
913 init_waitqueue_head(&ctx->ctx_msgq_wait);
914 init_waitqueue_head(&ctx->ctx_zombieq);
915
916 }
917 return ctx;
918}
919
920static void
921pfm_context_free(pfm_context_t *ctx)
922{
923 if (ctx) {
924 DPRINT(("free ctx @%p\n", ctx));
925 kfree(ctx);
926 }
927}
928
929static void
930pfm_mask_monitoring(struct task_struct *task)
931{
932 pfm_context_t *ctx = PFM_GET_CTX(task);
933 unsigned long mask, val, ovfl_mask;
934 int i;
935
 936 DPRINT_ovfl(("masking monitoring for [%d]\n", task_pid_nr(task)));
937
938 ovfl_mask = pmu_conf->ovfl_val;
939 /*
940 * monitoring can only be masked as a result of a valid
941 * counter overflow. In UP, it means that the PMU still
942 * has an owner. Note that the owner can be different
943 * from the current task. However the PMU state belongs
944 * to the owner.
945 * In SMP, a valid overflow only happens when task is
946 * current. Therefore if we come here, we know that
947 * the PMU state belongs to the current task, therefore
948 * we can access the live registers.
949 *
950 * So in both cases, the live register contains the owner's
951 * state. We can ONLY touch the PMU registers and NOT the PSR.
952 *
 953 * As a consequence of this call, the ctx->th_pmds[] array
954 * contains stale information which must be ignored
955 * when context is reloaded AND monitoring is active (see
956 * pfm_restart).
957 */
958 mask = ctx->ctx_used_pmds[0];
959 for (i = 0; mask; i++, mask>>=1) {
960 /* skip non used pmds */
961 if ((mask & 0x1) == 0) continue;
962 val = ia64_get_pmd(i);
963
964 if (PMD_IS_COUNTING(i)) {
965 /*
966 * we rebuild the full 64 bit value of the counter
967 */
968 ctx->ctx_pmds[i].val += (val & ovfl_mask);
969 } else {
970 ctx->ctx_pmds[i].val = val;
971 }
972 DPRINT_ovfl(("pmd[%d]=0x%lx hw_pmd=0x%lx\n",
973 i,
974 ctx->ctx_pmds[i].val,
975 val & ovfl_mask));
976 }
977 /*
978 * mask monitoring by setting the privilege level to 0
979 * we cannot use psr.pp/psr.up for this, it is controlled by
980 * the user
981 *
982 * if task is current, modify actual registers, otherwise modify
983 * thread save state, i.e., what will be restored in pfm_load_regs()
984 */
985 mask = ctx->ctx_used_monitors[0] >> PMU_FIRST_COUNTER;
986 for(i= PMU_FIRST_COUNTER; mask; i++, mask>>=1) {
987 if ((mask & 0x1) == 0UL) continue;
988 ia64_set_pmc(i, ctx->th_pmcs[i] & ~0xfUL);
989 ctx->th_pmcs[i] &= ~0xfUL;
990 DPRINT_ovfl(("pmc[%d]=0x%lx\n", i, ctx->th_pmcs[i]));
991 }
992 /*
993 * make all of this visible
994 */
995 ia64_srlz_d();
996}
997
998/*
999 * must always be done with task == current
1000 *
1001 * context must be in MASKED state when calling
1002 */
1003static void
1004pfm_restore_monitoring(struct task_struct *task)
1005{
1006 pfm_context_t *ctx = PFM_GET_CTX(task);
1007 unsigned long mask, ovfl_mask;
1008 unsigned long psr, val;
1009 int i, is_system;
1010
1011 is_system = ctx->ctx_fl_system;
1012 ovfl_mask = pmu_conf->ovfl_val;
1013
1014 if (task != current) {
 1015 printk(KERN_ERR "perfmon.%d: invalid task[%d] current[%d]\n", __LINE__, task_pid_nr(task), task_pid_nr(current));
1016 return;
1017 }
1018 if (ctx->ctx_state != PFM_CTX_MASKED) {
1019 printk(KERN_ERR "perfmon.%d: task[%d] current[%d] invalid state=%d\n", __LINE__,
 1020 task_pid_nr(task), task_pid_nr(current), ctx->ctx_state);
1021 return;
1022 }
1023 psr = pfm_get_psr();
1024 /*
1025 * monitoring is masked via the PMC.
1026 * As we restore their value, we do not want each counter to
1027 * restart right away. We stop monitoring using the PSR,
1028 * restore the PMC (and PMD) and then re-establish the psr
1029 * as it was. Note that there can be no pending overflow at
1030 * this point, because monitoring was MASKED.
1031 *
 1032 * system-wide sessions are pinned and self-monitoring
1033 */
1034 if (is_system && (PFM_CPUINFO_GET() & PFM_CPUINFO_DCR_PP)) {
1035 /* disable dcr pp */
1036 ia64_setreg(_IA64_REG_CR_DCR, ia64_getreg(_IA64_REG_CR_DCR) & ~IA64_DCR_PP);
1037 pfm_clear_psr_pp();
1038 } else {
1039 pfm_clear_psr_up();
1040 }
1041 /*
1042 * first, we restore the PMD
1043 */
1044 mask = ctx->ctx_used_pmds[0];
1045 for (i = 0; mask; i++, mask>>=1) {
1046 /* skip non used pmds */
1047 if ((mask & 0x1) == 0) continue;
1048
1049 if (PMD_IS_COUNTING(i)) {
1050 /*
1051 * we split the 64bit value according to
1052 * counter width
1053 */
1054 val = ctx->ctx_pmds[i].val & ovfl_mask;
1055 ctx->ctx_pmds[i].val &= ~ovfl_mask;
1056 } else {
1057 val = ctx->ctx_pmds[i].val;
1058 }
1059 ia64_set_pmd(i, val);
1060
1061 DPRINT(("pmd[%d]=0x%lx hw_pmd=0x%lx\n",
1062 i,
1063 ctx->ctx_pmds[i].val,
1064 val));
1065 }
1066 /*
1067 * restore the PMCs
1068 */
1069 mask = ctx->ctx_used_monitors[0] >> PMU_FIRST_COUNTER;
1070 for(i= PMU_FIRST_COUNTER; mask; i++, mask>>=1) {
1071 if ((mask & 0x1) == 0UL) continue;
1072 ctx->th_pmcs[i] = ctx->ctx_pmcs[i];
1073 ia64_set_pmc(i, ctx->th_pmcs[i]);
1074 DPRINT(("[%d] pmc[%d]=0x%lx\n",
1075 task_pid_nr(task), i, ctx->th_pmcs[i]));
1076 }
1077 ia64_srlz_d();
1078
1079 /*
1080 * must restore DBR/IBR because could be modified while masked
1081 * XXX: need to optimize
1082 */
1083 if (ctx->ctx_fl_using_dbreg) {
1084 pfm_restore_ibrs(ctx->ctx_ibrs, pmu_conf->num_ibrs);
1085 pfm_restore_dbrs(ctx->ctx_dbrs, pmu_conf->num_dbrs);
1086 }
1087
1088 /*
1089 * now restore PSR
1090 */
1091 if (is_system && (PFM_CPUINFO_GET() & PFM_CPUINFO_DCR_PP)) {
1092 /* enable dcr pp */
1093 ia64_setreg(_IA64_REG_CR_DCR, ia64_getreg(_IA64_REG_CR_DCR) | IA64_DCR_PP);
1094 ia64_srlz_i();
1095 }
1096 pfm_set_psr_l(psr);
1097}
1098
1099static inline void
1100pfm_save_pmds(unsigned long *pmds, unsigned long mask)
1101{
1102 int i;
1103
1104 ia64_srlz_d();
1105
1106 for (i=0; mask; i++, mask>>=1) {
1107 if (mask & 0x1) pmds[i] = ia64_get_pmd(i);
1108 }
1109}
1110
1111/*
 1112 * reload from thread state (used for ctxsw only)
1113 */
1114static inline void
1115pfm_restore_pmds(unsigned long *pmds, unsigned long mask)
1116{
1117 int i;
1118 unsigned long val, ovfl_val = pmu_conf->ovfl_val;
1119
1120 for (i=0; mask; i++, mask>>=1) {
1121 if ((mask & 0x1) == 0) continue;
1122 val = PMD_IS_COUNTING(i) ? pmds[i] & ovfl_val : pmds[i];
1123 ia64_set_pmd(i, val);
1124 }
1125 ia64_srlz_d();
1126}
1127
1128/*
1129 * propagate PMD from context to thread-state
1130 */
1131static inline void
1132pfm_copy_pmds(struct task_struct *task, pfm_context_t *ctx)
1133{
1134 unsigned long ovfl_val = pmu_conf->ovfl_val;
1135 unsigned long mask = ctx->ctx_all_pmds[0];
1136 unsigned long val;
1137 int i;
1138
1139 DPRINT(("mask=0x%lx\n", mask));
1140
1141 for (i=0; mask; i++, mask>>=1) {
1142
1143 val = ctx->ctx_pmds[i].val;
1144
1145 /*
1146 * We break up the 64 bit value into 2 pieces
1147 * the lower bits go to the machine state in the
1148 * thread (will be reloaded on ctxsw in).
1149 * The upper part stays in the soft-counter.
1150 */
1151 if (PMD_IS_COUNTING(i)) {
1152 ctx->ctx_pmds[i].val = val & ~ovfl_val;
1153 val &= ovfl_val;
1154 }
 1155 ctx->th_pmds[i] = val;
1156
1157 DPRINT(("pmd[%d]=0x%lx soft_val=0x%lx\n",
1158 i,
 1159 ctx->th_pmds[i],
1160 ctx->ctx_pmds[i].val));
1161 }
1162}
1163
1164/*
1165 * propagate PMC from context to thread-state
1166 */
1167static inline void
1168pfm_copy_pmcs(struct task_struct *task, pfm_context_t *ctx)
1169{
1170 unsigned long mask = ctx->ctx_all_pmcs[0];
1171 int i;
1172
1173 DPRINT(("mask=0x%lx\n", mask));
1174
1175 for (i=0; mask; i++, mask>>=1) {
1176 /* masking 0 with ovfl_val yields 0 */
 1177 ctx->th_pmcs[i] = ctx->ctx_pmcs[i];
 1178 DPRINT(("pmc[%d]=0x%lx\n", i, ctx->th_pmcs[i]));
1179 }
1180}
1181
1182
1183
1184static inline void
1185pfm_restore_pmcs(unsigned long *pmcs, unsigned long mask)
1186{
1187 int i;
1188
1189 for (i=0; mask; i++, mask>>=1) {
1190 if ((mask & 0x1) == 0) continue;
1191 ia64_set_pmc(i, pmcs[i]);
1192 }
1193 ia64_srlz_d();
1194}
1195
1196static inline int
1197pfm_uuid_cmp(pfm_uuid_t a, pfm_uuid_t b)
1198{
1199 return memcmp(a, b, sizeof(pfm_uuid_t));
1200}
1201
1202static inline int
1203pfm_buf_fmt_exit(pfm_buffer_fmt_t *fmt, struct task_struct *task, void *buf, struct pt_regs *regs)
1204{
1205 int ret = 0;
1206 if (fmt->fmt_exit) ret = (*fmt->fmt_exit)(task, buf, regs);
1207 return ret;
1208}
1209
1210static inline int
1211pfm_buf_fmt_getsize(pfm_buffer_fmt_t *fmt, struct task_struct *task, unsigned int flags, int cpu, void *arg, unsigned long *size)
1212{
1213 int ret = 0;
1214 if (fmt->fmt_getsize) ret = (*fmt->fmt_getsize)(task, flags, cpu, arg, size);
1215 return ret;
1216}
1217
1218
1219static inline int
1220pfm_buf_fmt_validate(pfm_buffer_fmt_t *fmt, struct task_struct *task, unsigned int flags,
1221 int cpu, void *arg)
1222{
1223 int ret = 0;
1224 if (fmt->fmt_validate) ret = (*fmt->fmt_validate)(task, flags, cpu, arg);
1225 return ret;
1226}
1227
1228static inline int
1229pfm_buf_fmt_init(pfm_buffer_fmt_t *fmt, struct task_struct *task, void *buf, unsigned int flags,
1230 int cpu, void *arg)
1231{
1232 int ret = 0;
1233 if (fmt->fmt_init) ret = (*fmt->fmt_init)(task, buf, flags, cpu, arg);
1234 return ret;
1235}
1236
1237static inline int
1238pfm_buf_fmt_restart(pfm_buffer_fmt_t *fmt, struct task_struct *task, pfm_ovfl_ctrl_t *ctrl, void *buf, struct pt_regs *regs)
1239{
1240 int ret = 0;
1241 if (fmt->fmt_restart) ret = (*fmt->fmt_restart)(task, ctrl, buf, regs);
1242 return ret;
1243}
1244
1245static inline int
1246pfm_buf_fmt_restart_active(pfm_buffer_fmt_t *fmt, struct task_struct *task, pfm_ovfl_ctrl_t *ctrl, void *buf, struct pt_regs *regs)
1247{
1248 int ret = 0;
1249 if (fmt->fmt_restart_active) ret = (*fmt->fmt_restart_active)(task, ctrl, buf, regs);
1250 return ret;
1251}
1252
1253static pfm_buffer_fmt_t *
1254__pfm_find_buffer_fmt(pfm_uuid_t uuid)
1255{
1256 struct list_head * pos;
1257 pfm_buffer_fmt_t * entry;
1258
1259 list_for_each(pos, &pfm_buffer_fmt_list) {
1260 entry = list_entry(pos, pfm_buffer_fmt_t, fmt_list);
1261 if (pfm_uuid_cmp(uuid, entry->fmt_uuid) == 0)
1262 return entry;
1263 }
1264 return NULL;
1265}
1266
1267/*
1268 * find a buffer format based on its uuid
1269 */
1270static pfm_buffer_fmt_t *
1271pfm_find_buffer_fmt(pfm_uuid_t uuid)
1272{
1273 pfm_buffer_fmt_t * fmt;
1274 spin_lock(&pfm_buffer_fmt_lock);
1275 fmt = __pfm_find_buffer_fmt(uuid);
1276 spin_unlock(&pfm_buffer_fmt_lock);
1277 return fmt;
1278}
1279
1280int
1281pfm_register_buffer_fmt(pfm_buffer_fmt_t *fmt)
1282{
1283 int ret = 0;
1284
1285 /* some sanity checks */
1286 if (fmt == NULL || fmt->fmt_name == NULL) return -EINVAL;
1287
1288 /* we need at least a handler */
1289 if (fmt->fmt_handler == NULL) return -EINVAL;
1290
1291 /*
1292 * XXX: need check validity of fmt_arg_size
1293 */
1294
1295 spin_lock(&pfm_buffer_fmt_lock);
1296
1297 if (__pfm_find_buffer_fmt(fmt->fmt_uuid)) {
1298 printk(KERN_ERR "perfmon: duplicate sampling format: %s\n", fmt->fmt_name);
1299 ret = -EBUSY;
1300 goto out;
1301 }
1302 list_add(&fmt->fmt_list, &pfm_buffer_fmt_list);
1303 printk(KERN_INFO "perfmon: added sampling format %s\n", fmt->fmt_name);
1304
1305out:
1306 spin_unlock(&pfm_buffer_fmt_lock);
1307 return ret;
1308}
1309EXPORT_SYMBOL(pfm_register_buffer_fmt);
1310
1311int
1312pfm_unregister_buffer_fmt(pfm_uuid_t uuid)
1313{
1314 pfm_buffer_fmt_t *fmt;
1315 int ret = 0;
1316
1317 spin_lock(&pfm_buffer_fmt_lock);
1318
1319 fmt = __pfm_find_buffer_fmt(uuid);
1320 if (!fmt) {
1321 printk(KERN_ERR "perfmon: cannot unregister format, not found\n");
1322 ret = -EINVAL;
1323 goto out;
1324 }
1325 list_del_init(&fmt->fmt_list);
1326 printk(KERN_INFO "perfmon: removed sampling format: %s\n", fmt->fmt_name);
1327
1328out:
1329 spin_unlock(&pfm_buffer_fmt_lock);
1330 return ret;
1331
1332}
1333EXPORT_SYMBOL(pfm_unregister_buffer_fmt);
1334
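/*
 * Registration sketch (illustrative, hypothetical sampling format; the field
 * names follow pfm_buffer_fmt_t as used above): a format module fills in at
 * least fmt_name, fmt_uuid and fmt_handler, then registers itself, typically
 * from its module init/exit code.
 */
#if 0
static pfm_buffer_fmt_t foo_fmt = {
	.fmt_name    = "foo-sampling-format",
	.fmt_uuid    = { 0xf0, 0x00, /* ... remaining UUID bytes ... */ },
	.fmt_handler = foo_smpl_handler,
};

static int __init foo_fmt_init(void)
{
	return pfm_register_buffer_fmt(&foo_fmt);
}

static void __exit foo_fmt_exit(void)
{
	pfm_unregister_buffer_fmt(foo_fmt.fmt_uuid);
}
#endif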
 1335extern void update_pal_halt_status(int);
 1336
1337static int
1338pfm_reserve_session(struct task_struct *task, int is_syswide, unsigned int cpu)
1339{
1340 unsigned long flags;
1341 /*
 1342 * validity checks on cpu_mask have been done upstream
1343 */
1344 LOCK_PFS(flags);
1345
1346 DPRINT(("in sys_sessions=%u task_sessions=%u dbregs=%u syswide=%d cpu=%u\n",
1347 pfm_sessions.pfs_sys_sessions,
1348 pfm_sessions.pfs_task_sessions,
1349 pfm_sessions.pfs_sys_use_dbregs,
1350 is_syswide,
1351 cpu));
1352
1353 if (is_syswide) {
1354 /*
1355 * cannot mix system wide and per-task sessions
1356 */
1357 if (pfm_sessions.pfs_task_sessions > 0UL) {
1358 DPRINT(("system wide not possible, %u conflicting task_sessions\n",
1359 pfm_sessions.pfs_task_sessions));
1360 goto abort;
1361 }
1362
1363 if (pfm_sessions.pfs_sys_session[cpu]) goto error_conflict;
1364
1365 DPRINT(("reserving system wide session on CPU%u currently on CPU%u\n", cpu, smp_processor_id()));
1366
1367 pfm_sessions.pfs_sys_session[cpu] = task;
1368
1369 pfm_sessions.pfs_sys_sessions++ ;
1370
1371 } else {
1372 if (pfm_sessions.pfs_sys_sessions) goto abort;
1373 pfm_sessions.pfs_task_sessions++;
1374 }
1375
1376 DPRINT(("out sys_sessions=%u task_sessions=%u dbregs=%u syswide=%d cpu=%u\n",
1377 pfm_sessions.pfs_sys_sessions,
1378 pfm_sessions.pfs_task_sessions,
1379 pfm_sessions.pfs_sys_use_dbregs,
1380 is_syswide,
1381 cpu));
1382
1383 /*
1384 * disable default_idle() to go to PAL_HALT
1385 */
1386 update_pal_halt_status(0);
1387
1388 UNLOCK_PFS(flags);
1389
1390 return 0;
1391
1392error_conflict:
1393 DPRINT(("system wide not possible, conflicting session [%d] on CPU%d\n",
 1394 task_pid_nr(pfm_sessions.pfs_sys_session[cpu]),
 1395 cpu));
1396abort:
1397 UNLOCK_PFS(flags);
1398
1399 return -EBUSY;
1400
1401}
1402
1403static int
1404pfm_unreserve_session(pfm_context_t *ctx, int is_syswide, unsigned int cpu)
1405{
1406 unsigned long flags;
1407 /*
 1408 * validity checks on cpu_mask have been done upstream
1409 */
1410 LOCK_PFS(flags);
1411
1412 DPRINT(("in sys_sessions=%u task_sessions=%u dbregs=%u syswide=%d cpu=%u\n",
1413 pfm_sessions.pfs_sys_sessions,
1414 pfm_sessions.pfs_task_sessions,
1415 pfm_sessions.pfs_sys_use_dbregs,
1416 is_syswide,
1417 cpu));
1418
1419
1420 if (is_syswide) {
1421 pfm_sessions.pfs_sys_session[cpu] = NULL;
1422 /*
1423 * would not work with perfmon+more than one bit in cpu_mask
1424 */
1425 if (ctx && ctx->ctx_fl_using_dbreg) {
1426 if (pfm_sessions.pfs_sys_use_dbregs == 0) {
1427 printk(KERN_ERR "perfmon: invalid release for ctx %p sys_use_dbregs=0\n", ctx);
1428 } else {
1429 pfm_sessions.pfs_sys_use_dbregs--;
1430 }
1431 }
1432 pfm_sessions.pfs_sys_sessions--;
1433 } else {
1434 pfm_sessions.pfs_task_sessions--;
1435 }
1436 DPRINT(("out sys_sessions=%u task_sessions=%u dbregs=%u syswide=%d cpu=%u\n",
1437 pfm_sessions.pfs_sys_sessions,
1438 pfm_sessions.pfs_task_sessions,
1439 pfm_sessions.pfs_sys_use_dbregs,
1440 is_syswide,
1441 cpu));
1442
1443 /*
1444 * if possible, enable default_idle() to go into PAL_HALT
1445 */
1446 if (pfm_sessions.pfs_task_sessions == 0 && pfm_sessions.pfs_sys_sessions == 0)
1447 update_pal_halt_status(1);
1448
1449 UNLOCK_PFS(flags);
1450
1451 return 0;
1452}
1453
1454/*
1455 * removes virtual mapping of the sampling buffer.
 1456 * IMPORTANT: cannot be called with interrupts disabled, e.g. inside
1457 * a PROTECT_CTX() section.
1458 */
1459static int
1460pfm_remove_smpl_mapping(struct task_struct *task, void *vaddr, unsigned long size)
1461{
1462 int r;
1463
1464 /* sanity checks */
1465 if (task->mm == NULL || size == 0UL || vaddr == NULL) {
 1466 printk(KERN_ERR "perfmon: pfm_remove_smpl_mapping [%d] invalid context mm=%p\n", task_pid_nr(task), task->mm);
1467 return -EINVAL;
1468 }
1469
1470 DPRINT(("smpl_vaddr=%p size=%lu\n", vaddr, size));
1471
1472 /*
1473 * does the actual unmapping
1474 */
1475 down_write(&task->mm->mmap_sem);
1476
1477 DPRINT(("down_write done smpl_vaddr=%p size=%lu\n", vaddr, size));
1478
1479 r = pfm_do_munmap(task->mm, (unsigned long)vaddr, size, 0);
1480
1481 up_write(&task->mm->mmap_sem);
1482 if (r !=0) {
 1483 printk(KERN_ERR "perfmon: [%d] unable to unmap sampling buffer @%p size=%lu\n", task_pid_nr(task), vaddr, size);
1484 }
1485
1486 DPRINT(("do_unmap(%p, %lu)=%d\n", vaddr, size, r));
1487
1488 return 0;
1489}
1490
1491/*
1492 * free actual physical storage used by sampling buffer
1493 */
1494#if 0
1495static int
1496pfm_free_smpl_buffer(pfm_context_t *ctx)
1497{
1498 pfm_buffer_fmt_t *fmt;
1499
1500 if (ctx->ctx_smpl_hdr == NULL) goto invalid_free;
1501
1502 /*
1503 * we won't use the buffer format anymore
1504 */
1505 fmt = ctx->ctx_buf_fmt;
1506
1507 DPRINT(("sampling buffer @%p size %lu vaddr=%p\n",
1508 ctx->ctx_smpl_hdr,
1509 ctx->ctx_smpl_size,
1510 ctx->ctx_smpl_vaddr));
1511
1512 pfm_buf_fmt_exit(fmt, current, NULL, NULL);
1513
1514 /*
1515 * free the buffer
1516 */
1517 pfm_rvfree(ctx->ctx_smpl_hdr, ctx->ctx_smpl_size);
1518
1519 ctx->ctx_smpl_hdr = NULL;
1520 ctx->ctx_smpl_size = 0UL;
1521
1522 return 0;
1523
1524invalid_free:
 1525 printk(KERN_ERR "perfmon: pfm_free_smpl_buffer [%d] no buffer\n", task_pid_nr(current));
1526 return -EINVAL;
1527}
1528#endif
1529
1530static inline void
1531pfm_exit_smpl_buffer(pfm_buffer_fmt_t *fmt)
1532{
1533 if (fmt == NULL) return;
1534
1535 pfm_buf_fmt_exit(fmt, current, NULL, NULL);
1536
1537}
1538
1539/*
1540 * pfmfs should _never_ be mounted by userland - too much of security hassle,
1541 * no real gain from having the whole whorehouse mounted. So we don't need
1542 * any operations on the root directory. However, we need a non-trivial
1543 * d_name - pfm: will go nicely and kill the special-casing in procfs.
1544 */
1545static struct vfsmount *pfmfs_mnt;
1546
1547static int __init
1548init_pfm_fs(void)
1549{
1550 int err = register_filesystem(&pfm_fs_type);
1551 if (!err) {
1552 pfmfs_mnt = kern_mount(&pfm_fs_type);
1553 err = PTR_ERR(pfmfs_mnt);
1554 if (IS_ERR(pfmfs_mnt))
1555 unregister_filesystem(&pfm_fs_type);
1556 else
1557 err = 0;
1558 }
1559 return err;
1560}
1561
1562static ssize_t
1563pfm_read(struct file *filp, char __user *buf, size_t size, loff_t *ppos)
1564{
1565 pfm_context_t *ctx;
1566 pfm_msg_t *msg;
1567 ssize_t ret;
1568 unsigned long flags;
1569 DECLARE_WAITQUEUE(wait, current);
1570 if (PFM_IS_FILE(filp) == 0) {
 1571 printk(KERN_ERR "perfmon: pfm_read: bad magic [%d]\n", task_pid_nr(current));
1572 return -EINVAL;
1573 }
1574
1575 ctx = (pfm_context_t *)filp->private_data;
1576 if (ctx == NULL) {
 1577 printk(KERN_ERR "perfmon: pfm_read: NULL ctx [%d]\n", task_pid_nr(current));
1578 return -EINVAL;
1579 }
1580
1581 /*
1582 * check even when there is no message
1583 */
1584 if (size < sizeof(pfm_msg_t)) {
1585 DPRINT(("message is too small ctx=%p (>=%ld)\n", ctx, sizeof(pfm_msg_t)));
1586 return -EINVAL;
1587 }
1588
1589 PROTECT_CTX(ctx, flags);
1590
1591 /*
1592 * put ourselves on the wait queue
1593 */
1594 add_wait_queue(&ctx->ctx_msgq_wait, &wait);
1595
1596
1597 for(;;) {
1598 /*
1599 * check wait queue
1600 */
1601
1602 set_current_state(TASK_INTERRUPTIBLE);
1603
1604 DPRINT(("head=%d tail=%d\n", ctx->ctx_msgq_head, ctx->ctx_msgq_tail));
1605
1606 ret = 0;
1607 if(PFM_CTXQ_EMPTY(ctx) == 0) break;
1608
1609 UNPROTECT_CTX(ctx, flags);
1610
1611 /*
1612 * check non-blocking read
1613 */
1614 ret = -EAGAIN;
1615 if(filp->f_flags & O_NONBLOCK) break;
1616
1617 /*
1618 * check pending signals
1619 */
1620 if(signal_pending(current)) {
1621 ret = -EINTR;
1622 break;
1623 }
1624 /*
1625 * no message, so wait
1626 */
1627 schedule();
1628
1629 PROTECT_CTX(ctx, flags);
1630 }
 1631 DPRINT(("[%d] back to running ret=%ld\n", task_pid_nr(current), ret));
1632 set_current_state(TASK_RUNNING);
1633 remove_wait_queue(&ctx->ctx_msgq_wait, &wait);
1634
1635 if (ret < 0) goto abort;
1636
1637 ret = -EINVAL;
1638 msg = pfm_get_next_msg(ctx);
1639 if (msg == NULL) {
 1640 printk(KERN_ERR "perfmon: pfm_read no msg for ctx=%p [%d]\n", ctx, task_pid_nr(current));
1641 goto abort_locked;
1642 }
1643
 1644 DPRINT(("fd=%d type=%d\n", msg->pfm_gen_msg.msg_ctx_fd, msg->pfm_gen_msg.msg_type));
1645
1646 ret = -EFAULT;
1647 if(copy_to_user(buf, msg, sizeof(pfm_msg_t)) == 0) ret = sizeof(pfm_msg_t);
1648
1649abort_locked:
1650 UNPROTECT_CTX(ctx, flags);
1651abort:
1652 return ret;
1653}
1654
1655static ssize_t
1656pfm_write(struct file *file, const char __user *ubuf,
1657 size_t size, loff_t *ppos)
1658{
1659 DPRINT(("pfm_write called\n"));
1660 return -EINVAL;
1661}
1662
1663static unsigned int
1664pfm_poll(struct file *filp, poll_table * wait)
1665{
1666 pfm_context_t *ctx;
1667 unsigned long flags;
1668 unsigned int mask = 0;
1669
1670 if (PFM_IS_FILE(filp) == 0) {
 1671 printk(KERN_ERR "perfmon: pfm_poll: bad magic [%d]\n", task_pid_nr(current));
1672 return 0;
1673 }
1674
1675 ctx = (pfm_context_t *)filp->private_data;
1676 if (ctx == NULL) {
 1677 printk(KERN_ERR "perfmon: pfm_poll: NULL ctx [%d]\n", task_pid_nr(current));
1678 return 0;
1679 }
1680
1681
1682 DPRINT(("pfm_poll ctx_fd=%d before poll_wait\n", ctx->ctx_fd));
1683
1684 poll_wait(filp, &ctx->ctx_msgq_wait, wait);
1685
1686 PROTECT_CTX(ctx, flags);
1687
1688 if (PFM_CTXQ_EMPTY(ctx) == 0)
1689 mask = POLLIN | POLLRDNORM;
1690
1691 UNPROTECT_CTX(ctx, flags);
1692
1693 DPRINT(("pfm_poll ctx_fd=%d mask=0x%x\n", ctx->ctx_fd, mask));
1694
1695 return mask;
1696}
1697
1698static int
1699pfm_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg)
1700{
1701 DPRINT(("pfm_ioctl called\n"));
1702 return -EINVAL;
1703}
1704
1705/*
1706 * interrupt cannot be masked when coming here
1707 */
1708static inline int
1709pfm_do_fasync(int fd, struct file *filp, pfm_context_t *ctx, int on)
1710{
1711 int ret;
1712
1713 ret = fasync_helper (fd, filp, on, &ctx->ctx_async_queue);
1714
1715 DPRINT(("pfm_fasync called by [%d] on ctx_fd=%d on=%d async_queue=%p ret=%d\n",
 1716 task_pid_nr(current),
1717 fd,
1718 on,
1719 ctx->ctx_async_queue, ret));
1720
1721 return ret;
1722}
1723
1724static int
1725pfm_fasync(int fd, struct file *filp, int on)
1726{
1727 pfm_context_t *ctx;
1728 int ret;
1729
1730 if (PFM_IS_FILE(filp) == 0) {
 1731 printk(KERN_ERR "perfmon: pfm_fasync bad magic [%d]\n", task_pid_nr(current));
1732 return -EBADF;
1733 }
1734
1735 ctx = (pfm_context_t *)filp->private_data;
1736 if (ctx == NULL) {
 1737 printk(KERN_ERR "perfmon: pfm_fasync NULL ctx [%d]\n", task_pid_nr(current));
1738 return -EBADF;
1739 }
1740 /*
 1741 * we cannot mask interrupts during this call because this
 1742 * may go to sleep if memory is not readily available.
 1743 *
 1744 * We are protected from the context disappearing by the get_fd()/put_fd()
1745 * done in caller. Serialization of this function is ensured by caller.
1746 */
1747 ret = pfm_do_fasync(fd, filp, ctx, on);
1748
1749
1750 DPRINT(("pfm_fasync called on ctx_fd=%d on=%d async_queue=%p ret=%d\n",
1751 fd,
1752 on,
1753 ctx->ctx_async_queue, ret));
1754
1755 return ret;
1756}
1757
1758#ifdef CONFIG_SMP
1759/*
1760 * this function is exclusively called from pfm_close().
1761 * The context is not protected at that time, nor are interrupts
1762 * on the remote CPU. That's necessary to avoid deadlocks.
1763 */
1764static void
1765pfm_syswide_force_stop(void *info)
1766{
1767 pfm_context_t *ctx = (pfm_context_t *)info;
 1768 struct pt_regs *regs = task_pt_regs(current);
1769 struct task_struct *owner;
1770 unsigned long flags;
1771 int ret;
1772
1773 if (ctx->ctx_cpu != smp_processor_id()) {
1774 printk(KERN_ERR "perfmon: pfm_syswide_force_stop for CPU%d but on CPU%d\n",
1775 ctx->ctx_cpu,
1776 smp_processor_id());
1777 return;
1778 }
1779 owner = GET_PMU_OWNER();
1780 if (owner != ctx->ctx_task) {
1781 printk(KERN_ERR "perfmon: pfm_syswide_force_stop CPU%d unexpected owner [%d] instead of [%d]\n",
1782 smp_processor_id(),
 1783 task_pid_nr(owner), task_pid_nr(ctx->ctx_task));
1784 return;
1785 }
1786 if (GET_PMU_CTX() != ctx) {
1787 printk(KERN_ERR "perfmon: pfm_syswide_force_stop CPU%d unexpected ctx %p instead of %p\n",
1788 smp_processor_id(),
1789 GET_PMU_CTX(), ctx);
1790 return;
1791 }
1792
 1793 DPRINT(("on CPU%d forcing system wide stop for [%d]\n", smp_processor_id(), task_pid_nr(ctx->ctx_task)));
1794 /*
1795 * the context is already protected in pfm_close(), we simply
1796 * need to mask interrupts to avoid a PMU interrupt race on
1797 * this CPU
1798 */
1799 local_irq_save(flags);
1800
1801 ret = pfm_context_unload(ctx, NULL, 0, regs);
1802 if (ret) {
1803 DPRINT(("context_unload returned %d\n", ret));
1804 }
1805
1806 /*
1807 * unmask interrupts, PMU interrupts are now spurious here
1808 */
1809 local_irq_restore(flags);
1810}
1811
1812static void
1813pfm_syswide_cleanup_other_cpu(pfm_context_t *ctx)
1814{
1815 int ret;
1816
1817 DPRINT(("calling CPU%d for cleanup\n", ctx->ctx_cpu));
 1818 ret = smp_call_function_single(ctx->ctx_cpu, pfm_syswide_force_stop, ctx, 1);
1819 DPRINT(("called CPU%d for cleanup ret=%d\n", ctx->ctx_cpu, ret));
1820}
1821#endif /* CONFIG_SMP */
1822
1823/*
1824 * called for each close(). Partially free resources.
1825 * When caller is self-monitoring, the context is unloaded.
1826 */
1827static int
1828pfm_flush(struct file *filp, fl_owner_t id)
1829{
1830 pfm_context_t *ctx;
1831 struct task_struct *task;
1832 struct pt_regs *regs;
1833 unsigned long flags;
1834 unsigned long smpl_buf_size = 0UL;
1835 void *smpl_buf_vaddr = NULL;
1836 int state, is_system;
1837
1838 if (PFM_IS_FILE(filp) == 0) {
1839 DPRINT(("bad magic for\n"));
1840 return -EBADF;
1841 }
1842
1843 ctx = (pfm_context_t *)filp->private_data;
1844 if (ctx == NULL) {
 1845 printk(KERN_ERR "perfmon: pfm_flush: NULL ctx [%d]\n", task_pid_nr(current));
1846 return -EBADF;
1847 }
1848
1849 /*
1850 * remove our file from the async queue, if we use this mode.
1851 * This can be done without the context being protected. We come
 1852 * here when the context has become unreachable by other tasks.
1853 *
1854 * We may still have active monitoring at this point and we may
1855 * end up in pfm_overflow_handler(). However, fasync_helper()
1856 * operates with interrupts disabled and it cleans up the
1857 * queue. If the PMU handler is called prior to entering
1858 * fasync_helper() then it will send a signal. If it is
1859 * invoked after, it will find an empty queue and no
 1860	 * signal will be sent. In both cases, we are safe
1861 */
1da177e4
LT
1862 PROTECT_CTX(ctx, flags);
1863
1864 state = ctx->ctx_state;
1865 is_system = ctx->ctx_fl_system;
1866
1867 task = PFM_CTX_TASK(ctx);
6450578f 1868 regs = task_pt_regs(task);
1da177e4
LT
1869
1870 DPRINT(("ctx_state=%d is_current=%d\n",
1871 state,
1872 task == current ? 1 : 0));
1873
1874 /*
1875 * if state == UNLOADED, then task is NULL
1876 */
1877
1878 /*
1879 * we must stop and unload because we are losing access to the context.
1880 */
1881 if (task == current) {
1882#ifdef CONFIG_SMP
1883 /*
1884 * the task IS the owner but it migrated to another CPU: that's bad
1885 * but we must handle this cleanly. Unfortunately, the kernel does
1886 * not provide a mechanism to block migration (while the context is loaded).
1887 *
1888 * We need to release the resource on the ORIGINAL cpu.
1889 */
1890 if (is_system && ctx->ctx_cpu != smp_processor_id()) {
1891
1892 DPRINT(("should be running on CPU%d\n", ctx->ctx_cpu));
1893 /*
1894 * keep context protected but unmask interrupt for IPI
1895 */
1896 local_irq_restore(flags);
1897
1898 pfm_syswide_cleanup_other_cpu(ctx);
1899
1900 /*
1901 * restore interrupt masking
1902 */
1903 local_irq_save(flags);
1904
1905 /*
1906 * context is unloaded at this point
1907 */
1908 } else
1909#endif /* CONFIG_SMP */
1910 {
1911
1912 DPRINT(("forcing unload\n"));
1913 /*
1914 * stop and unload, returning with state UNLOADED
1915 * and session unreserved.
1916 */
1917 pfm_context_unload(ctx, NULL, 0, regs);
1918
1919 DPRINT(("ctx_state=%d\n", ctx->ctx_state));
1920 }
1921 }
1922
1923 /*
1924 * remove virtual mapping, if any, for the calling task.
1925 * cannot reset ctx field until last user is calling close().
1926 *
1927 * ctx_smpl_vaddr must never be cleared because it is needed
1928 * by every task with access to the context
1929 *
1930 * When called from do_exit(), the mm context is gone already, therefore
1931 * mm is NULL, i.e., the VMA is already gone and we do not have to
1932 * do anything here
1933 */
1934 if (ctx->ctx_smpl_vaddr && current->mm) {
1935 smpl_buf_vaddr = ctx->ctx_smpl_vaddr;
1936 smpl_buf_size = ctx->ctx_smpl_size;
1937 }
1938
1939 UNPROTECT_CTX(ctx, flags);
1940
1941 /*
1942 * if there was a mapping, then we systematically remove it
1943 * at this point. Cannot be done inside critical section
1944 * because some VM function reenables interrupts.
1945 *
1946 */
1947 if (smpl_buf_vaddr) pfm_remove_smpl_mapping(current, smpl_buf_vaddr, smpl_buf_size);
1948
1949 return 0;
1950}
1951/*
1952 * called either on explicit close() or from exit_files().
1953 * Only the LAST user of the file gets to this point, i.e., it is
1954 * called only ONCE.
1955 *
1956 * IMPORTANT: we get called ONLY when the refcnt on the file gets to zero
 1957	 * (fput()), i.e., the last task to access the file. Nobody else can access the
1958 * file at this point.
1959 *
1960 * When called from exit_files(), the VMA has been freed because exit_mm()
1961 * is executed before exit_files().
1962 *
1963 * When called from exit_files(), the current task is not yet ZOMBIE but we
1964 * flush the PMU state to the context.
1965 */
1966static int
1967pfm_close(struct inode *inode, struct file *filp)
1968{
1969 pfm_context_t *ctx;
1970 struct task_struct *task;
1971 struct pt_regs *regs;
1972 DECLARE_WAITQUEUE(wait, current);
1973 unsigned long flags;
1974 unsigned long smpl_buf_size = 0UL;
1975 void *smpl_buf_addr = NULL;
1976 int free_possible = 1;
1977 int state, is_system;
1978
1979 DPRINT(("pfm_close called private=%p\n", filp->private_data));
1980
1981 if (PFM_IS_FILE(filp) == 0) {
1982 DPRINT(("bad magic\n"));
1983 return -EBADF;
1984 }
1985
1986 ctx = (pfm_context_t *)filp->private_data;
1987 if (ctx == NULL) {
19c5870c 1988 printk(KERN_ERR "perfmon: pfm_close: NULL ctx [%d]\n", task_pid_nr(current));
1da177e4
LT
1989 return -EBADF;
1990 }
1991
1992 PROTECT_CTX(ctx, flags);
1993
1994 state = ctx->ctx_state;
1995 is_system = ctx->ctx_fl_system;
1996
1997 task = PFM_CTX_TASK(ctx);
6450578f 1998 regs = task_pt_regs(task);
1da177e4
LT
1999
2000 DPRINT(("ctx_state=%d is_current=%d\n",
2001 state,
2002 task == current ? 1 : 0));
2003
2004 /*
2005 * if task == current, then pfm_flush() unloaded the context
2006 */
2007 if (state == PFM_CTX_UNLOADED) goto doit;
2008
2009 /*
2010 * context is loaded/masked and task != current, we need to
2011 * either force an unload or go zombie
2012 */
2013
2014 /*
2015 * The task is currently blocked or will block after an overflow.
 2016	 * We must force it to wake up to get out of the
2017 * MASKED state and transition to the unloaded state by itself.
2018 *
2019 * This situation is only possible for per-task mode
2020 */
2021 if (state == PFM_CTX_MASKED && CTX_OVFL_NOBLOCK(ctx) == 0) {
2022
2023 /*
2024 * set a "partial" zombie state to be checked
2025 * upon return from down() in pfm_handle_work().
2026 *
2027 * We cannot use the ZOMBIE state, because it is checked
2028 * by pfm_load_regs() which is called upon wakeup from down().
2029 * In such case, it would free the context and then we would
2030 * return to pfm_handle_work() which would access the
2031 * stale context. Instead, we set a flag invisible to pfm_load_regs()
2032 * but visible to pfm_handle_work().
2033 *
2034 * For some window of time, we have a zombie context with
2035 * ctx_state = MASKED and not ZOMBIE
2036 */
2037 ctx->ctx_fl_going_zombie = 1;
2038
2039 /*
2040 * force task to wake up from MASKED state
2041 */
60f1c444 2042 complete(&ctx->ctx_restart_done);
1da177e4
LT
2043
2044 DPRINT(("waking up ctx_state=%d\n", state));
2045
2046 /*
 2047	 * put ourselves to sleep waiting for the other
2048 * task to report completion
2049 *
2050 * the context is protected by mutex, therefore there
2051 * is no risk of being notified of completion before
 2052	 * being actually on the waitq.
2053 */
2054 set_current_state(TASK_INTERRUPTIBLE);
2055 add_wait_queue(&ctx->ctx_zombieq, &wait);
2056
2057 UNPROTECT_CTX(ctx, flags);
2058
2059 /*
2060 * XXX: check for signals :
2061 * - ok for explicit close
2062 * - not ok when coming from exit_files()
2063 */
2064 schedule();
2065
2066
2067 PROTECT_CTX(ctx, flags);
2068
2069
2070 remove_wait_queue(&ctx->ctx_zombieq, &wait);
2071 set_current_state(TASK_RUNNING);
2072
2073 /*
2074 * context is unloaded at this point
2075 */
2076 DPRINT(("after zombie wakeup ctx_state=%d for\n", state));
2077 }
2078 else if (task != current) {
2079#ifdef CONFIG_SMP
2080 /*
2081 * switch context to zombie state
2082 */
2083 ctx->ctx_state = PFM_CTX_ZOMBIE;
2084
19c5870c 2085 DPRINT(("zombie ctx for [%d]\n", task_pid_nr(task)));
1da177e4
LT
2086 /*
2087 * cannot free the context on the spot. deferred until
2088 * the task notices the ZOMBIE state
2089 */
2090 free_possible = 0;
2091#else
2092 pfm_context_unload(ctx, NULL, 0, regs);
2093#endif
2094 }
2095
2096doit:
2097 /* reload state, may have changed during opening of critical section */
2098 state = ctx->ctx_state;
2099
2100 /*
2101 * the context is still attached to a task (possibly current)
2102 * we cannot destroy it right now
2103 */
2104
2105 /*
2106 * we must free the sampling buffer right here because
2107 * we cannot rely on it being cleaned up later by the
2108 * monitored task. It is not possible to free vmalloc'ed
2109 * memory in pfm_load_regs(). Instead, we remove the buffer
 2110	 * now. Should there be a subsequent PMU overflow originally
 2111	 * meant for sampling, it will be converted to spurious
 2112	 * and that's fine because the monitoring tool is gone anyway.
2113 */
2114 if (ctx->ctx_smpl_hdr) {
2115 smpl_buf_addr = ctx->ctx_smpl_hdr;
2116 smpl_buf_size = ctx->ctx_smpl_size;
2117 /* no more sampling */
2118 ctx->ctx_smpl_hdr = NULL;
2119 ctx->ctx_fl_is_sampling = 0;
2120 }
2121
2122 DPRINT(("ctx_state=%d free_possible=%d addr=%p size=%lu\n",
2123 state,
2124 free_possible,
2125 smpl_buf_addr,
2126 smpl_buf_size));
2127
2128 if (smpl_buf_addr) pfm_exit_smpl_buffer(ctx->ctx_buf_fmt);
2129
2130 /*
 2131	 * UNLOADED means that the session has already been unreserved.
2132 */
2133 if (state == PFM_CTX_ZOMBIE) {
2134 pfm_unreserve_session(ctx, ctx->ctx_fl_system , ctx->ctx_cpu);
2135 }
2136
2137 /*
2138 * disconnect file descriptor from context must be done
2139 * before we unlock.
2140 */
2141 filp->private_data = NULL;
2142
2143 /*
72fdbdce 2144 * if we free on the spot, the context is now completely unreachable
1da177e4
LT
 2145	 * from the caller's side. The monitored task side is also cut, so we
 2146	 * can safely free it.
2147 *
2148 * If we have a deferred free, only the caller side is disconnected.
2149 */
2150 UNPROTECT_CTX(ctx, flags);
2151
2152 /*
2153 * All memory free operations (especially for vmalloc'ed memory)
2154 * MUST be done with interrupts ENABLED.
2155 */
2156 if (smpl_buf_addr) pfm_rvfree(smpl_buf_addr, smpl_buf_size);
2157
2158 /*
2159 * return the memory used by the context
2160 */
2161 if (free_possible) pfm_context_free(ctx);
2162
2163 return 0;
2164}
2165
2166static int
2167pfm_no_open(struct inode *irrelevant, struct file *dontcare)
2168{
2169 DPRINT(("pfm_no_open called\n"));
2170 return -ENXIO;
2171}
2172
2173
2174
5dfe4c96 2175static const struct file_operations pfm_file_ops = {
1da177e4
LT
2176 .llseek = no_llseek,
2177 .read = pfm_read,
2178 .write = pfm_write,
2179 .poll = pfm_poll,
2180 .ioctl = pfm_ioctl,
2181 .open = pfm_no_open, /* special open code to disallow open via /proc */
2182 .fasync = pfm_fasync,
2183 .release = pfm_close,
2184 .flush = pfm_flush
2185};
2186
2187static int
2188pfmfs_delete_dentry(struct dentry *dentry)
2189{
2190 return 1;
2191}
2192
3ba13d17 2193static const struct dentry_operations pfmfs_dentry_operations = {
1da177e4
LT
2194 .d_delete = pfmfs_delete_dentry,
2195};
2196
2197
f8e811b9
AV
2198static struct file *
2199pfm_alloc_file(pfm_context_t *ctx)
1da177e4 2200{
f8e811b9
AV
2201 struct file *file;
2202 struct inode *inode;
2c48b9c4 2203 struct path path;
1da177e4
LT
2204 char name[32];
2205 struct qstr this;
2206
1da177e4
LT
2207 /*
2208 * allocate a new inode
2209 */
2210 inode = new_inode(pfmfs_mnt->mnt_sb);
f8e811b9
AV
2211 if (!inode)
2212 return ERR_PTR(-ENOMEM);
1da177e4
LT
2213
2214 DPRINT(("new inode ino=%ld @%p\n", inode->i_ino, inode));
2215
2216 inode->i_mode = S_IFCHR|S_IRUGO;
ef81ee98
DH
2217 inode->i_uid = current_fsuid();
2218 inode->i_gid = current_fsgid();
1da177e4
LT
2219
2220 sprintf(name, "[%lu]", inode->i_ino);
2221 this.name = name;
2222 this.len = strlen(name);
2223 this.hash = inode->i_ino;
2224
1da177e4
LT
2225 /*
2226 * allocate a new dcache entry
2227 */
2c48b9c4
AV
2228 path.dentry = d_alloc(pfmfs_mnt->mnt_sb->s_root, &this);
2229 if (!path.dentry) {
f8e811b9
AV
2230 iput(inode);
2231 return ERR_PTR(-ENOMEM);
2232 }
2c48b9c4 2233 path.mnt = mntget(pfmfs_mnt);
1da177e4 2234
2c48b9c4
AV
2235 path.dentry->d_op = &pfmfs_dentry_operations;
2236 d_add(path.dentry, inode);
1da177e4 2237
2c48b9c4 2238 file = alloc_file(&path, FMODE_READ, &pfm_file_ops);
f8e811b9 2239 if (!file) {
2c48b9c4 2240 path_put(&path);
f8e811b9
AV
2241 return ERR_PTR(-ENFILE);
2242 }
1da177e4 2243
1da177e4 2244 file->f_flags = O_RDONLY;
f8e811b9 2245 file->private_data = ctx;
1da177e4 2246
f8e811b9 2247 return file;
1da177e4
LT
2248}
2249
2250static int
2251pfm_remap_buffer(struct vm_area_struct *vma, unsigned long buf, unsigned long addr, unsigned long size)
2252{
2253 DPRINT(("CPU%d buf=0x%lx addr=0x%lx size=%ld\n", smp_processor_id(), buf, addr, size));
2254
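	/*
	 * Note: the buffer comes from pfm_rvmalloc(), i.e. vmalloc'ed memory,
	 * which is virtually but not physically contiguous. We therefore walk
	 * it one page at a time: ia64_tpa() translates each kernel virtual
	 * page to its physical frame, which is then inserted read-only into
	 * the user VMA with remap_pfn_range().
	 */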
2255 while (size > 0) {
2256 unsigned long pfn = ia64_tpa(buf) >> PAGE_SHIFT;
2257
2258
2259 if (remap_pfn_range(vma, addr, pfn, PAGE_SIZE, PAGE_READONLY))
2260 return -ENOMEM;
2261
2262 addr += PAGE_SIZE;
2263 buf += PAGE_SIZE;
2264 size -= PAGE_SIZE;
2265 }
2266 return 0;
2267}
2268
2269/*
2270 * allocate a sampling buffer and remaps it into the user address space of the task
2271 */
2272static int
41d5e5d7 2273pfm_smpl_buffer_alloc(struct task_struct *task, struct file *filp, pfm_context_t *ctx, unsigned long rsize, void **user_vaddr)
1da177e4
LT
2274{
2275 struct mm_struct *mm = task->mm;
2276 struct vm_area_struct *vma = NULL;
2277 unsigned long size;
2278 void *smpl_buf;
2279
2280
2281 /*
2282 * the fixed header + requested size and align to page boundary
2283 */
2284 size = PAGE_ALIGN(rsize);
2285
2286 DPRINT(("sampling buffer rsize=%lu size=%lu bytes\n", rsize, size));
2287
2288 /*
2289 * check requested size to avoid Denial-of-service attacks
2290 * XXX: may have to refine this test
2291 * Check against address space limit.
2292 *
2293 * if ((mm->total_vm << PAGE_SHIFT) + len> task->rlim[RLIMIT_AS].rlim_cur)
2294 * return -ENOMEM;
2295 */
2296 if (size > task->signal->rlim[RLIMIT_MEMLOCK].rlim_cur)
2297 return -ENOMEM;
2298
2299 /*
2300 * We do the easy to undo allocations first.
2301 *
 2302	 * pfm_rvmalloc() clears the buffer, so there is no leak
2303 */
2304 smpl_buf = pfm_rvmalloc(size);
2305 if (smpl_buf == NULL) {
2306 DPRINT(("Can't allocate sampling buffer\n"));
2307 return -ENOMEM;
2308 }
2309
2310 DPRINT(("smpl_buf @%p\n", smpl_buf));
2311
2312 /* allocate vma */
c3762229 2313 vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
1da177e4
LT
2314 if (!vma) {
2315 DPRINT(("Cannot allocate vma\n"));
2316 goto error_kmem;
2317 }
1da177e4
LT
2318
2319 /*
2320 * partially initialize the vma for the sampling buffer
2321 */
2322 vma->vm_mm = mm;
41d5e5d7 2323 vma->vm_file = filp;
1da177e4
LT
2324 vma->vm_flags = VM_READ| VM_MAYREAD |VM_RESERVED;
2325 vma->vm_page_prot = PAGE_READONLY; /* XXX may need to change */
2326
2327 /*
2328 * Now we have everything we need and we can initialize
2329 * and connect all the data structures
2330 */
2331
2332 ctx->ctx_smpl_hdr = smpl_buf;
2333 ctx->ctx_smpl_size = size; /* aligned size */
2334
2335 /*
2336 * Let's do the difficult operations next.
2337 *
2338 * now we atomically find some area in the address space and
2339 * remap the buffer in it.
2340 */
2341 down_write(&task->mm->mmap_sem);
2342
2343 /* find some free area in address space, must have mmap sem held */
2344 vma->vm_start = pfm_get_unmapped_area(NULL, 0, size, 0, MAP_PRIVATE|MAP_ANONYMOUS, 0);
2345 if (vma->vm_start == 0UL) {
2346 DPRINT(("Cannot find unmapped area for size %ld\n", size));
2347 up_write(&task->mm->mmap_sem);
2348 goto error;
2349 }
2350 vma->vm_end = vma->vm_start + size;
2351 vma->vm_pgoff = vma->vm_start >> PAGE_SHIFT;
2352
2353 DPRINT(("aligned size=%ld, hdr=%p mapped @0x%lx\n", size, ctx->ctx_smpl_hdr, vma->vm_start));
2354
2355 /* can only be applied to current task, need to have the mm semaphore held when called */
2356 if (pfm_remap_buffer(vma, (unsigned long)smpl_buf, vma->vm_start, size)) {
2357 DPRINT(("Can't remap buffer\n"));
2358 up_write(&task->mm->mmap_sem);
2359 goto error;
2360 }
2361
41d5e5d7
NP
2362 get_file(filp);
2363
1da177e4
LT
2364 /*
2365 * now insert the vma in the vm list for the process, must be
2366 * done with mmap lock held
2367 */
2368 insert_vm_struct(mm, vma);
2369
2370 mm->total_vm += size >> PAGE_SHIFT;
ab50b8ed
HD
2371 vm_stat_account(vma->vm_mm, vma->vm_flags, vma->vm_file,
2372 vma_pages(vma));
1da177e4
LT
2373 up_write(&task->mm->mmap_sem);
2374
2375 /*
2376 * keep track of user level virtual address
2377 */
2378 ctx->ctx_smpl_vaddr = (void *)vma->vm_start;
2379 *(unsigned long *)user_vaddr = vma->vm_start;
2380
2381 return 0;
2382
2383error:
2384 kmem_cache_free(vm_area_cachep, vma);
2385error_kmem:
2386 pfm_rvfree(smpl_buf, size);
2387
2388 return -ENOMEM;
2389}
2390
2391/*
2392 * XXX: do something better here
2393 */
2394static int
2395pfm_bad_permissions(struct task_struct *task)
2396{
c69e8d9c 2397 const struct cred *tcred;
ef81ee98
DH
2398 uid_t uid = current_uid();
2399 gid_t gid = current_gid();
c69e8d9c
DH
2400 int ret;
2401
2402 rcu_read_lock();
2403 tcred = __task_cred(task);
ef81ee98 2404
1da177e4
LT
2405 /* inspired by ptrace_attach() */
2406 DPRINT(("cur: uid=%d gid=%d task: euid=%d suid=%d uid=%d egid=%d sgid=%d\n",
ef81ee98
DH
2407 uid,
2408 gid,
c69e8d9c
DH
2409 tcred->euid,
2410 tcred->suid,
2411 tcred->uid,
2412 tcred->egid,
2413 tcred->sgid));
2414
2415 ret = ((uid != tcred->euid)
2416 || (uid != tcred->suid)
2417 || (uid != tcred->uid)
2418 || (gid != tcred->egid)
2419 || (gid != tcred->sgid)
2420 || (gid != tcred->gid)) && !capable(CAP_SYS_PTRACE);
2421
2422 rcu_read_unlock();
2423 return ret;
1da177e4
LT
2424}
2425
2426static int
2427pfarg_is_sane(struct task_struct *task, pfarg_context_t *pfx)
2428{
2429 int ctx_flags;
2430
2431 /* valid signal */
2432
2433 ctx_flags = pfx->ctx_flags;
2434
2435 if (ctx_flags & PFM_FL_SYSTEM_WIDE) {
2436
2437 /*
2438 * cannot block in this mode
2439 */
2440 if (ctx_flags & PFM_FL_NOTIFY_BLOCK) {
2441 DPRINT(("cannot use blocking mode when in system wide monitoring\n"));
2442 return -EINVAL;
2443 }
2444 } else {
2445 }
2446 /* probably more to add here */
2447
2448 return 0;
2449}
2450
2451static int
41d5e5d7 2452pfm_setup_buffer_fmt(struct task_struct *task, struct file *filp, pfm_context_t *ctx, unsigned int ctx_flags,
1da177e4
LT
2453 unsigned int cpu, pfarg_context_t *arg)
2454{
2455 pfm_buffer_fmt_t *fmt = NULL;
2456 unsigned long size = 0UL;
2457 void *uaddr = NULL;
2458 void *fmt_arg = NULL;
2459 int ret = 0;
2460#define PFM_CTXARG_BUF_ARG(a) (pfm_buffer_fmt_t *)(a+1)
2461
2462 /* invoke and lock buffer format, if found */
2463 fmt = pfm_find_buffer_fmt(arg->ctx_smpl_buf_id);
2464 if (fmt == NULL) {
19c5870c 2465 DPRINT(("[%d] cannot find buffer format\n", task_pid_nr(task)));
1da177e4
LT
2466 return -EINVAL;
2467 }
2468
2469 /*
2470 * buffer argument MUST be contiguous to pfarg_context_t
2471 */
2472 if (fmt->fmt_arg_size) fmt_arg = PFM_CTXARG_BUF_ARG(arg);
2473
2474 ret = pfm_buf_fmt_validate(fmt, task, ctx_flags, cpu, fmt_arg);
2475
19c5870c 2476 DPRINT(("[%d] after validate(0x%x,%d,%p)=%d\n", task_pid_nr(task), ctx_flags, cpu, fmt_arg, ret));
1da177e4
LT
2477
2478 if (ret) goto error;
2479
2480 /* link buffer format and context */
2481 ctx->ctx_buf_fmt = fmt;
f8e811b9 2482 ctx->ctx_fl_is_sampling = 1; /* assume record() is defined */
1da177e4
LT
2483
2484 /*
2485 * check if buffer format wants to use perfmon buffer allocation/mapping service
2486 */
2487 ret = pfm_buf_fmt_getsize(fmt, task, ctx_flags, cpu, fmt_arg, &size);
2488 if (ret) goto error;
2489
2490 if (size) {
2491 /*
2492 * buffer is always remapped into the caller's address space
2493 */
41d5e5d7 2494 ret = pfm_smpl_buffer_alloc(current, filp, ctx, size, &uaddr);
1da177e4
LT
2495 if (ret) goto error;
2496
2497 /* keep track of user address of buffer */
2498 arg->ctx_smpl_vaddr = uaddr;
2499 }
2500 ret = pfm_buf_fmt_init(fmt, task, ctx->ctx_smpl_hdr, ctx_flags, cpu, fmt_arg);
2501
2502error:
2503 return ret;
2504}
2505
2506static void
2507pfm_reset_pmu_state(pfm_context_t *ctx)
2508{
2509 int i;
2510
2511 /*
2512 * install reset values for PMC.
2513 */
2514 for (i=1; PMC_IS_LAST(i) == 0; i++) {
2515 if (PMC_IS_IMPL(i) == 0) continue;
2516 ctx->ctx_pmcs[i] = PMC_DFL_VAL(i);
2517 DPRINT(("pmc[%d]=0x%lx\n", i, ctx->ctx_pmcs[i]));
2518 }
2519 /*
2520 * PMD registers are set to 0UL when the context in memset()
2521 */
2522
2523 /*
 2524	 * On context switch restore, we must restore ALL pmc and ALL pmd even
2525 * when they are not actively used by the task. In UP, the incoming process
2526 * may otherwise pick up left over PMC, PMD state from the previous process.
2527 * As opposed to PMD, stale PMC can cause harm to the incoming
2528 * process because they may change what is being measured.
2529 * Therefore, we must systematically reinstall the entire
2530 * PMC state. In SMP, the same thing is possible on the
 2531	 * same CPU but also between 2 CPUs.
2532 *
2533 * The problem with PMD is information leaking especially
2534 * to user level when psr.sp=0
2535 *
2536 * There is unfortunately no easy way to avoid this problem
 2537	 * on either UP or SMP. This definitely slows down the
2538 * pfm_load_regs() function.
2539 */
2540
2541 /*
2542 * bitmask of all PMCs accessible to this context
2543 *
2544 * PMC0 is treated differently.
2545 */
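	/*
	 * (On the ia64 PMU, PMC0 holds the freeze bit and overflow status and
	 * is managed by the kernel, hence the "& ~0x1" below; bit i of
	 * impl_pmcs[0] corresponds to PMC i.)
	 */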
2546 ctx->ctx_all_pmcs[0] = pmu_conf->impl_pmcs[0] & ~0x1;
2547
2548 /*
72fdbdce 2549 * bitmask of all PMDs that are accessible to this context
1da177e4
LT
2550 */
2551 ctx->ctx_all_pmds[0] = pmu_conf->impl_pmds[0];
2552
2553 DPRINT(("<%d> all_pmcs=0x%lx all_pmds=0x%lx\n", ctx->ctx_fd, ctx->ctx_all_pmcs[0],ctx->ctx_all_pmds[0]));
2554
2555 /*
2556 * useful in case of re-enable after disable
2557 */
2558 ctx->ctx_used_ibrs[0] = 0UL;
2559 ctx->ctx_used_dbrs[0] = 0UL;
2560}
2561
2562static int
2563pfm_ctx_getsize(void *arg, size_t *sz)
2564{
2565 pfarg_context_t *req = (pfarg_context_t *)arg;
2566 pfm_buffer_fmt_t *fmt;
2567
2568 *sz = 0;
2569
2570 if (!pfm_uuid_cmp(req->ctx_smpl_buf_id, pfm_null_uuid)) return 0;
2571
2572 fmt = pfm_find_buffer_fmt(req->ctx_smpl_buf_id);
2573 if (fmt == NULL) {
2574 DPRINT(("cannot find buffer format\n"));
2575 return -EINVAL;
2576 }
2577 /* get just enough to copy in user parameters */
2578 *sz = fmt->fmt_arg_size;
2579 DPRINT(("arg_size=%lu\n", *sz));
2580
2581 return 0;
2582}
2583
2584
2585
2586/*
2587 * cannot attach if :
2588 * - kernel task
2589 * - task not owned by caller
2590 * - task incompatible with context mode
2591 */
2592static int
2593pfm_task_incompatible(pfm_context_t *ctx, struct task_struct *task)
2594{
2595 /*
 2596	 * no kernel task or task not owned by caller
2597 */
2598 if (task->mm == NULL) {
19c5870c 2599 		DPRINT(("task [%d] has no memory context (kernel thread)\n", task_pid_nr(task)));
1da177e4
LT
2600 return -EPERM;
2601 }
2602 if (pfm_bad_permissions(task)) {
19c5870c 2603 DPRINT(("no permission to attach to [%d]\n", task_pid_nr(task)));
1da177e4
LT
2604 return -EPERM;
2605 }
2606 /*
2607 * cannot block in self-monitoring mode
2608 */
2609 if (CTX_OVFL_NOBLOCK(ctx) == 0 && task == current) {
19c5870c 2610 DPRINT(("cannot load a blocking context on self for [%d]\n", task_pid_nr(task)));
1da177e4
LT
2611 return -EINVAL;
2612 }
2613
2614 if (task->exit_state == EXIT_ZOMBIE) {
19c5870c 2615 DPRINT(("cannot attach to zombie task [%d]\n", task_pid_nr(task)));
1da177e4
LT
2616 return -EBUSY;
2617 }
2618
2619 /*
2620 * always ok for self
2621 */
2622 if (task == current) return 0;
2623
21498223 2624 if (!task_is_stopped_or_traced(task)) {
19c5870c 2625 DPRINT(("cannot attach to non-stopped task [%d] state=%ld\n", task_pid_nr(task), task->state));
1da177e4
LT
2626 return -EBUSY;
2627 }
2628 /*
2629 * make sure the task is off any CPU
2630 */
85ba2d86 2631 wait_task_inactive(task, 0);
1da177e4
LT
2632
2633 /* more to come... */
2634
2635 return 0;
2636}
2637
2638static int
2639pfm_get_task(pfm_context_t *ctx, pid_t pid, struct task_struct **task)
2640{
2641 struct task_struct *p = current;
2642 int ret;
2643
2644 /* XXX: need to add more checks here */
2645 if (pid < 2) return -EPERM;
2646
e1b0d4ba 2647 if (pid != task_pid_vnr(current)) {
1da177e4
LT
2648
2649 read_lock(&tasklist_lock);
2650
e1b0d4ba 2651 p = find_task_by_vpid(pid);
1da177e4
LT
2652
2653 /* make sure task cannot go away while we operate on it */
2654 if (p) get_task_struct(p);
2655
2656 read_unlock(&tasklist_lock);
2657
2658 if (p == NULL) return -ESRCH;
2659 }
2660
2661 ret = pfm_task_incompatible(ctx, p);
2662 if (ret == 0) {
2663 *task = p;
2664 } else if (p != current) {
2665 pfm_put_task(p);
2666 }
2667 return ret;
2668}
2669
2670
2671
2672static int
2673pfm_context_create(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
2674{
2675 pfarg_context_t *req = (pfarg_context_t *)arg;
2676 struct file *filp;
f8e811b9 2677 struct path path;
1da177e4 2678 int ctx_flags;
f8e811b9 2679 int fd;
1da177e4
LT
2680 int ret;
2681
2682 /* let's check the arguments first */
2683 ret = pfarg_is_sane(current, req);
f8e811b9
AV
2684 if (ret < 0)
2685 return ret;
1da177e4
LT
2686
2687 ctx_flags = req->ctx_flags;
2688
2689 ret = -ENOMEM;
2690
f8e811b9
AV
2691 fd = get_unused_fd();
2692 if (fd < 0)
2693 return fd;
1da177e4 2694
f8e811b9
AV
2695 ctx = pfm_context_alloc(ctx_flags);
2696 if (!ctx)
2697 goto error;
1da177e4 2698
f8e811b9
AV
2699 filp = pfm_alloc_file(ctx);
2700 if (IS_ERR(filp)) {
2701 ret = PTR_ERR(filp);
2702 goto error_file;
2703 }
1da177e4 2704
f8e811b9 2705 req->ctx_fd = ctx->ctx_fd = fd;
1da177e4
LT
2706
2707 /*
2708 * does the user want to sample?
2709 */
2710 if (pfm_uuid_cmp(req->ctx_smpl_buf_id, pfm_null_uuid)) {
41d5e5d7 2711 ret = pfm_setup_buffer_fmt(current, filp, ctx, ctx_flags, 0, req);
f8e811b9
AV
2712 if (ret)
2713 goto buffer_error;
1da177e4
LT
2714 }
2715
1da177e4
LT
2716 DPRINT(("ctx=%p flags=0x%x system=%d notify_block=%d excl_idle=%d no_msg=%d ctx_fd=%d \n",
2717 ctx,
2718 ctx_flags,
2719 ctx->ctx_fl_system,
2720 ctx->ctx_fl_block,
2721 ctx->ctx_fl_excl_idle,
2722 ctx->ctx_fl_no_msg,
2723 ctx->ctx_fd));
2724
2725 /*
2726 * initialize soft PMU state
2727 */
2728 pfm_reset_pmu_state(ctx);
2729
f8e811b9
AV
2730 fd_install(fd, filp);
2731
1da177e4
LT
2732 return 0;
2733
2734buffer_error:
f8e811b9
AV
2735 path = filp->f_path;
2736 put_filp(filp);
2737 path_put(&path);
1da177e4
LT
2738
2739 if (ctx->ctx_buf_fmt) {
2740 pfm_buf_fmt_exit(ctx->ctx_buf_fmt, current, NULL, regs);
2741 }
2742error_file:
2743 pfm_context_free(ctx);
2744
2745error:
f8e811b9 2746 put_unused_fd(fd);
1da177e4
LT
2747 return ret;
2748}
2749
2750static inline unsigned long
2751pfm_new_counter_value (pfm_counter_t *reg, int is_long_reset)
2752{
2753 unsigned long val = is_long_reset ? reg->long_reset : reg->short_reset;
2754 unsigned long new_seed, old_seed = reg->seed, mask = reg->mask;
2755 extern unsigned long carta_random32 (unsigned long seed);
2756
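	/*
	 * Illustration: counters are programmed with "negative" values so
	 * that they overflow after the desired number of events. For example,
	 * a short_reset of -1000 (modulo the counter width) overflows after
	 * 1000 events. With PFM_REGFL_RANDOM, (old_seed & mask) is subtracted
	 * from that value, e.g. a masked seed of 37 turns the effective
	 * sampling period into 1037 events, jittering it by up to 'mask'.
	 */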
2757 if (reg->flags & PFM_REGFL_RANDOM) {
2758 new_seed = carta_random32(old_seed);
2759 val -= (old_seed & mask); /* counter values are negative numbers! */
2760 if ((mask >> 32) != 0)
2761 /* construct a full 64-bit random value: */
2762 new_seed |= carta_random32(old_seed >> 32) << 32;
2763 reg->seed = new_seed;
2764 }
2765 reg->lval = val;
2766 return val;
2767}
2768
2769static void
2770pfm_reset_regs_masked(pfm_context_t *ctx, unsigned long *ovfl_regs, int is_long_reset)
2771{
2772 unsigned long mask = ovfl_regs[0];
2773 unsigned long reset_others = 0UL;
2774 unsigned long val;
2775 int i;
2776
2777 /*
2778 * now restore reset value on sampling overflowed counters
2779 */
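	/*
	 * Walk-through: ovfl_regs[0] has bit i set when PMD i overflowed.
	 * Shifting by PMU_FIRST_COUNTER aligns bit 0 of 'mask' with the first
	 * counting PMD, so the loop below visits exactly the overflowed
	 * counters. Each visited counter also contributes its reset_pmds[0]
	 * bitmask to 'reset_others', which the second loop then resets.
	 */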
2780 mask >>= PMU_FIRST_COUNTER;
2781 for(i = PMU_FIRST_COUNTER; mask; i++, mask >>= 1) {
2782
2783 if ((mask & 0x1UL) == 0UL) continue;
2784
2785 ctx->ctx_pmds[i].val = val = pfm_new_counter_value(ctx->ctx_pmds+ i, is_long_reset);
2786 reset_others |= ctx->ctx_pmds[i].reset_pmds[0];
2787
2788 DPRINT_ovfl((" %s reset ctx_pmds[%d]=%lx\n", is_long_reset ? "long" : "short", i, val));
2789 }
2790
2791 /*
2792 * Now take care of resetting the other registers
2793 */
2794 for(i = 0; reset_others; i++, reset_others >>= 1) {
2795
2796 if ((reset_others & 0x1) == 0) continue;
2797
2798 ctx->ctx_pmds[i].val = val = pfm_new_counter_value(ctx->ctx_pmds + i, is_long_reset);
2799
2800 DPRINT_ovfl(("%s reset_others pmd[%d]=%lx\n",
2801 is_long_reset ? "long" : "short", i, val));
2802 }
2803}
2804
2805static void
2806pfm_reset_regs(pfm_context_t *ctx, unsigned long *ovfl_regs, int is_long_reset)
2807{
2808 unsigned long mask = ovfl_regs[0];
2809 unsigned long reset_others = 0UL;
2810 unsigned long val;
2811 int i;
2812
2813 DPRINT_ovfl(("ovfl_regs=0x%lx is_long_reset=%d\n", ovfl_regs[0], is_long_reset));
2814
2815 if (ctx->ctx_state == PFM_CTX_MASKED) {
2816 pfm_reset_regs_masked(ctx, ovfl_regs, is_long_reset);
2817 return;
2818 }
2819
2820 /*
2821 * now restore reset value on sampling overflowed counters
2822 */
2823 mask >>= PMU_FIRST_COUNTER;
2824 for(i = PMU_FIRST_COUNTER; mask; i++, mask >>= 1) {
2825
2826 if ((mask & 0x1UL) == 0UL) continue;
2827
2828 val = pfm_new_counter_value(ctx->ctx_pmds+ i, is_long_reset);
2829 reset_others |= ctx->ctx_pmds[i].reset_pmds[0];
2830
2831 DPRINT_ovfl((" %s reset ctx_pmds[%d]=%lx\n", is_long_reset ? "long" : "short", i, val));
2832
2833 pfm_write_soft_counter(ctx, i, val);
2834 }
2835
2836 /*
2837 * Now take care of resetting the other registers
2838 */
2839 for(i = 0; reset_others; i++, reset_others >>= 1) {
2840
2841 if ((reset_others & 0x1) == 0) continue;
2842
2843 val = pfm_new_counter_value(ctx->ctx_pmds + i, is_long_reset);
2844
2845 if (PMD_IS_COUNTING(i)) {
2846 pfm_write_soft_counter(ctx, i, val);
2847 } else {
2848 ia64_set_pmd(i, val);
2849 }
2850 DPRINT_ovfl(("%s reset_others pmd[%d]=%lx\n",
2851 is_long_reset ? "long" : "short", i, val));
2852 }
2853 ia64_srlz_d();
2854}
2855
2856static int
2857pfm_write_pmcs(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
2858{
1da177e4
LT
2859 struct task_struct *task;
2860 pfarg_reg_t *req = (pfarg_reg_t *)arg;
2861 unsigned long value, pmc_pm;
2862 unsigned long smpl_pmds, reset_pmds, impl_pmds;
2863 unsigned int cnum, reg_flags, flags, pmc_type;
2864 int i, can_access_pmu = 0, is_loaded, is_system, expert_mode;
2865 int is_monitor, is_counting, state;
2866 int ret = -EINVAL;
2867 pfm_reg_check_t wr_func;
2868#define PFM_CHECK_PMC_PM(x, y, z) ((x)->ctx_fl_system ^ PMC_PM(y, z))
2869
2870 state = ctx->ctx_state;
2871 is_loaded = state == PFM_CTX_LOADED ? 1 : 0;
2872 is_system = ctx->ctx_fl_system;
2873 task = ctx->ctx_task;
2874 impl_pmds = pmu_conf->impl_pmds[0];
2875
2876 if (state == PFM_CTX_ZOMBIE) return -EINVAL;
2877
2878 if (is_loaded) {
1da177e4
LT
2879 /*
2880 * In system wide and when the context is loaded, access can only happen
2881 * when the caller is running on the CPU being monitored by the session.
2882 * It does not have to be the owner (ctx_task) of the context per se.
2883 */
2884 if (is_system && ctx->ctx_cpu != smp_processor_id()) {
2885 DPRINT(("should be running on CPU%d\n", ctx->ctx_cpu));
2886 return -EBUSY;
2887 }
2888 can_access_pmu = GET_PMU_OWNER() == task || is_system ? 1 : 0;
2889 }
2890 expert_mode = pfm_sysctl.expert_mode;
2891
2892 for (i = 0; i < count; i++, req++) {
2893
2894 cnum = req->reg_num;
2895 reg_flags = req->reg_flags;
2896 value = req->reg_value;
2897 smpl_pmds = req->reg_smpl_pmds[0];
2898 reset_pmds = req->reg_reset_pmds[0];
2899 flags = 0;
2900
2901
2902 if (cnum >= PMU_MAX_PMCS) {
2903 DPRINT(("pmc%u is invalid\n", cnum));
2904 goto error;
2905 }
2906
2907 pmc_type = pmu_conf->pmc_desc[cnum].type;
2908 pmc_pm = (value >> pmu_conf->pmc_desc[cnum].pm_pos) & 0x1;
2909 is_counting = (pmc_type & PFM_REG_COUNTING) == PFM_REG_COUNTING ? 1 : 0;
2910 is_monitor = (pmc_type & PFM_REG_MONITOR) == PFM_REG_MONITOR ? 1 : 0;
2911
2912 /*
2913 * we reject all non implemented PMC as well
2914 * as attempts to modify PMC[0-3] which are used
2915 * as status registers by the PMU
2916 */
2917 if ((pmc_type & PFM_REG_IMPL) == 0 || (pmc_type & PFM_REG_CONTROL) == PFM_REG_CONTROL) {
2918 DPRINT(("pmc%u is unimplemented or no-access pmc_type=%x\n", cnum, pmc_type));
2919 goto error;
2920 }
2921 wr_func = pmu_conf->pmc_desc[cnum].write_check;
2922 /*
2923 * If the PMC is a monitor, then if the value is not the default:
2924 * - system-wide session: PMCx.pm=1 (privileged monitor)
2925 * - per-task : PMCx.pm=0 (user monitor)
2926 */
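	/*
	 * For example: in a system-wide session (is_system=1) a monitor PMC
	 * must be written with pmc.pm=1 (privileged monitor), in a per-task
	 * session with pmc.pm=0 (user monitor). "is_system ^ pmc_pm" is
	 * non-zero exactly when the two disagree, so a non-default value
	 * with a mismatched pm bit is rejected below.
	 */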
2927 if (is_monitor && value != PMC_DFL_VAL(cnum) && is_system ^ pmc_pm) {
2928 DPRINT(("pmc%u pmc_pm=%lu is_system=%d\n",
2929 cnum,
2930 pmc_pm,
2931 is_system));
2932 goto error;
2933 }
2934
2935 if (is_counting) {
2936 /*
2937 * enforce generation of overflow interrupt. Necessary on all
2938 * CPUs.
2939 */
2940 value |= 1 << PMU_PMC_OI;
2941
2942 if (reg_flags & PFM_REGFL_OVFL_NOTIFY) {
2943 flags |= PFM_REGFL_OVFL_NOTIFY;
2944 }
2945
2946 if (reg_flags & PFM_REGFL_RANDOM) flags |= PFM_REGFL_RANDOM;
2947
2948 /* verify validity of smpl_pmds */
2949 if ((smpl_pmds & impl_pmds) != smpl_pmds) {
2950 DPRINT(("invalid smpl_pmds 0x%lx for pmc%u\n", smpl_pmds, cnum));
2951 goto error;
2952 }
2953
2954 /* verify validity of reset_pmds */
2955 if ((reset_pmds & impl_pmds) != reset_pmds) {
2956 DPRINT(("invalid reset_pmds 0x%lx for pmc%u\n", reset_pmds, cnum));
2957 goto error;
2958 }
2959 } else {
2960 if (reg_flags & (PFM_REGFL_OVFL_NOTIFY|PFM_REGFL_RANDOM)) {
2961 DPRINT(("cannot set ovfl_notify or random on pmc%u\n", cnum));
2962 goto error;
2963 }
2964 /* eventid on non-counting monitors are ignored */
2965 }
2966
2967 /*
2968 * execute write checker, if any
2969 */
2970 if (likely(expert_mode == 0 && wr_func)) {
2971 ret = (*wr_func)(task, ctx, cnum, &value, regs);
2972 if (ret) goto error;
2973 ret = -EINVAL;
2974 }
2975
2976 /*
2977 * no error on this register
2978 */
2979 PFM_REG_RETFLAG_SET(req->reg_flags, 0);
2980
2981 /*
2982 * Now we commit the changes to the software state
2983 */
2984
2985 /*
2986 * update overflow information
2987 */
2988 if (is_counting) {
2989 /*
2990 * full flag update each time a register is programmed
2991 */
2992 ctx->ctx_pmds[cnum].flags = flags;
2993
2994 ctx->ctx_pmds[cnum].reset_pmds[0] = reset_pmds;
2995 ctx->ctx_pmds[cnum].smpl_pmds[0] = smpl_pmds;
2996 ctx->ctx_pmds[cnum].eventid = req->reg_smpl_eventid;
2997
2998 /*
2999 * Mark all PMDS to be accessed as used.
3000 *
3001 * We do not keep track of PMC because we have to
3002 * systematically restore ALL of them.
3003 *
3004 * We do not update the used_monitors mask, because
 3005	 * if we have not programmed them, then they will be in
 3006	 * a quiescent state, therefore we will not need to
 3007	 * mask/restore them when the context is MASKED.
3008 */
3009 CTX_USED_PMD(ctx, reset_pmds);
3010 CTX_USED_PMD(ctx, smpl_pmds);
3011 /*
3012 * make sure we do not try to reset on
3013 * restart because we have established new values
3014 */
3015 if (state == PFM_CTX_MASKED) ctx->ctx_ovfl_regs[0] &= ~1UL << cnum;
3016 }
3017 /*
3018 * Needed in case the user does not initialize the equivalent
3019 * PMD. Clearing is done indirectly via pfm_reset_pmu_state() so there is no
3020 * possible leak here.
3021 */
3022 CTX_USED_PMD(ctx, pmu_conf->pmc_desc[cnum].dep_pmd[0]);
3023
3024 /*
3025 * keep track of the monitor PMC that we are using.
3026 * we save the value of the pmc in ctx_pmcs[] and if
3027 * the monitoring is not stopped for the context we also
3028 * place it in the saved state area so that it will be
3029 * picked up later by the context switch code.
3030 *
3031 * The value in ctx_pmcs[] can only be changed in pfm_write_pmcs().
3032 *
35589a8f 3033 * The value in th_pmcs[] may be modified on overflow, i.e., when
1da177e4
LT
3034 * monitoring needs to be stopped.
3035 */
3036 if (is_monitor) CTX_USED_MONITOR(ctx, 1UL << cnum);
3037
3038 /*
3039 * update context state
3040 */
3041 ctx->ctx_pmcs[cnum] = value;
3042
3043 if (is_loaded) {
3044 /*
3045 * write thread state
3046 */
35589a8f 3047 if (is_system == 0) ctx->th_pmcs[cnum] = value;
1da177e4
LT
3048
3049 /*
3050 * write hardware register if we can
3051 */
3052 if (can_access_pmu) {
3053 ia64_set_pmc(cnum, value);
3054 }
3055#ifdef CONFIG_SMP
3056 else {
3057 /*
3058 * per-task SMP only here
3059 *
3060 * we are guaranteed that the task is not running on the other CPU,
3061 * we indicate that this PMD will need to be reloaded if the task
3062 * is rescheduled on the CPU it ran last on.
3063 */
3064 ctx->ctx_reload_pmcs[0] |= 1UL << cnum;
3065 }
3066#endif
3067 }
3068
3069 DPRINT(("pmc[%u]=0x%lx ld=%d apmu=%d flags=0x%x all_pmcs=0x%lx used_pmds=0x%lx eventid=%ld smpl_pmds=0x%lx reset_pmds=0x%lx reloads_pmcs=0x%lx used_monitors=0x%lx ovfl_regs=0x%lx\n",
3070 cnum,
3071 value,
3072 is_loaded,
3073 can_access_pmu,
3074 flags,
3075 ctx->ctx_all_pmcs[0],
3076 ctx->ctx_used_pmds[0],
3077 ctx->ctx_pmds[cnum].eventid,
3078 smpl_pmds,
3079 reset_pmds,
3080 ctx->ctx_reload_pmcs[0],
3081 ctx->ctx_used_monitors[0],
3082 ctx->ctx_ovfl_regs[0]));
3083 }
3084
3085 /*
3086 * make sure the changes are visible
3087 */
3088 if (can_access_pmu) ia64_srlz_d();
3089
3090 return 0;
3091error:
3092 PFM_REG_RETFLAG_SET(req->reg_flags, PFM_REG_RETFL_EINVAL);
3093 return ret;
3094}
3095
3096static int
3097pfm_write_pmds(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
3098{
1da177e4
LT
3099 struct task_struct *task;
3100 pfarg_reg_t *req = (pfarg_reg_t *)arg;
3101 unsigned long value, hw_value, ovfl_mask;
3102 unsigned int cnum;
3103 int i, can_access_pmu = 0, state;
3104 int is_counting, is_loaded, is_system, expert_mode;
3105 int ret = -EINVAL;
3106 pfm_reg_check_t wr_func;
3107
3108
3109 state = ctx->ctx_state;
3110 is_loaded = state == PFM_CTX_LOADED ? 1 : 0;
3111 is_system = ctx->ctx_fl_system;
3112 ovfl_mask = pmu_conf->ovfl_val;
3113 task = ctx->ctx_task;
3114
3115 if (unlikely(state == PFM_CTX_ZOMBIE)) return -EINVAL;
3116
3117 /*
3118 * on both UP and SMP, we can only write to the PMC when the task is
3119 * the owner of the local PMU.
3120 */
3121 if (likely(is_loaded)) {
1da177e4
LT
3122 /*
3123 * In system wide and when the context is loaded, access can only happen
3124 * when the caller is running on the CPU being monitored by the session.
3125 * It does not have to be the owner (ctx_task) of the context per se.
3126 */
3127 if (unlikely(is_system && ctx->ctx_cpu != smp_processor_id())) {
3128 DPRINT(("should be running on CPU%d\n", ctx->ctx_cpu));
3129 return -EBUSY;
3130 }
3131 can_access_pmu = GET_PMU_OWNER() == task || is_system ? 1 : 0;
3132 }
3133 expert_mode = pfm_sysctl.expert_mode;
3134
3135 for (i = 0; i < count; i++, req++) {
3136
3137 cnum = req->reg_num;
3138 value = req->reg_value;
3139
3140 if (!PMD_IS_IMPL(cnum)) {
3141 DPRINT(("pmd[%u] is unimplemented or invalid\n", cnum));
3142 goto abort_mission;
3143 }
3144 is_counting = PMD_IS_COUNTING(cnum);
3145 wr_func = pmu_conf->pmd_desc[cnum].write_check;
3146
3147 /*
3148 * execute write checker, if any
3149 */
3150 if (unlikely(expert_mode == 0 && wr_func)) {
3151 unsigned long v = value;
3152
3153 ret = (*wr_func)(task, ctx, cnum, &v, regs);
3154 if (ret) goto abort_mission;
3155
3156 value = v;
3157 ret = -EINVAL;
3158 }
3159
3160 /*
3161 * no error on this register
3162 */
3163 PFM_REG_RETFLAG_SET(req->reg_flags, 0);
3164
3165 /*
3166 * now commit changes to software state
3167 */
3168 hw_value = value;
3169
3170 /*
3171 * update virtualized (64bits) counter
3172 */
3173 if (is_counting) {
3174 /*
3175 * write context state
3176 */
3177 ctx->ctx_pmds[cnum].lval = value;
3178
3179 /*
 3180	 * when the context is loaded we use the split value
3181 */
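	/*
	 * Example (assuming a 47-bit hardware counter, ovfl_mask = (1UL<<47)-1):
	 * writing the 64-bit value 0x0000800000000123 puts hw_value = 0x123
	 * into the PMD register and keeps value = 0x0000800000000000 in
	 * ctx_pmds[].val; pfm_read_pmds() later adds the two parts back
	 * together to rebuild the full 64-bit virtual counter.
	 */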
3182 if (is_loaded) {
3183 hw_value = value & ovfl_mask;
3184 value = value & ~ovfl_mask;
3185 }
3186 }
3187 /*
3188 * update reset values (not just for counters)
3189 */
3190 ctx->ctx_pmds[cnum].long_reset = req->reg_long_reset;
3191 ctx->ctx_pmds[cnum].short_reset = req->reg_short_reset;
3192
3193 /*
3194 * update randomization parameters (not just for counters)
3195 */
3196 ctx->ctx_pmds[cnum].seed = req->reg_random_seed;
3197 ctx->ctx_pmds[cnum].mask = req->reg_random_mask;
3198
3199 /*
3200 * update context value
3201 */
3202 ctx->ctx_pmds[cnum].val = value;
3203
3204 /*
3205 * Keep track of what we use
3206 *
3207 * We do not keep track of PMC because we have to
3208 * systematically restore ALL of them.
3209 */
3210 CTX_USED_PMD(ctx, PMD_PMD_DEP(cnum));
3211
3212 /*
3213 * mark this PMD register used as well
3214 */
3215 CTX_USED_PMD(ctx, RDEP(cnum));
3216
3217 /*
3218 * make sure we do not try to reset on
3219 * restart because we have established new values
3220 */
3221 if (is_counting && state == PFM_CTX_MASKED) {
3222 ctx->ctx_ovfl_regs[0] &= ~1UL << cnum;
3223 }
3224
3225 if (is_loaded) {
3226 /*
3227 * write thread state
3228 */
35589a8f 3229 if (is_system == 0) ctx->th_pmds[cnum] = hw_value;
1da177e4
LT
3230
3231 /*
3232 * write hardware register if we can
3233 */
3234 if (can_access_pmu) {
3235 ia64_set_pmd(cnum, hw_value);
3236 } else {
3237#ifdef CONFIG_SMP
3238 /*
3239 * we are guaranteed that the task is not running on the other CPU,
3240 * we indicate that this PMD will need to be reloaded if the task
3241 * is rescheduled on the CPU it ran last on.
3242 */
3243 ctx->ctx_reload_pmds[0] |= 1UL << cnum;
3244#endif
3245 }
3246 }
3247
3248 DPRINT(("pmd[%u]=0x%lx ld=%d apmu=%d, hw_value=0x%lx ctx_pmd=0x%lx short_reset=0x%lx "
3249 "long_reset=0x%lx notify=%c seed=0x%lx mask=0x%lx used_pmds=0x%lx reset_pmds=0x%lx reload_pmds=0x%lx all_pmds=0x%lx ovfl_regs=0x%lx\n",
3250 cnum,
3251 value,
3252 is_loaded,
3253 can_access_pmu,
3254 hw_value,
3255 ctx->ctx_pmds[cnum].val,
3256 ctx->ctx_pmds[cnum].short_reset,
3257 ctx->ctx_pmds[cnum].long_reset,
3258 PMC_OVFL_NOTIFY(ctx, cnum) ? 'Y':'N',
3259 ctx->ctx_pmds[cnum].seed,
3260 ctx->ctx_pmds[cnum].mask,
3261 ctx->ctx_used_pmds[0],
3262 ctx->ctx_pmds[cnum].reset_pmds[0],
3263 ctx->ctx_reload_pmds[0],
3264 ctx->ctx_all_pmds[0],
3265 ctx->ctx_ovfl_regs[0]));
3266 }
3267
3268 /*
3269 * make changes visible
3270 */
3271 if (can_access_pmu) ia64_srlz_d();
3272
3273 return 0;
3274
3275abort_mission:
3276 /*
3277 * for now, we have only one possibility for error
3278 */
3279 PFM_REG_RETFLAG_SET(req->reg_flags, PFM_REG_RETFL_EINVAL);
3280 return ret;
3281}
3282
3283/*
 3284	 * By way of PROTECT_CONTEXT(), interrupts are masked while we are in this function.
 3285	 * Therefore we know we do not have to worry about the PMU overflow interrupt. If an
 3286	 * interrupt is delivered during the call, it will be kept pending until we leave, making
 3287	 * it appear as if it had been generated at the UNPROTECT_CONTEXT(). At least we are
3288 * guaranteed to return consistent data to the user, it may simply be old. It is not
3289 * trivial to treat the overflow while inside the call because you may end up in
3290 * some module sampling buffer code causing deadlocks.
3291 */
3292static int
3293pfm_read_pmds(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
3294{
1da177e4
LT
3295 struct task_struct *task;
3296 unsigned long val = 0UL, lval, ovfl_mask, sval;
3297 pfarg_reg_t *req = (pfarg_reg_t *)arg;
3298 unsigned int cnum, reg_flags = 0;
3299 int i, can_access_pmu = 0, state;
3300 int is_loaded, is_system, is_counting, expert_mode;
3301 int ret = -EINVAL;
3302 pfm_reg_check_t rd_func;
3303
3304 /*
3305 * access is possible when loaded only for
3306 * self-monitoring tasks or in UP mode
3307 */
3308
3309 state = ctx->ctx_state;
3310 is_loaded = state == PFM_CTX_LOADED ? 1 : 0;
3311 is_system = ctx->ctx_fl_system;
3312 ovfl_mask = pmu_conf->ovfl_val;
3313 task = ctx->ctx_task;
3314
3315 if (state == PFM_CTX_ZOMBIE) return -EINVAL;
3316
3317 if (likely(is_loaded)) {
1da177e4
LT
3318 /*
3319 * In system wide and when the context is loaded, access can only happen
3320 * when the caller is running on the CPU being monitored by the session.
3321 * It does not have to be the owner (ctx_task) of the context per se.
3322 */
3323 if (unlikely(is_system && ctx->ctx_cpu != smp_processor_id())) {
3324 DPRINT(("should be running on CPU%d\n", ctx->ctx_cpu));
3325 return -EBUSY;
3326 }
3327 /*
3328 * this can be true when not self-monitoring only in UP
3329 */
3330 can_access_pmu = GET_PMU_OWNER() == task || is_system ? 1 : 0;
3331
3332 if (can_access_pmu) ia64_srlz_d();
3333 }
3334 expert_mode = pfm_sysctl.expert_mode;
3335
3336 DPRINT(("ld=%d apmu=%d ctx_state=%d\n",
3337 is_loaded,
3338 can_access_pmu,
3339 state));
3340
3341 /*
3342 * on both UP and SMP, we can only read the PMD from the hardware register when
3343 * the task is the owner of the local PMU.
3344 */
3345
3346 for (i = 0; i < count; i++, req++) {
3347
3348 cnum = req->reg_num;
3349 reg_flags = req->reg_flags;
3350
3351 if (unlikely(!PMD_IS_IMPL(cnum))) goto error;
3352 /*
 3353	 * we can only read the registers that we use. That includes
72fdbdce 3354	 * the ones we explicitly initialize AND the ones we want included
1da177e4
LT
3355 * in the sampling buffer (smpl_regs).
3356 *
3357 * Having this restriction allows optimization in the ctxsw routine
3358 * without compromising security (leaks)
3359 */
3360 if (unlikely(!CTX_IS_USED_PMD(ctx, cnum))) goto error;
3361
3362 sval = ctx->ctx_pmds[cnum].val;
3363 lval = ctx->ctx_pmds[cnum].lval;
3364 is_counting = PMD_IS_COUNTING(cnum);
3365
3366 /*
3367 * If the task is not the current one, then we check if the
3368 * PMU state is still in the local live register due to lazy ctxsw.
3369 * If true, then we read directly from the registers.
3370 */
3371 if (can_access_pmu){
3372 val = ia64_get_pmd(cnum);
3373 } else {
3374 /*
3375 * context has been saved
3376 * if context is zombie, then task does not exist anymore.
3377 * In this case, we use the full value saved in the context (pfm_flush_regs()).
3378 */
35589a8f 3379 val = is_loaded ? ctx->th_pmds[cnum] : 0UL;
1da177e4
LT
3380 }
3381 rd_func = pmu_conf->pmd_desc[cnum].read_check;
3382
3383 if (is_counting) {
3384 /*
3385 * XXX: need to check for overflow when loaded
3386 */
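	/*
	 * The low (hardware) bits and the software-maintained upper bits are
	 * recombined here: e.g. with the split made in pfm_write_pmds(), a
	 * hardware value of 0x123 and sval = 0x0000800000000000 yield the
	 * full virtual counter value 0x0000800000000123.
	 */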
3387 val &= ovfl_mask;
3388 val += sval;
3389 }
3390
3391 /*
3392 * execute read checker, if any
3393 */
3394 if (unlikely(expert_mode == 0 && rd_func)) {
3395 unsigned long v = val;
3396 ret = (*rd_func)(ctx->ctx_task, ctx, cnum, &v, regs);
3397 if (ret) goto error;
3398 val = v;
3399 ret = -EINVAL;
3400 }
3401
3402 PFM_REG_RETFLAG_SET(reg_flags, 0);
3403
3404 DPRINT(("pmd[%u]=0x%lx\n", cnum, val));
3405
3406 /*
3407 * update register return value, abort all if problem during copy.
3408 * we only modify the reg_flags field. no check mode is fine because
3409 * access has been verified upfront in sys_perfmonctl().
3410 */
3411 req->reg_value = val;
3412 req->reg_flags = reg_flags;
3413 req->reg_last_reset_val = lval;
3414 }
3415
3416 return 0;
3417
3418error:
3419 PFM_REG_RETFLAG_SET(req->reg_flags, PFM_REG_RETFL_EINVAL);
3420 return ret;
3421}
3422
3423int
3424pfm_mod_write_pmcs(struct task_struct *task, void *req, unsigned int nreq, struct pt_regs *regs)
3425{
3426 pfm_context_t *ctx;
3427
3428 if (req == NULL) return -EINVAL;
3429
3430 ctx = GET_PMU_CTX();
3431
3432 if (ctx == NULL) return -EINVAL;
3433
3434 /*
3435 * for now limit to current task, which is enough when calling
3436 * from overflow handler
3437 */
3438 if (task != current && ctx->ctx_fl_system == 0) return -EBUSY;
3439
3440 return pfm_write_pmcs(ctx, req, nreq, regs);
3441}
3442EXPORT_SYMBOL(pfm_mod_write_pmcs);
3443
3444int
3445pfm_mod_read_pmds(struct task_struct *task, void *req, unsigned int nreq, struct pt_regs *regs)
3446{
3447 pfm_context_t *ctx;
3448
3449 if (req == NULL) return -EINVAL;
3450
3451 ctx = GET_PMU_CTX();
3452
3453 if (ctx == NULL) return -EINVAL;
3454
3455 /*
3456 * for now limit to current task, which is enough when calling
3457 * from overflow handler
3458 */
3459 if (task != current && ctx->ctx_fl_system == 0) return -EBUSY;
3460
3461 return pfm_read_pmds(ctx, req, nreq, regs);
3462}
3463EXPORT_SYMBOL(pfm_mod_read_pmds);
3464
3465/*
3466 * Only call this function when a process it trying to
3467 * write the debug registers (reading is always allowed)
3468 */
3469int
3470pfm_use_debug_registers(struct task_struct *task)
3471{
3472 pfm_context_t *ctx = task->thread.pfm_context;
3473 unsigned long flags;
3474 int ret = 0;
3475
3476 if (pmu_conf->use_rr_dbregs == 0) return 0;
3477
19c5870c 3478 DPRINT(("called for [%d]\n", task_pid_nr(task)));
1da177e4
LT
3479
3480 /*
3481 * do it only once
3482 */
3483 if (task->thread.flags & IA64_THREAD_DBG_VALID) return 0;
3484
3485 /*
3486 * Even on SMP, we do not need to use an atomic here because
3487 * the only way in is via ptrace() and this is possible only when the
3488 * process is stopped. Even in the case where the ctxsw out is not totally
3489 * completed by the time we come here, there is no way the 'stopped' process
3490 * could be in the middle of fiddling with the pfm_write_ibr_dbr() routine.
3491 * So this is always safe.
3492 */
3493 if (ctx && ctx->ctx_fl_using_dbreg == 1) return -1;
3494
3495 LOCK_PFS(flags);
3496
3497 /*
3498 * We cannot allow setting breakpoints when system wide monitoring
3499 * sessions are using the debug registers.
3500 */
3501 if (pfm_sessions.pfs_sys_use_dbregs> 0)
3502 ret = -1;
3503 else
3504 pfm_sessions.pfs_ptrace_use_dbregs++;
3505
3506 DPRINT(("ptrace_use_dbregs=%u sys_use_dbregs=%u by [%d] ret = %d\n",
3507 pfm_sessions.pfs_ptrace_use_dbregs,
3508 pfm_sessions.pfs_sys_use_dbregs,
19c5870c 3509 task_pid_nr(task), ret));
1da177e4
LT
3510
3511 UNLOCK_PFS(flags);
3512
3513 return ret;
3514}
3515
3516/*
3517 * This function is called for every task that exits with the
3518 * IA64_THREAD_DBG_VALID set. This indicates a task which was
3519 * able to use the debug registers for debugging purposes via
3520 * ptrace(). Therefore we know it was not using them for
af901ca1 3521 * performance monitoring, so we only decrement the number
1da177e4
LT
3522 * of "ptraced" debug register users to keep the count up to date
3523 */
3524int
3525pfm_release_debug_registers(struct task_struct *task)
3526{
3527 unsigned long flags;
3528 int ret;
3529
3530 if (pmu_conf->use_rr_dbregs == 0) return 0;
3531
3532 LOCK_PFS(flags);
3533 if (pfm_sessions.pfs_ptrace_use_dbregs == 0) {
19c5870c 3534 printk(KERN_ERR "perfmon: invalid release for [%d] ptrace_use_dbregs=0\n", task_pid_nr(task));
1da177e4
LT
3535 ret = -1;
3536 } else {
3537 pfm_sessions.pfs_ptrace_use_dbregs--;
3538 ret = 0;
3539 }
3540 UNLOCK_PFS(flags);
3541
3542 return ret;
3543}
3544
3545static int
3546pfm_restart(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
3547{
3548 struct task_struct *task;
3549 pfm_buffer_fmt_t *fmt;
3550 pfm_ovfl_ctrl_t rst_ctrl;
3551 int state, is_system;
3552 int ret = 0;
3553
3554 state = ctx->ctx_state;
3555 fmt = ctx->ctx_buf_fmt;
3556 is_system = ctx->ctx_fl_system;
3557 task = PFM_CTX_TASK(ctx);
3558
3559 switch(state) {
3560 case PFM_CTX_MASKED:
3561 break;
3562 case PFM_CTX_LOADED:
3563 if (CTX_HAS_SMPL(ctx) && fmt->fmt_restart_active) break;
3564 /* fall through */
3565 case PFM_CTX_UNLOADED:
3566 case PFM_CTX_ZOMBIE:
3567 DPRINT(("invalid state=%d\n", state));
3568 return -EBUSY;
3569 default:
3570 DPRINT(("state=%d, cannot operate (no active_restart handler)\n", state));
3571 return -EINVAL;
3572 }
3573
3574 /*
3575 * In system wide and when the context is loaded, access can only happen
3576 * when the caller is running on the CPU being monitored by the session.
3577 * It does not have to be the owner (ctx_task) of the context per se.
3578 */
3579 if (is_system && ctx->ctx_cpu != smp_processor_id()) {
3580 DPRINT(("should be running on CPU%d\n", ctx->ctx_cpu));
3581 return -EBUSY;
3582 }
3583
3584 /* sanity check */
3585 if (unlikely(task == NULL)) {
19c5870c 3586 printk(KERN_ERR "perfmon: [%d] pfm_restart no task\n", task_pid_nr(current));
1da177e4
LT
3587 return -EINVAL;
3588 }
3589
3590 if (task == current || is_system) {
3591
3592 fmt = ctx->ctx_buf_fmt;
3593
3594 DPRINT(("restarting self %d ovfl=0x%lx\n",
19c5870c 3595 task_pid_nr(task),
1da177e4
LT
3596 ctx->ctx_ovfl_regs[0]));
3597
3598 if (CTX_HAS_SMPL(ctx)) {
3599
3600 prefetch(ctx->ctx_smpl_hdr);
3601
3602 rst_ctrl.bits.mask_monitoring = 0;
3603 rst_ctrl.bits.reset_ovfl_pmds = 0;
3604
3605 if (state == PFM_CTX_LOADED)
3606 ret = pfm_buf_fmt_restart_active(fmt, task, &rst_ctrl, ctx->ctx_smpl_hdr, regs);
3607 else
3608 ret = pfm_buf_fmt_restart(fmt, task, &rst_ctrl, ctx->ctx_smpl_hdr, regs);
3609 } else {
3610 rst_ctrl.bits.mask_monitoring = 0;
3611 rst_ctrl.bits.reset_ovfl_pmds = 1;
3612 }
3613
3614 if (ret == 0) {
3615 if (rst_ctrl.bits.reset_ovfl_pmds)
3616 pfm_reset_regs(ctx, ctx->ctx_ovfl_regs, PFM_PMD_LONG_RESET);
3617
3618 if (rst_ctrl.bits.mask_monitoring == 0) {
19c5870c 3619 DPRINT(("resuming monitoring for [%d]\n", task_pid_nr(task)));
1da177e4
LT
3620
3621 if (state == PFM_CTX_MASKED) pfm_restore_monitoring(task);
3622 } else {
19c5870c 3623 DPRINT(("keeping monitoring stopped for [%d]\n", task_pid_nr(task)));
1da177e4
LT
3624
3625 // cannot use pfm_stop_monitoring(task, regs);
3626 }
3627 }
3628 /*
3629 * clear overflowed PMD mask to remove any stale information
3630 */
3631 ctx->ctx_ovfl_regs[0] = 0UL;
3632
3633 /*
3634 * back to LOADED state
3635 */
3636 ctx->ctx_state = PFM_CTX_LOADED;
3637
3638 /*
3639 * XXX: not really useful for self monitoring
3640 */
3641 ctx->ctx_fl_can_restart = 0;
3642
3643 return 0;
3644 }
3645
3646 /*
3647 * restart another task
3648 */
3649
3650 /*
3651 * When PFM_CTX_MASKED, we cannot issue a restart before the previous
3652 * one is seen by the task.
3653 */
3654 if (state == PFM_CTX_MASKED) {
3655 if (ctx->ctx_fl_can_restart == 0) return -EINVAL;
3656 /*
3657 * will prevent subsequent restart before this one is
3658 * seen by other task
3659 */
3660 ctx->ctx_fl_can_restart = 0;
3661 }
3662
3663 /*
 3664	 * if blocking, then post the semaphore if PFM_CTX_MASKED, i.e.
3665 * the task is blocked or on its way to block. That's the normal
3666 * restart path. If the monitoring is not masked, then the task
3667 * can be actively monitoring and we cannot directly intervene.
3668 * Therefore we use the trap mechanism to catch the task and
3669 * force it to reset the buffer/reset PMDs.
3670 *
3671 * if non-blocking, then we ensure that the task will go into
3672 * pfm_handle_work() before returning to user mode.
3673 *
72fdbdce 3674 * We cannot explicitly reset another task, it MUST always
1da177e4
LT
3675 * be done by the task itself. This works for system wide because
3676 * the tool that is controlling the session is logically doing
3677 * "self-monitoring".
3678 */
3679 if (CTX_OVFL_NOBLOCK(ctx) == 0 && state == PFM_CTX_MASKED) {
19c5870c 3680 DPRINT(("unblocking [%d] \n", task_pid_nr(task)));
60f1c444 3681 complete(&ctx->ctx_restart_done);
1da177e4 3682 } else {
19c5870c 3683 DPRINT(("[%d] armed exit trap\n", task_pid_nr(task)));
1da177e4
LT
3684
3685 ctx->ctx_fl_trap_reason = PFM_TRAP_REASON_RESET;
3686
3687 PFM_SET_WORK_PENDING(task, 1);
3688
f14488cc 3689 set_notify_resume(task);
1da177e4
LT
3690
3691 /*
3692 * XXX: send reschedule if task runs on another CPU
3693 */
3694 }
3695 return 0;
3696}
3697
3698static int
3699pfm_debug(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
3700{
3701 unsigned int m = *(unsigned int *)arg;
3702
3703 pfm_sysctl.debug = m == 0 ? 0 : 1;
3704
1da177e4
LT
3705 printk(KERN_INFO "perfmon debugging %s (timing reset)\n", pfm_sysctl.debug ? "on" : "off");
3706
3707 if (m == 0) {
3708 memset(pfm_stats, 0, sizeof(pfm_stats));
3709 for(m=0; m < NR_CPUS; m++) pfm_stats[m].pfm_ovfl_intr_cycles_min = ~0UL;
3710 }
3711 return 0;
3712}
3713
3714/*
3715 * arg can be NULL and count can be zero for this function
3716 */
3717static int
3718pfm_write_ibr_dbr(int mode, pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
3719{
3720 struct thread_struct *thread = NULL;
3721 struct task_struct *task;
3722 pfarg_dbreg_t *req = (pfarg_dbreg_t *)arg;
3723 unsigned long flags;
3724 dbreg_t dbreg;
3725 unsigned int rnum;
3726 int first_time;
3727 int ret = 0, state;
3728 int i, can_access_pmu = 0;
3729 int is_system, is_loaded;
3730
3731 if (pmu_conf->use_rr_dbregs == 0) return -EINVAL;
3732
3733 state = ctx->ctx_state;
3734 is_loaded = state == PFM_CTX_LOADED ? 1 : 0;
3735 is_system = ctx->ctx_fl_system;
3736 task = ctx->ctx_task;
3737
3738 if (state == PFM_CTX_ZOMBIE) return -EINVAL;
3739
3740 /*
3741 * on both UP and SMP, we can only write to the PMC when the task is
3742 * the owner of the local PMU.
3743 */
3744 if (is_loaded) {
3745 thread = &task->thread;
3746 /*
3747 * In system wide and when the context is loaded, access can only happen
3748 * when the caller is running on the CPU being monitored by the session.
3749 * It does not have to be the owner (ctx_task) of the context per se.
3750 */
3751 if (unlikely(is_system && ctx->ctx_cpu != smp_processor_id())) {
3752 DPRINT(("should be running on CPU%d\n", ctx->ctx_cpu));
3753 return -EBUSY;
3754 }
3755 can_access_pmu = GET_PMU_OWNER() == task || is_system ? 1 : 0;
3756 }
3757
3758 /*
3759 * we do not need to check for ipsr.db because we do clear ibr.x, dbr.r, and dbr.w
3760 * ensuring that no real breakpoint can be installed via this call.
3761 *
3762 * IMPORTANT: regs can be NULL in this function
3763 */
3764
3765 first_time = ctx->ctx_fl_using_dbreg == 0;
3766
3767 /*
3768 * don't bother if we are loaded and task is being debugged
3769 */
3770 if (is_loaded && (thread->flags & IA64_THREAD_DBG_VALID) != 0) {
19c5870c 3771 DPRINT(("debug registers already in use for [%d]\n", task_pid_nr(task)));
1da177e4
LT
3772 return -EBUSY;
3773 }
3774
3775 /*
3776 * check for debug registers in system wide mode
3777 *
3778 * If though a check is done in pfm_context_load(),
3779 * we must repeat it here, in case the registers are
3780 * written after the context is loaded
3781 */
3782 if (is_loaded) {
3783 LOCK_PFS(flags);
3784
3785 if (first_time && is_system) {
3786 if (pfm_sessions.pfs_ptrace_use_dbregs)
3787 ret = -EBUSY;
3788 else
3789 pfm_sessions.pfs_sys_use_dbregs++;
3790 }
3791 UNLOCK_PFS(flags);
3792 }
3793
3794 if (ret != 0) return ret;
3795
3796 /*
3797 * mark ourself as user of the debug registers for
3798 * perfmon purposes.
3799 */
3800 ctx->ctx_fl_using_dbreg = 1;
3801
3802 /*
3803 * clear hardware registers to make sure we don't
3804 * pick up stale state.
3805 *
3806 * for a system wide session, we do not use
3807 * thread.dbr, thread.ibr because this process
3808 * never leaves the current CPU and the state
3809 * is shared by all processes running on it
3810 */
3811 if (first_time && can_access_pmu) {
19c5870c 3812 DPRINT(("[%d] clearing ibrs, dbrs\n", task_pid_nr(task)));
1da177e4
LT
3813 for (i=0; i < pmu_conf->num_ibrs; i++) {
3814 ia64_set_ibr(i, 0UL);
3815 ia64_dv_serialize_instruction();
3816 }
3817 ia64_srlz_i();
3818 for (i=0; i < pmu_conf->num_dbrs; i++) {
3819 ia64_set_dbr(i, 0UL);
3820 ia64_dv_serialize_data();
3821 }
3822 ia64_srlz_d();
3823 }
3824
3825 /*
3826 * Now install the values into the registers
3827 */
3828 for (i = 0; i < count; i++, req++) {
3829
3830 rnum = req->dbreg_num;
3831 dbreg.val = req->dbreg_value;
3832
3833 ret = -EINVAL;
3834
3835 if ((mode == PFM_CODE_RR && rnum >= PFM_NUM_IBRS) || ((mode == PFM_DATA_RR) && rnum >= PFM_NUM_DBRS)) {
3836 DPRINT(("invalid register %u val=0x%lx mode=%d i=%d count=%d\n",
3837 rnum, dbreg.val, mode, i, count));
3838
3839 goto abort_mission;
3840 }
3841
3842 /*
3843 * make sure we do not install enabled breakpoint
3844 */
3845 if (rnum & 0x1) {
3846 if (mode == PFM_CODE_RR)
3847 dbreg.ibr.ibr_x = 0;
3848 else
3849 dbreg.dbr.dbr_r = dbreg.dbr.dbr_w = 0;
3850 }
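		/*
		 * On IA-64, even-numbered IBR/DBR registers hold the breakpoint
		 * address while the odd-numbered ones hold the mask and control
		 * bits (ibr.x, dbr.r, dbr.w), which is why only odd rnum values
		 * need their enable bits cleared here.
		 */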
3851
3852 PFM_REG_RETFLAG_SET(req->dbreg_flags, 0);
3853
3854 /*
3855 * Debug registers, just like PMC, can only be modified
3856 * by a kernel call. Moreover, perfmon() access to those
3857 * registers are centralized in this routine. The hardware
3858 * does not modify the value of these registers, therefore,
3859 * if we save them as they are written, we can avoid having
3860 * to save them on context switch out. This is made possible
3861 * by the fact that when perfmon uses debug registers, ptrace()
3862 * won't be able to modify them concurrently.
3863 */
3864 if (mode == PFM_CODE_RR) {
3865 CTX_USED_IBR(ctx, rnum);
3866
3867 if (can_access_pmu) {
3868 ia64_set_ibr(rnum, dbreg.val);
3869 ia64_dv_serialize_instruction();
3870 }
3871
3872 ctx->ctx_ibrs[rnum] = dbreg.val;
3873
3874 DPRINT(("write ibr%u=0x%lx used_ibrs=0x%x ld=%d apmu=%d\n",
3875 rnum, dbreg.val, ctx->ctx_used_ibrs[0], is_loaded, can_access_pmu));
3876 } else {
3877 CTX_USED_DBR(ctx, rnum);
3878
3879 if (can_access_pmu) {
3880 ia64_set_dbr(rnum, dbreg.val);
3881 ia64_dv_serialize_data();
3882 }
3883 ctx->ctx_dbrs[rnum] = dbreg.val;
3884
3885 DPRINT(("write dbr%u=0x%lx used_dbrs=0x%x ld=%d apmu=%d\n",
3886 rnum, dbreg.val, ctx->ctx_used_dbrs[0], is_loaded, can_access_pmu));
3887 }
3888 }
3889
3890 return 0;
3891
3892abort_mission:
3893 /*
3894 * in case it was our first attempt, we undo the global modifications
3895 */
3896 if (first_time) {
3897 LOCK_PFS(flags);
3898 if (ctx->ctx_fl_system) {
3899 pfm_sessions.pfs_sys_use_dbregs--;
3900 }
3901 UNLOCK_PFS(flags);
3902 ctx->ctx_fl_using_dbreg = 0;
3903 }
3904 /*
3905 * install error return flag
3906 */
3907 PFM_REG_RETFLAG_SET(req->dbreg_flags, PFM_REG_RETFL_EINVAL);
3908
3909 return ret;
3910}
3911
3912static int
3913pfm_write_ibrs(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
3914{
3915 return pfm_write_ibr_dbr(PFM_CODE_RR, ctx, arg, count, regs);
3916}
3917
3918static int
3919pfm_write_dbrs(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
3920{
3921 return pfm_write_ibr_dbr(PFM_DATA_RR, ctx, arg, count, regs);
3922}
3923
3924int
3925pfm_mod_write_ibrs(struct task_struct *task, void *req, unsigned int nreq, struct pt_regs *regs)
3926{
3927 pfm_context_t *ctx;
3928
3929 if (req == NULL) return -EINVAL;
3930
3931 ctx = GET_PMU_CTX();
3932
3933 if (ctx == NULL) return -EINVAL;
3934
3935 /*
3936 * for now limit to current task, which is enough when calling
3937 * from overflow handler
3938 */
3939 if (task != current && ctx->ctx_fl_system == 0) return -EBUSY;
3940
3941 return pfm_write_ibrs(ctx, req, nreq, regs);
3942}
3943EXPORT_SYMBOL(pfm_mod_write_ibrs);
3944
3945int
3946pfm_mod_write_dbrs(struct task_struct *task, void *req, unsigned int nreq, struct pt_regs *regs)
3947{
3948 pfm_context_t *ctx;
3949
3950 if (req == NULL) return -EINVAL;
3951
3952 ctx = GET_PMU_CTX();
3953
3954 if (ctx == NULL) return -EINVAL;
3955
3956 /*
3957 * for now limit to current task, which is enough when calling
3958 * from overflow handler
3959 */
3960 if (task != current && ctx->ctx_fl_system == 0) return -EBUSY;
3961
3962 return pfm_write_dbrs(ctx, req, nreq, regs);
3963}
3964EXPORT_SYMBOL(pfm_mod_write_dbrs);
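/*
 * Rough usage sketch for the two exports above (illustrative only): a
 * sampling format module, running in the overflow handler on behalf of
 * the current task, could install a code range restriction with
 * something like:
 *
 *	pfarg_dbreg_t d = { .dbreg_num = 0, .dbreg_value = addr };
 *	ret = pfm_mod_write_ibrs(current, &d, 1, regs);
 *
 * where addr stands in for whatever breakpoint value the module computes,
 * subject to the "current task only" restriction enforced above.
 */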
3965
3966
3967static int
3968pfm_get_features(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
3969{
3970 pfarg_features_t *req = (pfarg_features_t *)arg;
3971
3972 req->ft_version = PFM_VERSION;
3973 return 0;
3974}
3975
3976static int
3977pfm_stop(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
3978{
3979 struct pt_regs *tregs;
3980 struct task_struct *task = PFM_CTX_TASK(ctx);
3981 int state, is_system;
3982
3983 state = ctx->ctx_state;
3984 is_system = ctx->ctx_fl_system;
3985
3986 /*
3987 * context must be attached to issue the stop command (includes LOADED,MASKED,ZOMBIE)
3988 */
3989 if (state == PFM_CTX_UNLOADED) return -EINVAL;
3990
3991 /*
3992 * In system wide and when the context is loaded, access can only happen
3993 * when the caller is running on the CPU being monitored by the session.
3994 * It does not have to be the owner (ctx_task) of the context per se.
3995 */
3996 if (is_system && ctx->ctx_cpu != smp_processor_id()) {
3997 DPRINT(("should be running on CPU%d\n", ctx->ctx_cpu));
3998 return -EBUSY;
3999 }
4000 DPRINT(("task [%d] ctx_state=%d is_system=%d\n",
19c5870c 4001 task_pid_nr(PFM_CTX_TASK(ctx)),
1da177e4
LT
4002 state,
4003 is_system));
4004 /*
4005 * in system mode, we need to update the PMU directly
4006 * and the user level state of the caller, which may not
4007 * necessarily be the creator of the context.
4008 */
4009 if (is_system) {
4010 /*
4011 * Update local PMU first
4012 *
4013 * disable dcr pp
4014 */
4015 ia64_setreg(_IA64_REG_CR_DCR, ia64_getreg(_IA64_REG_CR_DCR) & ~IA64_DCR_PP);
4016 ia64_srlz_i();
4017
4018 /*
4019 * update local cpuinfo
4020 */
4021 PFM_CPUINFO_CLEAR(PFM_CPUINFO_DCR_PP);
4022
4023 /*
4024 * stop monitoring, does srlz.i
4025 */
4026 pfm_clear_psr_pp();
4027
4028 /*
4029 * stop monitoring in the caller
4030 */
4031 ia64_psr(regs)->pp = 0;
4032
4033 return 0;
4034 }
4035 /*
4036 * per-task mode
4037 */
4038
4039 if (task == current) {
4040 /* stop monitoring at kernel level */
4041 pfm_clear_psr_up();
4042
4043 /*
4044 * stop monitoring at the user level
4045 */
4046 ia64_psr(regs)->up = 0;
4047 } else {
6450578f 4048 tregs = task_pt_regs(task);
1da177e4
LT
4049
4050 /*
4051 * stop monitoring at the user level
4052 */
4053 ia64_psr(tregs)->up = 0;
4054
4055 /*
4056 * monitoring disabled in kernel at next reschedule
4057 */
4058 ctx->ctx_saved_psr_up = 0;
19c5870c 4059 DPRINT(("task=[%d]\n", task_pid_nr(task)));
1da177e4
LT
4060 }
4061 return 0;
4062}
4063
4064
4065static int
4066pfm_start(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
4067{
4068 struct pt_regs *tregs;
4069 int state, is_system;
4070
4071 state = ctx->ctx_state;
4072 is_system = ctx->ctx_fl_system;
4073
4074 if (state != PFM_CTX_LOADED) return -EINVAL;
4075
4076 /*
4077 * In system wide and when the context is loaded, access can only happen
4078 * when the caller is running on the CPU being monitored by the session.
4079 * It does not have to be the owner (ctx_task) of the context per se.
4080 */
4081 if (is_system && ctx->ctx_cpu != smp_processor_id()) {
4082 DPRINT(("should be running on CPU%d\n", ctx->ctx_cpu));
4083 return -EBUSY;
4084 }
4085
4086 /*
4087 * in system mode, we need to update the PMU directly
4088 * and the user level state of the caller, which may not
4089 * necessarily be the creator of the context.
4090 */
4091 if (is_system) {
4092
4093 /*
4094 * set user level psr.pp for the caller
4095 */
4096 ia64_psr(regs)->pp = 1;
4097
4098 /*
4099 * now update the local PMU and cpuinfo
4100 */
4101 PFM_CPUINFO_SET(PFM_CPUINFO_DCR_PP);
4102
4103 /*
4104 * start monitoring at kernel level
4105 */
4106 pfm_set_psr_pp();
4107
4108 /* enable dcr pp */
4109 ia64_setreg(_IA64_REG_CR_DCR, ia64_getreg(_IA64_REG_CR_DCR) | IA64_DCR_PP);
4110 ia64_srlz_i();
4111
4112 return 0;
4113 }
4114
4115 /*
4116 * per-process mode
4117 */
4118
4119 if (ctx->ctx_task == current) {
4120
4121 /* start monitoring at kernel level */
4122 pfm_set_psr_up();
4123
4124 /*
4125 * activate monitoring at user level
4126 */
4127 ia64_psr(regs)->up = 1;
4128
4129 } else {
6450578f 4130 tregs = task_pt_regs(ctx->ctx_task);
1da177e4
LT
4131
4132 /*
4133 * start monitoring at the kernel level the next
4134 * time the task is scheduled
4135 */
4136 ctx->ctx_saved_psr_up = IA64_PSR_UP;
4137
4138 /*
4139 * activate monitoring at user level
4140 */
4141 ia64_psr(tregs)->up = 1;
4142 }
4143 return 0;
4144}
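/*
 * Note the symmetry between pfm_stop() and pfm_start() above: system-wide
 * sessions are gated by psr.pp (together with dcr.pp for privileged code),
 * while per-task sessions are gated by psr.up; for a task other than the
 * caller, the kernel-level change is deferred to the next context switch
 * via ctx_saved_psr_up.
 */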
4145
4146static int
4147pfm_get_pmc_reset(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
4148{
4149 pfarg_reg_t *req = (pfarg_reg_t *)arg;
4150 unsigned int cnum;
4151 int i;
4152 int ret = -EINVAL;
4153
4154 for (i = 0; i < count; i++, req++) {
4155
4156 cnum = req->reg_num;
4157
4158 if (!PMC_IS_IMPL(cnum)) goto abort_mission;
4159
4160 req->reg_value = PMC_DFL_VAL(cnum);
4161
4162 PFM_REG_RETFLAG_SET(req->reg_flags, 0);
4163
4164 DPRINT(("pmc_reset_val pmc[%u]=0x%lx\n", cnum, req->reg_value));
4165 }
4166 return 0;
4167
4168abort_mission:
4169 PFM_REG_RETFLAG_SET(req->reg_flags, PFM_REG_RETFL_EINVAL);
4170 return ret;
4171}
4172
4173static int
4174pfm_check_task_exist(pfm_context_t *ctx)
4175{
4176 struct task_struct *g, *t;
4177 int ret = -ESRCH;
4178
4179 read_lock(&tasklist_lock);
4180
4181 do_each_thread (g, t) {
4182 if (t->thread.pfm_context == ctx) {
4183 ret = 0;
6794c752 4184 goto out;
1da177e4
LT
4185 }
4186 } while_each_thread (g, t);
6794c752 4187out:
1da177e4
LT
4188 read_unlock(&tasklist_lock);
4189
4190 DPRINT(("pfm_check_task_exist: ret=%d ctx=%p\n", ret, ctx));
4191
4192 return ret;
4193}
4194
4195static int
4196pfm_context_load(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
4197{
4198 struct task_struct *task;
4199 struct thread_struct *thread;
4200 struct pfm_context_t *old;
4201 unsigned long flags;
4202#ifndef CONFIG_SMP
4203 struct task_struct *owner_task = NULL;
4204#endif
4205 pfarg_load_t *req = (pfarg_load_t *)arg;
4206 unsigned long *pmcs_source, *pmds_source;
4207 int the_cpu;
4208 int ret = 0;
4209 int state, is_system, set_dbregs = 0;
4210
4211 state = ctx->ctx_state;
4212 is_system = ctx->ctx_fl_system;
4213 /*
4214 * can only load from unloaded or terminated state
4215 */
4216 if (state != PFM_CTX_UNLOADED) {
4217 DPRINT(("cannot load to [%d], invalid ctx_state=%d\n",
4218 req->load_pid,
4219 ctx->ctx_state));
a5a70b75 4220 return -EBUSY;
1da177e4
LT
4221 }
4222
4223 DPRINT(("load_pid [%d] using_dbreg=%d\n", req->load_pid, ctx->ctx_fl_using_dbreg));
4224
4225 if (CTX_OVFL_NOBLOCK(ctx) == 0 && req->load_pid == current->pid) {
4226 DPRINT(("cannot use blocking mode on self\n"));
4227 return -EINVAL;
4228 }
4229
4230 ret = pfm_get_task(ctx, req->load_pid, &task);
4231 if (ret) {
4232 DPRINT(("load_pid [%d] get_task=%d\n", req->load_pid, ret));
4233 return ret;
4234 }
4235
4236 ret = -EINVAL;
4237
4238 /*
4239 * system wide is self monitoring only
4240 */
4241 if (is_system && task != current) {
4242 DPRINT(("system wide is self monitoring only load_pid=%d\n",
4243 req->load_pid));
4244 goto error;
4245 }
4246
4247 thread = &task->thread;
4248
4249 ret = 0;
4250 /*
4251 * cannot load a context which is using range restrictions,
4252 * into a task that is being debugged.
4253 */
4254 if (ctx->ctx_fl_using_dbreg) {
4255 if (thread->flags & IA64_THREAD_DBG_VALID) {
4256 ret = -EBUSY;
4257 DPRINT(("load_pid [%d] task is debugged, cannot load range restrictions\n", req->load_pid));
4258 goto error;
4259 }
4260 LOCK_PFS(flags);
4261
4262 if (is_system) {
4263 if (pfm_sessions.pfs_ptrace_use_dbregs) {
19c5870c
AD
4264 DPRINT(("cannot load [%d] dbregs in use\n",
4265 task_pid_nr(task)));
1da177e4
LT
4266 ret = -EBUSY;
4267 } else {
4268 pfm_sessions.pfs_sys_use_dbregs++;
19c5870c 4269 DPRINT(("load [%d] increased sys_use_dbreg=%u\n", task_pid_nr(task), pfm_sessions.pfs_sys_use_dbregs));
1da177e4
LT
4270 set_dbregs = 1;
4271 }
4272 }
4273
4274 UNLOCK_PFS(flags);
4275
4276 if (ret) goto error;
4277 }
4278
4279 /*
4280 * SMP system-wide monitoring implies self-monitoring.
4281 *
4282 * The programming model expects the task to
4283 * be pinned on a CPU throughout the session.
4284 * Here we take note of the current CPU at the
4285 * time the context is loaded. No call from
4286 * another CPU will be allowed.
4287 *
 4288	 * The pinning via sched_setaffinity()
4289 * must be done by the calling task prior
4290 * to this call.
4291 *
4292 * systemwide: keep track of CPU this session is supposed to run on
4293 */
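	/*
	 * For illustration (user level, not kernel code): before issuing
	 * PFM_LOAD_CONTEXT for a system-wide session, the monitoring tool
	 * would typically pin itself with something like:
	 *
	 *	cpu_set_t set;
	 *	CPU_ZERO(&set); CPU_SET(cpu, &set);
	 *	sched_setaffinity(0, sizeof(set), &set);
	 */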
4294 the_cpu = ctx->ctx_cpu = smp_processor_id();
4295
4296 ret = -EBUSY;
4297 /*
4298 * now reserve the session
4299 */
4300 ret = pfm_reserve_session(current, is_system, the_cpu);
4301 if (ret) goto error;
4302
4303 /*
4304 * task is necessarily stopped at this point.
4305 *
4306 * If the previous context was zombie, then it got removed in
4307 * pfm_save_regs(). Therefore we should not see it here.
4308 * If we see a context, then this is an active context
4309 *
4310 * XXX: needs to be atomic
4311 */
4312 DPRINT(("before cmpxchg() old_ctx=%p new_ctx=%p\n",
4313 thread->pfm_context, ctx));
4314
6bf11e8c 4315 ret = -EBUSY;
1da177e4
LT
4316 old = ia64_cmpxchg(acq, &thread->pfm_context, NULL, ctx, sizeof(pfm_context_t *));
4317 if (old != NULL) {
4318 DPRINT(("load_pid [%d] already has a context\n", req->load_pid));
4319 goto error_unres;
4320 }
4321
4322 pfm_reset_msgq(ctx);
4323
4324 ctx->ctx_state = PFM_CTX_LOADED;
4325
4326 /*
4327 * link context to task
4328 */
4329 ctx->ctx_task = task;
4330
4331 if (is_system) {
4332 /*
4333 * we load as stopped
4334 */
4335 PFM_CPUINFO_SET(PFM_CPUINFO_SYST_WIDE);
4336 PFM_CPUINFO_CLEAR(PFM_CPUINFO_DCR_PP);
4337
4338 if (ctx->ctx_fl_excl_idle) PFM_CPUINFO_SET(PFM_CPUINFO_EXCL_IDLE);
4339 } else {
4340 thread->flags |= IA64_THREAD_PM_VALID;
4341 }
4342
4343 /*
4344 * propagate into thread-state
4345 */
4346 pfm_copy_pmds(task, ctx);
4347 pfm_copy_pmcs(task, ctx);
4348
35589a8f
KA
4349 pmcs_source = ctx->th_pmcs;
4350 pmds_source = ctx->th_pmds;
1da177e4
LT
4351
4352 /*
4353 * always the case for system-wide
4354 */
4355 if (task == current) {
4356
4357 if (is_system == 0) {
4358
4359 /* allow user level control */
4360 ia64_psr(regs)->sp = 0;
19c5870c 4361 DPRINT(("clearing psr.sp for [%d]\n", task_pid_nr(task)));
1da177e4
LT
4362
4363 SET_LAST_CPU(ctx, smp_processor_id());
4364 INC_ACTIVATION();
4365 SET_ACTIVATION(ctx);
4366#ifndef CONFIG_SMP
4367 /*
4368 * push the other task out, if any
4369 */
4370 owner_task = GET_PMU_OWNER();
4371 if (owner_task) pfm_lazy_save_regs(owner_task);
4372#endif
4373 }
4374 /*
4375 * load all PMD from ctx to PMU (as opposed to thread state)
4376 * restore all PMC from ctx to PMU
4377 */
4378 pfm_restore_pmds(pmds_source, ctx->ctx_all_pmds[0]);
4379 pfm_restore_pmcs(pmcs_source, ctx->ctx_all_pmcs[0]);
4380
4381 ctx->ctx_reload_pmcs[0] = 0UL;
4382 ctx->ctx_reload_pmds[0] = 0UL;
4383
4384 /*
4385 * guaranteed safe by earlier check against DBG_VALID
4386 */
4387 if (ctx->ctx_fl_using_dbreg) {
4388 pfm_restore_ibrs(ctx->ctx_ibrs, pmu_conf->num_ibrs);
4389 pfm_restore_dbrs(ctx->ctx_dbrs, pmu_conf->num_dbrs);
4390 }
4391 /*
4392 * set new ownership
4393 */
4394 SET_PMU_OWNER(task, ctx);
4395
19c5870c 4396 DPRINT(("context loaded on PMU for [%d]\n", task_pid_nr(task)));
1da177e4
LT
4397 } else {
4398 /*
4399 * when not current, task MUST be stopped, so this is safe
4400 */
6450578f 4401 regs = task_pt_regs(task);
1da177e4
LT
4402
4403 /* force a full reload */
4404 ctx->ctx_last_activation = PFM_INVALID_ACTIVATION;
4405 SET_LAST_CPU(ctx, -1);
4406
4407 /* initial saved psr (stopped) */
4408 ctx->ctx_saved_psr_up = 0UL;
4409 ia64_psr(regs)->up = ia64_psr(regs)->pp = 0;
4410 }
4411
4412 ret = 0;
4413
4414error_unres:
4415 if (ret) pfm_unreserve_session(ctx, ctx->ctx_fl_system, the_cpu);
4416error:
4417 /*
4418 * we must undo the dbregs setting (for system-wide)
4419 */
4420 if (ret && set_dbregs) {
4421 LOCK_PFS(flags);
4422 pfm_sessions.pfs_sys_use_dbregs--;
4423 UNLOCK_PFS(flags);
4424 }
4425 /*
4426 * release task, there is now a link with the context
4427 */
4428 if (is_system == 0 && task != current) {
4429 pfm_put_task(task);
4430
4431 if (ret == 0) {
4432 ret = pfm_check_task_exist(ctx);
4433 if (ret) {
4434 ctx->ctx_state = PFM_CTX_UNLOADED;
4435 ctx->ctx_task = NULL;
4436 }
4437 }
4438 }
4439 return ret;
4440}
4441
4442/*
4443 * in this function, we do not need to increase the use count
4444 * for the task via get_task_struct(), because we hold the
4445 * context lock. If the task were to disappear while having
4446 * a context attached, it would go through pfm_exit_thread()
4447 * which also grabs the context lock and would therefore be blocked
4448 * until we are here.
4449 */
4450static void pfm_flush_pmds(struct task_struct *, pfm_context_t *ctx);
4451
4452static int
4453pfm_context_unload(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
4454{
4455 struct task_struct *task = PFM_CTX_TASK(ctx);
4456 struct pt_regs *tregs;
4457 int prev_state, is_system;
4458 int ret;
4459
19c5870c 4460 DPRINT(("ctx_state=%d task [%d]\n", ctx->ctx_state, task ? task_pid_nr(task) : -1));
1da177e4
LT
4461
4462 prev_state = ctx->ctx_state;
4463 is_system = ctx->ctx_fl_system;
4464
4465 /*
4466 * unload only when necessary
4467 */
4468 if (prev_state == PFM_CTX_UNLOADED) {
4469 DPRINT(("ctx_state=%d, nothing to do\n", prev_state));
4470 return 0;
4471 }
4472
4473 /*
4474 * clear psr and dcr bits
4475 */
4476 ret = pfm_stop(ctx, NULL, 0, regs);
4477 if (ret) return ret;
4478
4479 ctx->ctx_state = PFM_CTX_UNLOADED;
4480
4481 /*
4482 * in system mode, we need to update the PMU directly
4483 * and the user level state of the caller, which may not
4484 * necessarily be the creator of the context.
4485 */
4486 if (is_system) {
4487
4488 /*
4489 * Update cpuinfo
4490 *
4491 * local PMU is taken care of in pfm_stop()
4492 */
4493 PFM_CPUINFO_CLEAR(PFM_CPUINFO_SYST_WIDE);
4494 PFM_CPUINFO_CLEAR(PFM_CPUINFO_EXCL_IDLE);
4495
4496 /*
4497 * save PMDs in context
4498 * release ownership
4499 */
4500 pfm_flush_pmds(current, ctx);
4501
4502 /*
4503 * at this point we are done with the PMU
4504 * so we can unreserve the resource.
4505 */
4506 if (prev_state != PFM_CTX_ZOMBIE)
4507 pfm_unreserve_session(ctx, 1 , ctx->ctx_cpu);
4508
4509 /*
4510 * disconnect context from task
4511 */
4512 task->thread.pfm_context = NULL;
4513 /*
4514 * disconnect task from context
4515 */
4516 ctx->ctx_task = NULL;
4517
4518 /*
4519 * There is nothing more to cleanup here.
4520 */
4521 return 0;
4522 }
4523
4524 /*
4525 * per-task mode
4526 */
6450578f 4527 tregs = task == current ? regs : task_pt_regs(task);
1da177e4
LT
4528
4529 if (task == current) {
4530 /*
4531 * cancel user level control
4532 */
4533 ia64_psr(regs)->sp = 1;
4534
19c5870c 4535 DPRINT(("setting psr.sp for [%d]\n", task_pid_nr(task)));
1da177e4
LT
4536 }
4537 /*
4538 * save PMDs to context
4539 * release ownership
4540 */
4541 pfm_flush_pmds(task, ctx);
4542
4543 /*
4544 * at this point we are done with the PMU
4545 * so we can unreserve the resource.
4546 *
4547 * when state was ZOMBIE, we have already unreserved.
4548 */
4549 if (prev_state != PFM_CTX_ZOMBIE)
4550 pfm_unreserve_session(ctx, 0 , ctx->ctx_cpu);
4551
4552 /*
4553 * reset activation counter and psr
4554 */
4555 ctx->ctx_last_activation = PFM_INVALID_ACTIVATION;
4556 SET_LAST_CPU(ctx, -1);
4557
4558 /*
4559 * PMU state will not be restored
4560 */
4561 task->thread.flags &= ~IA64_THREAD_PM_VALID;
4562
4563 /*
4564 * break links between context and task
4565 */
4566 task->thread.pfm_context = NULL;
4567 ctx->ctx_task = NULL;
4568
4569 PFM_SET_WORK_PENDING(task, 0);
4570
4571 ctx->ctx_fl_trap_reason = PFM_TRAP_REASON_NONE;
4572 ctx->ctx_fl_can_restart = 0;
4573 ctx->ctx_fl_going_zombie = 0;
4574
19c5870c 4575 DPRINT(("disconnected [%d] from context\n", task_pid_nr(task)));
1da177e4
LT
4576
4577 return 0;
4578}
4579
4580
4581/*
4582 * called only from exit_thread(): task == current
4583 * we come here only if current has a context attached (loaded or masked)
4584 */
4585void
4586pfm_exit_thread(struct task_struct *task)
4587{
4588 pfm_context_t *ctx;
4589 unsigned long flags;
6450578f 4590 struct pt_regs *regs = task_pt_regs(task);
1da177e4
LT
4591 int ret, state;
4592 int free_ok = 0;
4593
4594 ctx = PFM_GET_CTX(task);
4595
4596 PROTECT_CTX(ctx, flags);
4597
19c5870c 4598 DPRINT(("state=%d task [%d]\n", ctx->ctx_state, task_pid_nr(task)));
1da177e4
LT
4599
4600 state = ctx->ctx_state;
4601 switch(state) {
4602 case PFM_CTX_UNLOADED:
4603 /*
72fdbdce 4604 * only comes to this function if pfm_context is not NULL, i.e., cannot
1da177e4
LT
4605 * be in unloaded state
4606 */
19c5870c 4607 printk(KERN_ERR "perfmon: pfm_exit_thread [%d] ctx unloaded\n", task_pid_nr(task));
1da177e4
LT
4608 break;
4609 case PFM_CTX_LOADED:
4610 case PFM_CTX_MASKED:
4611 ret = pfm_context_unload(ctx, NULL, 0, regs);
4612 if (ret) {
19c5870c 4613 printk(KERN_ERR "perfmon: pfm_exit_thread [%d] state=%d unload failed %d\n", task_pid_nr(task), state, ret);
1da177e4
LT
4614 }
4615 DPRINT(("ctx unloaded for current state was %d\n", state));
4616
4617 pfm_end_notify_user(ctx);
4618 break;
4619 case PFM_CTX_ZOMBIE:
4620 ret = pfm_context_unload(ctx, NULL, 0, regs);
4621 if (ret) {
19c5870c 4622 printk(KERN_ERR "perfmon: pfm_exit_thread [%d] state=%d unload failed %d\n", task_pid_nr(task), state, ret);
1da177e4
LT
4623 }
4624 free_ok = 1;
4625 break;
4626 default:
19c5870c 4627 printk(KERN_ERR "perfmon: pfm_exit_thread [%d] unexpected state=%d\n", task_pid_nr(task), state);
1da177e4
LT
4628 break;
4629 }
4630 UNPROTECT_CTX(ctx, flags);
4631
4632 { u64 psr = pfm_get_psr();
4633 BUG_ON(psr & (IA64_PSR_UP|IA64_PSR_PP));
4634 BUG_ON(GET_PMU_OWNER());
4635 BUG_ON(ia64_psr(regs)->up);
4636 BUG_ON(ia64_psr(regs)->pp);
4637 }
4638
4639 /*
4640 * All memory free operations (especially for vmalloc'ed memory)
4641 * MUST be done with interrupts ENABLED.
4642 */
4643 if (free_ok) pfm_context_free(ctx);
4644}
4645
4646/*
 4647 * functions MUST be listed in the increasing order of their index (see perfmon.h)
4648 */
4649#define PFM_CMD(name, flags, arg_count, arg_type, getsz) { name, #name, flags, arg_count, sizeof(arg_type), getsz }
4650#define PFM_CMD_S(name, flags) { name, #name, flags, 0, 0, NULL }
4651#define PFM_CMD_PCLRWS (PFM_CMD_FD|PFM_CMD_ARG_RW|PFM_CMD_STOP)
4652#define PFM_CMD_PCLRW (PFM_CMD_FD|PFM_CMD_ARG_RW)
4653#define PFM_CMD_NONE { NULL, "no-cmd", 0, 0, 0, NULL}
4654
4655static pfm_cmd_desc_t pfm_cmd_tab[]={
4656/* 0 */PFM_CMD_NONE,
4657/* 1 */PFM_CMD(pfm_write_pmcs, PFM_CMD_PCLRWS, PFM_CMD_ARG_MANY, pfarg_reg_t, NULL),
4658/* 2 */PFM_CMD(pfm_write_pmds, PFM_CMD_PCLRWS, PFM_CMD_ARG_MANY, pfarg_reg_t, NULL),
4659/* 3 */PFM_CMD(pfm_read_pmds, PFM_CMD_PCLRWS, PFM_CMD_ARG_MANY, pfarg_reg_t, NULL),
4660/* 4 */PFM_CMD_S(pfm_stop, PFM_CMD_PCLRWS),
4661/* 5 */PFM_CMD_S(pfm_start, PFM_CMD_PCLRWS),
4662/* 6 */PFM_CMD_NONE,
4663/* 7 */PFM_CMD_NONE,
4664/* 8 */PFM_CMD(pfm_context_create, PFM_CMD_ARG_RW, 1, pfarg_context_t, pfm_ctx_getsize),
4665/* 9 */PFM_CMD_NONE,
4666/* 10 */PFM_CMD_S(pfm_restart, PFM_CMD_PCLRW),
4667/* 11 */PFM_CMD_NONE,
4668/* 12 */PFM_CMD(pfm_get_features, PFM_CMD_ARG_RW, 1, pfarg_features_t, NULL),
4669/* 13 */PFM_CMD(pfm_debug, 0, 1, unsigned int, NULL),
4670/* 14 */PFM_CMD_NONE,
4671/* 15 */PFM_CMD(pfm_get_pmc_reset, PFM_CMD_ARG_RW, PFM_CMD_ARG_MANY, pfarg_reg_t, NULL),
4672/* 16 */PFM_CMD(pfm_context_load, PFM_CMD_PCLRWS, 1, pfarg_load_t, NULL),
4673/* 17 */PFM_CMD_S(pfm_context_unload, PFM_CMD_PCLRWS),
4674/* 18 */PFM_CMD_NONE,
4675/* 19 */PFM_CMD_NONE,
4676/* 20 */PFM_CMD_NONE,
4677/* 21 */PFM_CMD_NONE,
4678/* 22 */PFM_CMD_NONE,
4679/* 23 */PFM_CMD_NONE,
4680/* 24 */PFM_CMD_NONE,
4681/* 25 */PFM_CMD_NONE,
4682/* 26 */PFM_CMD_NONE,
4683/* 27 */PFM_CMD_NONE,
4684/* 28 */PFM_CMD_NONE,
4685/* 29 */PFM_CMD_NONE,
4686/* 30 */PFM_CMD_NONE,
4687/* 31 */PFM_CMD_NONE,
4688/* 32 */PFM_CMD(pfm_write_ibrs, PFM_CMD_PCLRWS, PFM_CMD_ARG_MANY, pfarg_dbreg_t, NULL),
4689/* 33 */PFM_CMD(pfm_write_dbrs, PFM_CMD_PCLRWS, PFM_CMD_ARG_MANY, pfarg_dbreg_t, NULL)
4690};
4691#define PFM_CMD_COUNT (sizeof(pfm_cmd_tab)/sizeof(pfm_cmd_desc_t))
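/*
 * sys_perfmonctl() below uses the command number as a direct index into
 * pfm_cmd_tab[]: entry 8, for instance, dispatches to pfm_context_create()
 * with a single pfarg_context_t argument whose extra (sampling format)
 * payload size comes from the pfm_ctx_getsize() callback, while the
 * PFM_CMD_NONE holes reject command numbers that are not implemented.
 */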
4692
4693static int
4694pfm_check_task_state(pfm_context_t *ctx, int cmd, unsigned long flags)
4695{
4696 struct task_struct *task;
4697 int state, old_state;
4698
4699recheck:
4700 state = ctx->ctx_state;
4701 task = ctx->ctx_task;
4702
4703 if (task == NULL) {
4704 DPRINT(("context %d no task, state=%d\n", ctx->ctx_fd, state));
4705 return 0;
4706 }
4707
4708 DPRINT(("context %d state=%d [%d] task_state=%ld must_stop=%d\n",
4709 ctx->ctx_fd,
4710 state,
19c5870c 4711 task_pid_nr(task),
1da177e4
LT
4712 task->state, PFM_CMD_STOPPED(cmd)));
4713
4714 /*
4715 * self-monitoring always ok.
4716 *
4717 * for system-wide the caller can either be the creator of the
 4718	 * context (the one to which the context is attached) OR
4719 * a task running on the same CPU as the session.
4720 */
4721 if (task == current || ctx->ctx_fl_system) return 0;
4722
4723 /*
a5a70b75 4724 * we are monitoring another thread
1da177e4 4725 */
a5a70b75 4726 switch(state) {
4727 case PFM_CTX_UNLOADED:
4728 /*
4729 * if context is UNLOADED we are safe to go
4730 */
4731 return 0;
4732 case PFM_CTX_ZOMBIE:
4733 /*
4734 * no command can operate on a zombie context
4735 */
4736 DPRINT(("cmd %d state zombie cannot operate on context\n", cmd));
4737 return -EINVAL;
4738 case PFM_CTX_MASKED:
4739 /*
4740 * PMU state has been saved to software even though
4741 * the thread may still be running.
4742 */
4743 if (cmd != PFM_UNLOAD_CONTEXT) return 0;
1da177e4
LT
4744 }
4745
4746 /*
4747 * context is LOADED or MASKED. Some commands may need to have
4748 * the task stopped.
4749 *
4750 * We could lift this restriction for UP but it would mean that
4751 * the user has no guarantee the task would not run between
4752 * two successive calls to perfmonctl(). That's probably OK.
4753 * If this user wants to ensure the task does not run, then
4754 * the task must be stopped.
4755 */
4756 if (PFM_CMD_STOPPED(cmd)) {
21498223 4757 if (!task_is_stopped_or_traced(task)) {
19c5870c 4758 DPRINT(("[%d] task not in stopped state\n", task_pid_nr(task)));
1da177e4
LT
4759 return -EBUSY;
4760 }
4761 /*
4762 * task is now stopped, wait for ctxsw out
4763 *
4764 * This is an interesting point in the code.
4765 * We need to unprotect the context because
 4766		 * the pfm_save_regs() routine needs to grab
 4767		 * the same lock. There is danger in doing
4768 * this because it leaves a window open for
4769 * another task to get access to the context
4770 * and possibly change its state. The one thing
4771 * that is not possible is for the context to disappear
4772 * because we are protected by the VFS layer, i.e.,
4773 * get_fd()/put_fd().
4774 */
4775 old_state = state;
4776
4777 UNPROTECT_CTX(ctx, flags);
4778
85ba2d86 4779 wait_task_inactive(task, 0);
1da177e4
LT
4780
4781 PROTECT_CTX(ctx, flags);
4782
4783 /*
4784 * we must recheck to verify if state has changed
4785 */
4786 if (ctx->ctx_state != old_state) {
4787 DPRINT(("old_state=%d new_state=%d\n", old_state, ctx->ctx_state));
4788 goto recheck;
4789 }
4790 }
4791 return 0;
4792}
4793
4794/*
4795 * system-call entry point (must return long)
4796 */
4797asmlinkage long
4798sys_perfmonctl (int fd, int cmd, void __user *arg, int count)
4799{
4800 struct file *file = NULL;
4801 pfm_context_t *ctx = NULL;
4802 unsigned long flags = 0UL;
4803 void *args_k = NULL;
4804 long ret; /* will expand int return types */
4805 size_t base_sz, sz, xtra_sz = 0;
4806 int narg, completed_args = 0, call_made = 0, cmd_flags;
4807 int (*func)(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs);
4808 int (*getsize)(void *arg, size_t *sz);
4809#define PFM_MAX_ARGSIZE 4096
4810
4811 /*
4812 * reject any call if perfmon was disabled at initialization
4813 */
4814 if (unlikely(pmu_conf == NULL)) return -ENOSYS;
4815
4816 if (unlikely(cmd < 0 || cmd >= PFM_CMD_COUNT)) {
4817 DPRINT(("invalid cmd=%d\n", cmd));
4818 return -EINVAL;
4819 }
4820
4821 func = pfm_cmd_tab[cmd].cmd_func;
4822 narg = pfm_cmd_tab[cmd].cmd_narg;
4823 base_sz = pfm_cmd_tab[cmd].cmd_argsize;
4824 getsize = pfm_cmd_tab[cmd].cmd_getsize;
4825 cmd_flags = pfm_cmd_tab[cmd].cmd_flags;
4826
4827 if (unlikely(func == NULL)) {
4828 DPRINT(("invalid cmd=%d\n", cmd));
4829 return -EINVAL;
4830 }
4831
4832 DPRINT(("cmd=%s idx=%d narg=0x%x argsz=%lu count=%d\n",
4833 PFM_CMD_NAME(cmd),
4834 cmd,
4835 narg,
4836 base_sz,
4837 count));
4838
4839 /*
4840 * check if number of arguments matches what the command expects
4841 */
4842 if (unlikely((narg == PFM_CMD_ARG_MANY && count <= 0) || (narg > 0 && narg != count)))
4843 return -EINVAL;
4844
4845restart_args:
4846 sz = xtra_sz + base_sz*count;
4847 /*
4848 * limit abuse to min page size
4849 */
4850 if (unlikely(sz > PFM_MAX_ARGSIZE)) {
19c5870c 4851 printk(KERN_ERR "perfmon: [%d] argument too big %lu\n", task_pid_nr(current), sz);
1da177e4
LT
4852 return -E2BIG;
4853 }
4854
4855 /*
4856 * allocate default-sized argument buffer
4857 */
4858 if (likely(count && args_k == NULL)) {
4859 args_k = kmalloc(PFM_MAX_ARGSIZE, GFP_KERNEL);
4860 if (args_k == NULL) return -ENOMEM;
4861 }
4862
4863 ret = -EFAULT;
4864
4865 /*
4866 * copy arguments
4867 *
4868 * assume sz = 0 for command without parameters
4869 */
4870 if (sz && copy_from_user(args_k, arg, sz)) {
4871 DPRINT(("cannot copy_from_user %lu bytes @%p\n", sz, arg));
4872 goto error_args;
4873 }
4874
4875 /*
4876 * check if command supports extra parameters
4877 */
4878 if (completed_args == 0 && getsize) {
4879 /*
4880 * get extra parameters size (based on main argument)
4881 */
4882 ret = (*getsize)(args_k, &xtra_sz);
4883 if (ret) goto error_args;
4884
4885 completed_args = 1;
4886
4887 DPRINT(("restart_args sz=%lu xtra_sz=%lu\n", sz, xtra_sz));
4888
4889 /* retry if necessary */
4890 if (likely(xtra_sz)) goto restart_args;
4891 }
4892
4893 if (unlikely((cmd_flags & PFM_CMD_FD) == 0)) goto skip_fd;
4894
4895 ret = -EBADF;
4896
4897 file = fget(fd);
4898 if (unlikely(file == NULL)) {
4899 DPRINT(("invalid fd %d\n", fd));
4900 goto error_args;
4901 }
4902 if (unlikely(PFM_IS_FILE(file) == 0)) {
4903 DPRINT(("fd %d not related to perfmon\n", fd));
4904 goto error_args;
4905 }
4906
4907 ctx = (pfm_context_t *)file->private_data;
4908 if (unlikely(ctx == NULL)) {
4909 DPRINT(("no context for fd %d\n", fd));
4910 goto error_args;
4911 }
4912 prefetch(&ctx->ctx_state);
4913
4914 PROTECT_CTX(ctx, flags);
4915
4916 /*
4917 * check task is stopped
4918 */
4919 ret = pfm_check_task_state(ctx, cmd, flags);
4920 if (unlikely(ret)) goto abort_locked;
4921
4922skip_fd:
6450578f 4923 ret = (*func)(ctx, args_k, count, task_pt_regs(current));
1da177e4
LT
4924
4925 call_made = 1;
4926
4927abort_locked:
4928 if (likely(ctx)) {
4929 DPRINT(("context unlocked\n"));
4930 UNPROTECT_CTX(ctx, flags);
1da177e4
LT
4931 }
4932
4933 /* copy argument back to user, if needed */
4934 if (call_made && PFM_CMD_RW_ARG(cmd) && copy_to_user(arg, args_k, base_sz*count)) ret = -EFAULT;
4935
4936error_args:
b8444d00
SE
4937 if (file)
4938 fput(file);
4939
b2325fe1 4940 kfree(args_k);
1da177e4
LT
4941
4942 DPRINT(("cmd=%s ret=%ld\n", PFM_CMD_NAME(cmd), ret));
4943
4944 return ret;
4945}
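/*
 * For illustration only, a rough self-monitoring sequence from user level
 * (through the perfmonctl() wrapper for this system call, command names
 * assumed from asm/perfmon.h, error handling omitted) might look like:
 *
 *	perfmonctl(0, PFM_CREATE_CONTEXT, &ctx_arg, 1);	 (context fd returned in ctx_arg)
 *	perfmonctl(fd, PFM_WRITE_PMCS, pmcs, npmcs);
 *	perfmonctl(fd, PFM_WRITE_PMDS, pmds, npmds);
 *	load_arg.load_pid = getpid();
 *	perfmonctl(fd, PFM_LOAD_CONTEXT, &load_arg, 1);
 *	perfmonctl(fd, PFM_START, NULL, 0);
 *	... workload ...
 *	perfmonctl(fd, PFM_STOP, NULL, 0);
 *	perfmonctl(fd, PFM_READ_PMDS, pmds, npmds);
 */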
4946
4947static void
4948pfm_resume_after_ovfl(pfm_context_t *ctx, unsigned long ovfl_regs, struct pt_regs *regs)
4949{
4950 pfm_buffer_fmt_t *fmt = ctx->ctx_buf_fmt;
4951 pfm_ovfl_ctrl_t rst_ctrl;
4952 int state;
4953 int ret = 0;
4954
4955 state = ctx->ctx_state;
4956 /*
4957 * Unlock sampling buffer and reset index atomically
4958 * XXX: not really needed when blocking
4959 */
4960 if (CTX_HAS_SMPL(ctx)) {
4961
4962 rst_ctrl.bits.mask_monitoring = 0;
4963 rst_ctrl.bits.reset_ovfl_pmds = 0;
4964
4965 if (state == PFM_CTX_LOADED)
4966 ret = pfm_buf_fmt_restart_active(fmt, current, &rst_ctrl, ctx->ctx_smpl_hdr, regs);
4967 else
4968 ret = pfm_buf_fmt_restart(fmt, current, &rst_ctrl, ctx->ctx_smpl_hdr, regs);
4969 } else {
4970 rst_ctrl.bits.mask_monitoring = 0;
4971 rst_ctrl.bits.reset_ovfl_pmds = 1;
4972 }
4973
4974 if (ret == 0) {
4975 if (rst_ctrl.bits.reset_ovfl_pmds) {
4976 pfm_reset_regs(ctx, &ovfl_regs, PFM_PMD_LONG_RESET);
4977 }
4978 if (rst_ctrl.bits.mask_monitoring == 0) {
4979 DPRINT(("resuming monitoring\n"));
4980 if (ctx->ctx_state == PFM_CTX_MASKED) pfm_restore_monitoring(current);
4981 } else {
4982 DPRINT(("stopping monitoring\n"));
4983 //pfm_stop_monitoring(current, regs);
4984 }
4985 ctx->ctx_state = PFM_CTX_LOADED;
4986 }
4987}
4988
4989/*
4990 * context MUST BE LOCKED when calling
4991 * can only be called for current
4992 */
4993static void
4994pfm_context_force_terminate(pfm_context_t *ctx, struct pt_regs *regs)
4995{
4996 int ret;
4997
19c5870c 4998 DPRINT(("entering for [%d]\n", task_pid_nr(current)));
1da177e4
LT
4999
5000 ret = pfm_context_unload(ctx, NULL, 0, regs);
5001 if (ret) {
19c5870c 5002 printk(KERN_ERR "pfm_context_force_terminate: [%d] unloaded failed with %d\n", task_pid_nr(current), ret);
1da177e4
LT
5003 }
5004
5005 /*
5006 * and wakeup controlling task, indicating we are now disconnected
5007 */
5008 wake_up_interruptible(&ctx->ctx_zombieq);
5009
5010 /*
5011 * given that context is still locked, the controlling
5012 * task will only get access when we return from
5013 * pfm_handle_work().
5014 */
5015}
5016
5017static int pfm_ovfl_notify_user(pfm_context_t *ctx, unsigned long ovfl_pmds);
0fb232fd 5018
4944930a
SE
5019 /*
5020 * pfm_handle_work() can be called with interrupts enabled
 5021 * (TIF_NEED_RESCHED) or disabled. The wait_for_completion_interruptible()
 5022 * call may sleep, therefore we must re-enable interrupts
5023 * to avoid deadlocks. It is safe to do so because this function
0fb232fd 5024 * is called ONLY when returning to user level (pUStk=1), in which case
4944930a
SE
5025 * there is no risk of kernel stack overflow due to deep
5026 * interrupt nesting.
5027 */
1da177e4
LT
5028void
5029pfm_handle_work(void)
5030{
5031 pfm_context_t *ctx;
5032 struct pt_regs *regs;
4944930a 5033 unsigned long flags, dummy_flags;
1da177e4
LT
5034 unsigned long ovfl_regs;
5035 unsigned int reason;
5036 int ret;
5037
5038 ctx = PFM_GET_CTX(current);
5039 if (ctx == NULL) {
0fb232fd
HS
5040 printk(KERN_ERR "perfmon: [%d] has no PFM context\n",
5041 task_pid_nr(current));
1da177e4
LT
5042 return;
5043 }
5044
5045 PROTECT_CTX(ctx, flags);
5046
5047 PFM_SET_WORK_PENDING(current, 0);
5048
6450578f 5049 regs = task_pt_regs(current);
1da177e4
LT
5050
5051 /*
5052 * extract reason for being here and clear
5053 */
5054 reason = ctx->ctx_fl_trap_reason;
5055 ctx->ctx_fl_trap_reason = PFM_TRAP_REASON_NONE;
5056 ovfl_regs = ctx->ctx_ovfl_regs[0];
5057
5058 DPRINT(("reason=%d state=%d\n", reason, ctx->ctx_state));
5059
5060 /*
5061 * must be done before we check for simple-reset mode
5062 */
0fb232fd
HS
5063 if (ctx->ctx_fl_going_zombie || ctx->ctx_state == PFM_CTX_ZOMBIE)
5064 goto do_zombie;
1da177e4
LT
5065
5066 //if (CTX_OVFL_NOBLOCK(ctx)) goto skip_blocking;
0fb232fd
HS
5067 if (reason == PFM_TRAP_REASON_RESET)
5068 goto skip_blocking;
1da177e4 5069
4944930a
SE
5070 /*
5071 * restore interrupt mask to what it was on entry.
 5072	 * Could be enabled/disabled.
5073 */
1da177e4
LT
5074 UNPROTECT_CTX(ctx, flags);
5075
4944930a
SE
5076 /*
 5077	 * force interrupt enable because of wait_for_completion_interruptible()
5078 */
1da177e4
LT
5079 local_irq_enable();
5080
5081 DPRINT(("before block sleeping\n"));
5082
5083 /*
5084 * may go through without blocking on SMP systems
 5085	 * if restart has been received already by the time we wait on the completion
5086 */
60f1c444 5087 ret = wait_for_completion_interruptible(&ctx->ctx_restart_done);
1da177e4
LT
5088
5089 DPRINT(("after block sleeping ret=%d\n", ret));
5090
5091 /*
4944930a
SE
5092 * lock context and mask interrupts again
5093 * We save flags into a dummy because we may have
5094 * altered interrupts mask compared to entry in this
5095 * function.
1da177e4 5096 */
4944930a 5097 PROTECT_CTX(ctx, dummy_flags);
1da177e4
LT
5098
5099 /*
5100 * we need to read the ovfl_regs only after wake-up
5101 * because we may have had pfm_write_pmds() in between
 5102	 * and that can change PMD values and therefore
5103 * ovfl_regs is reset for these new PMD values.
5104 */
5105 ovfl_regs = ctx->ctx_ovfl_regs[0];
5106
5107 if (ctx->ctx_fl_going_zombie) {
5108do_zombie:
5109 DPRINT(("context is zombie, bailing out\n"));
5110 pfm_context_force_terminate(ctx, regs);
5111 goto nothing_to_do;
5112 }
5113 /*
 5114	 * in case the wait was interrupted we don't restart anything
5115 */
0fb232fd
HS
5116 if (ret < 0)
5117 goto nothing_to_do;
1da177e4
LT
5118
5119skip_blocking:
5120 pfm_resume_after_ovfl(ctx, ovfl_regs, regs);
5121 ctx->ctx_ovfl_regs[0] = 0UL;
5122
5123nothing_to_do:
4944930a
SE
5124 /*
5125 * restore flags as they were upon entry
5126 */
1da177e4
LT
5127 UNPROTECT_CTX(ctx, flags);
5128}
5129
5130static int
5131pfm_notify_user(pfm_context_t *ctx, pfm_msg_t *msg)
5132{
5133 if (ctx->ctx_state == PFM_CTX_ZOMBIE) {
5134 DPRINT(("ignoring overflow notification, owner is zombie\n"));
5135 return 0;
5136 }
5137
5138 DPRINT(("waking up somebody\n"));
5139
5140 if (msg) wake_up_interruptible(&ctx->ctx_msgq_wait);
5141
5142 /*
5143 * safe, we are not in intr handler, nor in ctxsw when
5144 * we come here
5145 */
5146 kill_fasync (&ctx->ctx_async_queue, SIGIO, POLL_IN);
5147
5148 return 0;
5149}
5150
5151static int
5152pfm_ovfl_notify_user(pfm_context_t *ctx, unsigned long ovfl_pmds)
5153{
5154 pfm_msg_t *msg = NULL;
5155
5156 if (ctx->ctx_fl_no_msg == 0) {
5157 msg = pfm_get_new_msg(ctx);
5158 if (msg == NULL) {
5159 printk(KERN_ERR "perfmon: pfm_ovfl_notify_user no more notification msgs\n");
5160 return -1;
5161 }
5162
5163 msg->pfm_ovfl_msg.msg_type = PFM_MSG_OVFL;
5164 msg->pfm_ovfl_msg.msg_ctx_fd = ctx->ctx_fd;
5165 msg->pfm_ovfl_msg.msg_active_set = 0;
5166 msg->pfm_ovfl_msg.msg_ovfl_pmds[0] = ovfl_pmds;
5167 msg->pfm_ovfl_msg.msg_ovfl_pmds[1] = 0UL;
5168 msg->pfm_ovfl_msg.msg_ovfl_pmds[2] = 0UL;
5169 msg->pfm_ovfl_msg.msg_ovfl_pmds[3] = 0UL;
5170 msg->pfm_ovfl_msg.msg_tstamp = 0UL;
5171 }
5172
5173 DPRINT(("ovfl msg: msg=%p no_msg=%d fd=%d ovfl_pmds=0x%lx\n",
5174 msg,
5175 ctx->ctx_fl_no_msg,
5176 ctx->ctx_fd,
5177 ovfl_pmds));
5178
5179 return pfm_notify_user(ctx, msg);
5180}
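/*
 * The pfm_msg_t queued above is picked up by the monitoring tool via
 * read() on the context file descriptor (see pfm_read() earlier in this
 * file), or asynchronously through the SIGIO raised by kill_fasync() in
 * pfm_notify_user().
 */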
5181
5182static int
5183pfm_end_notify_user(pfm_context_t *ctx)
5184{
5185 pfm_msg_t *msg;
5186
5187 msg = pfm_get_new_msg(ctx);
5188 if (msg == NULL) {
5189 printk(KERN_ERR "perfmon: pfm_end_notify_user no more notification msgs\n");
5190 return -1;
5191 }
5192 /* no leak */
5193 memset(msg, 0, sizeof(*msg));
5194
5195 msg->pfm_end_msg.msg_type = PFM_MSG_END;
5196 msg->pfm_end_msg.msg_ctx_fd = ctx->ctx_fd;
5197 msg->pfm_ovfl_msg.msg_tstamp = 0UL;
5198
5199 DPRINT(("end msg: msg=%p no_msg=%d ctx_fd=%d\n",
5200 msg,
5201 ctx->ctx_fl_no_msg,
5202 ctx->ctx_fd));
5203
5204 return pfm_notify_user(ctx, msg);
5205}
5206
5207/*
5208 * main overflow processing routine.
72fdbdce 5209 * it can be called from the interrupt path or explicitly during the context switch code
1da177e4 5210 */
e088a4ad
MW
5211static void pfm_overflow_handler(struct task_struct *task, pfm_context_t *ctx,
5212 unsigned long pmc0, struct pt_regs *regs)
1da177e4
LT
5213{
5214 pfm_ovfl_arg_t *ovfl_arg;
5215 unsigned long mask;
5216 unsigned long old_val, ovfl_val, new_val;
5217 unsigned long ovfl_notify = 0UL, ovfl_pmds = 0UL, smpl_pmds = 0UL, reset_pmds;
5218 unsigned long tstamp;
5219 pfm_ovfl_ctrl_t ovfl_ctrl;
5220 unsigned int i, has_smpl;
5221 int must_notify = 0;
5222
5223 if (unlikely(ctx->ctx_state == PFM_CTX_ZOMBIE)) goto stop_monitoring;
5224
5225 /*
5226 * sanity test. Should never happen
5227 */
5228 if (unlikely((pmc0 & 0x1) == 0)) goto sanity_check;
5229
5230 tstamp = ia64_get_itc();
5231 mask = pmc0 >> PMU_FIRST_COUNTER;
5232 ovfl_val = pmu_conf->ovfl_val;
5233 has_smpl = CTX_HAS_SMPL(ctx);
5234
5235 DPRINT_ovfl(("pmc0=0x%lx pid=%d iip=0x%lx, %s "
5236 "used_pmds=0x%lx\n",
5237 pmc0,
19c5870c 5238 task ? task_pid_nr(task): -1,
1da177e4
LT
5239 (regs ? regs->cr_iip : 0),
5240 CTX_OVFL_NOBLOCK(ctx) ? "nonblocking" : "blocking",
5241 ctx->ctx_used_pmds[0]));
5242
5243
5244 /*
5245 * first we update the virtual counters
5246 * assume there was a prior ia64_srlz_d() issued
5247 */
5248 for (i = PMU_FIRST_COUNTER; mask ; i++, mask >>= 1) {
5249
5250 /* skip pmd which did not overflow */
5251 if ((mask & 0x1) == 0) continue;
5252
5253 /*
5254 * Note that the pmd is not necessarily 0 at this point as qualified events
5255 * may have happened before the PMU was frozen. The residual count is not
5256 * taken into consideration here but will be with any read of the pmd via
5257 * pfm_read_pmds().
5258 */
5259 old_val = new_val = ctx->ctx_pmds[i].val;
5260 new_val += 1 + ovfl_val;
5261 ctx->ctx_pmds[i].val = new_val;
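		/*
		 * ovfl_val is the largest value the hardware counter can hold
		 * (2^counter_width - 1), so one hardware overflow accounts for
		 * ovfl_val + 1 events folded into the 64-bit software value; a
		 * wrap of that 64-bit value (old_val > new_val below) is what
		 * counts as a real 64-bit overflow.
		 */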
5262
5263 /*
5264 * check for overflow condition
5265 */
5266 if (likely(old_val > new_val)) {
5267 ovfl_pmds |= 1UL << i;
5268 if (PMC_OVFL_NOTIFY(ctx, i)) ovfl_notify |= 1UL << i;
5269 }
5270
5271 DPRINT_ovfl(("ctx_pmd[%d].val=0x%lx old_val=0x%lx pmd=0x%lx ovfl_pmds=0x%lx ovfl_notify=0x%lx\n",
5272 i,
5273 new_val,
5274 old_val,
5275 ia64_get_pmd(i) & ovfl_val,
5276 ovfl_pmds,
5277 ovfl_notify));
5278 }
5279
5280 /*
5281 * there was no 64-bit overflow, nothing else to do
5282 */
5283 if (ovfl_pmds == 0UL) return;
5284
5285 /*
5286 * reset all control bits
5287 */
5288 ovfl_ctrl.val = 0;
5289 reset_pmds = 0UL;
5290
5291 /*
5292 * if a sampling format module exists, then we "cache" the overflow by
5293 * calling the module's handler() routine.
5294 */
5295 if (has_smpl) {
5296 unsigned long start_cycles, end_cycles;
5297 unsigned long pmd_mask;
5298 int j, k, ret = 0;
5299 int this_cpu = smp_processor_id();
5300
5301 pmd_mask = ovfl_pmds >> PMU_FIRST_COUNTER;
5302 ovfl_arg = &ctx->ctx_ovfl_arg;
5303
5304 prefetch(ctx->ctx_smpl_hdr);
5305
5306 for(i=PMU_FIRST_COUNTER; pmd_mask && ret == 0; i++, pmd_mask >>=1) {
5307
5308 mask = 1UL << i;
5309
5310 if ((pmd_mask & 0x1) == 0) continue;
5311
5312 ovfl_arg->ovfl_pmd = (unsigned char )i;
5313 ovfl_arg->ovfl_notify = ovfl_notify & mask ? 1 : 0;
5314 ovfl_arg->active_set = 0;
5315 ovfl_arg->ovfl_ctrl.val = 0; /* module must fill in all fields */
5316 ovfl_arg->smpl_pmds[0] = smpl_pmds = ctx->ctx_pmds[i].smpl_pmds[0];
5317
5318 ovfl_arg->pmd_value = ctx->ctx_pmds[i].val;
5319 ovfl_arg->pmd_last_reset = ctx->ctx_pmds[i].lval;
5320 ovfl_arg->pmd_eventid = ctx->ctx_pmds[i].eventid;
5321
5322 /*
5323 * copy values of pmds of interest. Sampling format may copy them
5324 * into sampling buffer.
5325 */
5326 if (smpl_pmds) {
5327 for(j=0, k=0; smpl_pmds; j++, smpl_pmds >>=1) {
5328 if ((smpl_pmds & 0x1) == 0) continue;
5329 ovfl_arg->smpl_pmds_values[k++] = PMD_IS_COUNTING(j) ? pfm_read_soft_counter(ctx, j) : ia64_get_pmd(j);
5330 DPRINT_ovfl(("smpl_pmd[%d]=pmd%u=0x%lx\n", k-1, j, ovfl_arg->smpl_pmds_values[k-1]));
5331 }
5332 }
5333
5334 pfm_stats[this_cpu].pfm_smpl_handler_calls++;
5335
5336 start_cycles = ia64_get_itc();
5337
5338 /*
5339 * call custom buffer format record (handler) routine
5340 */
5341 ret = (*ctx->ctx_buf_fmt->fmt_handler)(task, ctx->ctx_smpl_hdr, ovfl_arg, regs, tstamp);
5342
5343 end_cycles = ia64_get_itc();
5344
5345 /*
5346 * For those controls, we take the union because they have
5347 * an all or nothing behavior.
5348 */
5349 ovfl_ctrl.bits.notify_user |= ovfl_arg->ovfl_ctrl.bits.notify_user;
5350 ovfl_ctrl.bits.block_task |= ovfl_arg->ovfl_ctrl.bits.block_task;
5351 ovfl_ctrl.bits.mask_monitoring |= ovfl_arg->ovfl_ctrl.bits.mask_monitoring;
5352 /*
5353 * build the bitmask of pmds to reset now
5354 */
5355 if (ovfl_arg->ovfl_ctrl.bits.reset_ovfl_pmds) reset_pmds |= mask;
5356
5357 pfm_stats[this_cpu].pfm_smpl_handler_cycles += end_cycles - start_cycles;
5358 }
5359 /*
5360 * when the module cannot handle the rest of the overflows, we abort right here
5361 */
5362 if (ret && pmd_mask) {
5363 DPRINT(("handler aborts leftover ovfl_pmds=0x%lx\n",
5364 pmd_mask<<PMU_FIRST_COUNTER));
5365 }
5366 /*
5367 * remove the pmds we reset now from the set of pmds to reset in pfm_restart()
5368 */
5369 ovfl_pmds &= ~reset_pmds;
5370 } else {
5371 /*
5372 * when no sampling module is used, then the default
5373 * is to notify on overflow if requested by user
5374 */
5375 ovfl_ctrl.bits.notify_user = ovfl_notify ? 1 : 0;
5376 ovfl_ctrl.bits.block_task = ovfl_notify ? 1 : 0;
5377 ovfl_ctrl.bits.mask_monitoring = ovfl_notify ? 1 : 0; /* XXX: change for saturation */
5378 ovfl_ctrl.bits.reset_ovfl_pmds = ovfl_notify ? 0 : 1;
5379 /*
5380 * if needed, we reset all overflowed pmds
5381 */
5382 if (ovfl_notify == 0) reset_pmds = ovfl_pmds;
5383 }
5384
5385 DPRINT_ovfl(("ovfl_pmds=0x%lx reset_pmds=0x%lx\n", ovfl_pmds, reset_pmds));
5386
5387 /*
5388 * reset the requested PMD registers using the short reset values
5389 */
5390 if (reset_pmds) {
5391 unsigned long bm = reset_pmds;
5392 pfm_reset_regs(ctx, &bm, PFM_PMD_SHORT_RESET);
5393 }
5394
5395 if (ovfl_notify && ovfl_ctrl.bits.notify_user) {
5396 /*
5397 * keep track of what to reset when unblocking
5398 */
5399 ctx->ctx_ovfl_regs[0] = ovfl_pmds;
5400
5401 /*
5402 * check for blocking context
5403 */
5404 if (CTX_OVFL_NOBLOCK(ctx) == 0 && ovfl_ctrl.bits.block_task) {
5405
5406 ctx->ctx_fl_trap_reason = PFM_TRAP_REASON_BLOCK;
5407
5408 /*
5409 * set the perfmon specific checking pending work for the task
5410 */
5411 PFM_SET_WORK_PENDING(task, 1);
5412
5413 /*
5414 * when coming from ctxsw, current still points to the
5415 * previous task, therefore we must work with task and not current.
5416 */
f14488cc 5417 set_notify_resume(task);
1da177e4
LT
5418 }
5419 /*
5420 * defer until state is changed (shorten spin window). the context is locked
 5421		 * anyway, so the signal receiver would just come and spin for nothing.
5422 */
5423 must_notify = 1;
5424 }
5425
5426 DPRINT_ovfl(("owner [%d] pending=%ld reason=%u ovfl_pmds=0x%lx ovfl_notify=0x%lx masked=%d\n",
19c5870c 5427 GET_PMU_OWNER() ? task_pid_nr(GET_PMU_OWNER()) : -1,
1da177e4
LT
5428 PFM_GET_WORK_PENDING(task),
5429 ctx->ctx_fl_trap_reason,
5430 ovfl_pmds,
5431 ovfl_notify,
5432 ovfl_ctrl.bits.mask_monitoring ? 1 : 0));
5433 /*
5434 * in case monitoring must be stopped, we toggle the psr bits
5435 */
5436 if (ovfl_ctrl.bits.mask_monitoring) {
5437 pfm_mask_monitoring(task);
5438 ctx->ctx_state = PFM_CTX_MASKED;
5439 ctx->ctx_fl_can_restart = 1;
5440 }
5441
5442 /*
5443 * send notification now
5444 */
5445 if (must_notify) pfm_ovfl_notify_user(ctx, ovfl_notify);
5446
5447 return;
5448
5449sanity_check:
5450 printk(KERN_ERR "perfmon: CPU%d overflow handler [%d] pmc0=0x%lx\n",
5451 smp_processor_id(),
19c5870c 5452 task ? task_pid_nr(task) : -1,
1da177e4
LT
5453 pmc0);
5454 return;
5455
5456stop_monitoring:
5457 /*
5458 * in SMP, zombie context is never restored but reclaimed in pfm_load_regs().
5459 * Moreover, zombies are also reclaimed in pfm_save_regs(). Therefore we can
5460 * come here as zombie only if the task is the current task. In which case, we
5461 * can access the PMU hardware directly.
5462 *
5463 * Note that zombies do have PM_VALID set. So here we do the minimal.
5464 *
5465 * In case the context was zombified it could not be reclaimed at the time
5466 * the monitoring program exited. At this point, the PMU reservation has been
 5467	 * returned, the sampling buffer has been freed. We must convert this call
5468 * into a spurious interrupt. However, we must also avoid infinite overflows
5469 * by stopping monitoring for this task. We can only come here for a per-task
5470 * context. All we need to do is to stop monitoring using the psr bits which
 5471	 * are always task private. By re-enabling secure monitoring, we ensure that
5472 * the monitored task will not be able to re-activate monitoring.
5473 * The task will eventually be context switched out, at which point the context
5474 * will be reclaimed (that includes releasing ownership of the PMU).
5475 *
 5476	 * So there might be a window of time where the number of per-task sessions is zero
 5477	 * yet one PMU might have an owner and get at most one overflow interrupt for a zombie
 5478	 * context. This is safe because if a per-task session comes in, it will push this one
 5479	 * out and, by virtue of pfm_save_regs(), this one will disappear. If a system wide
 5480	 * session is forced on that CPU, given that we use task pinning, pfm_save_regs() will
5481 * also push our zombie context out.
5482 *
5483 * Overall pretty hairy stuff....
5484 */
19c5870c 5485 DPRINT(("ctx is zombie for [%d], converted to spurious\n", task ? task_pid_nr(task): -1));
1da177e4
LT
5486 pfm_clear_psr_up();
5487 ia64_psr(regs)->up = 0;
5488 ia64_psr(regs)->sp = 1;
5489 return;
5490}
5491
5492static int
9010eff0 5493pfm_do_interrupt_handler(void *arg, struct pt_regs *regs)
1da177e4
LT
5494{
5495 struct task_struct *task;
5496 pfm_context_t *ctx;
5497 unsigned long flags;
5498 u64 pmc0;
5499 int this_cpu = smp_processor_id();
5500 int retval = 0;
5501
5502 pfm_stats[this_cpu].pfm_ovfl_intr_count++;
5503
5504 /*
5505 * srlz.d done before arriving here
5506 */
5507 pmc0 = ia64_get_pmc(0);
5508
5509 task = GET_PMU_OWNER();
5510 ctx = GET_PMU_CTX();
5511
5512 /*
5513 * if we have some pending bits set
5514 * assumes : if any PMC0.bit[63-1] is set, then PMC0.fr = 1
5515 */
5516 if (PMC0_HAS_OVFL(pmc0) && task) {
5517 /*
5518 * we assume that pmc0.fr is always set here
5519 */
5520
5521 /* sanity check */
5522 if (!ctx) goto report_spurious1;
5523
5524 if (ctx->ctx_fl_system == 0 && (task->thread.flags & IA64_THREAD_PM_VALID) == 0)
5525 goto report_spurious2;
5526
5527 PROTECT_CTX_NOPRINT(ctx, flags);
5528
5529 pfm_overflow_handler(task, ctx, pmc0, regs);
5530
5531 UNPROTECT_CTX_NOPRINT(ctx, flags);
5532
5533 } else {
5534 pfm_stats[this_cpu].pfm_spurious_ovfl_intr_count++;
5535 retval = -1;
5536 }
5537 /*
5538 * keep it unfrozen at all times
5539 */
5540 pfm_unfreeze_pmu();
5541
5542 return retval;
5543
5544report_spurious1:
5545 printk(KERN_INFO "perfmon: spurious overflow interrupt on CPU%d: process %d has no PFM context\n",
19c5870c 5546 this_cpu, task_pid_nr(task));
1da177e4
LT
5547 pfm_unfreeze_pmu();
5548 return -1;
5549report_spurious2:
5550 printk(KERN_INFO "perfmon: spurious overflow interrupt on CPU%d: process %d, invalid flag\n",
5551 this_cpu,
19c5870c 5552 task_pid_nr(task));
1da177e4
LT
5553 pfm_unfreeze_pmu();
5554 return -1;
5555}
5556
5557static irqreturn_t
3bbe486b 5558pfm_interrupt_handler(int irq, void *arg)
1da177e4
LT
5559{
5560 unsigned long start_cycles, total_cycles;
5561 unsigned long min, max;
5562 int this_cpu;
5563 int ret;
3bbe486b 5564 struct pt_regs *regs = get_irq_regs();
1da177e4
LT
5565
5566 this_cpu = get_cpu();
a1ecf7f6
TL
5567 if (likely(!pfm_alt_intr_handler)) {
5568 min = pfm_stats[this_cpu].pfm_ovfl_intr_cycles_min;
5569 max = pfm_stats[this_cpu].pfm_ovfl_intr_cycles_max;
1da177e4 5570
a1ecf7f6 5571 start_cycles = ia64_get_itc();
1da177e4 5572
9010eff0 5573 ret = pfm_do_interrupt_handler(arg, regs);
1da177e4 5574
a1ecf7f6 5575 total_cycles = ia64_get_itc();
1da177e4 5576
a1ecf7f6
TL
5577 /*
5578 * don't measure spurious interrupts
5579 */
5580 if (likely(ret == 0)) {
5581 total_cycles -= start_cycles;
1da177e4 5582
a1ecf7f6
TL
5583 if (total_cycles < min) pfm_stats[this_cpu].pfm_ovfl_intr_cycles_min = total_cycles;
5584 if (total_cycles > max) pfm_stats[this_cpu].pfm_ovfl_intr_cycles_max = total_cycles;
1da177e4 5585
a1ecf7f6
TL
5586 pfm_stats[this_cpu].pfm_ovfl_intr_cycles += total_cycles;
5587 }
5588 }
5589 else {
5590 (*pfm_alt_intr_handler->handler)(irq, arg, regs);
1da177e4 5591 }
a1ecf7f6 5592
8b0b1db0 5593 put_cpu();
1da177e4
LT
5594 return IRQ_HANDLED;
5595}
5596
5597/*
5598 * /proc/perfmon interface, for debug only
5599 */
5600
fa276f36 5601#define PFM_PROC_SHOW_HEADER ((void *)(long)nr_cpu_ids+1)
1da177e4
LT
5602
5603static void *
5604pfm_proc_start(struct seq_file *m, loff_t *pos)
5605{
5606 if (*pos == 0) {
5607 return PFM_PROC_SHOW_HEADER;
5608 }
5609
5dd3c994 5610 while (*pos <= nr_cpu_ids) {
1da177e4
LT
5611 if (cpu_online(*pos - 1)) {
5612 return (void *)*pos;
5613 }
5614 ++*pos;
5615 }
5616 return NULL;
5617}
5618
5619static void *
5620pfm_proc_next(struct seq_file *m, void *v, loff_t *pos)
5621{
5622 ++*pos;
5623 return pfm_proc_start(m, pos);
5624}
5625
5626static void
5627pfm_proc_stop(struct seq_file *m, void *v)
5628{
5629}
5630
5631static void
5632pfm_proc_show_header(struct seq_file *m)
5633{
5634 struct list_head * pos;
5635 pfm_buffer_fmt_t * entry;
5636 unsigned long flags;
5637
5638 seq_printf(m,
5639 "perfmon version : %u.%u\n"
5640 "model : %s\n"
5641 "fastctxsw : %s\n"
5642 "expert mode : %s\n"
5643 "ovfl_mask : 0x%lx\n"
5644 "PMU flags : 0x%x\n",
5645 PFM_VERSION_MAJ, PFM_VERSION_MIN,
5646 pmu_conf->pmu_name,
5647 pfm_sysctl.fastctxsw > 0 ? "Yes": "No",
5648 pfm_sysctl.expert_mode > 0 ? "Yes": "No",
5649 pmu_conf->ovfl_val,
5650 pmu_conf->flags);
5651
5652 LOCK_PFS(flags);
5653
5654 seq_printf(m,
5655 "proc_sessions : %u\n"
5656 "sys_sessions : %u\n"
5657 "sys_use_dbregs : %u\n"
5658 "ptrace_use_dbregs : %u\n",
5659 pfm_sessions.pfs_task_sessions,
5660 pfm_sessions.pfs_sys_sessions,
5661 pfm_sessions.pfs_sys_use_dbregs,
5662 pfm_sessions.pfs_ptrace_use_dbregs);
5663
5664 UNLOCK_PFS(flags);
5665
5666 spin_lock(&pfm_buffer_fmt_lock);
5667
5668 list_for_each(pos, &pfm_buffer_fmt_list) {
5669 entry = list_entry(pos, pfm_buffer_fmt_t, fmt_list);
5670 seq_printf(m, "format : %02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x %s\n",
5671 entry->fmt_uuid[0],
5672 entry->fmt_uuid[1],
5673 entry->fmt_uuid[2],
5674 entry->fmt_uuid[3],
5675 entry->fmt_uuid[4],
5676 entry->fmt_uuid[5],
5677 entry->fmt_uuid[6],
5678 entry->fmt_uuid[7],
5679 entry->fmt_uuid[8],
5680 entry->fmt_uuid[9],
5681 entry->fmt_uuid[10],
5682 entry->fmt_uuid[11],
5683 entry->fmt_uuid[12],
5684 entry->fmt_uuid[13],
5685 entry->fmt_uuid[14],
5686 entry->fmt_uuid[15],
5687 entry->fmt_name);
5688 }
5689 spin_unlock(&pfm_buffer_fmt_lock);
5690
5691}
5692
5693static int
5694pfm_proc_show(struct seq_file *m, void *v)
5695{
5696 unsigned long psr;
5697 unsigned int i;
5698 int cpu;
5699
5700 if (v == PFM_PROC_SHOW_HEADER) {
5701 pfm_proc_show_header(m);
5702 return 0;
5703 }
5704
5705 /* show info for CPU (v - 1) */
5706
5707 cpu = (long)v - 1;
5708 seq_printf(m,
5709 "CPU%-2d overflow intrs : %lu\n"
5710 "CPU%-2d overflow cycles : %lu\n"
5711 "CPU%-2d overflow min : %lu\n"
5712 "CPU%-2d overflow max : %lu\n"
5713 "CPU%-2d smpl handler calls : %lu\n"
5714 "CPU%-2d smpl handler cycles : %lu\n"
5715 "CPU%-2d spurious intrs : %lu\n"
5716 "CPU%-2d replay intrs : %lu\n"
5717 "CPU%-2d syst_wide : %d\n"
5718 "CPU%-2d dcr_pp : %d\n"
5719 "CPU%-2d exclude idle : %d\n"
5720 "CPU%-2d owner : %d\n"
5721 "CPU%-2d context : %p\n"
5722 "CPU%-2d activations : %lu\n",
5723 cpu, pfm_stats[cpu].pfm_ovfl_intr_count,
5724 cpu, pfm_stats[cpu].pfm_ovfl_intr_cycles,
5725 cpu, pfm_stats[cpu].pfm_ovfl_intr_cycles_min,
5726 cpu, pfm_stats[cpu].pfm_ovfl_intr_cycles_max,
5727 cpu, pfm_stats[cpu].pfm_smpl_handler_calls,
5728 cpu, pfm_stats[cpu].pfm_smpl_handler_cycles,
5729 cpu, pfm_stats[cpu].pfm_spurious_ovfl_intr_count,
5730 cpu, pfm_stats[cpu].pfm_replay_ovfl_intr_count,
5731 cpu, pfm_get_cpu_data(pfm_syst_info, cpu) & PFM_CPUINFO_SYST_WIDE ? 1 : 0,
5732 cpu, pfm_get_cpu_data(pfm_syst_info, cpu) & PFM_CPUINFO_DCR_PP ? 1 : 0,
5733 cpu, pfm_get_cpu_data(pfm_syst_info, cpu) & PFM_CPUINFO_EXCL_IDLE ? 1 : 0,
5734 cpu, pfm_get_cpu_data(pmu_owner, cpu) ? pfm_get_cpu_data(pmu_owner, cpu)->pid: -1,
5735 cpu, pfm_get_cpu_data(pmu_ctx, cpu),
5736 cpu, pfm_get_cpu_data(pmu_activation_number, cpu));
5737
5738 if (num_online_cpus() == 1 && pfm_sysctl.debug > 0) {
5739
5740 psr = pfm_get_psr();
5741
5742 ia64_srlz_d();
5743
5744 seq_printf(m,
5745 "CPU%-2d psr : 0x%lx\n"
5746 "CPU%-2d pmc0 : 0x%lx\n",
5747 cpu, psr,
5748 cpu, ia64_get_pmc(0));
5749
5750 for (i=0; PMC_IS_LAST(i) == 0; i++) {
5751 if (PMC_IS_COUNTING(i) == 0) continue;
5752 seq_printf(m,
5753 "CPU%-2d pmc%u : 0x%lx\n"
5754 "CPU%-2d pmd%u : 0x%lx\n",
5755 cpu, i, ia64_get_pmc(i),
5756 cpu, i, ia64_get_pmd(i));
5757 }
5758 }
5759 return 0;
5760}
5761
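/*
 * seq_file operations backing /proc/perfmon: pfm_proc_show() emits one
 * header record followed by one record per CPU
 */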
a23fe55e 5762const struct seq_operations pfm_seq_ops = {
5763 .start = pfm_proc_start,
5764 .next = pfm_proc_next,
5765 .stop = pfm_proc_stop,
5766 .show = pfm_proc_show
5767};
5768
5769static int
5770pfm_proc_open(struct inode *inode, struct file *file)
5771{
5772 return seq_open(file, &pfm_seq_ops);
5773}
5774
5775
5776/*
5777 * we come here as soon as local_cpu_data->pfm_syst_wide is set. this happens
5778 * during pfm_enable() hence before pfm_start(). We cannot assume monitoring
5779 * is active or inactive based on mode. We must rely on the value in
5780 * local_cpu_data->pfm_syst_info
5781 */
5782void
5783pfm_syst_wide_update_task(struct task_struct *task, unsigned long info, int is_ctxswin)
5784{
5785 struct pt_regs *regs;
5786 unsigned long dcr;
5787 unsigned long dcr_pp;
5788
5789 dcr_pp = info & PFM_CPUINFO_DCR_PP ? 1 : 0;
5790
5791 /*
5792 * pid 0 is guaranteed to be the idle task. There is one such task with pid 0
5793 * on every CPU, so we can rely on the pid to identify the idle task.
5794 */
5795 if ((info & PFM_CPUINFO_EXCL_IDLE) == 0 || task->pid) {
6450578f 5796 regs = task_pt_regs(task);
5797 ia64_psr(regs)->pp = is_ctxswin ? dcr_pp : 0;
5798 return;
5799 }
5800 /*
5801 * if monitoring has started
5802 */
5803 if (dcr_pp) {
5804 dcr = ia64_getreg(_IA64_REG_CR_DCR);
5805 /*
5806 * context switching in?
5807 */
5808 if (is_ctxswin) {
5809 /* mask monitoring for the idle task */
5810 ia64_setreg(_IA64_REG_CR_DCR, dcr & ~IA64_DCR_PP);
5811 pfm_clear_psr_pp();
5812 ia64_srlz_i();
5813 return;
5814 }
5815 /*
5816 * context switching out
5817 * restore monitoring for next task
5818 *
5819 * Due to inlining, this odd if-then-else construction generates
5820 * better code.
5821 */
5822 ia64_setreg(_IA64_REG_CR_DCR, dcr | IA64_DCR_PP);
5823 pfm_set_psr_pp();
5824 ia64_srlz_i();
5825 }
5826}
5827
5828#ifdef CONFIG_SMP
5829
5830static void
5831pfm_force_cleanup(pfm_context_t *ctx, struct pt_regs *regs)
5832{
5833 struct task_struct *task = ctx->ctx_task;
5834
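	/*
	 * stop user-level monitoring for this task and set psr.sp so the
	 * PMU state can no longer be read from user level
	 */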
5835 ia64_psr(regs)->up = 0;
5836 ia64_psr(regs)->sp = 1;
5837
5838 if (GET_PMU_OWNER() == task) {
5839 DPRINT(("cleared ownership for [%d]\n",
5840 task_pid_nr(ctx->ctx_task)));
5841 SET_PMU_OWNER(NULL, NULL);
5842 }
5843
5844 /*
5845 * disconnect the task from the context and vice-versa
5846 */
5847 PFM_SET_WORK_PENDING(task, 0);
5848
5849 task->thread.pfm_context = NULL;
5850 task->thread.flags &= ~IA64_THREAD_PM_VALID;
5851
19c5870c 5852 DPRINT(("force cleanup for [%d]\n", task_pid_nr(task)));
5853}
5854
5855
5856/*
5857 * in 2.6, interrupts are masked when we come here and the runqueue lock is held
5858 */
5859void
5860pfm_save_regs(struct task_struct *task)
5861{
5862 pfm_context_t *ctx;
5863 unsigned long flags;
5864 u64 psr;
5865
5866
5867 ctx = PFM_GET_CTX(task);
5868 if (ctx == NULL) return;
5869
5870 /*
5871 * we always come here with interrupts ALREADY disabled by
5872 * the scheduler. So we simply need to protect against concurrent
5873 * access, not CPU concurrency.
5874 */
5875 flags = pfm_protect_ctx_ctxsw(ctx);
5876
5877 if (ctx->ctx_state == PFM_CTX_ZOMBIE) {
6450578f 5878 struct pt_regs *regs = task_pt_regs(task);
5879
5880 pfm_clear_psr_up();
5881
5882 pfm_force_cleanup(ctx, regs);
5883
5884 BUG_ON(ctx->ctx_smpl_hdr);
5885
5886 pfm_unprotect_ctx_ctxsw(ctx, flags);
5887
5888 pfm_context_free(ctx);
5889 return;
5890 }
5891
5892 /*
5893 * save current PSR: needed because we modify it
5894 */
5895 ia64_srlz_d();
5896 psr = pfm_get_psr();
5897
5898 BUG_ON(psr & (IA64_PSR_I));
5899
5900 /*
5901 * stop monitoring:
5902 * This is the last instruction which may generate an overflow
5903 *
5904 * We do not need to set psr.sp because it is irrelevant in the kernel.
5905 * It will be restored from ipsr when going back to user level
5906 */
5907 pfm_clear_psr_up();
5908
5909 /*
5910 * keep a copy of psr.up (for reload)
5911 */
5912 ctx->ctx_saved_psr_up = psr & IA64_PSR_UP;
5913
5914 /*
5915 * release ownership of this PMU.
5916 * PM interrupts are masked, so nothing
5917 * can happen.
5918 */
5919 SET_PMU_OWNER(NULL, NULL);
5920
5921 /*
5922 * we systematically save the PMDs as we have no
5923 * guarantee we will be scheduled on that same
5924 * CPU again.
5925 */
35589a8f 5926 pfm_save_pmds(ctx->th_pmds, ctx->ctx_used_pmds[0]);
5927
5928 /*
5929 * save pmc0 (ia64_srlz_d() already done in pfm_save_pmds());
5930 * we will need it on the restore path to check
5931 * for pending overflow.
5932 */
35589a8f 5933 ctx->th_pmcs[0] = ia64_get_pmc(0);
5934
5935 /*
5936 * unfreeze PMU if it had pending overflows
5937 */
35589a8f 5938 if (ctx->th_pmcs[0] & ~0x1UL) pfm_unfreeze_pmu();
5939
5940 /*
5941 * finally, allow context access.
5942 * interrupts will still be masked after this call.
5943 */
5944 pfm_unprotect_ctx_ctxsw(ctx, flags);
5945}
5946
5947#else /* !CONFIG_SMP */
5948void
5949pfm_save_regs(struct task_struct *task)
5950{
5951 pfm_context_t *ctx;
5952 u64 psr;
5953
5954 ctx = PFM_GET_CTX(task);
5955 if (ctx == NULL) return;
5956
5957 /*
5958 * save current PSR: needed because we modify it
5959 */
5960 psr = pfm_get_psr();
5961
5962 BUG_ON(psr & (IA64_PSR_I));
5963
5964 /*
5965 * stop monitoring:
5966 * This is the last instruction which may generate an overflow
5967 *
5968 * We do not need to set psr.sp because it is irrelevant in the kernel.
5969 * It will be restored from ipsr when going back to user level
5970 */
5971 pfm_clear_psr_up();
5972
5973 /*
5974 * keep a copy of psr.up (for reload)
5975 */
5976 ctx->ctx_saved_psr_up = psr & IA64_PSR_UP;
5977}
5978
5979static void
5980pfm_lazy_save_regs (struct task_struct *task)
5981{
5982 pfm_context_t *ctx;
5983 unsigned long flags;
5984
5985 { u64 psr = pfm_get_psr();
5986 BUG_ON(psr & IA64_PSR_UP);
5987 }
5988
5989 ctx = PFM_GET_CTX(task);
5990
5991 /*
5992 * we need to mask PMU overflow here to
5993 * make sure that we maintain pmc0 until
5994 * we save it. overflow interrupts are
5995 * treated as spurious if there is no
5996 * owner.
5997 *
5998 * XXX: I don't think this is necessary
5999 */
6000 PROTECT_CTX(ctx,flags);
6001
6002 /*
6003 * release ownership of this PMU.
6004 * must be done before we save the registers.
6005 *
6006 * after this call any PMU interrupt is treated
6007 * as spurious.
6008 */
6009 SET_PMU_OWNER(NULL, NULL);
6010
6011 /*
6012 * save all the pmds we use
6013 */
35589a8f 6014 pfm_save_pmds(ctx->th_pmds, ctx->ctx_used_pmds[0]);
6015
6016 /*
6017 * save pmc0 (ia64_srlz_d() already done in pfm_save_pmds());
6018 * it is needed to check for pending overflow
6019 * on the restore path
6020 */
35589a8f 6021 ctx->th_pmcs[0] = ia64_get_pmc(0);
6022
6023 /*
6024 * unfreeze PMU if it had pending overflows
6025 */
35589a8f 6026 if (ctx->th_pmcs[0] & ~0x1UL) pfm_unfreeze_pmu();
6027
6028 /*
6029 * now we can unmask PMU interrupts; they will
6030 * be treated as purely spurious and we will not
6031 * lose any information
6032 */
6033 UNPROTECT_CTX(ctx,flags);
6034}
6035#endif /* CONFIG_SMP */
6036
6037#ifdef CONFIG_SMP
6038/*
6039 * in 2.6, interrupts are masked when we come here and the runqueue lock is held
6040 */
6041void
6042pfm_load_regs (struct task_struct *task)
6043{
6044 pfm_context_t *ctx;
6045 unsigned long pmc_mask = 0UL, pmd_mask = 0UL;
6046 unsigned long flags;
6047 u64 psr, psr_up;
6048 int need_irq_resend;
6049
6050 ctx = PFM_GET_CTX(task);
6051 if (unlikely(ctx == NULL)) return;
6052
6053 BUG_ON(GET_PMU_OWNER());
6054
6055 /*
6056 * possible on unload
6057 */
35589a8f 6058 if (unlikely((task->thread.flags & IA64_THREAD_PM_VALID) == 0)) return;
6059
6060 /*
6061 * we always come here with interrupts ALREADY disabled by
6062 * the scheduler. So we simply need to protect against concurrent
6063 * access, not CPU concurrency.
6064 */
6065 flags = pfm_protect_ctx_ctxsw(ctx);
6066 psr = pfm_get_psr();
6067
6068 need_irq_resend = pmu_conf->flags & PFM_PMU_IRQ_RESEND;
6069
6070 BUG_ON(psr & (IA64_PSR_UP|IA64_PSR_PP));
6071 BUG_ON(psr & IA64_PSR_I);
6072
6073 if (unlikely(ctx->ctx_state == PFM_CTX_ZOMBIE)) {
6450578f 6074 struct pt_regs *regs = task_pt_regs(task);
6075
6076 BUG_ON(ctx->ctx_smpl_hdr);
6077
6078 pfm_force_cleanup(ctx, regs);
6079
6080 pfm_unprotect_ctx_ctxsw(ctx, flags);
6081
6082 /*
6083 * this one (kmalloc'ed) is fine with interrupts disabled
6084 */
6085 pfm_context_free(ctx);
6086
6087 return;
6088 }
6089
6090 /*
6091 * we restore ALL the debug registers to avoid picking up
6092 * stale state.
6093 */
6094 if (ctx->ctx_fl_using_dbreg) {
6095 pfm_restore_ibrs(ctx->ctx_ibrs, pmu_conf->num_ibrs);
6096 pfm_restore_dbrs(ctx->ctx_dbrs, pmu_conf->num_dbrs);
6097 }
6098 /*
6099 * retrieve saved psr.up
6100 */
6101 psr_up = ctx->ctx_saved_psr_up;
6102
6103 /*
6104 * if we were the last user of the PMU on that CPU,
6105 * then nothing to do except restore psr
6106 */
6107 if (GET_LAST_CPU(ctx) == smp_processor_id() && ctx->ctx_last_activation == GET_ACTIVATION()) {
6108
6109 /*
6110 * retrieve partial reload masks (due to user modifications)
6111 */
6112 pmc_mask = ctx->ctx_reload_pmcs[0];
6113 pmd_mask = ctx->ctx_reload_pmds[0];
6114
6115 } else {
6116 /*
6117 * To avoid leaking information to the user level when psr.sp=0,
6118 * we must reload ALL implemented pmds (even the ones we don't use).
6119 * In the kernel we only allow PFM_READ_PMDS on registers which
6120 * we initialized or requested (sampling) so there is no risk there.
6121 */
6122 pmd_mask = pfm_sysctl.fastctxsw ? ctx->ctx_used_pmds[0] : ctx->ctx_all_pmds[0];
6123
6124 /*
6125 * ALL accessible PMCs are systematically reloaded, unused registers
6126 * get their default (from pfm_reset_pmu_state()) values to avoid picking
6127 * up stale configuration.
6128 *
6129 * PMC0 is never in the mask. It is always restored separately.
6130 */
6131 pmc_mask = ctx->ctx_all_pmcs[0];
6132 }
6133 /*
6134 * when context is MASKED, we will restore PMC with plm=0
6135 * and PMD with stale information, but that's ok, nothing
6136 * will be captured.
6137 *
6138 * XXX: optimize here
6139 */
6140 if (pmd_mask) pfm_restore_pmds(ctx->th_pmds, pmd_mask);
6141 if (pmc_mask) pfm_restore_pmcs(ctx->th_pmcs, pmc_mask);
6142
6143 /*
6144 * check for pending overflow at the time the state
6145 * was saved.
6146 */
35589a8f 6147 if (unlikely(PMC0_HAS_OVFL(ctx->th_pmcs[0]))) {
6148 /*
6149 * reload pmc0 with the overflow information
6150 * On McKinley PMU, this will trigger a PMU interrupt
6151 */
35589a8f 6152 ia64_set_pmc(0, ctx->th_pmcs[0]);
1da177e4 6153 ia64_srlz_d();
35589a8f 6154 ctx->th_pmcs[0] = 0UL;
6155
6156 /*
6157 * will replay the PMU interrupt
6158 */
c0ad90a3 6159 if (need_irq_resend) ia64_resend_irq(IA64_PERFMON_VECTOR);
6160
6161 pfm_stats[smp_processor_id()].pfm_replay_ovfl_intr_count++;
6162 }
6163
6164 /*
6165 * we just did a reload, so we reset the partial reload fields
6166 */
6167 ctx->ctx_reload_pmcs[0] = 0UL;
6168 ctx->ctx_reload_pmds[0] = 0UL;
6169
6170 SET_LAST_CPU(ctx, smp_processor_id());
6171
6172 /*
6173 * bump the activation value for this PMU
6174 */
6175 INC_ACTIVATION();
6176 /*
6177 * record current activation for this context
6178 */
6179 SET_ACTIVATION(ctx);
6180
6181 /*
6182 * establish new ownership.
6183 */
6184 SET_PMU_OWNER(task, ctx);
6185
6186 /*
6187 * restore the psr.up bit. measurement
6188 * is active again.
6189 * no PMU interrupt can happen at this point
6190 * because we still have interrupts disabled.
6191 */
6192 if (likely(psr_up)) pfm_set_psr_up();
6193
6194 /*
6195 * allow concurrent access to context
6196 */
6197 pfm_unprotect_ctx_ctxsw(ctx, flags);
6198}
6199#else /* !CONFIG_SMP */
6200/*
6201 * reload PMU state for UP kernels
6202 * in 2.5 we come here with interrupts disabled
6203 */
6204void
6205pfm_load_regs (struct task_struct *task)
6206{
6207 pfm_context_t *ctx;
6208 struct task_struct *owner;
6209 unsigned long pmd_mask, pmc_mask;
6210 u64 psr, psr_up;
6211 int need_irq_resend;
6212
6213 owner = GET_PMU_OWNER();
6214 ctx = PFM_GET_CTX(task);
6215 psr = pfm_get_psr();
6216
6217 BUG_ON(psr & (IA64_PSR_UP|IA64_PSR_PP));
6218 BUG_ON(psr & IA64_PSR_I);
6219
6220 /*
6221 * we restore ALL the debug registers to avoid picking up
6222 * stale state.
6223 *
6224 * This must be done even when the task is still the owner
6225 * as the registers may have been modified via ptrace()
6226 * (not perfmon) by the previous task.
6227 */
6228 if (ctx->ctx_fl_using_dbreg) {
6229 pfm_restore_ibrs(ctx->ctx_ibrs, pmu_conf->num_ibrs);
6230 pfm_restore_dbrs(ctx->ctx_dbrs, pmu_conf->num_dbrs);
6231 }
6232
6233 /*
6234 * retrieve saved psr.up
6235 */
6236 psr_up = ctx->ctx_saved_psr_up;
6237 need_irq_resend = pmu_conf->flags & PFM_PMU_IRQ_RESEND;
6238
6239 /*
6240 * short path, our state is still there, just
6241 * need to restore psr and we go
6242 *
6243 * we do not touch either PMC nor PMD. the psr is not touched
6244 * by the overflow_handler. So we are safe w.r.t. to interrupt
6245 * concurrency even without interrupt masking.
6246 */
6247 if (likely(owner == task)) {
6248 if (likely(psr_up)) pfm_set_psr_up();
6249 return;
6250 }
6251
6252 /*
6253 * someone else is still using the PMU, first push it out and
6254 * then we'll be able to install our stuff !
6255 *
6256 * Upon return, there will be no owner for the current PMU
6257 */
6258 if (owner) pfm_lazy_save_regs(owner);
6259
6260 /*
6261 * To avoid leaking information to the user level when psr.sp=0,
6262 * we must reload ALL implemented pmds (even the ones we don't use).
6263 * In the kernel we only allow PFM_READ_PMDS on registers which
6264 * we initialized or requested (sampling) so there is no risk there.
6265 */
6266 pmd_mask = pfm_sysctl.fastctxsw ? ctx->ctx_used_pmds[0] : ctx->ctx_all_pmds[0];
6267
6268 /*
6269 * ALL accessible PMCs are systematically reloaded, unused registers
6270 * get their default (from pfm_reset_pmu_state()) values to avoid picking
6271 * up stale configuration.
6272 *
6273 * PMC0 is never in the mask. It is always restored separately
6274 */
6275 pmc_mask = ctx->ctx_all_pmcs[0];
6276
6277 pfm_restore_pmds(ctx->th_pmds, pmd_mask);
6278 pfm_restore_pmcs(ctx->th_pmcs, pmc_mask);
6279
6280 /*
6281 * check for pending overflow at the time the state
6282 * was saved.
6283 */
35589a8f 6284 if (unlikely(PMC0_HAS_OVFL(ctx->th_pmcs[0]))) {
6285 /*
6286 * reload pmc0 with the overflow information
6287 * On McKinley PMU, this will trigger a PMU interrupt
6288 */
35589a8f 6289 ia64_set_pmc(0, ctx->th_pmcs[0]);
6290 ia64_srlz_d();
6291
35589a8f 6292 ctx->th_pmcs[0] = 0UL;
6293
6294 /*
6295 * will replay the PMU interrupt
6296 */
c0ad90a3 6297 if (need_irq_resend) ia64_resend_irq(IA64_PERFMON_VECTOR);
6298
6299 pfm_stats[smp_processor_id()].pfm_replay_ovfl_intr_count++;
6300 }
6301
6302 /*
6303 * establish new ownership.
6304 */
6305 SET_PMU_OWNER(task, ctx);
6306
6307 /*
6308 * restore the psr.up bit. measurement
6309 * is active again.
6310 * no PMU interrupt can happen at this point
6311 * because we still have interrupts disabled.
6312 */
6313 if (likely(psr_up)) pfm_set_psr_up();
6314}
6315#endif /* CONFIG_SMP */
6316
6317/*
6318 * this function assumes monitoring is stopped
6319 */
6320static void
6321pfm_flush_pmds(struct task_struct *task, pfm_context_t *ctx)
6322{
6323 u64 pmc0;
6324 unsigned long mask2, val, pmd_val, ovfl_val;
6325 int i, can_access_pmu = 0;
6326 int is_self;
6327
6328 /*
6329 * is the caller the task being monitored (or which initiated the
6330 * session for system wide measurements)
6331 */
6332 is_self = ctx->ctx_task == task ? 1 : 0;
6333
6334 /*
6335 * can access PMU if task is the owner of the PMU state on the current CPU
6336 * or if we are running on the CPU bound to the context in system-wide mode
6337 * (that is not necessarily the task the context is attached to in this mode).
6338 * In system-wide we always have can_access_pmu true because a task running on an
6339 * invalid processor is flagged earlier in the call stack (see pfm_stop).
6340 */
6341 can_access_pmu = (GET_PMU_OWNER() == task) || (ctx->ctx_fl_system && ctx->ctx_cpu == smp_processor_id());
6342 if (can_access_pmu) {
6343 /*
6344 * Mark the PMU as not owned
6345 * This will cause the interrupt handler to do nothing in case an overflow
6346 * interrupt was in-flight
6347 * This also guarantees that pmc0 will contain the final state
6348 * It virtually gives us full control on overflow processing from that point
6349 * on.
6350 */
6351 SET_PMU_OWNER(NULL, NULL);
6352 DPRINT(("releasing ownership\n"));
6353
6354 /*
6355 * read current overflow status:
6356 *
6357 * we are guaranteed to read the final stable state
6358 */
6359 ia64_srlz_d();
6360 pmc0 = ia64_get_pmc(0); /* slow */
6361
6362 /*
6363 * reset freeze bit, overflow status information destroyed
6364 */
6365 pfm_unfreeze_pmu();
6366 } else {
35589a8f 6367 pmc0 = ctx->th_pmcs[0];
6368 /*
6369 * clear whatever overflow status bits there were
6370 */
35589a8f 6371 ctx->th_pmcs[0] = 0;
6372 }
6373 ovfl_val = pmu_conf->ovfl_val;
6374 /*
6375 * we save all the used pmds
6376 * we take care of overflows for counting PMDs
6377 *
6378 * XXX: sampling situation is not taken into account here
6379 */
6380 mask2 = ctx->ctx_used_pmds[0];
6381
6382 DPRINT(("is_self=%d ovfl_val=0x%lx mask2=0x%lx\n", is_self, ovfl_val, mask2));
6383
6384 for (i = 0; mask2; i++, mask2>>=1) {
6385
6386 /* skip non used pmds */
6387 if ((mask2 & 0x1) == 0) continue;
6388
6389 /*
6390 * can access PMU always true in system wide mode
6391 */
35589a8f 6392 val = pmd_val = can_access_pmu ? ia64_get_pmd(i) : ctx->th_pmds[i];
6393
6394 if (PMD_IS_COUNTING(i)) {
6395 DPRINT(("[%d] pmd[%d] ctx_pmd=0x%lx hw_pmd=0x%lx\n",
19c5870c 6396 task_pid_nr(task),
6397 i,
6398 ctx->ctx_pmds[i].val,
6399 val & ovfl_val));
6400
6401 /*
6402 * we rebuild the full 64 bit value of the counter
6403 */
6404 val = ctx->ctx_pmds[i].val + (val & ovfl_val);
6405
6406 /*
6407 * now everything is in ctx_pmds[] and we need
6408 * to clear the saved context from save_regs() such that
6409 * pfm_read_pmds() gets the correct value
6410 */
6411 pmd_val = 0UL;
6412
6413 /*
6414 * take care of overflow inline
6415 */
6416 if (pmc0 & (1UL << i)) {
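				/* the hardware counter wrapped: account for one full period (ovfl_val + 1) */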
6417 val += 1 + ovfl_val;
19c5870c 6418 DPRINT(("[%d] pmd[%d] overflowed\n", task_pid_nr(task), i));
6419 }
6420 }
6421
19c5870c 6422 DPRINT(("[%d] ctx_pmd[%d]=0x%lx pmd_val=0x%lx\n", task_pid_nr(task), i, val, pmd_val));
1da177e4 6423
35589a8f 6424 if (is_self) ctx->th_pmds[i] = pmd_val;
6425
6426 ctx->ctx_pmds[i].val = val;
6427 }
6428}
6429
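/*
 * irqaction for the PMU overflow vector, registered from pfm_init_percpu()
 */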
6430static struct irqaction perfmon_irqaction = {
6431 .handler = pfm_interrupt_handler,
121a4226 6432 .flags = IRQF_DISABLED,
6433 .name = "perfmon"
6434};
6435
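/*
 * run on every CPU (via on_each_cpu()) to stop monitoring and freeze the
 * PMU before an alternate interrupt handler takes over
 */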
6436static void
6437pfm_alt_save_pmu_state(void *data)
6438{
6439 struct pt_regs *regs;
6440
6450578f 6441 regs = task_pt_regs(current);
6442
6443 DPRINT(("called\n"));
6444
6445 /*
6446 * should not be necessary but
6447 * let's not take any risk
6448 */
6449 pfm_clear_psr_up();
6450 pfm_clear_psr_pp();
6451 ia64_psr(regs)->pp = 0;
6452
6453 /*
6454 * This call is required
6455 * May cause a spurious interrupt on some processors
6456 */
6457 pfm_freeze_pmu();
6458
6459 ia64_srlz_d();
6460}
6461
6462void
6463pfm_alt_restore_pmu_state(void *data)
6464{
6465 struct pt_regs *regs;
6466
6450578f 6467 regs = task_pt_regs(current);
6468
6469 DPRINT(("called\n"));
6470
6471 /*
6472 * put PMU back in state expected
6473 * by perfmon
6474 */
6475 pfm_clear_psr_up();
6476 pfm_clear_psr_pp();
6477 ia64_psr(regs)->pp = 0;
6478
6479 /*
6480 * perfmon runs with PMU unfrozen at all times
6481 */
6482 pfm_unfreeze_pmu();
6483
6484 ia64_srlz_d();
6485}
6486
6487int
6488pfm_install_alt_pmu_interrupt(pfm_intr_handler_desc_t *hdl)
6489{
6490 int ret, i;
6491 int reserve_cpu;
6492
6493 /* some sanity checks */
6494 if (hdl == NULL || hdl->handler == NULL) return -EINVAL;
6495
6496 /* do the easy test first */
6497 if (pfm_alt_intr_handler) return -EBUSY;
6498
6499 /* one at a time in the install or remove, just fail the others */
6500 if (!spin_trylock(&pfm_alt_install_check)) {
6501 return -EBUSY;
6502 }
6503
6504 /* reserve our session */
6505 for_each_online_cpu(reserve_cpu) {
6506 ret = pfm_reserve_session(NULL, 1, reserve_cpu);
6507 if (ret) goto cleanup_reserve;
6508 }
6509
6510 /* save the current system wide pmu states */
15c8b6c1 6511 ret = on_each_cpu(pfm_alt_save_pmu_state, NULL, 1);
6512 if (ret) {
6513 DPRINT(("on_each_cpu() failed: %d\n", ret));
6514 goto cleanup_reserve;
6515 }
6516
6517 /* officially change to the alternate interrupt handler */
6518 pfm_alt_intr_handler = hdl;
6519
6520 spin_unlock(&pfm_alt_install_check);
6521
6522 return 0;
6523
6524cleanup_reserve:
6525 for_each_online_cpu(i) {
6526 /* don't unreserve more than we reserved */
6527 if (i >= reserve_cpu) break;
6528
6529 pfm_unreserve_session(NULL, 1, i);
6530 }
6531
6532 spin_unlock(&pfm_alt_install_check);
6533
6534 return ret;
6535}
6536EXPORT_SYMBOL_GPL(pfm_install_alt_pmu_interrupt);
6537
6538int
6539pfm_remove_alt_pmu_interrupt(pfm_intr_handler_desc_t *hdl)
6540{
6541 int i;
6542 int ret;
6543
6544 if (hdl == NULL) return -EINVAL;
6545
6546 /* cannot remove someone else's handler! */
6547 if (pfm_alt_intr_handler != hdl) return -EINVAL;
6548
6549 /* one at a time in the install or remove, just fail the others */
6550 if (!spin_trylock(&pfm_alt_install_check)) {
6551 return -EBUSY;
6552 }
6553
6554 pfm_alt_intr_handler = NULL;
6555
15c8b6c1 6556 ret = on_each_cpu(pfm_alt_restore_pmu_state, NULL, 1);
6557 if (ret) {
6558 DPRINT(("on_each_cpu() failed: %d\n", ret));
6559 }
6560
6561 for_each_online_cpu(i) {
6562 pfm_unreserve_session(NULL, 1, i);
6563 }
6564
6565 spin_unlock(&pfm_alt_install_check);
6566
6567 return 0;
6568}
6569EXPORT_SYMBOL_GPL(pfm_remove_alt_pmu_interrupt);
6570
6571/*
6572 * perfmon initialization routine, called from the initcall() table
6573 */
6574static int init_pfm_fs(void);
6575
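/*
 * walk the table of known PMU configurations: an entry with a probe()
 * hook must accept the CPU, otherwise the CPU family is matched
 * (0xff acts as a wildcard family)
 */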
6576static int __init
6577pfm_probe_pmu(void)
6578{
6579 pmu_config_t **p;
6580 int family;
6581
6582 family = local_cpu_data->family;
6583 p = pmu_confs;
6584
6585 while(*p) {
6586 if ((*p)->probe) {
6587 if ((*p)->probe() == 0) goto found;
6588 } else if ((*p)->pmu_family == family || (*p)->pmu_family == 0xff) {
6589 goto found;
6590 }
6591 p++;
6592 }
6593 return -1;
6594found:
6595 pmu_conf = *p;
6596 return 0;
6597}
6598
5dfe4c96 6599static const struct file_operations pfm_proc_fops = {
6600 .open = pfm_proc_open,
6601 .read = seq_read,
6602 .llseek = seq_lseek,
6603 .release = seq_release,
6604};
6605
6606int __init
6607pfm_init(void)
6608{
6609 unsigned int n, n_counters, i;
6610
6611 printk("perfmon: version %u.%u IRQ %u\n",
6612 PFM_VERSION_MAJ,
6613 PFM_VERSION_MIN,
6614 IA64_PERFMON_VECTOR);
6615
6616 if (pfm_probe_pmu()) {
6617 printk(KERN_INFO "perfmon: disabled, there is no support for processor family %d\n",
6618 local_cpu_data->family);
6619 return -ENODEV;
6620 }
6621
6622 /*
6623 * compute the number of implemented PMD/PMC from the
6624 * description tables
6625 */
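	/* impl_pmcs/impl_pmds are bitmaps of 64-bit words: i>>6 selects the word, i&63 the bit */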
6626 n = 0;
6627 for (i=0; PMC_IS_LAST(i) == 0; i++) {
6628 if (PMC_IS_IMPL(i) == 0) continue;
6629 pmu_conf->impl_pmcs[i>>6] |= 1UL << (i&63);
6630 n++;
6631 }
6632 pmu_conf->num_pmcs = n;
6633
6634 n = 0; n_counters = 0;
6635 for (i=0; PMD_IS_LAST(i) == 0; i++) {
6636 if (PMD_IS_IMPL(i) == 0) continue;
6637 pmu_conf->impl_pmds[i>>6] |= 1UL << (i&63);
6638 n++;
6639 if (PMD_IS_COUNTING(i)) n_counters++;
6640 }
6641 pmu_conf->num_pmds = n;
6642 pmu_conf->num_counters = n_counters;
6643
6644 /*
6645 * sanity checks on the number of debug registers
6646 */
6647 if (pmu_conf->use_rr_dbregs) {
6648 if (pmu_conf->num_ibrs > IA64_NUM_DBG_REGS) {
6649 printk(KERN_INFO "perfmon: unsupported number of code debug registers (%u)\n", pmu_conf->num_ibrs);
6650 pmu_conf = NULL;
6651 return -1;
6652 }
6653 if (pmu_conf->num_dbrs > IA64_NUM_DBG_REGS) {
6654 printk(KERN_INFO "perfmon: unsupported number of data debug registers (%u)\n", pmu_conf->num_ibrs);
6655 pmu_conf = NULL;
6656 return -1;
6657 }
6658 }
6659
6660 printk("perfmon: %s PMU detected, %u PMCs, %u PMDs, %u counters (%lu bits)\n",
6661 pmu_conf->pmu_name,
6662 pmu_conf->num_pmcs,
6663 pmu_conf->num_pmds,
6664 pmu_conf->num_counters,
6665 ffz(pmu_conf->ovfl_val));
6666
6667 /* sanity check */
35589a8f 6668 if (pmu_conf->num_pmds >= PFM_NUM_PMD_REGS || pmu_conf->num_pmcs >= PFM_NUM_PMC_REGS) {
6669 printk(KERN_ERR "perfmon: not enough pmc/pmd, perfmon disabled\n");
6670 pmu_conf = NULL;
6671 return -1;
6672 }
6673
6674 /*
6675 * create /proc/perfmon (mostly for debugging purposes)
6676 */
e2363768 6677 perfmon_dir = proc_create("perfmon", S_IRUGO, NULL, &pfm_proc_fops);
6678 if (perfmon_dir == NULL) {
6679 printk(KERN_ERR "perfmon: cannot create /proc entry, perfmon disabled\n");
6680 pmu_conf = NULL;
6681 return -1;
6682 }
6683
6684 /*
6685 * create /proc/sys/kernel/perfmon (for debugging purposes)
6686 */
0b4d4147 6687 pfm_sysctl_header = register_sysctl_table(pfm_sysctl_root);
6688
6689 /*
6690 * initialize all our spinlocks
6691 */
6692 spin_lock_init(&pfm_sessions.pfs_lock);
6693 spin_lock_init(&pfm_buffer_fmt_lock);
6694
6695 init_pfm_fs();
6696
6697 for(i=0; i < NR_CPUS; i++) pfm_stats[i].pfm_ovfl_intr_cycles_min = ~0UL;
6698
6699 return 0;
6700}
6701
6702__initcall(pfm_init);
6703
6704/*
6705 * this function is called before pfm_init()
6706 */
6707void
6708pfm_init_percpu (void)
6709{
ff741906 6710 static int first_time=1;
6711 /*
6712 * make sure no measurement is active
6713 * (may inherit programmed PMCs from EFI).
6714 */
6715 pfm_clear_psr_pp();
6716 pfm_clear_psr_up();
6717
6718 /*
6719 * we run with the PMU not frozen at all times
6720 */
6721 pfm_unfreeze_pmu();
6722
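	/*
	 * the overflow irqaction only needs to be registered once;
	 * every CPU still programs its own cr.pmv register below
	 */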
ff741906 6723 if (first_time) {
1da177e4 6724 register_percpu_irq(IA64_PERFMON_VECTOR, &perfmon_irqaction);
6725 first_time=0;
6726 }
6727
6728 ia64_setreg(_IA64_REG_CR_PMV, IA64_PERFMON_VECTOR);
6729 ia64_srlz_d();
6730}
6731
6732/*
6733 * used for debug purposes only
6734 */
6735void
6736dump_pmu_state(const char *from)
6737{
6738 struct task_struct *task;
6739 struct pt_regs *regs;
6740 pfm_context_t *ctx;
6741 unsigned long psr, dcr, info, flags;
6742 int i, this_cpu;
6743
6744 local_irq_save(flags);
6745
6746 this_cpu = smp_processor_id();
6450578f 6747 regs = task_pt_regs(current);
6748 info = PFM_CPUINFO_GET();
6749 dcr = ia64_getreg(_IA64_REG_CR_DCR);
6750
6751 if (info == 0 && ia64_psr(regs)->pp == 0 && (dcr & IA64_DCR_PP) == 0) {
6752 local_irq_restore(flags);
6753 return;
6754 }
6755
6756 printk("CPU%d from %s() current [%d] iip=0x%lx %s\n",
6757 this_cpu,
6758 from,
19c5870c 6759 task_pid_nr(current),
6760 regs->cr_iip,
6761 current->comm);
6762
6763 task = GET_PMU_OWNER();
6764 ctx = GET_PMU_CTX();
6765
19c5870c 6766 printk("->CPU%d owner [%d] ctx=%p\n", this_cpu, task ? task_pid_nr(task) : -1, ctx);
6767
6768 psr = pfm_get_psr();
6769
6770 printk("->CPU%d pmc0=0x%lx psr.pp=%d psr.up=%d dcr.pp=%d syst_info=0x%lx user_psr.up=%d user_psr.pp=%d\n",
6771 this_cpu,
6772 ia64_get_pmc(0),
6773 psr & IA64_PSR_PP ? 1 : 0,
6774 psr & IA64_PSR_UP ? 1 : 0,
6775 dcr & IA64_DCR_PP ? 1 : 0,
6776 info,
6777 ia64_psr(regs)->up,
6778 ia64_psr(regs)->pp);
6779
6780 ia64_psr(regs)->up = 0;
6781 ia64_psr(regs)->pp = 0;
6782
6783 for (i=1; PMC_IS_LAST(i) == 0; i++) {
6784 if (PMC_IS_IMPL(i) == 0) continue;
35589a8f 6785 printk("->CPU%d pmc[%d]=0x%lx thread_pmc[%d]=0x%lx\n", this_cpu, i, ia64_get_pmc(i), i, ctx->th_pmcs[i]);
6786 }
6787
6788 for (i=1; PMD_IS_LAST(i) == 0; i++) {
6789 if (PMD_IS_IMPL(i) == 0) continue;
35589a8f 6790 printk("->CPU%d pmd[%d]=0x%lx thread_pmd[%d]=0x%lx\n", this_cpu, i, ia64_get_pmd(i), i, ctx->th_pmds[i]);
6791 }
6792
6793 if (ctx) {
6794 printk("->CPU%d ctx_state=%d vaddr=%p addr=%p fd=%d ctx_task=[%d] saved_psr_up=0x%lx\n",
6795 this_cpu,
6796 ctx->ctx_state,
6797 ctx->ctx_smpl_vaddr,
6798 ctx->ctx_smpl_hdr,
6799 ctx->ctx_msgq_head,
6800 ctx->ctx_msgq_tail,
6801 ctx->ctx_saved_psr_up);
6802 }
6803 local_irq_restore(flags);
6804}
6805
6806/*
6807 * called from process.c:copy_thread(). task is new child.
6808 */
6809void
6810pfm_inherit(struct task_struct *task, struct pt_regs *regs)
6811{
6812 struct thread_struct *thread;
6813
19c5870c 6814 DPRINT(("perfmon: pfm_inherit clearing state for [%d]\n", task_pid_nr(task)));
6815
6816 thread = &task->thread;
6817
6818 /*
6819 * cut links inherited from parent (current)
6820 */
6821 thread->pfm_context = NULL;
6822
6823 PFM_SET_WORK_PENDING(task, 0);
6824
6825 /*
6826 * the psr bits are already set properly in copy_threads()
6827 */
6828}
6829#else /* !CONFIG_PERFMON */
6830asmlinkage long
6831sys_perfmonctl (int fd, int cmd, void *arg, int count)
6832{
6833 return -ENOSYS;
6834}
6835#endif /* CONFIG_PERFMON */