arch/ia64/kernel/perfmon.c
1/*
2 * This file implements the perfmon-2 subsystem which is used
3 * to program the IA-64 Performance Monitoring Unit (PMU).
4 *
5 * The initial version of perfmon.c was written by
6 * Ganesh Venkitachalam, IBM Corp.
7 *
8 * Then it was modified for perfmon-1.x by Stephane Eranian and
9 * David Mosberger, Hewlett Packard Co.
10 *
11 * Version Perfmon-2.x is a rewrite of perfmon-1.x
12 * by Stephane Eranian, Hewlett Packard Co.
13 *
14 * Copyright (C) 1999-2005 Hewlett Packard Co
15 * Stephane Eranian <eranian@hpl.hp.com>
16 * David Mosberger-Tang <davidm@hpl.hp.com>
17 *
18 * More information about perfmon available at:
19 * http://www.hpl.hp.com/research/linux/perfmon
20 */
21
22#include <linux/module.h>
23#include <linux/kernel.h>
24#include <linux/sched.h>
25#include <linux/interrupt.h>
26#include <linux/proc_fs.h>
27#include <linux/seq_file.h>
28#include <linux/init.h>
29#include <linux/vmalloc.h>
30#include <linux/mm.h>
31#include <linux/sysctl.h>
32#include <linux/list.h>
33#include <linux/file.h>
34#include <linux/poll.h>
35#include <linux/vfs.h>
36#include <linux/smp.h>
37#include <linux/pagemap.h>
38#include <linux/mount.h>
39#include <linux/bitops.h>
40#include <linux/capability.h>
41#include <linux/rcupdate.h>
42#include <linux/completion.h>
43
44#include <asm/errno.h>
45#include <asm/intrinsics.h>
46#include <asm/page.h>
47#include <asm/perfmon.h>
48#include <asm/processor.h>
49#include <asm/signal.h>
50#include <asm/system.h>
51#include <asm/uaccess.h>
52#include <asm/delay.h>
53
54#ifdef CONFIG_PERFMON
55/*
56 * perfmon context state
57 */
58#define PFM_CTX_UNLOADED 1 /* context is not loaded onto any task */
59#define PFM_CTX_LOADED 2 /* context is loaded onto a task */
60#define PFM_CTX_MASKED 3 /* context is loaded but monitoring is masked due to overflow */
61#define PFM_CTX_ZOMBIE 4 /* owner of the context is closing it */
62
63#define PFM_INVALID_ACTIVATION (~0UL)
64
65#define PFM_NUM_PMC_REGS 64 /* PMC save area for ctxsw */
66#define PFM_NUM_PMD_REGS 64 /* PMD save area for ctxsw */
67
68/*
69 * depth of message queue
70 */
71#define PFM_MAX_MSGS 32
72#define PFM_CTXQ_EMPTY(g) ((g)->ctx_msgq_head == (g)->ctx_msgq_tail)
73
74/*
75 * type of a PMU register (bitmask).
76 * bitmask structure:
77 * bit0 : register implemented
78 * bit1 : end marker
79 * bit2-3 : reserved
80 * bit4 : pmc has pmc.pm
81 * bit5 : pmc controls a counter (has pmc.oi), pmd is used as counter
82 * bit6-7 : register type
83 * bit8-31: reserved
84 */
85#define PFM_REG_NOTIMPL 0x0 /* not implemented at all */
86#define PFM_REG_IMPL 0x1 /* register implemented */
87#define PFM_REG_END 0x2 /* end marker */
88#define PFM_REG_MONITOR (0x1<<4|PFM_REG_IMPL) /* a PMC with a pmc.pm field only */
89#define PFM_REG_COUNTING (0x2<<4|PFM_REG_MONITOR) /* a monitor + pmc.oi+ PMD used as a counter */
90#define PFM_REG_CONTROL (0x4<<4|PFM_REG_IMPL) /* PMU control register */
91#define PFM_REG_CONFIG (0x8<<4|PFM_REG_IMPL) /* configuration register */
92#define PFM_REG_BUFFER (0xc<<4|PFM_REG_IMPL) /* PMD used as buffer */
93
94#define PMC_IS_LAST(i) (pmu_conf->pmc_desc[i].type & PFM_REG_END)
95#define PMD_IS_LAST(i) (pmu_conf->pmd_desc[i].type & PFM_REG_END)
96
97#define PMC_OVFL_NOTIFY(ctx, i) ((ctx)->ctx_pmds[i].flags & PFM_REGFL_OVFL_NOTIFY)
98
99/* i assumed unsigned */
100#define PMC_IS_IMPL(i) (i< PMU_MAX_PMCS && (pmu_conf->pmc_desc[i].type & PFM_REG_IMPL))
101#define PMD_IS_IMPL(i) (i< PMU_MAX_PMDS && (pmu_conf->pmd_desc[i].type & PFM_REG_IMPL))
102
103/* XXX: these assume that register i is implemented */
104#define PMD_IS_COUNTING(i) ((pmu_conf->pmd_desc[i].type & PFM_REG_COUNTING) == PFM_REG_COUNTING)
105#define PMC_IS_COUNTING(i) ((pmu_conf->pmc_desc[i].type & PFM_REG_COUNTING) == PFM_REG_COUNTING)
106#define PMC_IS_MONITOR(i) ((pmu_conf->pmc_desc[i].type & PFM_REG_MONITOR) == PFM_REG_MONITOR)
107#define PMC_IS_CONTROL(i) ((pmu_conf->pmc_desc[i].type & PFM_REG_CONTROL) == PFM_REG_CONTROL)
108
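/*
 * Illustrative note: the type bits compose. PFM_REG_COUNTING expands to
 * (0x2<<4)|PFM_REG_MONITOR = 0x31, i.e. implemented + pmc.pm + counter bits,
 * so PMC_IS_COUNTING(i) matches only PMCs whose descriptor carries all three
 * bits, while PMC_IS_MONITOR(i) also matches a plain monitor (type 0x11).
 */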
109#define PMC_DFL_VAL(i) pmu_conf->pmc_desc[i].default_value
110#define PMC_RSVD_MASK(i) pmu_conf->pmc_desc[i].reserved_mask
111#define PMD_PMD_DEP(i) pmu_conf->pmd_desc[i].dep_pmd[0]
112#define PMC_PMD_DEP(i) pmu_conf->pmc_desc[i].dep_pmd[0]
113
114#define PFM_NUM_IBRS IA64_NUM_DBG_REGS
115#define PFM_NUM_DBRS IA64_NUM_DBG_REGS
116
117#define CTX_OVFL_NOBLOCK(c) ((c)->ctx_fl_block == 0)
118#define CTX_HAS_SMPL(c) ((c)->ctx_fl_is_sampling)
119#define PFM_CTX_TASK(h) (h)->ctx_task
120
121#define PMU_PMC_OI 5 /* position of pmc.oi bit */
122
123/* XXX: does not support more than 64 PMDs */
124#define CTX_USED_PMD(ctx, mask) (ctx)->ctx_used_pmds[0] |= (mask)
125#define CTX_IS_USED_PMD(ctx, c) (((ctx)->ctx_used_pmds[0] & (1UL << (c))) != 0UL)
126
127#define CTX_USED_MONITOR(ctx, mask) (ctx)->ctx_used_monitors[0] |= (mask)
128
129#define CTX_USED_IBR(ctx,n) (ctx)->ctx_used_ibrs[(n)>>6] |= 1UL<< ((n) % 64)
130#define CTX_USED_DBR(ctx,n) (ctx)->ctx_used_dbrs[(n)>>6] |= 1UL<< ((n) % 64)
131#define CTX_USES_DBREGS(ctx) (((pfm_context_t *)(ctx))->ctx_fl_using_dbreg==1)
132#define PFM_CODE_RR 0 /* requesting code range restriction */
133#define PFM_DATA_RR 1 /* requesting data range restriction */
134
135#define PFM_CPUINFO_CLEAR(v) pfm_get_cpu_var(pfm_syst_info) &= ~(v)
136#define PFM_CPUINFO_SET(v) pfm_get_cpu_var(pfm_syst_info) |= (v)
137#define PFM_CPUINFO_GET() pfm_get_cpu_var(pfm_syst_info)
138
139#define RDEP(x) (1UL<<(x))
140
141/*
142 * context protection macros
143 * in SMP:
144 * - we need to protect against CPU concurrency (spin_lock)
145 * - we need to protect against PMU overflow interrupts (local_irq_disable)
146 * in UP:
147 * - we need to protect against PMU overflow interrupts (local_irq_disable)
148 *
149 * spin_lock_irqsave()/spin_unlock_irqrestore():
150 * in SMP: local_irq_disable + spin_lock
151 * in UP : local_irq_disable
152 *
153 * spin_lock()/spin_unlock():
154 * in UP : removed automatically
155 * in SMP: protect against context accesses from other CPU. interrupts
156 * are not masked. This is useful for the PMU interrupt handler
157 * because we know we will not get PMU concurrency in that code.
158 */
159#define PROTECT_CTX(c, f) \
160 do { \
161 DPRINT(("spinlock_irq_save ctx %p by [%d]\n", c, task_pid_nr(current))); \
162 spin_lock_irqsave(&(c)->ctx_lock, f); \
163 DPRINT(("spinlocked ctx %p by [%d]\n", c, task_pid_nr(current))); \
164 } while(0)
165
166#define UNPROTECT_CTX(c, f) \
167 do { \
168 DPRINT(("spinlock_irq_restore ctx %p by [%d]\n", c, task_pid_nr(current))); \
169 spin_unlock_irqrestore(&(c)->ctx_lock, f); \
170 } while(0)
171
172#define PROTECT_CTX_NOPRINT(c, f) \
173 do { \
174 spin_lock_irqsave(&(c)->ctx_lock, f); \
175 } while(0)
176
177
178#define UNPROTECT_CTX_NOPRINT(c, f) \
179 do { \
180 spin_unlock_irqrestore(&(c)->ctx_lock, f); \
181 } while(0)
182
183
184#define PROTECT_CTX_NOIRQ(c) \
185 do { \
186 spin_lock(&(c)->ctx_lock); \
187 } while(0)
188
189#define UNPROTECT_CTX_NOIRQ(c) \
190 do { \
191 spin_unlock(&(c)->ctx_lock); \
192 } while(0)
193
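/*
 * Typical usage sketch (illustrative only): code that can race with the PMU
 * overflow interrupt takes the context with interrupts masked, e.g.
 *
 *	unsigned long flags;
 *	PROTECT_CTX(ctx, flags);
 *	... update ctx state ...
 *	UNPROTECT_CTX(ctx, flags);
 *
 * whereas the PMU interrupt handler itself, which cannot see another PMU
 * interrupt, only needs PROTECT_CTX_NOIRQ(ctx)/UNPROTECT_CTX_NOIRQ(ctx).
 */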
194
195#ifdef CONFIG_SMP
196
197#define GET_ACTIVATION() pfm_get_cpu_var(pmu_activation_number)
198#define INC_ACTIVATION() pfm_get_cpu_var(pmu_activation_number)++
199#define SET_ACTIVATION(c) (c)->ctx_last_activation = GET_ACTIVATION()
200
201#else /* !CONFIG_SMP */
202#define SET_ACTIVATION(t) do {} while(0)
203#define GET_ACTIVATION(t) do {} while(0)
204#define INC_ACTIVATION(t) do {} while(0)
205#endif /* CONFIG_SMP */
206
207#define SET_PMU_OWNER(t, c) do { pfm_get_cpu_var(pmu_owner) = (t); pfm_get_cpu_var(pmu_ctx) = (c); } while(0)
208#define GET_PMU_OWNER() pfm_get_cpu_var(pmu_owner)
209#define GET_PMU_CTX() pfm_get_cpu_var(pmu_ctx)
210
211#define LOCK_PFS(g) spin_lock_irqsave(&pfm_sessions.pfs_lock, g)
212#define UNLOCK_PFS(g) spin_unlock_irqrestore(&pfm_sessions.pfs_lock, g)
213
214#define PFM_REG_RETFLAG_SET(flags, val) do { flags &= ~PFM_REG_RETFL_MASK; flags |= (val); } while(0)
215
216/*
217 * cmp0 must be the value of pmc0
218 */
219#define PMC0_HAS_OVFL(cmp0) (cmp0 & ~0x1UL)
220
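/*
 * Note (illustrative): bit 0 of pmc0 is the PMU freeze bit, so masking it off
 * leaves only the per-counter overflow status bits; any non-zero remainder
 * means at least one counter overflowed.
 */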
221#define PFMFS_MAGIC 0xa0b4d889
222
223/*
224 * debugging
225 */
226#define PFM_DEBUGGING 1
227#ifdef PFM_DEBUGGING
228#define DPRINT(a) \
229 do { \
230 if (unlikely(pfm_sysctl.debug >0)) { printk("%s.%d: CPU%d [%d] ", __FUNCTION__, __LINE__, smp_processor_id(), task_pid_nr(current)); printk a; } \
231 } while (0)
232
233#define DPRINT_ovfl(a) \
234 do { \
235 if (unlikely(pfm_sysctl.debug > 0 && pfm_sysctl.debug_ovfl >0)) { printk("%s.%d: CPU%d [%d] ", __FUNCTION__, __LINE__, smp_processor_id(), task_pid_nr(current)); printk a; } \
236 } while (0)
237#endif
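/*
 * Example (illustrative): debugging is off by default and is enabled at run
 * time through the sysctl entries defined further down, e.g.
 * "echo 1 > /proc/sys/kernel/perfmon/debug" (and debug_ovfl for the overflow
 * path); a call such as DPRINT(("ctx=%p state=%d\n", ctx, ctx->ctx_state))
 * then shows up in the kernel log with function, line, CPU and pid prefixes.
 */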
238
239/*
240 * 64-bit software counter structure
241 *
242 * the next_reset_type is applied to the next call to pfm_reset_regs()
243 */
244typedef struct {
245 unsigned long val; /* virtual 64bit counter value */
246 unsigned long lval; /* last reset value */
247 unsigned long long_reset; /* reset value on sampling overflow */
248 unsigned long short_reset; /* reset value on overflow */
249 unsigned long reset_pmds[4]; /* which other pmds to reset when this counter overflows */
250 unsigned long smpl_pmds[4]; /* which pmds are accessed when counter overflow */
251 unsigned long seed; /* seed for random-number generator */
252 unsigned long mask; /* mask for random-number generator */
253 unsigned int flags; /* notify/do not notify */
254 unsigned long eventid; /* overflow event identifier */
255} pfm_counter_t;
256
257/*
258 * context flags
259 */
260typedef struct {
261 unsigned int block:1; /* when 1, task will be blocked on user notifications */
262 unsigned int system:1; /* do system wide monitoring */
263 unsigned int using_dbreg:1; /* using range restrictions (debug registers) */
264 unsigned int is_sampling:1; /* true if using a custom format */
265 unsigned int excl_idle:1; /* exclude idle task in system wide session */
266 unsigned int going_zombie:1; /* context is zombie (MASKED+blocking) */
267 unsigned int trap_reason:2; /* reason for going into pfm_handle_work() */
268 unsigned int no_msg:1; /* no message sent on overflow */
269 unsigned int can_restart:1; /* allowed to issue a PFM_RESTART */
270 unsigned int reserved:22;
271} pfm_context_flags_t;
272
273#define PFM_TRAP_REASON_NONE 0x0 /* default value */
274#define PFM_TRAP_REASON_BLOCK 0x1 /* we need to block on overflow */
275#define PFM_TRAP_REASON_RESET 0x2 /* we need to reset PMDs */
276
277
278/*
279 * perfmon context: encapsulates all the state of a monitoring session
280 */
281
282typedef struct pfm_context {
283 spinlock_t ctx_lock; /* context protection */
284
285 pfm_context_flags_t ctx_flags; /* bitmask of flags (block reason incl.) */
286 unsigned int ctx_state; /* state: active/inactive (no bitfield) */
287
288 struct task_struct *ctx_task; /* task to which context is attached */
289
290 unsigned long ctx_ovfl_regs[4]; /* which registers overflowed (notification) */
291
292 struct completion ctx_restart_done; /* used for blocking notification mode */
293
294 unsigned long ctx_used_pmds[4]; /* bitmask of PMD used */
295 unsigned long ctx_all_pmds[4]; /* bitmask of all accessible PMDs */
296 unsigned long ctx_reload_pmds[4]; /* bitmask of force reload PMD on ctxsw in */
297
298 unsigned long ctx_all_pmcs[4]; /* bitmask of all accessible PMCs */
299 unsigned long ctx_reload_pmcs[4]; /* bitmask of force reload PMC on ctxsw in */
300 unsigned long ctx_used_monitors[4]; /* bitmask of monitor PMC being used */
301
302 unsigned long ctx_pmcs[PFM_NUM_PMC_REGS]; /* saved copies of PMC values */
303
304 unsigned int ctx_used_ibrs[1]; /* bitmask of used IBR (speedup ctxsw in) */
305 unsigned int ctx_used_dbrs[1]; /* bitmask of used DBR (speedup ctxsw in) */
306 unsigned long ctx_dbrs[IA64_NUM_DBG_REGS]; /* DBR values (cache) when not loaded */
307 unsigned long ctx_ibrs[IA64_NUM_DBG_REGS]; /* IBR values (cache) when not loaded */
308
309 pfm_counter_t ctx_pmds[PFM_NUM_PMD_REGS]; /* software state for PMDS */
310
311 unsigned long th_pmcs[PFM_NUM_PMC_REGS]; /* PMC thread save state */
312 unsigned long th_pmds[PFM_NUM_PMD_REGS]; /* PMD thread save state */
313
314 u64 ctx_saved_psr_up; /* only contains psr.up value */
315
316 unsigned long ctx_last_activation; /* context last activation number for last_cpu */
317 unsigned int ctx_last_cpu; /* CPU id of current or last CPU used (SMP only) */
318 unsigned int ctx_cpu; /* cpu to which perfmon is applied (system wide) */
319
320 int ctx_fd; /* file descriptor used by this context */
321 pfm_ovfl_arg_t ctx_ovfl_arg; /* argument to custom buffer format handler */
322
323 pfm_buffer_fmt_t *ctx_buf_fmt; /* buffer format callbacks */
324 void *ctx_smpl_hdr; /* points to sampling buffer header kernel vaddr */
325 unsigned long ctx_smpl_size; /* size of sampling buffer */
326 void *ctx_smpl_vaddr; /* user level virtual address of smpl buffer */
327
328 wait_queue_head_t ctx_msgq_wait;
329 pfm_msg_t ctx_msgq[PFM_MAX_MSGS];
330 int ctx_msgq_head;
331 int ctx_msgq_tail;
332 struct fasync_struct *ctx_async_queue;
333
334 wait_queue_head_t ctx_zombieq; /* termination cleanup wait queue */
335} pfm_context_t;
336
337/*
338 * magic number used to verify that structure is really
339 * a perfmon context
340 */
341#define PFM_IS_FILE(f) ((f)->f_op == &pfm_file_ops)
342
343#define PFM_GET_CTX(t) ((pfm_context_t *)(t)->thread.pfm_context)
344
345#ifdef CONFIG_SMP
346#define SET_LAST_CPU(ctx, v) (ctx)->ctx_last_cpu = (v)
347#define GET_LAST_CPU(ctx) (ctx)->ctx_last_cpu
348#else
349#define SET_LAST_CPU(ctx, v) do {} while(0)
350#define GET_LAST_CPU(ctx) do {} while(0)
351#endif
352
353
354#define ctx_fl_block ctx_flags.block
355#define ctx_fl_system ctx_flags.system
356#define ctx_fl_using_dbreg ctx_flags.using_dbreg
357#define ctx_fl_is_sampling ctx_flags.is_sampling
358#define ctx_fl_excl_idle ctx_flags.excl_idle
359#define ctx_fl_going_zombie ctx_flags.going_zombie
360#define ctx_fl_trap_reason ctx_flags.trap_reason
361#define ctx_fl_no_msg ctx_flags.no_msg
362#define ctx_fl_can_restart ctx_flags.can_restart
363
364#define PFM_SET_WORK_PENDING(t, v) do { (t)->thread.pfm_needs_checking = v; } while(0);
365#define PFM_GET_WORK_PENDING(t) (t)->thread.pfm_needs_checking
366
367/*
368 * global information about all sessions
369 * mostly used to synchronize between system wide and per-process
370 */
371typedef struct {
372 spinlock_t pfs_lock; /* lock the structure */
373
374 unsigned int pfs_task_sessions; /* number of per task sessions */
375 unsigned int pfs_sys_sessions; /* number of per system wide sessions */
376 unsigned int pfs_sys_use_dbregs; /* incremented when a system wide session uses debug regs */
377 unsigned int pfs_ptrace_use_dbregs; /* incremented when a process uses debug regs */
378 struct task_struct *pfs_sys_session[NR_CPUS]; /* point to task owning a system-wide session */
379} pfm_session_t;
380
381/*
382 * information about a PMC or PMD.
383 * dep_pmd[]: a bitmask of dependent PMD registers
384 * dep_pmc[]: a bitmask of dependent PMC registers
385 */
386typedef int (*pfm_reg_check_t)(struct task_struct *task, pfm_context_t *ctx, unsigned int cnum, unsigned long *val, struct pt_regs *regs);
387typedef struct {
388 unsigned int type;
389 int pm_pos;
390 unsigned long default_value; /* power-on default value */
391 unsigned long reserved_mask; /* bitmask of reserved bits */
392 pfm_reg_check_t read_check;
393 pfm_reg_check_t write_check;
394 unsigned long dep_pmd[4];
395 unsigned long dep_pmc[4];
396} pfm_reg_desc_t;
397
398/* assume cnum is a valid monitor */
399#define PMC_PM(cnum, val) (((val) >> (pmu_conf->pmc_desc[cnum].pm_pos)) & 0x1)
400
401/*
402 * This structure is initialized at boot time and contains
403 * a description of the PMU main characteristics.
404 *
405 * If the probe function is defined, detection is based
406 * on its return value:
407 * - 0 means recognized PMU
408 * - anything else means not supported
409 * When the probe function is not defined, then the pmu_family field
410 * is used and it must match the host CPU family such that:
411 * - cpu->family & config->pmu_family != 0
412 */
413typedef struct {
414 unsigned long ovfl_val; /* overflow value for counters */
415
416 pfm_reg_desc_t *pmc_desc; /* detailed PMC register dependencies descriptions */
417 pfm_reg_desc_t *pmd_desc; /* detailed PMD register dependencies descriptions */
418
419 unsigned int num_pmcs; /* number of PMCS: computed at init time */
420 unsigned int num_pmds; /* number of PMDS: computed at init time */
421 unsigned long impl_pmcs[4]; /* bitmask of implemented PMCS */
422 unsigned long impl_pmds[4]; /* bitmask of implemented PMDS */
423
424 char *pmu_name; /* PMU family name */
425 unsigned int pmu_family; /* cpuid family pattern used to identify pmu */
426 unsigned int flags; /* pmu specific flags */
427 unsigned int num_ibrs; /* number of IBRS: computed at init time */
428 unsigned int num_dbrs; /* number of DBRS: computed at init time */
429 unsigned int num_counters; /* PMC/PMD counting pairs : computed at init time */
430 int (*probe)(void); /* customized probe routine */
431 unsigned int use_rr_dbregs:1; /* set if debug registers used for range restriction */
432} pmu_config_t;
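/*
 * Illustrative sketch of a probe routine for a hypothetical PMU description
 * (the real ones live in the perfmon_*.h headers included below):
 *
 *	static int pfm_foo_probe(void)
 *	{
 *		return local_cpu_data->family == 0x20 ? 0 : -1;
 *	}
 *
 * Returning 0 selects the corresponding pmu_config_t; with .probe left NULL
 * the generic match against the pmu_family field is used instead.
 */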
433/*
434 * PMU specific flags
435 */
436#define PFM_PMU_IRQ_RESEND 1 /* PMU needs explicit IRQ resend */
437
438/*
439 * debug register related type definitions
440 */
441typedef struct {
442 unsigned long ibr_mask:56;
443 unsigned long ibr_plm:4;
444 unsigned long ibr_ig:3;
445 unsigned long ibr_x:1;
446} ibr_mask_reg_t;
447
448typedef struct {
449 unsigned long dbr_mask:56;
450 unsigned long dbr_plm:4;
451 unsigned long dbr_ig:2;
452 unsigned long dbr_w:1;
453 unsigned long dbr_r:1;
454} dbr_mask_reg_t;
455
456typedef union {
457 unsigned long val;
458 ibr_mask_reg_t ibr;
459 dbr_mask_reg_t dbr;
460} dbreg_t;
461
462
463/*
464 * perfmon command descriptions
465 */
466typedef struct {
467 int (*cmd_func)(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs);
468 char *cmd_name;
469 int cmd_flags;
470 unsigned int cmd_narg;
471 size_t cmd_argsize;
472 int (*cmd_getsize)(void *arg, size_t *sz);
473} pfm_cmd_desc_t;
474
475#define PFM_CMD_FD 0x01 /* command requires a file descriptor */
476#define PFM_CMD_ARG_READ 0x02 /* command must read argument(s) */
477#define PFM_CMD_ARG_RW 0x04 /* command must read/write argument(s) */
478#define PFM_CMD_STOP 0x08 /* command does not work on zombie context */
479
480
481#define PFM_CMD_NAME(cmd) pfm_cmd_tab[(cmd)].cmd_name
482#define PFM_CMD_READ_ARG(cmd) (pfm_cmd_tab[(cmd)].cmd_flags & PFM_CMD_ARG_READ)
483#define PFM_CMD_RW_ARG(cmd) (pfm_cmd_tab[(cmd)].cmd_flags & PFM_CMD_ARG_RW)
484#define PFM_CMD_USE_FD(cmd) (pfm_cmd_tab[(cmd)].cmd_flags & PFM_CMD_FD)
485#define PFM_CMD_STOPPED(cmd) (pfm_cmd_tab[(cmd)].cmd_flags & PFM_CMD_STOP)
486
487#define PFM_CMD_ARG_MANY -1 /* cannot be zero */
488
489typedef struct {
490 unsigned long pfm_spurious_ovfl_intr_count; /* keep track of spurious ovfl interrupts */
491 unsigned long pfm_replay_ovfl_intr_count; /* keep track of replayed ovfl interrupts */
492 unsigned long pfm_ovfl_intr_count; /* keep track of ovfl interrupts */
493 unsigned long pfm_ovfl_intr_cycles; /* cycles spent processing ovfl interrupts */
494 unsigned long pfm_ovfl_intr_cycles_min; /* min cycles spent processing ovfl interrupts */
495 unsigned long pfm_ovfl_intr_cycles_max; /* max cycles spent processing ovfl interrupts */
496 unsigned long pfm_smpl_handler_calls;
497 unsigned long pfm_smpl_handler_cycles;
498 char pad[SMP_CACHE_BYTES] ____cacheline_aligned;
499} pfm_stats_t;
500
501/*
502 * perfmon internal variables
503 */
504static pfm_stats_t pfm_stats[NR_CPUS];
505static pfm_session_t pfm_sessions; /* global sessions information */
506
507static DEFINE_SPINLOCK(pfm_alt_install_check);
508static pfm_intr_handler_desc_t *pfm_alt_intr_handler;
509
510static struct proc_dir_entry *perfmon_dir;
511static pfm_uuid_t pfm_null_uuid = {0,};
512
513static spinlock_t pfm_buffer_fmt_lock;
514static LIST_HEAD(pfm_buffer_fmt_list);
515
516static pmu_config_t *pmu_conf;
517
518/* sysctl() controls */
519pfm_sysctl_t pfm_sysctl;
520EXPORT_SYMBOL(pfm_sysctl);
521
522static ctl_table pfm_ctl_table[]={
523 {
524 .ctl_name = CTL_UNNUMBERED,
525 .procname = "debug",
526 .data = &pfm_sysctl.debug,
527 .maxlen = sizeof(int),
528 .mode = 0666,
529 .proc_handler = &proc_dointvec,
530 },
531 {
532 .ctl_name = CTL_UNNUMBERED,
533 .procname = "debug_ovfl",
534 .data = &pfm_sysctl.debug_ovfl,
535 .maxlen = sizeof(int),
536 .mode = 0666,
537 .proc_handler = &proc_dointvec,
538 },
539 {
540 .ctl_name = CTL_UNNUMBERED,
541 .procname = "fastctxsw",
542 .data = &pfm_sysctl.fastctxsw,
543 .maxlen = sizeof(int),
544 .mode = 0600,
545 .proc_handler = &proc_dointvec,
546 },
547 {
548 .ctl_name = CTL_UNNUMBERED,
549 .procname = "expert_mode",
550 .data = &pfm_sysctl.expert_mode,
551 .maxlen = sizeof(int),
552 .mode = 0600,
553 .proc_handler = &proc_dointvec,
554 },
555 {}
556};
557static ctl_table pfm_sysctl_dir[] = {
558 {
559 .ctl_name = CTL_UNNUMBERED,
560 .procname = "perfmon",
561 .mode = 0555,
562 .child = pfm_ctl_table,
563 },
564 {}
565};
566static ctl_table pfm_sysctl_root[] = {
567 {
568 .ctl_name = CTL_KERN,
569 .procname = "kernel",
570 .mode = 0555,
571 .child = pfm_sysctl_dir,
572 },
573 {}
574};
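/*
 * The three tables above nest perfmon under the "kernel" sysctl directory, so
 * the controls appear as /proc/sys/kernel/perfmon/{debug, debug_ovfl,
 * fastctxsw, expert_mode}, all plain integers handled by proc_dointvec.
 */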
575static struct ctl_table_header *pfm_sysctl_header;
576
577static int pfm_context_unload(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs);
578
579#define pfm_get_cpu_var(v) __ia64_per_cpu_var(v)
580#define pfm_get_cpu_data(a,b) per_cpu(a, b)
581
582static inline void
583pfm_put_task(struct task_struct *task)
584{
585 if (task != current) put_task_struct(task);
586}
587
588static inline void
589pfm_set_task_notify(struct task_struct *task)
590{
591 struct thread_info *info;
592
593 info = (struct thread_info *) ((char *) task + IA64_TASK_SIZE);
594 set_bit(TIF_PERFMON_WORK, &info->flags);
595}
596
597static inline void
598pfm_clear_task_notify(void)
599{
600 clear_thread_flag(TIF_PERFMON_WORK);
601}
602
603static inline void
604pfm_reserve_page(unsigned long a)
605{
606 SetPageReserved(vmalloc_to_page((void *)a));
607}
608static inline void
609pfm_unreserve_page(unsigned long a)
610{
611 ClearPageReserved(vmalloc_to_page((void*)a));
612}
613
614static inline unsigned long
615pfm_protect_ctx_ctxsw(pfm_context_t *x)
616{
617 spin_lock(&(x)->ctx_lock);
618 return 0UL;
619}
620
621static inline void
622pfm_unprotect_ctx_ctxsw(pfm_context_t *x, unsigned long f)
623{
624 spin_unlock(&(x)->ctx_lock);
625}
626
627static inline unsigned int
628pfm_do_munmap(struct mm_struct *mm, unsigned long addr, size_t len, int acct)
629{
630 return do_munmap(mm, addr, len);
631}
632
633static inline unsigned long
634pfm_get_unmapped_area(struct file *file, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags, unsigned long exec)
635{
636 return get_unmapped_area(file, addr, len, pgoff, flags);
637}
638
639
640static int
641pfmfs_get_sb(struct file_system_type *fs_type, int flags, const char *dev_name, void *data,
642 struct vfsmount *mnt)
643{
644 return get_sb_pseudo(fs_type, "pfm:", NULL, PFMFS_MAGIC, mnt);
645}
646
647static struct file_system_type pfm_fs_type = {
648 .name = "pfmfs",
649 .get_sb = pfmfs_get_sb,
650 .kill_sb = kill_anon_super,
651};
652
653DEFINE_PER_CPU(unsigned long, pfm_syst_info);
654DEFINE_PER_CPU(struct task_struct *, pmu_owner);
655DEFINE_PER_CPU(pfm_context_t *, pmu_ctx);
656DEFINE_PER_CPU(unsigned long, pmu_activation_number);
657EXPORT_PER_CPU_SYMBOL_GPL(pfm_syst_info);
658
659
660/* forward declaration */
661static const struct file_operations pfm_file_ops;
662
663/*
664 * forward declarations
665 */
666#ifndef CONFIG_SMP
667static void pfm_lazy_save_regs (struct task_struct *ta);
668#endif
669
670void dump_pmu_state(const char *);
671static int pfm_write_ibr_dbr(int mode, pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs);
672
673#include "perfmon_itanium.h"
674#include "perfmon_mckinley.h"
675#include "perfmon_montecito.h"
676#include "perfmon_generic.h"
677
678static pmu_config_t *pmu_confs[]={
679 &pmu_conf_mont,
680 &pmu_conf_mck,
681 &pmu_conf_ita,
682 &pmu_conf_gen, /* must be last */
683 NULL
684};
685
686
687static int pfm_end_notify_user(pfm_context_t *ctx);
688
689static inline void
690pfm_clear_psr_pp(void)
691{
692 ia64_rsm(IA64_PSR_PP);
693 ia64_srlz_i();
694}
695
696static inline void
697pfm_set_psr_pp(void)
698{
699 ia64_ssm(IA64_PSR_PP);
700 ia64_srlz_i();
701}
702
703static inline void
704pfm_clear_psr_up(void)
705{
706 ia64_rsm(IA64_PSR_UP);
707 ia64_srlz_i();
708}
709
710static inline void
711pfm_set_psr_up(void)
712{
713 ia64_ssm(IA64_PSR_UP);
714 ia64_srlz_i();
715}
716
717static inline unsigned long
718pfm_get_psr(void)
719{
720 unsigned long tmp;
721 tmp = ia64_getreg(_IA64_REG_PSR);
722 ia64_srlz_i();
723 return tmp;
724}
725
726static inline void
727pfm_set_psr_l(unsigned long val)
728{
729 ia64_setreg(_IA64_REG_PSR_L, val);
730 ia64_srlz_i();
731}
732
733static inline void
734pfm_freeze_pmu(void)
735{
736 ia64_set_pmc(0,1UL);
737 ia64_srlz_d();
738}
739
740static inline void
741pfm_unfreeze_pmu(void)
742{
743 ia64_set_pmc(0,0UL);
744 ia64_srlz_d();
745}
746
747static inline void
748pfm_restore_ibrs(unsigned long *ibrs, unsigned int nibrs)
749{
750 int i;
751
752 for (i=0; i < nibrs; i++) {
753 ia64_set_ibr(i, ibrs[i]);
754 ia64_dv_serialize_instruction();
755 }
756 ia64_srlz_i();
757}
758
759static inline void
760pfm_restore_dbrs(unsigned long *dbrs, unsigned int ndbrs)
761{
762 int i;
763
764 for (i=0; i < ndbrs; i++) {
765 ia64_set_dbr(i, dbrs[i]);
766 ia64_dv_serialize_data();
767 }
768 ia64_srlz_d();
769}
770
771/*
772 * PMD[i] must be a counter. no check is made
773 */
774static inline unsigned long
775pfm_read_soft_counter(pfm_context_t *ctx, int i)
776{
777 return ctx->ctx_pmds[i].val + (ia64_get_pmd(i) & pmu_conf->ovfl_val);
778}
779
780/*
781 * PMD[i] must be a counter. no check is made
782 */
783static inline void
784pfm_write_soft_counter(pfm_context_t *ctx, int i, unsigned long val)
785{
786 unsigned long ovfl_val = pmu_conf->ovfl_val;
787
788 ctx->ctx_pmds[i].val = val & ~ovfl_val;
789 /*
790 * writing to the unimplemented part is ignored, so we do not need to
791 * mask off top part
792 */
793 ia64_set_pmd(i, val & ovfl_val);
794}
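/*
 * Worked example (illustrative, assuming a 47-bit hardware counter, i.e.
 * ovfl_val = (1UL<<47)-1): pfm_write_soft_counter(ctx, i, v) keeps bits 47-63
 * of v in ctx->ctx_pmds[i].val and loads bits 0-46 into PMD[i];
 * pfm_read_soft_counter() later adds the two parts back together, which is
 * how the full 64-bit virtual counter survives hardware overflows.
 */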
795
796static pfm_msg_t *
797pfm_get_new_msg(pfm_context_t *ctx)
798{
799 int idx, next;
800
801 next = (ctx->ctx_msgq_tail+1) % PFM_MAX_MSGS;
802
803 DPRINT(("ctx_fd=%p head=%d tail=%d\n", ctx, ctx->ctx_msgq_head, ctx->ctx_msgq_tail));
804 if (next == ctx->ctx_msgq_head) return NULL;
805
806 idx = ctx->ctx_msgq_tail;
807 ctx->ctx_msgq_tail = next;
808
809 DPRINT(("ctx=%p head=%d tail=%d msg=%d\n", ctx, ctx->ctx_msgq_head, ctx->ctx_msgq_tail, idx));
810
811 return ctx->ctx_msgq+idx;
812}
813
814static pfm_msg_t *
815pfm_get_next_msg(pfm_context_t *ctx)
816{
817 pfm_msg_t *msg;
818
819 DPRINT(("ctx=%p head=%d tail=%d\n", ctx, ctx->ctx_msgq_head, ctx->ctx_msgq_tail));
820
821 if (PFM_CTXQ_EMPTY(ctx)) return NULL;
822
823 /*
824 * get oldest message
825 */
826 msg = ctx->ctx_msgq+ctx->ctx_msgq_head;
827
828 /*
829 * and move forward
830 */
831 ctx->ctx_msgq_head = (ctx->ctx_msgq_head+1) % PFM_MAX_MSGS;
832
833 DPRINT(("ctx=%p head=%d tail=%d type=%d\n", ctx, ctx->ctx_msgq_head, ctx->ctx_msgq_tail, msg->pfm_gen_msg.msg_type));
834
835 return msg;
836}
837
838static void
839pfm_reset_msgq(pfm_context_t *ctx)
840{
841 ctx->ctx_msgq_head = ctx->ctx_msgq_tail = 0;
842 DPRINT(("ctx=%p msgq reset\n", ctx));
843}
844
845static void *
846pfm_rvmalloc(unsigned long size)
847{
848 void *mem;
849 unsigned long addr;
850
851 size = PAGE_ALIGN(size);
852 mem = vmalloc(size);
853 if (mem) {
854 //printk("perfmon: CPU%d pfm_rvmalloc(%ld)=%p\n", smp_processor_id(), size, mem);
855 memset(mem, 0, size);
856 addr = (unsigned long)mem;
857 while (size > 0) {
858 pfm_reserve_page(addr);
859 addr+=PAGE_SIZE;
860 size-=PAGE_SIZE;
861 }
862 }
863 return mem;
864}
865
866static void
867pfm_rvfree(void *mem, unsigned long size)
868{
869 unsigned long addr;
870
871 if (mem) {
872 DPRINT(("freeing physical buffer @%p size=%lu\n", mem, size));
873 addr = (unsigned long) mem;
874 while ((long) size > 0) {
875 pfm_unreserve_page(addr);
876 addr+=PAGE_SIZE;
877 size-=PAGE_SIZE;
878 }
879 vfree(mem);
880 }
881 return;
882}
883
884static pfm_context_t *
885pfm_context_alloc(void)
886{
887 pfm_context_t *ctx;
888
889 /*
890 * allocate context descriptor
891 * must be able to free with interrupts disabled
892 */
893 ctx = kzalloc(sizeof(pfm_context_t), GFP_KERNEL);
894 if (ctx) {
895 DPRINT(("alloc ctx @%p\n", ctx));
896 }
897 return ctx;
898}
899
900static void
901pfm_context_free(pfm_context_t *ctx)
902{
903 if (ctx) {
904 DPRINT(("free ctx @%p\n", ctx));
905 kfree(ctx);
906 }
907}
908
909static void
910pfm_mask_monitoring(struct task_struct *task)
911{
912 pfm_context_t *ctx = PFM_GET_CTX(task);
913 unsigned long mask, val, ovfl_mask;
914 int i;
915
916 DPRINT_ovfl(("masking monitoring for [%d]\n", task_pid_nr(task)));
917
918 ovfl_mask = pmu_conf->ovfl_val;
919 /*
920 * monitoring can only be masked as a result of a valid
921 * counter overflow. In UP, it means that the PMU still
922 * has an owner. Note that the owner can be different
923 * from the current task. However the PMU state belongs
924 * to the owner.
925 * In SMP, a valid overflow only happens when task is
926 * current. Therefore if we come here, we know that
927 * the PMU state belongs to the current task, therefore
928 * we can access the live registers.
929 *
930 * So in both cases, the live register contains the owner's
931 * state. We can ONLY touch the PMU registers and NOT the PSR.
932 *
933 * As a consequence of this call, the ctx->th_pmds[] array
934 * contains stale information which must be ignored
935 * when context is reloaded AND monitoring is active (see
936 * pfm_restart).
937 */
938 mask = ctx->ctx_used_pmds[0];
939 for (i = 0; mask; i++, mask>>=1) {
940 /* skip non used pmds */
941 if ((mask & 0x1) == 0) continue;
942 val = ia64_get_pmd(i);
943
944 if (PMD_IS_COUNTING(i)) {
945 /*
946 * we rebuild the full 64 bit value of the counter
947 */
948 ctx->ctx_pmds[i].val += (val & ovfl_mask);
949 } else {
950 ctx->ctx_pmds[i].val = val;
951 }
952 DPRINT_ovfl(("pmd[%d]=0x%lx hw_pmd=0x%lx\n",
953 i,
954 ctx->ctx_pmds[i].val,
955 val & ovfl_mask));
956 }
957 /*
958 * mask monitoring by setting the privilege level to 0
959 * we cannot use psr.pp/psr.up for this, it is controlled by
960 * the user
961 *
962 * if task is current, modify actual registers, otherwise modify
963 * thread save state, i.e., what will be restored in pfm_load_regs()
964 */
965 mask = ctx->ctx_used_monitors[0] >> PMU_FIRST_COUNTER;
966 for(i= PMU_FIRST_COUNTER; mask; i++, mask>>=1) {
967 if ((mask & 0x1) == 0UL) continue;
968 ia64_set_pmc(i, ctx->th_pmcs[i] & ~0xfUL);
969 ctx->th_pmcs[i] &= ~0xfUL;
970 DPRINT_ovfl(("pmc[%d]=0x%lx\n", i, ctx->th_pmcs[i]));
971 }
972 /*
973 * make all of this visible
974 */
975 ia64_srlz_d();
976}
977
978/*
979 * must always be done with task == current
980 *
981 * context must be in MASKED state when calling
982 */
983static void
984pfm_restore_monitoring(struct task_struct *task)
985{
986 pfm_context_t *ctx = PFM_GET_CTX(task);
987 unsigned long mask, ovfl_mask;
988 unsigned long psr, val;
989 int i, is_system;
990
991 is_system = ctx->ctx_fl_system;
992 ovfl_mask = pmu_conf->ovfl_val;
993
994 if (task != current) {
995 printk(KERN_ERR "perfmon.%d: invalid task[%d] current[%d]\n", __LINE__, task_pid_nr(task), task_pid_nr(current));
996 return;
997 }
998 if (ctx->ctx_state != PFM_CTX_MASKED) {
999 printk(KERN_ERR "perfmon.%d: task[%d] current[%d] invalid state=%d\n", __LINE__,
1000 task_pid_nr(task), task_pid_nr(current), ctx->ctx_state);
1001 return;
1002 }
1003 psr = pfm_get_psr();
1004 /*
1005 * monitoring is masked via the PMC.
1006 * As we restore their value, we do not want each counter to
1007 * restart right away. We stop monitoring using the PSR,
1008 * restore the PMC (and PMD) and then re-establish the psr
1009 * as it was. Note that there can be no pending overflow at
1010 * this point, because monitoring was MASKED.
1011 *
1012 * system-wide session are pinned and self-monitoring
1013 */
1014 if (is_system && (PFM_CPUINFO_GET() & PFM_CPUINFO_DCR_PP)) {
1015 /* disable dcr pp */
1016 ia64_setreg(_IA64_REG_CR_DCR, ia64_getreg(_IA64_REG_CR_DCR) & ~IA64_DCR_PP);
1017 pfm_clear_psr_pp();
1018 } else {
1019 pfm_clear_psr_up();
1020 }
1021 /*
1022 * first, we restore the PMD
1023 */
1024 mask = ctx->ctx_used_pmds[0];
1025 for (i = 0; mask; i++, mask>>=1) {
1026 /* skip non used pmds */
1027 if ((mask & 0x1) == 0) continue;
1028
1029 if (PMD_IS_COUNTING(i)) {
1030 /*
1031 * we split the 64bit value according to
1032 * counter width
1033 */
1034 val = ctx->ctx_pmds[i].val & ovfl_mask;
1035 ctx->ctx_pmds[i].val &= ~ovfl_mask;
1036 } else {
1037 val = ctx->ctx_pmds[i].val;
1038 }
1039 ia64_set_pmd(i, val);
1040
1041 DPRINT(("pmd[%d]=0x%lx hw_pmd=0x%lx\n",
1042 i,
1043 ctx->ctx_pmds[i].val,
1044 val));
1045 }
1046 /*
1047 * restore the PMCs
1048 */
1049 mask = ctx->ctx_used_monitors[0] >> PMU_FIRST_COUNTER;
1050 for(i= PMU_FIRST_COUNTER; mask; i++, mask>>=1) {
1051 if ((mask & 0x1) == 0UL) continue;
1052 ctx->th_pmcs[i] = ctx->ctx_pmcs[i];
1053 ia64_set_pmc(i, ctx->th_pmcs[i]);
1054 DPRINT(("[%d] pmc[%d]=0x%lx\n",
1055 task_pid_nr(task), i, ctx->th_pmcs[i]));
1056 }
1057 ia64_srlz_d();
1058
1059 /*
1060 * must restore DBR/IBR because could be modified while masked
1061 * XXX: need to optimize
1062 */
1063 if (ctx->ctx_fl_using_dbreg) {
1064 pfm_restore_ibrs(ctx->ctx_ibrs, pmu_conf->num_ibrs);
1065 pfm_restore_dbrs(ctx->ctx_dbrs, pmu_conf->num_dbrs);
1066 }
1067
1068 /*
1069 * now restore PSR
1070 */
1071 if (is_system && (PFM_CPUINFO_GET() & PFM_CPUINFO_DCR_PP)) {
1072 /* enable dcr pp */
1073 ia64_setreg(_IA64_REG_CR_DCR, ia64_getreg(_IA64_REG_CR_DCR) | IA64_DCR_PP);
1074 ia64_srlz_i();
1075 }
1076 pfm_set_psr_l(psr);
1077}
1078
1079static inline void
1080pfm_save_pmds(unsigned long *pmds, unsigned long mask)
1081{
1082 int i;
1083
1084 ia64_srlz_d();
1085
1086 for (i=0; mask; i++, mask>>=1) {
1087 if (mask & 0x1) pmds[i] = ia64_get_pmd(i);
1088 }
1089}
1090
1091/*
1092 * reload from thread state (used for ctxsw only)
1093 */
1094static inline void
1095pfm_restore_pmds(unsigned long *pmds, unsigned long mask)
1096{
1097 int i;
1098 unsigned long val, ovfl_val = pmu_conf->ovfl_val;
1099
1100 for (i=0; mask; i++, mask>>=1) {
1101 if ((mask & 0x1) == 0) continue;
1102 val = PMD_IS_COUNTING(i) ? pmds[i] & ovfl_val : pmds[i];
1103 ia64_set_pmd(i, val);
1104 }
1105 ia64_srlz_d();
1106}
1107
1108/*
1109 * propagate PMD from context to thread-state
1110 */
1111static inline void
1112pfm_copy_pmds(struct task_struct *task, pfm_context_t *ctx)
1113{
1114 unsigned long ovfl_val = pmu_conf->ovfl_val;
1115 unsigned long mask = ctx->ctx_all_pmds[0];
1116 unsigned long val;
1117 int i;
1118
1119 DPRINT(("mask=0x%lx\n", mask));
1120
1121 for (i=0; mask; i++, mask>>=1) {
1122
1123 val = ctx->ctx_pmds[i].val;
1124
1125 /*
1126 * We break up the 64 bit value into 2 pieces
1127 * the lower bits go to the machine state in the
1128 * thread (will be reloaded on ctxsw in).
1129 * The upper part stays in the soft-counter.
1130 */
1131 if (PMD_IS_COUNTING(i)) {
1132 ctx->ctx_pmds[i].val = val & ~ovfl_val;
1133 val &= ovfl_val;
1134 }
1135 ctx->th_pmds[i] = val;
1136
1137 DPRINT(("pmd[%d]=0x%lx soft_val=0x%lx\n",
1138 i,
1139 ctx->th_pmds[i],
1140 ctx->ctx_pmds[i].val));
1141 }
1142}
1143
1144/*
1145 * propagate PMC from context to thread-state
1146 */
1147static inline void
1148pfm_copy_pmcs(struct task_struct *task, pfm_context_t *ctx)
1149{
1150 unsigned long mask = ctx->ctx_all_pmcs[0];
1151 int i;
1152
1153 DPRINT(("mask=0x%lx\n", mask));
1154
1155 for (i=0; mask; i++, mask>>=1) {
1156 /* masking 0 with ovfl_val yields 0 */
1157 ctx->th_pmcs[i] = ctx->ctx_pmcs[i];
1158 DPRINT(("pmc[%d]=0x%lx\n", i, ctx->th_pmcs[i]));
1159 }
1160}
1161
1162
1163
1164static inline void
1165pfm_restore_pmcs(unsigned long *pmcs, unsigned long mask)
1166{
1167 int i;
1168
1169 for (i=0; mask; i++, mask>>=1) {
1170 if ((mask & 0x1) == 0) continue;
1171 ia64_set_pmc(i, pmcs[i]);
1172 }
1173 ia64_srlz_d();
1174}
1175
1176static inline int
1177pfm_uuid_cmp(pfm_uuid_t a, pfm_uuid_t b)
1178{
1179 return memcmp(a, b, sizeof(pfm_uuid_t));
1180}
1181
1182static inline int
1183pfm_buf_fmt_exit(pfm_buffer_fmt_t *fmt, struct task_struct *task, void *buf, struct pt_regs *regs)
1184{
1185 int ret = 0;
1186 if (fmt->fmt_exit) ret = (*fmt->fmt_exit)(task, buf, regs);
1187 return ret;
1188}
1189
1190static inline int
1191pfm_buf_fmt_getsize(pfm_buffer_fmt_t *fmt, struct task_struct *task, unsigned int flags, int cpu, void *arg, unsigned long *size)
1192{
1193 int ret = 0;
1194 if (fmt->fmt_getsize) ret = (*fmt->fmt_getsize)(task, flags, cpu, arg, size);
1195 return ret;
1196}
1197
1198
1199static inline int
1200pfm_buf_fmt_validate(pfm_buffer_fmt_t *fmt, struct task_struct *task, unsigned int flags,
1201 int cpu, void *arg)
1202{
1203 int ret = 0;
1204 if (fmt->fmt_validate) ret = (*fmt->fmt_validate)(task, flags, cpu, arg);
1205 return ret;
1206}
1207
1208static inline int
1209pfm_buf_fmt_init(pfm_buffer_fmt_t *fmt, struct task_struct *task, void *buf, unsigned int flags,
1210 int cpu, void *arg)
1211{
1212 int ret = 0;
1213 if (fmt->fmt_init) ret = (*fmt->fmt_init)(task, buf, flags, cpu, arg);
1214 return ret;
1215}
1216
1217static inline int
1218pfm_buf_fmt_restart(pfm_buffer_fmt_t *fmt, struct task_struct *task, pfm_ovfl_ctrl_t *ctrl, void *buf, struct pt_regs *regs)
1219{
1220 int ret = 0;
1221 if (fmt->fmt_restart) ret = (*fmt->fmt_restart)(task, ctrl, buf, regs);
1222 return ret;
1223}
1224
1225static inline int
1226pfm_buf_fmt_restart_active(pfm_buffer_fmt_t *fmt, struct task_struct *task, pfm_ovfl_ctrl_t *ctrl, void *buf, struct pt_regs *regs)
1227{
1228 int ret = 0;
1229 if (fmt->fmt_restart_active) ret = (*fmt->fmt_restart_active)(task, ctrl, buf, regs);
1230 return ret;
1231}
1232
1233static pfm_buffer_fmt_t *
1234__pfm_find_buffer_fmt(pfm_uuid_t uuid)
1235{
1236 struct list_head * pos;
1237 pfm_buffer_fmt_t * entry;
1238
1239 list_for_each(pos, &pfm_buffer_fmt_list) {
1240 entry = list_entry(pos, pfm_buffer_fmt_t, fmt_list);
1241 if (pfm_uuid_cmp(uuid, entry->fmt_uuid) == 0)
1242 return entry;
1243 }
1244 return NULL;
1245}
1246
1247/*
1248 * find a buffer format based on its uuid
1249 */
1250static pfm_buffer_fmt_t *
1251pfm_find_buffer_fmt(pfm_uuid_t uuid)
1252{
1253 pfm_buffer_fmt_t * fmt;
1254 spin_lock(&pfm_buffer_fmt_lock);
1255 fmt = __pfm_find_buffer_fmt(uuid);
1256 spin_unlock(&pfm_buffer_fmt_lock);
1257 return fmt;
1258}
1259
1260int
1261pfm_register_buffer_fmt(pfm_buffer_fmt_t *fmt)
1262{
1263 int ret = 0;
1264
1265 /* some sanity checks */
1266 if (fmt == NULL || fmt->fmt_name == NULL) return -EINVAL;
1267
1268 /* we need at least a handler */
1269 if (fmt->fmt_handler == NULL) return -EINVAL;
1270
1271 /*
1272 * XXX: need check validity of fmt_arg_size
1273 */
1274
1275 spin_lock(&pfm_buffer_fmt_lock);
1276
1277 if (__pfm_find_buffer_fmt(fmt->fmt_uuid)) {
1278 printk(KERN_ERR "perfmon: duplicate sampling format: %s\n", fmt->fmt_name);
1279 ret = -EBUSY;
1280 goto out;
1281 }
1282 list_add(&fmt->fmt_list, &pfm_buffer_fmt_list);
1283 printk(KERN_INFO "perfmon: added sampling format %s\n", fmt->fmt_name);
1284
1285out:
1286 spin_unlock(&pfm_buffer_fmt_lock);
1287 return ret;
1288}
1289EXPORT_SYMBOL(pfm_register_buffer_fmt);
1290
1291int
1292pfm_unregister_buffer_fmt(pfm_uuid_t uuid)
1293{
1294 pfm_buffer_fmt_t *fmt;
1295 int ret = 0;
1296
1297 spin_lock(&pfm_buffer_fmt_lock);
1298
1299 fmt = __pfm_find_buffer_fmt(uuid);
1300 if (!fmt) {
1301 printk(KERN_ERR "perfmon: cannot unregister format, not found\n");
1302 ret = -EINVAL;
1303 goto out;
1304 }
1305 list_del_init(&fmt->fmt_list);
1306 printk(KERN_INFO "perfmon: removed sampling format: %s\n", fmt->fmt_name);
1307
1308out:
1309 spin_unlock(&pfm_buffer_fmt_lock);
1310 return ret;
1311
1312}
1313EXPORT_SYMBOL(pfm_unregister_buffer_fmt);
1314
1315extern void update_pal_halt_status(int);
1316
1317static int
1318pfm_reserve_session(struct task_struct *task, int is_syswide, unsigned int cpu)
1319{
1320 unsigned long flags;
1321 /*
1322 * validity checks on cpu_mask have been done upstream
1323 */
1324 LOCK_PFS(flags);
1325
1326 DPRINT(("in sys_sessions=%u task_sessions=%u dbregs=%u syswide=%d cpu=%u\n",
1327 pfm_sessions.pfs_sys_sessions,
1328 pfm_sessions.pfs_task_sessions,
1329 pfm_sessions.pfs_sys_use_dbregs,
1330 is_syswide,
1331 cpu));
1332
1333 if (is_syswide) {
1334 /*
1335 * cannot mix system wide and per-task sessions
1336 */
1337 if (pfm_sessions.pfs_task_sessions > 0UL) {
1338 DPRINT(("system wide not possible, %u conflicting task_sessions\n",
1339 pfm_sessions.pfs_task_sessions));
1340 goto abort;
1341 }
1342
1343 if (pfm_sessions.pfs_sys_session[cpu]) goto error_conflict;
1344
1345 DPRINT(("reserving system wide session on CPU%u currently on CPU%u\n", cpu, smp_processor_id()));
1346
1347 pfm_sessions.pfs_sys_session[cpu] = task;
1348
1349 pfm_sessions.pfs_sys_sessions++ ;
1350
1351 } else {
1352 if (pfm_sessions.pfs_sys_sessions) goto abort;
1353 pfm_sessions.pfs_task_sessions++;
1354 }
1355
1356 DPRINT(("out sys_sessions=%u task_sessions=%u dbregs=%u syswide=%d cpu=%u\n",
1357 pfm_sessions.pfs_sys_sessions,
1358 pfm_sessions.pfs_task_sessions,
1359 pfm_sessions.pfs_sys_use_dbregs,
1360 is_syswide,
1361 cpu));
1362
1363 /*
1364 * disable default_idle() to go to PAL_HALT
1365 */
1366 update_pal_halt_status(0);
1367
1368 UNLOCK_PFS(flags);
1369
1370 return 0;
1371
1372error_conflict:
1373 DPRINT(("system wide not possible, conflicting session [%d] on CPU%d\n",
1374 task_pid_nr(pfm_sessions.pfs_sys_session[cpu]),
1375 cpu));
1376abort:
1377 UNLOCK_PFS(flags);
1378
1379 return -EBUSY;
1380
1381}
1382
1383static int
1384pfm_unreserve_session(pfm_context_t *ctx, int is_syswide, unsigned int cpu)
1385{
1386 unsigned long flags;
1387 /*
1388 * validity checks on cpu_mask have been done upstream
1389 */
1390 LOCK_PFS(flags);
1391
1392 DPRINT(("in sys_sessions=%u task_sessions=%u dbregs=%u syswide=%d cpu=%u\n",
1393 pfm_sessions.pfs_sys_sessions,
1394 pfm_sessions.pfs_task_sessions,
1395 pfm_sessions.pfs_sys_use_dbregs,
1396 is_syswide,
1397 cpu));
1398
1399
1400 if (is_syswide) {
1401 pfm_sessions.pfs_sys_session[cpu] = NULL;
1402 /*
1403 * would not work with perfmon+more than one bit in cpu_mask
1404 */
1405 if (ctx && ctx->ctx_fl_using_dbreg) {
1406 if (pfm_sessions.pfs_sys_use_dbregs == 0) {
1407 printk(KERN_ERR "perfmon: invalid release for ctx %p sys_use_dbregs=0\n", ctx);
1408 } else {
1409 pfm_sessions.pfs_sys_use_dbregs--;
1410 }
1411 }
1412 pfm_sessions.pfs_sys_sessions--;
1413 } else {
1414 pfm_sessions.pfs_task_sessions--;
1415 }
1416 DPRINT(("out sys_sessions=%u task_sessions=%u dbregs=%u syswide=%d cpu=%u\n",
1417 pfm_sessions.pfs_sys_sessions,
1418 pfm_sessions.pfs_task_sessions,
1419 pfm_sessions.pfs_sys_use_dbregs,
1420 is_syswide,
1421 cpu));
1422
1423 /*
1424 * if possible, enable default_idle() to go into PAL_HALT
1425 */
1426 if (pfm_sessions.pfs_task_sessions == 0 && pfm_sessions.pfs_sys_sessions == 0)
1427 update_pal_halt_status(1);
1428
1429 UNLOCK_PFS(flags);
1430
1431 return 0;
1432}
1433
1434/*
1435 * removes virtual mapping of the sampling buffer.
1436 * IMPORTANT: cannot be called with interrupts disabled, e.g. inside
1437 * a PROTECT_CTX() section.
1438 */
1439static int
1440pfm_remove_smpl_mapping(struct task_struct *task, void *vaddr, unsigned long size)
1441{
1442 int r;
1443
1444 /* sanity checks */
1445 if (task->mm == NULL || size == 0UL || vaddr == NULL) {
1446 printk(KERN_ERR "perfmon: pfm_remove_smpl_mapping [%d] invalid context mm=%p\n", task_pid_nr(task), task->mm);
1447 return -EINVAL;
1448 }
1449
1450 DPRINT(("smpl_vaddr=%p size=%lu\n", vaddr, size));
1451
1452 /*
1453 * does the actual unmapping
1454 */
1455 down_write(&task->mm->mmap_sem);
1456
1457 DPRINT(("down_write done smpl_vaddr=%p size=%lu\n", vaddr, size));
1458
1459 r = pfm_do_munmap(task->mm, (unsigned long)vaddr, size, 0);
1460
1461 up_write(&task->mm->mmap_sem);
1462 if (r !=0) {
1463 printk(KERN_ERR "perfmon: [%d] unable to unmap sampling buffer @%p size=%lu\n", task_pid_nr(task), vaddr, size);
1464 }
1465
1466 DPRINT(("do_unmap(%p, %lu)=%d\n", vaddr, size, r));
1467
1468 return 0;
1469}
1470
1471/*
1472 * free actual physical storage used by sampling buffer
1473 */
1474#if 0
1475static int
1476pfm_free_smpl_buffer(pfm_context_t *ctx)
1477{
1478 pfm_buffer_fmt_t *fmt;
1479
1480 if (ctx->ctx_smpl_hdr == NULL) goto invalid_free;
1481
1482 /*
1483 * we won't use the buffer format anymore
1484 */
1485 fmt = ctx->ctx_buf_fmt;
1486
1487 DPRINT(("sampling buffer @%p size %lu vaddr=%p\n",
1488 ctx->ctx_smpl_hdr,
1489 ctx->ctx_smpl_size,
1490 ctx->ctx_smpl_vaddr));
1491
1492 pfm_buf_fmt_exit(fmt, current, NULL, NULL);
1493
1494 /*
1495 * free the buffer
1496 */
1497 pfm_rvfree(ctx->ctx_smpl_hdr, ctx->ctx_smpl_size);
1498
1499 ctx->ctx_smpl_hdr = NULL;
1500 ctx->ctx_smpl_size = 0UL;
1501
1502 return 0;
1503
1504invalid_free:
1505 printk(KERN_ERR "perfmon: pfm_free_smpl_buffer [%d] no buffer\n", task_pid_nr(current));
1506 return -EINVAL;
1507}
1508#endif
1509
1510static inline void
1511pfm_exit_smpl_buffer(pfm_buffer_fmt_t *fmt)
1512{
1513 if (fmt == NULL) return;
1514
1515 pfm_buf_fmt_exit(fmt, current, NULL, NULL);
1516
1517}
1518
1519/*
1520 * pfmfs should _never_ be mounted by userland - too much of security hassle,
1521 * no real gain from having the whole whorehouse mounted. So we don't need
1522 * any operations on the root directory. However, we need a non-trivial
1523 * d_name - pfm: will go nicely and kill the special-casing in procfs.
1524 */
1525static struct vfsmount *pfmfs_mnt;
1526
1527static int __init
1528init_pfm_fs(void)
1529{
1530 int err = register_filesystem(&pfm_fs_type);
1531 if (!err) {
1532 pfmfs_mnt = kern_mount(&pfm_fs_type);
1533 err = PTR_ERR(pfmfs_mnt);
1534 if (IS_ERR(pfmfs_mnt))
1535 unregister_filesystem(&pfm_fs_type);
1536 else
1537 err = 0;
1538 }
1539 return err;
1540}
1541
1542static ssize_t
1543pfm_read(struct file *filp, char __user *buf, size_t size, loff_t *ppos)
1544{
1545 pfm_context_t *ctx;
1546 pfm_msg_t *msg;
1547 ssize_t ret;
1548 unsigned long flags;
1549 DECLARE_WAITQUEUE(wait, current);
1550 if (PFM_IS_FILE(filp) == 0) {
1551 printk(KERN_ERR "perfmon: pfm_poll: bad magic [%d]\n", task_pid_nr(current));
1552 return -EINVAL;
1553 }
1554
1555 ctx = (pfm_context_t *)filp->private_data;
1556 if (ctx == NULL) {
1557 printk(KERN_ERR "perfmon: pfm_read: NULL ctx [%d]\n", task_pid_nr(current));
1558 return -EINVAL;
1559 }
1560
1561 /*
1562 * check even when there is no message
1563 */
1564 if (size < sizeof(pfm_msg_t)) {
1565 DPRINT(("message is too small ctx=%p (>=%ld)\n", ctx, sizeof(pfm_msg_t)));
1566 return -EINVAL;
1567 }
1568
1569 PROTECT_CTX(ctx, flags);
1570
1571 /*
1572 * put ourselves on the wait queue
1573 */
1574 add_wait_queue(&ctx->ctx_msgq_wait, &wait);
1575
1576
1577 for(;;) {
1578 /*
1579 * check wait queue
1580 */
1581
1582 set_current_state(TASK_INTERRUPTIBLE);
1583
1584 DPRINT(("head=%d tail=%d\n", ctx->ctx_msgq_head, ctx->ctx_msgq_tail));
1585
1586 ret = 0;
1587 if(PFM_CTXQ_EMPTY(ctx) == 0) break;
1588
1589 UNPROTECT_CTX(ctx, flags);
1590
1591 /*
1592 * check non-blocking read
1593 */
1594 ret = -EAGAIN;
1595 if(filp->f_flags & O_NONBLOCK) break;
1596
1597 /*
1598 * check pending signals
1599 */
1600 if(signal_pending(current)) {
1601 ret = -EINTR;
1602 break;
1603 }
1604 /*
1605 * no message, so wait
1606 */
1607 schedule();
1608
1609 PROTECT_CTX(ctx, flags);
1610 }
1611 DPRINT(("[%d] back to running ret=%ld\n", task_pid_nr(current), ret));
1612 set_current_state(TASK_RUNNING);
1613 remove_wait_queue(&ctx->ctx_msgq_wait, &wait);
1614
1615 if (ret < 0) goto abort;
1616
1617 ret = -EINVAL;
1618 msg = pfm_get_next_msg(ctx);
1619 if (msg == NULL) {
1620 printk(KERN_ERR "perfmon: pfm_read no msg for ctx=%p [%d]\n", ctx, task_pid_nr(current));
1621 goto abort_locked;
1622 }
1623
1624 DPRINT(("fd=%d type=%d\n", msg->pfm_gen_msg.msg_ctx_fd, msg->pfm_gen_msg.msg_type));
1625
1626 ret = -EFAULT;
1627 if(copy_to_user(buf, msg, sizeof(pfm_msg_t)) == 0) ret = sizeof(pfm_msg_t);
1628
1629abort_locked:
1630 UNPROTECT_CTX(ctx, flags);
1631abort:
1632 return ret;
1633}
1634
1635static ssize_t
1636pfm_write(struct file *file, const char __user *ubuf,
1637 size_t size, loff_t *ppos)
1638{
1639 DPRINT(("pfm_write called\n"));
1640 return -EINVAL;
1641}
1642
1643static unsigned int
1644pfm_poll(struct file *filp, poll_table * wait)
1645{
1646 pfm_context_t *ctx;
1647 unsigned long flags;
1648 unsigned int mask = 0;
1649
1650 if (PFM_IS_FILE(filp) == 0) {
1651 printk(KERN_ERR "perfmon: pfm_poll: bad magic [%d]\n", task_pid_nr(current));
1652 return 0;
1653 }
1654
1655 ctx = (pfm_context_t *)filp->private_data;
1656 if (ctx == NULL) {
1657 printk(KERN_ERR "perfmon: pfm_poll: NULL ctx [%d]\n", task_pid_nr(current));
1658 return 0;
1659 }
1660
1661
1662 DPRINT(("pfm_poll ctx_fd=%d before poll_wait\n", ctx->ctx_fd));
1663
1664 poll_wait(filp, &ctx->ctx_msgq_wait, wait);
1665
1666 PROTECT_CTX(ctx, flags);
1667
1668 if (PFM_CTXQ_EMPTY(ctx) == 0)
1669 mask = POLLIN | POLLRDNORM;
1670
1671 UNPROTECT_CTX(ctx, flags);
1672
1673 DPRINT(("pfm_poll ctx_fd=%d mask=0x%x\n", ctx->ctx_fd, mask));
1674
1675 return mask;
1676}
1677
1678static int
1679pfm_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg)
1680{
1681 DPRINT(("pfm_ioctl called\n"));
1682 return -EINVAL;
1683}
1684
1685/*
1686 * interrupt cannot be masked when coming here
1687 */
1688static inline int
1689pfm_do_fasync(int fd, struct file *filp, pfm_context_t *ctx, int on)
1690{
1691 int ret;
1692
1693 ret = fasync_helper (fd, filp, on, &ctx->ctx_async_queue);
1694
1695 DPRINT(("pfm_fasync called by [%d] on ctx_fd=%d on=%d async_queue=%p ret=%d\n",
1696 task_pid_nr(current),
1697 fd,
1698 on,
1699 ctx->ctx_async_queue, ret));
1700
1701 return ret;
1702}
1703
1704static int
1705pfm_fasync(int fd, struct file *filp, int on)
1706{
1707 pfm_context_t *ctx;
1708 int ret;
1709
1710 if (PFM_IS_FILE(filp) == 0) {
1711 printk(KERN_ERR "perfmon: pfm_fasync bad magic [%d]\n", task_pid_nr(current));
1712 return -EBADF;
1713 }
1714
1715 ctx = (pfm_context_t *)filp->private_data;
1716 if (ctx == NULL) {
1717 printk(KERN_ERR "perfmon: pfm_fasync NULL ctx [%d]\n", task_pid_nr(current));
1718 return -EBADF;
1719 }
1720 /*
1721 * we cannot mask interrupts during this call because this
1722 * may go to sleep if memory is not readily available.
1723 *
1724 * We are protected from the context disappearing by the get_fd()/put_fd()
1725 * done in caller. Serialization of this function is ensured by caller.
1726 */
1727 ret = pfm_do_fasync(fd, filp, ctx, on);
1728
1729
1730 DPRINT(("pfm_fasync called on ctx_fd=%d on=%d async_queue=%p ret=%d\n",
1731 fd,
1732 on,
1733 ctx->ctx_async_queue, ret));
1734
1735 return ret;
1736}
1737
1738#ifdef CONFIG_SMP
1739/*
1740 * this function is exclusively called from pfm_close().
1741 * The context is not protected at that time, nor are interrupts
1742 * on the remote CPU. That's necessary to avoid deadlocks.
1743 */
1744static void
1745pfm_syswide_force_stop(void *info)
1746{
1747 pfm_context_t *ctx = (pfm_context_t *)info;
1748 struct pt_regs *regs = task_pt_regs(current);
1749 struct task_struct *owner;
1750 unsigned long flags;
1751 int ret;
1752
1753 if (ctx->ctx_cpu != smp_processor_id()) {
1754 printk(KERN_ERR "perfmon: pfm_syswide_force_stop for CPU%d but on CPU%d\n",
1755 ctx->ctx_cpu,
1756 smp_processor_id());
1757 return;
1758 }
1759 owner = GET_PMU_OWNER();
1760 if (owner != ctx->ctx_task) {
1761 printk(KERN_ERR "perfmon: pfm_syswide_force_stop CPU%d unexpected owner [%d] instead of [%d]\n",
1762 smp_processor_id(),
1763 task_pid_nr(owner), task_pid_nr(ctx->ctx_task));
1764 return;
1765 }
1766 if (GET_PMU_CTX() != ctx) {
1767 printk(KERN_ERR "perfmon: pfm_syswide_force_stop CPU%d unexpected ctx %p instead of %p\n",
1768 smp_processor_id(),
1769 GET_PMU_CTX(), ctx);
1770 return;
1771 }
1772
1773 DPRINT(("on CPU%d forcing system wide stop for [%d]\n", smp_processor_id(), task_pid_nr(ctx->ctx_task)));
1774 /*
1775 * the context is already protected in pfm_close(), we simply
1776 * need to mask interrupts to avoid a PMU interrupt race on
1777 * this CPU
1778 */
1779 local_irq_save(flags);
1780
1781 ret = pfm_context_unload(ctx, NULL, 0, regs);
1782 if (ret) {
1783 DPRINT(("context_unload returned %d\n", ret));
1784 }
1785
1786 /*
1787 * unmask interrupts, PMU interrupts are now spurious here
1788 */
1789 local_irq_restore(flags);
1790}
1791
1792static void
1793pfm_syswide_cleanup_other_cpu(pfm_context_t *ctx)
1794{
1795 int ret;
1796
1797 DPRINT(("calling CPU%d for cleanup\n", ctx->ctx_cpu));
1798 ret = smp_call_function_single(ctx->ctx_cpu, pfm_syswide_force_stop, ctx, 0, 1);
1799 DPRINT(("called CPU%d for cleanup ret=%d\n", ctx->ctx_cpu, ret));
1800}
1801#endif /* CONFIG_SMP */
1802
1803/*
1804 * called for each close(). Partially free resources.
1805 * When caller is self-monitoring, the context is unloaded.
1806 */
1807static int
1808pfm_flush(struct file *filp, fl_owner_t id)
1809{
1810 pfm_context_t *ctx;
1811 struct task_struct *task;
1812 struct pt_regs *regs;
1813 unsigned long flags;
1814 unsigned long smpl_buf_size = 0UL;
1815 void *smpl_buf_vaddr = NULL;
1816 int state, is_system;
1817
1818 if (PFM_IS_FILE(filp) == 0) {
1819 DPRINT(("bad magic for\n"));
1820 return -EBADF;
1821 }
1822
1823 ctx = (pfm_context_t *)filp->private_data;
1824 if (ctx == NULL) {
1825 printk(KERN_ERR "perfmon: pfm_flush: NULL ctx [%d]\n", task_pid_nr(current));
1826 return -EBADF;
1827 }
1828
1829 /*
1830 * remove our file from the async queue, if we use this mode.
1831 * This can be done without the context being protected. We come
1832 * here when the context has become unreachable by other tasks.
1833 *
1834 * We may still have active monitoring at this point and we may
1835 * end up in pfm_overflow_handler(). However, fasync_helper()
1836 * operates with interrupts disabled and it cleans up the
1837 * queue. If the PMU handler is called prior to entering
1838 * fasync_helper() then it will send a signal. If it is
1839 * invoked after, it will find an empty queue and no
1840 * signal will be sent. In both case, we are safe
1841 */
1842 if (filp->f_flags & FASYNC) {
1843 DPRINT(("cleaning up async_queue=%p\n", ctx->ctx_async_queue));
1844 pfm_do_fasync (-1, filp, ctx, 0);
1845 }
1846
1847 PROTECT_CTX(ctx, flags);
1848
1849 state = ctx->ctx_state;
1850 is_system = ctx->ctx_fl_system;
1851
1852 task = PFM_CTX_TASK(ctx);
1853 regs = task_pt_regs(task);
1854
1855 DPRINT(("ctx_state=%d is_current=%d\n",
1856 state,
1857 task == current ? 1 : 0));
1858
1859 /*
1860 * if state == UNLOADED, then task is NULL
1861 */
1862
1863 /*
1864 * we must stop and unload because we are losing access to the context.
1865 */
1866 if (task == current) {
1867#ifdef CONFIG_SMP
1868 /*
1869 * the task IS the owner but it migrated to another CPU: that's bad
1870 * but we must handle this cleanly. Unfortunately, the kernel does
1871 * not provide a mechanism to block migration (while the context is loaded).
1872 *
1873 * We need to release the resource on the ORIGINAL cpu.
1874 */
1875 if (is_system && ctx->ctx_cpu != smp_processor_id()) {
1876
1877 DPRINT(("should be running on CPU%d\n", ctx->ctx_cpu));
1878 /*
1879 * keep context protected but unmask interrupt for IPI
1880 */
1881 local_irq_restore(flags);
1882
1883 pfm_syswide_cleanup_other_cpu(ctx);
1884
1885 /*
1886 * restore interrupt masking
1887 */
1888 local_irq_save(flags);
1889
1890 /*
1891 * context is unloaded at this point
1892 */
1893 } else
1894#endif /* CONFIG_SMP */
1895 {
1896
1897 DPRINT(("forcing unload\n"));
1898 /*
1899 * stop and unload, returning with state UNLOADED
1900 * and session unreserved.
1901 */
1902 pfm_context_unload(ctx, NULL, 0, regs);
1903
1904 DPRINT(("ctx_state=%d\n", ctx->ctx_state));
1905 }
1906 }
1907
1908 /*
1909 * remove virtual mapping, if any, for the calling task.
1910	 * cannot reset the ctx field until the last user calls close().
1911 *
1912 * ctx_smpl_vaddr must never be cleared because it is needed
1913 * by every task with access to the context
1914 *
1915 * When called from do_exit(), the mm context is gone already, therefore
1916 * mm is NULL, i.e., the VMA is already gone and we do not have to
1917 * do anything here
1918 */
1919 if (ctx->ctx_smpl_vaddr && current->mm) {
1920 smpl_buf_vaddr = ctx->ctx_smpl_vaddr;
1921 smpl_buf_size = ctx->ctx_smpl_size;
1922 }
1923
1924 UNPROTECT_CTX(ctx, flags);
1925
1926 /*
1927 * if there was a mapping, then we systematically remove it
1928 * at this point. Cannot be done inside critical section
1929	 * because some VM functions re-enable interrupts.
1930 *
1931 */
1932 if (smpl_buf_vaddr) pfm_remove_smpl_mapping(current, smpl_buf_vaddr, smpl_buf_size);
1933
1934 return 0;
1935}
1936/*
1937 * called either on explicit close() or from exit_files().
1938 * Only the LAST user of the file gets to this point, i.e., it is
1939 * called only ONCE.
1940 *
1941 * IMPORTANT: we get called ONLY when the refcnt on the file gets to zero
1942 * (fput()), i.e., the last task to access the file. Nobody else can access the
1943 * file at this point.
1944 *
1945 * When called from exit_files(), the VMA has been freed because exit_mm()
1946 * is executed before exit_files().
1947 *
1948 * When called from exit_files(), the current task is not yet ZOMBIE but we
1949 * flush the PMU state to the context.
1950 */
1951static int
1952pfm_close(struct inode *inode, struct file *filp)
1953{
1954 pfm_context_t *ctx;
1955 struct task_struct *task;
1956 struct pt_regs *regs;
1957 DECLARE_WAITQUEUE(wait, current);
1958 unsigned long flags;
1959 unsigned long smpl_buf_size = 0UL;
1960 void *smpl_buf_addr = NULL;
1961 int free_possible = 1;
1962 int state, is_system;
1963
1964 DPRINT(("pfm_close called private=%p\n", filp->private_data));
1965
1966 if (PFM_IS_FILE(filp) == 0) {
1967 DPRINT(("bad magic\n"));
1968 return -EBADF;
1969 }
1970
1971 ctx = (pfm_context_t *)filp->private_data;
1972 if (ctx == NULL) {
19c5870c 1973 printk(KERN_ERR "perfmon: pfm_close: NULL ctx [%d]\n", task_pid_nr(current));
1974 return -EBADF;
1975 }
1976
1977 PROTECT_CTX(ctx, flags);
1978
1979 state = ctx->ctx_state;
1980 is_system = ctx->ctx_fl_system;
1981
1982 task = PFM_CTX_TASK(ctx);
6450578f 1983 regs = task_pt_regs(task);
1984
1985 DPRINT(("ctx_state=%d is_current=%d\n",
1986 state,
1987 task == current ? 1 : 0));
1988
1989 /*
1990 * if task == current, then pfm_flush() unloaded the context
1991 */
1992 if (state == PFM_CTX_UNLOADED) goto doit;
1993
1994 /*
1995 * context is loaded/masked and task != current, we need to
1996 * either force an unload or go zombie
1997 */
1998
1999 /*
2000 * The task is currently blocked or will block after an overflow.
2001	 * We must force it to wake up to get out of the
2002 * MASKED state and transition to the unloaded state by itself.
2003 *
2004 * This situation is only possible for per-task mode
2005 */
2006 if (state == PFM_CTX_MASKED && CTX_OVFL_NOBLOCK(ctx) == 0) {
2007
2008 /*
2009 * set a "partial" zombie state to be checked
2010		 * upon return from the completion wait in pfm_handle_work().
2011 *
2012 * We cannot use the ZOMBIE state, because it is checked
2013		 * by pfm_load_regs() which is called upon wakeup from that wait.
2014 * In such case, it would free the context and then we would
2015 * return to pfm_handle_work() which would access the
2016 * stale context. Instead, we set a flag invisible to pfm_load_regs()
2017 * but visible to pfm_handle_work().
2018 *
2019 * For some window of time, we have a zombie context with
2020 * ctx_state = MASKED and not ZOMBIE
2021 */
2022 ctx->ctx_fl_going_zombie = 1;
2023
2024 /*
2025 * force task to wake up from MASKED state
2026 */
60f1c444 2027 complete(&ctx->ctx_restart_done);
2028
2029 DPRINT(("waking up ctx_state=%d\n", state));
2030
2031 /*
2032 * put ourself to sleep waiting for the other
2033 * task to report completion
2034 *
2035 * the context is protected by mutex, therefore there
2036 * is no risk of being notified of completion before
2037		 * being actually on the waitq.
2038 */
2039 set_current_state(TASK_INTERRUPTIBLE);
2040 add_wait_queue(&ctx->ctx_zombieq, &wait);
2041
2042 UNPROTECT_CTX(ctx, flags);
2043
2044 /*
2045 * XXX: check for signals :
2046 * - ok for explicit close
2047 * - not ok when coming from exit_files()
2048 */
2049 schedule();
2050
2051
2052 PROTECT_CTX(ctx, flags);
2053
2054
2055 remove_wait_queue(&ctx->ctx_zombieq, &wait);
2056 set_current_state(TASK_RUNNING);
2057
2058 /*
2059 * context is unloaded at this point
2060 */
2061		DPRINT(("after zombie wakeup ctx_state=%d\n", state));
2062 }
2063 else if (task != current) {
2064#ifdef CONFIG_SMP
2065 /*
2066 * switch context to zombie state
2067 */
2068 ctx->ctx_state = PFM_CTX_ZOMBIE;
2069
19c5870c 2070 DPRINT(("zombie ctx for [%d]\n", task_pid_nr(task)));
2071 /*
2072 * cannot free the context on the spot. deferred until
2073 * the task notices the ZOMBIE state
2074 */
2075 free_possible = 0;
2076#else
2077 pfm_context_unload(ctx, NULL, 0, regs);
2078#endif
2079 }
2080
2081doit:
2082 /* reload state, may have changed during opening of critical section */
2083 state = ctx->ctx_state;
2084
2085 /*
2086 * the context is still attached to a task (possibly current)
2087 * we cannot destroy it right now
2088 */
2089
2090 /*
2091 * we must free the sampling buffer right here because
2092 * we cannot rely on it being cleaned up later by the
2093 * monitored task. It is not possible to free vmalloc'ed
2094 * memory in pfm_load_regs(). Instead, we remove the buffer
2095	 * now. Should there be a subsequent PMU overflow originally
2096	 * meant for sampling, it will be converted to spurious
2097	 * and that's fine because the monitoring tool is gone anyway.
2098 */
2099 if (ctx->ctx_smpl_hdr) {
2100 smpl_buf_addr = ctx->ctx_smpl_hdr;
2101 smpl_buf_size = ctx->ctx_smpl_size;
2102 /* no more sampling */
2103 ctx->ctx_smpl_hdr = NULL;
2104 ctx->ctx_fl_is_sampling = 0;
2105 }
2106
2107 DPRINT(("ctx_state=%d free_possible=%d addr=%p size=%lu\n",
2108 state,
2109 free_possible,
2110 smpl_buf_addr,
2111 smpl_buf_size));
2112
2113 if (smpl_buf_addr) pfm_exit_smpl_buffer(ctx->ctx_buf_fmt);
2114
2115 /*
2116	 * UNLOADED means that the session has already been unreserved.
2117 */
2118 if (state == PFM_CTX_ZOMBIE) {
2119 pfm_unreserve_session(ctx, ctx->ctx_fl_system , ctx->ctx_cpu);
2120 }
2121
2122 /*
2123 * disconnect file descriptor from context must be done
2124 * before we unlock.
2125 */
2126 filp->private_data = NULL;
2127
2128 /*
72fdbdce 2129 * if we free on the spot, the context is now completely unreachable
2130	 * from the caller's side. The monitored task side is also cut, so we
2131	 * can safely free it.
2132 *
2133 * If we have a deferred free, only the caller side is disconnected.
2134 */
2135 UNPROTECT_CTX(ctx, flags);
2136
2137 /*
2138 * All memory free operations (especially for vmalloc'ed memory)
2139 * MUST be done with interrupts ENABLED.
2140 */
2141 if (smpl_buf_addr) pfm_rvfree(smpl_buf_addr, smpl_buf_size);
2142
2143 /*
2144 * return the memory used by the context
2145 */
2146 if (free_possible) pfm_context_free(ctx);
2147
2148 return 0;
2149}
2150
2151static int
2152pfm_no_open(struct inode *irrelevant, struct file *dontcare)
2153{
2154 DPRINT(("pfm_no_open called\n"));
2155 return -ENXIO;
2156}
2157
2158
2159
5dfe4c96 2160static const struct file_operations pfm_file_ops = {
2161 .llseek = no_llseek,
2162 .read = pfm_read,
2163 .write = pfm_write,
2164 .poll = pfm_poll,
2165 .ioctl = pfm_ioctl,
2166 .open = pfm_no_open, /* special open code to disallow open via /proc */
2167 .fasync = pfm_fasync,
2168 .release = pfm_close,
2169 .flush = pfm_flush
2170};
2171
2172static int
2173pfmfs_delete_dentry(struct dentry *dentry)
2174{
2175 return 1;
2176}
2177
2178static struct dentry_operations pfmfs_dentry_operations = {
2179 .d_delete = pfmfs_delete_dentry,
2180};
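/*
 * Note: having d_delete always return 1 means pfmfs dentries are
 * destroyed as soon as their last reference is dropped, so the
 * anonymous "[ino]" entries created in pfm_alloc_fd() below do not
 * linger in the dcache once the context file is closed.
 */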
2181
2182
2183static int
2184pfm_alloc_fd(struct file **cfile)
2185{
2186 int fd, ret = 0;
2187 struct file *file = NULL;
2188 struct inode * inode;
2189 char name[32];
2190 struct qstr this;
2191
2192 fd = get_unused_fd();
2193 if (fd < 0) return -ENFILE;
2194
2195 ret = -ENFILE;
2196
2197 file = get_empty_filp();
2198 if (!file) goto out;
2199
2200 /*
2201 * allocate a new inode
2202 */
2203 inode = new_inode(pfmfs_mnt->mnt_sb);
2204 if (!inode) goto out;
2205
2206 DPRINT(("new inode ino=%ld @%p\n", inode->i_ino, inode));
2207
2208 inode->i_mode = S_IFCHR|S_IRUGO;
2209 inode->i_uid = current->fsuid;
2210 inode->i_gid = current->fsgid;
2211
2212 sprintf(name, "[%lu]", inode->i_ino);
2213 this.name = name;
2214 this.len = strlen(name);
2215 this.hash = inode->i_ino;
2216
2217 ret = -ENOMEM;
2218
2219 /*
2220 * allocate a new dcache entry
2221 */
2222 file->f_path.dentry = d_alloc(pfmfs_mnt->mnt_sb->s_root, &this);
2223 if (!file->f_path.dentry) goto out;
1da177e4 2224
b66ffad9 2225 file->f_path.dentry->d_op = &pfmfs_dentry_operations;
1da177e4 2226
2227 d_add(file->f_path.dentry, inode);
2228 file->f_path.mnt = mntget(pfmfs_mnt);
2229 file->f_mapping = inode->i_mapping;
2230
2231 file->f_op = &pfm_file_ops;
2232 file->f_mode = FMODE_READ;
2233 file->f_flags = O_RDONLY;
2234 file->f_pos = 0;
2235
2236 /*
2237 * may have to delay until context is attached?
2238 */
2239 fd_install(fd, file);
2240
2241 /*
2242 * the file structure we will use
2243 */
2244 *cfile = file;
2245
2246 return fd;
2247out:
2248 if (file) put_filp(file);
2249 put_unused_fd(fd);
2250 return ret;
2251}
2252
2253static void
2254pfm_free_fd(int fd, struct file *file)
2255{
2256 struct files_struct *files = current->files;
4fb3a538 2257 struct fdtable *fdt;
2258
2259 /*
2260	 * there is no fd_uninstall(), so we do it here
2261 */
2262 spin_lock(&files->file_lock);
4fb3a538 2263 fdt = files_fdtable(files);
badf1662 2264 rcu_assign_pointer(fdt->fd[fd], NULL);
2265 spin_unlock(&files->file_lock);
2266
2267 if (file)
2268 put_filp(file);
2269 put_unused_fd(fd);
2270}
2271
2272static int
2273pfm_remap_buffer(struct vm_area_struct *vma, unsigned long buf, unsigned long addr, unsigned long size)
2274{
2275 DPRINT(("CPU%d buf=0x%lx addr=0x%lx size=%ld\n", smp_processor_id(), buf, addr, size));
2276
2277 while (size > 0) {
2278 unsigned long pfn = ia64_tpa(buf) >> PAGE_SHIFT;
2279
2280
2281 if (remap_pfn_range(vma, addr, pfn, PAGE_SIZE, PAGE_READONLY))
2282 return -ENOMEM;
2283
2284 addr += PAGE_SIZE;
2285 buf += PAGE_SIZE;
2286 size -= PAGE_SIZE;
2287 }
2288 return 0;
2289}
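/*
 * Illustrative note: the buffer comes from pfm_rvmalloc(), so it is only
 * virtually contiguous; each page may sit in a different physical frame,
 * which is why ia64_tpa() and remap_pfn_range() are applied one page at a
 * time. As a worked example (assuming a 16KB kernel page size), a 100KB
 * request is PAGE_ALIGN()ed to 112KB and the loop above performs 7
 * remap_pfn_range() calls.
 */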
2290
2291/*
2292 * allocate a sampling buffer and remaps it into the user address space of the task
2293 */
2294static int
41d5e5d7 2295pfm_smpl_buffer_alloc(struct task_struct *task, struct file *filp, pfm_context_t *ctx, unsigned long rsize, void **user_vaddr)
2296{
2297 struct mm_struct *mm = task->mm;
2298 struct vm_area_struct *vma = NULL;
2299 unsigned long size;
2300 void *smpl_buf;
2301
2302
2303 /*
2304	 * the fixed header + requested size, aligned to a page boundary
2305 */
2306 size = PAGE_ALIGN(rsize);
2307
2308 DPRINT(("sampling buffer rsize=%lu size=%lu bytes\n", rsize, size));
2309
2310 /*
2311 * check requested size to avoid Denial-of-service attacks
2312 * XXX: may have to refine this test
2313 * Check against address space limit.
2314 *
2315 * if ((mm->total_vm << PAGE_SHIFT) + len> task->rlim[RLIMIT_AS].rlim_cur)
2316 * return -ENOMEM;
2317 */
2318 if (size > task->signal->rlim[RLIMIT_MEMLOCK].rlim_cur)
2319 return -ENOMEM;
2320
2321 /*
2322 * We do the easy to undo allocations first.
2323 *
2324 * pfm_rvmalloc(), clears the buffer, so there is no leak
2325 */
2326 smpl_buf = pfm_rvmalloc(size);
2327 if (smpl_buf == NULL) {
2328 DPRINT(("Can't allocate sampling buffer\n"));
2329 return -ENOMEM;
2330 }
2331
2332 DPRINT(("smpl_buf @%p\n", smpl_buf));
2333
2334 /* allocate vma */
c3762229 2335 vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
2336 if (!vma) {
2337 DPRINT(("Cannot allocate vma\n"));
2338 goto error_kmem;
2339 }
2340
2341 /*
2342 * partially initialize the vma for the sampling buffer
2343 */
2344 vma->vm_mm = mm;
41d5e5d7 2345 vma->vm_file = filp;
2346	vma->vm_flags = VM_READ | VM_MAYREAD | VM_RESERVED;
2347 vma->vm_page_prot = PAGE_READONLY; /* XXX may need to change */
2348
2349 /*
2350 * Now we have everything we need and we can initialize
2351 * and connect all the data structures
2352 */
2353
2354 ctx->ctx_smpl_hdr = smpl_buf;
2355 ctx->ctx_smpl_size = size; /* aligned size */
2356
2357 /*
2358 * Let's do the difficult operations next.
2359 *
2360 * now we atomically find some area in the address space and
2361 * remap the buffer in it.
2362 */
2363 down_write(&task->mm->mmap_sem);
2364
2365 /* find some free area in address space, must have mmap sem held */
2366 vma->vm_start = pfm_get_unmapped_area(NULL, 0, size, 0, MAP_PRIVATE|MAP_ANONYMOUS, 0);
2367 if (vma->vm_start == 0UL) {
2368 DPRINT(("Cannot find unmapped area for size %ld\n", size));
2369 up_write(&task->mm->mmap_sem);
2370 goto error;
2371 }
2372 vma->vm_end = vma->vm_start + size;
2373 vma->vm_pgoff = vma->vm_start >> PAGE_SHIFT;
2374
2375 DPRINT(("aligned size=%ld, hdr=%p mapped @0x%lx\n", size, ctx->ctx_smpl_hdr, vma->vm_start));
2376
2377 /* can only be applied to current task, need to have the mm semaphore held when called */
2378 if (pfm_remap_buffer(vma, (unsigned long)smpl_buf, vma->vm_start, size)) {
2379 DPRINT(("Can't remap buffer\n"));
2380 up_write(&task->mm->mmap_sem);
2381 goto error;
2382 }
2383
2384 get_file(filp);
2385
2386 /*
2387 * now insert the vma in the vm list for the process, must be
2388 * done with mmap lock held
2389 */
2390 insert_vm_struct(mm, vma);
2391
2392 mm->total_vm += size >> PAGE_SHIFT;
2393 vm_stat_account(vma->vm_mm, vma->vm_flags, vma->vm_file,
2394 vma_pages(vma));
2395 up_write(&task->mm->mmap_sem);
2396
2397 /*
2398 * keep track of user level virtual address
2399 */
2400 ctx->ctx_smpl_vaddr = (void *)vma->vm_start;
2401 *(unsigned long *)user_vaddr = vma->vm_start;
2402
2403 return 0;
2404
2405error:
2406 kmem_cache_free(vm_area_cachep, vma);
2407error_kmem:
2408 pfm_rvfree(smpl_buf, size);
2409
2410 return -ENOMEM;
2411}
2412
2413/*
2414 * XXX: do something better here
2415 */
2416static int
2417pfm_bad_permissions(struct task_struct *task)
2418{
2419 /* inspired by ptrace_attach() */
2420 DPRINT(("cur: uid=%d gid=%d task: euid=%d suid=%d uid=%d egid=%d sgid=%d\n",
2421 current->uid,
2422 current->gid,
2423 task->euid,
2424 task->suid,
2425 task->uid,
2426 task->egid,
2427 task->sgid));
2428
2429 return ((current->uid != task->euid)
2430 || (current->uid != task->suid)
2431 || (current->uid != task->uid)
2432 || (current->gid != task->egid)
2433 || (current->gid != task->sgid)
2434 || (current->gid != task->gid)) && !capable(CAP_SYS_PTRACE);
2435}
2436
2437static int
2438pfarg_is_sane(struct task_struct *task, pfarg_context_t *pfx)
2439{
2440 int ctx_flags;
2441
2442 /* valid signal */
2443
2444 ctx_flags = pfx->ctx_flags;
2445
2446 if (ctx_flags & PFM_FL_SYSTEM_WIDE) {
2447
2448 /*
2449 * cannot block in this mode
2450 */
2451 if (ctx_flags & PFM_FL_NOTIFY_BLOCK) {
2452 DPRINT(("cannot use blocking mode when in system wide monitoring\n"));
2453 return -EINVAL;
2454 }
2455 } else {
2456 }
2457 /* probably more to add here */
2458
2459 return 0;
2460}
2461
2462static int
41d5e5d7 2463pfm_setup_buffer_fmt(struct task_struct *task, struct file *filp, pfm_context_t *ctx, unsigned int ctx_flags,
2464 unsigned int cpu, pfarg_context_t *arg)
2465{
2466 pfm_buffer_fmt_t *fmt = NULL;
2467 unsigned long size = 0UL;
2468 void *uaddr = NULL;
2469 void *fmt_arg = NULL;
2470 int ret = 0;
2471#define PFM_CTXARG_BUF_ARG(a) (pfm_buffer_fmt_t *)(a+1)
2472
2473 /* invoke and lock buffer format, if found */
2474 fmt = pfm_find_buffer_fmt(arg->ctx_smpl_buf_id);
2475 if (fmt == NULL) {
19c5870c 2476 DPRINT(("[%d] cannot find buffer format\n", task_pid_nr(task)));
2477 return -EINVAL;
2478 }
2479
2480 /*
2481 * buffer argument MUST be contiguous to pfarg_context_t
2482 */
2483 if (fmt->fmt_arg_size) fmt_arg = PFM_CTXARG_BUF_ARG(arg);
2484
2485 ret = pfm_buf_fmt_validate(fmt, task, ctx_flags, cpu, fmt_arg);
2486
19c5870c 2487 DPRINT(("[%d] after validate(0x%x,%d,%p)=%d\n", task_pid_nr(task), ctx_flags, cpu, fmt_arg, ret));
2488
2489 if (ret) goto error;
2490
2491 /* link buffer format and context */
2492 ctx->ctx_buf_fmt = fmt;
2493
2494 /*
2495 * check if buffer format wants to use perfmon buffer allocation/mapping service
2496 */
2497 ret = pfm_buf_fmt_getsize(fmt, task, ctx_flags, cpu, fmt_arg, &size);
2498 if (ret) goto error;
2499
2500 if (size) {
2501 /*
2502 * buffer is always remapped into the caller's address space
2503 */
41d5e5d7 2504 ret = pfm_smpl_buffer_alloc(current, filp, ctx, size, &uaddr);
2505 if (ret) goto error;
2506
2507 /* keep track of user address of buffer */
2508 arg->ctx_smpl_vaddr = uaddr;
2509 }
2510 ret = pfm_buf_fmt_init(fmt, task, ctx->ctx_smpl_hdr, ctx_flags, cpu, fmt_arg);
2511
2512error:
2513 return ret;
2514}
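/*
 * Summary of the buffer-format plug-in handshake driven above: the format
 * identified by ctx_smpl_buf_id is first validated (pfm_buf_fmt_validate),
 * then asked for its buffer size (pfm_buf_fmt_getsize); if it requests one,
 * the buffer is allocated and remapped into the caller's address space by
 * pfm_smpl_buffer_alloc(), and finally the format's init callback runs
 * against the freshly mapped header (pfm_buf_fmt_init).
 */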
2515
2516static void
2517pfm_reset_pmu_state(pfm_context_t *ctx)
2518{
2519 int i;
2520
2521 /*
2522 * install reset values for PMC.
2523 */
2524 for (i=1; PMC_IS_LAST(i) == 0; i++) {
2525 if (PMC_IS_IMPL(i) == 0) continue;
2526 ctx->ctx_pmcs[i] = PMC_DFL_VAL(i);
2527 DPRINT(("pmc[%d]=0x%lx\n", i, ctx->ctx_pmcs[i]));
2528 }
2529 /*
2530	 * PMD registers are set to 0UL when the context is memset()
2531 */
2532
2533 /*
2534	 * On context switch restore, we must restore ALL pmc and ALL pmd even
2535 * when they are not actively used by the task. In UP, the incoming process
2536 * may otherwise pick up left over PMC, PMD state from the previous process.
2537 * As opposed to PMD, stale PMC can cause harm to the incoming
2538 * process because they may change what is being measured.
2539 * Therefore, we must systematically reinstall the entire
2540 * PMC state. In SMP, the same thing is possible on the
2541	 * same CPU but also between 2 CPUs.
2542 *
2543 * The problem with PMD is information leaking especially
2544 * to user level when psr.sp=0
2545 *
2546 * There is unfortunately no easy way to avoid this problem
2547	 * on either UP or SMP. This definitely slows down the
2548 * pfm_load_regs() function.
2549 */
2550
2551 /*
2552 * bitmask of all PMCs accessible to this context
2553 *
2554 * PMC0 is treated differently.
2555 */
2556 ctx->ctx_all_pmcs[0] = pmu_conf->impl_pmcs[0] & ~0x1;
2557
2558 /*
72fdbdce 2559 * bitmask of all PMDs that are accessible to this context
2560 */
2561 ctx->ctx_all_pmds[0] = pmu_conf->impl_pmds[0];
2562
2563 DPRINT(("<%d> all_pmcs=0x%lx all_pmds=0x%lx\n", ctx->ctx_fd, ctx->ctx_all_pmcs[0],ctx->ctx_all_pmds[0]));
2564
2565 /*
2566 * useful in case of re-enable after disable
2567 */
2568 ctx->ctx_used_ibrs[0] = 0UL;
2569 ctx->ctx_used_dbrs[0] = 0UL;
2570}
2571
2572static int
2573pfm_ctx_getsize(void *arg, size_t *sz)
2574{
2575 pfarg_context_t *req = (pfarg_context_t *)arg;
2576 pfm_buffer_fmt_t *fmt;
2577
2578 *sz = 0;
2579
2580 if (!pfm_uuid_cmp(req->ctx_smpl_buf_id, pfm_null_uuid)) return 0;
2581
2582 fmt = pfm_find_buffer_fmt(req->ctx_smpl_buf_id);
2583 if (fmt == NULL) {
2584 DPRINT(("cannot find buffer format\n"));
2585 return -EINVAL;
2586 }
2587 /* get just enough to copy in user parameters */
2588 *sz = fmt->fmt_arg_size;
2589 DPRINT(("arg_size=%lu\n", *sz));
2590
2591 return 0;
2592}
2593
2594
2595
2596/*
2597 * cannot attach if :
2598 * - kernel task
2599 * - task not owned by caller
2600 * - task incompatible with context mode
2601 */
2602static int
2603pfm_task_incompatible(pfm_context_t *ctx, struct task_struct *task)
2604{
2605 /*
2606	 * no kernel task or task not owned by caller
2607 */
2608 if (task->mm == NULL) {
19c5870c 2609		DPRINT(("task [%d] has no memory context (kernel thread)\n", task_pid_nr(task)));
2610 return -EPERM;
2611 }
2612 if (pfm_bad_permissions(task)) {
19c5870c 2613 DPRINT(("no permission to attach to [%d]\n", task_pid_nr(task)));
2614 return -EPERM;
2615 }
2616 /*
2617 * cannot block in self-monitoring mode
2618 */
2619 if (CTX_OVFL_NOBLOCK(ctx) == 0 && task == current) {
19c5870c 2620 DPRINT(("cannot load a blocking context on self for [%d]\n", task_pid_nr(task)));
2621 return -EINVAL;
2622 }
2623
2624 if (task->exit_state == EXIT_ZOMBIE) {
19c5870c 2625 DPRINT(("cannot attach to zombie task [%d]\n", task_pid_nr(task)));
2626 return -EBUSY;
2627 }
2628
2629 /*
2630 * always ok for self
2631 */
2632 if (task == current) return 0;
2633
21498223 2634 if (!task_is_stopped_or_traced(task)) {
19c5870c 2635 DPRINT(("cannot attach to non-stopped task [%d] state=%ld\n", task_pid_nr(task), task->state));
2636 return -EBUSY;
2637 }
2638 /*
2639 * make sure the task is off any CPU
2640 */
2641 wait_task_inactive(task);
2642
2643 /* more to come... */
2644
2645 return 0;
2646}
2647
2648static int
2649pfm_get_task(pfm_context_t *ctx, pid_t pid, struct task_struct **task)
2650{
2651 struct task_struct *p = current;
2652 int ret;
2653
2654 /* XXX: need to add more checks here */
2655 if (pid < 2) return -EPERM;
2656
2657 if (pid != current->pid) {
2658
2659 read_lock(&tasklist_lock);
2660
2661 p = find_task_by_pid(pid);
2662
2663 /* make sure task cannot go away while we operate on it */
2664 if (p) get_task_struct(p);
2665
2666 read_unlock(&tasklist_lock);
2667
2668 if (p == NULL) return -ESRCH;
2669 }
2670
2671 ret = pfm_task_incompatible(ctx, p);
2672 if (ret == 0) {
2673 *task = p;
2674 } else if (p != current) {
2675 pfm_put_task(p);
2676 }
2677 return ret;
2678}
2679
2680
2681
2682static int
2683pfm_context_create(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
2684{
2685 pfarg_context_t *req = (pfarg_context_t *)arg;
2686 struct file *filp;
2687 int ctx_flags;
2688 int ret;
2689
2690 /* let's check the arguments first */
2691 ret = pfarg_is_sane(current, req);
2692 if (ret < 0) return ret;
2693
2694 ctx_flags = req->ctx_flags;
2695
2696 ret = -ENOMEM;
2697
2698 ctx = pfm_context_alloc();
2699 if (!ctx) goto error;
2700
2701 ret = pfm_alloc_fd(&filp);
2702 if (ret < 0) goto error_file;
2703
2704 req->ctx_fd = ctx->ctx_fd = ret;
2705
2706 /*
2707 * attach context to file
2708 */
2709 filp->private_data = ctx;
2710
2711 /*
2712 * does the user want to sample?
2713 */
2714 if (pfm_uuid_cmp(req->ctx_smpl_buf_id, pfm_null_uuid)) {
41d5e5d7 2715 ret = pfm_setup_buffer_fmt(current, filp, ctx, ctx_flags, 0, req);
2716 if (ret) goto buffer_error;
2717 }
2718
2719 /*
2720 * init context protection lock
2721 */
2722 spin_lock_init(&ctx->ctx_lock);
2723
2724 /*
2725 * context is unloaded
2726 */
2727 ctx->ctx_state = PFM_CTX_UNLOADED;
2728
2729 /*
2730 * initialization of context's flags
2731 */
2732 ctx->ctx_fl_block = (ctx_flags & PFM_FL_NOTIFY_BLOCK) ? 1 : 0;
2733 ctx->ctx_fl_system = (ctx_flags & PFM_FL_SYSTEM_WIDE) ? 1: 0;
2734 ctx->ctx_fl_is_sampling = ctx->ctx_buf_fmt ? 1 : 0; /* assume record() is defined */
2735 ctx->ctx_fl_no_msg = (ctx_flags & PFM_FL_OVFL_NO_MSG) ? 1: 0;
2736 /*
2737 * will move to set properties
2738 * ctx->ctx_fl_excl_idle = (ctx_flags & PFM_FL_EXCL_IDLE) ? 1: 0;
2739 */
2740
2741 /*
2742	 * init restart completion to the not-done state
2743 */
60f1c444 2744 init_completion(&ctx->ctx_restart_done);
2745
2746 /*
2747 * activation is used in SMP only
2748 */
2749 ctx->ctx_last_activation = PFM_INVALID_ACTIVATION;
2750 SET_LAST_CPU(ctx, -1);
2751
2752 /*
2753 * initialize notification message queue
2754 */
2755 ctx->ctx_msgq_head = ctx->ctx_msgq_tail = 0;
2756 init_waitqueue_head(&ctx->ctx_msgq_wait);
2757 init_waitqueue_head(&ctx->ctx_zombieq);
2758
2759 DPRINT(("ctx=%p flags=0x%x system=%d notify_block=%d excl_idle=%d no_msg=%d ctx_fd=%d \n",
2760 ctx,
2761 ctx_flags,
2762 ctx->ctx_fl_system,
2763 ctx->ctx_fl_block,
2764 ctx->ctx_fl_excl_idle,
2765 ctx->ctx_fl_no_msg,
2766 ctx->ctx_fd));
2767
2768 /*
2769 * initialize soft PMU state
2770 */
2771 pfm_reset_pmu_state(ctx);
2772
2773 return 0;
2774
2775buffer_error:
2776 pfm_free_fd(ctx->ctx_fd, filp);
2777
2778 if (ctx->ctx_buf_fmt) {
2779 pfm_buf_fmt_exit(ctx->ctx_buf_fmt, current, NULL, regs);
2780 }
2781error_file:
2782 pfm_context_free(ctx);
2783
2784error:
2785 return ret;
2786}
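/*
 * A minimal userspace sketch of the create path above (assuming the ia64
 * perfmonctl(2) wrapper and the pfarg_context_t layout from
 * <asm/perfmon.h>; error handling trimmed):
 *
 *	pfarg_context_t req;
 *	memset(&req, 0, sizeof(req));
 *	req.ctx_flags = 0;				// per-task, non-blocking
 *	if (perfmonctl(0, PFM_CREATE_CONTEXT, &req, 1) == -1)
 *		err(1, "PFM_CREATE_CONTEXT");
 *	int fd = req.ctx_fd;				// from pfm_alloc_fd()
 *
 * The descriptor is then used for every subsequent PFM_* command and its
 * final close() drives pfm_flush()/pfm_close() above.
 */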
2787
2788static inline unsigned long
2789pfm_new_counter_value (pfm_counter_t *reg, int is_long_reset)
2790{
2791 unsigned long val = is_long_reset ? reg->long_reset : reg->short_reset;
2792 unsigned long new_seed, old_seed = reg->seed, mask = reg->mask;
2793 extern unsigned long carta_random32 (unsigned long seed);
2794
2795 if (reg->flags & PFM_REGFL_RANDOM) {
2796 new_seed = carta_random32(old_seed);
2797 val -= (old_seed & mask); /* counter values are negative numbers! */
2798 if ((mask >> 32) != 0)
2799 /* construct a full 64-bit random value: */
2800 new_seed |= carta_random32(old_seed >> 32) << 32;
2801 reg->seed = new_seed;
2802 }
2803 reg->lval = val;
2804 return val;
2805}
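/*
 * Illustrative example of the randomization above: counters are programmed
 * with negative values and count up toward overflow, so subtracting
 * (seed & mask) lengthens the sampling period by a pseudo-random amount.
 * With short_reset = -100000 and mask = 0xfff, successive periods vary
 * between 100000 and 104095 events, which helps avoid lockstep sampling
 * artifacts.
 */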
2806
2807static void
2808pfm_reset_regs_masked(pfm_context_t *ctx, unsigned long *ovfl_regs, int is_long_reset)
2809{
2810 unsigned long mask = ovfl_regs[0];
2811 unsigned long reset_others = 0UL;
2812 unsigned long val;
2813 int i;
2814
2815 /*
2816 * now restore reset value on sampling overflowed counters
2817 */
2818 mask >>= PMU_FIRST_COUNTER;
2819 for(i = PMU_FIRST_COUNTER; mask; i++, mask >>= 1) {
2820
2821 if ((mask & 0x1UL) == 0UL) continue;
2822
2823 ctx->ctx_pmds[i].val = val = pfm_new_counter_value(ctx->ctx_pmds+ i, is_long_reset);
2824 reset_others |= ctx->ctx_pmds[i].reset_pmds[0];
2825
2826 DPRINT_ovfl((" %s reset ctx_pmds[%d]=%lx\n", is_long_reset ? "long" : "short", i, val));
2827 }
2828
2829 /*
2830 * Now take care of resetting the other registers
2831 */
2832 for(i = 0; reset_others; i++, reset_others >>= 1) {
2833
2834 if ((reset_others & 0x1) == 0) continue;
2835
2836 ctx->ctx_pmds[i].val = val = pfm_new_counter_value(ctx->ctx_pmds + i, is_long_reset);
2837
2838 DPRINT_ovfl(("%s reset_others pmd[%d]=%lx\n",
2839 is_long_reset ? "long" : "short", i, val));
2840 }
2841}
2842
2843static void
2844pfm_reset_regs(pfm_context_t *ctx, unsigned long *ovfl_regs, int is_long_reset)
2845{
2846 unsigned long mask = ovfl_regs[0];
2847 unsigned long reset_others = 0UL;
2848 unsigned long val;
2849 int i;
2850
2851 DPRINT_ovfl(("ovfl_regs=0x%lx is_long_reset=%d\n", ovfl_regs[0], is_long_reset));
2852
2853 if (ctx->ctx_state == PFM_CTX_MASKED) {
2854 pfm_reset_regs_masked(ctx, ovfl_regs, is_long_reset);
2855 return;
2856 }
2857
2858 /*
2859 * now restore reset value on sampling overflowed counters
2860 */
2861 mask >>= PMU_FIRST_COUNTER;
2862 for(i = PMU_FIRST_COUNTER; mask; i++, mask >>= 1) {
2863
2864 if ((mask & 0x1UL) == 0UL) continue;
2865
2866 val = pfm_new_counter_value(ctx->ctx_pmds+ i, is_long_reset);
2867 reset_others |= ctx->ctx_pmds[i].reset_pmds[0];
2868
2869 DPRINT_ovfl((" %s reset ctx_pmds[%d]=%lx\n", is_long_reset ? "long" : "short", i, val));
2870
2871 pfm_write_soft_counter(ctx, i, val);
2872 }
2873
2874 /*
2875 * Now take care of resetting the other registers
2876 */
2877 for(i = 0; reset_others; i++, reset_others >>= 1) {
2878
2879 if ((reset_others & 0x1) == 0) continue;
2880
2881 val = pfm_new_counter_value(ctx->ctx_pmds + i, is_long_reset);
2882
2883 if (PMD_IS_COUNTING(i)) {
2884 pfm_write_soft_counter(ctx, i, val);
2885 } else {
2886 ia64_set_pmd(i, val);
2887 }
2888 DPRINT_ovfl(("%s reset_others pmd[%d]=%lx\n",
2889 is_long_reset ? "long" : "short", i, val));
2890 }
2891 ia64_srlz_d();
2892}
2893
2894static int
2895pfm_write_pmcs(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
2896{
2897 struct task_struct *task;
2898 pfarg_reg_t *req = (pfarg_reg_t *)arg;
2899 unsigned long value, pmc_pm;
2900 unsigned long smpl_pmds, reset_pmds, impl_pmds;
2901 unsigned int cnum, reg_flags, flags, pmc_type;
2902 int i, can_access_pmu = 0, is_loaded, is_system, expert_mode;
2903 int is_monitor, is_counting, state;
2904 int ret = -EINVAL;
2905 pfm_reg_check_t wr_func;
2906#define PFM_CHECK_PMC_PM(x, y, z) ((x)->ctx_fl_system ^ PMC_PM(y, z))
2907
2908 state = ctx->ctx_state;
2909 is_loaded = state == PFM_CTX_LOADED ? 1 : 0;
2910 is_system = ctx->ctx_fl_system;
2911 task = ctx->ctx_task;
2912 impl_pmds = pmu_conf->impl_pmds[0];
2913
2914 if (state == PFM_CTX_ZOMBIE) return -EINVAL;
2915
2916 if (is_loaded) {
2917 /*
2918 * In system wide and when the context is loaded, access can only happen
2919 * when the caller is running on the CPU being monitored by the session.
2920 * It does not have to be the owner (ctx_task) of the context per se.
2921 */
2922 if (is_system && ctx->ctx_cpu != smp_processor_id()) {
2923 DPRINT(("should be running on CPU%d\n", ctx->ctx_cpu));
2924 return -EBUSY;
2925 }
2926 can_access_pmu = GET_PMU_OWNER() == task || is_system ? 1 : 0;
2927 }
2928 expert_mode = pfm_sysctl.expert_mode;
2929
2930 for (i = 0; i < count; i++, req++) {
2931
2932 cnum = req->reg_num;
2933 reg_flags = req->reg_flags;
2934 value = req->reg_value;
2935 smpl_pmds = req->reg_smpl_pmds[0];
2936 reset_pmds = req->reg_reset_pmds[0];
2937 flags = 0;
2938
2939
2940 if (cnum >= PMU_MAX_PMCS) {
2941 DPRINT(("pmc%u is invalid\n", cnum));
2942 goto error;
2943 }
2944
2945 pmc_type = pmu_conf->pmc_desc[cnum].type;
2946 pmc_pm = (value >> pmu_conf->pmc_desc[cnum].pm_pos) & 0x1;
2947 is_counting = (pmc_type & PFM_REG_COUNTING) == PFM_REG_COUNTING ? 1 : 0;
2948 is_monitor = (pmc_type & PFM_REG_MONITOR) == PFM_REG_MONITOR ? 1 : 0;
2949
2950 /*
2951 * we reject all non implemented PMC as well
2952 * as attempts to modify PMC[0-3] which are used
2953 * as status registers by the PMU
2954 */
2955 if ((pmc_type & PFM_REG_IMPL) == 0 || (pmc_type & PFM_REG_CONTROL) == PFM_REG_CONTROL) {
2956 DPRINT(("pmc%u is unimplemented or no-access pmc_type=%x\n", cnum, pmc_type));
2957 goto error;
2958 }
2959 wr_func = pmu_conf->pmc_desc[cnum].write_check;
2960 /*
2961 * If the PMC is a monitor, then if the value is not the default:
2962 * - system-wide session: PMCx.pm=1 (privileged monitor)
2963 * - per-task : PMCx.pm=0 (user monitor)
2964 */
2965 if (is_monitor && value != PMC_DFL_VAL(cnum) && is_system ^ pmc_pm) {
2966 DPRINT(("pmc%u pmc_pm=%lu is_system=%d\n",
2967 cnum,
2968 pmc_pm,
2969 is_system));
2970 goto error;
2971 }
2972
2973 if (is_counting) {
2974 /*
2975 * enforce generation of overflow interrupt. Necessary on all
2976 * CPUs.
2977 */
2978 value |= 1 << PMU_PMC_OI;
2979
2980 if (reg_flags & PFM_REGFL_OVFL_NOTIFY) {
2981 flags |= PFM_REGFL_OVFL_NOTIFY;
2982 }
2983
2984 if (reg_flags & PFM_REGFL_RANDOM) flags |= PFM_REGFL_RANDOM;
2985
2986 /* verify validity of smpl_pmds */
2987 if ((smpl_pmds & impl_pmds) != smpl_pmds) {
2988 DPRINT(("invalid smpl_pmds 0x%lx for pmc%u\n", smpl_pmds, cnum));
2989 goto error;
2990 }
2991
2992 /* verify validity of reset_pmds */
2993 if ((reset_pmds & impl_pmds) != reset_pmds) {
2994 DPRINT(("invalid reset_pmds 0x%lx for pmc%u\n", reset_pmds, cnum));
2995 goto error;
2996 }
2997 } else {
2998 if (reg_flags & (PFM_REGFL_OVFL_NOTIFY|PFM_REGFL_RANDOM)) {
2999 DPRINT(("cannot set ovfl_notify or random on pmc%u\n", cnum));
3000 goto error;
3001 }
3002			/* eventid on non-counting monitors is ignored */
3003 }
3004
3005 /*
3006 * execute write checker, if any
3007 */
3008 if (likely(expert_mode == 0 && wr_func)) {
3009 ret = (*wr_func)(task, ctx, cnum, &value, regs);
3010 if (ret) goto error;
3011 ret = -EINVAL;
3012 }
3013
3014 /*
3015 * no error on this register
3016 */
3017 PFM_REG_RETFLAG_SET(req->reg_flags, 0);
3018
3019 /*
3020 * Now we commit the changes to the software state
3021 */
3022
3023 /*
3024 * update overflow information
3025 */
3026 if (is_counting) {
3027 /*
3028 * full flag update each time a register is programmed
3029 */
3030 ctx->ctx_pmds[cnum].flags = flags;
3031
3032 ctx->ctx_pmds[cnum].reset_pmds[0] = reset_pmds;
3033 ctx->ctx_pmds[cnum].smpl_pmds[0] = smpl_pmds;
3034 ctx->ctx_pmds[cnum].eventid = req->reg_smpl_eventid;
3035
3036 /*
3037 * Mark all PMDS to be accessed as used.
3038 *
3039 * We do not keep track of PMC because we have to
3040 * systematically restore ALL of them.
3041 *
3042 * We do not update the used_monitors mask, because
3043			 * if we have not programmed them, then they will be in
3044			 * a quiescent state, therefore we will not need to
3045			 * mask/restore them when the context is MASKED.
3046 */
3047 CTX_USED_PMD(ctx, reset_pmds);
3048 CTX_USED_PMD(ctx, smpl_pmds);
3049 /*
3050 * make sure we do not try to reset on
3051 * restart because we have established new values
3052 */
3053 if (state == PFM_CTX_MASKED) ctx->ctx_ovfl_regs[0] &= ~1UL << cnum;
3054 }
3055 /*
3056 * Needed in case the user does not initialize the equivalent
3057 * PMD. Clearing is done indirectly via pfm_reset_pmu_state() so there is no
3058 * possible leak here.
3059 */
3060 CTX_USED_PMD(ctx, pmu_conf->pmc_desc[cnum].dep_pmd[0]);
3061
3062 /*
3063 * keep track of the monitor PMC that we are using.
3064 * we save the value of the pmc in ctx_pmcs[] and if
3065 * the monitoring is not stopped for the context we also
3066 * place it in the saved state area so that it will be
3067 * picked up later by the context switch code.
3068 *
3069 * The value in ctx_pmcs[] can only be changed in pfm_write_pmcs().
3070 *
35589a8f 3071 * The value in th_pmcs[] may be modified on overflow, i.e., when
3072 * monitoring needs to be stopped.
3073 */
3074 if (is_monitor) CTX_USED_MONITOR(ctx, 1UL << cnum);
3075
3076 /*
3077 * update context state
3078 */
3079 ctx->ctx_pmcs[cnum] = value;
3080
3081 if (is_loaded) {
3082 /*
3083 * write thread state
3084 */
35589a8f 3085 if (is_system == 0) ctx->th_pmcs[cnum] = value;
3086
3087 /*
3088 * write hardware register if we can
3089 */
3090 if (can_access_pmu) {
3091 ia64_set_pmc(cnum, value);
3092 }
3093#ifdef CONFIG_SMP
3094 else {
3095 /*
3096 * per-task SMP only here
3097 *
3098 * we are guaranteed that the task is not running on the other CPU,
3099 * we indicate that this PMD will need to be reloaded if the task
3100 * is rescheduled on the CPU it ran last on.
3101 */
3102 ctx->ctx_reload_pmcs[0] |= 1UL << cnum;
3103 }
3104#endif
3105 }
3106
3107 DPRINT(("pmc[%u]=0x%lx ld=%d apmu=%d flags=0x%x all_pmcs=0x%lx used_pmds=0x%lx eventid=%ld smpl_pmds=0x%lx reset_pmds=0x%lx reloads_pmcs=0x%lx used_monitors=0x%lx ovfl_regs=0x%lx\n",
3108 cnum,
3109 value,
3110 is_loaded,
3111 can_access_pmu,
3112 flags,
3113 ctx->ctx_all_pmcs[0],
3114 ctx->ctx_used_pmds[0],
3115 ctx->ctx_pmds[cnum].eventid,
3116 smpl_pmds,
3117 reset_pmds,
3118 ctx->ctx_reload_pmcs[0],
3119 ctx->ctx_used_monitors[0],
3120 ctx->ctx_ovfl_regs[0]));
3121 }
3122
3123 /*
3124 * make sure the changes are visible
3125 */
3126 if (can_access_pmu) ia64_srlz_d();
3127
3128 return 0;
3129error:
3130 PFM_REG_RETFLAG_SET(req->reg_flags, PFM_REG_RETFL_EINVAL);
3131 return ret;
3132}
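/*
 * A hedged userspace sketch of how this entry point is typically reached
 * (register number and event encoding below are examples only):
 *
 *	pfarg_reg_t pc;
 *	memset(&pc, 0, sizeof(pc));
 *	pc.reg_num   = 4;			// a generic counting PMC
 *	pc.reg_value = event_encoding;		// from CPU-specific event tables
 *	if (perfmonctl(fd, PFM_WRITE_PMCS, &pc, 1) == -1)
 *		err(1, "PFM_WRITE_PMCS");
 *
 * sys_perfmonctl() copies the request in and takes PROTECT_CTX() before
 * dispatching here, so the checks above run with the context locked.
 */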
3133
3134static int
3135pfm_write_pmds(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
3136{
3137 struct task_struct *task;
3138 pfarg_reg_t *req = (pfarg_reg_t *)arg;
3139 unsigned long value, hw_value, ovfl_mask;
3140 unsigned int cnum;
3141 int i, can_access_pmu = 0, state;
3142 int is_counting, is_loaded, is_system, expert_mode;
3143 int ret = -EINVAL;
3144 pfm_reg_check_t wr_func;
3145
3146
3147 state = ctx->ctx_state;
3148 is_loaded = state == PFM_CTX_LOADED ? 1 : 0;
3149 is_system = ctx->ctx_fl_system;
3150 ovfl_mask = pmu_conf->ovfl_val;
3151 task = ctx->ctx_task;
3152
3153 if (unlikely(state == PFM_CTX_ZOMBIE)) return -EINVAL;
3154
3155 /*
3156	 * on both UP and SMP, we can only write to the PMD when the task is
3157 * the owner of the local PMU.
3158 */
3159 if (likely(is_loaded)) {
3160 /*
3161 * In system wide and when the context is loaded, access can only happen
3162 * when the caller is running on the CPU being monitored by the session.
3163 * It does not have to be the owner (ctx_task) of the context per se.
3164 */
3165 if (unlikely(is_system && ctx->ctx_cpu != smp_processor_id())) {
3166 DPRINT(("should be running on CPU%d\n", ctx->ctx_cpu));
3167 return -EBUSY;
3168 }
3169 can_access_pmu = GET_PMU_OWNER() == task || is_system ? 1 : 0;
3170 }
3171 expert_mode = pfm_sysctl.expert_mode;
3172
3173 for (i = 0; i < count; i++, req++) {
3174
3175 cnum = req->reg_num;
3176 value = req->reg_value;
3177
3178 if (!PMD_IS_IMPL(cnum)) {
3179 DPRINT(("pmd[%u] is unimplemented or invalid\n", cnum));
3180 goto abort_mission;
3181 }
3182 is_counting = PMD_IS_COUNTING(cnum);
3183 wr_func = pmu_conf->pmd_desc[cnum].write_check;
3184
3185 /*
3186 * execute write checker, if any
3187 */
3188 if (unlikely(expert_mode == 0 && wr_func)) {
3189 unsigned long v = value;
3190
3191 ret = (*wr_func)(task, ctx, cnum, &v, regs);
3192 if (ret) goto abort_mission;
3193
3194 value = v;
3195 ret = -EINVAL;
3196 }
3197
3198 /*
3199 * no error on this register
3200 */
3201 PFM_REG_RETFLAG_SET(req->reg_flags, 0);
3202
3203 /*
3204 * now commit changes to software state
3205 */
3206 hw_value = value;
3207
3208 /*
3209 * update virtualized (64bits) counter
3210 */
3211 if (is_counting) {
3212 /*
3213 * write context state
3214 */
3215 ctx->ctx_pmds[cnum].lval = value;
3216
3217 /*
3218			 * when the context is loaded we use the split value
3219 */
3220 if (is_loaded) {
3221 hw_value = value & ovfl_mask;
3222 value = value & ~ovfl_mask;
3223 }
3224 }
3225 /*
3226 * update reset values (not just for counters)
3227 */
3228 ctx->ctx_pmds[cnum].long_reset = req->reg_long_reset;
3229 ctx->ctx_pmds[cnum].short_reset = req->reg_short_reset;
3230
3231 /*
3232 * update randomization parameters (not just for counters)
3233 */
3234 ctx->ctx_pmds[cnum].seed = req->reg_random_seed;
3235 ctx->ctx_pmds[cnum].mask = req->reg_random_mask;
3236
3237 /*
3238 * update context value
3239 */
3240 ctx->ctx_pmds[cnum].val = value;
3241
3242 /*
3243 * Keep track of what we use
3244 *
3245 * We do not keep track of PMC because we have to
3246 * systematically restore ALL of them.
3247 */
3248 CTX_USED_PMD(ctx, PMD_PMD_DEP(cnum));
3249
3250 /*
3251 * mark this PMD register used as well
3252 */
3253 CTX_USED_PMD(ctx, RDEP(cnum));
3254
3255 /*
3256 * make sure we do not try to reset on
3257 * restart because we have established new values
3258 */
3259 if (is_counting && state == PFM_CTX_MASKED) {
3260 ctx->ctx_ovfl_regs[0] &= ~1UL << cnum;
3261 }
3262
3263 if (is_loaded) {
3264 /*
3265 * write thread state
3266 */
35589a8f 3267 if (is_system == 0) ctx->th_pmds[cnum] = hw_value;
3268
3269 /*
3270 * write hardware register if we can
3271 */
3272 if (can_access_pmu) {
3273 ia64_set_pmd(cnum, hw_value);
3274 } else {
3275#ifdef CONFIG_SMP
3276 /*
3277 * we are guaranteed that the task is not running on the other CPU,
3278 * we indicate that this PMD will need to be reloaded if the task
3279 * is rescheduled on the CPU it ran last on.
3280 */
3281 ctx->ctx_reload_pmds[0] |= 1UL << cnum;
3282#endif
3283 }
3284 }
3285
3286 DPRINT(("pmd[%u]=0x%lx ld=%d apmu=%d, hw_value=0x%lx ctx_pmd=0x%lx short_reset=0x%lx "
3287 "long_reset=0x%lx notify=%c seed=0x%lx mask=0x%lx used_pmds=0x%lx reset_pmds=0x%lx reload_pmds=0x%lx all_pmds=0x%lx ovfl_regs=0x%lx\n",
3288 cnum,
3289 value,
3290 is_loaded,
3291 can_access_pmu,
3292 hw_value,
3293 ctx->ctx_pmds[cnum].val,
3294 ctx->ctx_pmds[cnum].short_reset,
3295 ctx->ctx_pmds[cnum].long_reset,
3296 PMC_OVFL_NOTIFY(ctx, cnum) ? 'Y':'N',
3297 ctx->ctx_pmds[cnum].seed,
3298 ctx->ctx_pmds[cnum].mask,
3299 ctx->ctx_used_pmds[0],
3300 ctx->ctx_pmds[cnum].reset_pmds[0],
3301 ctx->ctx_reload_pmds[0],
3302 ctx->ctx_all_pmds[0],
3303 ctx->ctx_ovfl_regs[0]));
3304 }
3305
3306 /*
3307 * make changes visible
3308 */
3309 if (can_access_pmu) ia64_srlz_d();
3310
3311 return 0;
3312
3313abort_mission:
3314 /*
3315 * for now, we have only one possibility for error
3316 */
3317 PFM_REG_RETFLAG_SET(req->reg_flags, PFM_REG_RETFL_EINVAL);
3318 return ret;
3319}
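/*
 * Summary of the 64-bit counter virtualization used above: the hardware
 * PMD only implements the low bits covered by pmu_conf->ovfl_val, so a
 * 64-bit user value is split into hw_value = value & ovfl_mask (written
 * to the register / th_pmds[]) and a software part kept in ctx_pmds[].val.
 * pfm_read_pmds() later rebuilds the full value as
 * (hardware bits & ovfl_mask) + ctx_pmds[].val, and the overflow handler
 * advances the software part each time the hardware counter wraps.
 */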
3320
3321/*
3322 * By way of PROTECT_CTX(), interrupts are masked while we are in this function.
3323 * Therefore we know we do not have to worry about the PMU overflow interrupt. If an
3324 * interrupt is delivered during the call, it will be kept pending until we leave, making
3325 * it appear as if it had been generated at the UNPROTECT_CTX(). At least we are
3326 * guaranteed to return consistent data to the user, it may simply be old. It is not
3327 * trivial to handle the overflow while inside the call because we may end up in
3328 * some module sampling buffer code, causing deadlocks.
3329 */
3330static int
3331pfm_read_pmds(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
3332{
3333 struct task_struct *task;
3334 unsigned long val = 0UL, lval, ovfl_mask, sval;
3335 pfarg_reg_t *req = (pfarg_reg_t *)arg;
3336 unsigned int cnum, reg_flags = 0;
3337 int i, can_access_pmu = 0, state;
3338 int is_loaded, is_system, is_counting, expert_mode;
3339 int ret = -EINVAL;
3340 pfm_reg_check_t rd_func;
3341
3342 /*
3343 * access is possible when loaded only for
3344 * self-monitoring tasks or in UP mode
3345 */
3346
3347 state = ctx->ctx_state;
3348 is_loaded = state == PFM_CTX_LOADED ? 1 : 0;
3349 is_system = ctx->ctx_fl_system;
3350 ovfl_mask = pmu_conf->ovfl_val;
3351 task = ctx->ctx_task;
3352
3353 if (state == PFM_CTX_ZOMBIE) return -EINVAL;
3354
3355 if (likely(is_loaded)) {
3356 /*
3357 * In system wide and when the context is loaded, access can only happen
3358 * when the caller is running on the CPU being monitored by the session.
3359 * It does not have to be the owner (ctx_task) of the context per se.
3360 */
3361 if (unlikely(is_system && ctx->ctx_cpu != smp_processor_id())) {
3362 DPRINT(("should be running on CPU%d\n", ctx->ctx_cpu));
3363 return -EBUSY;
3364 }
3365 /*
3366 * this can be true when not self-monitoring only in UP
3367 */
3368 can_access_pmu = GET_PMU_OWNER() == task || is_system ? 1 : 0;
3369
3370 if (can_access_pmu) ia64_srlz_d();
3371 }
3372 expert_mode = pfm_sysctl.expert_mode;
3373
3374 DPRINT(("ld=%d apmu=%d ctx_state=%d\n",
3375 is_loaded,
3376 can_access_pmu,
3377 state));
3378
3379 /*
3380 * on both UP and SMP, we can only read the PMD from the hardware register when
3381 * the task is the owner of the local PMU.
3382 */
3383
3384 for (i = 0; i < count; i++, req++) {
3385
3386 cnum = req->reg_num;
3387 reg_flags = req->reg_flags;
3388
3389 if (unlikely(!PMD_IS_IMPL(cnum))) goto error;
3390 /*
3391		 * we can only read the registers that we use. That includes
72fdbdce 3392		 * the ones we explicitly initialize AND the ones we want included
3393 * in the sampling buffer (smpl_regs).
3394 *
3395 * Having this restriction allows optimization in the ctxsw routine
3396 * without compromising security (leaks)
3397 */
3398 if (unlikely(!CTX_IS_USED_PMD(ctx, cnum))) goto error;
3399
3400 sval = ctx->ctx_pmds[cnum].val;
3401 lval = ctx->ctx_pmds[cnum].lval;
3402 is_counting = PMD_IS_COUNTING(cnum);
3403
3404 /*
3405 * If the task is not the current one, then we check if the
3406 * PMU state is still in the local live register due to lazy ctxsw.
3407 * If true, then we read directly from the registers.
3408 */
3409 if (can_access_pmu){
3410 val = ia64_get_pmd(cnum);
3411 } else {
3412 /*
3413 * context has been saved
3414 * if context is zombie, then task does not exist anymore.
3415 * In this case, we use the full value saved in the context (pfm_flush_regs()).
3416 */
35589a8f 3417 val = is_loaded ? ctx->th_pmds[cnum] : 0UL;
3418 }
3419 rd_func = pmu_conf->pmd_desc[cnum].read_check;
3420
3421 if (is_counting) {
3422 /*
3423 * XXX: need to check for overflow when loaded
3424 */
3425 val &= ovfl_mask;
3426 val += sval;
3427 }
3428
3429 /*
3430 * execute read checker, if any
3431 */
3432 if (unlikely(expert_mode == 0 && rd_func)) {
3433 unsigned long v = val;
3434 ret = (*rd_func)(ctx->ctx_task, ctx, cnum, &v, regs);
3435 if (ret) goto error;
3436 val = v;
3437 ret = -EINVAL;
3438 }
3439
3440 PFM_REG_RETFLAG_SET(reg_flags, 0);
3441
3442 DPRINT(("pmd[%u]=0x%lx\n", cnum, val));
3443
3444 /*
3445 * update register return value, abort all if problem during copy.
3446 * we only modify the reg_flags field. no check mode is fine because
3447 * access has been verified upfront in sys_perfmonctl().
3448 */
3449 req->reg_value = val;
3450 req->reg_flags = reg_flags;
3451 req->reg_last_reset_val = lval;
3452 }
3453
3454 return 0;
3455
3456error:
3457 PFM_REG_RETFLAG_SET(req->reg_flags, PFM_REG_RETFL_EINVAL);
3458 return ret;
3459}
3460
3461int
3462pfm_mod_write_pmcs(struct task_struct *task, void *req, unsigned int nreq, struct pt_regs *regs)
3463{
3464 pfm_context_t *ctx;
3465
3466 if (req == NULL) return -EINVAL;
3467
3468 ctx = GET_PMU_CTX();
3469
3470 if (ctx == NULL) return -EINVAL;
3471
3472 /*
3473 * for now limit to current task, which is enough when calling
3474 * from overflow handler
3475 */
3476 if (task != current && ctx->ctx_fl_system == 0) return -EBUSY;
3477
3478 return pfm_write_pmcs(ctx, req, nreq, regs);
3479}
3480EXPORT_SYMBOL(pfm_mod_write_pmcs);
3481
3482int
3483pfm_mod_read_pmds(struct task_struct *task, void *req, unsigned int nreq, struct pt_regs *regs)
3484{
3485 pfm_context_t *ctx;
3486
3487 if (req == NULL) return -EINVAL;
3488
3489 ctx = GET_PMU_CTX();
3490
3491 if (ctx == NULL) return -EINVAL;
3492
3493 /*
3494 * for now limit to current task, which is enough when calling
3495 * from overflow handler
3496 */
3497 if (task != current && ctx->ctx_fl_system == 0) return -EBUSY;
3498
3499 return pfm_read_pmds(ctx, req, nreq, regs);
3500}
3501EXPORT_SYMBOL(pfm_mod_read_pmds);
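/*
 * A hedged sketch of how a sampling-format module might use the two
 * exports above from its overflow handler (handler name and arguments
 * are illustrative only):
 *
 *	static int my_fmt_handler(struct task_struct *task, ..., struct pt_regs *regs)
 *	{
 *		pfarg_reg_t rd = { .reg_num = 32 };	// example PMD
 *
 *		if (pfm_mod_read_pmds(current, &rd, 1, regs))
 *			return -EINVAL;
 *		...
 *	}
 *
 * Both helpers reject task != current for per-task contexts, which is
 * precisely the situation inside an overflow handler.
 */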
3502
3503/*
3504 * Only call this function when a process is trying to
3505 * write the debug registers (reading is always allowed)
3506 */
3507int
3508pfm_use_debug_registers(struct task_struct *task)
3509{
3510 pfm_context_t *ctx = task->thread.pfm_context;
3511 unsigned long flags;
3512 int ret = 0;
3513
3514 if (pmu_conf->use_rr_dbregs == 0) return 0;
3515
19c5870c 3516 DPRINT(("called for [%d]\n", task_pid_nr(task)));
3517
3518 /*
3519 * do it only once
3520 */
3521 if (task->thread.flags & IA64_THREAD_DBG_VALID) return 0;
3522
3523 /*
3524 * Even on SMP, we do not need to use an atomic here because
3525 * the only way in is via ptrace() and this is possible only when the
3526 * process is stopped. Even in the case where the ctxsw out is not totally
3527 * completed by the time we come here, there is no way the 'stopped' process
3528 * could be in the middle of fiddling with the pfm_write_ibr_dbr() routine.
3529 * So this is always safe.
3530 */
3531 if (ctx && ctx->ctx_fl_using_dbreg == 1) return -1;
3532
3533 LOCK_PFS(flags);
3534
3535 /*
3536 * We cannot allow setting breakpoints when system wide monitoring
3537 * sessions are using the debug registers.
3538 */
3539	if (pfm_sessions.pfs_sys_use_dbregs > 0)
3540 ret = -1;
3541 else
3542 pfm_sessions.pfs_ptrace_use_dbregs++;
3543
3544 DPRINT(("ptrace_use_dbregs=%u sys_use_dbregs=%u by [%d] ret = %d\n",
3545 pfm_sessions.pfs_ptrace_use_dbregs,
3546 pfm_sessions.pfs_sys_use_dbregs,
19c5870c 3547 task_pid_nr(task), ret));
3548
3549 UNLOCK_PFS(flags);
3550
3551 return ret;
3552}
3553
3554/*
3555 * This function is called for every task that exits with the
3556 * IA64_THREAD_DBG_VALID set. This indicates a task which was
3557 * able to use the debug registers for debugging purposes via
3558 * ptrace(). Therefore we know it was not using them for
3559 * performance monitoring, so we only decrement the number
3560 * of "ptraced" debug register users to keep the count up to date
3561 */
3562int
3563pfm_release_debug_registers(struct task_struct *task)
3564{
3565 unsigned long flags;
3566 int ret;
3567
3568 if (pmu_conf->use_rr_dbregs == 0) return 0;
3569
3570 LOCK_PFS(flags);
3571 if (pfm_sessions.pfs_ptrace_use_dbregs == 0) {
19c5870c 3572 printk(KERN_ERR "perfmon: invalid release for [%d] ptrace_use_dbregs=0\n", task_pid_nr(task));
3573 ret = -1;
3574 } else {
3575 pfm_sessions.pfs_ptrace_use_dbregs--;
3576 ret = 0;
3577 }
3578 UNLOCK_PFS(flags);
3579
3580 return ret;
3581}
3582
3583static int
3584pfm_restart(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
3585{
3586 struct task_struct *task;
3587 pfm_buffer_fmt_t *fmt;
3588 pfm_ovfl_ctrl_t rst_ctrl;
3589 int state, is_system;
3590 int ret = 0;
3591
3592 state = ctx->ctx_state;
3593 fmt = ctx->ctx_buf_fmt;
3594 is_system = ctx->ctx_fl_system;
3595 task = PFM_CTX_TASK(ctx);
3596
3597 switch(state) {
3598 case PFM_CTX_MASKED:
3599 break;
3600 case PFM_CTX_LOADED:
3601 if (CTX_HAS_SMPL(ctx) && fmt->fmt_restart_active) break;
3602 /* fall through */
3603 case PFM_CTX_UNLOADED:
3604 case PFM_CTX_ZOMBIE:
3605 DPRINT(("invalid state=%d\n", state));
3606 return -EBUSY;
3607 default:
3608 DPRINT(("state=%d, cannot operate (no active_restart handler)\n", state));
3609 return -EINVAL;
3610 }
3611
3612 /*
3613 * In system wide and when the context is loaded, access can only happen
3614 * when the caller is running on the CPU being monitored by the session.
3615 * It does not have to be the owner (ctx_task) of the context per se.
3616 */
3617 if (is_system && ctx->ctx_cpu != smp_processor_id()) {
3618 DPRINT(("should be running on CPU%d\n", ctx->ctx_cpu));
3619 return -EBUSY;
3620 }
3621
3622 /* sanity check */
3623 if (unlikely(task == NULL)) {
19c5870c 3624 printk(KERN_ERR "perfmon: [%d] pfm_restart no task\n", task_pid_nr(current));
3625 return -EINVAL;
3626 }
3627
3628 if (task == current || is_system) {
3629
3630 fmt = ctx->ctx_buf_fmt;
3631
3632 DPRINT(("restarting self %d ovfl=0x%lx\n",
19c5870c 3633 task_pid_nr(task),
3634 ctx->ctx_ovfl_regs[0]));
3635
3636 if (CTX_HAS_SMPL(ctx)) {
3637
3638 prefetch(ctx->ctx_smpl_hdr);
3639
3640 rst_ctrl.bits.mask_monitoring = 0;
3641 rst_ctrl.bits.reset_ovfl_pmds = 0;
3642
3643 if (state == PFM_CTX_LOADED)
3644 ret = pfm_buf_fmt_restart_active(fmt, task, &rst_ctrl, ctx->ctx_smpl_hdr, regs);
3645 else
3646 ret = pfm_buf_fmt_restart(fmt, task, &rst_ctrl, ctx->ctx_smpl_hdr, regs);
3647 } else {
3648 rst_ctrl.bits.mask_monitoring = 0;
3649 rst_ctrl.bits.reset_ovfl_pmds = 1;
3650 }
3651
3652 if (ret == 0) {
3653 if (rst_ctrl.bits.reset_ovfl_pmds)
3654 pfm_reset_regs(ctx, ctx->ctx_ovfl_regs, PFM_PMD_LONG_RESET);
3655
3656 if (rst_ctrl.bits.mask_monitoring == 0) {
19c5870c 3657 DPRINT(("resuming monitoring for [%d]\n", task_pid_nr(task)));
3658
3659 if (state == PFM_CTX_MASKED) pfm_restore_monitoring(task);
3660 } else {
19c5870c 3661 DPRINT(("keeping monitoring stopped for [%d]\n", task_pid_nr(task)));
3662
3663 // cannot use pfm_stop_monitoring(task, regs);
3664 }
3665 }
3666 /*
3667 * clear overflowed PMD mask to remove any stale information
3668 */
3669 ctx->ctx_ovfl_regs[0] = 0UL;
3670
3671 /*
3672 * back to LOADED state
3673 */
3674 ctx->ctx_state = PFM_CTX_LOADED;
3675
3676 /*
3677 * XXX: not really useful for self monitoring
3678 */
3679 ctx->ctx_fl_can_restart = 0;
3680
3681 return 0;
3682 }
3683
3684 /*
3685 * restart another task
3686 */
3687
3688 /*
3689 * When PFM_CTX_MASKED, we cannot issue a restart before the previous
3690 * one is seen by the task.
3691 */
3692 if (state == PFM_CTX_MASKED) {
3693 if (ctx->ctx_fl_can_restart == 0) return -EINVAL;
3694 /*
3695 * will prevent subsequent restart before this one is
3696 * seen by other task
3697 */
3698 ctx->ctx_fl_can_restart = 0;
3699 }
3700
3701 /*
3702	 * if blocking, then post the semaphore if PFM_CTX_MASKED, i.e.
3703 * the task is blocked or on its way to block. That's the normal
3704 * restart path. If the monitoring is not masked, then the task
3705 * can be actively monitoring and we cannot directly intervene.
3706 * Therefore we use the trap mechanism to catch the task and
3707 * force it to reset the buffer/reset PMDs.
3708 *
3709 * if non-blocking, then we ensure that the task will go into
3710 * pfm_handle_work() before returning to user mode.
3711 *
72fdbdce 3712 * We cannot explicitly reset another task, it MUST always
3713 * be done by the task itself. This works for system wide because
3714 * the tool that is controlling the session is logically doing
3715 * "self-monitoring".
3716 */
3717 if (CTX_OVFL_NOBLOCK(ctx) == 0 && state == PFM_CTX_MASKED) {
19c5870c 3718 DPRINT(("unblocking [%d] \n", task_pid_nr(task)));
60f1c444 3719 complete(&ctx->ctx_restart_done);
1da177e4 3720 } else {
19c5870c 3721 DPRINT(("[%d] armed exit trap\n", task_pid_nr(task)));
3722
3723 ctx->ctx_fl_trap_reason = PFM_TRAP_REASON_RESET;
3724
3725 PFM_SET_WORK_PENDING(task, 1);
3726
3727 pfm_set_task_notify(task);
3728
3729 /*
3730 * XXX: send reschedule if task runs on another CPU
3731 */
3732 }
3733 return 0;
3734}
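/*
 * A hedged sketch of the normal userspace restart sequence that lands in
 * this function (message type and sizes follow <asm/perfmon.h>):
 *
 *	pfm_msg_t msg;
 *	read(fd, &msg, sizeof(msg));		// overflow notification
 *	// ... process the sampling buffer ...
 *	perfmonctl(fd, PFM_RESTART, NULL, 0);
 *
 * For a self-monitoring or system-wide session the PMDs are reset right
 * here; otherwise the monitored task is unblocked (completion) or trapped
 * on its way back to user mode so that it performs the reset itself.
 */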
3735
3736static int
3737pfm_debug(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
3738{
3739 unsigned int m = *(unsigned int *)arg;
3740
3741 pfm_sysctl.debug = m == 0 ? 0 : 1;
3742
3743 printk(KERN_INFO "perfmon debugging %s (timing reset)\n", pfm_sysctl.debug ? "on" : "off");
3744
3745 if (m == 0) {
3746 memset(pfm_stats, 0, sizeof(pfm_stats));
3747 for(m=0; m < NR_CPUS; m++) pfm_stats[m].pfm_ovfl_intr_cycles_min = ~0UL;
3748 }
3749 return 0;
3750}
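/*
 * Note: this handler is reached through the PFM_DEBUG command; on typical
 * configurations the same flag is also exposed via the sysctl tree under
 * /proc/sys/kernel/perfmon/debug, which is the usual way to toggle the
 * DPRINT() output at run time.
 */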
3751
3752/*
3753 * arg can be NULL and count can be zero for this function
3754 */
3755static int
3756pfm_write_ibr_dbr(int mode, pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
3757{
3758 struct thread_struct *thread = NULL;
3759 struct task_struct *task;
3760 pfarg_dbreg_t *req = (pfarg_dbreg_t *)arg;
3761 unsigned long flags;
3762 dbreg_t dbreg;
3763 unsigned int rnum;
3764 int first_time;
3765 int ret = 0, state;
3766 int i, can_access_pmu = 0;
3767 int is_system, is_loaded;
3768
3769 if (pmu_conf->use_rr_dbregs == 0) return -EINVAL;
3770
3771 state = ctx->ctx_state;
3772 is_loaded = state == PFM_CTX_LOADED ? 1 : 0;
3773 is_system = ctx->ctx_fl_system;
3774 task = ctx->ctx_task;
3775
3776 if (state == PFM_CTX_ZOMBIE) return -EINVAL;
3777
3778 /*
3779	 * on both UP and SMP, we can only write to the debug registers when the task is
3780 * the owner of the local PMU.
3781 */
3782 if (is_loaded) {
3783 thread = &task->thread;
3784 /*
3785 * In system wide and when the context is loaded, access can only happen
3786 * when the caller is running on the CPU being monitored by the session.
3787 * It does not have to be the owner (ctx_task) of the context per se.
3788 */
3789 if (unlikely(is_system && ctx->ctx_cpu != smp_processor_id())) {
3790 DPRINT(("should be running on CPU%d\n", ctx->ctx_cpu));
3791 return -EBUSY;
3792 }
3793 can_access_pmu = GET_PMU_OWNER() == task || is_system ? 1 : 0;
3794 }
3795
3796 /*
3797 * we do not need to check for ipsr.db because we do clear ibr.x, dbr.r, and dbr.w
3798 * ensuring that no real breakpoint can be installed via this call.
3799 *
3800 * IMPORTANT: regs can be NULL in this function
3801 */
3802
3803 first_time = ctx->ctx_fl_using_dbreg == 0;
3804
3805 /*
3806 * don't bother if we are loaded and task is being debugged
3807 */
3808 if (is_loaded && (thread->flags & IA64_THREAD_DBG_VALID) != 0) {
19c5870c 3809 DPRINT(("debug registers already in use for [%d]\n", task_pid_nr(task)));
3810 return -EBUSY;
3811 }
3812
3813 /*
3814 * check for debug registers in system wide mode
3815 *
3816	 * Even though a check is done in pfm_context_load(),
3817 * we must repeat it here, in case the registers are
3818 * written after the context is loaded
3819 */
3820 if (is_loaded) {
3821 LOCK_PFS(flags);
3822
3823 if (first_time && is_system) {
3824 if (pfm_sessions.pfs_ptrace_use_dbregs)
3825 ret = -EBUSY;
3826 else
3827 pfm_sessions.pfs_sys_use_dbregs++;
3828 }
3829 UNLOCK_PFS(flags);
3830 }
3831
3832 if (ret != 0) return ret;
3833
3834 /*
3835 * mark ourself as user of the debug registers for
3836 * perfmon purposes.
3837 */
3838 ctx->ctx_fl_using_dbreg = 1;
3839
3840 /*
3841 * clear hardware registers to make sure we don't
3842 * pick up stale state.
3843 *
3844 * for a system wide session, we do not use
3845 * thread.dbr, thread.ibr because this process
3846 * never leaves the current CPU and the state
3847 * is shared by all processes running on it
3848 */
3849 if (first_time && can_access_pmu) {
19c5870c 3850 DPRINT(("[%d] clearing ibrs, dbrs\n", task_pid_nr(task)));
3851 for (i=0; i < pmu_conf->num_ibrs; i++) {
3852 ia64_set_ibr(i, 0UL);
3853 ia64_dv_serialize_instruction();
3854 }
3855 ia64_srlz_i();
3856 for (i=0; i < pmu_conf->num_dbrs; i++) {
3857 ia64_set_dbr(i, 0UL);
3858 ia64_dv_serialize_data();
3859 }
3860 ia64_srlz_d();
3861 }
3862
3863 /*
3864 * Now install the values into the registers
3865 */
3866 for (i = 0; i < count; i++, req++) {
3867
3868 rnum = req->dbreg_num;
3869 dbreg.val = req->dbreg_value;
3870
3871 ret = -EINVAL;
3872
3873 if ((mode == PFM_CODE_RR && rnum >= PFM_NUM_IBRS) || ((mode == PFM_DATA_RR) && rnum >= PFM_NUM_DBRS)) {
3874 DPRINT(("invalid register %u val=0x%lx mode=%d i=%d count=%d\n",
3875 rnum, dbreg.val, mode, i, count));
3876
3877 goto abort_mission;
3878 }
3879
3880 /*
3881 * make sure we do not install enabled breakpoint
3882 */
3883 if (rnum & 0x1) {
3884 if (mode == PFM_CODE_RR)
3885 dbreg.ibr.ibr_x = 0;
3886 else
3887 dbreg.dbr.dbr_r = dbreg.dbr.dbr_w = 0;
3888 }
3889
3890 PFM_REG_RETFLAG_SET(req->dbreg_flags, 0);
3891
3892 /*
3893 * Debug registers, just like PMC, can only be modified
3894	 * by a kernel call. Moreover, perfmon() accesses to those
3895 * registers are centralized in this routine. The hardware
3896 * does not modify the value of these registers, therefore,
3897 * if we save them as they are written, we can avoid having
3898 * to save them on context switch out. This is made possible
3899 * by the fact that when perfmon uses debug registers, ptrace()
3900 * won't be able to modify them concurrently.
3901 */
3902 if (mode == PFM_CODE_RR) {
3903 CTX_USED_IBR(ctx, rnum);
3904
3905 if (can_access_pmu) {
3906 ia64_set_ibr(rnum, dbreg.val);
3907 ia64_dv_serialize_instruction();
3908 }
3909
3910 ctx->ctx_ibrs[rnum] = dbreg.val;
3911
3912 DPRINT(("write ibr%u=0x%lx used_ibrs=0x%x ld=%d apmu=%d\n",
3913 rnum, dbreg.val, ctx->ctx_used_ibrs[0], is_loaded, can_access_pmu));
3914 } else {
3915 CTX_USED_DBR(ctx, rnum);
3916
3917 if (can_access_pmu) {
3918 ia64_set_dbr(rnum, dbreg.val);
3919 ia64_dv_serialize_data();
3920 }
3921 ctx->ctx_dbrs[rnum] = dbreg.val;
3922
3923 DPRINT(("write dbr%u=0x%lx used_dbrs=0x%x ld=%d apmu=%d\n",
3924 rnum, dbreg.val, ctx->ctx_used_dbrs[0], is_loaded, can_access_pmu));
3925 }
3926 }
3927
3928 return 0;
3929
3930abort_mission:
3931 /*
3932 * in case it was our first attempt, we undo the global modifications
3933 */
3934 if (first_time) {
3935 LOCK_PFS(flags);
3936 if (ctx->ctx_fl_system) {
3937 pfm_sessions.pfs_sys_use_dbregs--;
3938 }
3939 UNLOCK_PFS(flags);
3940 ctx->ctx_fl_using_dbreg = 0;
3941 }
3942 /*
3943 * install error return flag
3944 */
3945 PFM_REG_RETFLAG_SET(req->dbreg_flags, PFM_REG_RETFL_EINVAL);
3946
3947 return ret;
3948}
3949
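/*
 * Request layout sketch (hedged illustration): IA-64 code/data breakpoint
 * registers come in pairs -- the even-numbered register holds the address,
 * the odd-numbered one the mask and control bits, which is why the loop
 * above clears ibr.x (resp. dbr.r/dbr.w) for odd dbreg_num values. A caller
 * programming one code range restriction would therefore submit two
 * pfarg_dbreg_t entries (start_addr/mask_bits are placeholders):
 *
 *	pfarg_dbreg_t dbr[2];
 *	memset(dbr, 0, sizeof(dbr));
 *	dbr[0].dbreg_num   = 0;			// ibr0: address
 *	dbr[0].dbreg_value = start_addr;
 *	dbr[1].dbreg_num   = 1;			// ibr1: mask + control
 *	dbr[1].dbreg_value = mask_bits;		// enable bits are sanitized above
 *
 * and pass them with count == 2 to pfm_write_ibrs() below.
 */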
3950static int
3951pfm_write_ibrs(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
3952{
3953 return pfm_write_ibr_dbr(PFM_CODE_RR, ctx, arg, count, regs);
3954}
3955
3956static int
3957pfm_write_dbrs(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
3958{
3959 return pfm_write_ibr_dbr(PFM_DATA_RR, ctx, arg, count, regs);
3960}
3961
3962int
3963pfm_mod_write_ibrs(struct task_struct *task, void *req, unsigned int nreq, struct pt_regs *regs)
3964{
3965 pfm_context_t *ctx;
3966
3967 if (req == NULL) return -EINVAL;
3968
3969 ctx = GET_PMU_CTX();
3970
3971 if (ctx == NULL) return -EINVAL;
3972
3973 /*
3974 * for now limit to current task, which is enough when calling
3975 * from overflow handler
3976 */
3977 if (task != current && ctx->ctx_fl_system == 0) return -EBUSY;
3978
3979 return pfm_write_ibrs(ctx, req, nreq, regs);
3980}
3981EXPORT_SYMBOL(pfm_mod_write_ibrs);
3982
3983int
3984pfm_mod_write_dbrs(struct task_struct *task, void *req, unsigned int nreq, struct pt_regs *regs)
3985{
3986 pfm_context_t *ctx;
3987
3988 if (req == NULL) return -EINVAL;
3989
3990 ctx = GET_PMU_CTX();
3991
3992 if (ctx == NULL) return -EINVAL;
3993
3994 /*
3995 * for now limit to current task, which is enough when calling
3996 * from overflow handler
3997 */
3998 if (task != current && ctx->ctx_fl_system == 0) return -EBUSY;
3999
4000 return pfm_write_dbrs(ctx, req, nreq, regs);
4001}
4002EXPORT_SYMBOL(pfm_mod_write_dbrs);
4003
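/*
 * In-kernel usage sketch (hedged, assuming the caller is a sampling format
 * module acting on the current task): the two helpers exported above take
 * the same pfarg_dbreg_t requests as the user-level write commands, e.g.:
 *
 *	pfarg_dbreg_t rq[2] = {
 *		{ .dbreg_num = 0, .dbreg_value = addr },	// address half
 *		{ .dbreg_num = 1, .dbreg_value = ctrl },	// mask/control half
 *	};
 *	int err = pfm_mod_write_ibrs(current, rq, 2, regs);
 *
 * addr, ctrl and regs are placeholders; for a non system-wide context any
 * task other than current is rejected with -EBUSY, as enforced above.
 */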
4004
4005static int
4006pfm_get_features(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
4007{
4008 pfarg_features_t *req = (pfarg_features_t *)arg;
4009
4010 req->ft_version = PFM_VERSION;
4011 return 0;
4012}
4013
4014static int
4015pfm_stop(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
4016{
4017 struct pt_regs *tregs;
4018 struct task_struct *task = PFM_CTX_TASK(ctx);
4019 int state, is_system;
4020
4021 state = ctx->ctx_state;
4022 is_system = ctx->ctx_fl_system;
4023
4024 /*
4025 * context must be attached to issue the stop command (includes LOADED,MASKED,ZOMBIE)
4026 */
4027 if (state == PFM_CTX_UNLOADED) return -EINVAL;
4028
4029 /*
4030 * In system wide and when the context is loaded, access can only happen
4031 * when the caller is running on the CPU being monitored by the session.
4032 * It does not have to be the owner (ctx_task) of the context per se.
4033 */
4034 if (is_system && ctx->ctx_cpu != smp_processor_id()) {
4035 DPRINT(("should be running on CPU%d\n", ctx->ctx_cpu));
4036 return -EBUSY;
4037 }
4038 DPRINT(("task [%d] ctx_state=%d is_system=%d\n",
19c5870c 4039 task_pid_nr(PFM_CTX_TASK(ctx)),
4040 state,
4041 is_system));
4042 /*
4043 * in system mode, we need to update the PMU directly
4044 * and the user level state of the caller, which may not
4045 * necessarily be the creator of the context.
4046 */
4047 if (is_system) {
4048 /*
4049 * Update local PMU first
4050 *
4051 * disable dcr pp
4052 */
4053 ia64_setreg(_IA64_REG_CR_DCR, ia64_getreg(_IA64_REG_CR_DCR) & ~IA64_DCR_PP);
4054 ia64_srlz_i();
4055
4056 /*
4057 * update local cpuinfo
4058 */
4059 PFM_CPUINFO_CLEAR(PFM_CPUINFO_DCR_PP);
4060
4061 /*
4062 * stop monitoring, does srlz.i
4063 */
4064 pfm_clear_psr_pp();
4065
4066 /*
4067 * stop monitoring in the caller
4068 */
4069 ia64_psr(regs)->pp = 0;
4070
4071 return 0;
4072 }
4073 /*
4074 * per-task mode
4075 */
4076
4077 if (task == current) {
4078 /* stop monitoring at kernel level */
4079 pfm_clear_psr_up();
4080
4081 /*
4082 * stop monitoring at the user level
4083 */
4084 ia64_psr(regs)->up = 0;
4085 } else {
6450578f 4086 tregs = task_pt_regs(task);
4087
4088 /*
4089 * stop monitoring at the user level
4090 */
4091 ia64_psr(tregs)->up = 0;
4092
4093 /*
4094 * monitoring disabled in kernel at next reschedule
4095 */
4096 ctx->ctx_saved_psr_up = 0;
19c5870c 4097 DPRINT(("task=[%d]\n", task_pid_nr(task)));
4098 }
4099 return 0;
4100}
4101
4102
4103static int
4104pfm_start(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
4105{
4106 struct pt_regs *tregs;
4107 int state, is_system;
4108
4109 state = ctx->ctx_state;
4110 is_system = ctx->ctx_fl_system;
4111
4112 if (state != PFM_CTX_LOADED) return -EINVAL;
4113
4114 /*
4115 * In system wide and when the context is loaded, access can only happen
4116 * when the caller is running on the CPU being monitored by the session.
4117 * It does not have to be the owner (ctx_task) of the context per se.
4118 */
4119 if (is_system && ctx->ctx_cpu != smp_processor_id()) {
4120 DPRINT(("should be running on CPU%d\n", ctx->ctx_cpu));
4121 return -EBUSY;
4122 }
4123
4124 /*
4125 * in system mode, we need to update the PMU directly
4126 * and the user level state of the caller, which may not
4127 * necessarily be the creator of the context.
4128 */
4129 if (is_system) {
4130
4131 /*
4132 * set user level psr.pp for the caller
4133 */
4134 ia64_psr(regs)->pp = 1;
4135
4136 /*
4137 * now update the local PMU and cpuinfo
4138 */
4139 PFM_CPUINFO_SET(PFM_CPUINFO_DCR_PP);
4140
4141 /*
4142 * start monitoring at kernel level
4143 */
4144 pfm_set_psr_pp();
4145
4146 /* enable dcr pp */
4147 ia64_setreg(_IA64_REG_CR_DCR, ia64_getreg(_IA64_REG_CR_DCR) | IA64_DCR_PP);
4148 ia64_srlz_i();
4149
4150 return 0;
4151 }
4152
4153 /*
4154 * per-process mode
4155 */
4156
4157 if (ctx->ctx_task == current) {
4158
4159 /* start monitoring at kernel level */
4160 pfm_set_psr_up();
4161
4162 /*
4163 * activate monitoring at user level
4164 */
4165 ia64_psr(regs)->up = 1;
4166
4167 } else {
6450578f 4168 tregs = task_pt_regs(ctx->ctx_task);
4169
4170 /*
4171 * start monitoring at the kernel level the next
4172 * time the task is scheduled
4173 */
4174 ctx->ctx_saved_psr_up = IA64_PSR_UP;
4175
4176 /*
4177 * activate monitoring at user level
4178 */
4179 ia64_psr(tregs)->up = 1;
4180 }
4181 return 0;
4182}
4183
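/*
 * Usage sketch (hedged): once loaded, monitoring is toggled with the
 * start/stop commands, typically from user level as:
 *
 *	perfmonctl(ctx_fd, PFM_START, NULL, 0);
 *	... workload to measure ...
 *	perfmonctl(ctx_fd, PFM_STOP, NULL, 0);
 *
 * For a per-task session this flips psr.up (directly for self-monitoring,
 * via ctx_saved_psr_up for a stopped target); for a system-wide session it
 * flips psr.pp and dcr.pp on the session CPU, as implemented above.
 */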
4184static int
4185pfm_get_pmc_reset(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
4186{
4187 pfarg_reg_t *req = (pfarg_reg_t *)arg;
4188 unsigned int cnum;
4189 int i;
4190 int ret = -EINVAL;
4191
4192 for (i = 0; i < count; i++, req++) {
4193
4194 cnum = req->reg_num;
4195
4196 if (!PMC_IS_IMPL(cnum)) goto abort_mission;
4197
4198 req->reg_value = PMC_DFL_VAL(cnum);
4199
4200 PFM_REG_RETFLAG_SET(req->reg_flags, 0);
4201
4202 DPRINT(("pmc_reset_val pmc[%u]=0x%lx\n", cnum, req->reg_value));
4203 }
4204 return 0;
4205
4206abort_mission:
4207 PFM_REG_RETFLAG_SET(req->reg_flags, PFM_REG_RETFL_EINVAL);
4208 return ret;
4209}
4210
4211static int
4212pfm_check_task_exist(pfm_context_t *ctx)
4213{
4214 struct task_struct *g, *t;
4215 int ret = -ESRCH;
4216
4217 read_lock(&tasklist_lock);
4218
4219 do_each_thread (g, t) {
4220 if (t->thread.pfm_context == ctx) {
4221 ret = 0;
4222 break;
4223 }
4224 } while_each_thread (g, t);
4225
4226 read_unlock(&tasklist_lock);
4227
4228 DPRINT(("pfm_check_task_exist: ret=%d ctx=%p\n", ret, ctx));
4229
4230 return ret;
4231}
4232
4233static int
4234pfm_context_load(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
4235{
4236 struct task_struct *task;
4237 struct thread_struct *thread;
4238 struct pfm_context_t *old;
4239 unsigned long flags;
4240#ifndef CONFIG_SMP
4241 struct task_struct *owner_task = NULL;
4242#endif
4243 pfarg_load_t *req = (pfarg_load_t *)arg;
4244 unsigned long *pmcs_source, *pmds_source;
4245 int the_cpu;
4246 int ret = 0;
4247 int state, is_system, set_dbregs = 0;
4248
4249 state = ctx->ctx_state;
4250 is_system = ctx->ctx_fl_system;
4251 /*
4252 * can only load from unloaded or terminated state
4253 */
4254 if (state != PFM_CTX_UNLOADED) {
4255 DPRINT(("cannot load to [%d], invalid ctx_state=%d\n",
4256 req->load_pid,
4257 ctx->ctx_state));
a5a70b75 4258 return -EBUSY;
4259 }
4260
4261 DPRINT(("load_pid [%d] using_dbreg=%d\n", req->load_pid, ctx->ctx_fl_using_dbreg));
4262
4263 if (CTX_OVFL_NOBLOCK(ctx) == 0 && req->load_pid == current->pid) {
4264 DPRINT(("cannot use blocking mode on self\n"));
4265 return -EINVAL;
4266 }
4267
4268 ret = pfm_get_task(ctx, req->load_pid, &task);
4269 if (ret) {
4270 DPRINT(("load_pid [%d] get_task=%d\n", req->load_pid, ret));
4271 return ret;
4272 }
4273
4274 ret = -EINVAL;
4275
4276 /*
4277 * system wide is self monitoring only
4278 */
4279 if (is_system && task != current) {
4280 DPRINT(("system wide is self monitoring only load_pid=%d\n",
4281 req->load_pid));
4282 goto error;
4283 }
4284
4285 thread = &task->thread;
4286
4287 ret = 0;
4288 /*
4289 * cannot load a context which is using range restrictions,
4290 * into a task that is being debugged.
4291 */
4292 if (ctx->ctx_fl_using_dbreg) {
4293 if (thread->flags & IA64_THREAD_DBG_VALID) {
4294 ret = -EBUSY;
4295 DPRINT(("load_pid [%d] task is debugged, cannot load range restrictions\n", req->load_pid));
4296 goto error;
4297 }
4298 LOCK_PFS(flags);
4299
4300 if (is_system) {
4301 if (pfm_sessions.pfs_ptrace_use_dbregs) {
4302 DPRINT(("cannot load [%d] dbregs in use\n",
4303 task_pid_nr(task)));
4304 ret = -EBUSY;
4305 } else {
4306 pfm_sessions.pfs_sys_use_dbregs++;
19c5870c 4307 DPRINT(("load [%d] increased sys_use_dbreg=%u\n", task_pid_nr(task), pfm_sessions.pfs_sys_use_dbregs));
4308 set_dbregs = 1;
4309 }
4310 }
4311
4312 UNLOCK_PFS(flags);
4313
4314 if (ret) goto error;
4315 }
4316
4317 /*
4318 * SMP system-wide monitoring implies self-monitoring.
4319 *
4320 * The programming model expects the task to
4321 * be pinned on a CPU throughout the session.
4322 * Here we take note of the current CPU at the
4323 * time the context is loaded. No call from
4324 * another CPU will be allowed.
4325 *
4326	 * The pinning via sched_setaffinity()
4327 * must be done by the calling task prior
4328 * to this call.
4329 *
4330 * systemwide: keep track of CPU this session is supposed to run on
4331 */
4332 the_cpu = ctx->ctx_cpu = smp_processor_id();
4333
4334 ret = -EBUSY;
4335 /*
4336 * now reserve the session
4337 */
4338 ret = pfm_reserve_session(current, is_system, the_cpu);
4339 if (ret) goto error;
4340
4341 /*
4342 * task is necessarily stopped at this point.
4343 *
4344 * If the previous context was zombie, then it got removed in
4345 * pfm_save_regs(). Therefore we should not see it here.
4346 * If we see a context, then this is an active context
4347 *
4348 * XXX: needs to be atomic
4349 */
4350 DPRINT(("before cmpxchg() old_ctx=%p new_ctx=%p\n",
4351 thread->pfm_context, ctx));
4352
6bf11e8c 4353 ret = -EBUSY;
4354 old = ia64_cmpxchg(acq, &thread->pfm_context, NULL, ctx, sizeof(pfm_context_t *));
4355 if (old != NULL) {
4356 DPRINT(("load_pid [%d] already has a context\n", req->load_pid));
4357 goto error_unres;
4358 }
4359
4360 pfm_reset_msgq(ctx);
4361
4362 ctx->ctx_state = PFM_CTX_LOADED;
4363
4364 /*
4365 * link context to task
4366 */
4367 ctx->ctx_task = task;
4368
4369 if (is_system) {
4370 /*
4371 * we load as stopped
4372 */
4373 PFM_CPUINFO_SET(PFM_CPUINFO_SYST_WIDE);
4374 PFM_CPUINFO_CLEAR(PFM_CPUINFO_DCR_PP);
4375
4376 if (ctx->ctx_fl_excl_idle) PFM_CPUINFO_SET(PFM_CPUINFO_EXCL_IDLE);
4377 } else {
4378 thread->flags |= IA64_THREAD_PM_VALID;
4379 }
4380
4381 /*
4382 * propagate into thread-state
4383 */
4384 pfm_copy_pmds(task, ctx);
4385 pfm_copy_pmcs(task, ctx);
4386
4387 pmcs_source = ctx->th_pmcs;
4388 pmds_source = ctx->th_pmds;
4389
4390 /*
4391 * always the case for system-wide
4392 */
4393 if (task == current) {
4394
4395 if (is_system == 0) {
4396
4397 /* allow user level control */
4398 ia64_psr(regs)->sp = 0;
19c5870c 4399 DPRINT(("clearing psr.sp for [%d]\n", task_pid_nr(task)));
4400
4401 SET_LAST_CPU(ctx, smp_processor_id());
4402 INC_ACTIVATION();
4403 SET_ACTIVATION(ctx);
4404#ifndef CONFIG_SMP
4405 /*
4406 * push the other task out, if any
4407 */
4408 owner_task = GET_PMU_OWNER();
4409 if (owner_task) pfm_lazy_save_regs(owner_task);
4410#endif
4411 }
4412 /*
4413 * load all PMD from ctx to PMU (as opposed to thread state)
4414 * restore all PMC from ctx to PMU
4415 */
4416 pfm_restore_pmds(pmds_source, ctx->ctx_all_pmds[0]);
4417 pfm_restore_pmcs(pmcs_source, ctx->ctx_all_pmcs[0]);
4418
4419 ctx->ctx_reload_pmcs[0] = 0UL;
4420 ctx->ctx_reload_pmds[0] = 0UL;
4421
4422 /*
4423 * guaranteed safe by earlier check against DBG_VALID
4424 */
4425 if (ctx->ctx_fl_using_dbreg) {
4426 pfm_restore_ibrs(ctx->ctx_ibrs, pmu_conf->num_ibrs);
4427 pfm_restore_dbrs(ctx->ctx_dbrs, pmu_conf->num_dbrs);
4428 }
4429 /*
4430 * set new ownership
4431 */
4432 SET_PMU_OWNER(task, ctx);
4433
19c5870c 4434 DPRINT(("context loaded on PMU for [%d]\n", task_pid_nr(task)));
4435 } else {
4436 /*
4437 * when not current, task MUST be stopped, so this is safe
4438 */
6450578f 4439 regs = task_pt_regs(task);
4440
4441 /* force a full reload */
4442 ctx->ctx_last_activation = PFM_INVALID_ACTIVATION;
4443 SET_LAST_CPU(ctx, -1);
4444
4445 /* initial saved psr (stopped) */
4446 ctx->ctx_saved_psr_up = 0UL;
4447 ia64_psr(regs)->up = ia64_psr(regs)->pp = 0;
4448 }
4449
4450 ret = 0;
4451
4452error_unres:
4453 if (ret) pfm_unreserve_session(ctx, ctx->ctx_fl_system, the_cpu);
4454error:
4455 /*
4456 * we must undo the dbregs setting (for system-wide)
4457 */
4458 if (ret && set_dbregs) {
4459 LOCK_PFS(flags);
4460 pfm_sessions.pfs_sys_use_dbregs--;
4461 UNLOCK_PFS(flags);
4462 }
4463 /*
4464 * release task, there is now a link with the context
4465 */
4466 if (is_system == 0 && task != current) {
4467 pfm_put_task(task);
4468
4469 if (ret == 0) {
4470 ret = pfm_check_task_exist(ctx);
4471 if (ret) {
4472 ctx->ctx_state = PFM_CTX_UNLOADED;
4473 ctx->ctx_task = NULL;
4474 }
4475 }
4476 }
4477 return ret;
4478}
4479
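/*
 * Usage sketch (hedged): attaching the context to a task. For
 * self-monitoring (and for any system-wide session, which must be
 * self-monitoring) the caller passes its own pid:
 *
 *	pfarg_load_t load;
 *	memset(&load, 0, sizeof(load));
 *	load.load_pid = getpid();
 *	perfmonctl(ctx_fd, PFM_LOAD_CONTEXT, &load, 1);
 *
 * To monitor another thread, that thread must already be stopped, and a
 * blocking-mode context cannot be loaded onto the caller itself (see the
 * CTX_OVFL_NOBLOCK() check above).
 */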
4480/*
4481 * in this function, we do not need to increase the use count
4482 * for the task via get_task_struct(), because we hold the
4483 * context lock. If the task were to disappear while having
4484 * a context attached, it would go through pfm_exit_thread()
4485 * which also grabs the context lock and would therefore be blocked
4486 * until we are here.
4487 */
4488static void pfm_flush_pmds(struct task_struct *, pfm_context_t *ctx);
4489
4490static int
4491pfm_context_unload(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
4492{
4493 struct task_struct *task = PFM_CTX_TASK(ctx);
4494 struct pt_regs *tregs;
4495 int prev_state, is_system;
4496 int ret;
4497
19c5870c 4498 DPRINT(("ctx_state=%d task [%d]\n", ctx->ctx_state, task ? task_pid_nr(task) : -1));
4499
4500 prev_state = ctx->ctx_state;
4501 is_system = ctx->ctx_fl_system;
4502
4503 /*
4504 * unload only when necessary
4505 */
4506 if (prev_state == PFM_CTX_UNLOADED) {
4507 DPRINT(("ctx_state=%d, nothing to do\n", prev_state));
4508 return 0;
4509 }
4510
4511 /*
4512 * clear psr and dcr bits
4513 */
4514 ret = pfm_stop(ctx, NULL, 0, regs);
4515 if (ret) return ret;
4516
4517 ctx->ctx_state = PFM_CTX_UNLOADED;
4518
4519 /*
4520 * in system mode, we need to update the PMU directly
4521 * and the user level state of the caller, which may not
4522 * necessarily be the creator of the context.
4523 */
4524 if (is_system) {
4525
4526 /*
4527 * Update cpuinfo
4528 *
4529 * local PMU is taken care of in pfm_stop()
4530 */
4531 PFM_CPUINFO_CLEAR(PFM_CPUINFO_SYST_WIDE);
4532 PFM_CPUINFO_CLEAR(PFM_CPUINFO_EXCL_IDLE);
4533
4534 /*
4535 * save PMDs in context
4536 * release ownership
4537 */
4538 pfm_flush_pmds(current, ctx);
4539
4540 /*
4541 * at this point we are done with the PMU
4542 * so we can unreserve the resource.
4543 */
4544 if (prev_state != PFM_CTX_ZOMBIE)
4545 pfm_unreserve_session(ctx, 1 , ctx->ctx_cpu);
4546
4547 /*
4548 * disconnect context from task
4549 */
4550 task->thread.pfm_context = NULL;
4551 /*
4552 * disconnect task from context
4553 */
4554 ctx->ctx_task = NULL;
4555
4556 /*
4557 * There is nothing more to cleanup here.
4558 */
4559 return 0;
4560 }
4561
4562 /*
4563 * per-task mode
4564 */
6450578f 4565 tregs = task == current ? regs : task_pt_regs(task);
4566
4567 if (task == current) {
4568 /*
4569 * cancel user level control
4570 */
4571 ia64_psr(regs)->sp = 1;
4572
19c5870c 4573 DPRINT(("setting psr.sp for [%d]\n", task_pid_nr(task)));
4574 }
4575 /*
4576 * save PMDs to context
4577 * release ownership
4578 */
4579 pfm_flush_pmds(task, ctx);
4580
4581 /*
4582 * at this point we are done with the PMU
4583 * so we can unreserve the resource.
4584 *
4585 * when state was ZOMBIE, we have already unreserved.
4586 */
4587 if (prev_state != PFM_CTX_ZOMBIE)
4588 pfm_unreserve_session(ctx, 0 , ctx->ctx_cpu);
4589
4590 /*
4591 * reset activation counter and psr
4592 */
4593 ctx->ctx_last_activation = PFM_INVALID_ACTIVATION;
4594 SET_LAST_CPU(ctx, -1);
4595
4596 /*
4597 * PMU state will not be restored
4598 */
4599 task->thread.flags &= ~IA64_THREAD_PM_VALID;
4600
4601 /*
4602 * break links between context and task
4603 */
4604 task->thread.pfm_context = NULL;
4605 ctx->ctx_task = NULL;
4606
4607 PFM_SET_WORK_PENDING(task, 0);
4608
4609 ctx->ctx_fl_trap_reason = PFM_TRAP_REASON_NONE;
4610 ctx->ctx_fl_can_restart = 0;
4611 ctx->ctx_fl_going_zombie = 0;
4612
19c5870c 4613 DPRINT(("disconnected [%d] from context\n", task_pid_nr(task)));
4614
4615 return 0;
4616}
4617
4618
4619/*
4620 * called only from exit_thread(): task == current
4621 * we come here only if current has a context attached (loaded or masked)
4622 */
4623void
4624pfm_exit_thread(struct task_struct *task)
4625{
4626 pfm_context_t *ctx;
4627 unsigned long flags;
6450578f 4628 struct pt_regs *regs = task_pt_regs(task);
4629 int ret, state;
4630 int free_ok = 0;
4631
4632 ctx = PFM_GET_CTX(task);
4633
4634 PROTECT_CTX(ctx, flags);
4635
19c5870c 4636 DPRINT(("state=%d task [%d]\n", ctx->ctx_state, task_pid_nr(task)));
4637
4638 state = ctx->ctx_state;
4639 switch(state) {
4640 case PFM_CTX_UNLOADED:
4641 /*
72fdbdce 4642 * only comes to this function if pfm_context is not NULL, i.e., cannot
4643 * be in unloaded state
4644 */
19c5870c 4645 printk(KERN_ERR "perfmon: pfm_exit_thread [%d] ctx unloaded\n", task_pid_nr(task));
4646 break;
4647 case PFM_CTX_LOADED:
4648 case PFM_CTX_MASKED:
4649 ret = pfm_context_unload(ctx, NULL, 0, regs);
4650 if (ret) {
19c5870c 4651 printk(KERN_ERR "perfmon: pfm_exit_thread [%d] state=%d unload failed %d\n", task_pid_nr(task), state, ret);
4652 }
4653 DPRINT(("ctx unloaded for current state was %d\n", state));
4654
4655 pfm_end_notify_user(ctx);
4656 break;
4657 case PFM_CTX_ZOMBIE:
4658 ret = pfm_context_unload(ctx, NULL, 0, regs);
4659 if (ret) {
19c5870c 4660 printk(KERN_ERR "perfmon: pfm_exit_thread [%d] state=%d unload failed %d\n", task_pid_nr(task), state, ret);
4661 }
4662 free_ok = 1;
4663 break;
4664 default:
19c5870c 4665 printk(KERN_ERR "perfmon: pfm_exit_thread [%d] unexpected state=%d\n", task_pid_nr(task), state);
4666 break;
4667 }
4668 UNPROTECT_CTX(ctx, flags);
4669
4670 { u64 psr = pfm_get_psr();
4671 BUG_ON(psr & (IA64_PSR_UP|IA64_PSR_PP));
4672 BUG_ON(GET_PMU_OWNER());
4673 BUG_ON(ia64_psr(regs)->up);
4674 BUG_ON(ia64_psr(regs)->pp);
4675 }
4676
4677 /*
4678 * All memory free operations (especially for vmalloc'ed memory)
4679 * MUST be done with interrupts ENABLED.
4680 */
4681 if (free_ok) pfm_context_free(ctx);
4682}
4683
4684/*
4685 * functions MUST be listed in the increasing order of their index (see perfmon.h)
4686 */
4687#define PFM_CMD(name, flags, arg_count, arg_type, getsz) { name, #name, flags, arg_count, sizeof(arg_type), getsz }
4688#define PFM_CMD_S(name, flags) { name, #name, flags, 0, 0, NULL }
4689#define PFM_CMD_PCLRWS (PFM_CMD_FD|PFM_CMD_ARG_RW|PFM_CMD_STOP)
4690#define PFM_CMD_PCLRW (PFM_CMD_FD|PFM_CMD_ARG_RW)
4691#define PFM_CMD_NONE { NULL, "no-cmd", 0, 0, 0, NULL}
4692
4693static pfm_cmd_desc_t pfm_cmd_tab[]={
4694/* 0 */PFM_CMD_NONE,
4695/* 1 */PFM_CMD(pfm_write_pmcs, PFM_CMD_PCLRWS, PFM_CMD_ARG_MANY, pfarg_reg_t, NULL),
4696/* 2 */PFM_CMD(pfm_write_pmds, PFM_CMD_PCLRWS, PFM_CMD_ARG_MANY, pfarg_reg_t, NULL),
4697/* 3 */PFM_CMD(pfm_read_pmds, PFM_CMD_PCLRWS, PFM_CMD_ARG_MANY, pfarg_reg_t, NULL),
4698/* 4 */PFM_CMD_S(pfm_stop, PFM_CMD_PCLRWS),
4699/* 5 */PFM_CMD_S(pfm_start, PFM_CMD_PCLRWS),
4700/* 6 */PFM_CMD_NONE,
4701/* 7 */PFM_CMD_NONE,
4702/* 8 */PFM_CMD(pfm_context_create, PFM_CMD_ARG_RW, 1, pfarg_context_t, pfm_ctx_getsize),
4703/* 9 */PFM_CMD_NONE,
4704/* 10 */PFM_CMD_S(pfm_restart, PFM_CMD_PCLRW),
4705/* 11 */PFM_CMD_NONE,
4706/* 12 */PFM_CMD(pfm_get_features, PFM_CMD_ARG_RW, 1, pfarg_features_t, NULL),
4707/* 13 */PFM_CMD(pfm_debug, 0, 1, unsigned int, NULL),
4708/* 14 */PFM_CMD_NONE,
4709/* 15 */PFM_CMD(pfm_get_pmc_reset, PFM_CMD_ARG_RW, PFM_CMD_ARG_MANY, pfarg_reg_t, NULL),
4710/* 16 */PFM_CMD(pfm_context_load, PFM_CMD_PCLRWS, 1, pfarg_load_t, NULL),
4711/* 17 */PFM_CMD_S(pfm_context_unload, PFM_CMD_PCLRWS),
4712/* 18 */PFM_CMD_NONE,
4713/* 19 */PFM_CMD_NONE,
4714/* 20 */PFM_CMD_NONE,
4715/* 21 */PFM_CMD_NONE,
4716/* 22 */PFM_CMD_NONE,
4717/* 23 */PFM_CMD_NONE,
4718/* 24 */PFM_CMD_NONE,
4719/* 25 */PFM_CMD_NONE,
4720/* 26 */PFM_CMD_NONE,
4721/* 27 */PFM_CMD_NONE,
4722/* 28 */PFM_CMD_NONE,
4723/* 29 */PFM_CMD_NONE,
4724/* 30 */PFM_CMD_NONE,
4725/* 31 */PFM_CMD_NONE,
4726/* 32 */PFM_CMD(pfm_write_ibrs, PFM_CMD_PCLRWS, PFM_CMD_ARG_MANY, pfarg_dbreg_t, NULL),
4727/* 33 */PFM_CMD(pfm_write_dbrs, PFM_CMD_PCLRWS, PFM_CMD_ARG_MANY, pfarg_dbreg_t, NULL)
4728};
4729#define PFM_CMD_COUNT (sizeof(pfm_cmd_tab)/sizeof(pfm_cmd_desc_t))
4730
4731static int
4732pfm_check_task_state(pfm_context_t *ctx, int cmd, unsigned long flags)
4733{
4734 struct task_struct *task;
4735 int state, old_state;
4736
4737recheck:
4738 state = ctx->ctx_state;
4739 task = ctx->ctx_task;
4740
4741 if (task == NULL) {
4742 DPRINT(("context %d no task, state=%d\n", ctx->ctx_fd, state));
4743 return 0;
4744 }
4745
4746 DPRINT(("context %d state=%d [%d] task_state=%ld must_stop=%d\n",
4747 ctx->ctx_fd,
4748 state,
19c5870c 4749 task_pid_nr(task),
4750 task->state, PFM_CMD_STOPPED(cmd)));
4751
4752 /*
4753 * self-monitoring always ok.
4754 *
4755 * for system-wide the caller can either be the creator of the
4756	 * context (the one to which the context is attached) OR
4757 * a task running on the same CPU as the session.
4758 */
4759 if (task == current || ctx->ctx_fl_system) return 0;
4760
4761 /*
a5a70b75 4762 * we are monitoring another thread
1da177e4 4763 */
a5a70b75 4764 switch(state) {
4765 case PFM_CTX_UNLOADED:
4766 /*
4767 * if context is UNLOADED we are safe to go
4768 */
4769 return 0;
4770 case PFM_CTX_ZOMBIE:
4771 /*
4772 * no command can operate on a zombie context
4773 */
4774 DPRINT(("cmd %d state zombie cannot operate on context\n", cmd));
4775 return -EINVAL;
4776 case PFM_CTX_MASKED:
4777 /*
4778 * PMU state has been saved to software even though
4779 * the thread may still be running.
4780 */
4781 if (cmd != PFM_UNLOAD_CONTEXT) return 0;
4782 }
4783
4784 /*
4785 * context is LOADED or MASKED. Some commands may need to have
4786 * the task stopped.
4787 *
4788 * We could lift this restriction for UP but it would mean that
4789 * the user has no guarantee the task would not run between
4790 * two successive calls to perfmonctl(). That's probably OK.
4791 * If this user wants to ensure the task does not run, then
4792 * the task must be stopped.
4793 */
4794 if (PFM_CMD_STOPPED(cmd)) {
21498223 4795 if (!task_is_stopped_or_traced(task)) {
19c5870c 4796 DPRINT(("[%d] task not in stopped state\n", task_pid_nr(task)));
4797 return -EBUSY;
4798 }
4799 /*
4800 * task is now stopped, wait for ctxsw out
4801 *
4802 * This is an interesting point in the code.
4803 * We need to unprotect the context because
4804	 * the pfm_save_regs() routine needs to grab
4805	 * the same lock. There is a danger in doing
4806 * this because it leaves a window open for
4807 * another task to get access to the context
4808 * and possibly change its state. The one thing
4809 * that is not possible is for the context to disappear
4810 * because we are protected by the VFS layer, i.e.,
4811 * get_fd()/put_fd().
4812 */
4813 old_state = state;
4814
4815 UNPROTECT_CTX(ctx, flags);
4816
4817 wait_task_inactive(task);
4818
4819 PROTECT_CTX(ctx, flags);
4820
4821 /*
4822 * we must recheck to verify if state has changed
4823 */
4824 if (ctx->ctx_state != old_state) {
4825 DPRINT(("old_state=%d new_state=%d\n", old_state, ctx->ctx_state));
4826 goto recheck;
4827 }
4828 }
4829 return 0;
4830}
4831
4832/*
4833 * system-call entry point (must return long)
4834 */
4835asmlinkage long
4836sys_perfmonctl (int fd, int cmd, void __user *arg, int count)
4837{
4838 struct file *file = NULL;
4839 pfm_context_t *ctx = NULL;
4840 unsigned long flags = 0UL;
4841 void *args_k = NULL;
4842 long ret; /* will expand int return types */
4843 size_t base_sz, sz, xtra_sz = 0;
4844 int narg, completed_args = 0, call_made = 0, cmd_flags;
4845 int (*func)(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs);
4846 int (*getsize)(void *arg, size_t *sz);
4847#define PFM_MAX_ARGSIZE 4096
4848
4849 /*
4850 * reject any call if perfmon was disabled at initialization
4851 */
4852 if (unlikely(pmu_conf == NULL)) return -ENOSYS;
4853
4854 if (unlikely(cmd < 0 || cmd >= PFM_CMD_COUNT)) {
4855 DPRINT(("invalid cmd=%d\n", cmd));
4856 return -EINVAL;
4857 }
4858
4859 func = pfm_cmd_tab[cmd].cmd_func;
4860 narg = pfm_cmd_tab[cmd].cmd_narg;
4861 base_sz = pfm_cmd_tab[cmd].cmd_argsize;
4862 getsize = pfm_cmd_tab[cmd].cmd_getsize;
4863 cmd_flags = pfm_cmd_tab[cmd].cmd_flags;
4864
4865 if (unlikely(func == NULL)) {
4866 DPRINT(("invalid cmd=%d\n", cmd));
4867 return -EINVAL;
4868 }
4869
4870 DPRINT(("cmd=%s idx=%d narg=0x%x argsz=%lu count=%d\n",
4871 PFM_CMD_NAME(cmd),
4872 cmd,
4873 narg,
4874 base_sz,
4875 count));
4876
4877 /*
4878 * check if number of arguments matches what the command expects
4879 */
4880 if (unlikely((narg == PFM_CMD_ARG_MANY && count <= 0) || (narg > 0 && narg != count)))
4881 return -EINVAL;
4882
4883restart_args:
4884 sz = xtra_sz + base_sz*count;
4885 /*
4886 * limit abuse to min page size
4887 */
4888 if (unlikely(sz > PFM_MAX_ARGSIZE)) {
19c5870c 4889 printk(KERN_ERR "perfmon: [%d] argument too big %lu\n", task_pid_nr(current), sz);
4890 return -E2BIG;
4891 }
4892
4893 /*
4894 * allocate default-sized argument buffer
4895 */
4896 if (likely(count && args_k == NULL)) {
4897 args_k = kmalloc(PFM_MAX_ARGSIZE, GFP_KERNEL);
4898 if (args_k == NULL) return -ENOMEM;
4899 }
4900
4901 ret = -EFAULT;
4902
4903 /*
4904 * copy arguments
4905 *
4906 * assume sz = 0 for command without parameters
4907 */
4908 if (sz && copy_from_user(args_k, arg, sz)) {
4909 DPRINT(("cannot copy_from_user %lu bytes @%p\n", sz, arg));
4910 goto error_args;
4911 }
4912
4913 /*
4914 * check if command supports extra parameters
4915 */
4916 if (completed_args == 0 && getsize) {
4917 /*
4918 * get extra parameters size (based on main argument)
4919 */
4920 ret = (*getsize)(args_k, &xtra_sz);
4921 if (ret) goto error_args;
4922
4923 completed_args = 1;
4924
4925 DPRINT(("restart_args sz=%lu xtra_sz=%lu\n", sz, xtra_sz));
4926
4927 /* retry if necessary */
4928 if (likely(xtra_sz)) goto restart_args;
4929 }
4930
4931 if (unlikely((cmd_flags & PFM_CMD_FD) == 0)) goto skip_fd;
4932
4933 ret = -EBADF;
4934
4935 file = fget(fd);
4936 if (unlikely(file == NULL)) {
4937 DPRINT(("invalid fd %d\n", fd));
4938 goto error_args;
4939 }
4940 if (unlikely(PFM_IS_FILE(file) == 0)) {
4941 DPRINT(("fd %d not related to perfmon\n", fd));
4942 goto error_args;
4943 }
4944
4945 ctx = (pfm_context_t *)file->private_data;
4946 if (unlikely(ctx == NULL)) {
4947 DPRINT(("no context for fd %d\n", fd));
4948 goto error_args;
4949 }
4950 prefetch(&ctx->ctx_state);
4951
4952 PROTECT_CTX(ctx, flags);
4953
4954 /*
4955 * check task is stopped
4956 */
4957 ret = pfm_check_task_state(ctx, cmd, flags);
4958 if (unlikely(ret)) goto abort_locked;
4959
4960skip_fd:
6450578f 4961 ret = (*func)(ctx, args_k, count, task_pt_regs(current));
4962
4963 call_made = 1;
4964
4965abort_locked:
4966 if (likely(ctx)) {
4967 DPRINT(("context unlocked\n"));
4968 UNPROTECT_CTX(ctx, flags);
4969 }
4970
4971 /* copy argument back to user, if needed */
4972 if (call_made && PFM_CMD_RW_ARG(cmd) && copy_to_user(arg, args_k, base_sz*count)) ret = -EFAULT;
4973
4974error_args:
4975 if (file)
4976 fput(file);
4977
b2325fe1 4978 kfree(args_k);
4979
4980 DPRINT(("cmd=%s ret=%ld\n", PFM_CMD_NAME(cmd), ret));
4981
4982 return ret;
4983}
4984
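/*
 * End-to-end usage sketch (hedged, user-level pseudocode; the argument
 * structures and command codes live in asm/perfmon.h): a typical
 * self-monitoring session drives this entry point roughly as follows:
 *
 *	pfarg_context_t c;  pfarg_reg_t pc, pd;  pfarg_load_t ld;
 *	memset(&c, 0, sizeof(c));  memset(&ld, 0, sizeof(ld));
 *	perfmonctl(0, PFM_CREATE_CONTEXT, &c, 1);	// fd returned in c.ctx_fd
 *	// fill pc.reg_num/pc.reg_value and pd.reg_num for the event of interest
 *	perfmonctl(c.ctx_fd, PFM_WRITE_PMCS, &pc, 1);
 *	perfmonctl(c.ctx_fd, PFM_WRITE_PMDS, &pd, 1);
 *	ld.load_pid = getpid();
 *	perfmonctl(c.ctx_fd, PFM_LOAD_CONTEXT, &ld, 1);
 *	perfmonctl(c.ctx_fd, PFM_START, NULL, 0);
 *	// ... workload ...
 *	perfmonctl(c.ctx_fd, PFM_STOP, NULL, 0);
 *	perfmonctl(c.ctx_fd, PFM_READ_PMDS, &pd, 1);
 *
 * Each command is dispatched to the matching pfm_cmd_tab[] handler above
 * after the argument copy and, for fd-based commands, the context lookup.
 */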
4985static void
4986pfm_resume_after_ovfl(pfm_context_t *ctx, unsigned long ovfl_regs, struct pt_regs *regs)
4987{
4988 pfm_buffer_fmt_t *fmt = ctx->ctx_buf_fmt;
4989 pfm_ovfl_ctrl_t rst_ctrl;
4990 int state;
4991 int ret = 0;
4992
4993 state = ctx->ctx_state;
4994 /*
4995 * Unlock sampling buffer and reset index atomically
4996 * XXX: not really needed when blocking
4997 */
4998 if (CTX_HAS_SMPL(ctx)) {
4999
5000 rst_ctrl.bits.mask_monitoring = 0;
5001 rst_ctrl.bits.reset_ovfl_pmds = 0;
5002
5003 if (state == PFM_CTX_LOADED)
5004 ret = pfm_buf_fmt_restart_active(fmt, current, &rst_ctrl, ctx->ctx_smpl_hdr, regs);
5005 else
5006 ret = pfm_buf_fmt_restart(fmt, current, &rst_ctrl, ctx->ctx_smpl_hdr, regs);
5007 } else {
5008 rst_ctrl.bits.mask_monitoring = 0;
5009 rst_ctrl.bits.reset_ovfl_pmds = 1;
5010 }
5011
5012 if (ret == 0) {
5013 if (rst_ctrl.bits.reset_ovfl_pmds) {
5014 pfm_reset_regs(ctx, &ovfl_regs, PFM_PMD_LONG_RESET);
5015 }
5016 if (rst_ctrl.bits.mask_monitoring == 0) {
5017 DPRINT(("resuming monitoring\n"));
5018 if (ctx->ctx_state == PFM_CTX_MASKED) pfm_restore_monitoring(current);
5019 } else {
5020 DPRINT(("stopping monitoring\n"));
5021 //pfm_stop_monitoring(current, regs);
5022 }
5023 ctx->ctx_state = PFM_CTX_LOADED;
5024 }
5025}
5026
5027/*
5028 * context MUST BE LOCKED when calling
5029 * can only be called for current
5030 */
5031static void
5032pfm_context_force_terminate(pfm_context_t *ctx, struct pt_regs *regs)
5033{
5034 int ret;
5035
19c5870c 5036 DPRINT(("entering for [%d]\n", task_pid_nr(current)));
5037
5038 ret = pfm_context_unload(ctx, NULL, 0, regs);
5039 if (ret) {
19c5870c 5040 		printk(KERN_ERR "pfm_context_force_terminate: [%d] unload failed with %d\n", task_pid_nr(current), ret);
5041 }
5042
5043 /*
5044 * and wakeup controlling task, indicating we are now disconnected
5045 */
5046 wake_up_interruptible(&ctx->ctx_zombieq);
5047
5048 /*
5049 * given that context is still locked, the controlling
5050 * task will only get access when we return from
5051 * pfm_handle_work().
5052 */
5053}
5054
5055static int pfm_ovfl_notify_user(pfm_context_t *ctx, unsigned long ovfl_pmds);
5056 /*
5057 * pfm_handle_work() can be called with interrupts enabled
5058  * (TIF_NEED_RESCHED) or disabled. The wait_for_completion_interruptible()
5059 * call may sleep, therefore we must re-enable interrupts
5060 * to avoid deadlocks. It is safe to do so because this function
5061 * is called ONLY when returning to user level (PUStk=1), in which case
5062 * there is no risk of kernel stack overflow due to deep
5063 * interrupt nesting.
5064 */
5065void
5066pfm_handle_work(void)
5067{
5068 pfm_context_t *ctx;
5069 struct pt_regs *regs;
4944930a 5070 unsigned long flags, dummy_flags;
5071 unsigned long ovfl_regs;
5072 unsigned int reason;
5073 int ret;
5074
5075 ctx = PFM_GET_CTX(current);
5076 if (ctx == NULL) {
19c5870c 5077 printk(KERN_ERR "perfmon: [%d] has no PFM context\n", task_pid_nr(current));
5078 return;
5079 }
5080
5081 PROTECT_CTX(ctx, flags);
5082
5083 PFM_SET_WORK_PENDING(current, 0);
5084
5085 pfm_clear_task_notify();
5086
6450578f 5087 regs = task_pt_regs(current);
5088
5089 /*
5090 * extract reason for being here and clear
5091 */
5092 reason = ctx->ctx_fl_trap_reason;
5093 ctx->ctx_fl_trap_reason = PFM_TRAP_REASON_NONE;
5094 ovfl_regs = ctx->ctx_ovfl_regs[0];
5095
5096 DPRINT(("reason=%d state=%d\n", reason, ctx->ctx_state));
5097
5098 /*
5099 * must be done before we check for simple-reset mode
5100 */
5101 if (ctx->ctx_fl_going_zombie || ctx->ctx_state == PFM_CTX_ZOMBIE) goto do_zombie;
5102
5103
5104 //if (CTX_OVFL_NOBLOCK(ctx)) goto skip_blocking;
5105 if (reason == PFM_TRAP_REASON_RESET) goto skip_blocking;
5106
5107 /*
5108 * restore interrupt mask to what it was on entry.
5109	 * Could be enabled/disabled.
5110 */
5111 UNPROTECT_CTX(ctx, flags);
5112
5113 /*
5114	 * force interrupt enable because of wait_for_completion_interruptible()
5115 */
5116 local_irq_enable();
5117
5118 DPRINT(("before block sleeping\n"));
5119
5120 /*
5121 * may go through without blocking on SMP systems
5122 * if restart has been received already by the time we call down()
5123 */
60f1c444 5124 ret = wait_for_completion_interruptible(&ctx->ctx_restart_done);
5125
5126 DPRINT(("after block sleeping ret=%d\n", ret));
5127
5128 /*
5129 * lock context and mask interrupts again
5130 * We save flags into a dummy because we may have
5131 * altered interrupts mask compared to entry in this
5132 * function.
1da177e4 5133 */
4944930a 5134 PROTECT_CTX(ctx, dummy_flags);
5135
5136 /*
5137 * we need to read the ovfl_regs only after wake-up
5138 * because we may have had pfm_write_pmds() in between
5139	 * and that can change PMD values, in which case
5140 * ovfl_regs is reset for these new PMD values.
5141 */
5142 ovfl_regs = ctx->ctx_ovfl_regs[0];
5143
5144 if (ctx->ctx_fl_going_zombie) {
5145do_zombie:
5146 DPRINT(("context is zombie, bailing out\n"));
5147 pfm_context_force_terminate(ctx, regs);
5148 goto nothing_to_do;
5149 }
5150 /*
5151	 * in case the wait was interrupted we don't restart anything
5152 */
5153 if (ret < 0) goto nothing_to_do;
5154
5155skip_blocking:
5156 pfm_resume_after_ovfl(ctx, ovfl_regs, regs);
5157 ctx->ctx_ovfl_regs[0] = 0UL;
5158
5159nothing_to_do:
5160 /*
5161 * restore flags as they were upon entry
5162 */
5163 UNPROTECT_CTX(ctx, flags);
5164}
5165
5166static int
5167pfm_notify_user(pfm_context_t *ctx, pfm_msg_t *msg)
5168{
5169 if (ctx->ctx_state == PFM_CTX_ZOMBIE) {
5170 DPRINT(("ignoring overflow notification, owner is zombie\n"));
5171 return 0;
5172 }
5173
5174 DPRINT(("waking up somebody\n"));
5175
5176 if (msg) wake_up_interruptible(&ctx->ctx_msgq_wait);
5177
5178 /*
5179 * safe, we are not in intr handler, nor in ctxsw when
5180 * we come here
5181 */
5182 kill_fasync (&ctx->ctx_async_queue, SIGIO, POLL_IN);
5183
5184 return 0;
5185}
5186
5187static int
5188pfm_ovfl_notify_user(pfm_context_t *ctx, unsigned long ovfl_pmds)
5189{
5190 pfm_msg_t *msg = NULL;
5191
5192 if (ctx->ctx_fl_no_msg == 0) {
5193 msg = pfm_get_new_msg(ctx);
5194 if (msg == NULL) {
5195 printk(KERN_ERR "perfmon: pfm_ovfl_notify_user no more notification msgs\n");
5196 return -1;
5197 }
5198
5199 msg->pfm_ovfl_msg.msg_type = PFM_MSG_OVFL;
5200 msg->pfm_ovfl_msg.msg_ctx_fd = ctx->ctx_fd;
5201 msg->pfm_ovfl_msg.msg_active_set = 0;
5202 msg->pfm_ovfl_msg.msg_ovfl_pmds[0] = ovfl_pmds;
5203 msg->pfm_ovfl_msg.msg_ovfl_pmds[1] = 0UL;
5204 msg->pfm_ovfl_msg.msg_ovfl_pmds[2] = 0UL;
5205 msg->pfm_ovfl_msg.msg_ovfl_pmds[3] = 0UL;
5206 msg->pfm_ovfl_msg.msg_tstamp = 0UL;
5207 }
5208
5209 DPRINT(("ovfl msg: msg=%p no_msg=%d fd=%d ovfl_pmds=0x%lx\n",
5210 msg,
5211 ctx->ctx_fl_no_msg,
5212 ctx->ctx_fd,
5213 ovfl_pmds));
5214
5215 return pfm_notify_user(ctx, msg);
5216}
5217
5218static int
5219pfm_end_notify_user(pfm_context_t *ctx)
5220{
5221 pfm_msg_t *msg;
5222
5223 msg = pfm_get_new_msg(ctx);
5224 if (msg == NULL) {
5225 printk(KERN_ERR "perfmon: pfm_end_notify_user no more notification msgs\n");
5226 return -1;
5227 }
5228 /* no leak */
5229 memset(msg, 0, sizeof(*msg));
5230
5231 msg->pfm_end_msg.msg_type = PFM_MSG_END;
5232 msg->pfm_end_msg.msg_ctx_fd = ctx->ctx_fd;
5233 msg->pfm_ovfl_msg.msg_tstamp = 0UL;
5234
5235 DPRINT(("end msg: msg=%p no_msg=%d ctx_fd=%d\n",
5236 msg,
5237 ctx->ctx_fl_no_msg,
5238 ctx->ctx_fd));
5239
5240 return pfm_notify_user(ctx, msg);
5241}
5242
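/*
 * Consumer-side sketch (hedged): the controlling tool picks these messages
 * up by read()ing the context file descriptor (see pfm_read() earlier in
 * this file) and reacts to the message type, e.g.:
 *
 *	pfm_msg_t msg;
 *	read(ctx_fd, &msg, sizeof(msg));
 *	if (msg.pfm_ovfl_msg.msg_type == PFM_MSG_OVFL) {
 *		// harvest samples / read PMDs, then let the task resume
 *		perfmonctl(ctx_fd, PFM_RESTART, NULL, 0);
 *	} else if (msg.pfm_end_msg.msg_type == PFM_MSG_END) {
 *		// monitored task exited, tear the session down
 *	}
 *
 * The fd can also be poll()ed or set up for SIGIO delivery, matching the
 * wake_up_interruptible()/kill_fasync() calls in pfm_notify_user().
 */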
5243/*
5244 * main overflow processing routine.
72fdbdce 5245 * it can be called from the interrupt path or explicitly during the context switch code
5246 */
5247static void
5248pfm_overflow_handler(struct task_struct *task, pfm_context_t *ctx, u64 pmc0, struct pt_regs *regs)
5249{
5250 pfm_ovfl_arg_t *ovfl_arg;
5251 unsigned long mask;
5252 unsigned long old_val, ovfl_val, new_val;
5253 unsigned long ovfl_notify = 0UL, ovfl_pmds = 0UL, smpl_pmds = 0UL, reset_pmds;
5254 unsigned long tstamp;
5255 pfm_ovfl_ctrl_t ovfl_ctrl;
5256 unsigned int i, has_smpl;
5257 int must_notify = 0;
5258
5259 if (unlikely(ctx->ctx_state == PFM_CTX_ZOMBIE)) goto stop_monitoring;
5260
5261 /*
5262 * sanity test. Should never happen
5263 */
5264 if (unlikely((pmc0 & 0x1) == 0)) goto sanity_check;
5265
5266 tstamp = ia64_get_itc();
5267 mask = pmc0 >> PMU_FIRST_COUNTER;
5268 ovfl_val = pmu_conf->ovfl_val;
5269 has_smpl = CTX_HAS_SMPL(ctx);
5270
5271 DPRINT_ovfl(("pmc0=0x%lx pid=%d iip=0x%lx, %s "
5272 "used_pmds=0x%lx\n",
5273 pmc0,
19c5870c 5274 task ? task_pid_nr(task): -1,
5275 (regs ? regs->cr_iip : 0),
5276 CTX_OVFL_NOBLOCK(ctx) ? "nonblocking" : "blocking",
5277 ctx->ctx_used_pmds[0]));
5278
5279
5280 /*
5281 * first we update the virtual counters
5282 * assume there was a prior ia64_srlz_d() issued
5283 */
5284 for (i = PMU_FIRST_COUNTER; mask ; i++, mask >>= 1) {
5285
5286 /* skip pmd which did not overflow */
5287 if ((mask & 0x1) == 0) continue;
5288
5289 /*
5290 * Note that the pmd is not necessarily 0 at this point as qualified events
5291 * may have happened before the PMU was frozen. The residual count is not
5292 * taken into consideration here but will be with any read of the pmd via
5293 * pfm_read_pmds().
5294 */
5295 old_val = new_val = ctx->ctx_pmds[i].val;
5296 new_val += 1 + ovfl_val;
5297 ctx->ctx_pmds[i].val = new_val;
5298
5299 /*
5300 * check for overflow condition
5301 */
5302 if (likely(old_val > new_val)) {
5303 ovfl_pmds |= 1UL << i;
5304 if (PMC_OVFL_NOTIFY(ctx, i)) ovfl_notify |= 1UL << i;
5305 }
5306
5307 DPRINT_ovfl(("ctx_pmd[%d].val=0x%lx old_val=0x%lx pmd=0x%lx ovfl_pmds=0x%lx ovfl_notify=0x%lx\n",
5308 i,
5309 new_val,
5310 old_val,
5311 ia64_get_pmd(i) & ovfl_val,
5312 ovfl_pmds,
5313 ovfl_notify));
5314 }
5315
5316 /*
5317 * there was no 64-bit overflow, nothing else to do
5318 */
5319 if (ovfl_pmds == 0UL) return;
5320
5321 /*
5322 * reset all control bits
5323 */
5324 ovfl_ctrl.val = 0;
5325 reset_pmds = 0UL;
5326
5327 /*
5328 * if a sampling format module exists, then we "cache" the overflow by
5329 * calling the module's handler() routine.
5330 */
5331 if (has_smpl) {
5332 unsigned long start_cycles, end_cycles;
5333 unsigned long pmd_mask;
5334 int j, k, ret = 0;
5335 int this_cpu = smp_processor_id();
5336
5337 pmd_mask = ovfl_pmds >> PMU_FIRST_COUNTER;
5338 ovfl_arg = &ctx->ctx_ovfl_arg;
5339
5340 prefetch(ctx->ctx_smpl_hdr);
5341
5342 for(i=PMU_FIRST_COUNTER; pmd_mask && ret == 0; i++, pmd_mask >>=1) {
5343
5344 mask = 1UL << i;
5345
5346 if ((pmd_mask & 0x1) == 0) continue;
5347
5348 ovfl_arg->ovfl_pmd = (unsigned char )i;
5349 ovfl_arg->ovfl_notify = ovfl_notify & mask ? 1 : 0;
5350 ovfl_arg->active_set = 0;
5351 ovfl_arg->ovfl_ctrl.val = 0; /* module must fill in all fields */
5352 ovfl_arg->smpl_pmds[0] = smpl_pmds = ctx->ctx_pmds[i].smpl_pmds[0];
5353
5354 ovfl_arg->pmd_value = ctx->ctx_pmds[i].val;
5355 ovfl_arg->pmd_last_reset = ctx->ctx_pmds[i].lval;
5356 ovfl_arg->pmd_eventid = ctx->ctx_pmds[i].eventid;
5357
5358 /*
5359 * copy values of pmds of interest. Sampling format may copy them
5360 * into sampling buffer.
5361 */
5362 if (smpl_pmds) {
5363 for(j=0, k=0; smpl_pmds; j++, smpl_pmds >>=1) {
5364 if ((smpl_pmds & 0x1) == 0) continue;
5365 ovfl_arg->smpl_pmds_values[k++] = PMD_IS_COUNTING(j) ? pfm_read_soft_counter(ctx, j) : ia64_get_pmd(j);
5366 DPRINT_ovfl(("smpl_pmd[%d]=pmd%u=0x%lx\n", k-1, j, ovfl_arg->smpl_pmds_values[k-1]));
5367 }
5368 }
5369
5370 pfm_stats[this_cpu].pfm_smpl_handler_calls++;
5371
5372 start_cycles = ia64_get_itc();
5373
5374 /*
5375 * call custom buffer format record (handler) routine
5376 */
5377 ret = (*ctx->ctx_buf_fmt->fmt_handler)(task, ctx->ctx_smpl_hdr, ovfl_arg, regs, tstamp);
5378
5379 end_cycles = ia64_get_itc();
5380
5381 /*
5382 * For those controls, we take the union because they have
5383 * an all or nothing behavior.
5384 */
5385 ovfl_ctrl.bits.notify_user |= ovfl_arg->ovfl_ctrl.bits.notify_user;
5386 ovfl_ctrl.bits.block_task |= ovfl_arg->ovfl_ctrl.bits.block_task;
5387 ovfl_ctrl.bits.mask_monitoring |= ovfl_arg->ovfl_ctrl.bits.mask_monitoring;
5388 /*
5389 * build the bitmask of pmds to reset now
5390 */
5391 if (ovfl_arg->ovfl_ctrl.bits.reset_ovfl_pmds) reset_pmds |= mask;
5392
5393 pfm_stats[this_cpu].pfm_smpl_handler_cycles += end_cycles - start_cycles;
5394 }
5395 /*
5396 * when the module cannot handle the rest of the overflows, we abort right here
5397 */
5398 if (ret && pmd_mask) {
5399 DPRINT(("handler aborts leftover ovfl_pmds=0x%lx\n",
5400 pmd_mask<<PMU_FIRST_COUNTER));
5401 }
5402 /*
5403 * remove the pmds we reset now from the set of pmds to reset in pfm_restart()
5404 */
5405 ovfl_pmds &= ~reset_pmds;
5406 } else {
5407 /*
5408 * when no sampling module is used, then the default
5409 * is to notify on overflow if requested by user
5410 */
5411 ovfl_ctrl.bits.notify_user = ovfl_notify ? 1 : 0;
5412 ovfl_ctrl.bits.block_task = ovfl_notify ? 1 : 0;
5413 ovfl_ctrl.bits.mask_monitoring = ovfl_notify ? 1 : 0; /* XXX: change for saturation */
5414 ovfl_ctrl.bits.reset_ovfl_pmds = ovfl_notify ? 0 : 1;
5415 /*
5416 * if needed, we reset all overflowed pmds
5417 */
5418 if (ovfl_notify == 0) reset_pmds = ovfl_pmds;
5419 }
5420
5421 DPRINT_ovfl(("ovfl_pmds=0x%lx reset_pmds=0x%lx\n", ovfl_pmds, reset_pmds));
5422
5423 /*
5424 * reset the requested PMD registers using the short reset values
5425 */
5426 if (reset_pmds) {
5427 unsigned long bm = reset_pmds;
5428 pfm_reset_regs(ctx, &bm, PFM_PMD_SHORT_RESET);
5429 }
5430
5431 if (ovfl_notify && ovfl_ctrl.bits.notify_user) {
5432 /*
5433 * keep track of what to reset when unblocking
5434 */
5435 ctx->ctx_ovfl_regs[0] = ovfl_pmds;
5436
5437 /*
5438 * check for blocking context
5439 */
5440 if (CTX_OVFL_NOBLOCK(ctx) == 0 && ovfl_ctrl.bits.block_task) {
5441
5442 ctx->ctx_fl_trap_reason = PFM_TRAP_REASON_BLOCK;
5443
5444 /*
5445 * set the perfmon specific checking pending work for the task
5446 */
5447 PFM_SET_WORK_PENDING(task, 1);
5448
5449 /*
5450 * when coming from ctxsw, current still points to the
5451 * previous task, therefore we must work with task and not current.
5452 */
5453 pfm_set_task_notify(task);
5454 }
5455 /*
5456 * defer until state is changed (shorten spin window). the context is locked
5457		 * anyway, so the signal receiver would just come and spin for nothing.
5458 */
5459 must_notify = 1;
5460 }
5461
5462 DPRINT_ovfl(("owner [%d] pending=%ld reason=%u ovfl_pmds=0x%lx ovfl_notify=0x%lx masked=%d\n",
19c5870c 5463 GET_PMU_OWNER() ? task_pid_nr(GET_PMU_OWNER()) : -1,
5464 PFM_GET_WORK_PENDING(task),
5465 ctx->ctx_fl_trap_reason,
5466 ovfl_pmds,
5467 ovfl_notify,
5468 ovfl_ctrl.bits.mask_monitoring ? 1 : 0));
5469 /*
5470 * in case monitoring must be stopped, we toggle the psr bits
5471 */
5472 if (ovfl_ctrl.bits.mask_monitoring) {
5473 pfm_mask_monitoring(task);
5474 ctx->ctx_state = PFM_CTX_MASKED;
5475 ctx->ctx_fl_can_restart = 1;
5476 }
5477
5478 /*
5479 * send notification now
5480 */
5481 if (must_notify) pfm_ovfl_notify_user(ctx, ovfl_notify);
5482
5483 return;
5484
5485sanity_check:
5486 printk(KERN_ERR "perfmon: CPU%d overflow handler [%d] pmc0=0x%lx\n",
5487 smp_processor_id(),
19c5870c 5488 task ? task_pid_nr(task) : -1,
5489 pmc0);
5490 return;
5491
5492stop_monitoring:
5493 /*
5494 * in SMP, zombie context is never restored but reclaimed in pfm_load_regs().
5495 * Moreover, zombies are also reclaimed in pfm_save_regs(). Therefore we can
5496 * come here as zombie only if the task is the current task. In which case, we
5497 * can access the PMU hardware directly.
5498 *
5499 * Note that zombies do have PM_VALID set. So here we do the minimal.
5500 *
5501 * In case the context was zombified it could not be reclaimed at the time
5502 * the monitoring program exited. At this point, the PMU reservation has been
5503	 * returned, the sampling buffer has been freed. We must convert this call
5504 * into a spurious interrupt. However, we must also avoid infinite overflows
5505 * by stopping monitoring for this task. We can only come here for a per-task
5506 * context. All we need to do is to stop monitoring using the psr bits which
5507	 * are always task private. By re-enabling secure monitoring, we ensure that
5508 * the monitored task will not be able to re-activate monitoring.
5509 * The task will eventually be context switched out, at which point the context
5510 * will be reclaimed (that includes releasing ownership of the PMU).
5511 *
5512	 * So there might be a window of time where the number of per-task sessions is zero
5513	 * yet one PMU might have an owner and get at most one overflow interrupt for a zombie
5514	 * context. This is safe because if a per-task session comes in, it will push this one
5515	 * out and, by virtue of pfm_save_regs(), this one will disappear. If a system wide
5516	 * session is forced onto that CPU, given that we use task pinning, pfm_save_regs() will
5517 * also push our zombie context out.
5518 *
5519 * Overall pretty hairy stuff....
5520 */
19c5870c 5521 DPRINT(("ctx is zombie for [%d], converted to spurious\n", task ? task_pid_nr(task): -1));
5522 pfm_clear_psr_up();
5523 ia64_psr(regs)->up = 0;
5524 ia64_psr(regs)->sp = 1;
5525 return;
5526}
5527
5528static int
5529pfm_do_interrupt_handler(int irq, void *arg, struct pt_regs *regs)
5530{
5531 struct task_struct *task;
5532 pfm_context_t *ctx;
5533 unsigned long flags;
5534 u64 pmc0;
5535 int this_cpu = smp_processor_id();
5536 int retval = 0;
5537
5538 pfm_stats[this_cpu].pfm_ovfl_intr_count++;
5539
5540 /*
5541 * srlz.d done before arriving here
5542 */
5543 pmc0 = ia64_get_pmc(0);
5544
5545 task = GET_PMU_OWNER();
5546 ctx = GET_PMU_CTX();
5547
5548 /*
5549 * if we have some pending bits set
5550 * assumes : if any PMC0.bit[63-1] is set, then PMC0.fr = 1
5551 */
5552 if (PMC0_HAS_OVFL(pmc0) && task) {
5553 /*
5554 * we assume that pmc0.fr is always set here
5555 */
5556
5557 /* sanity check */
5558 if (!ctx) goto report_spurious1;
5559
5560 if (ctx->ctx_fl_system == 0 && (task->thread.flags & IA64_THREAD_PM_VALID) == 0)
5561 goto report_spurious2;
5562
5563 PROTECT_CTX_NOPRINT(ctx, flags);
5564
5565 pfm_overflow_handler(task, ctx, pmc0, regs);
5566
5567 UNPROTECT_CTX_NOPRINT(ctx, flags);
5568
5569 } else {
5570 pfm_stats[this_cpu].pfm_spurious_ovfl_intr_count++;
5571 retval = -1;
5572 }
5573 /*
5574 * keep it unfrozen at all times
5575 */
5576 pfm_unfreeze_pmu();
5577
5578 return retval;
5579
5580report_spurious1:
5581 printk(KERN_INFO "perfmon: spurious overflow interrupt on CPU%d: process %d has no PFM context\n",
19c5870c 5582 this_cpu, task_pid_nr(task));
5583 pfm_unfreeze_pmu();
5584 return -1;
5585report_spurious2:
5586 printk(KERN_INFO "perfmon: spurious overflow interrupt on CPU%d: process %d, invalid flag\n",
5587 this_cpu,
19c5870c 5588 task_pid_nr(task));
5589 pfm_unfreeze_pmu();
5590 return -1;
5591}
5592
5593static irqreturn_t
3bbe486b 5594pfm_interrupt_handler(int irq, void *arg)
5595{
5596 unsigned long start_cycles, total_cycles;
5597 unsigned long min, max;
5598 int this_cpu;
5599 int ret;
3bbe486b 5600 struct pt_regs *regs = get_irq_regs();
5601
5602 this_cpu = get_cpu();
5603 if (likely(!pfm_alt_intr_handler)) {
5604 min = pfm_stats[this_cpu].pfm_ovfl_intr_cycles_min;
5605 max = pfm_stats[this_cpu].pfm_ovfl_intr_cycles_max;
1da177e4 5606
a1ecf7f6 5607 start_cycles = ia64_get_itc();
1da177e4 5608
a1ecf7f6 5609 ret = pfm_do_interrupt_handler(irq, arg, regs);
1da177e4 5610
a1ecf7f6 5611 total_cycles = ia64_get_itc();
1da177e4 5612
5613 /*
5614 * don't measure spurious interrupts
5615 */
5616 if (likely(ret == 0)) {
5617 total_cycles -= start_cycles;
1da177e4 5618
5619 if (total_cycles < min) pfm_stats[this_cpu].pfm_ovfl_intr_cycles_min = total_cycles;
5620 if (total_cycles > max) pfm_stats[this_cpu].pfm_ovfl_intr_cycles_max = total_cycles;
1da177e4 5621
5622 pfm_stats[this_cpu].pfm_ovfl_intr_cycles += total_cycles;
5623 }
5624 }
5625 else {
5626 (*pfm_alt_intr_handler->handler)(irq, arg, regs);
1da177e4 5627 }
a1ecf7f6 5628
5629 put_cpu_no_resched();
5630 return IRQ_HANDLED;
5631}
5632
5633/*
5634 * /proc/perfmon interface, for debug only
5635 */
5636
5637#define PFM_PROC_SHOW_HEADER ((void *)NR_CPUS+1)
5638
5639static void *
5640pfm_proc_start(struct seq_file *m, loff_t *pos)
5641{
5642 if (*pos == 0) {
5643 return PFM_PROC_SHOW_HEADER;
5644 }
5645
5646 while (*pos <= NR_CPUS) {
5647 if (cpu_online(*pos - 1)) {
5648 return (void *)*pos;
5649 }
5650 ++*pos;
5651 }
5652 return NULL;
5653}
5654
5655static void *
5656pfm_proc_next(struct seq_file *m, void *v, loff_t *pos)
5657{
5658 ++*pos;
5659 return pfm_proc_start(m, pos);
5660}
5661
5662static void
5663pfm_proc_stop(struct seq_file *m, void *v)
5664{
5665}
5666
5667static void
5668pfm_proc_show_header(struct seq_file *m)
5669{
5670 struct list_head * pos;
5671 pfm_buffer_fmt_t * entry;
5672 unsigned long flags;
5673
5674 seq_printf(m,
5675 "perfmon version : %u.%u\n"
5676 "model : %s\n"
5677 "fastctxsw : %s\n"
5678 "expert mode : %s\n"
5679 "ovfl_mask : 0x%lx\n"
5680 "PMU flags : 0x%x\n",
5681 PFM_VERSION_MAJ, PFM_VERSION_MIN,
5682 pmu_conf->pmu_name,
5683 pfm_sysctl.fastctxsw > 0 ? "Yes": "No",
5684 pfm_sysctl.expert_mode > 0 ? "Yes": "No",
5685 pmu_conf->ovfl_val,
5686 pmu_conf->flags);
5687
5688 LOCK_PFS(flags);
5689
5690 seq_printf(m,
5691 "proc_sessions : %u\n"
5692 "sys_sessions : %u\n"
5693 "sys_use_dbregs : %u\n"
5694 "ptrace_use_dbregs : %u\n",
5695 pfm_sessions.pfs_task_sessions,
5696 pfm_sessions.pfs_sys_sessions,
5697 pfm_sessions.pfs_sys_use_dbregs,
5698 pfm_sessions.pfs_ptrace_use_dbregs);
5699
5700 UNLOCK_PFS(flags);
5701
5702 spin_lock(&pfm_buffer_fmt_lock);
5703
5704 list_for_each(pos, &pfm_buffer_fmt_list) {
5705 entry = list_entry(pos, pfm_buffer_fmt_t, fmt_list);
5706 seq_printf(m, "format : %02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x %s\n",
5707 entry->fmt_uuid[0],
5708 entry->fmt_uuid[1],
5709 entry->fmt_uuid[2],
5710 entry->fmt_uuid[3],
5711 entry->fmt_uuid[4],
5712 entry->fmt_uuid[5],
5713 entry->fmt_uuid[6],
5714 entry->fmt_uuid[7],
5715 entry->fmt_uuid[8],
5716 entry->fmt_uuid[9],
5717 entry->fmt_uuid[10],
5718 entry->fmt_uuid[11],
5719 entry->fmt_uuid[12],
5720 entry->fmt_uuid[13],
5721 entry->fmt_uuid[14],
5722 entry->fmt_uuid[15],
5723 entry->fmt_name);
5724 }
5725 spin_unlock(&pfm_buffer_fmt_lock);
5726
5727}
5728
5729static int
5730pfm_proc_show(struct seq_file *m, void *v)
5731{
5732 unsigned long psr;
5733 unsigned int i;
5734 int cpu;
5735
5736 if (v == PFM_PROC_SHOW_HEADER) {
5737 pfm_proc_show_header(m);
5738 return 0;
5739 }
5740
5741 /* show info for CPU (v - 1) */
5742
5743 cpu = (long)v - 1;
5744 seq_printf(m,
5745 "CPU%-2d overflow intrs : %lu\n"
5746 "CPU%-2d overflow cycles : %lu\n"
5747 "CPU%-2d overflow min : %lu\n"
5748 "CPU%-2d overflow max : %lu\n"
5749 "CPU%-2d smpl handler calls : %lu\n"
5750 "CPU%-2d smpl handler cycles : %lu\n"
5751 "CPU%-2d spurious intrs : %lu\n"
5752 "CPU%-2d replay intrs : %lu\n"
5753 "CPU%-2d syst_wide : %d\n"
5754 "CPU%-2d dcr_pp : %d\n"
5755 "CPU%-2d exclude idle : %d\n"
5756 "CPU%-2d owner : %d\n"
5757 "CPU%-2d context : %p\n"
5758 "CPU%-2d activations : %lu\n",
5759 cpu, pfm_stats[cpu].pfm_ovfl_intr_count,
5760 cpu, pfm_stats[cpu].pfm_ovfl_intr_cycles,
5761 cpu, pfm_stats[cpu].pfm_ovfl_intr_cycles_min,
5762 cpu, pfm_stats[cpu].pfm_ovfl_intr_cycles_max,
5763 cpu, pfm_stats[cpu].pfm_smpl_handler_calls,
5764 cpu, pfm_stats[cpu].pfm_smpl_handler_cycles,
5765 cpu, pfm_stats[cpu].pfm_spurious_ovfl_intr_count,
5766 cpu, pfm_stats[cpu].pfm_replay_ovfl_intr_count,
5767 cpu, pfm_get_cpu_data(pfm_syst_info, cpu) & PFM_CPUINFO_SYST_WIDE ? 1 : 0,
5768 cpu, pfm_get_cpu_data(pfm_syst_info, cpu) & PFM_CPUINFO_DCR_PP ? 1 : 0,
5769 cpu, pfm_get_cpu_data(pfm_syst_info, cpu) & PFM_CPUINFO_EXCL_IDLE ? 1 : 0,
5770 cpu, pfm_get_cpu_data(pmu_owner, cpu) ? pfm_get_cpu_data(pmu_owner, cpu)->pid: -1,
5771 cpu, pfm_get_cpu_data(pmu_ctx, cpu),
5772 cpu, pfm_get_cpu_data(pmu_activation_number, cpu));
5773
5774 if (num_online_cpus() == 1 && pfm_sysctl.debug > 0) {
5775
5776 psr = pfm_get_psr();
5777
5778 ia64_srlz_d();
5779
5780 seq_printf(m,
5781 "CPU%-2d psr : 0x%lx\n"
5782 "CPU%-2d pmc0 : 0x%lx\n",
5783 cpu, psr,
5784 cpu, ia64_get_pmc(0));
5785
5786 for (i=0; PMC_IS_LAST(i) == 0; i++) {
5787 if (PMC_IS_COUNTING(i) == 0) continue;
5788 seq_printf(m,
5789 "CPU%-2d pmc%u : 0x%lx\n"
5790 "CPU%-2d pmd%u : 0x%lx\n",
5791 cpu, i, ia64_get_pmc(i),
5792 cpu, i, ia64_get_pmd(i));
5793 }
5794 }
5795 return 0;
5796}
5797
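/*
 * Usage sketch (hypothetical userspace reader, not part of perfmon.c):
 * pfm_proc_show() above emits the header block once, for
 * PFM_PROC_SHOW_HEADER, and then one block of statistics per online CPU,
 * so a plain sequential read of /proc/perfmon is enough to sample them:
 *
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		char line[256];
 *		FILE *f = fopen("/proc/perfmon", "r");
 *
 *		if (!f)
 *			return 1;
 *		while (fgets(line, sizeof(line), f))
 *			fputs(line, stdout);	// e.g. "CPU0  overflow intrs : ..."
 *		fclose(f);
 *		return 0;
 *	}
 */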
a23fe55e 5798const struct seq_operations pfm_seq_ops = {
5799 .start = pfm_proc_start,
5800 .next = pfm_proc_next,
5801 .stop = pfm_proc_stop,
5802 .show = pfm_proc_show
5803};
5804
5805static int
5806pfm_proc_open(struct inode *inode, struct file *file)
5807{
5808 return seq_open(file, &pfm_seq_ops);
5809}
5810
5811
5812/*
5813 * we come here as soon as local_cpu_data->pfm_syst_wide is set. this happens
5814 * during pfm_enable() hence before pfm_start(). We cannot assume monitoring
5815 * is active or inactive based on mode. We must rely on the value in
5816 * local_cpu_data->pfm_syst_info
5817 */
5818void
5819pfm_syst_wide_update_task(struct task_struct *task, unsigned long info, int is_ctxswin)
5820{
5821 struct pt_regs *regs;
5822 unsigned long dcr;
5823 unsigned long dcr_pp;
5824
5825 dcr_pp = info & PFM_CPUINFO_DCR_PP ? 1 : 0;
5826
5827 /*
5828 * pid 0 is guaranteed to be the idle task. There is one such task with pid 0
5829 * on every CPU, so we can rely on the pid to identify the idle task.
5830 */
5831 if ((info & PFM_CPUINFO_EXCL_IDLE) == 0 || task->pid) {
6450578f 5832 regs = task_pt_regs(task);
5833 ia64_psr(regs)->pp = is_ctxswin ? dcr_pp : 0;
5834 return;
5835 }
5836 /*
5837 * if monitoring has started
5838 */
5839 if (dcr_pp) {
5840 dcr = ia64_getreg(_IA64_REG_CR_DCR);
5841 /*
5842 * context switching in?
5843 */
5844 if (is_ctxswin) {
5845 /* mask monitoring for the idle task */
5846 ia64_setreg(_IA64_REG_CR_DCR, dcr & ~IA64_DCR_PP);
5847 pfm_clear_psr_pp();
5848 ia64_srlz_i();
5849 return;
5850 }
5851 /*
5852 * context switching out
5853 * restore monitoring for next task
5854 *
5855 * Due to inlining this odd if-then-else construction generates
5856 * better code.
5857 */
5858 ia64_setreg(_IA64_REG_CR_DCR, dcr |IA64_DCR_PP);
5859 pfm_set_psr_pp();
5860 ia64_srlz_i();
5861 }
5862}
5863
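/*
 * Behaviour summary for pfm_syst_wide_update_task() above (descriptive
 * only): when PFM_CPUINFO_EXCL_IDLE is clear or the incoming/outgoing task
 * is not the idle task, the saved psr.pp simply follows dcr_pp on switch-in
 * and is cleared on switch-out. For the idle task with idle exclusion on
 * and monitoring started (dcr_pp set), dcr.pp and psr.pp are cleared on
 * switch-in so the idle loop is not measured, and restored on switch-out.
 */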
5864#ifdef CONFIG_SMP
5865
5866static void
5867pfm_force_cleanup(pfm_context_t *ctx, struct pt_regs *regs)
5868{
5869 struct task_struct *task = ctx->ctx_task;
5870
5871 ia64_psr(regs)->up = 0;
5872 ia64_psr(regs)->sp = 1;
5873
5874 if (GET_PMU_OWNER() == task) {
5875 DPRINT(("cleared ownership for [%d]\n",
5876 task_pid_nr(ctx->ctx_task)));
5877 SET_PMU_OWNER(NULL, NULL);
5878 }
5879
5880 /*
5881 * disconnect the task from the context and vice-versa
5882 */
5883 PFM_SET_WORK_PENDING(task, 0);
5884
5885 task->thread.pfm_context = NULL;
5886 task->thread.flags &= ~IA64_THREAD_PM_VALID;
5887
19c5870c 5888 DPRINT(("force cleanup for [%d]\n", task_pid_nr(task)));
5889}
5890
5891
5892/*
5893 * in 2.6, interrupts are masked when we come here and the runqueue lock is held
5894 */
5895void
5896pfm_save_regs(struct task_struct *task)
5897{
5898 pfm_context_t *ctx;
5899 unsigned long flags;
5900 u64 psr;
5901
5902
5903 ctx = PFM_GET_CTX(task);
5904 if (ctx == NULL) return;
5905
5906 /*
5907 * we always come here with interrupts ALREADY disabled by
5908 * the scheduler. So we simply need to protect against concurrent
5909 * access, not CPU concurrency.
5910 */
5911 flags = pfm_protect_ctx_ctxsw(ctx);
5912
5913 if (ctx->ctx_state == PFM_CTX_ZOMBIE) {
6450578f 5914 struct pt_regs *regs = task_pt_regs(task);
5915
5916 pfm_clear_psr_up();
5917
5918 pfm_force_cleanup(ctx, regs);
5919
5920 BUG_ON(ctx->ctx_smpl_hdr);
5921
5922 pfm_unprotect_ctx_ctxsw(ctx, flags);
5923
5924 pfm_context_free(ctx);
5925 return;
5926 }
5927
5928 /*
5929 * save current PSR: needed because we modify it
5930 */
5931 ia64_srlz_d();
5932 psr = pfm_get_psr();
5933
5934 BUG_ON(psr & (IA64_PSR_I));
5935
5936 /*
5937 * stop monitoring:
5938 * This is the last instruction which may generate an overflow
5939 *
5940 * We do not need to set psr.sp because it is irrelevant in the kernel.
5941 * It will be restored from ipsr when going back to user level
5942 */
5943 pfm_clear_psr_up();
5944
5945 /*
5946 * keep a copy of psr.up (for reload)
5947 */
5948 ctx->ctx_saved_psr_up = psr & IA64_PSR_UP;
5949
5950 /*
5951 * release ownership of this PMU.
5952 * PM interrupts are masked, so nothing
5953 * can happen.
5954 */
5955 SET_PMU_OWNER(NULL, NULL);
5956
5957 /*
5958 * we systematically save the PMDs as we have no
5959 * guarantee we will be scheduled on that same
5960 * CPU again.
5961 */
35589a8f 5962 pfm_save_pmds(ctx->th_pmds, ctx->ctx_used_pmds[0]);
5963
5964 /*
5965 * save pmc0 (ia64_srlz_d() was already done in pfm_save_pmds());
5966 * we will need it on the restore path to check
5967 * for pending overflow.
5968 */
35589a8f 5969 ctx->th_pmcs[0] = ia64_get_pmc(0);
5970
5971 /*
5972 * unfreeze PMU if it had pending overflows
5973 */
35589a8f 5974 if (ctx->th_pmcs[0] & ~0x1UL) pfm_unfreeze_pmu();
5975
5976 /*
5977 * finally, allow context access.
5978 * interrupts will still be masked after this call.
5979 */
5980 pfm_unprotect_ctx_ctxsw(ctx, flags);
5981}
5982
5983#else /* !CONFIG_SMP */
5984void
5985pfm_save_regs(struct task_struct *task)
5986{
5987 pfm_context_t *ctx;
5988 u64 psr;
5989
5990 ctx = PFM_GET_CTX(task);
5991 if (ctx == NULL) return;
5992
5993 /*
5994 * save current PSR: needed because we modify it
5995 */
5996 psr = pfm_get_psr();
5997
5998 BUG_ON(psr & (IA64_PSR_I));
5999
6000 /*
6001 * stop monitoring:
6002 * This is the last instruction which may generate an overflow
6003 *
6004 * We do not need to set psr.sp because it is irrelevant in the kernel.
6005 * It will be restored from ipsr when going back to user level
6006 */
6007 pfm_clear_psr_up();
6008
6009 /*
6010 * keep a copy of psr.up (for reload)
6011 */
6012 ctx->ctx_saved_psr_up = psr & IA64_PSR_UP;
6013}
6014
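/*
 * Note on the UP path: the pfm_save_regs() variant above only stops
 * monitoring and records psr.up; the expensive PMD/pmc0 write-back is
 * deferred to pfm_lazy_save_regs() below and only happens when another
 * context actually claims the PMU (see the UP pfm_load_regs() further down).
 */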
6015static void
6016pfm_lazy_save_regs (struct task_struct *task)
6017{
6018 pfm_context_t *ctx;
6019 unsigned long flags;
6020
6021 { u64 psr = pfm_get_psr();
6022 BUG_ON(psr & IA64_PSR_UP);
6023 }
6024
6025 ctx = PFM_GET_CTX(task);
6026
6027 /*
6028 * we need to mask PMU overflow here to
6029 * make sure that we maintain pmc0 until
6030 * we save it. overflow interrupts are
6031 * treated as spurious if there is no
6032 * owner.
6033 *
6034 * XXX: I don't think this is necessary
6035 */
6036 PROTECT_CTX(ctx,flags);
6037
6038 /*
6039 * release ownership of this PMU.
6040 * must be done before we save the registers.
6041 *
6042 * after this call any PMU interrupt is treated
6043 * as spurious.
6044 */
6045 SET_PMU_OWNER(NULL, NULL);
6046
6047 /*
6048 * save all the pmds we use
6049 */
35589a8f 6050 pfm_save_pmds(ctx->th_pmds, ctx->ctx_used_pmds[0]);
6051
6052 /*
6053 * save pmc0 (ia64_srlz_d() was already done in pfm_save_pmds());
6054 * it is needed to check for pending overflow
6055 * on the restore path
6056 */
35589a8f 6057 ctx->th_pmcs[0] = ia64_get_pmc(0);
6058
6059 /*
6060 * unfreeze PMU if it had pending overflows
6061 */
35589a8f 6062 if (ctx->th_pmcs[0] & ~0x1UL) pfm_unfreeze_pmu();
6063
6064 /*
6065 * we can now unmask PMU interrupts; they will
6066 * be treated as purely spurious and we will not
6067 * lose any information
6068 */
6069 UNPROTECT_CTX(ctx,flags);
6070}
6071#endif /* CONFIG_SMP */
6072
6073#ifdef CONFIG_SMP
6074/*
6075 * in 2.6, interrupts are masked when we come here and the runqueue lock is held
6076 */
6077void
6078pfm_load_regs (struct task_struct *task)
6079{
6080 pfm_context_t *ctx;
6081 unsigned long pmc_mask = 0UL, pmd_mask = 0UL;
6082 unsigned long flags;
6083 u64 psr, psr_up;
6084 int need_irq_resend;
6085
6086 ctx = PFM_GET_CTX(task);
6087 if (unlikely(ctx == NULL)) return;
6088
6089 BUG_ON(GET_PMU_OWNER());
6090
6091 /*
6092 * possible on unload
6093 */
35589a8f 6094 if (unlikely((task->thread.flags & IA64_THREAD_PM_VALID) == 0)) return;
6095
6096 /*
6097 * we always come here with interrupts ALREADY disabled by
6098 * the scheduler. So we simply need to protect against concurrent
6099 * access, not CPU concurrency.
6100 */
6101 flags = pfm_protect_ctx_ctxsw(ctx);
6102 psr = pfm_get_psr();
6103
6104 need_irq_resend = pmu_conf->flags & PFM_PMU_IRQ_RESEND;
6105
6106 BUG_ON(psr & (IA64_PSR_UP|IA64_PSR_PP));
6107 BUG_ON(psr & IA64_PSR_I);
6108
6109 if (unlikely(ctx->ctx_state == PFM_CTX_ZOMBIE)) {
6450578f 6110 struct pt_regs *regs = task_pt_regs(task);
6111
6112 BUG_ON(ctx->ctx_smpl_hdr);
6113
6114 pfm_force_cleanup(ctx, regs);
6115
6116 pfm_unprotect_ctx_ctxsw(ctx, flags);
6117
6118 /*
6119 * this one (kmalloc'ed) is fine with interrupts disabled
6120 */
6121 pfm_context_free(ctx);
6122
6123 return;
6124 }
6125
6126 /*
6127 * we restore ALL the debug registers to avoid picking up
6128 * stale state.
6129 */
6130 if (ctx->ctx_fl_using_dbreg) {
6131 pfm_restore_ibrs(ctx->ctx_ibrs, pmu_conf->num_ibrs);
6132 pfm_restore_dbrs(ctx->ctx_dbrs, pmu_conf->num_dbrs);
6133 }
6134 /*
6135 * retrieve saved psr.up
6136 */
6137 psr_up = ctx->ctx_saved_psr_up;
6138
6139 /*
6140 * if we were the last user of the PMU on that CPU,
6141 * then nothing to do except restore psr
6142 */
6143 if (GET_LAST_CPU(ctx) == smp_processor_id() && ctx->ctx_last_activation == GET_ACTIVATION()) {
6144
6145 /*
6146 * retrieve partial reload masks (due to user modifications)
6147 */
6148 pmc_mask = ctx->ctx_reload_pmcs[0];
6149 pmd_mask = ctx->ctx_reload_pmds[0];
6150
6151 } else {
6152 /*
6153 * To avoid leaking information to the user level when psr.sp=0,
6154 * we must reload ALL implemented pmds (even the ones we don't use).
6155 * In the kernel we only allow PFM_READ_PMDS on registers which
6156 * we initialized or requested (sampling) so there is no risk there.
6157 */
6158 pmd_mask = pfm_sysctl.fastctxsw ? ctx->ctx_used_pmds[0] : ctx->ctx_all_pmds[0];
6159
6160 /*
6161 * ALL accessible PMCs are systematically reloaded, unused registers
6162 * get their default (from pfm_reset_pmu_state()) values to avoid picking
6163 * up stale configuration.
6164 *
6165 * PMC0 is never in the mask. It is always restored separately.
6166 */
6167 pmc_mask = ctx->ctx_all_pmcs[0];
6168 }
6169 /*
6170 * when context is MASKED, we will restore PMC with plm=0
6171 * and PMD with stale information, but that's ok, nothing
6172 * will be captured.
6173 *
6174 * XXX: optimize here
6175 */
6176 if (pmd_mask) pfm_restore_pmds(ctx->th_pmds, pmd_mask);
6177 if (pmc_mask) pfm_restore_pmcs(ctx->th_pmcs, pmc_mask);
6178
6179 /*
6180 * check for pending overflow at the time the state
6181 * was saved.
6182 */
35589a8f 6183 if (unlikely(PMC0_HAS_OVFL(ctx->th_pmcs[0]))) {
6184 /*
6185 * reload pmc0 with the overflow information
6186 * On McKinley PMU, this will trigger a PMU interrupt
6187 */
35589a8f 6188 ia64_set_pmc(0, ctx->th_pmcs[0]);
1da177e4 6189 ia64_srlz_d();
35589a8f 6190 ctx->th_pmcs[0] = 0UL;
6191
6192 /*
6193 * will replay the PMU interrupt
6194 */
c0ad90a3 6195 if (need_irq_resend) ia64_resend_irq(IA64_PERFMON_VECTOR);
6196
6197 pfm_stats[smp_processor_id()].pfm_replay_ovfl_intr_count++;
6198 }
6199
6200 /*
6201 * we just did a reload, so we reset the partial reload fields
6202 */
6203 ctx->ctx_reload_pmcs[0] = 0UL;
6204 ctx->ctx_reload_pmds[0] = 0UL;
6205
6206 SET_LAST_CPU(ctx, smp_processor_id());
6207
6208 /*
6209 * bump the activation value for this PMU
6210 */
6211 INC_ACTIVATION();
6212 /*
6213 * record current activation for this context
6214 */
6215 SET_ACTIVATION(ctx);
6216
6217 /*
6218 * establish new ownership.
6219 */
6220 SET_PMU_OWNER(task, ctx);
6221
6222 /*
6223 * restore the psr.up bit. measurement
6224 * is active again.
6225 * no PMU interrupt can happen at this point
6226 * because we still have interrupts disabled.
6227 */
6228 if (likely(psr_up)) pfm_set_psr_up();
6229
6230 /*
6231 * allow concurrent access to context
6232 */
6233 pfm_unprotect_ctx_ctxsw(ctx, flags);
6234}
6235#else /* !CONFIG_SMP */
6236/*
6237 * reload PMU state for UP kernels
6238 * in 2.5 we come here with interrupts disabled
6239 */
6240void
6241pfm_load_regs (struct task_struct *task)
6242{
6243 pfm_context_t *ctx;
6244 struct task_struct *owner;
6245 unsigned long pmd_mask, pmc_mask;
6246 u64 psr, psr_up;
6247 int need_irq_resend;
6248
6249 owner = GET_PMU_OWNER();
6250 ctx = PFM_GET_CTX(task);
6251 psr = pfm_get_psr();
6252
6253 BUG_ON(psr & (IA64_PSR_UP|IA64_PSR_PP));
6254 BUG_ON(psr & IA64_PSR_I);
6255
6256 /*
6257 * we restore ALL the debug registers to avoid picking up
6258 * stale state.
6259 *
6260 * This must be done even when the task is still the owner
6261 * as the registers may have been modified via ptrace()
6262 * (not perfmon) by the previous task.
6263 */
6264 if (ctx->ctx_fl_using_dbreg) {
6265 pfm_restore_ibrs(ctx->ctx_ibrs, pmu_conf->num_ibrs);
6266 pfm_restore_dbrs(ctx->ctx_dbrs, pmu_conf->num_dbrs);
6267 }
6268
6269 /*
6270 * retrieve saved psr.up
6271 */
6272 psr_up = ctx->ctx_saved_psr_up;
6273 need_irq_resend = pmu_conf->flags & PFM_PMU_IRQ_RESEND;
6274
6275 /*
6276 * short path, our state is still there, just
6277 * need to restore psr and we go
6278 *
6279 * we do not touch either PMC nor PMD. the psr is not touched
6280 * by the overflow_handler. So we are safe w.r.t. to interrupt
6281 * concurrency even without interrupt masking.
6282 */
6283 if (likely(owner == task)) {
6284 if (likely(psr_up)) pfm_set_psr_up();
6285 return;
6286 }
6287
6288 /*
6289 * someone else is still using the PMU, first push it out and
6290 * then we'll be able to install our stuff !
6291 *
6292 * Upon return, there will be no owner for the current PMU
6293 */
6294 if (owner) pfm_lazy_save_regs(owner);
6295
6296 /*
6297 * To avoid leaking information to the user level when psr.sp=0,
6298 * we must reload ALL implemented pmds (even the ones we don't use).
6299 * In the kernel we only allow PFM_READ_PMDS on registers which
6300 * we initialized or requested (sampling) so there is no risk there.
6301 */
6302 pmd_mask = pfm_sysctl.fastctxsw ? ctx->ctx_used_pmds[0] : ctx->ctx_all_pmds[0];
6303
6304 /*
6305 * ALL accessible PMCs are systematically reloaded, unused registers
6306 * get their default (from pfm_reset_pmu_state()) values to avoid picking
6307 * up stale configuration.
6308 *
6309 * PMC0 is never in the mask. It is always restored separately
6310 */
6311 pmc_mask = ctx->ctx_all_pmcs[0];
6312
6313 pfm_restore_pmds(ctx->th_pmds, pmd_mask);
6314 pfm_restore_pmcs(ctx->th_pmcs, pmc_mask);
6315
6316 /*
6317 * check for pending overflow at the time the state
6318 * was saved.
6319 */
35589a8f 6320 if (unlikely(PMC0_HAS_OVFL(ctx->th_pmcs[0]))) {
6321 /*
6322 * reload pmc0 with the overflow information
6323 * On McKinley PMU, this will trigger a PMU interrupt
6324 */
35589a8f 6325 ia64_set_pmc(0, ctx->th_pmcs[0]);
6326 ia64_srlz_d();
6327
35589a8f 6328 ctx->th_pmcs[0] = 0UL;
6329
6330 /*
6331 * will replay the PMU interrupt
6332 */
c0ad90a3 6333 if (need_irq_resend) ia64_resend_irq(IA64_PERFMON_VECTOR);
6334
6335 pfm_stats[smp_processor_id()].pfm_replay_ovfl_intr_count++;
6336 }
6337
6338 /*
6339 * establish new ownership.
6340 */
6341 SET_PMU_OWNER(task, ctx);
6342
6343 /*
6344 * restore the psr.up bit. measurement
6345 * is active again.
6346 * no PMU interrupt can happen at this point
6347 * because we still have interrupts disabled.
6348 */
6349 if (likely(psr_up)) pfm_set_psr_up();
6350}
6351#endif /* CONFIG_SMP */
6352
6353/*
6354 * this function assumes monitoring is stopped
6355 */
6356static void
6357pfm_flush_pmds(struct task_struct *task, pfm_context_t *ctx)
6358{
6359 u64 pmc0;
6360 unsigned long mask2, val, pmd_val, ovfl_val;
6361 int i, can_access_pmu = 0;
6362 int is_self;
6363
6364 /*
6365 * is the caller the task being monitored (or which initiated the
6366 * session for system wide measurements)
6367 */
6368 is_self = ctx->ctx_task == task ? 1 : 0;
6369
6370 /*
6371 * can access the PMU if the task is the owner of the PMU state on the current CPU
6372 * or if we are running on the CPU bound to the context in system-wide mode
6373 * (that is not necessarily the task the context is attached to in this mode).
6374 * In system-wide we always have can_access_pmu true because a task running on an
6375 * invalid processor is flagged earlier in the call stack (see pfm_stop).
6376 */
6377 can_access_pmu = (GET_PMU_OWNER() == task) || (ctx->ctx_fl_system && ctx->ctx_cpu == smp_processor_id());
6378 if (can_access_pmu) {
6379 /*
6380 * Mark the PMU as not owned
6381 * This will cause the interrupt handler to do nothing in case an overflow
6382 * interrupt was in-flight
6383 * This also guarantees that pmc0 will contain the final state
6384 * It virtually gives us full control on overflow processing from that point
6385 * on.
6386 */
6387 SET_PMU_OWNER(NULL, NULL);
6388 DPRINT(("releasing ownership\n"));
6389
6390 /*
6391 * read current overflow status:
6392 *
6393 * we are guaranteed to read the final stable state
6394 */
6395 ia64_srlz_d();
6396 pmc0 = ia64_get_pmc(0); /* slow */
6397
6398 /*
6399 * reset freeze bit, overflow status information destroyed
6400 */
6401 pfm_unfreeze_pmu();
6402 } else {
35589a8f 6403 pmc0 = ctx->th_pmcs[0];
6404 /*
6405 * clear whatever overflow status bits there were
6406 */
35589a8f 6407 ctx->th_pmcs[0] = 0;
6408 }
6409 ovfl_val = pmu_conf->ovfl_val;
6410 /*
6411 * we save all the used pmds
6412 * we take care of overflows for counting PMDs
6413 *
6414 * XXX: sampling situation is not taken into account here
6415 */
6416 mask2 = ctx->ctx_used_pmds[0];
6417
6418 DPRINT(("is_self=%d ovfl_val=0x%lx mask2=0x%lx\n", is_self, ovfl_val, mask2));
6419
6420 for (i = 0; mask2; i++, mask2>>=1) {
6421
6422 /* skip non used pmds */
6423 if ((mask2 & 0x1) == 0) continue;
6424
6425 /*
6426 * can_access_pmu is always true in system wide mode
6427 */
35589a8f 6428 val = pmd_val = can_access_pmu ? ia64_get_pmd(i) : ctx->th_pmds[i];
6429
6430 if (PMD_IS_COUNTING(i)) {
6431 DPRINT(("[%d] pmd[%d] ctx_pmd=0x%lx hw_pmd=0x%lx\n",
19c5870c 6432 task_pid_nr(task),
6433 i,
6434 ctx->ctx_pmds[i].val,
6435 val & ovfl_val));
6436
6437 /*
6438 * we rebuild the full 64 bit value of the counter
6439 */
6440 val = ctx->ctx_pmds[i].val + (val & ovfl_val);
6441
6442 /*
6443 * now everything is in ctx_pmds[] and we need
6444 * to clear the saved context from save_regs() such that
6445 * pfm_read_pmds() gets the correct value
6446 */
6447 pmd_val = 0UL;
6448
6449 /*
6450 * take care of overflow inline
6451 */
6452 if (pmc0 & (1UL << i)) {
6453 val += 1 + ovfl_val;
19c5870c 6454 DPRINT(("[%d] pmd[%d] overflowed\n", task_pid_nr(task), i));
6455 }
6456 }
6457
19c5870c 6458 DPRINT(("[%d] ctx_pmd[%d]=0x%lx pmd_val=0x%lx\n", task_pid_nr(task), i, val, pmd_val));
1da177e4 6459
35589a8f 6460 if (is_self) ctx->th_pmds[i] = pmd_val;
6461
6462 ctx->ctx_pmds[i].val = val;
6463 }
6464}
6465
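/*
 * Minimal sketch of the 64-bit counter rebuild performed in pfm_flush_pmds()
 * above (hypothetical helper, not used anywhere in this file; assumes
 * ovfl_val is the low-bit mask of the hardware counter, e.g. (1UL<<47)-1
 * for a 47-bit PMD):
 */
static inline unsigned long
pfm_example_rebuild_pmd(unsigned long soft_val, unsigned long hw_val,
			unsigned long ovfl_val, int overflowed)
{
	/* software-maintained upper bits + live low bits from the PMD */
	unsigned long val = soft_val + (hw_val & ovfl_val);

	/* an overflow recorded in pmc0 accounts for one extra hardware wrap */
	if (overflowed)
		val += 1 + ovfl_val;

	return val;
}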
6466static struct irqaction perfmon_irqaction = {
6467 .handler = pfm_interrupt_handler,
121a4226 6468 .flags = IRQF_DISABLED,
6469 .name = "perfmon"
6470};
6471
6472static void
6473pfm_alt_save_pmu_state(void *data)
6474{
6475 struct pt_regs *regs;
6476
6450578f 6477 regs = task_pt_regs(current);
6478
6479 DPRINT(("called\n"));
6480
6481 /*
6482 * should not be necessary but
6483 * let's not take any risk
6484 */
6485 pfm_clear_psr_up();
6486 pfm_clear_psr_pp();
6487 ia64_psr(regs)->pp = 0;
6488
6489 /*
6490 * This call is required
6491 * May cause a spurious interrupt on some processors
6492 */
6493 pfm_freeze_pmu();
6494
6495 ia64_srlz_d();
6496}
6497
6498void
6499pfm_alt_restore_pmu_state(void *data)
6500{
6501 struct pt_regs *regs;
6502
6450578f 6503 regs = task_pt_regs(current);
6504
6505 DPRINT(("called\n"));
6506
6507 /*
6508 * put PMU back in state expected
6509 * by perfmon
6510 */
6511 pfm_clear_psr_up();
6512 pfm_clear_psr_pp();
6513 ia64_psr(regs)->pp = 0;
6514
6515 /*
6516 * perfmon runs with PMU unfrozen at all times
6517 */
6518 pfm_unfreeze_pmu();
6519
6520 ia64_srlz_d();
6521}
6522
6523int
6524pfm_install_alt_pmu_interrupt(pfm_intr_handler_desc_t *hdl)
6525{
6526 int ret, i;
6527 int reserve_cpu;
6528
6529 /* some sanity checks */
6530 if (hdl == NULL || hdl->handler == NULL) return -EINVAL;
6531
6532 /* do the easy test first */
6533 if (pfm_alt_intr_handler) return -EBUSY;
6534
6535 /* one at a time in the install or remove, just fail the others */
6536 if (!spin_trylock(&pfm_alt_install_check)) {
6537 return -EBUSY;
6538 }
6539
6540 /* reserve our session */
6541 for_each_online_cpu(reserve_cpu) {
6542 ret = pfm_reserve_session(NULL, 1, reserve_cpu);
6543 if (ret) goto cleanup_reserve;
6544 }
6545
6546 /* save the current system wide pmu states */
6547 ret = on_each_cpu(pfm_alt_save_pmu_state, NULL, 0, 1);
6548 if (ret) {
6549 DPRINT(("on_each_cpu() failed: %d\n", ret));
6550 goto cleanup_reserve;
6551 }
6552
6553 /* officially change to the alternate interrupt handler */
6554 pfm_alt_intr_handler = hdl;
6555
6556 spin_unlock(&pfm_alt_install_check);
6557
6558 return 0;
6559
6560cleanup_reserve:
6561 for_each_online_cpu(i) {
6562 /* don't unreserve more than we reserved */
6563 if (i >= reserve_cpu) break;
6564
6565 pfm_unreserve_session(NULL, 1, i);
6566 }
6567
6568 spin_unlock(&pfm_alt_install_check);
6569
6570 return ret;
6571}
6572EXPORT_SYMBOL_GPL(pfm_install_alt_pmu_interrupt);
6573
6574int
6575pfm_remove_alt_pmu_interrupt(pfm_intr_handler_desc_t *hdl)
6576{
6577 int i;
6578 int ret;
6579
6580 if (hdl == NULL) return -EINVAL;
6581
6582 /* cannot remove someone else's handler! */
6583 if (pfm_alt_intr_handler != hdl) return -EINVAL;
6584
6585 /* one at a time in the install or remove, just fail the others */
6586 if (!spin_trylock(&pfm_alt_install_check)) {
6587 return -EBUSY;
6588 }
6589
6590 pfm_alt_intr_handler = NULL;
6591
6592 ret = on_each_cpu(pfm_alt_restore_pmu_state, NULL, 0, 1);
6593 if (ret) {
6594 DPRINT(("on_each_cpu() failed: %d\n", ret));
6595 }
6596
6597 for_each_online_cpu(i) {
6598 pfm_unreserve_session(NULL, 1, i);
6599 }
6600
6601 spin_unlock(&pfm_alt_install_check);
6602
6603 return 0;
6604}
6605EXPORT_SYMBOL_GPL(pfm_remove_alt_pmu_interrupt);
6606
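/*
 * Usage sketch for the alternate PMU interrupt handler API above
 * (hypothetical client module, e.g. a diagnostic/dump facility; the "my_"
 * names are made up and my_pmu_intr_handler is assumed to have the
 * prototype required by pfm_intr_handler_desc_t in asm/perfmon.h):
 *
 *	static pfm_intr_handler_desc_t my_desc = {
 *		.handler = my_pmu_intr_handler,
 *	};
 *
 *	static int __init my_init(void)
 *	{
 *		return pfm_install_alt_pmu_interrupt(&my_desc);
 *	}
 *
 *	static void __exit my_exit(void)
 *	{
 *		pfm_remove_alt_pmu_interrupt(&my_desc);
 *	}
 *
 * Install fails with -EBUSY if another alternate handler is already in
 * place (or the install/remove lock is contended) and propagates the
 * pfm_reserve_session() error if any online CPU has a conflicting session.
 */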
6607/*
6608 * perfmon initialization routine, called from the initcall() table
6609 */
6610static int init_pfm_fs(void);
6611
6612static int __init
6613pfm_probe_pmu(void)
6614{
6615 pmu_config_t **p;
6616 int family;
6617
6618 family = local_cpu_data->family;
6619 p = pmu_confs;
6620
6621 while(*p) {
6622 if ((*p)->probe) {
6623 if ((*p)->probe() == 0) goto found;
6624 } else if ((*p)->pmu_family == family || (*p)->pmu_family == 0xff) {
6625 goto found;
6626 }
6627 p++;
6628 }
6629 return -1;
6630found:
6631 pmu_conf = *p;
6632 return 0;
6633}
6634
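/*
 * Matching rule used by pfm_probe_pmu() above: each pmu_config_t entry in
 * pmu_confs[] is selected either by its probe() hook returning 0 or, when
 * no probe() is provided, by pmu_family matching local_cpu_data->family,
 * with 0xff acting as a catch-all for a generic/default entry.
 */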
5dfe4c96 6635static const struct file_operations pfm_proc_fops = {
6636 .open = pfm_proc_open,
6637 .read = seq_read,
6638 .llseek = seq_lseek,
6639 .release = seq_release,
6640};
6641
6642int __init
6643pfm_init(void)
6644{
6645 unsigned int n, n_counters, i;
6646
6647 printk("perfmon: version %u.%u IRQ %u\n",
6648 PFM_VERSION_MAJ,
6649 PFM_VERSION_MIN,
6650 IA64_PERFMON_VECTOR);
6651
6652 if (pfm_probe_pmu()) {
6653 printk(KERN_INFO "perfmon: disabled, there is no support for processor family %d\n",
6654 local_cpu_data->family);
6655 return -ENODEV;
6656 }
6657
6658 /*
6659 * compute the number of implemented PMD/PMC from the
6660 * description tables
6661 */
6662 n = 0;
6663 for (i=0; PMC_IS_LAST(i) == 0; i++) {
6664 if (PMC_IS_IMPL(i) == 0) continue;
6665 pmu_conf->impl_pmcs[i>>6] |= 1UL << (i&63);
6666 n++;
6667 }
6668 pmu_conf->num_pmcs = n;
6669
6670 n = 0; n_counters = 0;
6671 for (i=0; PMD_IS_LAST(i) == 0; i++) {
6672 if (PMD_IS_IMPL(i) == 0) continue;
6673 pmu_conf->impl_pmds[i>>6] |= 1UL << (i&63);
6674 n++;
6675 if (PMD_IS_COUNTING(i)) n_counters++;
6676 }
6677 pmu_conf->num_pmds = n;
6678 pmu_conf->num_counters = n_counters;
6679
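	/*
	 * Layout sketch for the bitmaps built above (illustrative only): an
	 * implemented register r sets bit (r & 63) of the 64-bit word
	 * impl_pmcs[r >> 6] / impl_pmds[r >> 6]; e.g. PMC4 lands in bit 4 of
	 * word 0, and a hypothetical PMD70 would land in bit 6 of word 1.
	 */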
6680 /*
6681 * sanity checks on the number of debug registers
6682 */
6683 if (pmu_conf->use_rr_dbregs) {
6684 if (pmu_conf->num_ibrs > IA64_NUM_DBG_REGS) {
6685 printk(KERN_INFO "perfmon: unsupported number of code debug registers (%u)\n", pmu_conf->num_ibrs);
6686 pmu_conf = NULL;
6687 return -1;
6688 }
6689 if (pmu_conf->num_dbrs > IA64_NUM_DBG_REGS) {
6690 printk(KERN_INFO "perfmon: unsupported number of data debug registers (%u)\n", pmu_conf->num_dbrs);
6691 pmu_conf = NULL;
6692 return -1;
6693 }
6694 }
6695
6696 printk("perfmon: %s PMU detected, %u PMCs, %u PMDs, %u counters (%lu bits)\n",
6697 pmu_conf->pmu_name,
6698 pmu_conf->num_pmcs,
6699 pmu_conf->num_pmds,
6700 pmu_conf->num_counters,
6701 ffz(pmu_conf->ovfl_val));
6702
6703 /* sanity check */
35589a8f 6704 if (pmu_conf->num_pmds >= PFM_NUM_PMD_REGS || pmu_conf->num_pmcs >= PFM_NUM_PMC_REGS) {
6705 printk(KERN_ERR "perfmon: not enough pmc/pmd, perfmon disabled\n");
6706 pmu_conf = NULL;
6707 return -1;
6708 }
6709
6710 /*
6711 * create /proc/perfmon (mostly for debugging purposes)
6712 */
6713 perfmon_dir = create_proc_entry("perfmon", S_IRUGO, NULL);
6714 if (perfmon_dir == NULL) {
6715 printk(KERN_ERR "perfmon: cannot create /proc entry, perfmon disabled\n");
6716 pmu_conf = NULL;
6717 return -1;
6718 }
6719 /*
6720 * install customized file operations for /proc/perfmon entry
6721 */
6722 perfmon_dir->proc_fops = &pfm_proc_fops;
6723
6724 /*
6725 * create /proc/sys/kernel/perfmon (for debugging purposes)
6726 */
0b4d4147 6727 pfm_sysctl_header = register_sysctl_table(pfm_sysctl_root);
6728
6729 /*
6730 * initialize all our spinlocks
6731 */
6732 spin_lock_init(&pfm_sessions.pfs_lock);
6733 spin_lock_init(&pfm_buffer_fmt_lock);
6734
6735 init_pfm_fs();
6736
6737 for(i=0; i < NR_CPUS; i++) pfm_stats[i].pfm_ovfl_intr_cycles_min = ~0UL;
6738
6739 return 0;
6740}
6741
6742__initcall(pfm_init);
6743
6744/*
6745 * this function is called before pfm_init()
6746 */
6747void
6748pfm_init_percpu (void)
6749{
ff741906 6750 static int first_time=1;
6751 /*
6752 * make sure no measurement is active
6753 * (may inherit programmed PMCs from EFI).
6754 */
6755 pfm_clear_psr_pp();
6756 pfm_clear_psr_up();
6757
6758 /*
6759 * we run with the PMU not frozen at all times
6760 */
6761 pfm_unfreeze_pmu();
6762
ff741906 6763 if (first_time) {
1da177e4 6764 register_percpu_irq(IA64_PERFMON_VECTOR, &perfmon_irqaction);
6765 first_time=0;
6766 }
6767
6768 ia64_setreg(_IA64_REG_CR_PMV, IA64_PERFMON_VECTOR);
6769 ia64_srlz_d();
6770}
6771
6772/*
6773 * used for debug purposes only
6774 */
6775void
6776dump_pmu_state(const char *from)
6777{
6778 struct task_struct *task;
6779 struct pt_regs *regs;
6780 pfm_context_t *ctx;
6781 unsigned long psr, dcr, info, flags;
6782 int i, this_cpu;
6783
6784 local_irq_save(flags);
6785
6786 this_cpu = smp_processor_id();
6450578f 6787 regs = task_pt_regs(current);
6788 info = PFM_CPUINFO_GET();
6789 dcr = ia64_getreg(_IA64_REG_CR_DCR);
6790
6791 if (info == 0 && ia64_psr(regs)->pp == 0 && (dcr & IA64_DCR_PP) == 0) {
6792 local_irq_restore(flags);
6793 return;
6794 }
6795
6796 printk("CPU%d from %s() current [%d] iip=0x%lx %s\n",
6797 this_cpu,
6798 from,
19c5870c 6799 task_pid_nr(current),
6800 regs->cr_iip,
6801 current->comm);
6802
6803 task = GET_PMU_OWNER();
6804 ctx = GET_PMU_CTX();
6805
19c5870c 6806 printk("->CPU%d owner [%d] ctx=%p\n", this_cpu, task ? task_pid_nr(task) : -1, ctx);
6807
6808 psr = pfm_get_psr();
6809
6810 printk("->CPU%d pmc0=0x%lx psr.pp=%d psr.up=%d dcr.pp=%d syst_info=0x%lx user_psr.up=%d user_psr.pp=%d\n",
6811 this_cpu,
6812 ia64_get_pmc(0),
6813 psr & IA64_PSR_PP ? 1 : 0,
6814 psr & IA64_PSR_UP ? 1 : 0,
6815 dcr & IA64_DCR_PP ? 1 : 0,
6816 info,
6817 ia64_psr(regs)->up,
6818 ia64_psr(regs)->pp);
6819
6820 ia64_psr(regs)->up = 0;
6821 ia64_psr(regs)->pp = 0;
6822
6823 for (i=1; PMC_IS_LAST(i) == 0; i++) {
6824 if (PMC_IS_IMPL(i) == 0) continue;
35589a8f 6825 printk("->CPU%d pmc[%d]=0x%lx thread_pmc[%d]=0x%lx\n", this_cpu, i, ia64_get_pmc(i), i, ctx->th_pmcs[i]);
6826 }
6827
6828 for (i=1; PMD_IS_LAST(i) == 0; i++) {
6829 if (PMD_IS_IMPL(i) == 0) continue;
35589a8f 6830 printk("->CPU%d pmd[%d]=0x%lx thread_pmd[%d]=0x%lx\n", this_cpu, i, ia64_get_pmd(i), i, ctx->th_pmds[i]);
6831 }
6832
6833 if (ctx) {
6834 printk("->CPU%d ctx_state=%d vaddr=%p addr=%p msgq_head=%d msgq_tail=%d saved_psr_up=0x%lx\n",
6835 this_cpu,
6836 ctx->ctx_state,
6837 ctx->ctx_smpl_vaddr,
6838 ctx->ctx_smpl_hdr,
6839 ctx->ctx_msgq_head,
6840 ctx->ctx_msgq_tail,
6841 ctx->ctx_saved_psr_up);
6842 }
6843 local_irq_restore(flags);
6844}
6845
6846/*
6847 * called from process.c:copy_thread(). task is new child.
6848 */
6849void
6850pfm_inherit(struct task_struct *task, struct pt_regs *regs)
6851{
6852 struct thread_struct *thread;
6853
19c5870c 6854 DPRINT(("perfmon: pfm_inherit clearing state for [%d]\n", task_pid_nr(task)));
6855
6856 thread = &task->thread;
6857
6858 /*
6859 * cut links inherited from parent (current)
6860 */
6861 thread->pfm_context = NULL;
6862
6863 PFM_SET_WORK_PENDING(task, 0);
6864
6865 /*
6866 * the psr bits are already set properly in copy_threads()
6867 */
6868}
6869#else /* !CONFIG_PERFMON */
6870asmlinkage long
6871sys_perfmonctl (int fd, int cmd, void *arg, int count)
6872{
6873 return -ENOSYS;
6874}
6875#endif /* CONFIG_PERFMON */