/* arch/arm/oprofile/common.c — oprofile backend layered on the perf_event API */
/**
 * @file common.c
 *
 * @remark Copyright 2004 Oprofile Authors
 * @remark Copyright 2010 ARM Ltd.
 * @remark Read the file COPYING
 *
 * @author Zwane Mwaikambo
 * @author Will Deacon [move to perf]
 */

8c1fc96f 12#include <linux/cpumask.h>
d1e86d64 13#include <linux/err.h>
8c1fc96f 14#include <linux/errno.h>
1da177e4 15#include <linux/init.h>
8c1fc96f 16#include <linux/mutex.h>
1da177e4 17#include <linux/oprofile.h>
8c1fc96f 18#include <linux/perf_event.h>
d1e86d64 19#include <linux/platform_device.h>
ae92dc9f 20#include <linux/slab.h>
8c1fc96f
WD
21#include <asm/stacktrace.h>
22#include <linux/uaccess.h>
23
24#include <asm/perf_event.h>
25#include <asm/ptrace.h>
1da177e4 26
8c1fc96f
WD
27#ifdef CONFIG_HW_PERF_EVENTS
/*
 * Per performance monitor configuration as set via oprofilefs.
 */
struct op_counter_config {
	unsigned long count;		/* sample period, copied to attr.sample_period */
	unsigned long enabled;		/* non-zero: create a perf event for this counter */
	unsigned long event;		/* raw event number, copied to attr.config */
	unsigned long unit_mask;	/* exposed via oprofilefs; not consumed here */
	unsigned long kernel;		/* exposed via oprofilefs; not consumed here */
	unsigned long user;		/* exposed via oprofilefs; not consumed here */
	struct perf_event_attr attr;	/* perf attribute built by op_perf_setup() */
};
1da177e4 40
static int op_arm_enabled;		/* non-zero while profiling is running */
static DEFINE_MUTEX(op_arm_mutex);	/* serialises start/stop/suspend/resume */

static struct op_counter_config *counter_config;
/* one array of perf_event pointers per possible CPU, one slot per counter */
static struct perf_event **perf_events[nr_cpumask_bits];
static int perf_num_counters;
47
48/*
49 * Overflow callback for oprofile.
50 */
51static void op_overflow_handler(struct perf_event *event, int unused,
52 struct perf_sample_data *data, struct pt_regs *regs)
53{
54 int id;
55 u32 cpu = smp_processor_id();
56
57 for (id = 0; id < perf_num_counters; ++id)
58 if (perf_events[cpu][id] == event)
59 break;
60
61 if (id != perf_num_counters)
62 oprofile_add_sample(regs, id);
63 else
64 pr_warning("oprofile: ignoring spurious overflow "
65 "on cpu %u\n", cpu);
66}
67
68/*
69 * Called by op_arm_setup to create perf attributes to mirror the oprofile
70 * settings in counter_config. Attributes are created as `pinned' events and
71 * so are permanently scheduled on the PMU.
72 */
73static void op_perf_setup(void)
74{
75 int i;
76 u32 size = sizeof(struct perf_event_attr);
77 struct perf_event_attr *attr;
78
79 for (i = 0; i < perf_num_counters; ++i) {
80 attr = &counter_config[i].attr;
81 memset(attr, 0, size);
82 attr->type = PERF_TYPE_RAW;
83 attr->size = size;
84 attr->config = counter_config[i].event;
85 attr->sample_period = counter_config[i].count;
86 attr->pinned = 1;
87 }
88}
89
90static int op_create_counter(int cpu, int event)
91{
92 int ret = 0;
93 struct perf_event *pevent;
94
95 if (!counter_config[event].enabled || (perf_events[cpu][event] != NULL))
96 return ret;
97
98 pevent = perf_event_create_kernel_counter(&counter_config[event].attr,
99 cpu, -1,
100 op_overflow_handler);
101
102 if (IS_ERR(pevent)) {
103 ret = PTR_ERR(pevent);
104 } else if (pevent->state != PERF_EVENT_STATE_ACTIVE) {
98d943b0 105 perf_event_release_kernel(pevent);
8c1fc96f
WD
106 pr_warning("oprofile: failed to enable event %d "
107 "on CPU %d\n", event, cpu);
108 ret = -EBUSY;
109 } else {
110 perf_events[cpu][event] = pevent;
111 }
112
113 return ret;
114}
115
116static void op_destroy_counter(int cpu, int event)
117{
118 struct perf_event *pevent = perf_events[cpu][event];
119
120 if (pevent) {
121 perf_event_release_kernel(pevent);
122 perf_events[cpu][event] = NULL;
123 }
124}
125
126/*
127 * Called by op_arm_start to create active perf events based on the
128 * perviously configured attributes.
129 */
130static int op_perf_start(void)
131{
132 int cpu, event, ret = 0;
133
134 for_each_online_cpu(cpu) {
135 for (event = 0; event < perf_num_counters; ++event) {
136 ret = op_create_counter(cpu, event);
137 if (ret)
138 goto out;
139 }
140 }
141
142out:
143 return ret;
144}
145
146/*
147 * Called by op_arm_stop at the end of a profiling run.
148 */
149static void op_perf_stop(void)
150{
151 int cpu, event;
152
153 for_each_online_cpu(cpu)
154 for (event = 0; event < perf_num_counters; ++event)
155 op_destroy_counter(cpu, event);
156}
157
158
159static char *op_name_from_perf_id(enum arm_perf_pmu_ids id)
160{
161 switch (id) {
162 case ARM_PERF_PMU_ID_XSCALE1:
163 return "arm/xscale1";
164 case ARM_PERF_PMU_ID_XSCALE2:
165 return "arm/xscale2";
166 case ARM_PERF_PMU_ID_V6:
167 return "arm/armv6";
168 case ARM_PERF_PMU_ID_V6MP:
169 return "arm/mpcore";
170 case ARM_PERF_PMU_ID_CA8:
171 return "arm/armv7";
172 case ARM_PERF_PMU_ID_CA9:
173 return "arm/armv7-ca9";
174 default:
175 return NULL;
176 }
177}
1da177e4 178
55f05234 179static int op_arm_create_files(struct super_block *sb, struct dentry *root)
1da177e4
LT
180{
181 unsigned int i;
182
8c1fc96f 183 for (i = 0; i < perf_num_counters; i++) {
1da177e4 184 struct dentry *dir;
ae92dc9f 185 char buf[4];
1da177e4
LT
186
187 snprintf(buf, sizeof buf, "%d", i);
188 dir = oprofilefs_mkdir(sb, root, buf);
189 oprofilefs_create_ulong(sb, dir, "enabled", &counter_config[i].enabled);
190 oprofilefs_create_ulong(sb, dir, "event", &counter_config[i].event);
191 oprofilefs_create_ulong(sb, dir, "count", &counter_config[i].count);
192 oprofilefs_create_ulong(sb, dir, "unit_mask", &counter_config[i].unit_mask);
193 oprofilefs_create_ulong(sb, dir, "kernel", &counter_config[i].kernel);
194 oprofilefs_create_ulong(sb, dir, "user", &counter_config[i].user);
195 }
196
197 return 0;
198}
199
/*
 * oprofile "setup" hook: translate the oprofilefs-visible counter
 * settings into perf attributes. oprofilefs_lock serialises against
 * userspace writing the configuration files concurrently.
 */
static int op_arm_setup(void)
{
	spin_lock(&oprofilefs_lock);
	op_perf_setup();
	spin_unlock(&oprofilefs_lock);
	return 0;
}
207
55f05234 208static int op_arm_start(void)
1da177e4
LT
209{
210 int ret = -EBUSY;
211
93ad7949 212 mutex_lock(&op_arm_mutex);
55f05234 213 if (!op_arm_enabled) {
8c1fc96f
WD
214 ret = 0;
215 op_perf_start();
216 op_arm_enabled = 1;
1da177e4 217 }
93ad7949 218 mutex_unlock(&op_arm_mutex);
1da177e4
LT
219 return ret;
220}
221
55f05234 222static void op_arm_stop(void)
1da177e4 223{
93ad7949 224 mutex_lock(&op_arm_mutex);
55f05234 225 if (op_arm_enabled)
8c1fc96f 226 op_perf_stop();
55f05234 227 op_arm_enabled = 0;
93ad7949 228 mutex_unlock(&op_arm_mutex);
1da177e4
LT
229}
230
b5893c56 231#ifdef CONFIG_PM
/*
 * PM suspend: stop the perf events but deliberately leave
 * op_arm_enabled set, so op_arm_resume() knows to restart them.
 */
static int op_arm_suspend(struct platform_device *dev, pm_message_t state)
{
	mutex_lock(&op_arm_mutex);
	if (op_arm_enabled)
		op_perf_stop();
	mutex_unlock(&op_arm_mutex);
	return 0;
}
240
/*
 * PM resume: restart profiling if it was running before suspend. If
 * op_perf_start() fails (returns non-zero), mark profiling disabled so
 * the software state stays consistent with the hardware.
 */
static int op_arm_resume(struct platform_device *dev)
{
	mutex_lock(&op_arm_mutex);
	if (op_arm_enabled && op_perf_start())
		op_arm_enabled = 0;
	mutex_unlock(&op_arm_mutex);
	return 0;
}
249
d1e86d64
WD
/*
 * Dummy platform driver/device pair: they exist only so the PM core
 * invokes op_arm_suspend()/op_arm_resume() around system sleep.
 */
static struct platform_driver oprofile_driver = {
	.driver		= {
		.name		= "arm-oprofile",
	},
	.resume		= op_arm_resume,
	.suspend	= op_arm_suspend,
};

/* device instance registered by init_driverfs(); owner of the PM hooks */
static struct platform_device *oprofile_pdev;
b5893c56
RK
259
260static int __init init_driverfs(void)
261{
262 int ret;
263
d1e86d64
WD
264 ret = platform_driver_register(&oprofile_driver);
265 if (ret)
266 goto out;
267
268 oprofile_pdev = platform_device_register_simple(
269 oprofile_driver.driver.name, 0, NULL, 0);
270 if (IS_ERR(oprofile_pdev)) {
271 ret = PTR_ERR(oprofile_pdev);
272 platform_driver_unregister(&oprofile_driver);
273 }
b5893c56 274
d1e86d64 275out:
b5893c56
RK
276 return ret;
277}
278
/* Tear down the PM device/driver pair created by init_driverfs(). */
static void exit_driverfs(void)
{
	platform_device_unregister(oprofile_pdev);
	platform_driver_unregister(&oprofile_driver);
}
284#else
/* !CONFIG_PM: no suspend/resume handling is needed, so these are no-ops. */
static int __init init_driverfs(void) { return 0; }
#define exit_driverfs() do { } while (0)
287#endif /* CONFIG_PM */
288
8c1fc96f 289static int report_trace(struct stackframe *frame, void *d)
1da177e4 290{
8c1fc96f 291 unsigned int *depth = d;
c6b9dafc 292
8c1fc96f
WD
293 if (*depth) {
294 oprofile_add_trace(frame->pc);
295 (*depth)--;
296 }
1b7b5698 297
8c1fc96f
WD
298 return *depth == 0;
299}
c6b9dafc 300
8c1fc96f
WD
/*
 * The registers we're interested in are at the end of the variable
 * length saved register structure. The fp points at the end of this
 * structure so the address of this struct is:
 * (struct frame_tail *)(xxx->fp)-1
 *
 * Packed so no compiler padding is assumed when this record is copied
 * raw from user memory.
 */
struct frame_tail {
	struct frame_tail *fp;	/* previous (older) frame record */
	unsigned long sp;
	unsigned long lr;	/* saved return address reported to oprofile */
} __attribute__((packed));
2d9e1ae0 312
8c1fc96f
WD
/*
 * Walk one step up a userspace frame-pointer chain: copy the frame
 * record at @tail in from user memory, report its saved lr to oprofile,
 * and return the next (older) frame record — or NULL to stop the walk
 * (inaccessible memory, copy fault, or a non-increasing fp).
 */
static struct frame_tail* user_backtrace(struct frame_tail *tail)
{
	struct frame_tail buftail[2];

	/* Also check accessibility of one struct frame_tail beyond */
	if (!access_ok(VERIFY_READ, tail, sizeof(buftail)))
		return NULL;
	if (__copy_from_user_inatomic(buftail, tail, sizeof(buftail)))
		return NULL;

	oprofile_add_trace(buftail[0].lr);

	/* frame pointers should strictly progress back up the stack
	 * (towards higher addresses) */
	if (tail >= buftail[0].fp)
		return NULL;

	return buftail[0].fp-1;
}
332
333static void arm_backtrace(struct pt_regs * const regs, unsigned int depth)
334{
335 struct frame_tail *tail = ((struct frame_tail *) regs->ARM_fp) - 1;
336
337 if (!user_mode(regs)) {
338 struct stackframe frame;
339 frame.fp = regs->ARM_fp;
340 frame.sp = regs->ARM_sp;
341 frame.lr = regs->ARM_lr;
342 frame.pc = regs->ARM_pc;
343 walk_stackframe(&frame, report_trace, &depth);
344 return;
345 }
346
347 while (depth-- && tail && !((unsigned long) tail & 3))
348 tail = user_backtrace(tail);
349}
350
351int __init oprofile_arch_init(struct oprofile_operations *ops)
352{
353 int cpu, ret = 0;
354
355 perf_num_counters = armpmu_get_max_events();
356
357 counter_config = kcalloc(perf_num_counters,
358 sizeof(struct op_counter_config), GFP_KERNEL);
ae92dc9f 359
8c1fc96f
WD
360 if (!counter_config) {
361 pr_info("oprofile: failed to allocate %d "
362 "counters\n", perf_num_counters);
363 return -ENOMEM;
c6b9dafc 364 }
1da177e4 365
d1e86d64
WD
366 ret = init_driverfs();
367 if (ret) {
368 kfree(counter_config);
98d943b0 369 counter_config = NULL;
d1e86d64
WD
370 return ret;
371 }
372
8c1fc96f
WD
373 for_each_possible_cpu(cpu) {
374 perf_events[cpu] = kcalloc(perf_num_counters,
375 sizeof(struct perf_event *), GFP_KERNEL);
376 if (!perf_events[cpu]) {
377 pr_info("oprofile: failed to allocate %d perf events "
378 "for cpu %d\n", perf_num_counters, cpu);
379 while (--cpu >= 0)
380 kfree(perf_events[cpu]);
381 return -ENOMEM;
382 }
383 }
384
8c1fc96f
WD
385 ops->backtrace = arm_backtrace;
386 ops->create_files = op_arm_create_files;
387 ops->setup = op_arm_setup;
388 ops->start = op_arm_start;
389 ops->stop = op_arm_stop;
390 ops->shutdown = op_arm_stop;
391 ops->cpu_type = op_name_from_perf_id(armpmu_get_pmu_id());
392
393 if (!ops->cpu_type)
394 ret = -ENODEV;
395 else
396 pr_info("oprofile: using %s\n", ops->cpu_type);
397
c6b9dafc 398 return ret;
1da177e4
LT
399}
400
c6b9dafc 401void oprofile_arch_exit(void)
1da177e4 402{
8c1fc96f
WD
403 int cpu, id;
404 struct perf_event *event;
405
406 if (*perf_events) {
8c1fc96f
WD
407 for_each_possible_cpu(cpu) {
408 for (id = 0; id < perf_num_counters; ++id) {
409 event = perf_events[cpu][id];
410 if (event != NULL)
411 perf_event_release_kernel(event);
412 }
413 kfree(perf_events[cpu]);
414 }
1da177e4 415 }
8c1fc96f 416
98d943b0 417 if (counter_config) {
8c1fc96f 418 kfree(counter_config);
98d943b0
RR
419 exit_driverfs();
420 }
8c1fc96f
WD
421}
422#else
/*
 * !CONFIG_HW_PERF_EVENTS: there is no PMU backend, so arch init simply
 * fails with -ENODEV (the oprofile core then falls back as it sees fit).
 */
int __init oprofile_arch_init(struct oprofile_operations *ops)
{
	pr_info("oprofile: hardware counters not available\n");
	return -ENODEV;
}
void oprofile_arch_exit(void) {}
429#endif /* CONFIG_HW_PERF_EVENTS */