kernel/trace/trace_stack.c
/*
 * Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com>
 *
 */
#include <linux/stacktrace.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>
#include <linux/debugfs.h>
#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/fs.h>
#include "trace.h"

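/*
 * The stack tracer hooks into every traced function entry via ftrace
 * and records the deepest kernel stack usage it has seen.  The maximum
 * depth is exported through debugfs as 'stack_max_size' and the
 * backtrace that produced it as 'stack_trace'.
 */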
#define STACK_TRACE_ENTRIES 500

static unsigned long stack_dump_trace[STACK_TRACE_ENTRIES] =
	 { [0 ... (STACK_TRACE_ENTRIES-1)] = ULONG_MAX };
static struct stack_trace max_stack_trace = {
	.max_entries		= STACK_TRACE_ENTRIES,
	.entries		= stack_dump_trace,
};

static unsigned long max_stack_size;
static raw_spinlock_t max_stack_lock =
	(raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;

static int stack_trace_disabled __read_mostly;
static DEFINE_PER_CPU(int, trace_active);

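/*
 * Measure how much of the current thread's stack is in use by taking
 * the address of a local variable and computing its offset within the
 * THREAD_SIZE-aligned stack.  If this is the deepest stack seen so
 * far, record the new maximum and save the backtrace that produced it,
 * all under max_stack_lock.
 */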
static inline void check_stack(void)
{
	unsigned long this_size;
	unsigned long flags;

	this_size = ((unsigned long)&this_size) & (THREAD_SIZE-1);
	this_size = THREAD_SIZE - this_size;

	if (this_size <= max_stack_size)
		return;

	raw_local_irq_save(flags);
	__raw_spin_lock(&max_stack_lock);

	/* a race could have already updated it */
	if (this_size <= max_stack_size)
		goto out;

	max_stack_size = this_size;

	max_stack_trace.nr_entries = 0;
	max_stack_trace.skip = 1;

	save_stack_trace(&max_stack_trace);

 out:
	__raw_spin_unlock(&max_stack_lock);
	raw_local_irq_restore(flags);
}

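/*
 * ftrace callback, invoked on entry to every traced function.  A
 * per-CPU counter guards against recursion into the tracer, and
 * preemption is disabled around the check.  On the way out, preemption
 * is re-enabled without scheduling if a reschedule was already pending,
 * to avoid recursing through schedule().
 */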
static void
stack_trace_call(unsigned long ip, unsigned long parent_ip)
{
	int cpu, resched;

	if (unlikely(!ftrace_enabled || stack_trace_disabled))
		return;

	resched = need_resched();
	preempt_disable_notrace();

	cpu = raw_smp_processor_id();
	/* no atomic needed, this variable is only modified from this CPU */
	if (per_cpu(trace_active, cpu)++ != 0)
		goto out;

	check_stack();

 out:
	per_cpu(trace_active, cpu)--;
	/* prevent recursion in schedule */
	if (resched)
		preempt_enable_no_resched_notrace();
	else
		preempt_enable_notrace();
}

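/* Hook stack_trace_call() into the function tracer. */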
static struct ftrace_ops trace_ops __read_mostly =
{
	.func = stack_trace_call,
};

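/*
 * debugfs read handler for 'stack_max_size': format the recorded
 * maximum stack depth and copy it out to user space.
 */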
static ssize_t
stack_max_size_read(struct file *filp, char __user *ubuf,
		    size_t count, loff_t *ppos)
{
	unsigned long *ptr = filp->private_data;
	char buf[64];
	int r;

	r = snprintf(buf, sizeof(buf), "%ld\n", *ptr);
	if (r > sizeof(buf))
		r = sizeof(buf);
	return simple_read_from_buffer(ubuf, count, ppos, buf, r);
}

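/*
 * debugfs write handler for 'stack_max_size': parse a decimal value
 * from user space and store it as the new maximum under max_stack_lock.
 */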
static ssize_t
stack_max_size_write(struct file *filp, const char __user *ubuf,
		     size_t count, loff_t *ppos)
{
	long *ptr = filp->private_data;
	unsigned long val, flags;
	char buf[64];
	int ret;

	if (count >= sizeof(buf))
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, count))
		return -EFAULT;

	buf[count] = 0;

	ret = strict_strtoul(buf, 10, &val);
	if (ret < 0)
		return ret;

	raw_local_irq_save(flags);
	__raw_spin_lock(&max_stack_lock);
	*ptr = val;
	__raw_spin_unlock(&max_stack_lock);
	raw_local_irq_restore(flags);

	return count;
}

static struct file_operations stack_max_size_fops = {
	.open		= tracing_open_generic,
	.read		= stack_max_size_read,
	.write		= stack_max_size_write,
};

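/*
 * seq_file iterator over stack_dump_trace[]: t_start() takes
 * max_stack_lock and advances to the requested position, t_next()
 * steps to the following saved address (stopping at ULONG_MAX), and
 * t_stop() drops the lock again.
 */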
static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	unsigned long *t = m->private;

	(*pos)++;

	if (!t || *t == ULONG_MAX)
		return NULL;

	t++;
	m->private = t;

	return t;
}

static void *t_start(struct seq_file *m, loff_t *pos)
{
	unsigned long *t = m->private;
	loff_t l = 0;

	local_irq_disable();
	__raw_spin_lock(&max_stack_lock);

	for (; t && l < *pos; t = t_next(m, t, &l))
		;

	return t;
}

static void t_stop(struct seq_file *m, void *p)
{
	__raw_spin_unlock(&max_stack_lock);
	local_irq_enable();
}

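/*
 * Print one saved address.  With CONFIG_KALLSYMS the address is
 * resolved to a symbol name; otherwise only the raw pointer is shown.
 */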
static int trace_lookup_stack(struct seq_file *m, unsigned long addr)
{
#ifdef CONFIG_KALLSYMS
	char str[KSYM_SYMBOL_LEN];

	sprint_symbol(str, addr);

	return seq_printf(m, "[<%p>] %s\n", (void *)addr, str);
#else
	return seq_printf(m, "%p\n", (void *)addr);
#endif
}

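/* Emit one entry of the saved backtrace per seq_file record. */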
static int t_show(struct seq_file *m, void *v)
{
	unsigned long *t = v;

	if (!t || *t == ULONG_MAX)
		return 0;

	trace_lookup_stack(m, *t);

	return 0;
}

static struct seq_operations stack_trace_seq_ops = {
	.start		= t_start,
	.next		= t_next,
	.stop		= t_stop,
	.show		= t_show,
};

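/*
 * Open handler for the 'stack_trace' debugfs file: set up the seq_file
 * and point its private data at the saved trace buffer.
 */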
static int stack_trace_open(struct inode *inode, struct file *file)
{
	int ret;

	ret = seq_open(file, &stack_trace_seq_ops);
	if (!ret) {
		struct seq_file *m = file->private_data;
		m->private = stack_dump_trace;
	}

	return ret;
}

static struct file_operations stack_trace_fops = {
	.open		= stack_trace_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
};

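/*
 * Create the 'stack_max_size' and 'stack_trace' debugfs entries and
 * register the ftrace callback so stack tracing starts at boot.
 */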
static __init int stack_trace_init(void)
{
	struct dentry *d_tracer;
	struct dentry *entry;

	d_tracer = tracing_init_dentry();

	entry = debugfs_create_file("stack_max_size", 0644, d_tracer,
				    &max_stack_size, &stack_max_size_fops);
	if (!entry)
		pr_warning("Could not create debugfs 'stack_max_size' entry\n");

	entry = debugfs_create_file("stack_trace", 0444, d_tracer,
				    NULL, &stack_trace_fops);
	if (!entry)
		pr_warning("Could not create debugfs 'stack_trace' entry\n");

	register_ftrace_function(&trace_ops);

	return 0;
}

device_initcall(stack_trace_init);