/*
 * Code for replacing ftrace calls with jumps.
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 *
 * Thanks go to Ingo Molnar, for suggesting the idea.
 * Mathieu Desnoyers, for suggesting postponing the modifications.
 * Arjan van de Ven, for keeping me straight, and explaining to me
 * the dangers of modifying code on the fly.
 */

#include <linux/spinlock.h>
#include <linux/hardirq.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/list.h>

#include <asm/ftrace.h>
#include <asm/nops.h>

static unsigned char ftrace_nop[MCOUNT_INSN_SIZE];

union ftrace_code_union {
	char code[MCOUNT_INSN_SIZE];
	struct {
		char e8;
		int offset;
	} __attribute__((packed));
};
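
/*
 * For reference, what the union above builds: an x86 near call is the
 * opcode 0xe8 followed by a signed 32-bit displacement relative to the
 * next instruction, which is why MCOUNT_INSN_SIZE is 5. E.g. a call
 * at 0x1000 targeting 0x2000 is encoded as:
 *
 *	e8 fb 0f 00 00		(0x2000 - (0x1000 + 5) = 0xffb)
 */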

static int ftrace_calc_offset(long ip, long addr)
{
	return (int)(addr - ip);
}

unsigned char *ftrace_nop_replace(void)
{
	return ftrace_nop;
}

unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr)
{
	static union ftrace_code_union calc;

	calc.e8		= 0xe8;
	calc.offset	= ftrace_calc_offset(ip + MCOUNT_INSN_SIZE, addr);

	/*
	 * No locking needed, this must be called via kstop_machine
	 * which in essence is like running on a uniprocessor machine.
	 */
	return calc.code;
}

/*
 * Modifying code must take extra care. On an SMP machine, if
 * the code being modified is also being executed on another CPU
 * that CPU will have undefined results and possibly take a GPF.
 * We use kstop_machine to stop other CPUs from executing code.
 * But this does not stop NMIs from happening. We still need
 * to protect against that. We separate out the modification of
 * the code to take care of this.
 *
 * Two buffers are added: an IP buffer and a "code" buffer.
 *
 * 1) Put the instruction pointer into the IP buffer
 *    and the new code into the "code" buffer.
 * 2) Set a flag that says we are modifying code.
 * 3) Wait for any running NMIs to finish.
 * 4) Write the code.
 * 5) Clear the flag.
 * 6) Wait for any running NMIs to finish.
 *
 * If an NMI is executed, the first thing it does is to call
 * "ftrace_nmi_enter". This will check if the flag is set to write,
 * and if it is, it will write what is in the IP and "code" buffers.
 *
 * The trick is, it does not matter if everyone is writing the same
 * content to the code location. Also, if a CPU is executing code
 * it is OK to write to that code location if the contents being written
 * are the same as what exists.
 */
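
/*
 * An illustrative interleaving of the steps above (sketch, not code):
 *
 *	CPU A (kstop_machine)		CPU B (NMI)
 *	---------------------		-----------
 *	fill mod_code_ip/newcode
 *	smp_wmb()
 *	mod_code_write = 1
 *	smp_mb()
 *	wait_for_nmi()			ftrace_nmi_enter()
 *	  (spins while in_nmi)		  sees mod_code_write set,
 *					  writes the buffers itself
 *	ftrace_mod_code()		ftrace_nmi_exit()
 *	mod_code_write = 0
 *	wait_for_nmi()
 *
 * Both sides write identical bytes, so it does not matter whose write
 * lands last; the text is consistent either way.
 */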

static atomic_t in_nmi;
static int mod_code_status;		/* holds return value of text write */
static int mod_code_write;		/* set when NMI should do the write */
static void *mod_code_ip;		/* holds the ip to write to */
static void *mod_code_newcode;		/* what to write into the ip */

static int nmi_wait_count;
static atomic_t nmi_update_count;

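/*
 * Reported back through the dyn_ftrace_total_info file in the tracing
 * debugfs directory: how many times an update had to wait on an NMI,
 * and how many updates were carried out from NMI context.
 */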
int ftrace_arch_read_dyn_info(char *buf, int size)
{
	int r;

	r = snprintf(buf, size, "%u %u",
		     nmi_wait_count,
		     atomic_read(&nmi_update_count));
	return r;
}

static void ftrace_mod_code(void)
{
	/*
	 * Yes, more than one CPU can be writing to mod_code_status
	 * (and to the code itself).
	 * But if one were to fail, then they all should, and if one were
	 * to succeed, then they all should.
	 */
	mod_code_status = probe_kernel_write(mod_code_ip, mod_code_newcode,
					     MCOUNT_INSN_SIZE);
}

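/*
 * Entered on every NMI on this arch: an NMI that arrives in the middle
 * of an update performs the pending write itself, so it can never run
 * half-modified text.
 */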
void ftrace_nmi_enter(void)
{
	atomic_inc(&in_nmi);
	/* Must have in_nmi seen before reading write flag */
	smp_mb();
	if (mod_code_write) {
		ftrace_mod_code();
		atomic_inc(&nmi_update_count);
	}
}

void ftrace_nmi_exit(void)
{
	/* Finish all executions before clearing in_nmi */
	smp_wmb();
	atomic_dec(&in_nmi);
}

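/*
 * Spin until no CPU is inside an NMI handler. Busy-waiting is fine
 * here: we run under kstop_machine, so NMIs are the only other
 * activity on the machine and they are short-lived.
 */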
static void wait_for_nmi(void)
{
	int waited = 0;

	while (atomic_read(&in_nmi)) {
		waited = 1;
		cpu_relax();
	}

	if (waited)
		nmi_wait_count++;
}

static int
do_ftrace_mod_code(unsigned long ip, void *new_code)
{
	mod_code_ip = (void *)ip;
	mod_code_newcode = new_code;

	/* The buffers need to be visible before we let NMIs write them */
	smp_wmb();

	mod_code_write = 1;

	/* Make sure write bit is visible before we wait on NMIs */
	smp_mb();

	wait_for_nmi();

	/* Make sure all running NMIs have finished before we write the code */
	smp_mb();

	ftrace_mod_code();

	/* Make sure the write happens before clearing the bit */
	smp_wmb();

	mod_code_write = 0;

	/* Make sure NMIs see the cleared bit */
	smp_mb();

	wait_for_nmi();

	return mod_code_status;
}

int
ftrace_modify_code(unsigned long ip, unsigned char *old_code,
		   unsigned char *new_code)
{
	unsigned char replaced[MCOUNT_INSN_SIZE];

	/*
	 * Note: Due to modules and __init, code can
	 * disappear and change; we need to protect against faulting
	 * as well as code changing. We do this by using the
	 * probe_kernel_* functions.
	 *
	 * No real locking needed, this code is run through
	 * kstop_machine, or before SMP starts.
	 */

	/* read the text we want to modify */
	if (probe_kernel_read(replaced, (void *)ip, MCOUNT_INSN_SIZE))
		return -EFAULT;

	/* Make sure it is what we expect it to be */
	if (memcmp(replaced, old_code, MCOUNT_INSN_SIZE) != 0)
		return -EINVAL;

	/* replace the text with the new text */
	if (do_ftrace_mod_code(ip, new_code))
		return -EPERM;

	sync_core();

	return 0;
}
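
/*
 * For illustration, disabling one mcount call site boils down to
 * (a sketch of the caller in kernel/trace/ftrace.c, names approximate):
 *
 *	unsigned char *call, *nop;
 *
 *	call = ftrace_call_replace(ip, mcount_addr);
 *	nop  = ftrace_nop_replace();
 *	ret  = ftrace_modify_code(ip, call, nop);
 *
 * i.e. verify the site still holds the call, then patch in the nop.
 */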

/*
 * Patch the ftrace_call site inside the ftrace_caller trampoline to
 * call the new tracer function.
 */
int ftrace_update_ftrace_func(ftrace_func_t func)
{
	unsigned long ip = (unsigned long)(&ftrace_call);
	unsigned char old[MCOUNT_INSN_SIZE], *new;
	int ret;

	memcpy(old, &ftrace_call, MCOUNT_INSN_SIZE);
	new = ftrace_call_replace(ip, (unsigned long)func);
	ret = ftrace_modify_code(ip, old, new);

	return ret;
}

int __init ftrace_dyn_arch_init(void *data)
{
	extern const unsigned char ftrace_test_p6nop[];
	extern const unsigned char ftrace_test_nop5[];
	extern const unsigned char ftrace_test_jmp[];
	int faulted = 0;

	/*
	 * There is no good nop for all x86 archs.
	 * We will default to using the P6_NOP5, but first we
	 * will test to make sure that the nop will actually
	 * work on this CPU. If it faults, we will then
	 * fall back to a less efficient 5 byte nop. If that fails
	 * we then just use a jmp as our nop. This isn't the most
	 * efficient nop, but we cannot use a multi-part nop
	 * since we would then risk being preempted in the middle
	 * of that nop, and if we enabled tracing then, it might
	 * cause a system crash.
	 *
	 * TODO: check the cpuid to determine the best nop.
	 */
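	/*
	 * How the probe below reports failure: if P6_NOP5 faults on
	 * this CPU, the exception table entry sends us to fixup label
	 * 2, which sets faulted = 1 and retries with the prefixed
	 * 5-byte nop; if that one faults too, label 3 sets faulted = 2
	 * and we fall back to the jmp.
	 */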
	asm volatile (
		"ftrace_test_jmp:"
		"jmp ftrace_test_p6nop\n"
		"nop\n"
		"nop\n"
		"nop\n"  /* 2 byte jmp + 3 bytes */
		"ftrace_test_p6nop:"
		P6_NOP5
		"jmp 1f\n"
		"ftrace_test_nop5:"
		".byte 0x66,0x66,0x66,0x66,0x90\n"
		"1:"
		".section .fixup, \"ax\"\n"
		"2:	movl $1, %0\n"
		"	jmp ftrace_test_nop5\n"
		"3:	movl $2, %0\n"
		"	jmp 1b\n"
		".previous\n"
		_ASM_EXTABLE(ftrace_test_p6nop, 2b)
		_ASM_EXTABLE(ftrace_test_nop5, 3b)
		: "=r"(faulted) : "0" (faulted));

	switch (faulted) {
	case 0:
		pr_info("ftrace: converting mcount calls to 0f 1f 44 00 00\n");
		memcpy(ftrace_nop, ftrace_test_p6nop, MCOUNT_INSN_SIZE);
		break;
	case 1:
		pr_info("ftrace: converting mcount calls to 66 66 66 66 90\n");
		memcpy(ftrace_nop, ftrace_test_nop5, MCOUNT_INSN_SIZE);
		break;
	case 2:
		pr_info("ftrace: converting mcount calls to jmp . + 5\n");
		memcpy(ftrace_nop, ftrace_test_jmp, MCOUNT_INSN_SIZE);
		break;
	}

	/* The return code is returned via data */
	*(unsigned long *)data = 0;

	return 0;
}