]> bbs.cooldavid.org Git - net-next-2.6.git/blob - arch/mips/kernel/ftrace.c
MIPS: Tracing: Reduce the overhead of dynamic Function Tracer
[net-next-2.6.git] / arch / mips / kernel / ftrace.c
1 /*
2  * Code for replacing ftrace calls with jumps.
3  *
4  * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
5  * Copyright (C) 2009, 2010 DSLab, Lanzhou University, China
6  * Author: Wu Zhangjin <wuzhangjin@gmail.com>
7  *
8  * Thanks goes to Steven Rostedt for writing the original x86 version.
9  */
10
11 #include <linux/uaccess.h>
12 #include <linux/init.h>
13 #include <linux/ftrace.h>
14
15 #include <asm/asm.h>
16 #include <asm/asm-offsets.h>
17 #include <asm/cacheflush.h>
18 #include <asm/uasm.h>
19
20 #ifdef CONFIG_DYNAMIC_FTRACE
21
22 #define JAL 0x0c000000          /* jump & link: ip --> ra, jump to target */
23 #define ADDR_MASK 0x03ffffff    /*  op_code|addr : 31...26|25 ....0 */
24
25 #define INSN_B_1F_4 0x10000004  /* b 1f; offset = 4 */
26 #define INSN_B_1F_5 0x10000005  /* b 1f; offset = 5 */
27 #define INSN_NOP 0x00000000     /* nop */
28 #define INSN_JAL(addr)  \
29         ((unsigned int)(JAL | (((addr) >> 2) & ADDR_MASK)))
30
31 static unsigned int insn_jal_ftrace_caller __read_mostly;
32 static unsigned int insn_lui_v1_hi16_mcount __read_mostly;
33 static unsigned int insn_j_ftrace_graph_caller __maybe_unused __read_mostly;
34
/*
 * Pre-assemble, once at boot, the instructions that the patching paths
 * below will later write into the kernel text.  uasm emits each encoded
 * instruction into the corresponding static variable.
 */
static inline void ftrace_dyn_arch_init_insns(void)
{
	u32 *buf;
	unsigned int v1;

	/* lui v1, hi16_mcount */
	v1 = 3;			/* register $3 is v1 in the MIPS ABI */
	buf = (u32 *)&insn_lui_v1_hi16_mcount;
	UASM_i_LA_mostly(&buf, v1, MCOUNT_ADDR);

	/* jal (ftrace_caller + 8): skip the first two instructions of ftrace_caller */
	buf = (u32 *)&insn_jal_ftrace_caller;
	uasm_i_jal(&buf, (FTRACE_ADDR + 8));

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	/* j ftrace_graph_caller */
	buf = (u32 *)&insn_j_ftrace_graph_caller;
	uasm_i_j(&buf, (unsigned long)ftrace_graph_caller);
#endif
}
55
56 static int ftrace_modify_code(unsigned long ip, unsigned int new_code)
57 {
58         int faulted;
59
60         /* *(unsigned int *)ip = new_code; */
61         safe_store_code(new_code, ip, faulted);
62
63         if (unlikely(faulted))
64                 return -EFAULT;
65
66         flush_icache_range(ip, ip + 8);
67
68         return 0;
69 }
70
/*
 * Disable tracing for one call site: replace the first instruction of
 * the mcount call sequence at @rec->ip with a nop (kernel) or with a
 * branch over the whole sequence (-mlong-calls modules).
 */
int ftrace_make_nop(struct module *mod,
		    struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned int new;
	unsigned long ip = rec->ip;

	/*
	 * Modules are compiled with -mlong-calls but the kernel itself is
	 * not, so the two call-site layouts must be handled separately.
	 */
	if (ip & 0x40000000) {	/* module space (0xc0000000) vs kernel (0x80000000) */
#if defined(KBUILD_MCOUNT_RA_ADDRESS) && defined(CONFIG_32BIT)
		/*
		 * lui v1, hi_16bit_of_mcount        --> b 1f (0x10000005)
		 * addiu v1, v1, low_16bit_of_mcount
		 * move at, ra
		 * move $12, ra_address
		 * jalr v1
		 *  sub sp, sp, 8
		 *                                  1: offset = 5 instructions
		 */
		new = INSN_B_1F_5;
#else
		/*
		 * lui v1, hi_16bit_of_mcount        --> b 1f (0x10000004)
		 * addiu v1, v1, low_16bit_of_mcount
		 * move at, ra
		 * jalr v1
		 *  nop | move $12, ra_address | sub sp, sp, 8
		 *                                  1: offset = 4 instructions
		 */
		new = INSN_B_1F_4;
#endif
	} else {
		/*
		 * move at, ra
		 * jal _mcount          --> nop
		 */
		new = INSN_NOP;
	}
	return ftrace_modify_code(ip, new);
}
113
114 int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
115 {
116         unsigned int new;
117         unsigned long ip = rec->ip;
118
119         /* ip, module: 0xc0000000, kernel: 0x80000000 */
120         new = (ip & 0x40000000) ? insn_lui_v1_hi16_mcount : insn_jal_ftrace_caller;
121
122         return ftrace_modify_code(ip, new);
123 }
124
125 #define FTRACE_CALL_IP ((unsigned long)(&ftrace_call))
126
127 int ftrace_update_ftrace_func(ftrace_func_t func)
128 {
129         unsigned int new;
130
131         new = INSN_JAL((unsigned long)func);
132
133         return ftrace_modify_code(FTRACE_CALL_IP, new);
134 }
135
/*
 * Boot-time setup for dynamic ftrace on MIPS: pre-encode the patch
 * instructions and neutralize the "b ftrace_stub" short-circuit in
 * _mcount.
 */
int __init ftrace_dyn_arch_init(void *data)
{
	/* Encode the instructions when booting */
	ftrace_dyn_arch_init_insns();

	/* Remove "b ftrace_stub" to ensure ftrace_caller() is executed */
	ftrace_modify_code(MCOUNT_ADDR, INSN_NOP);

	/* The return code is returned via data */
	*(unsigned long *)data = 0;

	return 0;
}
149 #endif                          /* CONFIG_DYNAMIC_FTRACE */
150
151 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
152
153 #ifdef CONFIG_DYNAMIC_FTRACE
154
155 extern void ftrace_graph_call(void);
156 #define FTRACE_GRAPH_CALL_IP    ((unsigned long)(&ftrace_graph_call))
157
158 int ftrace_enable_ftrace_graph_caller(void)
159 {
160         return ftrace_modify_code(FTRACE_GRAPH_CALL_IP,
161                         insn_j_ftrace_graph_caller);
162 }
163
164 int ftrace_disable_ftrace_graph_caller(void)
165 {
166         return ftrace_modify_code(FTRACE_GRAPH_CALL_IP, INSN_NOP);
167 }
168
#endif				/* CONFIG_DYNAMIC_FTRACE */
170
171 #ifndef KBUILD_MCOUNT_RA_ADDRESS
172 #define S_RA_SP (0xafbf << 16)  /* s{d,w} ra, offset(sp) */
173 #define S_R_SP  (0xafb0 << 16)  /* s{d,w} R, offset(sp) */
174 #define OFFSET_MASK     0xffff  /* stack offset range: 0 ~ PT_SIZE */
175
/*
 * For gcc < 4.5 (no -mmcount-ra-address): scan backwards from the
 * mcount call site to find the stack slot where the traced function
 * saved its return address, so the caller can redirect it.
 *
 * Returns: the stack address holding ra on success; @parent_addr when
 * the function is a leaf (ra never stored, so the _mcount-provided slot
 * is used instead); 0 on any fault or if the located slot does not
 * actually hold @parent.
 */
unsigned long ftrace_get_parent_addr(unsigned long self_addr,
				     unsigned long parent,
				     unsigned long parent_addr,
				     unsigned long fp)
{
	unsigned long sp, ip, ra;
	unsigned int code;
	int faulted;

	/* in module or kernel? (module text has bit 30 set) */
	if (self_addr & 0x40000000) {
		/* module: move to the instruction "lui v1, HI_16BIT_OF_MCOUNT" */
		ip = self_addr - 20;
	} else {
		/* kernel: move to the instruction "move ra, at" */
		ip = self_addr - 12;
	}

	/* search the text until finding the non-store instruction or "s{d,w}
	 * ra, offset(sp)" instruction */
	do {
		ip -= 4;

		/* get the code at "ip": code = *(unsigned int *)ip; */
		safe_load_code(code, ip, faulted);

		if (unlikely(faulted))
			return 0;

		/* If we hit the non-store instruction before finding where the
		 * ra is stored, then this is a leaf function and it does not
		 * store the ra on the stack. */
		if ((code & S_R_SP) != S_R_SP)
			return parent_addr;

	} while (((code & S_RA_SP) != S_RA_SP));

	/* The low 16 bits of the store encode the sp offset of the slot. */
	sp = fp + (code & OFFSET_MASK);

	/* ra = *(unsigned long *)sp; */
	safe_load_stack(ra, sp, faulted);
	if (unlikely(faulted))
		return 0;

	/* Sanity check: the slot must really contain the expected ra. */
	if (ra == parent)
		return sp;
	return 0;
}
224
#endif				/* !KBUILD_MCOUNT_RA_ADDRESS */
226
227 /*
228  * Hook the return address and push it in the stack of return addrs
229  * in current thread info.
230  */
/*
 * Hook the return address and push it in the stack of return addrs
 * in current thread info.
 */
void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
			   unsigned long fp)
{
	unsigned long old;
	struct ftrace_graph_ent trace;
	unsigned long return_hooker = (unsigned long)
	    &return_to_handler;
	int faulted;

	if (unlikely(atomic_read(&current->tracing_graph_pause)))
		return;

	/* "parent" is the stack address where the return address of the
	 * caller of _mcount is saved.
	 *
	 * If gcc < 4.5, a leaf function does not save the return address
	 * on the stack, so we "emulate" one in _mcount's stack space and
	 * hijack it directly; but a non-leaf function saves the return
	 * address in its own stack space, which we cannot hijack directly —
	 * we need to find the real stack address, which is what
	 * ftrace_get_parent_addr() does.
	 *
	 * If gcc >= 4.5, with the new -mmcount-ra-address option, for a
	 * non-leaf function the location of the return address is passed
	 * to us in $12, and for a leaf function a zero is put in $12; this
	 * is done in ftrace_graph_caller of mcount.S.
	 */

	/* old = *parent; */
	safe_load_stack(old, parent, faulted);
	if (unlikely(faulted))
		goto out;
#ifndef KBUILD_MCOUNT_RA_ADDRESS
	parent = (unsigned long *)ftrace_get_parent_addr(self_addr, old,
							 (unsigned long)parent,
							 fp);
	/* If getting the stack address of a non-leaf function's ra fails,
	 * stop the function graph tracer and return. */
	if (parent == 0)
		goto out;
#endif
	/* *parent = return_hooker; (safe_load_stack above already probed
	 * the original slot; this store probes the possibly-updated one) */
	safe_store_stack(return_hooker, parent, faulted);
	if (unlikely(faulted))
		goto out;

	if (ftrace_push_return_trace(old, self_addr, &trace.depth, fp) ==
	    -EBUSY) {
		/* Ret-stack full: undo the hijack (address proved writable
		 * by the safe_store_stack above). */
		*parent = old;
		return;
	}

	trace.func = self_addr;

	/* Only trace if the calling function expects to */
	if (!ftrace_graph_entry(&trace)) {
		current->curr_ret_stack--;
		*parent = old;
	}
	return;
out:
	ftrace_graph_stop();
	WARN_ON(1);
}
295 #endif                          /* CONFIG_FUNCTION_GRAPH_TRACER */