/*
 * mcount and friends -- ftrace stuff
 *
 * Copyright (C) 2009-2010 Analog Devices Inc.
 * Licensed under the GPL-2 or later.
 */

#include <linux/linkage.h>
#include <asm/ftrace.h>

.text

/* GCC will have called us before setting up the function prologue, so we
 * can clobber the normal scratch registers, but we need to make sure to
 * save/restore the registers used for argument passing (R0-R2) in case
 * the profiled function is using them.  With data registers, R3 is the
 * only one we can blow away.  With pointer registers, we have P0-P2.
 *
 * Upon entry, the RETS will point to the top of the current profiled
 * function.  And since GCC pushed the previous RETS for us, the previous
 * function will be waiting there.  mmmm pie.
 */
ENTRY(__mcount)
#ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
	/* Fast exit: when tracing is globally stopped, skip all work.
	 * (bp) hints the branch as predicted-taken.
	 */
	p1.l = _function_trace_stop;
	p1.h = _function_trace_stop;
	r3 = [p1];
	cc = r3 == 0;
	if ! cc jump _ftrace_stub (bp);
#endif

	/* R2 is a caller argument register but we need it as scratch for
	 * the stub comparisons below, so preserve it first.
	 */
	[--sp] = r2;

	/* Fetch the currently-installed tracer callback. */
	p0.l = _ftrace_trace_function;
	p0.h = _ftrace_trace_function;
	r3 = [p0];

	/* If a real tracer is installed (not the no-op stub), go trace. */
	r2.l = _ftrace_stub;
	r2.h = _ftrace_stub;
	cc = r2 == r3;
	if ! cc jump .Ldo_trace;

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	/* No plain tracer is active; check whether the graph tracer is.
	 * R2 still holds the _ftrace_stub address from above.
	 * First test the return hook ...
	 */
	p0.l = _ftrace_graph_return;
	p0.h = _ftrace_graph_return;
	r3 = [p0];
	cc = r2 == r3;
	if ! cc jump _ftrace_graph_caller;

	/* ... then the entry hook, which has its own dedicated stub. */
	p0.l = _ftrace_graph_entry;
	p0.h = _ftrace_graph_entry;
	r2.l = _ftrace_graph_entry_stub;
	r2.h = _ftrace_graph_entry_stub;
	r3 = [p0];
	cc = r2 == r3;
	if ! cc jump _ftrace_graph_caller;
#endif

	/* Nothing to do: undo the R2 save and return to the caller. */
	r2 = [sp++];
	rts;

.Ldo_trace:

	/* Preserve the remaining argument registers and our return
	 * address before handing control to the tracer.
	 */
	[--sp] = r0;
	[--sp] = r1;
	[--sp] = rets;

	/* P0 = tracer callback loaded earlier. */
	p0 = r3;

	/* Build the C call:
	 *   function_trace_call(unsigned long ip, unsigned long parent_ip)
	 * ip        = address inside the profiled function (RETS), backed
	 *             up over the mcount call site itself
	 * parent_ip = the profiled function's own caller, which GCC left
	 *             on the stack above our four saved words
	 */
	r0 = rets;
	r1 = [sp + 16];			/* skip the 4 local regs on stack */
	r0 += -MCOUNT_INSN_SIZE;

	/* Invoke the tracer. */
	call (p0);

	/* Unwind everything we saved and return. */
.Lfinish_trace:
	rets = [sp++];
	r1 = [sp++];
	r0 = [sp++];
	r2 = [sp++];

.globl _ftrace_stub
_ftrace_stub:
	rts;
ENDPROC(__mcount)

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/* The prepare_ftrace_return() function is similar to the trace function
 * except it takes a pointer to the location of the frompc.  This is so
 * the prepare_ftrace_return() can hijack it temporarily for probing
 * purposes.
 */
ENTRY(_ftrace_graph_caller)
	/* Same save set as the .Ldo_trace path: argument regs + RETS. */
	[--sp] = r0;
	[--sp] = r1;
	[--sp] = rets;

	/* Build the C call:
	 *   prepare_ftrace_return(unsigned long *parent, unsigned long self_addr)
	 * parent    = stack slot holding the profiled function's return
	 *             address (just above our 4 saved words), passed by
	 *             address so it can be rewritten to the return trampoline
	 * self_addr = profiled function's address, adjusted past the
	 *             mcount call
	 */
	r0 = sp;
	r1 = rets;
	r0 += 16;			/* skip the 4 local regs on stack */
	r1 += -MCOUNT_INSN_SIZE;
	call _prepare_ftrace_return;

	/* Reuse __mcount's epilogue to restore state and return. */
	jump .Lfinish_trace;
ENDPROC(_ftrace_graph_caller)

/* Undo the rewrite caused by ftrace_graph_caller().  The common function
 * ftrace_return_to_handler() will return the original rets so we can
 * restore it and be on our way.
 */
ENTRY(_return_to_handler)
	/* We arrive here in place of the real return; the function's
	 * return values (and P0) must survive the handler call.
	 */
	[--sp] = p0;
	[--sp] = r0;
	[--sp] = r1;

	/* Ask the core graph code for the original return address;
	 * it comes back in R0, which we move into RETS.
	 */
	call _ftrace_return_to_handler;
	rets = r0;

	/* anomaly 05000371 - make sure we have at least three instructions
	 * between rets setting and the return; the three pops below
	 * satisfy that while restoring the saved state.
	 */
	r1 = [sp++];
	r0 = [sp++];
	p0 = [sp++];
	rts;
ENDPROC(_return_to_handler)
#endif