[net-next-2.6.git] kernel/trace/trace_kprobe.c (blob d8061c3e02c9427b8dfd961eba050f710fed6552)
1 /*
2  * Kprobes-based tracing events
3  *
4  * Created by Masami Hiramatsu <mhiramat@redhat.com>
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 as
8  * published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful,
11  * but WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
13  * GNU General Public License for more details.
14  *
15  * You should have received a copy of the GNU General Public License
16  * along with this program; if not, write to the Free Software
17  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
18  */
19
20 #include <linux/module.h>
21 #include <linux/uaccess.h>
22 #include <linux/kprobes.h>
23 #include <linux/seq_file.h>
24 #include <linux/slab.h>
25 #include <linux/smp.h>
26 #include <linux/debugfs.h>
27 #include <linux/types.h>
28 #include <linux/string.h>
29 #include <linux/ctype.h>
30 #include <linux/ptrace.h>
31 #include <linux/perf_event.h>
32
33 #include "trace.h"
34 #include "trace_output.h"
35
36 #define MAX_TRACE_ARGS 128
37 #define MAX_ARGSTR_LEN 63
38 #define MAX_EVENT_NAME_LEN 64
39 #define KPROBE_EVENT_SYSTEM "kprobes"
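/* Events created without an explicit group go into this "kprobes" group and
 * typically appear under <debugfs>/tracing/events/kprobes/ once registered. */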
40
41 /* Reserved field names */
42 #define FIELD_STRING_IP "__probe_ip"
43 #define FIELD_STRING_NARGS "__probe_nargs"
44 #define FIELD_STRING_RETIP "__probe_ret_ip"
45 #define FIELD_STRING_FUNC "__probe_func"
46
47 const char *reserved_field_names[] = {
48         "common_type",
49         "common_flags",
50         "common_preempt_count",
51         "common_pid",
52         "common_tgid",
53         "common_lock_depth",
54         FIELD_STRING_IP,
55         FIELD_STRING_NARGS,
56         FIELD_STRING_RETIP,
57         FIELD_STRING_FUNC,
58 };
59
60 struct fetch_func {
61         unsigned long (*func)(struct pt_regs *, void *);
62         void *data;
63 };
64
65 static __kprobes unsigned long call_fetch(struct fetch_func *f,
66                                           struct pt_regs *regs)
67 {
68         return f->func(regs, f->data);
69 }
70
71 /* fetch handlers */
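/*
 * Each fetch handler below interprets its 'data' cookie differently: a
 * register offset, a stack slot index, a raw kernel address, or a pointer
 * to a symbol_cache / indirect_fetch_data helper allocated at parse time.
 */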
72 static __kprobes unsigned long fetch_register(struct pt_regs *regs,
73                                               void *offset)
74 {
75         return regs_get_register(regs, (unsigned int)((unsigned long)offset));
76 }
77
78 static __kprobes unsigned long fetch_stack(struct pt_regs *regs,
79                                            void *num)
80 {
81         return regs_get_kernel_stack_nth(regs,
82                                          (unsigned int)((unsigned long)num));
83 }
84
85 static __kprobes unsigned long fetch_memory(struct pt_regs *regs, void *addr)
86 {
87         unsigned long retval;
88
89         if (probe_kernel_address(addr, retval))
90                 return 0;
91         return retval;
92 }
93
94 static __kprobes unsigned long fetch_retvalue(struct pt_regs *regs,
95                                               void *dummy)
96 {
97         return regs_return_value(regs);
98 }
99
100 static __kprobes unsigned long fetch_stack_address(struct pt_regs *regs,
101                                                    void *dummy)
102 {
103         return kernel_stack_pointer(regs);
104 }
105
106 /* Memory fetching by symbol */
107 struct symbol_cache {
108         char *symbol;
109         long offset;
110         unsigned long addr;
111 };
112
113 static unsigned long update_symbol_cache(struct symbol_cache *sc)
114 {
115         sc->addr = (unsigned long)kallsyms_lookup_name(sc->symbol);
116         if (sc->addr)
117                 sc->addr += sc->offset;
118         return sc->addr;
119 }
120
121 static void free_symbol_cache(struct symbol_cache *sc)
122 {
123         kfree(sc->symbol);
124         kfree(sc);
125 }
126
127 static struct symbol_cache *alloc_symbol_cache(const char *sym, long offset)
128 {
129         struct symbol_cache *sc;
130
131         if (!sym || strlen(sym) == 0)
132                 return NULL;
133         sc = kzalloc(sizeof(struct symbol_cache), GFP_KERNEL);
134         if (!sc)
135                 return NULL;
136
137         sc->symbol = kstrdup(sym, GFP_KERNEL);
138         if (!sc->symbol) {
139                 kfree(sc);
140                 return NULL;
141         }
142         sc->offset = offset;
143
144         update_symbol_cache(sc);
145         return sc;
146 }
147
148 static __kprobes unsigned long fetch_symbol(struct pt_regs *regs, void *data)
149 {
150         struct symbol_cache *sc = data;
151
152         if (sc->addr)
153                 return fetch_memory(regs, (void *)sc->addr);
154         else
155                 return 0;
156 }
157
158 /* Special indirect memory access interface */
159 struct indirect_fetch_data {
160         struct fetch_func orig;
161         long offset;
162 };
163
164 static __kprobes unsigned long fetch_indirect(struct pt_regs *regs, void *data)
165 {
166         struct indirect_fetch_data *ind = data;
167         unsigned long addr;
168
169         addr = call_fetch(&ind->orig, regs);
170         if (addr) {
171                 addr += ind->offset;
172                 return fetch_memory(regs, (void *)addr);
173         } else
174                 return 0;
175 }
176
177 static __kprobes void free_indirect_fetch_data(struct indirect_fetch_data *data)
178 {
179         if (data->orig.func == fetch_indirect)
180                 free_indirect_fetch_data(data->orig.data);
181         else if (data->orig.func == fetch_symbol)
182                 free_symbol_cache(data->orig.data);
183         kfree(data);
184 }
185
186 /*
187  * Kprobe event core functions
188  */
189
190 struct probe_arg {
191         struct fetch_func       fetch;
192         const char              *name;
193 };
194
195 /* Flags for trace_probe */
196 #define TP_FLAG_TRACE   1
197 #define TP_FLAG_PROFILE 2
198
199 struct trace_probe {
200         struct list_head        list;
201         struct kretprobe        rp;     /* Use rp.kp for kprobe use */
202         unsigned long           nhit;
203         unsigned int            flags;  /* For TP_FLAG_* */
204         const char              *symbol;        /* symbol name */
205         struct ftrace_event_class       class;
206         struct ftrace_event_call        call;
207         unsigned int            nr_args;
208         struct probe_arg        args[];
209 };
210
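/*
 * Bytes needed for a trace_probe carrying n probe_args (args[] is a
 * flexible array member at the end of the struct).
 */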
211 #define SIZEOF_TRACE_PROBE(n)                   \
212         (offsetof(struct trace_probe, args) +   \
213         (sizeof(struct probe_arg) * (n)))
214
215 static __kprobes int probe_is_return(struct trace_probe *tp)
216 {
217         return tp->rp.handler != NULL;
218 }
219
220 static __kprobes const char *probe_symbol(struct trace_probe *tp)
221 {
222         return tp->symbol ? tp->symbol : "unknown";
223 }
224
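/*
 * Render a fetch_func back into the textual argument spec it was parsed
 * from, e.g. "%ax", "$stack3", "@symbol+8" or "+4(%ax)" (register names
 * are illustrative, x86 assumed).
 */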
225 static int probe_arg_string(char *buf, size_t n, struct fetch_func *ff)
226 {
227         int ret = -EINVAL;
228
229         if (ff->func == fetch_register) {
230                 const char *name;
231                 name = regs_query_register_name((unsigned int)((long)ff->data));
232                 ret = snprintf(buf, n, "%%%s", name);
233         } else if (ff->func == fetch_stack)
234                 ret = snprintf(buf, n, "$stack%lu", (unsigned long)ff->data);
235         else if (ff->func == fetch_memory)
236                 ret = snprintf(buf, n, "@0x%p", ff->data);
237         else if (ff->func == fetch_symbol) {
238                 struct symbol_cache *sc = ff->data;
239                 if (sc->offset)
240                         ret = snprintf(buf, n, "@%s%+ld", sc->symbol,
241                                         sc->offset);
242                 else
243                         ret = snprintf(buf, n, "@%s", sc->symbol);
244         } else if (ff->func == fetch_retvalue)
245                 ret = snprintf(buf, n, "$retval");
246         else if (ff->func == fetch_stack_address)
247                 ret = snprintf(buf, n, "$stack");
248         else if (ff->func == fetch_indirect) {
249                 struct indirect_fetch_data *id = ff->data;
250                 size_t l = 0;
251                 ret = snprintf(buf, n, "%+ld(", id->offset);
252                 if (ret >= n)
253                         goto end;
254                 l += ret;
255                 ret = probe_arg_string(buf + l, n - l, &id->orig);
256                 if (ret < 0)
257                         goto end;
258                 l += ret;
259                 ret = snprintf(buf + l, n - l, ")");
260                 ret += l;
261         }
262 end:
263         if (ret >= n)
264                 return -ENOSPC;
265         return ret;
266 }
267
268 static int register_probe_event(struct trace_probe *tp);
269 static void unregister_probe_event(struct trace_probe *tp);
270
271 static DEFINE_MUTEX(probe_lock);
272 static LIST_HEAD(probe_list);
273
274 static int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs);
275 static int kretprobe_dispatcher(struct kretprobe_instance *ri,
276                                 struct pt_regs *regs);
277
278 /* Check the name is good for event/group */
279 static int check_event_name(const char *name)
280 {
281         if (!isalpha(*name) && *name != '_')
282                 return 0;
283         while (*++name != '\0') {
284                 if (!isalpha(*name) && !isdigit(*name) && *name != '_')
285                         return 0;
286         }
287         return 1;
288 }
289
290 /*
291  * Allocate a new trace_probe and initialize it (including kprobes).
292  */
293 static struct trace_probe *alloc_trace_probe(const char *group,
294                                              const char *event,
295                                              void *addr,
296                                              const char *symbol,
297                                              unsigned long offs,
298                                              int nargs, int is_return)
299 {
300         struct trace_probe *tp;
301         int ret = -ENOMEM;
302
303         tp = kzalloc(SIZEOF_TRACE_PROBE(nargs), GFP_KERNEL);
304         if (!tp)
305                 return ERR_PTR(ret);
306
307         if (symbol) {
308                 tp->symbol = kstrdup(symbol, GFP_KERNEL);
309                 if (!tp->symbol)
310                         goto error;
311                 tp->rp.kp.symbol_name = tp->symbol;
312                 tp->rp.kp.offset = offs;
313         } else
314                 tp->rp.kp.addr = addr;
315
316         if (is_return)
317                 tp->rp.handler = kretprobe_dispatcher;
318         else
319                 tp->rp.kp.pre_handler = kprobe_dispatcher;
320
321         if (!event || !check_event_name(event)) {
322                 ret = -EINVAL;
323                 goto error;
324         }
325
326         tp->call.class = &tp->class;
327         tp->call.name = kstrdup(event, GFP_KERNEL);
328         if (!tp->call.name)
329                 goto error;
330
331         if (!group || !check_event_name(group)) {
332                 ret = -EINVAL;
333                 goto error;
334         }
335
336         tp->class.system = kstrdup(group, GFP_KERNEL);
337         if (!tp->class.system)
338                 goto error;
339
340         INIT_LIST_HEAD(&tp->list);
341         return tp;
342 error:
343         kfree(tp->call.name);
344         kfree(tp->symbol);
345         kfree(tp);
346         return ERR_PTR(ret);
347 }
348
349 static void free_probe_arg(struct probe_arg *arg)
350 {
351         if (arg->fetch.func == fetch_symbol)
352                 free_symbol_cache(arg->fetch.data);
353         else if (arg->fetch.func == fetch_indirect)
354                 free_indirect_fetch_data(arg->fetch.data);
355         kfree(arg->name);
356 }
357
358 static void free_trace_probe(struct trace_probe *tp)
359 {
360         int i;
361
362         for (i = 0; i < tp->nr_args; i++)
363                 free_probe_arg(&tp->args[i]);
364
365         kfree(tp->call.class->system);
366         kfree(tp->call.name);
367         kfree(tp->symbol);
368         kfree(tp);
369 }
370
371 static struct trace_probe *find_probe_event(const char *event,
372                                             const char *group)
373 {
374         struct trace_probe *tp;
375
376         list_for_each_entry(tp, &probe_list, list)
377                 if (strcmp(tp->call.name, event) == 0 &&
378                     strcmp(tp->call.class->system, group) == 0)
379                         return tp;
380         return NULL;
381 }
382
383 /* Unregister a trace_probe and probe_event: call with probe_lock held */
384 static void unregister_trace_probe(struct trace_probe *tp)
385 {
386         if (probe_is_return(tp))
387                 unregister_kretprobe(&tp->rp);
388         else
389                 unregister_kprobe(&tp->rp.kp);
390         list_del(&tp->list);
391         unregister_probe_event(tp);
392 }
393
394 /* Register a trace_probe and probe_event */
395 static int register_trace_probe(struct trace_probe *tp)
396 {
397         struct trace_probe *old_tp;
398         int ret;
399
400         mutex_lock(&probe_lock);
401
402         /* register as an event */
403         old_tp = find_probe_event(tp->call.name, tp->call.class->system);
404         if (old_tp) {
405                 /* delete old event */
406                 unregister_trace_probe(old_tp);
407                 free_trace_probe(old_tp);
408         }
409         ret = register_probe_event(tp);
410         if (ret) {
411                 pr_warning("Failed to register probe event (%d)\n", ret);
412                 goto end;
413         }
414
415         tp->rp.kp.flags |= KPROBE_FLAG_DISABLED;
416         if (probe_is_return(tp))
417                 ret = register_kretprobe(&tp->rp);
418         else
419                 ret = register_kprobe(&tp->rp.kp);
420
421         if (ret) {
422                 pr_warning("Could not insert probe(%d)\n", ret);
423                 if (ret == -EILSEQ) {
424                         pr_warning("Probing address (0x%p) is not an "
425                                    "instruction boundary.\n",
426                                    tp->rp.kp.addr);
427                         ret = -EINVAL;
428                 }
429                 unregister_probe_event(tp);
430         } else
431                 list_add_tail(&tp->list, &probe_list);
432 end:
433         mutex_unlock(&probe_lock);
434         return ret;
435 }
436
437 /* Split symbol and offset. */
438 static int split_symbol_offset(char *symbol, unsigned long *offset)
439 {
440         char *tmp;
441         int ret;
442
443         if (!offset)
444                 return -EINVAL;
445
446         tmp = strchr(symbol, '+');
447         if (tmp) {
448                 /* skip the sign because strict_strtoul doesn't accept '+' */
449                 ret = strict_strtoul(tmp + 1, 0, offset);
450                 if (ret)
451                         return ret;
452                 *tmp = '\0';
453         } else
454                 *offset = 0;
455         return 0;
456 }
457
458 #define PARAM_MAX_ARGS 16
459 #define PARAM_MAX_STACK (THREAD_SIZE / sizeof(unsigned long))
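/* Upper bound for $stackN: no more slots than fit in one kernel stack. */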
460
461 static int parse_probe_vars(char *arg, struct fetch_func *ff, int is_return)
462 {
463         int ret = 0;
464         unsigned long param;
465
466         if (strcmp(arg, "retval") == 0) {
467                 if (is_return) {
468                         ff->func = fetch_retvalue;
469                         ff->data = NULL;
470                 } else
471                         ret = -EINVAL;
472         } else if (strncmp(arg, "stack", 5) == 0) {
473                 if (arg[5] == '\0') {
474                         ff->func = fetch_stack_address;
475                         ff->data = NULL;
476                 } else if (isdigit(arg[5])) {
477                         ret = strict_strtoul(arg + 5, 10, &param);
478                         if (ret || param > PARAM_MAX_STACK)
479                                 ret = -EINVAL;
480                         else {
481                                 ff->func = fetch_stack;
482                                 ff->data = (void *)param;
483                         }
484                 } else
485                         ret = -EINVAL;
486         } else
487                 ret = -EINVAL;
488         return ret;
489 }
490
491 /* Recursive argument parser */
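/*
 * For example (x86 register name assumed), "+4(%ax)" becomes a fetch_indirect
 * whose inner fetch is fetch_register; nested forms such as "+0(+8(%ax))"
 * simply recurse through this parser again.
 */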
492 static int __parse_probe_arg(char *arg, struct fetch_func *ff, int is_return)
493 {
494         int ret = 0;
495         unsigned long param;
496         long offset;
497         char *tmp;
498
499         switch (arg[0]) {
500         case '$':
501                 ret = parse_probe_vars(arg + 1, ff, is_return);
502                 break;
503         case '%':       /* named register */
504                 ret = regs_query_register_offset(arg + 1);
505                 if (ret >= 0) {
506                         ff->func = fetch_register;
507                         ff->data = (void *)(unsigned long)ret;
508                         ret = 0;
509                 }
510                 break;
511         case '@':       /* memory or symbol */
512                 if (isdigit(arg[1])) {
513                         ret = strict_strtoul(arg + 1, 0, &param);
514                         if (ret)
515                                 break;
516                         ff->func = fetch_memory;
517                         ff->data = (void *)param;
518                 } else {
519                         ret = split_symbol_offset(arg + 1, &offset);
520                         if (ret)
521                                 break;
522                         ff->data = alloc_symbol_cache(arg + 1, offset);
523                         if (ff->data)
524                                 ff->func = fetch_symbol;
525                         else
526                                 ret = -EINVAL;
527                 }
528                 break;
529         case '+':       /* indirect memory */
530         case '-':
531                 tmp = strchr(arg, '(');
532                 if (!tmp) {
533                         ret = -EINVAL;
534                         break;
535                 }
536                 *tmp = '\0';
537                 ret = strict_strtol(arg + 1, 0, &offset);
538                 if (ret)
539                         break;
540                 if (arg[0] == '-')
541                         offset = -offset;
542                 arg = tmp + 1;
543                 tmp = strrchr(arg, ')');
544                 if (tmp) {
545                         struct indirect_fetch_data *id;
546                         *tmp = '\0';
547                         id = kzalloc(sizeof(struct indirect_fetch_data),
548                                      GFP_KERNEL);
549                         if (!id)
550                                 return -ENOMEM;
551                         id->offset = offset;
552                         ret = __parse_probe_arg(arg, &id->orig, is_return);
553                         if (ret)
554                                 kfree(id);
555                         else {
556                                 ff->func = fetch_indirect;
557                                 ff->data = (void *)id;
558                         }
559                 } else
560                         ret = -EINVAL;
561                 break;
562         default:
563                 /* TODO: support custom handler */
564                 ret = -EINVAL;
565         }
566         return ret;
567 }
568
569 /* String length checking wrapper */
570 static int parse_probe_arg(char *arg, struct fetch_func *ff, int is_return)
571 {
572         if (strlen(arg) > MAX_ARGSTR_LEN) {
573                 pr_info("Argument is too long: %s\n", arg);
574                 return -ENOSPC;
575         }
576         return __parse_probe_arg(arg, ff, is_return);
577 }
578
579 /* Return 1 if name is reserved or already used by another argument */
580 static int conflict_field_name(const char *name,
581                                struct probe_arg *args, int narg)
582 {
583         int i;
584         for (i = 0; i < ARRAY_SIZE(reserved_field_names); i++)
585                 if (strcmp(reserved_field_names[i], name) == 0)
586                         return 1;
587         for (i = 0; i < narg; i++)
588                 if (strcmp(args[i].name, name) == 0)
589                         return 1;
590         return 0;
591 }
592
593 static int create_trace_probe(int argc, char **argv)
594 {
595         /*
596          * Argument syntax:
597          *  - Add kprobe: p[:[GRP/]EVENT] KSYM[+OFFS]|KADDR [FETCHARGS]
598          *  - Add kretprobe: r[:[GRP/]EVENT] KSYM[+0] [FETCHARGS]
599          * Fetch args:
600          *  $retval     : fetch return value
601          *  $stack      : fetch stack address
602          *  $stackN     : fetch Nth of stack (N:0-)
603          *  @ADDR       : fetch memory at ADDR (ADDR should be in kernel)
604          *  @SYM[+|-offs] : fetch memory at SYM +|- offs (SYM is a data symbol)
605          *  %REG        : fetch register REG
606          * Indirect memory fetch:
607          *  +|-offs(ARG) : fetch memory at ARG +|- offs address.
608          * Alias name of args:
609          *  NAME=FETCHARG : set NAME as alias of FETCHARG.
610          */
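        /*
         * Example commands, written to the kprobe_events debugfs file
         * created in init_kprobe_trace() (register names are illustrative,
         * x86 assumed):
         *   p:myprobe do_sys_open dfd=%ax filename=%dx
         *   r:myretprobe do_sys_open $retval
         */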
611         struct trace_probe *tp;
612         int i, ret = 0;
613         int is_return = 0, is_delete = 0;
614         char *symbol = NULL, *event = NULL, *arg = NULL, *group = NULL;
615         unsigned long offset = 0;
616         void *addr = NULL;
617         char buf[MAX_EVENT_NAME_LEN];
618
619         /* argc must be >= 1 */
620         if (argv[0][0] == 'p')
621                 is_return = 0;
622         else if (argv[0][0] == 'r')
623                 is_return = 1;
624         else if (argv[0][0] == '-')
625                 is_delete = 1;
626         else {
627                 pr_info("Probe definition must start with 'p', 'r' or"
628                         " '-'.\n");
629                 return -EINVAL;
630         }
631
632         if (argv[0][1] == ':') {
633                 event = &argv[0][2];
634                 if (strchr(event, '/')) {
635                         group = event;
636                         event = strchr(group, '/') + 1;
637                         event[-1] = '\0';
638                         if (strlen(group) == 0) {
639                                 pr_info("Group name is not specified\n");
640                                 return -EINVAL;
641                         }
642                 }
643                 if (strlen(event) == 0) {
644                         pr_info("Event name is not specified\n");
645                         return -EINVAL;
646                 }
647         }
648         if (!group)
649                 group = KPROBE_EVENT_SYSTEM;
650
651         if (is_delete) {
652                 if (!event) {
653                         pr_info("Delete command needs an event name.\n");
654                         return -EINVAL;
655                 }
656                 tp = find_probe_event(event, group);
657                 if (!tp) {
658                         pr_info("Event %s/%s doesn't exist.\n", group, event);
659                         return -ENOENT;
660                 }
661                 /* delete an event */
662                 unregister_trace_probe(tp);
663                 free_trace_probe(tp);
664                 return 0;
665         }
666
667         if (argc < 2) {
668                 pr_info("Probe point is not specified.\n");
669                 return -EINVAL;
670         }
671         if (isdigit(argv[1][0])) {
672                 if (is_return) {
673                         pr_info("Return probe point must be a symbol.\n");
674                         return -EINVAL;
675                 }
676                 /* an address specified */
677                 ret = strict_strtoul(&argv[1][0], 0, (unsigned long *)&addr);
678                 if (ret) {
679                         pr_info("Failed to parse address.\n");
680                         return ret;
681                 }
682         } else {
683                 /* a symbol specified */
684                 symbol = argv[1];
685                 /* TODO: support .init module functions */
686                 ret = split_symbol_offset(symbol, &offset);
687                 if (ret) {
688                         pr_info("Failed to parse symbol.\n");
689                         return ret;
690                 }
691                 if (offset && is_return) {
692                         pr_info("Return probe must be used without offset.\n");
693                         return -EINVAL;
694                 }
695         }
696         argc -= 2; argv += 2;
697
698         /* setup a probe */
699         if (!event) {
700                 /* Make a new event name */
701                 if (symbol)
702                         snprintf(buf, MAX_EVENT_NAME_LEN, "%c_%s_%ld",
703                                  is_return ? 'r' : 'p', symbol, offset);
704                 else
705                         snprintf(buf, MAX_EVENT_NAME_LEN, "%c_0x%p",
706                                  is_return ? 'r' : 'p', addr);
707                 event = buf;
708         }
709         tp = alloc_trace_probe(group, event, addr, symbol, offset, argc,
710                                is_return);
711         if (IS_ERR(tp)) {
712                 pr_info("Failed to allocate trace_probe (%d)\n",
713                         (int)PTR_ERR(tp));
714                 return PTR_ERR(tp);
715         }
716
717         /* parse arguments */
718         ret = 0;
719         for (i = 0; i < argc && i < MAX_TRACE_ARGS; i++) {
720                 /* Parse argument name */
721                 arg = strchr(argv[i], '=');
722                 if (arg)
723                         *arg++ = '\0';
724                 else
725                         arg = argv[i];
726
727                 if (conflict_field_name(argv[i], tp->args, i)) {
728                         pr_info("Argument%d name '%s' conflicts with "
729                                 "another field.\n", i, argv[i]);
730                         ret = -EINVAL;
731                         goto error;
732                 }
733
734                 tp->args[i].name = kstrdup(argv[i], GFP_KERNEL);
735                 if (!tp->args[i].name) {
736                         pr_info("Failed to allocate argument%d name '%s'.\n",
737                                 i, argv[i]);
738                         ret = -ENOMEM;
739                         goto error;
740                 }
741
742                 /* Parse fetch argument */
743                 ret = parse_probe_arg(arg, &tp->args[i].fetch, is_return);
744                 if (ret) {
745                         pr_info("Parse error at argument%d. (%d)\n", i, ret);
746                         kfree(tp->args[i].name);
747                         goto error;
748                 }
749
750                 tp->nr_args++;
751         }
752
753         ret = register_trace_probe(tp);
754         if (ret)
755                 goto error;
756         return 0;
757
758 error:
759         free_trace_probe(tp);
760         return ret;
761 }
762
763 static void cleanup_all_probes(void)
764 {
765         struct trace_probe *tp;
766
767         mutex_lock(&probe_lock);
768         /* TODO: Use batch unregistration */
769         while (!list_empty(&probe_list)) {
770                 tp = list_entry(probe_list.next, struct trace_probe, list);
771                 unregister_trace_probe(tp);
772                 free_trace_probe(tp);
773         }
774         mutex_unlock(&probe_lock);
775 }
776
777
778 /* Probes listing interfaces */
779 static void *probes_seq_start(struct seq_file *m, loff_t *pos)
780 {
781         mutex_lock(&probe_lock);
782         return seq_list_start(&probe_list, *pos);
783 }
784
785 static void *probes_seq_next(struct seq_file *m, void *v, loff_t *pos)
786 {
787         return seq_list_next(v, &probe_list, pos);
788 }
789
790 static void probes_seq_stop(struct seq_file *m, void *v)
791 {
792         mutex_unlock(&probe_lock);
793 }
794
795 static int probes_seq_show(struct seq_file *m, void *v)
796 {
797         struct trace_probe *tp = v;
798         int i, ret;
799         char buf[MAX_ARGSTR_LEN + 1];
800
801         seq_printf(m, "%c", probe_is_return(tp) ? 'r' : 'p');
802         seq_printf(m, ":%s/%s", tp->call.class->system, tp->call.name);
803
804         if (!tp->symbol)
805                 seq_printf(m, " 0x%p", tp->rp.kp.addr);
806         else if (tp->rp.kp.offset)
807                 seq_printf(m, " %s+%u", probe_symbol(tp), tp->rp.kp.offset);
808         else
809                 seq_printf(m, " %s", probe_symbol(tp));
810
811         for (i = 0; i < tp->nr_args; i++) {
812                 ret = probe_arg_string(buf, MAX_ARGSTR_LEN, &tp->args[i].fetch);
813                 if (ret < 0) {
814                         pr_warning("Argument%d decoding error(%d).\n", i, ret);
815                         return ret;
816                 }
817                 seq_printf(m, " %s=%s", tp->args[i].name, buf);
818         }
819         seq_printf(m, "\n");
820         return 0;
821 }
822
823 static const struct seq_operations probes_seq_op = {
824         .start  = probes_seq_start,
825         .next   = probes_seq_next,
826         .stop   = probes_seq_stop,
827         .show   = probes_seq_show
828 };
829
830 static int probes_open(struct inode *inode, struct file *file)
831 {
832         if ((file->f_mode & FMODE_WRITE) &&
833             (file->f_flags & O_TRUNC))
834                 cleanup_all_probes();
835
836         return seq_open(file, &probes_seq_op);
837 }
838
839 static int command_trace_probe(const char *buf)
840 {
841         char **argv;
842         int argc = 0, ret = 0;
843
844         argv = argv_split(GFP_KERNEL, buf, &argc);
845         if (!argv)
846                 return -ENOMEM;
847
848         if (argc)
849                 ret = create_trace_probe(argc, argv);
850
851         argv_free(argv);
852         return ret;
853 }
854
855 #define WRITE_BUFSIZE 128
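/* Commands are handled one line at a time; a single line (up to its '\n')
 * must fit within WRITE_BUFSIZE, see probes_write() below. */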
856
857 static ssize_t probes_write(struct file *file, const char __user *buffer,
858                             size_t count, loff_t *ppos)
859 {
860         char *kbuf, *tmp;
861         int ret;
862         size_t done;
863         size_t size;
864
865         kbuf = kmalloc(WRITE_BUFSIZE, GFP_KERNEL);
866         if (!kbuf)
867                 return -ENOMEM;
868
869         ret = done = 0;
870         while (done < count) {
871                 size = count - done;
872                 if (size >= WRITE_BUFSIZE)
873                         size = WRITE_BUFSIZE - 1;
874                 if (copy_from_user(kbuf, buffer + done, size)) {
875                         ret = -EFAULT;
876                         goto out;
877                 }
878                 kbuf[size] = '\0';
879                 tmp = strchr(kbuf, '\n');
880                 if (tmp) {
881                         *tmp = '\0';
882                         size = tmp - kbuf + 1;
883                 } else if (done + size < count) {
884                         pr_warning("Line is too long: "
885                                    "should be less than %d characters.\n", WRITE_BUFSIZE);
886                         ret = -EINVAL;
887                         goto out;
888                 }
889                 done += size;
890                 /* Remove comments */
891                 tmp = strchr(kbuf, '#');
892                 if (tmp)
893                         *tmp = '\0';
894
895                 ret = command_trace_probe(kbuf);
896                 if (ret)
897                         goto out;
898         }
899         ret = done;
900 out:
901         kfree(kbuf);
902         return ret;
903 }
904
905 static const struct file_operations kprobe_events_ops = {
906         .owner          = THIS_MODULE,
907         .open           = probes_open,
908         .read           = seq_read,
909         .llseek         = seq_lseek,
910         .release        = seq_release,
911         .write          = probes_write,
912 };
913
914 /* Probes profiling interfaces */
915 static int probes_profile_seq_show(struct seq_file *m, void *v)
916 {
917         struct trace_probe *tp = v;
918
919         seq_printf(m, "  %-44s %15lu %15lu\n", tp->call.name, tp->nhit,
920                    tp->rp.kp.nmissed);
921
922         return 0;
923 }
924
925 static const struct seq_operations profile_seq_op = {
926         .start  = probes_seq_start,
927         .next   = probes_seq_next,
928         .stop   = probes_seq_stop,
929         .show   = probes_profile_seq_show
930 };
931
932 static int profile_open(struct inode *inode, struct file *file)
933 {
934         return seq_open(file, &profile_seq_op);
935 }
936
937 static const struct file_operations kprobe_profile_ops = {
938         .owner          = THIS_MODULE,
939         .open           = profile_open,
940         .read           = seq_read,
941         .llseek         = seq_lseek,
942         .release        = seq_release,
943 };
944
945 /* Kprobe handler */
946 static __kprobes void kprobe_trace_func(struct kprobe *kp, struct pt_regs *regs)
947 {
948         struct trace_probe *tp = container_of(kp, struct trace_probe, rp.kp);
949         struct kprobe_trace_entry *entry;
950         struct ring_buffer_event *event;
951         struct ring_buffer *buffer;
952         int size, i, pc;
953         unsigned long irq_flags;
954         struct ftrace_event_call *call = &tp->call;
955
956         tp->nhit++;
957
958         local_save_flags(irq_flags);
959         pc = preempt_count();
960
961         size = SIZEOF_KPROBE_TRACE_ENTRY(tp->nr_args);
962
963         event = trace_current_buffer_lock_reserve(&buffer, call->id, size,
964                                                   irq_flags, pc);
965         if (!event)
966                 return;
967
968         entry = ring_buffer_event_data(event);
969         entry->nargs = tp->nr_args;
970         entry->ip = (unsigned long)kp->addr;
971         for (i = 0; i < tp->nr_args; i++)
972                 entry->args[i] = call_fetch(&tp->args[i].fetch, regs);
973
974         if (!filter_current_check_discard(buffer, call, entry, event))
975                 trace_nowake_buffer_unlock_commit(buffer, event, irq_flags, pc);
976 }
977
978 /* Kretprobe handler */
979 static __kprobes void kretprobe_trace_func(struct kretprobe_instance *ri,
980                                           struct pt_regs *regs)
981 {
982         struct trace_probe *tp = container_of(ri->rp, struct trace_probe, rp);
983         struct kretprobe_trace_entry *entry;
984         struct ring_buffer_event *event;
985         struct ring_buffer *buffer;
986         int size, i, pc;
987         unsigned long irq_flags;
988         struct ftrace_event_call *call = &tp->call;
989
990         local_save_flags(irq_flags);
991         pc = preempt_count();
992
993         size = SIZEOF_KRETPROBE_TRACE_ENTRY(tp->nr_args);
994
995         event = trace_current_buffer_lock_reserve(&buffer, call->id, size,
996                                                   irq_flags, pc);
997         if (!event)
998                 return;
999
1000         entry = ring_buffer_event_data(event);
1001         entry->nargs = tp->nr_args;
1002         entry->func = (unsigned long)tp->rp.kp.addr;
1003         entry->ret_ip = (unsigned long)ri->ret_addr;
1004         for (i = 0; i < tp->nr_args; i++)
1005                 entry->args[i] = call_fetch(&tp->args[i].fetch, regs);
1006
1007         if (!filter_current_check_discard(buffer, call, entry, event))
1008                 trace_nowake_buffer_unlock_commit(buffer, event, irq_flags, pc);
1009 }
1010
1011 /* Event entry printers */
1012 enum print_line_t
1013 print_kprobe_event(struct trace_iterator *iter, int flags,
1014                    struct trace_event *event)
1015 {
1016         struct kprobe_trace_entry *field;
1017         struct trace_seq *s = &iter->seq;
1018         struct trace_probe *tp;
1019         int i;
1020
1021         field = (struct kprobe_trace_entry *)iter->ent;
1022         tp = container_of(event, struct trace_probe, call.event);
1023
1024         if (!trace_seq_printf(s, "%s: (", tp->call.name))
1025                 goto partial;
1026
1027         if (!seq_print_ip_sym(s, field->ip, flags | TRACE_ITER_SYM_OFFSET))
1028                 goto partial;
1029
1030         if (!trace_seq_puts(s, ")"))
1031                 goto partial;
1032
1033         for (i = 0; i < field->nargs; i++)
1034                 if (!trace_seq_printf(s, " %s=%lx",
1035                                       tp->args[i].name, field->args[i]))
1036                         goto partial;
1037
1038         if (!trace_seq_puts(s, "\n"))
1039                 goto partial;
1040
1041         return TRACE_TYPE_HANDLED;
1042 partial:
1043         return TRACE_TYPE_PARTIAL_LINE;
1044 }
1045
1046 enum print_line_t
1047 print_kretprobe_event(struct trace_iterator *iter, int flags,
1048                       struct trace_event *event)
1049 {
1050         struct kretprobe_trace_entry *field;
1051         struct trace_seq *s = &iter->seq;
1052         struct trace_probe *tp;
1053         int i;
1054
1055         field = (struct kretprobe_trace_entry *)iter->ent;
1056         tp = container_of(event, struct trace_probe, call.event);
1057
1058         if (!trace_seq_printf(s, "%s: (", tp->call.name))
1059                 goto partial;
1060
1061         if (!seq_print_ip_sym(s, field->ret_ip, flags | TRACE_ITER_SYM_OFFSET))
1062                 goto partial;
1063
1064         if (!trace_seq_puts(s, " <- "))
1065                 goto partial;
1066
1067         if (!seq_print_ip_sym(s, field->func, flags & ~TRACE_ITER_SYM_OFFSET))
1068                 goto partial;
1069
1070         if (!trace_seq_puts(s, ")"))
1071                 goto partial;
1072
1073         for (i = 0; i < field->nargs; i++)
1074                 if (!trace_seq_printf(s, " %s=%lx",
1075                                       tp->args[i].name, field->args[i]))
1076                         goto partial;
1077
1078         if (!trace_seq_puts(s, "\n"))
1079                 goto partial;
1080
1081         return TRACE_TYPE_HANDLED;
1082 partial:
1083         return TRACE_TYPE_PARTIAL_LINE;
1084 }
1085
1086 static int probe_event_enable(struct ftrace_event_call *call)
1087 {
1088         struct trace_probe *tp = (struct trace_probe *)call->data;
1089
1090         tp->flags |= TP_FLAG_TRACE;
1091         if (probe_is_return(tp))
1092                 return enable_kretprobe(&tp->rp);
1093         else
1094                 return enable_kprobe(&tp->rp.kp);
1095 }
1096
1097 static void probe_event_disable(struct ftrace_event_call *call)
1098 {
1099         struct trace_probe *tp = (struct trace_probe *)call->data;
1100
1101         tp->flags &= ~TP_FLAG_TRACE;
1102         if (!(tp->flags & (TP_FLAG_TRACE | TP_FLAG_PROFILE))) {
1103                 if (probe_is_return(tp))
1104                         disable_kretprobe(&tp->rp);
1105                 else
1106                         disable_kprobe(&tp->rp.kp);
1107         }
1108 }
1109
1110 static int probe_event_raw_init(struct ftrace_event_call *event_call)
1111 {
1112         return 0;
1113 }
1114
1115 #undef DEFINE_FIELD
1116 #define DEFINE_FIELD(type, item, name, is_signed)                       \
1117         do {                                                            \
1118                 ret = trace_define_field(event_call, #type, name,       \
1119                                          offsetof(typeof(field), item), \
1120                                          sizeof(field.item), is_signed, \
1121                                          FILTER_OTHER);                 \
1122                 if (ret)                                                \
1123                         return ret;                                     \
1124         } while (0)
1125
1126 static int kprobe_event_define_fields(struct ftrace_event_call *event_call)
1127 {
1128         int ret, i;
1129         struct kprobe_trace_entry field;
1130         struct trace_probe *tp = (struct trace_probe *)event_call->data;
1131
1132         DEFINE_FIELD(unsigned long, ip, FIELD_STRING_IP, 0);
1133         DEFINE_FIELD(int, nargs, FIELD_STRING_NARGS, 1);
1134         /* Set argument names as fields */
1135         for (i = 0; i < tp->nr_args; i++)
1136                 DEFINE_FIELD(unsigned long, args[i], tp->args[i].name, 0);
1137         return 0;
1138 }
1139
1140 static int kretprobe_event_define_fields(struct ftrace_event_call *event_call)
1141 {
1142         int ret, i;
1143         struct kretprobe_trace_entry field;
1144         struct trace_probe *tp = (struct trace_probe *)event_call->data;
1145
1146         DEFINE_FIELD(unsigned long, func, FIELD_STRING_FUNC, 0);
1147         DEFINE_FIELD(unsigned long, ret_ip, FIELD_STRING_RETIP, 0);
1148         DEFINE_FIELD(int, nargs, FIELD_STRING_NARGS, 1);
1149         /* Set argument names as fields */
1150         for (i = 0; i < tp->nr_args; i++)
1151                 DEFINE_FIELD(unsigned long, args[i], tp->args[i].name, 0);
1152         return 0;
1153 }
1154
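/*
 * Build the print_fmt string exposed through the event's format file. For
 * example, a kprobe with a single argument named "dfd" (name illustrative)
 * would get:
 *   "(%lx) dfd=%lx", REC->__probe_ip, REC->dfd
 */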
1155 static int __set_print_fmt(struct trace_probe *tp, char *buf, int len)
1156 {
1157         int i;
1158         int pos = 0;
1159
1160         const char *fmt, *arg;
1161
1162         if (!probe_is_return(tp)) {
1163                 fmt = "(%lx)";
1164                 arg = "REC->" FIELD_STRING_IP;
1165         } else {
1166                 fmt = "(%lx <- %lx)";
1167                 arg = "REC->" FIELD_STRING_FUNC ", REC->" FIELD_STRING_RETIP;
1168         }
1169
1170         /* When len=0, we just calculate the needed length */
1171 #define LEN_OR_ZERO (len ? len - pos : 0)
1172
1173         pos += snprintf(buf + pos, LEN_OR_ZERO, "\"%s", fmt);
1174
1175         for (i = 0; i < tp->nr_args; i++) {
1176                 pos += snprintf(buf + pos, LEN_OR_ZERO, " %s=%%lx",
1177                                 tp->args[i].name);
1178         }
1179
1180         pos += snprintf(buf + pos, LEN_OR_ZERO, "\", %s", arg);
1181
1182         for (i = 0; i < tp->nr_args; i++) {
1183                 pos += snprintf(buf + pos, LEN_OR_ZERO, ", REC->%s",
1184                                 tp->args[i].name);
1185         }
1186
1187 #undef LEN_OR_ZERO
1188
1189         /* return the length of print_fmt */
1190         return pos;
1191 }
1192
1193 static int set_print_fmt(struct trace_probe *tp)
1194 {
1195         int len;
1196         char *print_fmt;
1197
1198         /* First: called with 0 length to calculate the needed length */
1199         len = __set_print_fmt(tp, NULL, 0);
1200         print_fmt = kmalloc(len + 1, GFP_KERNEL);
1201         if (!print_fmt)
1202                 return -ENOMEM;
1203
1204         /* Second: actually write the @print_fmt */
1205         __set_print_fmt(tp, print_fmt, len + 1);
1206         tp->call.print_fmt = print_fmt;
1207
1208         return 0;
1209 }
1210
1211 #ifdef CONFIG_PERF_EVENTS
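/*
 * perf variants of the trace handlers above: same field layout, but the
 * record is written to the per-cpu perf trace buffer instead of the
 * ftrace ring buffer.
 */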
1212
1213 /* Kprobe profile handler */
1214 static __kprobes void kprobe_perf_func(struct kprobe *kp,
1215                                          struct pt_regs *regs)
1216 {
1217         struct trace_probe *tp = container_of(kp, struct trace_probe, rp.kp);
1218         struct ftrace_event_call *call = &tp->call;
1219         struct kprobe_trace_entry *entry;
1220         int size, __size, i;
1221         unsigned long irq_flags;
1222         int rctx;
1223
1224         __size = SIZEOF_KPROBE_TRACE_ENTRY(tp->nr_args);
1225         size = ALIGN(__size + sizeof(u32), sizeof(u64));
1226         size -= sizeof(u32);
1227         if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE,
1228                      "profile buffer not large enough"))
1229                 return;
1230
1231         entry = perf_trace_buf_prepare(size, call->id, &rctx, &irq_flags);
1232         if (!entry)
1233                 return;
1234
1235         entry->nargs = tp->nr_args;
1236         entry->ip = (unsigned long)kp->addr;
1237         for (i = 0; i < tp->nr_args; i++)
1238                 entry->args[i] = call_fetch(&tp->args[i].fetch, regs);
1239
1240         perf_trace_buf_submit(entry, size, rctx, entry->ip, 1, irq_flags, regs);
1241 }
1242
1243 /* Kretprobe profile handler */
1244 static __kprobes void kretprobe_perf_func(struct kretprobe_instance *ri,
1245                                             struct pt_regs *regs)
1246 {
1247         struct trace_probe *tp = container_of(ri->rp, struct trace_probe, rp);
1248         struct ftrace_event_call *call = &tp->call;
1249         struct kretprobe_trace_entry *entry;
1250         int size, __size, i;
1251         unsigned long irq_flags;
1252         int rctx;
1253
1254         __size = SIZEOF_KRETPROBE_TRACE_ENTRY(tp->nr_args);
1255         size = ALIGN(__size + sizeof(u32), sizeof(u64));
1256         size -= sizeof(u32);
1257         if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE,
1258                      "profile buffer not large enough"))
1259                 return;
1260
1261         entry = perf_trace_buf_prepare(size, call->id, &rctx, &irq_flags);
1262         if (!entry)
1263                 return;
1264
1265         entry->nargs = tp->nr_args;
1266         entry->func = (unsigned long)tp->rp.kp.addr;
1267         entry->ret_ip = (unsigned long)ri->ret_addr;
1268         for (i = 0; i < tp->nr_args; i++)
1269                 entry->args[i] = call_fetch(&tp->args[i].fetch, regs);
1270
1271         perf_trace_buf_submit(entry, size, rctx, entry->ret_ip, 1,
1272                                irq_flags, regs);
1273 }
1274
1275 static int probe_perf_enable(struct ftrace_event_call *call)
1276 {
1277         struct trace_probe *tp = (struct trace_probe *)call->data;
1278
1279         tp->flags |= TP_FLAG_PROFILE;
1280
1281         if (probe_is_return(tp))
1282                 return enable_kretprobe(&tp->rp);
1283         else
1284                 return enable_kprobe(&tp->rp.kp);
1285 }
1286
1287 static void probe_perf_disable(struct ftrace_event_call *call)
1288 {
1289         struct trace_probe *tp = (struct trace_probe *)call->data;
1290
1291         tp->flags &= ~TP_FLAG_PROFILE;
1292
1293         if (!(tp->flags & TP_FLAG_TRACE)) {
1294                 if (probe_is_return(tp))
1295                         disable_kretprobe(&tp->rp);
1296                 else
1297                         disable_kprobe(&tp->rp.kp);
1298         }
1299 }
1300 #endif  /* CONFIG_PERF_EVENTS */
1301
1302 static __kprobes
1303 int kprobe_register(struct ftrace_event_call *event, enum trace_reg type)
1304 {
1305         switch (type) {
1306         case TRACE_REG_REGISTER:
1307                 return probe_event_enable(event);
1308         case TRACE_REG_UNREGISTER:
1309                 probe_event_disable(event);
1310                 return 0;
1311
1312 #ifdef CONFIG_PERF_EVENTS
1313         case TRACE_REG_PERF_REGISTER:
1314                 return probe_perf_enable(event);
1315         case TRACE_REG_PERF_UNREGISTER:
1316                 probe_perf_disable(event);
1317                 return 0;
1318 #endif
1319         }
1320         return 0;
1321 }
1322
1323 static __kprobes
1324 int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs)
1325 {
1326         struct trace_probe *tp = container_of(kp, struct trace_probe, rp.kp);
1327
1328         if (tp->flags & TP_FLAG_TRACE)
1329                 kprobe_trace_func(kp, regs);
1330 #ifdef CONFIG_PERF_EVENTS
1331         if (tp->flags & TP_FLAG_PROFILE)
1332                 kprobe_perf_func(kp, regs);
1333 #endif
1334         return 0;       /* We don't tweak the kernel, so just return 0 */
1335 }
1336
1337 static __kprobes
1338 int kretprobe_dispatcher(struct kretprobe_instance *ri, struct pt_regs *regs)
1339 {
1340         struct trace_probe *tp = container_of(ri->rp, struct trace_probe, rp);
1341
1342         if (tp->flags & TP_FLAG_TRACE)
1343                 kretprobe_trace_func(ri, regs);
1344 #ifdef CONFIG_PERF_EVENTS
1345         if (tp->flags & TP_FLAG_PROFILE)
1346                 kretprobe_perf_func(ri, regs);
1347 #endif
1348         return 0;       /* We don't tweak the kernel, so just return 0 */
1349 }
1350
1351 static struct trace_event_functions kretprobe_funcs = {
1352         .trace          = print_kretprobe_event
1353 };
1354
1355 static struct trace_event_functions kprobe_funcs = {
1356         .trace          = print_kprobe_event
1357 };
1358
1359 static int register_probe_event(struct trace_probe *tp)
1360 {
1361         struct ftrace_event_call *call = &tp->call;
1362         int ret;
1363
1364         /* Initialize ftrace_event_call */
1365         if (probe_is_return(tp)) {
1366                 INIT_LIST_HEAD(&call->class->fields);
1367                 call->event.funcs = &kretprobe_funcs;
1368                 call->class->raw_init = probe_event_raw_init;
1369                 call->class->define_fields = kretprobe_event_define_fields;
1370         } else {
1371                 INIT_LIST_HEAD(&call->class->fields);
1372                 call->event.funcs = &kprobe_funcs;
1373                 call->class->raw_init = probe_event_raw_init;
1374                 call->class->define_fields = kprobe_event_define_fields;
1375         }
1376         if (set_print_fmt(tp) < 0)
1377                 return -ENOMEM;
1378         call->id = register_ftrace_event(&call->event);
1379         if (!call->id) {
1380                 kfree(call->print_fmt);
1381                 return -ENODEV;
1382         }
1383         call->enabled = 0;
1384         call->class->reg = kprobe_register;
1385         call->data = tp;
1386         ret = trace_add_event_call(call);
1387         if (ret) {
1388                 pr_info("Failed to register kprobe event: %s\n", call->name);
1389                 kfree(call->print_fmt);
1390                 unregister_ftrace_event(&call->event);
1391         }
1392         return ret;
1393 }
1394
1395 static void unregister_probe_event(struct trace_probe *tp)
1396 {
1397         /* tp->event is unregistered in trace_remove_event_call() */
1398         trace_remove_event_call(&tp->call);
1399         kfree(tp->call.print_fmt);
1400 }
1401
1402 /* Make a debugfs interface for controlling probe points */
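/* (with debugfs mounted at the usual place these appear as
 *  /sys/kernel/debug/tracing/kprobe_events and .../kprobe_profile) */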
1403 static __init int init_kprobe_trace(void)
1404 {
1405         struct dentry *d_tracer;
1406         struct dentry *entry;
1407
1408         d_tracer = tracing_init_dentry();
1409         if (!d_tracer)
1410                 return 0;
1411
1412         entry = debugfs_create_file("kprobe_events", 0644, d_tracer,
1413                                     NULL, &kprobe_events_ops);
1414
1415         /* Event list interface */
1416         if (!entry)
1417                 pr_warning("Could not create debugfs "
1418                            "'kprobe_events' entry\n");
1419
1420         /* Profile interface */
1421         entry = debugfs_create_file("kprobe_profile", 0444, d_tracer,
1422                                     NULL, &kprobe_profile_ops);
1423
1424         if (!entry)
1425                 pr_warning("Could not create debugfs "
1426                            "'kprobe_profile' entry\n");
1427         return 0;
1428 }
1429 fs_initcall(init_kprobe_trace);
1430
1431
1432 #ifdef CONFIG_FTRACE_STARTUP_TEST
1433
1434 static int kprobe_trace_selftest_target(int a1, int a2, int a3,
1435                                         int a4, int a5, int a6)
1436 {
1437         return a1 + a2 + a3 + a4 + a5 + a6;
1438 }
1439
1440 static __init int kprobe_trace_self_tests_init(void)
1441 {
1442         int ret, warn = 0;
1443         int (*target)(int, int, int, int, int, int);
1444         struct trace_probe *tp;
1445
1446         target = kprobe_trace_selftest_target;
1447
1448         pr_info("Testing kprobe tracing: ");
1449
1450         ret = command_trace_probe("p:testprobe kprobe_trace_selftest_target "
1451                                   "$stack $stack0 +0($stack)");
1452         if (WARN_ON_ONCE(ret)) {
1453                 pr_warning("error on probing function entry.\n");
1454                 warn++;
1455         } else {
1456                 /* Enable trace point */
1457                 tp = find_probe_event("testprobe", KPROBE_EVENT_SYSTEM);
1458                 if (WARN_ON_ONCE(tp == NULL)) {
1459                         pr_warning("error on getting new probe.\n");
1460                         warn++;
1461                 } else
1462                         probe_event_enable(&tp->call);
1463         }
1464
1465         ret = command_trace_probe("r:testprobe2 kprobe_trace_selftest_target "
1466                                   "$retval");
1467         if (WARN_ON_ONCE(ret)) {
1468                 pr_warning("error on probing function return.\n");
1469                 warn++;
1470         } else {
1471                 /* Enable trace point */
1472                 tp = find_probe_event("testprobe2", KPROBE_EVENT_SYSTEM);
1473                 if (WARN_ON_ONCE(tp == NULL)) {
1474                         pr_warning("error on getting new probe.\n");
1475                         warn++;
1476                 } else
1477                         probe_event_enable(&tp->call);
1478         }
1479
1480         if (warn)
1481                 goto end;
1482
1483         ret = target(1, 2, 3, 4, 5, 6);
1484
1485         ret = command_trace_probe("-:testprobe");
1486         if (WARN_ON_ONCE(ret)) {
1487                 pr_warning("error on deleting a probe.\n");
1488                 warn++;
1489         }
1490
1491         ret = command_trace_probe("-:testprobe2");
1492         if (WARN_ON_ONCE(ret)) {
1493                 pr_warning("error on deleting a probe.\n");
1494                 warn++;
1495         }
1496
1497 end:
1498         cleanup_all_probes();
1499         if (warn)
1500                 pr_cont("NG: Some tests failed. Please check them.\n");
1501         else
1502                 pr_cont("OK\n");
1503         return 0;
1504 }
1505
1506 late_initcall(kprobe_trace_self_tests_init);
1507
1508 #endif