/*
 * builtin-timechart.c - make an svg timechart of system activity
 *
 * (C) Copyright 2009 Intel Corporation
 *
 * Authors:
 *     Arjan van de Ven <arjan@linux.intel.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; version 2
 * of the License.
 */

#include "builtin.h"

#include "util/util.h"

#include "util/color.h"
#include <linux/list.h>
#include "util/cache.h"
#include <linux/rbtree.h>
#include "util/symbol.h"
#include "util/string.h"
#include "util/callchain.h"
#include "util/strlist.h"

#include "perf.h"
#include "util/header.h"
#include "util/parse-options.h"
#include "util/parse-events.h"
#include "util/svghelper.h"

static char             const *input_name = "perf.data";
static char             const *output_name = "output.svg";


static unsigned long    page_size;
static unsigned long    mmap_window = 32;
static u64              sample_type;

static unsigned int     numcpus;
static u64              min_freq;       /* Lowest CPU frequency seen */
static u64              max_freq;       /* Highest CPU frequency seen */
static u64              turbo_frequency;

static u64              first_time, last_time;

static int              power_only;


static struct perf_header       *header;

struct per_pid;
struct per_pidcomm;

struct cpu_sample;
struct power_event;
struct wake_event;

struct sample_wrapper;

/*
 * Data structure layout:
 * We keep a list of "pid"s, matching the kernel's notion of a task struct.
 * Each "pid" entry has a list of "comm"s.
 *      This is because we want to track different programs separately,
 *      while exec reuses the original pid (by design).
 * Each comm has a list of samples that will be used to draw the
 * final graph.
 */
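
/*
 * Roughly (the pids and comms below are made-up examples):
 *
 *   all_data -> per_pid(2301) -> per_pid(1) -> ...
 *                   |
 *                   +-> per_pidcomm("bash") -> per_pidcomm("cc1") -> ...
 *                             |
 *                             +-> cpu_sample -> cpu_sample -> ...
 */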

struct per_pid {
        struct per_pid *next;

        int             pid;
        int             ppid;

        u64             start_time;
        u64             end_time;
        u64             total_time;
        int             display;

        struct per_pidcomm *all;
        struct per_pidcomm *current;

        int painted;
};


struct per_pidcomm {
        struct per_pidcomm *next;

        u64             start_time;
        u64             end_time;
        u64             total_time;

        int             Y;
        int             display;

        long            state;
        u64             state_since;

        char            *comm;

        struct cpu_sample *samples;
};

struct sample_wrapper {
        struct sample_wrapper *next;

        u64             timestamp;
        unsigned char   data[0];
};

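/*
 * Per-comm sample states as this tool uses them (see sched_switch() and
 * sched_wakeup() below): RUNNING = on a CPU, WAITING = runnable but not
 * running, BLOCKED = in (uninterruptible) sleep, NONE = not known yet.
 */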
#define TYPE_NONE       0
#define TYPE_RUNNING    1
#define TYPE_WAITING    2
#define TYPE_BLOCKED    3

struct cpu_sample {
        struct cpu_sample *next;

        u64 start_time;
        u64 end_time;
        int type;
        int cpu;
};

static struct per_pid *all_data;

#define CSTATE 1
#define PSTATE 2

struct power_event {
        struct power_event *next;
        int type;
        int state;
        u64 start_time;
        u64 end_time;
        int cpu;
};

struct wake_event {
        struct wake_event *next;
        int waker;
        int wakee;
        u64 time;
};

static struct power_event    *power_events;
static struct wake_event     *wake_events;

struct sample_wrapper *all_samples;


struct process_filter;
struct process_filter {
        char                    *name;
        int                     pid;
        struct process_filter   *next;
};

static struct process_filter *process_filter;


static struct per_pid *find_create_pid(int pid)
{
        struct per_pid *cursor = all_data;

        while (cursor) {
                if (cursor->pid == pid)
                        return cursor;
                cursor = cursor->next;
        }
        cursor = malloc(sizeof(struct per_pid));
        assert(cursor != NULL);
        memset(cursor, 0, sizeof(struct per_pid));
        cursor->pid = pid;
        cursor->next = all_data;
        all_data = cursor;
        return cursor;
}

static void pid_set_comm(int pid, char *comm)
{
        struct per_pid *p;
        struct per_pidcomm *c;
        p = find_create_pid(pid);
        c = p->all;
        while (c) {
                if (c->comm && strcmp(c->comm, comm) == 0) {
                        p->current = c;
                        return;
                }
                if (!c->comm) {
                        c->comm = strdup(comm);
                        p->current = c;
                        return;
                }
                c = c->next;
        }
        c = malloc(sizeof(struct per_pidcomm));
        assert(c != NULL);
        memset(c, 0, sizeof(struct per_pidcomm));
        c->comm = strdup(comm);
        p->current = c;
        c->next = p->all;
        p->all = c;
}

static void pid_fork(int pid, int ppid, u64 timestamp)
{
        struct per_pid *p, *pp;
        p = find_create_pid(pid);
        pp = find_create_pid(ppid);
        p->ppid = ppid;
        if (pp->current && pp->current->comm && !p->current)
                pid_set_comm(pid, pp->current->comm);

        p->start_time = timestamp;
        if (p->current) {
                p->current->start_time = timestamp;
                p->current->state_since = timestamp;
        }
}

static void pid_exit(int pid, u64 timestamp)
{
        struct per_pid *p;
        p = find_create_pid(pid);
        p->end_time = timestamp;
        if (p->current)
                p->current->end_time = timestamp;
}

static void
pid_put_sample(int pid, int type, unsigned int cpu, u64 start, u64 end)
{
        struct per_pid *p;
        struct per_pidcomm *c;
        struct cpu_sample *sample;

        p = find_create_pid(pid);
        c = p->current;
        if (!c) {
                c = malloc(sizeof(struct per_pidcomm));
                assert(c != NULL);
                memset(c, 0, sizeof(struct per_pidcomm));
                p->current = c;
                c->next = p->all;
                p->all = c;
        }

        sample = malloc(sizeof(struct cpu_sample));
        assert(sample != NULL);
        memset(sample, 0, sizeof(struct cpu_sample));
        sample->start_time = start;
        sample->end_time = end;
        sample->type = type;
        sample->next = c->samples;
        sample->cpu = cpu;
        c->samples = sample;

        if (sample->type == TYPE_RUNNING && end > start && start > 0) {
                c->total_time += (end-start);
                p->total_time += (end-start);
        }

        if (c->start_time == 0 || c->start_time > start)
                c->start_time = start;
        if (p->start_time == 0 || p->start_time > start)
                p->start_time = start;

        if (cpu > numcpus)
                numcpus = cpu;
}

#define MAX_CPUS 4096

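/*
 * Per-CPU bookkeeping: the C-state/P-state each CPU is currently in and
 * the time since when, so that a power_event can be emitted once the
 * state ends.
 */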
static u64 cpus_cstate_start_times[MAX_CPUS];
static int cpus_cstate_state[MAX_CPUS];
static u64 cpus_pstate_start_times[MAX_CPUS];
static u64 cpus_pstate_state[MAX_CPUS];

static int
process_comm_event(event_t *event)
{
        pid_set_comm(event->comm.pid, event->comm.comm);
        return 0;
}
static int
process_fork_event(event_t *event)
{
        pid_fork(event->fork.pid, event->fork.ppid, event->fork.time);
        return 0;
}

static int
process_exit_event(event_t *event)
{
        pid_exit(event->fork.pid, event->fork.time);
        return 0;
}

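/*
 * The structures below mirror the layout of the raw tracepoint records
 * carried in PERF_SAMPLE_RAW data; they are hand-copied here and have to
 * stay field-for-field in sync with the corresponding trace events.
 */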
struct trace_entry {
        u32                     size;
        unsigned short          type;
        unsigned char           flags;
        unsigned char           preempt_count;
        int                     pid;
        int                     tgid;
};

struct power_entry {
        struct trace_entry te;
        s64     type;
        s64     value;
};

#define TASK_COMM_LEN 16
struct wakeup_entry {
        struct trace_entry te;
        char comm[TASK_COMM_LEN];
        int   pid;
        int   prio;
        int   success;
};

/*
 * trace_flag_type is an enumeration that holds different
 * states when a trace occurs. These are:
 *  IRQS_OFF            - interrupts were disabled
 *  IRQS_NOSUPPORT      - arch does not support irqs_disabled_flags
 *  NEED_RESCHED        - reschedule is requested
 *  HARDIRQ             - inside an interrupt handler
 *  SOFTIRQ             - inside a softirq handler
 */
enum trace_flag_type {
        TRACE_FLAG_IRQS_OFF             = 0x01,
        TRACE_FLAG_IRQS_NOSUPPORT       = 0x02,
        TRACE_FLAG_NEED_RESCHED         = 0x04,
        TRACE_FLAG_HARDIRQ              = 0x08,
        TRACE_FLAG_SOFTIRQ              = 0x10,
};



struct sched_switch {
        struct trace_entry te;
        char prev_comm[TASK_COMM_LEN];
        int  prev_pid;
        int  prev_prio;
        long prev_state; /* Arjan weeps. */
        char next_comm[TASK_COMM_LEN];
        int  next_pid;
        int  next_prio;
};

static void c_state_start(int cpu, u64 timestamp, int state)
{
        cpus_cstate_start_times[cpu] = timestamp;
        cpus_cstate_state[cpu] = state;
}

static void c_state_end(int cpu, u64 timestamp)
{
        struct power_event *pwr;
        pwr = malloc(sizeof(struct power_event));
        if (!pwr)
                return;
        memset(pwr, 0, sizeof(struct power_event));

        pwr->state = cpus_cstate_state[cpu];
        pwr->start_time = cpus_cstate_start_times[cpu];
        pwr->end_time = timestamp;
        pwr->cpu = cpu;
        pwr->type = CSTATE;
        pwr->next = power_events;

        power_events = pwr;
}

static void p_state_change(int cpu, u64 timestamp, u64 new_freq)
{
        struct power_event *pwr;

        if (new_freq > 8000000) /* detect invalid data */
                return;

        pwr = malloc(sizeof(struct power_event));
        if (!pwr)
                return;
        memset(pwr, 0, sizeof(struct power_event));

        pwr->state = cpus_pstate_state[cpu];
        pwr->start_time = cpus_pstate_start_times[cpu];
        pwr->end_time = timestamp;
        pwr->cpu = cpu;
        pwr->type = PSTATE;
        pwr->next = power_events;

        if (!pwr->start_time)
                pwr->start_time = first_time;

        power_events = pwr;

        cpus_pstate_state[cpu] = new_freq;
        cpus_pstate_start_times[cpu] = timestamp;

        if ((u64)new_freq > max_freq)
                max_freq = new_freq;

        if (new_freq < min_freq || min_freq == 0)
                min_freq = new_freq;

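        /*
         * Heuristic (assumption): cpufreq tends to list turbo mode as an
         * extra pseudo P-state 1000 kHz above the highest real frequency,
         * so a reading exactly 1000 below the maximum suggests that
         * maximum was the turbo entry.
         */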
        if (new_freq == max_freq - 1000)
                turbo_frequency = max_freq;
}

static void
sched_wakeup(int cpu, u64 timestamp, int pid, struct trace_entry *te)
{
        struct wake_event *we;
        struct per_pid *p;
        struct wakeup_entry *wake = (void *)te;

        we = malloc(sizeof(struct wake_event));
        if (!we)
                return;

        memset(we, 0, sizeof(struct wake_event));
        we->time = timestamp;
        we->waker = pid;

        if ((te->flags & TRACE_FLAG_HARDIRQ) || (te->flags & TRACE_FLAG_SOFTIRQ))
                we->waker = -1;

        we->wakee = wake->pid;
        we->next = wake_events;
        wake_events = we;
        p = find_create_pid(we->wakee);

        if (p && p->current && p->current->state == TYPE_NONE) {
                p->current->state_since = timestamp;
                p->current->state = TYPE_WAITING;
        }
        if (p && p->current && p->current->state == TYPE_BLOCKED) {
                pid_put_sample(p->pid, p->current->state, cpu, p->current->state_since, timestamp);
                p->current->state_since = timestamp;
                p->current->state = TYPE_WAITING;
        }
}

static void sched_switch(int cpu, u64 timestamp, struct trace_entry *te)
{
        struct per_pid *p = NULL, *prev_p;
        struct sched_switch *sw = (void *)te;


        prev_p = find_create_pid(sw->prev_pid);

        p = find_create_pid(sw->next_pid);

        if (prev_p->current && prev_p->current->state != TYPE_NONE)
                pid_put_sample(sw->prev_pid, TYPE_RUNNING, cpu, prev_p->current->state_since, timestamp);
        if (p && p->current) {
                if (p->current->state != TYPE_NONE)
                        pid_put_sample(sw->next_pid, p->current->state, cpu, p->current->state_since, timestamp);

                p->current->state_since = timestamp;
                p->current->state = TYPE_RUNNING;
        }

        if (prev_p->current) {
                prev_p->current->state = TYPE_NONE;
                prev_p->current->state_since = timestamp;
                if (sw->prev_state & 2)         /* TASK_UNINTERRUPTIBLE */
                        prev_p->current->state = TYPE_BLOCKED;
                if (sw->prev_state == 0)        /* TASK_RUNNING: preempted, still runnable */
                        prev_p->current->state = TYPE_WAITING;
        }
}


static int
process_sample_event(event_t *event)
{
        int cursor = 0;
        u64 addr = 0;
        u64 stamp = 0;
        u32 cpu = 0;
        u32 pid = 0;
        struct trace_entry *te;

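        /*
         * The optional fields of a PERF_RECORD_SAMPLE are laid out back to
         * back in the order tested below; step over the ones we do not use
         * and pick out pid, timestamp and cpu.
         */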
        if (sample_type & PERF_SAMPLE_IP)
                cursor++;

        if (sample_type & PERF_SAMPLE_TID) {
                pid = event->sample.array[cursor]>>32;
                cursor++;
        }
        if (sample_type & PERF_SAMPLE_TIME) {
                stamp = event->sample.array[cursor++];

                if (!first_time || first_time > stamp)
                        first_time = stamp;
                if (last_time < stamp)
                        last_time = stamp;

        }
        if (sample_type & PERF_SAMPLE_ADDR)
                addr = event->sample.array[cursor++];
        if (sample_type & PERF_SAMPLE_ID)
                cursor++;
        if (sample_type & PERF_SAMPLE_STREAM_ID)
                cursor++;
        if (sample_type & PERF_SAMPLE_CPU)
                cpu = event->sample.array[cursor++] & 0xFFFFFFFF;
        if (sample_type & PERF_SAMPLE_PERIOD)
                cursor++;

        te = (void *)&event->sample.array[cursor];

        if (sample_type & PERF_SAMPLE_RAW && te->size > 0) {
                char *event_str;
                struct power_entry *pe;

                pe = (void *)te;

                event_str = perf_header__find_event(te->type);

                if (!event_str)
                        return 0;

                if (strcmp(event_str, "power:power_start") == 0)
                        c_state_start(cpu, stamp, pe->value);

                if (strcmp(event_str, "power:power_end") == 0)
                        c_state_end(cpu, stamp);

                if (strcmp(event_str, "power:power_frequency") == 0)
                        p_state_change(cpu, stamp, pe->value);

                if (strcmp(event_str, "sched:sched_wakeup") == 0)
                        sched_wakeup(cpu, stamp, pid, te);

                if (strcmp(event_str, "sched:sched_switch") == 0)
                        sched_switch(cpu, stamp, te);
        }
        return 0;
}

/*
 * After the last sample we need to wrap up the current C- and P-state
 * of each CPU and close it out at the last timestamp seen.
 */
static void end_sample_processing(void)
{
        u64 cpu;
        struct power_event *pwr;

        for (cpu = 0; cpu <= numcpus; cpu++) {
                /* C state */
#if 0
                pwr = malloc(sizeof(struct power_event));
                if (!pwr)
                        return;
                memset(pwr, 0, sizeof(struct power_event));

                pwr->state = cpus_cstate_state[cpu];
                pwr->start_time = cpus_cstate_start_times[cpu];
                pwr->end_time = last_time;
                pwr->cpu = cpu;
                pwr->type = CSTATE;
                pwr->next = power_events;

                power_events = pwr;
#endif
                /* P state */

                pwr = malloc(sizeof(struct power_event));
                if (!pwr)
                        return;
                memset(pwr, 0, sizeof(struct power_event));

                pwr->state = cpus_pstate_state[cpu];
                pwr->start_time = cpus_pstate_start_times[cpu];
                pwr->end_time = last_time;
                pwr->cpu = cpu;
                pwr->type = PSTATE;
                pwr->next = power_events;

                if (!pwr->start_time)
                        pwr->start_time = first_time;
                if (!pwr->state)
                        pwr->state = min_freq;
                power_events = pwr;
        }
}

static u64 sample_time(event_t *event)
{
        int cursor;

        cursor = 0;
        if (sample_type & PERF_SAMPLE_IP)
                cursor++;
        if (sample_type & PERF_SAMPLE_TID)
                cursor++;
        if (sample_type & PERF_SAMPLE_TIME)
                return event->sample.array[cursor];
        return 0;
}


/*
 * We first queue all events, sorted backwards by insertion.
 * The order will get flipped later.
 */
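/*
 * Samples arrive roughly in time order, so keeping the queue newest-first
 * makes the head the common insertion point; sort_queued_samples() then
 * reverses the list into chronological order in one pass.
 */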
static int
queue_sample_event(event_t *event)
{
        struct sample_wrapper *copy, *prev;
        int size;

        size = event->sample.header.size + sizeof(struct sample_wrapper) + 8;

        copy = malloc(size);
        if (!copy)
                return 1;

        memset(copy, 0, size);

        copy->next = NULL;
        copy->timestamp = sample_time(event);

        memcpy(&copy->data, event, event->sample.header.size);

        /* insert in the right place in the list */

        if (!all_samples) {
                /* first sample ever */
                all_samples = copy;
                return 0;
        }

        if (all_samples->timestamp < copy->timestamp) {
                /* insert at the head of the list */
                copy->next = all_samples;
                all_samples = copy;
                return 0;
        }

        prev = all_samples;
        while (prev->next) {
                if (prev->next->timestamp < copy->timestamp) {
                        copy->next = prev->next;
                        prev->next = copy;
                        return 0;
                }
                prev = prev->next;
        }
        /* insert at the end of the list */
        prev->next = copy;

        return 0;
}

static void sort_queued_samples(void)
{
        struct sample_wrapper *cursor, *next;

        cursor = all_samples;
        all_samples = NULL;

        while (cursor) {
                next = cursor->next;
                cursor->next = all_samples;
                all_samples = cursor;
                cursor = next;
        }
}

/*
 * Sort the per-pid data structure
 */
static void sort_pids(void)
{
        struct per_pid *new_list, *p, *cursor, *prev;
        /* sort by ppid first, then by pid, lowest to highest */

        new_list = NULL;

        while (all_data) {
                p = all_data;
                all_data = p->next;
                p->next = NULL;

                if (new_list == NULL) {
                        new_list = p;
                        p->next = NULL;
                        continue;
                }
                prev = NULL;
                cursor = new_list;
                while (cursor) {
                        if (cursor->ppid > p->ppid ||
                                (cursor->ppid == p->ppid && cursor->pid > p->pid)) {
                                /* must insert before */
                                if (prev) {
                                        p->next = prev->next;
                                        prev->next = p;
                                        cursor = NULL;
                                        continue;
                                } else {
                                        p->next = new_list;
                                        new_list = p;
                                        cursor = NULL;
                                        continue;
                                }
                        }

                        prev = cursor;
                        cursor = cursor->next;
                        if (!cursor)
                                prev->next = p;
                }
        }
        all_data = new_list;
}


static void draw_c_p_states(void)
{
        struct power_event *pwr;
        pwr = power_events;

        /*
         * two pass drawing so that the P state bars are on top of the C state blocks
         */
        while (pwr) {
                if (pwr->type == CSTATE)
                        svg_cstate(pwr->cpu, pwr->start_time, pwr->end_time, pwr->state);
                pwr = pwr->next;
        }

        pwr = power_events;
        while (pwr) {
                if (pwr->type == PSTATE) {
                        if (!pwr->state)
                                pwr->state = min_freq;
                        svg_pstate(pwr->cpu, pwr->start_time, pwr->end_time, pwr->state);
                }
                pwr = pwr->next;
        }
}

static void draw_wakeups(void)
{
        struct wake_event *we;
        struct per_pid *p;
        struct per_pidcomm *c;

        we = wake_events;
        while (we) {
                int from = 0, to = 0;
                char *task_from = NULL, *task_to = NULL;

                /* locate the column of the waker and wakee */
                p = all_data;
                while (p) {
                        if (p->pid == we->waker || p->pid == we->wakee) {
                                c = p->all;
                                while (c) {
                                        if (c->Y && c->start_time <= we->time && c->end_time >= we->time) {
                                                if (p->pid == we->waker && !from) {
                                                        from = c->Y;
                                                        task_from = strdup(c->comm);
                                                }
                                                if (p->pid == we->wakee && !to) {
                                                        to = c->Y;
                                                        task_to = strdup(c->comm);
                                                }
                                        }
                                        c = c->next;
                                }
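                                /*
                                 * Fallback: no comm's lifetime covered the
                                 * wakeup time, so take the first comm of
                                 * this pid instead.
                                 */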
                                c = p->all;
                                while (c) {
                                        if (p->pid == we->waker && !from) {
                                                from = c->Y;
                                                task_from = strdup(c->comm);
                                        }
                                        if (p->pid == we->wakee && !to) {
                                                to = c->Y;
                                                task_to = strdup(c->comm);
                                        }
                                        c = c->next;
                                }
                        }
                        p = p->next;
                }

                if (!task_from) {
                        task_from = malloc(40);
                        sprintf(task_from, "[%i]", we->waker);
                }
                if (!task_to) {
                        task_to = malloc(40);
                        sprintf(task_to, "[%i]", we->wakee);
                }

                if (we->waker == -1)
                        svg_interrupt(we->time, to);
                else if (from && to && abs(from - to) == 1)
                        svg_wakeline(we->time, from, to);
                else
                        svg_partial_wakeline(we->time, from, task_from, to, task_to);
                we = we->next;

                free(task_from);
                free(task_to);
        }
}

static void draw_cpu_usage(void)
{
        struct per_pid *p;
        struct per_pidcomm *c;
        struct cpu_sample *sample;
        p = all_data;
        while (p) {
                c = p->all;
                while (c) {
                        sample = c->samples;
                        while (sample) {
                                if (sample->type == TYPE_RUNNING)
                                        svg_process(sample->cpu, sample->start_time, sample->end_time, "sample", c->comm);

                                sample = sample->next;
                        }
                        c = c->next;
                }
                p = p->next;
        }
}

static void draw_process_bars(void)
{
        struct per_pid *p;
        struct per_pidcomm *c;
        struct cpu_sample *sample;
        int Y = 0;

        Y = 2 * numcpus + 2;

        p = all_data;
        while (p) {
                c = p->all;
                while (c) {
                        if (!c->display) {
                                c->Y = 0;
                                c = c->next;
                                continue;
                        }

                        svg_box(Y, c->start_time, c->end_time, "process");
                        sample = c->samples;
                        while (sample) {
                                if (sample->type == TYPE_RUNNING)
                                        svg_sample(Y, sample->cpu, sample->start_time, sample->end_time);
                                if (sample->type == TYPE_BLOCKED)
                                        svg_box(Y, sample->start_time, sample->end_time, "blocked");
                                if (sample->type == TYPE_WAITING)
                                        svg_waiting(Y, sample->start_time, sample->end_time);
                                sample = sample->next;
                        }

                        if (c->comm) {
                                char comm[256];
                                if (c->total_time > 5000000000) /* 5 seconds */
                                        sprintf(comm, "%s:%i (%2.2fs)", c->comm, p->pid, c->total_time / 1000000000.0);
                                else
                                        sprintf(comm, "%s:%i (%3.1fms)", c->comm, p->pid, c->total_time / 1000000.0);

                                svg_text(Y, c->start_time, comm);
                        }
                        c->Y = Y;
                        Y++;
                        c = c->next;
                }
                p = p->next;
        }
}

static void add_process_filter(const char *string)
{
        struct process_filter *filt;
        int pid;

        pid = strtoull(string, NULL, 10);
        filt = malloc(sizeof(struct process_filter));
        if (!filt)
                return;

        filt->name = strdup(string);
        filt->pid  = pid;
        filt->next = process_filter;

        process_filter = filt;
}

static int passes_filter(struct per_pid *p, struct per_pidcomm *c)
{
        struct process_filter *filt;
        if (!process_filter)
                return 1;

        filt = process_filter;
        while (filt) {
                if (filt->pid && p->pid == filt->pid)
                        return 1;
                if (c->comm && strcmp(filt->name, c->comm) == 0)
                        return 1;
                filt = filt->next;
        }
        return 0;
}

static int determine_display_tasks_filtered(void)
{
        struct per_pid *p;
        struct per_pidcomm *c;
        int count = 0;

        p = all_data;
        while (p) {
                p->display = 0;
                if (p->start_time == 1)
                        p->start_time = first_time;

                /* no exit marker, task kept running to the end */
                if (p->end_time == 0)
                        p->end_time = last_time;

                c = p->all;

                while (c) {
                        c->display = 0;

                        if (c->start_time == 1)
                                c->start_time = first_time;

                        if (passes_filter(p, c)) {
                                c->display = 1;
                                p->display = 1;
                                count++;
                        }

                        if (c->end_time == 0)
                                c->end_time = last_time;

                        c = c->next;
                }
                p = p->next;
        }
        return count;
}

static int determine_display_tasks(u64 threshold)
{
        struct per_pid *p;
        struct per_pidcomm *c;
        int count = 0;

        if (process_filter)
                return determine_display_tasks_filtered();

        p = all_data;
        while (p) {
                p->display = 0;
                if (p->start_time == 1)
                        p->start_time = first_time;

                /* no exit marker, task kept running to the end */
                if (p->end_time == 0)
                        p->end_time = last_time;
                if (p->total_time >= threshold && !power_only)
                        p->display = 1;

                c = p->all;

                while (c) {
                        c->display = 0;

                        if (c->start_time == 1)
                                c->start_time = first_time;

                        if (c->total_time >= threshold && !power_only) {
                                c->display = 1;
                                count++;
                        }

                        if (c->end_time == 0)
                                c->end_time = last_time;

                        c = c->next;
                }
                p = p->next;
        }
        return count;
}


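/* Show only tasks that accumulated at least this much CPU time, in nanoseconds (10 msec). */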
#define TIME_THRESH 10000000

static void write_svg_file(const char *filename)
{
        u64 i;
        int count;

        numcpus++;


        count = determine_display_tasks(TIME_THRESH);

        /* We'd like to show at least 15 tasks; be less picky if we have fewer */
        if (count < 15)
                count = determine_display_tasks(TIME_THRESH / 10);

        open_svg(filename, numcpus, count, first_time, last_time);

        svg_time_grid();
        svg_legenda();

        for (i = 0; i < numcpus; i++)
                svg_cpu_box(i, max_freq, turbo_frequency);

        draw_cpu_usage();
        draw_process_bars();
        draw_c_p_states();
        draw_wakeups();

        svg_close();
}

static int
process_event(event_t *event)
{

        switch (event->header.type) {

        case PERF_RECORD_COMM:
                return process_comm_event(event);
        case PERF_RECORD_FORK:
                return process_fork_event(event);
        case PERF_RECORD_EXIT:
                return process_exit_event(event);
        case PERF_RECORD_SAMPLE:
                return queue_sample_event(event);

        /*
         * We don't process these right now, but they are fine:
         */
        case PERF_RECORD_MMAP:
        case PERF_RECORD_THROTTLE:
        case PERF_RECORD_UNTHROTTLE:
                return 0;

        default:
                return -1;
        }

        return 0;
}

static void process_samples(void)
{
        struct sample_wrapper *cursor;
        event_t *event;

        sort_queued_samples();

        cursor = all_samples;
        while (cursor) {
                event = (void *)&cursor->data;
                cursor = cursor->next;
                process_sample_event(event);
        }
}


static int __cmd_timechart(void)
{
        int err, rc = EXIT_FAILURE;
        unsigned long offset = 0;
        unsigned long head, shift;
        struct stat statbuf;
        event_t *event;
        uint32_t size;
        char *buf;
        int input;

        input = open(input_name, O_RDONLY);
        if (input < 0) {
                fprintf(stderr, " failed to open file: %s", input_name);
                if (!strcmp(input_name, "perf.data"))
                        fprintf(stderr, "  (try 'perf record' first)");
                fprintf(stderr, "\n");
                exit(-1);
        }

        err = fstat(input, &statbuf);
        if (err < 0) {
                perror("failed to stat file");
                exit(-1);
        }

        if (!statbuf.st_size) {
                fprintf(stderr, "zero-sized file, nothing to do!\n");
                exit(0);
        }

        header = perf_header__new();
        if (header == NULL)
                return -ENOMEM;

        err = perf_header__read(header, input);
        if (err < 0) {
                perf_header__delete(header);
                return err;
        }

        head = header->data_offset;

        sample_type = perf_header__sample_type(header);

        shift = page_size * (head / page_size);
        offset += shift;
        head -= shift;

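        /*
         * Walk the data section through a sliding window of mmap_window
         * pages; whenever an event would cross the end of the window,
         * slide the window forward and remap.
         */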
remap:
        buf = (char *)mmap(NULL, page_size * mmap_window, PROT_READ,
                           MAP_SHARED, input, offset);
        if (buf == MAP_FAILED) {
                perror("failed to mmap file");
                exit(-1);
        }

more:
        event = (event_t *)(buf + head);

        size = event->header.size;
        if (!size)
                size = 8;

        if (head + event->header.size >= page_size * mmap_window) {
                int ret2;

                shift = page_size * (head / page_size);

                ret2 = munmap(buf, page_size * mmap_window);
                assert(ret2 == 0);

                offset += shift;
                head -= shift;
                goto remap;
        }

        size = event->header.size;

        if (!size || process_event(event) < 0) {
                pr_warning("%p [%p]: skipping unknown header type: %d\n",
                           (void *)(offset + head),
                           (void *)(long)(event->header.size),
                           event->header.type);
                /*
                 * Assume we lost track of the stream, check alignment, and
                 * increment a single u64 in the hope of catching on again 'soon'.
                 */

                if (unlikely(head & 7))
                        head &= ~7ULL;

                size = 8;
        }

        head += size;

        if (offset + head >= header->data_offset + header->data_size)
                goto done;

        if (offset + head < (unsigned long)statbuf.st_size)
                goto more;

done:
        rc = EXIT_SUCCESS;
        close(input);


        process_samples();

        end_sample_processing();

        sort_pids();

        write_svg_file(output_name);

        pr_info("Written %2.1f seconds of trace to %s.\n",
                (last_time - first_time) / 1000000000.0, output_name);

        return rc;
}

static const char * const timechart_usage[] = {
        "perf timechart [<options>] {record}",
        NULL
};

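/*
 * Arguments passed through to 'perf record' by 'perf timechart record':
 * record system-wide (-a), raw sample records (-R), one sample per event
 * (-c 1), for the power and scheduler tracepoints parsed above.
 */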
static const char *record_args[] = {
        "record",
        "-a",
        "-R",
        "-M",
        "-f",
        "-c", "1",
        "-e", "power:power_start",
        "-e", "power:power_end",
        "-e", "power:power_frequency",
        "-e", "sched:sched_wakeup",
        "-e", "sched:sched_switch",
};

static int __cmd_record(int argc, const char **argv)
{
        unsigned int rec_argc, i, j;
        const char **rec_argv;

        rec_argc = ARRAY_SIZE(record_args) + argc - 1;
        rec_argv = calloc(rec_argc + 1, sizeof(char *));
        if (rec_argv == NULL)
                return -ENOMEM;

        for (i = 0; i < ARRAY_SIZE(record_args); i++)
                rec_argv[i] = strdup(record_args[i]);

        for (j = 1; j < (unsigned int)argc; j++, i++)
                rec_argv[i] = argv[j];

        return cmd_record(i, rec_argv, NULL);
}

static int
parse_process(const struct option *opt __used, const char *arg, int __used unset)
{
        if (arg)
                add_process_filter(arg);
        return 0;
}

static const struct option options[] = {
        OPT_STRING('i', "input", &input_name, "file",
                    "input file name"),
        OPT_STRING('o', "output", &output_name, "file",
                    "output file name"),
        OPT_INTEGER('w', "width", &svg_page_width,
                    "page width"),
        OPT_BOOLEAN('P', "power-only", &power_only,
                    "output power data only"),
        OPT_CALLBACK('p', "process", NULL, "process",
                      "process selector. Pass a pid or process name.",
                       parse_process),
        OPT_END()
};


int cmd_timechart(int argc, const char **argv, const char *prefix __used)
{
        symbol__init(0);

        page_size = getpagesize();

        argc = parse_options(argc, argv, options, timechart_usage,
                        PARSE_OPT_STOP_AT_NON_OPTION);

        if (argc && !strncmp(argv[0], "rec", 3))
                return __cmd_record(argc, argv);
        else if (argc)
                usage_with_options(timechart_usage, options);

        setup_pager();

        return __cmd_timechart();
}